diff options
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/proxysql')
134 files changed, 9058 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/proxysql/.github/workflows/ansible-test-plugins.yml b/collections-debian-merged/ansible_collections/community/proxysql/.github/workflows/ansible-test-plugins.yml new file mode 100644 index 00000000..5afa0ec2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/.github/workflows/ansible-test-plugins.yml @@ -0,0 +1,94 @@ +name: Plugins CI +on: + push: + paths: + - 'plugins/**' + - 'tests/**' + - '.github/workflows/ansible-test.yml' + pull_request: + paths: + - 'plugins/**' + - 'tests/**' + - '.github/workflows/ansible-test.yml' + schedule: + - cron: '0 6 * * *' + + +env: + proxysql_version_file: "./ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/defaults/main.yml" + +jobs: + sanity: + name: "Sanity (Python: ${{ matrix.python }}, Ansible: ${{ matrix.ansible }})" + runs-on: ubuntu-latest + strategy: + matrix: + ansible: + - stable-2.9 + - stable-2.10 + - devel + python: + - 2.7 + - 3.8 + steps: + + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/proxysql + + - name: Set up Python ${{ matrix.python }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Run sanity tests + run: ansible-test sanity --docker -v --color + working-directory: ./ansible_collections/community/proxysql + + integration: + name: "Integration (Python: ${{ matrix.python }}, Ansible: ${{ matrix.ansible }}, ProxySQL: ${{ matrix.proxysql }})" + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + proxysql: + - 2.0.12 + ansible: + - stable-2.9 + - stable-2.10 + - devel + python: + - 3.6 + steps: + + - name: Check out code + uses: actions/checkout@v2 + with: + path: 
ansible_collections/community/proxysql + + - name: Set up Python ${{ matrix.python }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Set ProxySQL version (${{ matrix.proxysql }}) + run: "sed -i 's/^proxysql_version:.*/proxysql_version: \"${{ matrix.proxysql }}\"/g' ${{ env.proxysql_version_file }}" + + - name: Run integration tests + run: ansible-test integration --docker -v --color --retry-on-error --continue-on-error --python ${{ matrix.python }} --diff --coverage + working-directory: ./ansible_collections/community/proxysql + + - name: Generate coverage report. + run: ansible-test coverage xml -v --requirements --group-by command --group-by version + working-directory: ./ansible_collections/community/proxysql + + - uses: codecov/codecov-action@v1 + with: + fail_ci_if_error: false diff --git a/collections-debian-merged/ansible_collections/community/proxysql/.github/workflows/ansible-test-roles.yml b/collections-debian-merged/ansible_collections/community/proxysql/.github/workflows/ansible-test-roles.yml new file mode 100644 index 00000000..796b33d4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/.github/workflows/ansible-test-roles.yml @@ -0,0 +1,56 @@ +name: Roles CI +on: + push: + paths: + - 'roles/**' + - '.github/workflows/ansible-test-roles.yml' + pull_request: + paths: + - 'roles/**' + - '.github/workflows/ansible-test-roles.yml' + schedule: + - cron: '0 6 * * *' + +jobs: + molecule: + name: "Molecule (Python: ${{ matrix.python }}, Ansible: ${{ matrix.ansible }}, ProxySQL: ${{ matrix.proxysql }})" + runs-on: ubuntu-latest + env: + PY_COLORS: 1 + ANSIBLE_FORCE_COLOR: 1 + strategy: + matrix: + proxysql: + - 2.0.12 + ansible: + - stable-2.9 + ### it looks like there's errors for 2.10+ with ansible-lint 
(https://github.com/ansible/ansible-lint/pull/878) + ### and molecule (_maybe_ relating to https://github.com/ansible-community/molecule/pull/2547) + # - stable-2.10 + # - devel + python: + - 2.7 + - 3.8 + + steps: + + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/proxysql + + - name: Set up Python ${{ matrix.python }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Install molecule and related dependencies + run: | + pip install ansible-lint docker flake8 molecule testinfra yamllint + + - name: Run molecule default test scenario + run: for d in roles/*/; do (cd "$d" && molecule --version && molecule test) done + working-directory: ./ansible_collections/community/proxysql diff --git a/collections-debian-merged/ansible_collections/community/proxysql/.gitignore b/collections-debian-merged/ansible_collections/community/proxysql/.gitignore new file mode 100644 index 00000000..f4407229 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/.gitignore @@ -0,0 +1,135 @@ +/tests/output/ +/changelogs/.plugin-cache.yaml + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# MacOS +.DS_Store diff --git a/collections-debian-merged/ansible_collections/community/proxysql/CHANGELOG.rst b/collections-debian-merged/ansible_collections/community/proxysql/CHANGELOG.rst new file mode 100644 index 00000000..13f25075 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/CHANGELOG.rst @@ -0,0 +1,14 @@ +=========================================== +Community ProxySQL Collection Release Notes +=========================================== + +.. 
contents:: Topics + + +v1.0.0 +====== + +Release Summary +--------------- + +This is the first proper release of the ``community.proxysql`` collection. This changelog contains all changes to the modules in this collection that were added after the release of Ansible 2.9.0. diff --git a/collections-debian-merged/ansible_collections/community/proxysql/FILES.json b/collections-debian-merged/ansible_collections/community/proxysql/FILES.json new file mode 100644 index 00000000..3d76e7cd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/FILES.json @@ -0,0 +1,1293 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df18179bb2f5447a56ac92261a911649b96821c0b2c08eea62d5cc6b0195203f", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/proxysql.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c609e550d78f21b9da304695610dddf391edf582cc3232308185e126d3bab411", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/mysql.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40da3a84bc167c7b830d29b3395afec29c50b405a5d08dd2287e7b852420628d", + "format": 1 + }, + { + "name": "plugins/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c82ee692702ec1dd604cdbc38ff252114e5204e1b0627045a66c9451e7a918ac", + "format": 1 + }, + { + "name": "plugins/modules", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/proxysql_global_variables.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "014b1dae75b85e284540ec97b5c0b72c1142149d872fb6b82445b9ba3192b481", + "format": 1 + }, + { + "name": "plugins/modules/proxysql_mysql_users.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4133681a67e664d4efcd255912270f595a0ec2674ddee00bd7e3633abd966ea3", + "format": 1 + }, + { + "name": "plugins/modules/proxysql_replication_hostgroups.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dfa280e8cf7fe8039a47014da715579383f5a6abceb5bb212463db638aca8509", + "format": 1 + }, + { + "name": "plugins/modules/proxysql_backend_servers.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f8a407f47b59ed6114386a82842f01b0ccf99c29034cd7f2e526e61fea644e0", + "format": 1 + }, + { + "name": "plugins/modules/proxysql_manage_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e68e00387e306362353be2f45bf1a3d6018db405390325ceb53c2af2c62dd83", + "format": 1 + }, + { + "name": "plugins/modules/proxysql_scheduler.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae1033fdbeea5fe528e0d55846a265937bbead065549fd7c0e6559c86e375fab", + "format": 1 + }, + { + "name": "plugins/modules/proxysql_query_rules.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b0e5c3539c6912aa8b015e3fa0f06c92407e057a46f97e7365a255b66396918", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72685a2fc8a90c5e9b8a70be81685605d47dc104865a9cda5a84636a3ccf38cb", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e54f913747039f5030ddbdf33c05fa6153874b2d9b350b69eb429c5fb0af37f5", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d63b73068e09e23fcf34e358e6d6371b68e8e3c84148cf1f98a3abb4c998a0c0", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d63b73068e09e23fcf34e358e6d6371b68e8e3c84148cf1f98a3abb4c998a0c0", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d63b73068e09e23fcf34e358e6d6371b68e8e3c84148cf1f98a3abb4c998a0c0", + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eddbfe63deb1681baae8762448d5e57f3076cefb35662d67feb54ec1e5e42634", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1936d9cfdbbe1a70fa02d5f3cc11e8687cdc4d44d548919aeb19a1e59c7b7b7", + "format": 1 + }, + { + "name": 
"tests/integration/targets/test_proxysql_global_variables/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/tasks/setvars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "168258643f3bd95eb8789f016b131ea09ee4bce2819cb383b575c6b599ec843a", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c032ed1387f05b1308386b09827dd5d2071a08ce0aabb2b00f24745f226ebb8", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/tasks/base_test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8caca72ab8ae86c7fb6cd97d8ce5f99925ede577aacc0a2e19d11bcc9ccab459", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/tasks/teardown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5eb06a73519cf236fd11f88fee2f3f1eb04589cef94b01ab511f3d39b1f45c14", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/tasks/cleanup_global_variables.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54bef7fcd97b69cf4a3da069ba0ae429805a43d9dfcabf924be2b42fdfaa44fe", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61cf9831e651fc0e656f30f61839dad0525f0ed47252f314ca4cc38ecb73db93", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a59613718451d85eae9128f0aefa9e4c4fc30cad626432a0697175d6e7fe861", + "format": 1 + }, + { + "name": 
"tests/integration/targets/test_proxysql_global_variables/tasks/setup_global_variables.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6e6afa92eb1b90fb60e764ce4e26ac2c8aa3ad4edb08ba0714504f437ffc4254", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d96aa5c50de8eaebd51a45345a72d3b800ad254165f7240e95139108bc14caa", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "086963c3df0cf2346268e594ea1ff2c1afb628d8d18b5b257381eed64e373429", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eddbfe63deb1681baae8762448d5e57f3076cefb35662d67feb54ec1e5e42634", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce4a2f09b5255490267f347e989e9d9a4b661b0609a0704abe15cd606467c92d", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba61b379b03efa80a74bbd07eba90b8eff2a199159d54d0e14ee678ad3c4d91d", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5dc2f21d4a2cb15553fbf442d8fbdd011904d5a58a9435bff480e7077b1950ad", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "537daccbdd694a076d54a6cfa95cdffeb5d50b18e63feb0c976e1d0dae112564", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/base_test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87fe6f756a59f5a10a58382779fc4baf583feafde55b19288ed801edda2775a1", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c72572ca2f1833334aa0275305b889421206cb68487d7bc3e6ffb1bc3e885c7", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b52ae0fc0d46c52c6f60457c61e5e4e197e4f716d3b850967317be9045c6ae06", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/teardown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5eb06a73519cf236fd11f88fee2f3f1eb04589cef94b01ab511f3d39b1f45c14", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"4c5ab3e2c6fcb78cb07a61a05f0561692685d248ab1901795f5d7c8eaa0c8c59", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/setup_test_user.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d21850ce7b8d3afee26a7137e39434b9e995a305494b8e740d17c031ecb2db8", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b640106b84a0b0f8e7fc9060c853b9e999ddc3cff31a6858fe5227c5502d9361", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c86383911ad6210c5b3aa0a8e1a3522d8894f0895faa3b28de72abeab6c063e5", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b934650b16ace7266589cbf764450b19d0f3bf64057cbb2f0a0a3e6b9a78836e", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_mysql_users/tasks/cleanup_test_users.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54b5bbba0fe1bbd8ae157c78a88be3f84eb639e7c5d4755eff4f9a2f3020b897", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eddbfe63deb1681baae8762448d5e57f3076cefb35662d67feb54ec1e5e42634", + "format": 1 + }, + { + "name": 
"tests/integration/targets/test_proxysql_replication_hostgroups/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3feaee8d641e7d1613f08a44ba6cedd07322404008aefb15dbeb17657de41f8e", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bef077a8cbbaae69fac7eff43367a5497edf98113d38a02695ee879f57d093c0", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "01022edd33cf2d2a7f49d200227f4689a93bcd1a8482e14244673d423704dcf2", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/base_test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16a86fb677ed2823a003e6a8e2338619ba87dca669c6bdcc55cc11ba51f6a26e", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a83bca0860b37683a6a9feadb5f4066b194fb6d4feefc073c619d16256eae279", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/cleanup_test_replication_hostgroups.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b6b6f078295022ed0324f666c5881fca1e264aeda492f7c99e055a5e131cbe8", + "format": 1 + }, + { + "name": 
"tests/integration/targets/test_proxysql_replication_hostgroups/tasks/teardown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5eb06a73519cf236fd11f88fee2f3f1eb04589cef94b01ab511f3d39b1f45c14", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60d841f075202f00127a818721c4dc0ddf5a07199e215c11b7c2da6889da3c3e", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7013da6cfd2c2bdbfd7cabe5d43bddadbb8e93874f26881c87e674d7dc58ca8f", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa1bce4601915ba2008e60c427bcd98b579c369429de023a1d107bc89cc44e96", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d74b8006808de5276d348b721e3eb9f6186590ac8cd1c26334fbf7556dc080d2", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7eb53ae5b0acb972c3ae613275e467fd6943ba032e0ccbfa650f822cdf62eb92", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_replication_hostgroups/tasks/setup_test_replication_hostgroups.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a48efcd2b6fe81742087de8accd75894aab94810dd789c61e6656ae83f971d7", + "format": 1 + }, + { + "name": 
"tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c804b499f74343a8b6180a70fba3f51cef4bc9d801e80bf13eda29b258207061", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eddbfe63deb1681baae8762448d5e57f3076cefb35662d67feb54ec1e5e42634", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e08696def2d9f59f2fe266fd449aa725d825cab1938a4ad3e55f540a08e9fd4", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "234bbb9f4f62604bcae828e4c7103cf049f678b84e98f95d3b599a9b25b75372", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8535b7e77eefeede2ccf1a581c5772ccd29ece436bb43fe9354c4ce597d12e7a", + "format": 1 + }, + { + "name": 
"tests/integration/targets/test_proxysql_backend_servers/tasks/base_test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a225bb44915ef203fb774a982fb6ec3236423c243b9b4afacde913c6a61d85a", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/teardown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5eb06a73519cf236fd11f88fee2f3f1eb04589cef94b01ab511f3d39b1f45c14", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4512ae07a5a889f9e4495773dd19d5b17f9ed23d22bd6d212fb66d6deefaae47", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83ea685c14bba012f7140be2361e18826ccc89fb2d37308599ca6d27947d7bf0", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/cleanup_test_servers.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a937e7fed5fa732e705bb5485e10ed3e4ee4ce56732b7cea637a2c15a6c65744", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fec5250a8ecbad1b09e4521dc2970242914314bc8d9a1ef96c7a408b3ec10396", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "610738ad0b54a0bea86cb8194710ced8a6b128a134401a6df3174d9e037ee5e5", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"95e3d27777e3e4d343d5a0f5e9cad32abe15964300e086f057b1bac5dd4bcff8", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/setup_test_server.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d71295c441b825243b18695c1df3906b65b1dbea905d51836d4f4c12185c54e4", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79031a983ca3f7bc8cc121f10ed042d3abbaa14eb0593b083b68045ba5efa89a", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59d22ba4ce6723eb8823b72b97b143bc4a473cffa1d4f7656bcfaffe86fe133c", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_proxysql", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_proxysql/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_proxysql/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b911a398debb99123897fa1bc6c9e47342c1d5e72074298f16bd45b36c2b0ea", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_proxysql/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_proxysql/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed36eb4a2ff1222c1fa7de49b04e1d6ece4ee351c1faca004aff316b7613b24f", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_proxysql/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_proxysql/tasks/install.yml", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "3d9303aabb6ce37aad62f63b5291992fb8abb71e827774480cd9b4a7ab3a0374", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_proxysql/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b642d128d1559de6ced5be05a33c397ce2b9d4ea45ad2ead361da826d39da324", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_proxysql/tasks/config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1893201e678133d0d5161e8580594b07edf1802298e8b03b11dd26ab4fcc85f9", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eddbfe63deb1681baae8762448d5e57f3076cefb35662d67feb54ec1e5e42634", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "286c7b1b1a602e54c6d51577f2510823ad56d3e43bace06440bff6b7ade0e759", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c531a228418fe61c730c0e63ebbeeb953c932fd881452dd5c19c75ac094127f", + "format": 1 + }, + { + "name": 
"tests/integration/targets/test_proxysql_query_rules/tasks/base_test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd3ff7d235d9e7e503fc7207262b4a5162b45b6c968450e79e61cfa98e142355", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "97f96fd67d8d51e5eaf7da0178b08fb897140e86848a65de91cc7408afd44b9a", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "177421ff34d94e86b2fcb2155e1feb79d7cfac3dadf3611fcfce1b60b35b64e1", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/teardown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5eb06a73519cf236fd11f88fee2f3f1eb04589cef94b01ab511f3d39b1f45c14", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "916bbf471171e5090b045fcc7fea752aacb3ec3215aedf0565d89f9c8f60ae09", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "56988f37722ee53580862decc18da4f2794561421b466c28c8dea9efee6e3772", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b153c843eee6d85af677701fa2142118e9215813881ce1766f4b9aae76d7457", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e2eff392d65105633bed8aea1012c6eb482cd2722bee9e9e7afac3aec8feefe9", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/setup_test_query_rule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0181cbc7a47844868c74d41d80961bb3b93b0b91de2e840e54d27950c474d3e9", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "edb40f9b3a8ea0917e308f07d728de9c2350ff2c60592471d0c0cc5a55d385a3", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/test_create_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8954c69af5bdc7c9a5a8df0bda71b075c0dd04695b18144c886848f1255eaad9", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_query_rules/tasks/cleanup_test_query_rules.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b12aba11cb4a53066eb9da3f022966d969f41e87b8611bcf49aaa0abb65dd395", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eddbfe63deb1681baae8762448d5e57f3076cefb35662d67feb54ec1e5e42634", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"31e66cba759c1a94a5c721f270d65536d5e6714fde384439029e6098df1f63f6", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a07b989a13014c97ed5a41c5e97e94a3930307bf7e15534c2a5a28d22be6abb6", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/base_test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f67bb111ef1af5fbef98b2be37601de12c8b326808a003634b92fdd5ae1ab27", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32257e461dfb759bdd8a74b80025b2e7c6c483b6c3f9fe044fd08c942ca8f2a0", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9d68ea7d7daeddfec5c21389ccaf66426fe3d59c397f8f7fd880ab5fcc78783", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/teardown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5eb06a73519cf236fd11f88fee2f3f1eb04589cef94b01ab511f3d39b1f45c14", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0fd853064f18012d04e0b9540863548ef61de9a33c240d77b4dbbb9cf6e1c178", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64f9cdac97b35957aad6140ce0a558d3f5ecb6651690b971c782ff297546bd33", + "format": 1 + 
}, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/setup_test_scheduler.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "698fa198020214b5df4e25da5615fe2d1a6c69a69efccf864ca618e72f6aea49", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/cleanup_test_schedulers.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea739b76aa34521bbf7ab92bcce9998eecae0eb4905a528fa7a44e096e36a333", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/test_create_using_check_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d9c4b763eb46379f79034f467faca1dd85ffeaf367d4c39eb053a50cad4af96", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c254ff8e661671940ac3194dbf3167c03d0c353fd33b2a8e5054f4aa67b845fb", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_with_delayed_persist.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f51e0d0621b57476ba04fd496a33dfbd2668b86a0617067b03317349cb7bc305", + "format": 1 + }, + { + "name": "tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_in_memory_only.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e53b81b893fc6a86d287b1da275bb118e4a290f71751e9aec2138d97be903875", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d720989539ca734a62e3271aabebeba04924b409b4023d91a9b78e30f76f116b", + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"4d7cec009512ade5c939a5a35ad87df0f870ee645652c8be48060453d4444621", + "format": 1 + }, + { + "name": ".gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4173598eb51b77c32e7d2b31d13e97ebfd353c4037b1d771c17d740727c97b92", + "format": 1 + }, + { + "name": "codecov.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "293f9949eb45b62f7bd3372421ddef68bd72395e65ec027586392460ce47e51a", + "format": 1 + }, + { + "name": "roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/proxysql", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/proxysql/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/proxysql/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7128a60b72a19e830ced5267fafcbd869d338cc8fb612da2462889412fd8898e", + "format": 1 + }, + { + "name": "roles/proxysql/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/proxysql/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9506b5ffe10603fa5adb2eeb60e0272f13b750c9b6456ad2f4c9d5154c3f082a", + "format": 1 + }, + { + "name": "roles/proxysql/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/proxysql/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "906da07db64cc561bf13133eb1102af924b54e69ba126ee52a47d82d36da05ea", + "format": 1 + }, + { + "name": "roles/proxysql/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74ebbf0f4d833bd4d7c21cb2af3f8409aec25fbee8619817fed8e3907a711706", + "format": 1 + }, + { + "name": "roles/proxysql/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/proxysql/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/proxysql/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/proxysql/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40a929f59746b4ecfda453275d5c2ac65e0c860da5f6ecafde87ee6998240aac", + "format": 1 + }, + { + "name": "roles/proxysql/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "19945130d2b69746bc62ea15ad389c79d6b2c66da73eea5a1fef2f329c953d2c", + "format": 1 + }, + { + "name": "roles/proxysql/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bb768f9f8c80f7dca2a06d715ef95243e5688cd2f483df863de0efa469c6469", + "format": 1 + }, + { + "name": "roles/proxysql/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "51a20e1f02328909225bbdf6189ad8b565aa7361fb4d337b50f86b6e6b45af4a", + "format": 1 + }, + { + "name": "roles/proxysql/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/proxysql/tasks/users.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a180bea7eefcd358760b0290df266a0cb373d7a5aaa81bf48b1304fafbe966bd", + "format": 1 + }, + { + "name": "roles/proxysql/tasks/setvars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61b6f526c6edfb5e9ce5e3fa3cd2fdd6a63efbb20baa670df590eafffb38b01c", + "format": 1 + }, + { + "name": "roles/proxysql/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2c8649ffbe7dfae537f10f2f7ca590cea14e10146ab857cc5774bbfd27e6e5a", + "format": 1 + }, + { + "name": "roles/proxysql/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"78b9140d0a2b7bd1271fea5a3f7fcab0eda6ee62efc7d90335130e8b98854ecc", + "format": 1 + }, + { + "name": "roles/proxysql/tasks/config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5478f52beeffceef337b9931ba20bf63fc30ebb6228c9ebe42f4ed9222ddd84a", + "format": 1 + }, + { + "name": "roles/proxysql/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/proxysql/templates/client.my.cnf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d93f1748aeeebf51156e4bb034b258872ff962003b2d263e3dabeb0e5468d69a", + "format": 1 + }, + { + "name": "roles/proxysql/templates/proxysql.cnf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23ac685f5a7460daec486176a3aec543d70a3eefb959e18de0205ea2c22eb0f6", + "format": 1 + }, + { + "name": "roles/proxysql/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/proxysql/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b141dc14d3968e920606576539982a82c25c14fb14afbca53c393beca15fcd3f", + "format": 1 + }, + { + "name": "roles/proxysql/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d31aa6c37d7ac61def5551e573528830b89289ed124cce9d92cbd9e15a83e437", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986", + "format": 1 + }, + { + "name": ".github", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/workflows/ansible-test-plugins.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5e07a531b6ce4424316cbe668e9e52e6e9bccf0a60ea6988810b2067e80476e", + "format": 1 + }, + { + "name": 
".github/workflows/ansible-test-roles.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7a589a1da4ade8cf10f7f196810dcaec2128db103f61eb2048a82800d7e63618", + "format": 1 + } + ], + "format": 1 +}
\ No newline at end of file diff --git a/collections-debian-merged/ansible_collections/community/proxysql/LICENSE b/collections-debian-merged/ansible_collections/community/proxysql/LICENSE new file mode 100644 index 00000000..f288702d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. 
If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/collections-debian-merged/ansible_collections/community/proxysql/MANIFEST.json b/collections-debian-merged/ansible_collections/community/proxysql/MANIFEST.json new file mode 100644 index 00000000..1430c194 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/MANIFEST.json @@ -0,0 +1,32 @@ +{ + "collection_info": { + "namespace": "community", + "name": "proxysql", + "version": "1.0.0", + "authors": [ + "Ben Mildren (@bmildren)" + ], + "readme": "README.md", + "tags": [ + "database", + "mysql", + "proxysql" + ], + "description": "ProxySQL collection for Ansible", + "license": [], + "license_file": "LICENSE", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/community.proxysql", + "documentation": "https://github.com/ansible-collections/community.proxysql", + "homepage": "https://github.com/ansible-collections/community.proxysql", + "issues": "https://github.com/ansible-collections/community.proxysql/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14dd89e967b39ff42d7df3de39f9cd1c9e526e8ad05e9d1cfac171a35a6d4901", + "format": 1 + }, + "format": 1 +}
\ No newline at end of file diff --git a/collections-debian-merged/ansible_collections/community/proxysql/README.md b/collections-debian-merged/ansible_collections/community/proxysql/README.md new file mode 100644 index 00000000..41410771 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/README.md @@ -0,0 +1,56 @@ +# ProxySQL collection for Ansible +[![Plugins CI](https://github.com/ansible-collections/community.proxysql/workflows/Plugins%20CI/badge.svg?event=push)](https://github.com/ansible-collections/community.proxysql/actions?query=workflow%3A"Plugins+CI") [![Roles CI](https://github.com/ansible-collections/community.proxysql/workflows/Roles%20CI/badge.svg?event=push)](https://github.com/ansible-collections/community.proxysql/actions?query=workflow%3A"Roles+CI") [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.proxysql)](https://codecov.io/gh/ansible-collections/community.proxysql) + +## Included content + +- **Modules**: + - [proxysql_backend_servers](https://docs.ansible.com/ansible/latest/modules/proxysql_backend_servers_module.html) + - [proxysql_global_variables](https://docs.ansible.com/ansible/latest/modules/proxysql_global_variables_module.html) + - [proxysql_manage_config](https://docs.ansible.com/ansible/latest/modules/proxysql_manage_config_module.html) + - [proxysql_mysql_users](https://docs.ansible.com/ansible/latest/modules/proxysql_mysql_users_module.html) + - [proxysql_query_rules](https://docs.ansible.com/ansible/latest/modules/proxysql_query_rules_module.html) + - [proxysql_replication_hostgroups](https://docs.ansible.com/ansible/latest/modules/proxysql_replication_hostgroups_module.html) + - [proxysql_scheduler](https://docs.ansible.com/ansible/latest/modules/proxysql_scheduler_module.html) +- **Roles**: + - proxysql + +## Tested with Ansible + +- 2.9 +- 2.10 +- devel + +## External requirements + +The ProxySQL modules rely on a MySQL connector. 
The list of supported drivers is below: + +- [PyMySQL](https://github.com/PyMySQL/PyMySQL) +- [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) +- Support for other Python MySQL connectors may be added in a future release. + +## Using this collection + +### Installing the Collection from Ansible Galaxy + +Before using the ProxySQL collection, you need to install it with the Ansible Galaxy CLI: + +```bash +ansible-galaxy collection install community.proxysql +``` + +You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml`, using the format: + +```yaml +--- +collections: + - name: community.proxysql + version: v1.0.0 +``` + +See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. + +## Licensing + +GNU General Public License v3.0 or later. + +See [LICENSE](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text. diff --git a/collections-debian-merged/ansible_collections/community/proxysql/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/community/proxysql/changelogs/changelog.yaml new file mode 100644 index 00000000..5eb25417 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/changelogs/changelog.yaml @@ -0,0 +1,8 @@ +ancestor: null +releases: + 1.0.0: + changes: + release_summary: + 'This is the first proper release of the ``community.proxysql`` collection. + This changelog contains all changes to the modules in this collection that + were added after the release of Ansible 2.9.0.' 
diff --git a/collections-debian-merged/ansible_collections/community/proxysql/changelogs/config.yaml b/collections-debian-merged/ansible_collections/community/proxysql/changelogs/config.yaml new file mode 100644 index 00000000..8412c18c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/changelogs/config.yaml @@ -0,0 +1,31 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +ignore_other_fragment_extensions: true +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Community ProxySQL Collection +trivial_section_name: trivial +use_fqcn: true diff --git a/collections-debian-merged/ansible_collections/community/proxysql/codecov.yml b/collections-debian-merged/ansible_collections/community/proxysql/codecov.yml new file mode 100644 index 00000000..428982b4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/codecov.yml @@ -0,0 +1,2 @@ +fixes: + - "/ansible_collections/community/proxysql/::" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/meta/runtime.yml b/collections-debian-merged/ansible_collections/community/proxysql/meta/runtime.yml new file mode 100644 index 00000000..2ee3c9fa --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/meta/runtime.yml @@ -0,0 +1,2 @@ +--- +requires_ansible: '>=2.9.10' diff --git 
a/collections-debian-merged/ansible_collections/community/proxysql/plugins/README.md b/collections-debian-merged/ansible_collections/community/proxysql/plugins/README.md new file mode 100644 index 00000000..6541cf7c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/plugins/README.md @@ -0,0 +1,31 @@ +# Collections Plugins Directory + +This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that +is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that +would contain module utils and modules respectively. + +Here is an example directory of the majority of plugins currently supported by Ansible: + +``` +└── plugins + ├── action + ├── become + ├── cache + ├── callback + ├── cliconf + ├── connection + ├── filter + ├── httpapi + ├── inventory + ├── lookup + ├── module_utils + ├── modules + ├── netconf + ├── shell + ├── strategy + ├── terminal + ├── test + └── vars +``` + +A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible/2.9/plugins/plugins.html).
\ No newline at end of file diff --git a/collections-debian-merged/ansible_collections/community/proxysql/plugins/doc_fragments/proxysql.py b/collections-debian-merged/ansible_collections/community/proxysql/plugins/doc_fragments/proxysql.py new file mode 100644 index 00000000..ae38b94c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/plugins/doc_fragments/proxysql.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Documentation fragment for ProxySQL connectivity + CONNECTIVITY = r''' +options: + login_user: + description: + - The username used to authenticate to ProxySQL admin interface. + type: str + login_password: + description: + - The password used to authenticate to ProxySQL admin interface. + type: str + login_host: + description: + - The host used to connect to ProxySQL admin interface. + type: str + default: '127.0.0.1' + login_unix_socket: + description: + - The socket used to connect to ProxySQL admin interface. + type: str + login_port: + description: + - The port used to connect to ProxySQL admin interface. + type: int + default: 6032 + config_file: + description: + - Specify a config file from which I(login_user) and I(login_password) + are to be read. + type: path + default: '' +requirements: + - PyMySQL (Python 2.7 and Python 3.X), or + - MySQLdb (Python 2.x) +''' + + # Documentation fragment for managing ProxySQL configuration + MANAGING_CONFIG = r''' +options: + save_to_disk: + description: + - Save config to sqlite db on disk to persist the configuration. + type: bool + default: 'yes' + load_to_runtime: + description: + - Dynamically load config to runtime memory. 
+ type: bool + default: 'yes' +''' diff --git a/collections-debian-merged/ansible_collections/community/proxysql/plugins/module_utils/mysql.py b/collections-debian-merged/ansible_collections/community/proxysql/plugins/module_utils/mysql.py new file mode 100644 index 00000000..b5beb027 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/plugins/module_utils/mysql.py @@ -0,0 +1,110 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Jonathan Mainguy <jon@soh.re>, 2015 +# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module +# +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ansible.module_utils.six.moves import configparser + +try: + import pymysql as mysql_driver + _mysql_cursor_param = 'cursor' +except ImportError: + try: + import MySQLdb as mysql_driver + import MySQLdb.cursors + _mysql_cursor_param = 'cursorclass' + except ImportError: + mysql_driver = None + +mysql_driver_fail_msg = 'The PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) module is required.' 
+ + +def parse_from_mysql_config_file(cnf): + cp = configparser.ConfigParser() + cp.read(cnf) + return cp + + +def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, + ssl_key=None, ssl_ca=None, db=None, cursor_class=None, + connect_timeout=30, autocommit=False, config_overrides_defaults=False): + config = {} + + if config_file and os.path.exists(config_file): + config['read_default_file'] = config_file + cp = parse_from_mysql_config_file(config_file) + # Override some commond defaults with values from config file if needed + if cp and cp.has_section('client') and config_overrides_defaults: + try: + module.params['login_host'] = cp.get('client', 'host', fallback=module.params['login_host']) + module.params['login_port'] = cp.getint('client', 'port', fallback=module.params['login_port']) + except Exception as e: + if "got an unexpected keyword argument 'fallback'" in e.message: + module.fail_json('To use config_overrides_defaults, ' + 'it needs Python 3.5+ as the default interpreter on a target host') + + if ssl_ca is not None or ssl_key is not None or ssl_cert is not None: + config['ssl'] = {} + + if module.params['login_unix_socket']: + config['unix_socket'] = module.params['login_unix_socket'] + else: + config['host'] = module.params['login_host'] + config['port'] = module.params['login_port'] + + # If login_user or login_password are given, they should override the + # config file + if login_user is not None: + config['user'] = login_user + if login_password is not None: + config['passwd'] = login_password + if ssl_cert is not None: + config['ssl']['cert'] = ssl_cert + if ssl_key is not None: + config['ssl']['key'] = ssl_key + if ssl_ca is not None: + config['ssl']['ca'] = ssl_ca + if db is not None: + config['db'] = db + if connect_timeout is not None: + config['connect_timeout'] = connect_timeout + + if _mysql_cursor_param == 'cursor': + # In case of PyMySQL driver: + db_connection = 
mysql_driver.connect(autocommit=autocommit, **config) + else: + # In case of MySQLdb driver + db_connection = mysql_driver.connect(**config) + if autocommit: + db_connection.autocommit(True) + + if cursor_class == 'DictCursor': + return db_connection.cursor(**{_mysql_cursor_param: mysql_driver.cursors.DictCursor}), db_connection + else: + return db_connection.cursor(), db_connection + + +def mysql_common_argument_spec(): + return dict( + login_user=dict(type='str', default=None), + login_password=dict(type='str', no_log=True), + login_host=dict(type='str', default='localhost'), + login_port=dict(type='int', default=3306), + login_unix_socket=dict(type='str'), + config_file=dict(type='path', default='~/.my.cnf'), + connect_timeout=dict(type='int', default=30), + client_cert=dict(type='path', aliases=['ssl_cert']), + client_key=dict(type='path', aliases=['ssl_key']), + ca_cert=dict(type='path', aliases=['ssl_ca']), + ) diff --git a/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_backend_servers.py b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_backend_servers.py new file mode 100644 index 00000000..fe4c7954 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_backend_servers.py @@ -0,0 +1,518 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_backend_servers +author: "Ben Mildren (@bmildren)" +short_description: Adds or removes mysql hosts from proxysql admin interface. +description: + - The M(community.proxysql.proxysql_backend_servers) module adds or removes mysql hosts using + the proxysql admin interface. 
+options: + hostgroup_id: + description: + - The hostgroup in which this mysqld instance is included. An instance + can be part of one or more hostgroups. + type: int + default: 0 + hostname: + description: + - The ip address at which the mysqld instance can be contacted. + type: str + required: True + port: + description: + - The port at which the mysqld instance can be contacted. + type: int + default: 3306 + status: + description: + - ONLINE - Backend server is fully operational. + OFFLINE_SOFT - When a server is put into C(OFFLINE_SOFT) mode, + connections are kept in use until the current + transaction is completed. This allows to gracefully + detach a backend. + OFFLINE_HARD - When a server is put into C(OFFLINE_HARD) mode, the + existing connections are dropped, while new incoming + connections aren't accepted either. + + If omitted the proxysql database default for I(status) is C(ONLINE). + type: str + choices: [ "ONLINE", "OFFLINE_SOFT", "OFFLINE_HARD"] + weight: + description: + - The bigger the weight of a server relative to other weights, the higher + the probability of the server being chosen from the hostgroup. If + omitted the proxysql database default for I(weight) is 1. + type: int + compression: + description: + - If the value of I(compression) is greater than 0, new connections to + that server will use compression. If omitted the proxysql database + default for I(compression) is 0. + type: int + max_connections: + description: + - The maximum number of connections ProxySQL will open to this backend + server. If omitted the proxysql database default for I(max_connections) + is 1000. + type: int + max_replication_lag: + description: + - If greater than 0, ProxySQL will regularly monitor replication lag. If + replication lag goes above I(max_replication_lag), proxysql will + temporarily shun the server until replication catches up. If omitted + the proxysql database default for I(max_replication_lag) is 0. 
+ type: int + use_ssl: + description: + - If I(use_ssl) is set to C(True), connections to this server will be + made using SSL connections. If omitted the proxysql database default + for I(use_ssl) is C(False). + type: bool + max_latency_ms: + description: + - Ping time is monitored regularly. If a host has a ping time greater + than I(max_latency_ms) it is excluded from the connection pool + (although the server stays ONLINE). If omitted the proxysql database + default for I(max_latency_ms) is 0. + type: int + comment: + description: + - Text field that can be used for any purposed defined by the user. + Could be a description of what the host stores, a reminder of when the + host was added or disabled, or a JSON processed by some checker script. + type: str + default: '' + state: + description: + - When C(present) - adds the host, when C(absent) - removes the host. + type: str + choices: [ "present", "absent" ] + default: present +extends_documentation_fragment: +- community.proxysql.proxysql.managing_config +- community.proxysql.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example adds a server, it saves the mysql server config to disk, but +# avoids loading the mysql server config to runtime (this might be because +# several servers are being added and the user wants to push the config to +# runtime in a single batch using the community.general.proxysql_manage_config +# module). It uses supplied credentials to connect to the proxysql admin +# interface. + +- name: Add a server + community.proxysql.proxysql_backend_servers: + login_user: 'admin' + login_password: 'admin' + hostname: 'mysql01' + state: present + load_to_runtime: False + +# This example removes a server, saves the mysql server config to disk, and +# dynamically loads the mysql server config to runtime. It uses credentials +# in a supplied config file to connect to the proxysql admin interface. 
+ +- name: Remove a server + community.proxysql.proxysql_backend_servers: + config_file: '~/proxysql.cnf' + hostname: 'mysql02' + state: absent +''' + +RETURN = ''' +stdout: + description: The mysql host modified or removed from proxysql + returned: On create/update will return the newly modified host, on delete + it will return the deleted record. + type: dict + "sample": { + "changed": true, + "hostname": "192.168.52.1", + "msg": "Added server to mysql_hosts", + "server": { + "comment": "", + "compression": "0", + "hostgroup_id": "1", + "hostname": "192.168.52.1", + "max_connections": "1000", + "max_latency_ms": "0", + "max_replication_lag": "0", + "port": "3306", + "status": "ONLINE", + "use_ssl": "0", + "weight": "1" + }, + "state": "present" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + +# =========================================== +# proxysql module specific support methods. 
#


def perform_checks(module):
    """Validate module parameters before any connection is attempted.

    Fails the module run when a port is outside the valid range, when
    compression or max_replication_lag are outside their documented
    bounds, or when no MySQL driver could be imported.
    """
    if module.params["login_port"] < 0 \
            or module.params["login_port"] > 65535:
        module.fail_json(
            msg="login_port must be a valid unix port number (0-65535)"
        )

    if module.params["port"] < 0 \
            or module.params["port"] > 65535:
        module.fail_json(
            msg="port must be a valid unix port number (0-65535)"
        )

    if module.params["compression"]:
        if module.params["compression"] < 0 \
                or module.params["compression"] > 102400:
            module.fail_json(
                msg="compression must be set between 0 and 102400"
            )

    if module.params["max_replication_lag"]:
        if module.params["max_replication_lag"] < 0 \
                or module.params["max_replication_lag"] > 126144000:
            # Bug fix: the message previously reported the compression
            # bound (102400) instead of the max_replication_lag bound
            # actually enforced above (126144000).
            module.fail_json(
                msg="max_replication_lag must be set between 0 and 126144000"
            )

    if mysql_driver is None:
        module.fail_json(msg=mysql_driver_fail_msg)


def save_config_to_disk(cursor):
    """Persist the in-memory mysql_servers table to ProxySQL's on-disk DB."""
    cursor.execute("SAVE MYSQL SERVERS TO DISK")
    return True


def load_config_to_runtime(cursor):
    """Activate the in-memory mysql_servers table in the ProxySQL runtime."""
    cursor.execute("LOAD MYSQL SERVERS TO RUNTIME")
    return True


class ProxySQLServer(object):
    """Models one backend server row in ProxySQL's mysql_servers table.

    Wraps the CRUD statements against the admin interface plus the
    SAVE/LOAD housekeeping driven by save_to_disk / load_to_runtime.
    """

    def __init__(self, module):
        self.state = module.params["state"]
        self.save_to_disk = module.params["save_to_disk"]
        self.load_to_runtime = module.params["load_to_runtime"]

        # (hostgroup_id, hostname, port) is the natural key of the
        # mysql_servers table; every query below filters on it.
        self.hostgroup_id = module.params["hostgroup_id"]
        self.hostname = module.params["hostname"]
        self.port = module.params["port"]

        config_data_keys = ["status",
                            "weight",
                            "compression",
                            "max_connections",
                            "max_replication_lag",
                            "use_ssl",
                            "max_latency_ms",
                            "comment"]

        # Optional columns; None means "leave the ProxySQL default alone".
        self.config_data = {k: module.params[k] for k in config_data_keys}

    def check_server_config_exists(self, cursor):
        """Return True when a row with this server's natural key exists."""
        query_string = \
            """SELECT count(*) AS `host_count`
               FROM mysql_servers
               WHERE hostgroup_id = %s
                 AND hostname = %s
                 AND port = %s"""

        query_data = \
            [self.hostgroup_id,
             self.hostname,
             self.port]

        cursor.execute(query_string, query_data)
        check_count = cursor.fetchone()

        # Consistency fix: mirror check_server_config and cope with
        # drivers whose cursors return plain tuples instead of dicts.
        if isinstance(check_count, tuple):
            return int(check_count[0]) > 0

        return (int(check_count['host_count']) > 0)

    def check_server_config(self, cursor):
        """Return True when a row exists that also matches every supplied
        optional column (i.e. no update would be needed)."""
        query_string = \
            """SELECT count(*) AS `host_count`
               FROM mysql_servers
               WHERE hostgroup_id = %s
                 AND hostname = %s
                 AND port = %s"""

        query_data = \
            [self.hostgroup_id,
             self.hostname,
             self.port]

        for col, val in self.config_data.items():
            if val is not None:
                query_data.append(val)
                query_string += "\n  AND " + col + " = %s"

        cursor.execute(query_string, query_data)
        check_count = cursor.fetchone()

        if isinstance(check_count, tuple):
            return int(check_count[0]) > 0

        return (int(check_count['host_count']) > 0)

    def get_server_config(self, cursor):
        """Fetch and return the full row for this server (or None)."""
        query_string = \
            """SELECT *
               FROM mysql_servers
               WHERE hostgroup_id = %s
                 AND hostname = %s
                 AND port = %s"""

        query_data = \
            [self.hostgroup_id,
             self.hostname,
             self.port]

        cursor.execute(query_string, query_data)
        server = cursor.fetchone()
        return server

    def create_server_config(self, cursor):
        """INSERT this server, including any non-None optional columns.

        Returns True (the row was created).
        """
        query_string = \
            """INSERT INTO mysql_servers (
               hostgroup_id,
               hostname,
               port"""

        cols = 3
        query_data = \
            [self.hostgroup_id,
             self.hostname,
             self.port]

        for col, val in self.config_data.items():
            if val is not None:
                cols += 1
                query_data.append(val)
                query_string += ",\n" + col

        # One "%s" placeholder per collected column; the trailing " ," of
        # the last placeholder is stripped before closing the VALUES list.
        query_string += \
            (")\n" +
             "VALUES (" +
             "%s ," * cols)

        query_string = query_string[:-2]
        query_string += ")"

        cursor.execute(query_string, query_data)
        return True

    def update_server_config(self, cursor):
        """UPDATE the supplied (non-None) columns of this server's row.

        Returns True (an update statement was issued).
        """
        query_string = """UPDATE mysql_servers"""

        cols = 0
        query_data = []

        for col, val in self.config_data.items():
            if val is not None:
                cols += 1
                query_data.append(val)
                if cols == 1:
                    query_string += "\nSET " + col + "= %s,"
                else:
                    query_string += "\n    " + col + " = %s,"

        # Drop the trailing comma left by the loop, then append the key.
        query_string = query_string[:-1]
        query_string += ("\nWHERE hostgroup_id = %s\n  AND hostname = %s" +
                         "\n  AND port = %s")

        query_data.append(self.hostgroup_id)
        query_data.append(self.hostname)
        query_data.append(self.port)

        cursor.execute(query_string, query_data)
        return True

    def delete_server_config(self, cursor):
        """DELETE this server's row; returns True."""
        query_string = \
            """DELETE FROM mysql_servers
               WHERE hostgroup_id = %s
                 AND hostname = %s
                 AND port = %s"""

        query_data = \
            [self.hostgroup_id,
             self.hostname,
             self.port]

        cursor.execute(query_string, query_data)
        return True

    def manage_config(self, cursor, state):
        """After a change (state is truthy), optionally persist to disk
        and/or push the new configuration to the runtime."""
        if state:
            if self.save_to_disk:
                save_config_to_disk(cursor)
            if self.load_to_runtime:
                load_config_to_runtime(cursor)

    def create_server(self, check_mode, result, cursor):
        """Create the server (or report what would happen in check mode),
        populating result in place."""
        if not check_mode:
            result['changed'] = \
                self.create_server_config(cursor)
            result['msg'] = "Added server to mysql_hosts"
            result['server'] = \
                self.get_server_config(cursor)
            self.manage_config(cursor,
                               result['changed'])
        else:
            result['changed'] = True
            result['msg'] = ("Server would have been added to" +
                             " mysql_hosts, however check_mode" +
                             " is enabled.")

    def update_server(self, check_mode, result, cursor):
        """Update the server (or report what would happen in check mode),
        populating result in place."""
        if not check_mode:
            result['changed'] = \
                self.update_server_config(cursor)
            result['msg'] = "Updated server in mysql_hosts"
            result['server'] = \
                self.get_server_config(cursor)
            self.manage_config(cursor,
                               result['changed'])
        else:
            result['changed'] = True
            result['msg'] = ("Server would have been updated in" +
                             " mysql_hosts, however check_mode" +
                             " is enabled.")

    def delete_server(self, check_mode, result, cursor):
        """Delete the server (or report what would happen in check mode).

        The row is captured into result['server'] *before* deletion so the
        caller can report what was removed.
        """
        if not check_mode:
            result['server'] = \
                self.get_server_config(cursor)
            result['changed'] = \
                self.delete_server_config(cursor)
            result['msg'] = "Deleted server from mysql_hosts"
            self.manage_config(cursor,
                               result['changed'])
        else:
            result['changed'] = True
            result['msg'] = ("Server would have been deleted from" +
                             " mysql_hosts, however check_mode is" +
                             " enabled.")

# ===========================================
# Module execution.
+# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default='127.0.0.1'), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default='', type='path'), + hostgroup_id=dict(default=0, type='int'), + hostname=dict(required=True, type='str'), + port=dict(default=3306, type='int'), + status=dict(choices=['ONLINE', + 'OFFLINE_SOFT', + 'OFFLINE_HARD']), + weight=dict(type='int'), + compression=dict(type='int'), + max_connections=dict(type='int'), + max_replication_lag=dict(type='int'), + use_ssl=dict(type='bool'), + max_latency_ms=dict(type='int'), + comment=dict(default='', type='str'), + state=dict(default='present', choices=['present', + 'absent']), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. 
%s" % to_native(e) + ) + + proxysql_server = ProxySQLServer(module) + result = {} + + result['state'] = proxysql_server.state + if proxysql_server.hostname: + result['hostname'] = proxysql_server.hostname + + if proxysql_server.state == "present": + try: + if not proxysql_server.check_server_config(cursor): + if not proxysql_server.check_server_config_exists(cursor): + proxysql_server.create_server(module.check_mode, + result, + cursor) + else: + proxysql_server.update_server(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The server already exists in mysql_hosts" + + " and doesn't need to be updated.") + result['server'] = \ + proxysql_server.get_server_config(cursor) + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify server.. %s" % to_native(e) + ) + + elif proxysql_server.state == "absent": + try: + if proxysql_server.check_server_config_exists(cursor): + proxysql_server.delete_server(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The server is already absent from the" + + " mysql_hosts memory configuration") + except mysql_driver.Error as e: + module.fail_json( + msg="unable to remove server.. 
%s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_global_variables.py b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_global_variables.py new file mode 100644 index 00000000..1e65bc82 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_global_variables.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_global_variables +author: "Ben Mildren (@bmildren)" +short_description: Gets or sets the proxysql global variables. +description: + - The M(community.proxysql.proxysql_global_variables) module gets or sets the proxysql global + variables. +options: + variable: + description: + - Defines which variable should be returned, or if I(value) is specified + which variable should be updated. + type: str + required: True + value: + description: + - Defines a value the variable specified using I(variable) should be set + to. + type: str +extends_documentation_fragment: +- community.proxysql.proxysql.managing_config +- community.proxysql.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example sets the value of a variable, saves the mysql admin variables +# config to disk, and dynamically loads the mysql admin variables config to +# runtime. It uses supplied credentials to connect to the proxysql admin +# interface. 
+ +- name: Set the value of a variable + community.proxysql.proxysql_global_variables: + login_user: 'admin' + login_password: 'admin' + variable: 'mysql-max_connections' + value: 4096 + +# This example gets the value of a variable. It uses credentials in a +# supplied config file to connect to the proxysql admin interface. + +- name: Get the value of a variable + community.proxysql.proxysql_global_variables: + config_file: '~/proxysql.cnf' + variable: 'mysql-default_query_delay' +''' + +RETURN = ''' +stdout: + description: Returns the mysql variable supplied with it's associated value. + returned: Returns the current variable and value, or the newly set value + for the variable supplied.. + type: dict + "sample": { + "changed": false, + "msg": "The variable is already been set to the supplied value", + "var": { + "variable_name": "mysql-poll_timeout", + "variable_value": "3000" + } + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils._text import to_native + +# =========================================== +# proxysql module specific support methods. 
#


def perform_checks(module):
    """Fail fast when login_port is invalid or no MySQL driver loaded."""
    if module.params["login_port"] < 0 \
            or module.params["login_port"] > 65535:
        module.fail_json(
            msg="login_port must be a valid unix port number (0-65535)"
        )

    if mysql_driver is None:
        module.fail_json(msg=mysql_driver_fail_msg)


def save_config_to_disk(variable, cursor):
    """Persist variables to disk.

    admin-* variables live in a separate variable set from mysql-*
    variables, so the variable name chooses the SAVE statement.
    """
    if variable.startswith("admin"):
        cursor.execute("SAVE ADMIN VARIABLES TO DISK")
    else:
        cursor.execute("SAVE MYSQL VARIABLES TO DISK")
    return True


def load_config_to_runtime(variable, cursor):
    """Activate the in-memory variables at runtime (admin-* vs mysql-*)."""
    if variable.startswith("admin"):
        cursor.execute("LOAD ADMIN VARIABLES TO RUNTIME")
    else:
        cursor.execute("LOAD MYSQL VARIABLES TO RUNTIME")
    return True


def check_config(variable, value, cursor):
    """Return True when the variable is already set to the given value."""
    query_string = \
        """SELECT count(*) AS `variable_count`
           FROM global_variables
           WHERE variable_name = %s and variable_value = %s"""

    query_data = \
        [variable, value]

    cursor.execute(query_string, query_data)
    check_count = cursor.fetchone()

    # Some drivers return plain tuples rather than dictionaries.
    if isinstance(check_count, tuple):
        return int(check_count[0]) > 0

    return (int(check_count['variable_count']) > 0)


def get_config(variable, cursor):
    """Return the variable's row, or False when it does not exist.

    Robustness fix: decide from the fetched row itself rather than
    cursor.rowcount, which PEP 249 allows to be -1 (unreliable) for
    SELECT statements on some drivers.
    """
    query_string = \
        """SELECT *
           FROM global_variables
           WHERE variable_name = %s"""

    query_data = \
        [variable, ]

    cursor.execute(query_string, query_data)
    resultset = cursor.fetchone()

    if resultset is not None:
        return resultset
    return False


def set_config(variable, value, cursor):
    """UPDATE the variable to the supplied value; returns True (changed)."""
    query_string = \
        """UPDATE global_variables
           SET variable_value = %s
           WHERE variable_name = %s"""

    query_data = \
        [value, variable]

    cursor.execute(query_string, query_data)
    return True


def manage_config(variable, save_to_disk, load_to_runtime, cursor, state):
    """After a change (state is truthy), optionally persist to disk and/or
    push the variable set to the runtime."""
    if state:
        if save_to_disk:
            save_config_to_disk(variable, cursor)
        if load_to_runtime:
            load_config_to_runtime(variable, cursor)

# ===========================================
# Module execution.
+# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default="", type='path'), + variable=dict(required=True, type='str'), + value=dict(), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + variable = module.params["variable"] + value = module.params["value"] + save_to_disk = module.params["save_to_disk"] + load_to_runtime = module.params["load_to_runtime"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e) + ) + + result = {} + + if not value: + try: + if get_config(variable, cursor): + result['changed'] = False + result['msg'] = \ + "Returned the variable and it's current value" + result['var'] = get_config(variable, cursor) + else: + module.fail_json( + msg="The variable \"%s\" was not found" % variable + ) + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to get config.. 
%s" % to_native(e) + ) + else: + try: + if get_config(variable, cursor): + if not check_config(variable, value, cursor): + if not module.check_mode: + result['changed'] = set_config(variable, value, cursor) + result['msg'] = \ + "Set the variable to the supplied value" + result['var'] = get_config(variable, cursor) + manage_config(variable, + save_to_disk, + load_to_runtime, + cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Variable would have been set to" + + " the supplied value, however" + + " check_mode is enabled.") + else: + result['changed'] = False + result['msg'] = ("The variable is already been set to" + + " the supplied value") + result['var'] = get_config(variable, cursor) + else: + module.fail_json( + msg="The variable \"%s\" was not found" % variable + ) + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to set config.. %s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_manage_config.py b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_manage_config.py new file mode 100644 index 00000000..0ca99b3d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_manage_config.py @@ -0,0 +1,218 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_manage_config + +author: "Ben Mildren (@bmildren)" +short_description: Writes the proxysql configuration settings between layers. +description: + - The M(community.proxysql.proxysql_global_variables) module writes the proxysql configuration + settings between layers. 
Currently this module will always report a + changed state, so should typically be used with WHEN however this will + change in a future version when the CHECKSUM table commands are available + for all tables in proxysql. +options: + action: + description: + - The supplied I(action) combines with the supplied I(direction) to + provide the semantics of how we want to move the I(config_settings) + between the I(config_layers). + type: str + choices: [ "LOAD", "SAVE" ] + required: True + config_settings: + description: + - The I(config_settings) specifies which configuration we're writing. + type: str + choices: [ "MYSQL USERS", "MYSQL SERVERS", "MYSQL QUERY RULES", + "MYSQL VARIABLES", "ADMIN VARIABLES", "SCHEDULER" ] + required: True + direction: + description: + - FROM - denotes we're reading values FROM the supplied I(config_layer) + and writing to the next layer. + TO - denotes we're reading from the previous layer and writing TO the + supplied I(config_layer)." + type: str + choices: [ "FROM", "TO" ] + required: True + config_layer: + description: + - RUNTIME - represents the in-memory data structures of ProxySQL used by + the threads that are handling the requests. + MEMORY - (sometimes also referred as main) represents the in-memory + SQLite3 database. + DISK - represents the on-disk SQLite3 database. + CONFIG - is the classical config file. You can only LOAD FROM the + config file. + type: str + choices: [ "MEMORY", "DISK", "RUNTIME", "CONFIG" ] + required: True +extends_documentation_fragment: +- community.proxysql.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example saves the mysql users config from memory to disk. It uses +# supplied credentials to connect to the proxysql admin interface. 
+ +- name: Save the mysql users config from memory to disk + community.proxysql.proxysql_manage_config: + login_user: 'admin' + login_password: 'admin' + action: "SAVE" + config_settings: "MYSQL USERS" + direction: "FROM" + config_layer: "MEMORY" + +# This example loads the mysql query rules config from memory to runtime. It +# uses supplied credentials to connect to the proxysql admin interface. + +- name: Load the mysql query rules config from memory to runtime + community.proxysql.proxysql_manage_config: + config_file: '~/proxysql.cnf' + action: "LOAD" + config_settings: "MYSQL QUERY RULES" + direction: "TO" + config_layer: "RUNTIME" +''' + +RETURN = ''' +stdout: + description: Simply reports whether the action reported a change. + returned: Currently the returned value with always be changed=True. + type: dict + "sample": { + "changed": true + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils._text import to_native + +# =========================================== +# proxysql module specific support methods. 
#


def perform_checks(module):
    """Validate login_port and the action/direction/config_layer combo.

    "LOAD ... FROM CONFIG" is the only operation permitted against the
    classical config-file layer; every other combination against CONFIG
    is rejected with a message naming the offending parameter(s).
    """
    if module.params["login_port"] < 0 \
            or module.params["login_port"] > 65535:
        module.fail_json(
            msg="login_port must be a valid unix port number (0-65535)"
        )

    if module.params["config_layer"] == 'CONFIG' and \
        (module.params["action"] != 'LOAD' or
         module.params["direction"] != 'FROM'):

        # Both parameters are wrong for the CONFIG layer.
        if (module.params["action"] != 'LOAD' and
                module.params["direction"] != 'FROM'):
            msg_string = ("Neither the action \"%s\" nor the direction" +
                          " \"%s\" are valid combination with the CONFIG" +
                          " config_layer")
            module.fail_json(msg=msg_string % (module.params["action"],
                                               module.params["direction"]))

        # Only the action is wrong.
        elif module.params["action"] != 'LOAD':
            msg_string = ("The action \"%s\" is not a valid combination" +
                          " with the CONFIG config_layer")
            module.fail_json(msg=msg_string % module.params["action"])

        # Only the direction is wrong.
        else:
            msg_string = ("The direction \"%s\" is not a valid combination" +
                          " with the CONFIG config_layer")
            module.fail_json(msg=msg_string % module.params["direction"])

    if mysql_driver is None:
        module.fail_json(msg=mysql_driver_fail_msg)


def manage_config(manage_config_settings, cursor):
    """Execute the assembled admin command, e.g. LOAD MYSQL USERS TO RUNTIME.

    manage_config_settings is the [action, config_settings, direction,
    config_layer] list built in main(); the words are joined into a single
    admin statement. Always returns True — ProxySQL's admin interface does
    not report whether the command actually changed anything.
    """
    # Idiom fix: ' '.join already yields a str; the "%s" % wrapper that
    # used to surround it was redundant.
    query_string = ' '.join(manage_config_settings)

    cursor.execute(query_string)
    return True

# ===========================================
# Module execution.
+# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default="", type='path'), + action=dict(required=True, choices=['LOAD', + 'SAVE']), + config_settings=dict(required=True, choices=['MYSQL USERS', + 'MYSQL SERVERS', + 'MYSQL QUERY RULES', + 'MYSQL VARIABLES', + 'ADMIN VARIABLES', + 'SCHEDULER']), + direction=dict(required=True, choices=['FROM', + 'TO']), + config_layer=dict(required=True, choices=['MEMORY', + 'DISK', + 'RUNTIME', + 'CONFIG']) + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + action = module.params["action"] + config_settings = module.params["config_settings"] + direction = module.params["direction"] + config_layer = module.params["config_layer"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file) + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e) + ) + + result = {} + + manage_config_settings = \ + [action, config_settings, direction, config_layer] + + try: + result['changed'] = manage_config(manage_config_settings, + cursor) + except mysql_driver.Error as e: + module.fail_json( + msg="unable to manage config.. 
%s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_mysql_users.py b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_mysql_users.py new file mode 100644 index 00000000..aa60ed10 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_mysql_users.py @@ -0,0 +1,516 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_mysql_users +author: "Ben Mildren (@bmildren)" +short_description: Adds or removes mysql users from proxysql admin interface. +description: + - The M(community.proxysql.proxysql_mysql_users) module adds or removes mysql users using the + proxysql admin interface. +options: + username: + description: + - Name of the user connecting to the mysqld or ProxySQL instance. + type: str + required: True + password: + description: + - Password of the user connecting to the mysqld or ProxySQL instance. + type: str + encrypt_password: + description: + - Encrypt a cleartext password passed in the I(password) option, using + the method defined in I(encryption_method). + default: False + type: bool + encryption_method: + description: + - Encryption method used when I(encrypt_password) is set to C(True). + type: str + choices: [ "mysql_native_password" ] + default: mysql_native_password + active: + description: + - A user with I(active) set to C(False) will be tracked in the database, + but will be never loaded in the in-memory data structures. If omitted + the proxysql database default for I(active) is C(True). 
+ type: bool + use_ssl: + description: + - If I(use_ssl) is set to C(True), connections by this user will be made + using SSL connections. If omitted the proxysql database default for + I(use_ssl) is C(False). + type: bool + default_hostgroup: + description: + - If there is no matching rule for the queries sent by this user, the + traffic it generates is sent to the specified hostgroup. + If omitted the proxysql database default for I(use_ssl) is 0. + type: int + default_schema: + description: + - The schema to which the connection should change to by default. + type: str + transaction_persistent: + description: + - If this is set for the user with which the MySQL client is connecting + to ProxySQL (thus a "frontend" user), transactions started within a + hostgroup will remain within that hostgroup regardless of any other + rules. + If omitted the proxysql database default for I(transaction_persistent) + is C(False). + type: bool + fast_forward: + description: + - If I(fast_forward) is set to C(True), I(fast_forward) will bypass the + query processing layer (rewriting, caching) and pass through the query + directly as is to the backend server. If omitted the proxysql database + default for I(fast_forward) is C(False). + type: bool + backend: + description: + - If I(backend) is set to C(True), this (username, password) pair is + used for authenticating to the ProxySQL instance. + default: True + type: bool + frontend: + description: + - If I(frontend) is set to C(True), this (username, password) pair is + used for authenticating to the mysqld servers against any hostgroup. + default: True + type: bool + max_connections: + description: + - The maximum number of connections ProxySQL will open to the backend for + this user. If omitted the proxysql database default for + I(max_connections) is 10000. + type: int + state: + description: + - When C(present) - adds the user, when C(absent) - removes the user. 
+ type: str + choices: [ "present", "absent" ] + default: present +extends_documentation_fragment: +- community.proxysql.proxysql.managing_config +- community.proxysql.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example adds a user, it saves the mysql user config to disk, but +# avoids loading the mysql user config to runtime (this might be because +# several users are being added and the user wants to push the config to +# runtime in a single batch using the community.general.proxysql_manage_config +# module). It uses supplied credentials to connect to the proxysql admin +# interface. + +- name: Add a user + community.proxysql.proxysql_mysql_users: + login_user: 'admin' + login_password: 'admin' + username: 'productiondba' + state: present + load_to_runtime: False + +# This example removes a user, saves the mysql user config to disk, and +# dynamically loads the mysql user config to runtime. It uses credentials +# in a supplied config file to connect to the proxysql admin interface. + +- name: Remove a user + community.proxysql.proxysql_mysql_users: + config_file: '~/proxysql.cnf' + username: 'mysqlboy' + state: absent +''' + +RETURN = ''' +stdout: + description: The mysql user modified or removed from proxysql + returned: On create/update will return the newly modified user, on delete + it will return the deleted record. 
+ type: dict + sample: + changed: true + msg: Added user to mysql_users + state: present + user: + active: 1 + backend: 1 + default_hostgroup: 1 + default_schema: null + fast_forward: 0 + frontend: 1 + max_connections: 10000 + password: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + schema_locked: 0 + transaction_persistent: 0 + use_ssl: 0 + username: guest_ro + username: guest_ro +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native, to_bytes +from hashlib import sha1 + +# =========================================== +# proxysql module specific support methods. +# + + +def perform_checks(module): + if module.params["login_port"] < 0 \ + or module.params["login_port"] > 65535: + module.fail_json( + msg="login_port must be a valid unix port number (0-65535)" + ) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + +def save_config_to_disk(cursor): + cursor.execute("SAVE MYSQL USERS TO DISK") + return True + + +def load_config_to_runtime(cursor): + cursor.execute("LOAD MYSQL USERS TO RUNTIME") + return True + + +def _mysql_native_password(cleartext_password): + mysql_native_encrypted_password = "*" + sha1(sha1(to_bytes(cleartext_password)).digest()).hexdigest().upper() + return mysql_native_encrypted_password + + +def encrypt_cleartext_password(password_to_encrypt, encryption_method): + encrypted_password = encryption_method(password_to_encrypt) + return encrypted_password + + +encryption_method_map = { + 'mysql_native_password': _mysql_native_password +} + + +class ProxySQLUser(object): + + def __init__(self, module): + self.state = module.params["state"] + self.save_to_disk = module.params["save_to_disk"] + self.load_to_runtime = module.params["load_to_runtime"] + + self.username = module.params["username"] + 
self.backend = module.params["backend"] + self.frontend = module.params["frontend"] + + config_data_keys = ["password", + "active", + "use_ssl", + "default_hostgroup", + "default_schema", + "transaction_persistent", + "fast_forward", + "max_connections"] + + self.config_data = dict((k, module.params[k]) + for k in config_data_keys) + + if module.params["password"] is not None and module.params["encrypt_password"]: + encryption_method = encryption_method_map[module.params["encryption_method"]] + encrypted_password = encrypt_cleartext_password(module.params["password"], encryption_method) + self.config_data["password"] = encrypted_password + + def check_user_config_exists(self, cursor): + query_string = \ + """SELECT count(*) AS `user_count` + FROM mysql_users + WHERE username = %s + AND backend = %s + AND frontend = %s""" + + query_data = \ + [self.username, + self.backend, + self.frontend] + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + return (int(check_count['user_count']) > 0) + + def check_user_privs(self, cursor): + query_string = \ + """SELECT count(*) AS `user_count` + FROM mysql_users + WHERE username = %s + AND backend = %s + AND frontend = %s""" + + query_data = \ + [self.username, + self.backend, + self.frontend] + + for col, val in iteritems(self.config_data): + if val is not None: + query_data.append(val) + query_string += "\n AND " + col + " = %s" + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + return (int(check_count['user_count']) > 0) + + def get_user_config(self, cursor): + query_string = \ + """SELECT * + FROM mysql_users + WHERE username = %s + AND backend = %s + AND frontend = %s""" + + query_data = \ + [self.username, + self.backend, + self.frontend] + + cursor.execute(query_string, query_data) + user = cursor.fetchone() + return user + + def create_user_config(self, cursor): + query_string = \ + """INSERT INTO mysql_users ( + username, + backend, + frontend""" + + cols = 3 + 
query_data = \ + [self.username, + self.backend, + self.frontend] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + query_string += ",\n" + col + + query_string += \ + (")\n" + + "VALUES (" + + "%s ," * cols) + + query_string = query_string[:-2] + query_string += ")" + + cursor.execute(query_string, query_data) + return True + + def update_user_config(self, cursor): + query_string = """UPDATE mysql_users""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\nSET " + col + "= %s," + else: + query_string += "\n " + col + " = %s," + + query_string = query_string[:-1] + query_string += ("\nWHERE username = %s\n AND backend = %s" + + "\n AND frontend = %s") + + query_data.append(self.username) + query_data.append(self.backend) + query_data.append(self.frontend) + + cursor.execute(query_string, query_data) + return True + + def delete_user_config(self, cursor): + query_string = \ + """DELETE FROM mysql_users + WHERE username = %s + AND backend = %s + AND frontend = %s""" + + query_data = \ + [self.username, + self.backend, + self.frontend] + + cursor.execute(query_string, query_data) + return True + + def manage_config(self, cursor, state): + if state: + if self.save_to_disk: + save_config_to_disk(cursor) + if self.load_to_runtime: + load_config_to_runtime(cursor) + + def create_user(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.create_user_config(cursor) + result['msg'] = "Added user to mysql_users" + result['user'] = \ + self.get_user_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("User would have been added to" + + " mysql_users, however check_mode" + + " is enabled.") + + def update_user(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + 
self.update_user_config(cursor) + result['msg'] = "Updated user in mysql_users" + result['user'] = \ + self.get_user_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("User would have been updated in" + + " mysql_users, however check_mode" + + " is enabled.") + + def delete_user(self, check_mode, result, cursor): + if not check_mode: + result['user'] = \ + self.get_user_config(cursor) + result['changed'] = \ + self.delete_user_config(cursor) + result['msg'] = "Deleted user from mysql_users" + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("User would have been deleted from" + + " mysql_users, however check_mode is" + + " enabled.") + +# =========================================== +# Module execution. +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default='', type='path'), + username=dict(required=True, type='str'), + password=dict(no_log=True, type='str'), + encrypt_password=dict(default=False, type='bool'), + encryption_method=dict(default='mysql_native_password', choices=list(encryption_method_map.keys())), + active=dict(type='bool'), + use_ssl=dict(type='bool'), + default_hostgroup=dict(type='int'), + default_schema=dict(type='str'), + transaction_persistent=dict(type='bool'), + fast_forward=dict(type='bool'), + backend=dict(default=True, type='bool'), + frontend=dict(default=True, type='bool'), + max_connections=dict(type='int'), + state=dict(default='present', choices=['present', + 'absent']), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = 
module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e) + ) + + proxysql_user = ProxySQLUser(module) + result = {} + + result['state'] = proxysql_user.state + if proxysql_user.username: + result['username'] = proxysql_user.username + + if proxysql_user.state == "present": + try: + if not proxysql_user.check_user_privs(cursor): + if not proxysql_user.check_user_config_exists(cursor): + proxysql_user.create_user(module.check_mode, + result, + cursor) + else: + proxysql_user.update_user(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The user already exists in mysql_users" + + " and doesn't need to be updated.") + result['user'] = \ + proxysql_user.get_user_config(cursor) + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify user.. %s" % to_native(e) + ) + + elif proxysql_user.state == "absent": + try: + if proxysql_user.check_user_config_exists(cursor): + proxysql_user.delete_user(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The user is already absent from the" + + " mysql_users memory configuration") + except mysql_driver.Error as e: + module.fail_json( + msg="unable to remove user.. 
%s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_query_rules.py b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_query_rules.py new file mode 100644 index 00000000..3088a034 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_query_rules.py @@ -0,0 +1,632 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_query_rules +author: "Ben Mildren (@bmildren)" +short_description: Modifies query rules using the proxysql admin interface. +description: + - The M(community.proxysql.proxysql_query_rules) module modifies query rules using the + proxysql admin interface. +options: + rule_id: + description: + - The unique id of the rule. Rules are processed in rule_id order. + type: int + active: + description: + - A rule with I(active) set to C(False) will be tracked in the database, + but will be never loaded in the in-memory data structures. + type: bool + username: + description: + - Filtering criteria matching username. If I(username) is non-NULL, a + query will match only if the connection is made with the correct + username. + type: str + schemaname: + description: + - Filtering criteria matching schemaname. If I(schemaname) is non-NULL, a + query will match only if the connection uses schemaname as its default + schema. + type: str + flagIN: + description: + - Used in combination with I(flagOUT) and I(apply) to create chains of + rules. + type: int + client_addr: + description: + - Match traffic from a specific source. 
+ type: str + proxy_addr: + description: + - Match incoming traffic on a specific local IP. + type: str + proxy_port: + description: + - Match incoming traffic on a specific local port. + type: int + digest: + description: + - Match queries with a specific digest, as returned by + stats_mysql_query_digest.digest. + type: str + match_digest: + description: + - Regular expression that matches the query digest. The dialect of + regular expressions used is that of re2 - https://github.com/google/re2 + type: str + match_pattern: + description: + - Regular expression that matches the query text. The dialect of regular + expressions used is that of re2 - https://github.com/google/re2 + type: str + negate_match_pattern: + description: + - If I(negate_match_pattern) is set to C(True), only queries not matching + the query text will be considered as a match. This acts as a NOT + operator in front of the regular expression matching against + match_pattern. + type: bool + flagOUT: + description: + - Used in combination with I(flagIN) and apply to create chains of rules. + When set, I(flagOUT) signifies the I(flagIN) to be used in the next + chain of rules. + type: int + replace_pattern: + description: + - This is the pattern with which to replace the matched pattern. Note + that this is optional, and when omitted, the query processor will only + cache, route, or set other parameters without rewriting. + type: str + destination_hostgroup: + description: + - Route matched queries to this hostgroup. This happens unless there is a + started transaction and the logged in user has + I(transaction_persistent) set to C(True) (see M(community.proxysql.proxysql_mysql_users)). + type: int + cache_ttl: + description: + - The number of milliseconds for which to cache the result of the query. + Note in ProxySQL 1.1 I(cache_ttl) was in seconds. + type: int + timeout: + description: + - The maximum timeout in milliseconds with which the matched or rewritten + query should be executed. 
If a query runs for longer than the specified + threshold, the query is automatically killed. If timeout is not + specified, the global variable mysql-default_query_timeout applies. + type: int + retries: + description: + - The maximum number of times a query needs to be re-executed in case of + detected failure during the execution of the query. If retries is not + specified, the global variable mysql-query_retries_on_failure applies. + type: int + delay: + description: + - Number of milliseconds to delay the execution of the query. This is + essentially a throttling mechanism and QoS, and allows a way to give + priority to queries over others. This value is added to the + mysql-default_query_delay global variable that applies to all queries. + type: int + mirror_flagOUT: + description: + - Enables query mirroring. If set I(mirror_flagOUT) can be used to + evaluate the mirrored query against the specified chain of rules. + type: int + mirror_hostgroup: + description: + - Enables query mirroring. If set I(mirror_hostgroup) can be used to + mirror queries to the same or different hostgroup. + type: int + error_msg: + description: + - Query will be blocked, and the specified error_msg will be returned to + the client. + type: str + log: + description: + - Query will be logged. + type: bool + apply: + description: + - Used in combination with I(flagIN) and I(flagOUT) to create chains of + rules. Setting apply to True signifies the last rule to be applied. + type: bool + comment: + description: + - Free form text field, usable for a descriptive comment of the query + rule. + type: str + state: + description: + - When C(present) - adds the rule, when C(absent) - removes the rule.
+ type: str + choices: [ "present", "absent" ] + default: present + force_delete: + description: + - By default we avoid deleting more than one rule in a single batch, + however if you need this behaviour and you're not concerned about the + rules deleted, you can set I(force_delete) to C(True). + type: bool + default: False +extends_documentation_fragment: +- community.proxysql.proxysql.managing_config +- community.proxysql.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example adds a rule to redirect queries from a specific user to another +# hostgroup, it saves the mysql query rule config to disk, but avoids loading +# the mysql query rules config to runtime (this might be because several +# rules are being added and the user wants to push the config to runtime in a +# single batch using the community.proxysql.proxysql_manage_config module). It +# uses supplied credentials to connect to the proxysql admin interface. + +- name: Add a rule + community.proxysql.proxysql_query_rules: + login_user: admin + login_password: admin + username: 'guest_ro' + match_pattern: "^SELECT.*" + destination_hostgroup: 1 + active: 1 + retries: 3 + state: present + load_to_runtime: False + +# This example removes all rules that use the username 'guest_ro', saves the +# mysql query rule config to disk, and dynamically loads the mysql query rule +# config to runtime. It uses credentials in a supplied config file to connect +# to the proxysql admin interface. + +- name: Remove rules + community.proxysql.proxysql_query_rules: + config_file: '~/proxysql.cnf' + username: 'guest_ro' + state: absent + force_delete: true +''' + +RETURN = ''' +stdout: + description: The mysql query rule modified or removed from proxysql + returned: On create/update will return the newly modified rule, in all + other cases will return a list of rules that match the supplied + criteria.
+ type: dict + "sample": { + "changed": true, + "msg": "Added rule to mysql_query_rules", + "rules": [ + { + "active": "0", + "apply": "0", + "cache_ttl": null, + "client_addr": null, + "comment": null, + "delay": null, + "destination_hostgroup": 1, + "digest": null, + "error_msg": null, + "flagIN": "0", + "flagOUT": null, + "log": null, + "match_digest": null, + "match_pattern": null, + "mirror_flagOUT": null, + "mirror_hostgroup": null, + "negate_match_pattern": "0", + "proxy_addr": null, + "proxy_port": null, + "reconnect": null, + "replace_pattern": null, + "retries": null, + "rule_id": "1", + "schemaname": null, + "timeout": null, + "username": "guest_ro" + } + ], + "state": "present" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + +# =========================================== +# proxysql module specific support methods. 
+# + + +def perform_checks(module): + if module.params["login_port"] < 0 \ + or module.params["login_port"] > 65535: + module.fail_json( + msg="login_port must be a valid unix port number (0-65535)" + ) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + +def save_config_to_disk(cursor): + cursor.execute("SAVE MYSQL QUERY RULES TO DISK") + return True + + +def load_config_to_runtime(cursor): + cursor.execute("LOAD MYSQL QUERY RULES TO RUNTIME") + return True + + +class ProxyQueryRule(object): + + def __init__(self, module): + self.state = module.params["state"] + self.force_delete = module.params["force_delete"] + self.save_to_disk = module.params["save_to_disk"] + self.load_to_runtime = module.params["load_to_runtime"] + + config_data_keys = ["rule_id", + "active", + "username", + "schemaname", + "flagIN", + "client_addr", + "proxy_addr", + "proxy_port", + "digest", + "match_digest", + "match_pattern", + "negate_match_pattern", + "flagOUT", + "replace_pattern", + "destination_hostgroup", + "cache_ttl", + "timeout", + "retries", + "delay", + "mirror_flagOUT", + "mirror_hostgroup", + "error_msg", + "log", + "apply", + "comment"] + + self.config_data = dict((k, module.params[k]) + for k in config_data_keys) + + def check_rule_pk_exists(self, cursor): + query_string = \ + """SELECT count(*) AS `rule_count` + FROM mysql_query_rules + WHERE rule_id = %s""" + + query_data = \ + [self.config_data["rule_id"]] + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + return (int(check_count['rule_count']) > 0) + + def check_rule_cfg_exists(self, cursor): + query_string = \ + """SELECT count(*) AS `rule_count` + FROM mysql_query_rules""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\n WHERE " + col + " = %s" + else: + query_string += "\n AND " + col + " = %s" + + if cols > 0: + 
cursor.execute(query_string, query_data) + else: + cursor.execute(query_string) + check_count = cursor.fetchone() + return int(check_count['rule_count']) + + def get_rule_config(self, cursor, created_rule_id=None): + query_string = \ + """SELECT * + FROM mysql_query_rules""" + + if created_rule_id: + query_data = [created_rule_id, ] + query_string += "\nWHERE rule_id = %s" + + cursor.execute(query_string, query_data) + rule = cursor.fetchone() + else: + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\n WHERE " + col + " = %s" + else: + query_string += "\n AND " + col + " = %s" + + if cols > 0: + cursor.execute(query_string, query_data) + else: + cursor.execute(query_string) + rule = cursor.fetchall() + + return rule + + def create_rule_config(self, cursor): + query_string = \ + """INSERT INTO mysql_query_rules (""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + query_string += "\n" + col + "," + + query_string = query_string[:-1] + + query_string += \ + (")\n" + + "VALUES (" + + "%s ," * cols) + + query_string = query_string[:-2] + query_string += ")" + + cursor.execute(query_string, query_data) + new_rule_id = cursor.lastrowid + return True, new_rule_id + + def update_rule_config(self, cursor): + query_string = """UPDATE mysql_query_rules""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None and col != "rule_id": + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\nSET " + col + "= %s," + else: + query_string += "\n " + col + " = %s," + + query_string = query_string[:-1] + query_string += "\nWHERE rule_id = %s" + + query_data.append(self.config_data["rule_id"]) + + cursor.execute(query_string, query_data) + return True + + def delete_rule_config(self, cursor): + query_string = \ + 
"""DELETE FROM mysql_query_rules""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\n WHERE " + col + " = %s" + else: + query_string += "\n AND " + col + " = %s" + + if cols > 0: + cursor.execute(query_string, query_data) + else: + cursor.execute(query_string) + check_count = cursor.rowcount + return True, int(check_count) + + def manage_config(self, cursor, state): + if state: + if self.save_to_disk: + save_config_to_disk(cursor) + if self.load_to_runtime: + load_config_to_runtime(cursor) + + def create_rule(self, check_mode, result, cursor): + if not check_mode: + result['changed'], new_rule_id = \ + self.create_rule_config(cursor) + result['msg'] = "Added rule to mysql_query_rules" + self.manage_config(cursor, + result['changed']) + result['rules'] = \ + self.get_rule_config(cursor, new_rule_id) + else: + result['changed'] = True + result['msg'] = ("Rule would have been added to" + + " mysql_query_rules, however" + + " check_mode is enabled.") + + def update_rule(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.update_rule_config(cursor) + result['msg'] = "Updated rule in mysql_query_rules" + self.manage_config(cursor, + result['changed']) + result['rules'] = \ + self.get_rule_config(cursor) + else: + result['changed'] = True + result['msg'] = ("Rule would have been updated in" + + " mysql_query_rules, however" + + " check_mode is enabled.") + + def delete_rule(self, check_mode, result, cursor): + if not check_mode: + result['rules'] = \ + self.get_rule_config(cursor) + result['changed'], result['rows_affected'] = \ + self.delete_rule_config(cursor) + result['msg'] = "Deleted rule from mysql_query_rules" + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Rule would have been deleted from" + + " mysql_query_rules, however" + + " check_mode is 
enabled.") + +# =========================================== +# Module execution. +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default="", type='path'), + rule_id=dict(type='int'), + active=dict(type='bool'), + username=dict(type='str'), + schemaname=dict(type='str'), + flagIN=dict(type='int'), + client_addr=dict(type='str'), + proxy_addr=dict(type='str'), + proxy_port=dict(type='int'), + digest=dict(type='str'), + match_digest=dict(type='str'), + match_pattern=dict(type='str'), + negate_match_pattern=dict(type='bool'), + flagOUT=dict(type='int'), + replace_pattern=dict(type='str'), + destination_hostgroup=dict(type='int'), + cache_ttl=dict(type='int'), + timeout=dict(type='int'), + retries=dict(type='int'), + delay=dict(type='int'), + mirror_flagOUT=dict(type='int'), + mirror_hostgroup=dict(type='int'), + error_msg=dict(type='str'), + log=dict(type='bool'), + apply=dict(type='bool'), + comment=dict(type='str'), + state=dict(default='present', choices=['present', + 'absent']), + force_delete=dict(default=False, type='bool'), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. 
%s" % to_native(e) + ) + + proxysql_query_rule = ProxyQueryRule(module) + result = {} + + result['state'] = proxysql_query_rule.state + + if proxysql_query_rule.state == "present": + try: + if not proxysql_query_rule.check_rule_cfg_exists(cursor): + if proxysql_query_rule.config_data["rule_id"] and \ + proxysql_query_rule.check_rule_pk_exists(cursor): + proxysql_query_rule.update_rule(module.check_mode, + result, + cursor) + else: + proxysql_query_rule.create_rule(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The rule already exists in" + + " mysql_query_rules and doesn't need to be" + + " updated.") + result['rules'] = \ + proxysql_query_rule.get_rule_config(cursor) + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify rule.. %s" % to_native(e) + ) + + elif proxysql_query_rule.state == "absent": + try: + existing_rules = proxysql_query_rule.check_rule_cfg_exists(cursor) + if existing_rules > 0: + if existing_rules == 1 or \ + proxysql_query_rule.force_delete: + proxysql_query_rule.delete_rule(module.check_mode, + result, + cursor) + else: + module.fail_json( + msg=("Operation would delete multiple rules" + + " use force_delete to override this") + ) + else: + result['changed'] = False + result['msg'] = ("The rule is already absent from the" + + " mysql_query_rules memory configuration") + except mysql_driver.Error as e: + module.fail_json( + msg="unable to remove rule.. 
%s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_replication_hostgroups.py b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_replication_hostgroups.py new file mode 100644 index 00000000..d8f5ee72 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_replication_hostgroups.py @@ -0,0 +1,381 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_replication_hostgroups +author: "Ben Mildren (@bmildren)" +short_description: Manages replication hostgroups using the proxysql admin + interface. +description: + - Each row in mysql_replication_hostgroups represent a pair of + writer_hostgroup and reader_hostgroup. ProxySQL will monitor the value of + read_only for all the servers in specified hostgroups, and based on the + value of read_only will assign the server to the writer or reader + hostgroups. +options: + writer_hostgroup: + description: + - Id of the writer hostgroup. + type: int + required: True + reader_hostgroup: + description: + - Id of the reader hostgroup. + type: int + required: True + comment: + description: + - Text field that can be used for any purposes defined by the user. + type: str + state: + description: + - When C(present) - adds the replication hostgroup, when C(absent) - + removes the replication hostgroup. 
+ type: str + choices: [ "present", "absent" ] + default: present +extends_documentation_fragment: +- community.proxysql.proxysql.managing_config +- community.proxysql.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example adds a replication hostgroup, it saves the mysql server config +# to disk, but avoids loading the mysql server config to runtime (this might be +# because several replication hostgroups are being added and the user wants to +# push the config to runtime in a single batch using the +# community.proxysql.proxysql_manage_config module). It uses supplied credentials +# to connect to the proxysql admin interface. + +- name: Add a replication hostgroup + community.proxysql.proxysql_replication_hostgroups: + login_user: 'admin' + login_password: 'admin' + writer_hostgroup: 1 + reader_hostgroup: 2 + state: present + load_to_runtime: False + +# This example removes a replication hostgroup, saves the mysql server config +# to disk, and dynamically loads the mysql server config to runtime. It uses +# credentials in a supplied config file to connect to the proxysql admin +# interface. + +- name: Remove a replication hostgroup + community.proxysql.proxysql_replication_hostgroups: + config_file: '~/proxysql.cnf' + writer_hostgroup: 3 + reader_hostgroup: 4 + state: absent +''' + +RETURN = ''' +stdout: + description: The replication hostgroup modified or removed from proxysql + returned: On create/update will return the newly modified group, on delete + it will return the deleted record.
+ type: dict + "sample": { + "changed": true, + "msg": "Added server to mysql_hosts", + "repl_group": { + "comment": "", + "reader_hostgroup": "1", + "writer_hostgroup": "2" + }, + "state": "present" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils._text import to_native + +# =========================================== +# proxysql module specific support methods. +# + + +def perform_checks(module): + if module.params["login_port"] < 0 \ + or module.params["login_port"] > 65535: + module.fail_json( + msg="login_port must be a valid unix port number (0-65535)" + ) + + if not module.params["writer_hostgroup"] >= 0: + module.fail_json( + msg="writer_hostgroup must be a integer greater than or equal to 0" + ) + + if not module.params["reader_hostgroup"] == \ + module.params["writer_hostgroup"]: + if not module.params["reader_hostgroup"] > 0: + module.fail_json( + msg=("writer_hostgroup must be a integer greater than" + + " or equal to 0") + ) + else: + module.fail_json( + msg="reader_hostgroup cannot equal writer_hostgroup" + ) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + +def save_config_to_disk(cursor): + cursor.execute("SAVE MYSQL SERVERS TO DISK") + return True + + +def load_config_to_runtime(cursor): + cursor.execute("LOAD MYSQL SERVERS TO RUNTIME") + return True + + +class ProxySQLReplicationHostgroup(object): + + def __init__(self, module): + self.state = module.params["state"] + self.save_to_disk = module.params["save_to_disk"] + self.load_to_runtime = module.params["load_to_runtime"] + self.writer_hostgroup = module.params["writer_hostgroup"] + self.reader_hostgroup = module.params["reader_hostgroup"] + self.comment = module.params["comment"] + + def check_repl_group_config(self, cursor, keys): + query_string = \ + """SELECT count(*) AS `repl_groups` + FROM 
mysql_replication_hostgroups + WHERE writer_hostgroup = %s + AND reader_hostgroup = %s""" + + query_data = \ + [self.writer_hostgroup, + self.reader_hostgroup] + + if self.comment and not keys: + query_string += "\n AND comment = %s" + query_data.append(self.comment) + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + return (int(check_count['repl_groups']) > 0) + + def get_repl_group_config(self, cursor): + query_string = \ + """SELECT * + FROM mysql_replication_hostgroups + WHERE writer_hostgroup = %s + AND reader_hostgroup = %s""" + + query_data = \ + [self.writer_hostgroup, + self.reader_hostgroup] + + cursor.execute(query_string, query_data) + repl_group = cursor.fetchone() + return repl_group + + def create_repl_group_config(self, cursor): + query_string = \ + """INSERT INTO mysql_replication_hostgroups ( + writer_hostgroup, + reader_hostgroup, + comment) + VALUES (%s, %s, %s)""" + + query_data = \ + [self.writer_hostgroup, + self.reader_hostgroup, + self.comment or ''] + + cursor.execute(query_string, query_data) + return True + + def update_repl_group_config(self, cursor): + query_string = \ + """UPDATE mysql_replication_hostgroups + SET comment = %s + WHERE writer_hostgroup = %s + AND reader_hostgroup = %s""" + + query_data = \ + [self.comment, + self.writer_hostgroup, + self.reader_hostgroup] + + cursor.execute(query_string, query_data) + return True + + def delete_repl_group_config(self, cursor): + query_string = \ + """DELETE FROM mysql_replication_hostgroups + WHERE writer_hostgroup = %s + AND reader_hostgroup = %s""" + + query_data = \ + [self.writer_hostgroup, + self.reader_hostgroup] + + cursor.execute(query_string, query_data) + return True + + def manage_config(self, cursor, state): + if state: + if self.save_to_disk: + save_config_to_disk(cursor) + if self.load_to_runtime: + load_config_to_runtime(cursor) + + def create_repl_group(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + 
self.create_repl_group_config(cursor) + result['msg'] = "Added server to mysql_hosts" + result['repl_group'] = \ + self.get_repl_group_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Repl group would have been added to" + + " mysql_replication_hostgroups, however" + + " check_mode is enabled.") + + def update_repl_group(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.update_repl_group_config(cursor) + result['msg'] = "Updated server in mysql_hosts" + result['repl_group'] = \ + self.get_repl_group_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Repl group would have been updated in" + + " mysql_replication_hostgroups, however" + + " check_mode is enabled.") + + def delete_repl_group(self, check_mode, result, cursor): + if not check_mode: + result['repl_group'] = \ + self.get_repl_group_config(cursor) + result['changed'] = \ + self.delete_repl_group_config(cursor) + result['msg'] = "Deleted server from mysql_hosts" + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Repl group would have been deleted from" + + " mysql_replication_hostgroups, however" + + " check_mode is enabled.") + +# =========================================== +# Module execution. 
+# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default="", type='path'), + writer_hostgroup=dict(required=True, type='int'), + reader_hostgroup=dict(required=True, type='int'), + comment=dict(type='str'), + state=dict(default='present', choices=['present', + 'absent']), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e) + ) + + proxysql_repl_group = ProxySQLReplicationHostgroup(module) + result = {} + + result['state'] = proxysql_repl_group.state + + if proxysql_repl_group.state == "present": + try: + if not proxysql_repl_group.check_repl_group_config(cursor, + keys=True): + proxysql_repl_group.create_repl_group(module.check_mode, + result, + cursor) + else: + if not proxysql_repl_group.check_repl_group_config(cursor, + keys=False): + proxysql_repl_group.update_repl_group(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The repl group already exists in" + + " mysql_replication_hostgroups and" + + " doesn't need to be updated.") + result['repl_group'] = \ + proxysql_repl_group.get_repl_group_config(cursor) + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify replication hostgroup.. 
%s" % to_native(e) + ) + + elif proxysql_repl_group.state == "absent": + try: + if proxysql_repl_group.check_repl_group_config(cursor, + keys=True): + proxysql_repl_group.delete_repl_group(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The repl group is already absent from the" + + " mysql_replication_hostgroups memory" + + " configuration") + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to delete replication hostgroup.. %s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_scheduler.py b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_scheduler.py new file mode 100644 index 00000000..136b3283 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/plugins/modules/proxysql_scheduler.py @@ -0,0 +1,424 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_scheduler +author: "Ben Mildren (@bmildren)" +short_description: Adds or removes schedules from proxysql admin interface. +description: + - The M(community.proxysql.proxysql_scheduler) module adds or removes schedules using the + proxysql admin interface. +options: + active: + description: + - A schedule with I(active) set to C(False) will be tracked in the + database, but will be never loaded in the in-memory data structures. + type: bool + default: True + interval_ms: + description: + - How often (in millisecond) the job will be started. The minimum value + for I(interval_ms) is 100 milliseconds. 
+ type: int + default: 10000 + filename: + description: + - Full path of the executable to be executed. + type: str + required: True + arg1: + description: + - Argument that can be passed to the job. + type: str + arg2: + description: + - Argument that can be passed to the job. + type: str + arg3: + description: + - Argument that can be passed to the job. + type: str + arg4: + description: + - Argument that can be passed to the job. + type: str + arg5: + description: + - Argument that can be passed to the job. + type: str + comment: + description: + - Text field that can be used for any purposed defined by the user. + type: str + state: + description: + - When C(present) - adds the schedule, when C(absent) - removes the + schedule. + type: str + choices: [ "present", "absent" ] + default: present + force_delete: + description: + - By default we avoid deleting more than one schedule in a single batch, + however if you need this behaviour and you're not concerned about the + schedules deleted, you can set I(force_delete) to C(True). + type: bool + default: False +extends_documentation_fragment: +- community.proxysql.proxysql.managing_config +- community.proxysql.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example adds a schedule, it saves the scheduler config to disk, but +# avoids loading the scheduler config to runtime (this might be because +# several servers are being added and the user wants to push the config to +# runtime in a single batch using the community.general.proxysql_manage_config +# module). It uses supplied credentials to connect to the proxysql admin +# interface. + +- name: Add a schedule + community.proxysql.proxysql_scheduler: + login_user: 'admin' + login_password: 'admin' + interval_ms: 1000 + filename: "/opt/maintenance.py" + state: present + load_to_runtime: False + +# This example removes a schedule, saves the scheduler config to disk, and +# dynamically loads the scheduler config to runtime. 
It uses credentials +# in a supplied config file to connect to the proxysql admin interface. + +- name: Remove a schedule + community.proxysql.proxysql_scheduler: + config_file: '~/proxysql.cnf' + filename: "/opt/old_script.py" + state: absent +''' + +RETURN = ''' +stdout: + description: The schedule modified or removed from proxysql + returned: On create/update will return the newly modified schedule, on + delete it will return the deleted record. + type: dict + "sample": { + "changed": true, + "filename": "/opt/test.py", + "msg": "Added schedule to scheduler", + "schedules": [ + { + "active": "1", + "arg1": null, + "arg2": null, + "arg3": null, + "arg4": null, + "arg5": null, + "comment": "", + "filename": "/opt/test.py", + "id": "1", + "interval_ms": "10000" + } + ], + "state": "present" + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + +# =========================================== +# proxysql module specific support methods. 
+# + + +def perform_checks(module): + if module.params["login_port"] < 0 \ + or module.params["login_port"] > 65535: + module.fail_json( + msg="login_port must be a valid unix port number (0-65535)" + ) + + if module.params["interval_ms"] < 100 \ + or module.params["interval_ms"] > 100000000: + module.fail_json( + msg="interval_ms must between 100ms & 100000000ms" + ) + + if mysql_driver is None: + module.fail_json(msg=mysql_driver_fail_msg) + + +def save_config_to_disk(cursor): + cursor.execute("SAVE SCHEDULER TO DISK") + return True + + +def load_config_to_runtime(cursor): + cursor.execute("LOAD SCHEDULER TO RUNTIME") + return True + + +class ProxySQLSchedule(object): + + def __init__(self, module): + self.state = module.params["state"] + self.force_delete = module.params["force_delete"] + self.save_to_disk = module.params["save_to_disk"] + self.load_to_runtime = module.params["load_to_runtime"] + self.active = module.params["active"] + self.interval_ms = module.params["interval_ms"] + self.filename = module.params["filename"] + + config_data_keys = ["arg1", + "arg2", + "arg3", + "arg4", + "arg5", + "comment"] + + self.config_data = dict((k, module.params[k]) + for k in config_data_keys) + + def check_schedule_config(self, cursor): + query_string = \ + """SELECT count(*) AS `schedule_count` + FROM scheduler + WHERE active = %s + AND interval_ms = %s + AND filename = %s""" + + query_data = \ + [self.active, + self.interval_ms, + self.filename] + + for col, val in iteritems(self.config_data): + if val is not None: + query_data.append(val) + query_string += "\n AND " + col + " = %s" + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + return int(check_count['schedule_count']) + + def get_schedule_config(self, cursor): + query_string = \ + """SELECT * + FROM scheduler + WHERE active = %s + AND interval_ms = %s + AND filename = %s""" + + query_data = \ + [self.active, + self.interval_ms, + self.filename] + + for col, val in 
iteritems(self.config_data): + if val is not None: + query_data.append(val) + query_string += "\n AND " + col + " = %s" + + cursor.execute(query_string, query_data) + schedule = cursor.fetchall() + return schedule + + def create_schedule_config(self, cursor): + query_string = \ + """INSERT INTO scheduler ( + active, + interval_ms, + filename""" + + cols = 0 + query_data = \ + [self.active, + self.interval_ms, + self.filename] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + query_string += ",\n" + col + + query_string += \ + (")\n" + + "VALUES (%s, %s, %s" + + ", %s" * cols + + ")") + + cursor.execute(query_string, query_data) + return True + + def delete_schedule_config(self, cursor): + query_string = \ + """DELETE FROM scheduler + WHERE active = %s + AND interval_ms = %s + AND filename = %s""" + + query_data = \ + [self.active, + self.interval_ms, + self.filename] + + for col, val in iteritems(self.config_data): + if val is not None: + query_data.append(val) + query_string += "\n AND " + col + " = %s" + + cursor.execute(query_string, query_data) + check_count = cursor.rowcount + return True, int(check_count) + + def manage_config(self, cursor, state): + if state: + if self.save_to_disk: + save_config_to_disk(cursor) + if self.load_to_runtime: + load_config_to_runtime(cursor) + + def create_schedule(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.create_schedule_config(cursor) + result['msg'] = "Added schedule to scheduler" + result['schedules'] = \ + self.get_schedule_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Schedule would have been added to" + + " scheduler, however check_mode" + + " is enabled.") + + def delete_schedule(self, check_mode, result, cursor): + if not check_mode: + result['schedules'] = \ + self.get_schedule_config(cursor) + result['changed'] = \ + 
self.delete_schedule_config(cursor) + result['msg'] = "Deleted schedule from scheduler" + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Schedule would have been deleted from" + + " scheduler, however check_mode is" + + " enabled.") + +# =========================================== +# Module execution. +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default="127.0.0.1"), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default="", type='path'), + active=dict(default=True, type='bool'), + interval_ms=dict(default=10000, type='int'), + filename=dict(required=True, type='str'), + arg1=dict(type='str'), + arg2=dict(type='str'), + arg3=dict(type='str'), + arg4=dict(type='str'), + arg5=dict(type='str'), + comment=dict(type='str'), + state=dict(default='present', choices=['present', + 'absent']), + force_delete=dict(default=False, type='bool'), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. 
%s" % to_native(e) + ) + + proxysql_schedule = ProxySQLSchedule(module) + result = {} + + result['state'] = proxysql_schedule.state + result['filename'] = proxysql_schedule.filename + + if proxysql_schedule.state == "present": + try: + if proxysql_schedule.check_schedule_config(cursor) <= 0: + proxysql_schedule.create_schedule(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The schedule already exists and doesn't" + + " need to be updated.") + result['schedules'] = \ + proxysql_schedule.get_schedule_config(cursor) + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify schedule.. %s" % to_native(e) + ) + + elif proxysql_schedule.state == "absent": + try: + existing_schedules = \ + proxysql_schedule.check_schedule_config(cursor) + if existing_schedules > 0: + if existing_schedules == 1 or proxysql_schedule.force_delete: + proxysql_schedule.delete_schedule(module.check_mode, + result, + cursor) + else: + module.fail_json( + msg=("Operation would delete multiple records" + + " use force_delete to override this") + ) + else: + result['changed'] = False + result['msg'] = ("The schedule is already absent from the" + + " memory configuration") + except mysql_driver.Error as e: + module.fail_json( + msg="unable to remove schedule.. 
%s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/.yamllint b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/.yamllint new file mode 100644 index 00000000..a0270865 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/.yamllint @@ -0,0 +1,11 @@ +--- +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + line-length: disable diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/README.md b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/README.md new file mode 100644 index 00000000..5cc42b1b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/README.md @@ -0,0 +1,43 @@ +Ansible Role: ProxySQL +====================== + +This role installs, and configures ProxySQL. + +Requirements +------------ + +None + +Role Variables +-------------- + +As with all roles designed in Data Platforms, the interface to variables in this role should only be via the role defaults, and it shouldn't be necessary to override the role vars. + +A full list of defaults and their values can be found in the `defaults/main.yml`. 
+ +Dependencies +------------ + +None + +Example Playbook +---------------- + +``` + - hosts: servers + tasks: + - import_role: + name: role_mysql_proxysql + tags: + - proxysql +``` + +License +------- + +BSD + +Author Information +------------------ + +Ben Mildren diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/defaults/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/defaults/main.yml new file mode 100644 index 00000000..93781561 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/defaults/main.yml @@ -0,0 +1,171 @@ +--- +### proxysql install +proxysql_download_src: https://github.com/sysown/proxysql/releases/download +proxysql_version: 2.0.10 +proxysql_mysql_client_version: 5.7 +proxysql_mysql_use_custom_build: false +proxysql_force_restart: false + +proxysql_user: proxysql +proxysql_group: proxysql +proxysql_datadir: /var/lib/proxysql + +proxysql_restart_missing_heartbeats: 10 + +### admin variables +proxysql_admin_user: admin +proxysql_admin_password: admin +proxysql_admin_stats_user: stats +proxysql_admin_stats_password: stats +proxysql_admin_bind_address: 0.0.0.0 +proxysql_admin_port: 6032 +proxysql_admin_socket: /tmp/proxysql_admin.sock +proxysql_admin_read_only: false +proxysql_admin_refresh_interval: 2000 +proxysql_admin_hash_passwords: true + +# cluster +proxysql_admin_cluster_username: +proxysql_admin_cluster_password: +proxysql_admin_cluster_check_interval_ms: 1000 +proxysql_admin_cluster_check_status_frequency: 10 +proxysql_admin_cluster_proxysql_servers_diffs_before_sync: 3 +proxysql_admin_cluster_proxysql_servers_save_to_disk: true +proxysql_admin_checksum_mysql_query_rules: true +proxysql_admin_cluster_mysql_query_rules_diffs_before_sync: 3 +proxysql_admin_cluster_mysql_query_rules_save_to_disk: true +proxysql_admin_checksum_mysql_servers: true +proxysql_admin_cluster_mysql_servers_diffs_before_sync: 3 
+proxysql_admin_cluster_mysql_servers_save_to_disk: true +proxysql_admin_checksum_mysql_users: true +proxysql_admin_cluster_mysql_users_diffs_before_sync: 3 +proxysql_admin_cluster_mysql_users_save_to_disk: true + +# historical stats +proxysql_admin_stats_system_cpu: 60 +proxysql_admin_stats_system_memory: 60 +proxysql_admin_stats_mysql_connection_pool: 60 +proxysql_admin_stats_mysql_connections: 60 +proxysql_admin_stats_mysql_query_cache: 60 + +# web interface +proxysql_admin_web_enabled: false +proxysql_admin_web_port: 6080 + + +### mysql variables +proxysql_mysql_bind_address: 0.0.0.0 +proxysql_mysql_port: 6033 +proxysql_mysql_socket: /tmp/proxysql.sock + +# connection pool +proxysql_mysql_connect_retries_delay: 1 +proxysql_mysql_connect_retries_on_failure: 10 +proxysql_mysql_connect_timeout_server: 3000 +proxysql_mysql_connect_timeout_server_max: 10000 +proxysql_mysql_connection_delay_multiplex_ms: 0 +proxysql_mysql_connection_max_age_ms: 0 +proxysql_mysql_connpoll_reset_queue_length: 50 +proxysql_mysql_default_max_latency_ms: 1000 +proxysql_mysql_free_connections_pct: 10 +proxysql_mysql_max_connections: 2048 +proxysql_mysql_multiplexing: true +proxysql_mysql_ping_interval_server_msec: 120000 +proxysql_mysql_ping_timeout_server: 500 +proxysql_mysql_poll_timeout: 2000 +proxysql_mysql_poll_timeout_on_failure: 100 +proxysql_mysql_session_idle_ms: 1000 +proxysql_mysql_session_idle_show_processlist: true +proxysql_mysql_sessions_sort: true +proxysql_mysql_shun_on_failures: 5 +proxysql_mysql_shun_recovery_time_sec: 10 +proxysql_mysql_stacksize: 1048576 +proxysql_mysql_threads: 4 +proxysql_mysql_threshold_query_length: 524288 +proxysql_mysql_threshold_resultset_size: 4194304 +proxysql_mysql_throttle_connections_per_sec_to_hostgroup: 1000000 +proxysql_mysql_throttle_max_bytes_per_second_to_client: 2147483647 +proxysql_mysql_throttle_ratio_server_to_client: 0 + +# session +proxysql_mysql_client_found_rows: true +proxysql_mysql_default_charset: utf8 
+proxysql_mysql_default_query_delay: 0 +proxysql_mysql_default_query_timeout: 36000000 +proxysql_mysql_default_schema: information_schema +proxysql_mysql_default_sql_mode: +proxysql_mysql_default_time_zone: SYSTEM +proxysql_mysql_init_connect: +proxysql_mysql_max_allowed_packet: 4194304 +proxysql_mysql_max_transaction_time: 14400000 +proxysql_mysql_query_retries_on_failure: 1 +proxysql_mysql_server_capabilities: 45578 +proxysql_mysql_server_version: 5.5.30 +proxysql_mysql_mysql_wait_timeout: 28800000 +proxysql_mysql_kill_backend_connection_when_disconnect: true + +# ssl +proxysql_mysql_ssl_p2s_ca: +proxysql_mysql_ssl_p2s_cert: +proxysql_mysql_ssl_p2s_cipher: +proxysql_mysql_ssl_p2s_key: + +# query processing +proxysql_mysql_query_processor_iterations: 0 +proxysql_mysql_query_processor_regex: 1 + +# autocommit +proxysql_mysql_autocommit_false_is_transaction: false +proxysql_mysql_autocommit_false_not_reusable: false +proxysql_mysql_enforce_autocommit_on_reads: false +proxysql_mysql_forward_autocommit: false + +# prepared statements +proxysql_mysql_max_stmts_cache: 10000 +proxysql_mysql_max_stmts_per_connection: 20 + +# query cache +proxysql_mysql_query_cache_size_mb: 256 + +# mirroring +proxysql_mysql_mirror_max_concurrency: 16 +proxysql_mysql_mirror_max_queue_length: 32000 + +# monitor +proxysql_mysql_monitor_username: monitor +proxysql_mysql_monitor_password: monitor + +proxysql_mysql_monitor_connect_interval: 60000 +proxysql_mysql_monitor_connect_timeout: 600 +proxysql_mysql_monitor_enabled: true +proxysql_mysql_monitor_groupreplication_healthcheck_interval: 5000 +proxysql_mysql_monitor_groupreplication_healthcheck_timeout: 800 +proxysql_mysql_monitor_history: 600000 +proxysql_mysql_monitor_ping_interval: 10000 +proxysql_mysql_monitor_ping_max_failures: 3 +proxysql_mysql_monitor_ping_timeout: 1000 +proxysql_mysql_monitor_query_interval: 60000 +proxysql_mysql_monitor_query_timeout: 100 +proxysql_mysql_monitor_read_only_interval: 1500 
+proxysql_mysql_monitor_read_only_max_timeout_count: 3 +proxysql_mysql_monitor_read_only_timeout: 500 +proxysql_mysql_monitor_replication_lag_interval: 10000 +proxysql_mysql_monitor_replication_lag_timeout: 1000 +proxysql_mysql_monitor_replication_lag_use_percona_heartbeat: +proxysql_mysql_monitor_slave_lag_when_null: 60 +proxysql_mysql_monitor_wait_timeout: true +proxysql_mysql_monitor_writer_is_also_reader: true + +# stats and logging +proxysql_mysql_commands_stats: true +proxysql_mysql_eventslog_filename: +proxysql_mysql_eventslog_filesize: 104857600 +proxysql_mysql_hostgroup_manager_verbose: 0 +proxysql_mysql_long_query_time: 1000 +proxysql_mysql_query_digests: true +proxysql_mysql_query_digests_lowercase: false +proxysql_mysql_query_digests_max_digest_length: 2048 +proxysql_mysql_query_digests_max_query_length: 65000 +proxysql_mysql_stats_time_backend_query: false +proxysql_mysql_stats_time_query_processor: false +proxysql_mysql_verbose_query_error: false diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/handlers/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/handlers/main.yml new file mode 100644 index 00000000..3ed9a13f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/handlers/main.yml @@ -0,0 +1,33 @@ +--- +- name: proxysql | handler | manage admin config + proxysql_global_variables: + config_file: "~/.my.cnf" + variable: "admin-{{ item.value.variable }}" + value: "{{ item.value.variable_value }}" + loop: "{{ proxysql_admin_variables|dict2items }}" + listen: update proxysql config + +- name: proxysql | handler | manage mysql config + proxysql_global_variables: + config_file: "~/.my.cnf" + variable: "mysql-{{ item.value.variable }}" + value: "{{ item.value.variable_value }}" + loop: "{{ proxysql_mysql_variables|dict2items }}" + listen: update proxysql config + +- name: proxysql | handler | manage mysql options + 
proxysql_global_variables: + config_file: "~/.my.cnf" + variable: "mysql-{{ item.value.variable }}" + value: "{{ item.value.variable_value }}" + load_to_runtime: false + save_to_disk: true + loop: "{{ proxysql_mysql_options|dict2items }}" + listen: update proxysql config + +- name: proxysql | handler | restart proxysql + service: + name: proxysql + state: restarted + when: proxysql_force_restart + listen: restart proxysql diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/meta/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/meta/main.yml new file mode 100644 index 00000000..c0afeec9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/meta/main.yml @@ -0,0 +1,20 @@ +--- +galaxy_info: + author: Ben Mildren + description: Ansible role to install and configure ProxySQL + company: DigitalOcean + + license: license (BSD) + min_ansible_version: 2.9 + + platforms: + - name: Ubuntu + versions: + - xenial + - bionic + + galaxy_tags: + - proxysql + - mysql + +dependencies: [] diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/converge.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/converge.yml new file mode 100644 index 00000000..662cb235 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/converge.yml @@ -0,0 +1,5 @@ +--- +- name: Converge + hosts: all + roles: + - role: proxysql diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/molecule.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/molecule.yml new file mode 100644 index 00000000..c96fea7f --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/molecule.yml @@ -0,0 +1,46 @@ +--- +dependency: + name: galaxy +driver: + name: docker +lint: | + set -e + yamllint . + ansible-lint . + flake8 +platforms: + - name: test-proxysql-01 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-ubuntu1804}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: true +provisioner: + name: ansible +scenario: + name: default + converge_sequence: + - dependency + - create + - prepare + - converge + test_sequence: + - lint + - destroy + - dependency + - syntax + - create + - prepare + - converge + - idempotence + # - side_effect + - verify + - destroy +verifier: + name: testinfra + env: + PYTHONWARNINGS: "ignore:.*U.*mode is deprecated:DeprecationWarning" + options: + # show which tests where executed in test output + v: 1 diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/prepare.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/prepare.yml new file mode 100644 index 00000000..07f52352 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/prepare.yml @@ -0,0 +1,20 @@ +--- +- name: Prepare + hosts: all + tasks: + - name: fix trusty image + block: + + - name: remove removed repo + file: + name: '/etc/apt/sources.list.d/jonathonf-python-2_7-trusty.list' + state: absent + + - name: install python-apt + apt: + name: + - python-apt + - python-pkg-resources=3.3-1ubuntu1 + state: present + + when: ansible_lsb.major_release|int == 14 diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/tests/test_default.py b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/tests/test_default.py new 
file mode 100644 index 00000000..25a93c32 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/molecule/default/tests/test_default.py @@ -0,0 +1,81 @@ +import os +import pytest + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def test_hosts_file(host): + f = host.file('/etc/hosts') + + assert f.exists + assert f.user == 'root' + assert f.group == 'root' + + +proxysql_user_attributes = ("user_name," + "group_name") + + +@pytest.mark.parametrize(proxysql_user_attributes, [ + ("proxysql", "proxysql"), +]) +def test_proxysql_users(host, + user_name, + group_name): + u = host.user(user_name) + + assert u.exists + assert u.group == group_name + + +proxysql_file_attributes = ("proxysql_file," + "proxysql_file_user," + "proxysql_file_group," + "proxysql_file_mode") + + +@pytest.mark.parametrize(proxysql_file_attributes, [ + ("/root/.my.cnf", None, None, 0o600), + ("/etc/proxysql.cnf", "proxysql", "proxysql", 0o644), +]) +def test_proxysql_files(host, + proxysql_file, + proxysql_file_user, + proxysql_file_group, + proxysql_file_mode): + f = host.file(proxysql_file) + + assert f.exists + assert f.is_file + if proxysql_file_user: + assert f.user == proxysql_file_user + if proxysql_file_group: + assert f.group == proxysql_file_group + if proxysql_file_mode: + assert f.mode == proxysql_file_mode + + +@pytest.mark.parametrize("proxysql_package", [ + ("percona-server-client-5.7"), + ("proxysql"), +]) +def test_proxysql_packages(host, + proxysql_package): + + pkg = host.package(proxysql_package) + + assert pkg.is_installed + + +@pytest.mark.parametrize("proxysql_service", [ + ("proxysql"), +]) +def test_proxysql_services(host, + proxysql_service): + svc = host.service(proxysql_service) + + assert svc.is_enabled + assert svc.is_running diff --git 
a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/config.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/config.yml new file mode 100644 index 00000000..db7038f9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/config.yml @@ -0,0 +1,32 @@ +--- +- name: proxysql | config | copy client my.cnf + template: + src: client.my.cnf.j2 + dest: ~/.my.cnf + mode: 0600 + +- name: proxysql | config | copy proxysql config + template: + src: proxysql.cnf.j2 + dest: /etc/proxysql.cnf + owner: "{{ proxysql_user }}" + group: "{{ proxysql_group }}" + mode: 0644 + notify: + - update proxysql config + - restart proxysql + +- name: proxysql | config | enable and start proxysql + service: + name: proxysql + state: started + enabled: true + +- name: proxysql | config | wait for proxysql + wait_for: + port: "{{ proxysql_admin_port }}" + state: started + timeout: 30 + +- name: proxysql | config | update dynamic config + meta: flush_handlers diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/install.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/install.yml new file mode 100644 index 00000000..538841aa --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/install.yml @@ -0,0 +1,90 @@ +--- +- name: proxysql | install | update apt cache + apt: + cache_valid_time: 14400 + changed_when: false + ignore_errors: "{{ ansible_check_mode }}" + +- name: proxysql | install | install (trusty specific) + block: + + - name: proxysql | install | install platform specific prereqs + apt: + name: "{{ lookup('vars', 'proxysql_' + ansible_lsb.codename + '_prereqs') }}" + state: present + environment: + DEBIAN_FRONTEND: noninteractive + + rescue: + + - name: proxysql | install | handle the error if we failed in check mode, with 
python-apt uninstalled + assert: + that: ansible_failed_result.msg is match("python-apt must be installed to use check mode.*") + fail_msg: "unknown error during package install" + success_msg: "running in check mode without python-apt installed, ignoring error" + + when: ansible_lsb.major_release|int == 14 + + +- name: proxysql | install | install + block: + + - name: proxysql | install | install platform specific prereqs + apt: + name: "{{ proxysql_prereqs }}" + state: present + environment: + DEBIAN_FRONTEND: noninteractive + + - name: proxysql | install | install python packages + pip: + name: "{{ proxysql_python_packages }}" + executable: pip3 + + rescue: + + - name: proxysql | install | handle the error if we failed in check mode, with python-apt uninstalled + assert: + that: ansible_failed_result.msg is match("python-apt must be installed to use check mode.*") + fail_msg: "unknown error during package install" + success_msg: "running in check mode without python-apt installed, ignoring error" + + when: ansible_lsb.major_release|int > 14 + +- name: proxysql | install | install + block: + + - name: proxysql | install | add apt signing key for percona + apt_key: + keyserver: keyserver.ubuntu.com + id: 4D1BB29D63D98E422B2113B19334A25F8507EFA5 + state: present + + + - name: proxysql | install | add percona repositories + apt_repository: + repo: "{{ item }}" + state: present + loop: "{{ percona_mysql_repos }}" + + - name: proxysql | install | install packages required by proxysql + apt: + name: "{{ proxysql_additional_packages }}" + state: present + environment: + DEBIAN_FRONTEND: noninteractive + + - name: proxysql | install | install proxysql release + apt: + deb: "{{ proxysql_release }}" + state: present + notify: + - restart proxysql + + rescue: + + - name: proxysql | install | handle the error if we failed in check mode, with python-apt uninstalled + assert: + that: ansible_failed_result is search("python-apt must be installed to use check mode") + fail_msg: 
"unknown error during package install" + success_msg: "running in check mode without python-apt installed, ignoring error" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/main.yml new file mode 100644 index 00000000..9836f366 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- import_tasks: setvars.yml + tags: + - always + +- block: + + - import_tasks: users.yml + tags: + - users + - config + - import_tasks: install.yml + when: not proxysql_mysql_use_custom_build + tags: + - install + + become: true + become_user: root + +- import_tasks: config.yml + tags: + - config diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/setvars.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/setvars.yml new file mode 100644 index 00000000..fe26b488 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/setvars.yml @@ -0,0 +1,8 @@ +--- +- name: proxysql | setvars | set users + set_fact: + admin_credentials_list: "{{ admin_credentials_list | default([]) + [ item.username + ':' + item.password ] }}" + loop: + - { username: "{{ proxysql_admin_user }}", password: "{{ proxysql_admin_password }}" } + - { username: "{{ proxysql_admin_cluster_username }}", password: "{{ proxysql_admin_cluster_password }}" } + when: item.username and item.password diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/users.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/users.yml new file mode 100644 index 00000000..eb8ea675 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/tasks/users.yml @@ -0,0 +1,16 
@@ +--- +- name: proxysql | users | create group for proxysql + group: + name: "{{ proxysql_group }}" + system: true + state: present + +- name: proxysql | users | create user for proxysql + user: + name: "{{ proxysql_user }}" + system: true + group: "{{ proxysql_group }}" + comment: "Proxysql Service" + home: "{{ proxysql_datadir }}" + shell: /usr/sbin/nologin + state: present diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/templates/client.my.cnf.j2 b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/templates/client.my.cnf.j2 new file mode 100644 index 00000000..12d44d53 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/templates/client.my.cnf.j2 @@ -0,0 +1,6 @@ +### {{ ansible_managed }} +[client] +user=admin +password=admin +host={{ proxysql_admin_bind_address }} +port={{ proxysql_admin_port }} diff --git a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/templates/proxysql.cnf.j2 b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/templates/proxysql.cnf.j2 new file mode 100644 index 00000000..28ce54c6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/templates/proxysql.cnf.j2 @@ -0,0 +1,21 @@ +#jinja2: lstrip_blocks: "true" +datadir="{{ proxysql_datadir }}" +restart_on_missing_heartbeats={{ proxysql_restart_missing_heartbeats }} + +admin_variables= +{ +{% for config_item in proxysql_admin_variables|dictsort %} + {% if config_item.1.variable_value is not none %} + {{ config_item.1.variable }}={{ config_item.1.variable_value | to_json }} + {% endif %} +{% endfor %} +} + +mysql_variables= +{ +{% for config_item in proxysql_mysql_variables|dictsort %} + {% if config_item.1.variable_value is not none %} + {{ config_item.1.variable }}={{ config_item.1.variable_value | to_json }} + {% endif %} +{% endfor %} +} diff --git 
a/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/vars/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/vars/main.yml new file mode 100644 index 00000000..ffc02dc8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/roles/proxysql/vars/main.yml @@ -0,0 +1,385 @@ +--- +### percona repo +percona_mysql_repos: + - deb http://repo.percona.com/apt {{ ansible_lsb.codename }} main + - deb-src http://repo.percona.com/apt {{ ansible_lsb.codename }} main + +### platform specific packages +proxysql_trusty_prereqs: + - libgnutls28-dev + +proxysql_prereqs: + - dirmngr + - python-setuptools + - python3-pip + - python3-virtualenv + +### proxysql required packages +proxysql_release: "{{ proxysql_download_src }}/v{{ proxysql_version }}/proxysql_{{ proxysql_version }}-{{ ansible_lsb.id | lower }}{{ ansible_lsb.major_release }}_amd64.deb" + +proxysql_additional_packages: + - percona-server-client-{{ proxysql_mysql_client_version }} + - python-mysqldb + +proxysql_python_packages: + - pymysql + +proxysql_admin_variables: + admin_credentials: + variable: "admin_credentials" + variable_value: "{{ admin_credentials_list | join(';') }}" + checksum_mysql_query_rules: + variable: "checksum_mysql_query_rules" + variable_value: "{{ proxysql_admin_checksum_mysql_query_rules | to_json }}" + checksum_mysql_servers: + variable: "checksum_mysql_servers" + variable_value: "{{ proxysql_admin_checksum_mysql_servers | to_json }}" + checksum_mysql_users: + variable: "checksum_mysql_users" + variable_value: "{{ proxysql_admin_checksum_mysql_users | to_json }}" + cluster_check_interval_ms: + variable: "cluster_check_interval_ms" + variable_value: "{{ proxysql_admin_cluster_check_interval_ms }}" + cluster_check_status_frequency: + variable: "cluster_check_status_frequency" + variable_value: "{{ proxysql_admin_cluster_check_status_frequency }}" + cluster_mysql_query_rules_diffs_before_sync: + variable: 
"cluster_mysql_query_rules_diffs_before_sync" + variable_value: "{{ proxysql_admin_cluster_mysql_query_rules_diffs_before_sync }}" + cluster_mysql_query_rules_save_to_disk: + variable: "cluster_mysql_query_rules_save_to_disk" + variable_value: "{{ proxysql_admin_cluster_mysql_query_rules_save_to_disk | to_json }}" + cluster_mysql_servers_diffs_before_sync: + variable: "cluster_mysql_servers_diffs_before_sync" + variable_value: "{{ proxysql_admin_cluster_mysql_servers_diffs_before_sync }}" + cluster_mysql_servers_save_to_disk: + variable: "cluster_mysql_servers_save_to_disk" + variable_value: "{{ proxysql_admin_cluster_mysql_servers_save_to_disk | to_json }}" + cluster_mysql_users_diffs_before_sync: + variable: "cluster_mysql_users_diffs_before_sync" + variable_value: "{{ proxysql_admin_cluster_mysql_users_diffs_before_sync }}" + cluster_mysql_users_save_to_disk: + variable: "cluster_mysql_users_save_to_disk" + variable_value: "{{ proxysql_admin_cluster_mysql_users_save_to_disk | to_json }}" + cluster_proxysql_servers_diffs_before_sync: + variable: "cluster_proxysql_servers_diffs_before_sync" + variable_value: "{{ proxysql_admin_cluster_proxysql_servers_diffs_before_sync }}" + cluster_proxysql_servers_save_to_disk: + variable: "cluster_proxysql_servers_save_to_disk" + variable_value: "{{ proxysql_admin_cluster_proxysql_servers_save_to_disk | to_json }}" + hash_passwords: + variable: "hash_passwords" + variable_value: "{{ proxysql_admin_hash_passwords | to_json }}" + mysql_ifaces: + variable: "mysql_ifaces" + variable_value: "{{ proxysql_admin_bind_address }}:{{ proxysql_admin_port }};{{ proxysql_admin_socket }}" + read_only: + variable: "read_only" + variable_value: "{{ proxysql_admin_read_only | to_json }}" + refresh_interval: + variable: "refresh_interval" + variable_value: "{{ proxysql_admin_refresh_interval }}" + stats_credentials: + variable: "stats_credentials" + variable_value: "{{ proxysql_admin_stats_user }}:{{ proxysql_admin_stats_password }}" + 
stats_mysql_connection_pool: + variable: "stats_mysql_connection_pool" + variable_value: "{{ proxysql_admin_stats_mysql_connection_pool }}" + stats_mysql_connections: + variable: "stats_mysql_connections" + variable_value: "{{ proxysql_admin_stats_mysql_connections }}" + stats_mysql_query_cache: + variable: "stats_mysql_query_cache" + variable_value: "{{ proxysql_admin_stats_mysql_query_cache }}" + stats_system_cpu: + variable: "stats_system_cpu" + variable_value: "{{ proxysql_admin_stats_system_cpu }}" + stats_system_memory: + variable: "stats_system_memory" + variable_value: "{{ proxysql_admin_stats_system_memory }}" + web_enabled: + variable: "web_enabled" + variable_value: "{{ proxysql_admin_web_enabled | to_json }}" + web_port: + variable: "web_port" + variable_value: "{{ proxysql_admin_web_port }}" + +proxysql_mysql_variables: + autocommit_false_is_transaction: + variable: "autocommit_false_is_transaction" + variable_value: "{{ proxysql_mysql_autocommit_false_is_transaction | to_json }}" + autocommit_false_not_reusable: + variable: "autocommit_false_not_reusable" + variable_value: "{{ proxysql_mysql_autocommit_false_not_reusable | to_json }}" + client_found_rows: + variable: "client_found_rows" + variable_value: "{{ proxysql_mysql_client_found_rows | to_json }}" + commands_stats: + variable: "commands_stats" + variable_value: "{{ proxysql_mysql_commands_stats | to_json }}" + connect_retries_delay: + variable: "connect_retries_delay" + variable_value: "{{ proxysql_mysql_connect_retries_delay }}" + connect_retries_on_failure: + variable: "connect_retries_on_failure" + variable_value: "{{ proxysql_mysql_connect_retries_on_failure }}" + connect_timeout_server: + variable: "connect_timeout_server" + variable_value: "{{ proxysql_mysql_connect_timeout_server }}" + connect_timeout_server_max: + variable: "connect_timeout_server_max" + variable_value: "{{ proxysql_mysql_connect_timeout_server_max }}" + connection_delay_multiplex_ms: + variable: 
"connection_delay_multiplex_ms" + variable_value: "{{ proxysql_mysql_connection_delay_multiplex_ms }}" + connection_max_age_ms: + variable: "connection_max_age_ms" + variable_value: "{{ proxysql_mysql_connection_max_age_ms }}" + connpoll_reset_queue_length: + variable: "connpoll_reset_queue_length" + variable_value: "{{ proxysql_mysql_connpoll_reset_queue_length }}" + default_charset: + variable: "default_charset" + variable_value: "{{ proxysql_mysql_default_charset }}" + default_max_latency_ms: + variable: "default_max_latency_ms" + variable_value: "{{ proxysql_mysql_default_max_latency_ms }}" + default_query_delay: + variable: "default_query_delay" + variable_value: "{{ proxysql_mysql_default_query_delay }}" + default_query_timeout: + variable: "default_query_timeout" + variable_value: "{{ proxysql_mysql_default_query_timeout }}" + default_schema: + variable: "default_schema" + variable_value: "{{ proxysql_mysql_default_schema }}" + default_sql_mode: + variable: "default_sql_mode" + variable_value: "{{ proxysql_mysql_default_sql_mode }}" + default_time_zone: + variable: "default_time_zone" + variable_value: "{{ proxysql_mysql_default_time_zone }}" + eventslog_filename: + variable: "eventslog_filename" + variable_value: "{{ proxysql_mysql_eventslog_filename }}" + eventslog_filesize: + variable: "eventslog_filesize" + variable_value: "{{ proxysql_mysql_eventslog_filesize }}" + enforce_autocommit_on_reads: + variable: "enforce_autocommit_on_reads" + variable_value: "{{ proxysql_mysql_enforce_autocommit_on_reads | to_json}}" + forward_autocommit: + variable: "forward_autocommit" + variable_value: "{{ proxysql_mysql_forward_autocommit | to_json}}" + free_connections_pct: + variable: "free_connections_pct" + variable_value: "{{ proxysql_mysql_free_connections_pct }}" + hostgroup_manager_verbose: + variable: "hostgroup_manager_verbose" + variable_value: "{{ proxysql_mysql_hostgroup_manager_verbose }}" + init_connect: + variable: "init_connect" + variable_value: "{{ 
proxysql_mysql_init_connect }}" + kill_backend_connection_when_disconnect: + variable: "kill_backend_connection_when_disconnect" + variable_value: "{{ proxysql_mysql_kill_backend_connection_when_disconnect | to_json }}" + long_query_time: + variable: "long_query_time" + variable_value: "{{ proxysql_mysql_long_query_time }}" + max_allowed_packet: + variable: "max_allowed_packet" + variable_value: "{{ proxysql_mysql_max_allowed_packet }}" + max_connections: + variable: "max_connections" + variable_value: "{{ proxysql_mysql_max_connections }}" + max_stmts_cache: + variable: "max_stmts_cache" + variable_value: "{{ proxysql_mysql_max_stmts_cache }}" + max_stmts_per_connection: + variable: "max_stmts_per_connection" + variable_value: "{{ proxysql_mysql_max_stmts_per_connection }}" + max_transaction_time: + variable: "max_transaction_time" + variable_value: "{{ proxysql_mysql_max_transaction_time }}" + mirror_max_concurrency: + variable: "mirror_max_concurrency" + variable_value: "{{ proxysql_mysql_mirror_max_concurrency }}" + mirror_max_queue_length: + variable: "mirror_max_queue_length" + variable_value: "{{ proxysql_mysql_mirror_max_queue_length }}" + monitor_connect_interval: + variable: "monitor_connect_interval" + variable_value: "{{ proxysql_mysql_monitor_connect_interval }}" + monitor_connect_timeout: + variable: "monitor_connect_timeout" + variable_value: "{{ proxysql_mysql_monitor_connect_timeout }}" + monitor_enabled: + variable: "monitor_enabled" + variable_value: "{{ proxysql_mysql_monitor_enabled | to_json }}" + monitor_groupreplication_healthcheck_interval: + variable: "monitor_groupreplication_healthcheck_interval" + variable_value: "{{ proxysql_mysql_monitor_groupreplication_healthcheck_interval }}" + monitor_groupreplication_healthcheck_timeout: + variable: "monitor_groupreplication_healthcheck_timeout" + variable_value: "{{ proxysql_mysql_monitor_groupreplication_healthcheck_timeout }}" + monitor_history: + variable: "monitor_history" + variable_value: 
"{{ proxysql_mysql_monitor_history }}" + monitor_password: + variable: "monitor_password" + variable_value: "{{ proxysql_mysql_monitor_password }}" + monitor_ping_interval: + variable: "monitor_ping_interval" + variable_value: "{{ proxysql_mysql_monitor_ping_interval }}" + monitor_ping_max_failures: + variable: "monitor_ping_max_failures" + variable_value: "{{ proxysql_mysql_monitor_ping_max_failures }}" + monitor_ping_timeout: + variable: "monitor_ping_timeout" + variable_value: "{{ proxysql_mysql_monitor_ping_timeout }}" + monitor_query_interval: + variable: "monitor_query_interval" + variable_value: "{{ proxysql_mysql_monitor_query_interval }}" + monitor_query_timeout: + variable: "monitor_query_timeout" + variable_value: "{{ proxysql_mysql_monitor_query_timeout }}" + monitor_read_only_interval: + variable: "monitor_read_only_interval" + variable_value: "{{ proxysql_mysql_monitor_read_only_interval }}" + monitor_read_only_max_timeout_count: + variable: "monitor_read_only_max_timeout_count" + variable_value: "{{ proxysql_mysql_monitor_read_only_max_timeout_count }}" + monitor_read_only_timeout: + variable: "monitor_read_only_timeout" + variable_value: "{{ proxysql_mysql_monitor_read_only_timeout }}" + monitor_replication_lag_interval: + variable: "monitor_replication_lag_interval" + variable_value: "{{ proxysql_mysql_monitor_replication_lag_interval }}" + monitor_replication_lag_timeout: + variable: "monitor_replication_lag_timeout" + variable_value: "{{ proxysql_mysql_monitor_replication_lag_timeout }}" + monitor_replication_lag_use_percona_heartbeat: + variable: "monitor_replication_lag_use_percona_heartbeat" + variable_value: "{{ proxysql_mysql_monitor_replication_lag_use_percona_heartbeat }}" + monitor_slave_lag_when_null: + variable: "monitor_slave_lag_when_null" + variable_value: "{{ proxysql_mysql_monitor_slave_lag_when_null }}" + monitor_username: + variable: "monitor_username" + variable_value: "{{ proxysql_mysql_monitor_username }}" + 
monitor_wait_timeout: + variable: "monitor_wait_timeout" + variable_value: "{{ proxysql_mysql_monitor_wait_timeout | to_json }}" + monitor_writer_is_also_reader: + variable: "monitor_writer_is_also_reader" + variable_value: "{{ proxysql_mysql_monitor_writer_is_also_reader | to_json }}" + multiplexing: + variable: "multiplexing" + variable_value: "{{ proxysql_mysql_multiplexing | to_json }}" + mysql_interfaces: + variable: "interfaces" + variable_value: "{{ proxysql_mysql_bind_address }}:{{ proxysql_mysql_port }};{{ proxysql_mysql_socket }}" + ping_interval_server_msec: + variable: "ping_interval_server_msec" + variable_value: "{{ proxysql_mysql_ping_interval_server_msec }}" + ping_timeout_server: + variable: "ping_timeout_server" + variable_value: "{{ proxysql_mysql_ping_timeout_server }}" + poll_timeout: + variable: "poll_timeout" + variable_value: "{{ proxysql_mysql_poll_timeout }}" + poll_timeout_on_failure: + variable: "poll_timeout_on_failure" + variable_value: "{{ proxysql_mysql_poll_timeout_on_failure }}" + query_cache_size_mb: + variable: "query_cache_size_MB" + variable_value: "{{ proxysql_mysql_query_cache_size_mb }}" + query_digests: + variable: "query_digests" + variable_value: "{{ proxysql_mysql_query_digests | to_json }}" + query_digests_lowercase: + variable: "query_digests_lowercase" + variable_value: "{{ proxysql_mysql_query_digests_lowercase | to_json }}" + query_digests_max_digest_length: + variable: "query_digests_max_digest_length" + variable_value: "{{ proxysql_mysql_query_digests_max_digest_length }}" + query_digests_max_query_length: + variable: "query_digests_max_query_length" + variable_value: "{{ proxysql_mysql_query_digests_max_query_length }}" + query_processor_iterations: + variable: "query_processor_iterations" + variable_value: "{{ proxysql_mysql_query_processor_iterations }}" + query_processor_regex: + variable: "query_processor_regex" + variable_value: "{{ proxysql_mysql_query_processor_regex }}" + query_retries_on_failure: + 
variable: "query_retries_on_failure" + variable_value: "{{ proxysql_mysql_query_retries_on_failure }}" + server_capabilities: + variable: "server_capabilities" + variable_value: "{{ proxysql_mysql_server_capabilities }}" + server_version: + variable: "server_version" + variable_value: "{{ proxysql_mysql_server_version }}" + session_idle_ms: + variable: "session_idle_ms" + variable_value: "{{ proxysql_mysql_session_idle_ms }}" + session_idle_show_processlist: + variable: "session_idle_show_processlist" + variable_value: "{{ proxysql_mysql_session_idle_show_processlist | to_json }}" + sessions_sort: + variable: "sessions_sort" + variable_value: "{{ proxysql_mysql_sessions_sort | to_json }}" + shun_on_failures: + variable: "shun_on_failures" + variable_value: "{{ proxysql_mysql_shun_on_failures }}" + shun_recovery_time_sec: + variable: "shun_recovery_time_sec" + variable_value: "{{ proxysql_mysql_shun_recovery_time_sec }}" + ssl_p2s_ca: + variable: "ssl_p2s_ca" + variable_value: "{{ proxysql_mysql_ssl_p2s_ca }}" + ssl_p2s_cert: + variable: "ssl_p2s_cert" + variable_value: "{{ proxysql_mysql_ssl_p2s_cert }}" + ssl_p2s_cipher: + variable: "ssl_p2s_cipher" + variable_value: "{{ proxysql_mysql_ssl_p2s_cipher }}" + ssl_p2s_key: + variable: "ssl_p2s_key" + variable_value: "{{ proxysql_mysql_ssl_p2s_key }}" + stats_time_backend_query: + variable: "stats_time_backend_query" + variable_value: "{{ proxysql_mysql_stats_time_backend_query | to_json }}" + stats_time_query_processor: + variable: "stats_time_query_processor" + variable_value: "{{ proxysql_mysql_stats_time_query_processor | to_json }}" + threshold_query_length: + variable: "threshold_query_length" + variable_value: "{{ proxysql_mysql_threshold_query_length }}" + threshold_resultset_size: + variable: "threshold_resultset_size" + variable_value: "{{ proxysql_mysql_threshold_resultset_size }}" + throttle_connections_per_sec_to_hostgroup: + variable: "throttle_connections_per_sec_to_hostgroup" + variable_value: "{{ 
proxysql_mysql_throttle_connections_per_sec_to_hostgroup }}" + throttle_max_bytes_per_second_to_client: + variable: "throttle_max_bytes_per_second_to_client" + variable_value: "{{ proxysql_mysql_throttle_max_bytes_per_second_to_client }}" + throttle_ratio_server_to_client: + variable: "throttle_ratio_server_to_client" + variable_value: "{{ proxysql_mysql_throttle_ratio_server_to_client }}" + verbose_query_error: + variable: "verbose_query_error" + variable_value: "{{ proxysql_mysql_verbose_query_error | to_json }}" + wait_timeout: + variable: "wait_timeout" + variable_value: "{{ proxysql_mysql_mysql_wait_timeout }}" + +proxysql_mysql_options: + mysql_threads: + variable: "threads" + variable_value: "{{ proxysql_mysql_threads }}" + mysql_stacksize: + variable: "stacksize" + variable_value: "{{ proxysql_mysql_stacksize }}" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/defaults/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/defaults/main.yml new file mode 100644 index 00000000..d994a842 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/defaults/main.yml @@ -0,0 +1,5 @@ +--- +proxysql_download_src: https://github.com/sysown/proxysql/releases/download +proxysql_version: 2.0.10 + +proxysql_mysql_client_version: 5.7 diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/tasks/config.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/tasks/config.yml new file mode 100644 index 00000000..a66a01ae --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/tasks/config.yml @@ -0,0 +1,12 @@ +--- +- name: proxysql | config | enable and start proxysql + service: + name: 
proxysql + state: started + enabled: true + +- name: proxysql | config | wait for proxysql + wait_for: + port: 6032 + state: started + timeout: 30 diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/tasks/install.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/tasks/install.yml new file mode 100644 index 00000000..5ceb67ae --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/tasks/install.yml @@ -0,0 +1,28 @@ +--- +- name: proxysql | install | add apt signing key for percona + apt_key: + keyserver: keyserver.ubuntu.com + id: 4D1BB29D63D98E422B2113B19334A25F8507EFA5 + state: present + +- name: proxysql | install | add percona repositories + apt_repository: + repo: "{{ item }}" + state: present + loop: "{{ proxysql_percona_mysql_repos }}" + +- name: proxysql | install | install proxysql release + apt: + deb: "{{ proxysql_release }}" + state: present + +- name: proxysql | install | install packages required by proxysql + apt: + name: "{{ proxysql_percona_mysql_packages }}" + state: present + environment: + DEBIAN_FRONTEND: noninteractive + +- name: proxysql | install | install python packages + pip: + name: "{{ proxysql_python_packages }}" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/tasks/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/tasks/main.yml new file mode 100644 index 00000000..66e9a653 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- import_tasks: install.yml +- import_tasks: config.yml diff --git 
a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/vars/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/vars/main.yml new file mode 100644 index 00000000..443d7891 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/vars/main.yml @@ -0,0 +1,13 @@ +--- +proxysql_release: "{{ proxysql_download_src }}/v{{ proxysql_version }}/proxysql_{{ proxysql_version }}-{{ ansible_lsb.id | lower }}{{ ansible_lsb.major_release }}_amd64.deb" + +proxysql_percona_mysql_repos: + - deb http://repo.percona.com/apt {{ ansible_lsb.codename }} main + - deb-src http://repo.percona.com/apt {{ ansible_lsb.codename }} main + +proxysql_percona_mysql_packages: + - percona-server-client-{{ proxysql_mysql_client_version }} + # - python-mysqldb + +proxysql_python_packages: + - pymysql diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/defaults/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/defaults/main.yml new file mode 100644 index 00000000..f5a4be59 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/defaults/main.yml @@ -0,0 +1,8 @@ +--- +test_host: mysql01 + +test_proxysql_backend_servers_check_mode: false +test_proxysql_backend_servers_in_memory_only: false +test_proxysql_backend_servers_with_delayed_persist: false +test_proxysql_backend_servers_check_idempotence: false +test_proxysql_backend_servers_cleanup_after_test: true diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/meta/main.yml 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/meta/main.yml new file mode 100644 index 00000000..2023b8da --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/base_test.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/base_test.yml new file mode 100644 index 00000000..4cd1d097 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/base_test.yml @@ -0,0 +1,57 @@ +--- +### prepare +- name: "{{ role_name }} | {{ current_test }} | are we performing a delete" + set_fact: + test_delete: "{{ current_test | regex_search('^test_delete') | ternary(true, false) }}" + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: "{{ test_delete|ternary('setup_test_server', 'cleanup_test_servers') }}.yml" + when: not test_proxysql_backend_servers_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | {{ test_delete|ternary('delete','create') }} test backend mysql server" + proxysql_backend_servers: + login_user: admin + login_password: admin + hostname: "{{ test_host }}" + state: "{{ test_delete|ternary('absent', 'present') }}" + save_to_disk: "{{ not test_proxysql_backend_servers_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_backend_servers_in_memory_only }}" + check_mode: "{{ test_proxysql_backend_servers_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + 
+ - name: "{{ role_name }} | {{ current_test }} | save the mysql servers config from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: MYSQL SERVERS + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load the mysql servers config from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: MYSQL SERVERS + direction: TO + config_layer: RUNTIME + + when: test_proxysql_backend_servers_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if test backend mysql server exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT hostname FROM mysql_servers where hostname = '{{ test_host }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if test backend mysql server exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT hostname FROM disk.mysql_servers where hostname = '{{ test_host }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if test backend mysql server exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT hostname FROM runtime_mysql_servers where hostname = '{{ test_host }}'" + register: runtime_result diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/cleanup_test_servers.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/cleanup_test_servers.yml new file mode 100644 index 00000000..070e15bb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/cleanup_test_servers.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state 
when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure no hosts are created" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"DELETE FROM mysql_servers" + + - name: "{{ role_name }} | {{ current_test }} | ensure no hosts are saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL SERVERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure no hosts are loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL SERVERS TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/main.yml new file mode 100644 index 00000000..bef6bbe4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/main.yml @@ -0,0 +1,83 @@ +--- +### tests + +- name: "{{ role_name }} | test_create_using_check_mode | test create backend server using check mode" + import_tasks: test_create_using_check_mode.yml + vars: + test_proxysql_backend_servers_check_mode: true + +- name: "{{ role_name }} | test_delete_using_check_mode | test delete backend server using check mode" + import_tasks: test_delete_using_check_mode.yml + vars: + test_proxysql_backend_servers_check_mode: true + +- name: "{{ role_name }} | test_create_backend_server | test create backend server" + import_tasks: test_create_backend_server.yml + vars: + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_create_backend_server | test idempotence of create backend server" + import_tasks: test_create_backend_server.yml + vars: + test_proxysql_backend_servers_check_idempotence: true + +- name: "{{ role_name }} | test_delete_backend_server | test delete backend server" + 
import_tasks: test_delete_backend_server.yml + vars: + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_backend_server | test idempotence of delete backend server" + import_tasks: test_delete_backend_server.yml + vars: + test_proxysql_backend_servers_check_idempotence: true + +- name: "{{ role_name }} | test_create_backend_server_in_memory_only | test create backend server in memory" + import_tasks: test_create_backend_server_in_memory_only.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_create_backend_server_in_memory_only | test idempotence of create backend server in memory" + import_tasks: test_create_backend_server_in_memory_only.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_check_idempotence: true + +- name: "{{ role_name }} | test_delete_backend_server_in_memory_only | test delete backend server in memory" + import_tasks: test_delete_backend_server_in_memory_only.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_backend_server_in_memory_only | test idempotence of delete backend server in memory" + import_tasks: test_delete_backend_server_in_memory_only.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_check_idempotence: true + +- name: "{{ role_name }} | test_create_backend_server_with_delayed_persist | test create backend server with delayed save to disk/load to runtime" + import_tasks: test_create_backend_server_with_delayed_persist.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_with_delayed_persist: true + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_create_backend_server_with_delayed_persist | test 
idempotence of create backend server with delayed save to disk/load to runtime" + import_tasks: test_create_backend_server_with_delayed_persist.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_with_delayed_persist: true + test_proxysql_backend_servers_check_idempotence: true + +- name: "{{ role_name }} | test_delete_backend_server_with_delayed_persist | test delete backend server with delayed save to disk/load to runtime" + import_tasks: test_delete_backend_server_with_delayed_persist.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_with_delayed_persist: true + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_backend_server_with_delayed_persist | test idempotence of delete backend server with delayed save to disk/load to runtime" + import_tasks: test_delete_backend_server_with_delayed_persist.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_with_delayed_persist: true + test_proxysql_backend_servers_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/setup_test_server.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/setup_test_server.yml new file mode 100644 index 00000000..64d9dacd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/setup_test_server.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test backend mysql server is created when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test backend mysql server is created in 
memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"INSERT OR REPLACE INTO mysql_servers (hostname) VALUES ('{{ test_host }}')" + + - name: "{{ role_name }} | {{ current_test }} | ensure test backend mysql server is created on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL SERVERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test backend mysql server is created in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL SERVERS TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/teardown.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/teardown.yml new file mode 100644 index 00000000..0cb5ae1e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server.yml new file mode 100644 index 00000000..fc199953 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_backend_server | set current test" + set_fact: + current_test: test_create_backend_server + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create server reported a 
change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_host }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_in_memory_only.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_in_memory_only.yml new file mode 100644 index 00000000..fe6148b7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_backend_server_in_memory_only | set current test" + set_fact: + current_test: test_create_backend_server_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change in memory" + assert: + that: 
memory_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create server didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create server didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_with_delayed_persist.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_with_delayed_persist.yml new file mode 100644 index 00000000..c4c8956f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_backend_server_with_delayed_persist | set current test" + set_fact: + current_test: test_create_backend_server_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ 
current_test }} | confirm create server did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_host }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_using_check_mode.yml new file mode 100644 index 00000000..2c2dfa97 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_create_using_check_mode | set current test" + set_fact: + current_test: test_create_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create server in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create server in check mode didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create server in check mode didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create server in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml diff --git 
a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server.yml new file mode 100644 index 00000000..17305edf --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_backend_server | set current test" + set_fact: + current_test: test_delete_backend_server + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_in_memory_only.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_in_memory_only.yml new 
file mode 100644 index 00000000..b7882fb6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_backend_server_in_memory_only | set current test" + set_fact: + current_test: test_delete_backend_server_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_host }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_with_delayed_persist.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_with_delayed_persist.yml new file mode 100644 index 00000000..516a0a47 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_with_delayed_persist.yml @@ -0,0 
+1,31 @@ +--- +- name: "{{ role_name }} | test_delete_backend_server_with_delayed_persist | set current test" + set_fact: + current_test: test_delete_backend_server_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_using_check_mode.yml new file mode 100644 index 00000000..17aac721 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_using_check_mode | set current test" + set_fact: + current_test: test_delete_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete server in check mode reported a change" + 
assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server in check mode didn't make a change in memory" + assert: + that: memory_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server in check mode didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_host }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/defaults/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/defaults/main.yml new file mode 100644 index 00000000..d6539384 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/defaults/main.yml @@ -0,0 +1,8 @@ +--- +test_variable: mysql-max_connections + +test_proxysql_global_variables_check_mode: false +test_proxysql_global_variables_in_memory_only: false +test_proxysql_global_variables_with_delayed_persist: false +test_proxysql_global_variables_check_idempotence: false +test_proxysql_global_variables_cleanup_after_test: true diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/meta/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/meta/main.yml new file mode 100644 index 00000000..2023b8da --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/base_test.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/base_test.yml new file mode 100644 index 00000000..7676ddf9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/base_test.yml @@ -0,0 +1,54 @@ +--- +### prepare + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: setup_global_variables.yml + when: not test_proxysql_global_variables_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | update global variable" + proxysql_global_variables: + login_user: admin + login_password: admin + variable: "{{ test_variable }}" + value: "{{ updated_variable_value }}" + save_to_disk: "{{ not test_proxysql_global_variables_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_global_variables_in_memory_only }}" + check_mode: "{{ test_proxysql_global_variables_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save global variables from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: MYSQL VARIABLES + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load global variables from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: MYSQL VARIABLES + direction: TO 
+ config_layer: RUNTIME + + when: test_proxysql_global_variables_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if updated global variable value exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT variable_value FROM global_variables where variable_name='{{ test_variable }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if updated global variable value exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT variable_value FROM disk.global_variables where variable_name='{{ test_variable }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if updated global variable value exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT variable_value FROM runtime_global_variables where variable_name='{{ test_variable }}'" + register: runtime_result diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/cleanup_global_variables.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/cleanup_global_variables.yml new file mode 100644 index 00000000..68241ac7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/cleanup_global_variables.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure variable value set to original value" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"UPDATE global_variables SET variable_value={{ original_variable_value }} WHERE variable_name='{{ test_variable }}'" + + - name: "{{ role_name }} | {{ current_test }} | ensure original variable value is saved on disk" + shell: mysql 
-uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL VARIABLES TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure original variable value is loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL VARIABLES TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/main.yml new file mode 100644 index 00000000..d598ce4c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/main.yml @@ -0,0 +1,48 @@ +--- +- name: "{{ role_name }} | setvars | populate base variables for tests" + import_tasks: setvars.yml + +### tests + +- name: "{{ role_name }} | test_update_variable_value_using_check_mode | test updating global variable using check mode" + import_tasks: test_update_variable_value_using_check_mode.yml + vars: + test_proxysql_global_variables_check_mode: true + +- name: "{{ role_name }} | test_update_variable_value | test updating global variable value" + import_tasks: test_update_variable_value.yml + vars: + test_proxysql_global_variables_cleanup_after_test: false +- name: "{{ role_name }} | test_update_variable_value | test idempotence of updating global variable value" + import_tasks: test_update_variable_value.yml + vars: + test_proxysql_global_variables_check_idempotence: true + +- name: "{{ role_name }} | test_update_variable_value_in_memory_only | test updating global variable value in memory" + import_tasks: test_update_variable_value_in_memory_only.yml + vars: + test_proxysql_global_variables_in_memory_only: true + test_proxysql_global_variables_cleanup_after_test: false +- name: "{{ role_name }} | test_update_variable_value_in_memory_only | test idempotence of updating global variable value in 
memory" + import_tasks: test_update_variable_value_in_memory_only.yml + vars: + test_proxysql_global_variables_in_memory_only: true + test_proxysql_global_variables_check_idempotence: true + +- name: "{{ role_name }} | test_update_variable_value_with_delayed_persist | test updating global variable value with delayed save to disk/load to runtime" + import_tasks: test_update_variable_value_with_delayed_persist.yml + vars: + test_proxysql_global_variables_in_memory_only: true + test_proxysql_global_variables_with_delayed_persist: true + test_proxysql_global_variables_cleanup_after_test: false +- name: "{{ role_name }} | test_update_variable_value_with_delayed_persist | test idempotence of updating global variable value with delayed save to disk/load to runtime" + import_tasks: test_update_variable_value_with_delayed_persist.yml + vars: + test_proxysql_global_variables_in_memory_only: true + test_proxysql_global_variables_with_delayed_persist: true + test_proxysql_global_variables_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/setup_global_variables.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/setup_global_variables.yml new file mode 100644 index 00000000..a5b0a8c4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/setup_global_variables.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test variable value is updated when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test variable value is updated in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"UPDATE global_variables SET 
variable_value={{ original_variable_value }} WHERE variable_name='{{ test_variable }}'" + + - name: "{{ role_name }} | {{ current_test }} | ensure test variable value is updated on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL VARIABLES TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test variable value is updated in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL VARIABLES TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/setvars.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/setvars.yml new file mode 100644 index 00000000..6499f12f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/setvars.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | setvars | get original variable value" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT variable_value FROM global_variables where variable_name='{{ test_variable }}'" + register: result + +- name: "{{ role_name }} | setvars | populate original_variable_value variable" + set_fact: + original_variable_value: "{{ result.stdout }}" + +- name: "{{ role_name }} | setvars | populate updated_variable_value variable" + set_fact: + updated_variable_value: "{{ result.stdout|int * 2 }}" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/teardown.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/teardown.yml new file mode 100644 index 00000000..0cb5ae1e --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value.yml new file mode 100644 index 00000000..ff5bc819 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_update_variable_value | set current test" + set_fact: + current_test: test_update_variable_value + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if updating variable value reported a change" + assert: + that: + - "status is {{ test_proxysql_global_variables_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change in memory" + assert: + that: "memory_result.stdout == '{{ updated_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change on disk" + assert: + that: "disk_result.stdout == '{{ updated_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change to runtime" + assert: + that: "runtime_result.stdout == '{{ updated_variable_value }}'" + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_global_variables.yml + when: 
test_proxysql_global_variables_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_in_memory_only.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_in_memory_only.yml new file mode 100644 index 00000000..11297ec5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_update_variable_value_in_memory_only | set current test" + set_fact: + current_test: test_update_variable_value_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if updating variable value reported a change" + assert: + that: + - "status is {{ test_proxysql_global_variables_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change in memory" + assert: + that: "memory_result.stdout == '{{ updated_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value didn't make a change on disk" + assert: + that: "disk_result.stdout == '{{ original_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value didn't make a change to runtime" + assert: + that: "runtime_result.stdout == '{{ original_variable_value }}'" + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_global_variables.yml + when: test_proxysql_global_variables_cleanup_after_test diff --git 
a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_using_check_mode.yml new file mode 100644 index 00000000..b98c2d20 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_update_variable_value_using_check_mode | set current test" + set_fact: + current_test: test_update_variable_value_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if updating variable value reported a change in check mode" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create updating variable value didn't make a change in memory in check mode" + assert: + that: "memory_result.stdout == '{{ original_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm create updating variable value didn't make a change on disk in check mode" + assert: + that: "disk_result.stdout == '{{ original_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm create updating variable value didn't make a change to runtime in check mode" + assert: + that: "runtime_result.stdout == '{{ original_variable_value }}'" + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_global_variables.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_with_delayed_persist.yml 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_with_delayed_persist.yml new file mode 100644 index 00000000..d6bb4246 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_updated_variable_value_with_delayed_persist | set current test" + set_fact: + current_test: test_updated_variable_value_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if updating variable value reported a change" + assert: + that: + - "status is {{ test_proxysql_global_variables_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change in memory" + assert: + that: "memory_result.stdout == '{{ updated_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change on disk" + assert: + that: "disk_result.stdout == '{{ updated_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change to runtime" + assert: + that: "runtime_result.stdout == '{{ updated_variable_value }}'" + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_global_variables.yml + when: test_proxysql_global_variables_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/defaults/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/defaults/main.yml new file mode 100644 index 
00000000..bd4f7873 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/defaults/main.yml @@ -0,0 +1,8 @@ +--- +test_user: productiondba + +test_proxysql_mysql_users_check_mode: false +test_proxysql_mysql_users_in_memory_only: false +test_proxysql_mysql_users_with_delayed_persist: false +test_proxysql_mysql_users_check_idempotence: false +test_proxysql_mysql_users_cleanup_after_test: true diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/meta/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/meta/main.yml new file mode 100644 index 00000000..2023b8da --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/base_test.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/base_test.yml new file mode 100644 index 00000000..2fdd3640 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/base_test.yml @@ -0,0 +1,59 @@ +--- +### prepare +- name: "{{ role_name }} | {{ current_test }} | are we performing a delete" + set_fact: + test_delete: "{{ current_test | regex_search('^test_delete') | ternary(true, false) }}" + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: "{{ test_delete|ternary('setup_test_user', 'cleanup_test_users') }}.yml" + when: not test_proxysql_mysql_users_check_idempotence + +### when + +- name: "{{ role_name }} | {{ 
current_test }} | {{ test_delete|ternary('delete','create') }} test mysql user" + proxysql_mysql_users: + login_user: admin + login_password: admin + username: productiondba + password: productiondbapassword + encrypt_password: true + state: "{{ test_delete|ternary('absent', 'present') }}" + save_to_disk: "{{ not test_proxysql_mysql_users_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_mysql_users_in_memory_only }}" + check_mode: "{{ test_proxysql_mysql_users_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save the mysql users config from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: MYSQL USERS + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load the mysql users config from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: MYSQL USERS + direction: TO + config_layer: RUNTIME + + when: test_proxysql_mysql_users_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if test mysql user exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username FROM mysql_users where username = '{{ test_user }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if test mysql user exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username FROM disk.mysql_users where username = '{{ test_user }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if test mysql user exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username FROM runtime_mysql_users where username = '{{ test_user }}'" + register: runtime_result diff --git 
a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/cleanup_test_users.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/cleanup_test_users.yml new file mode 100644 index 00000000..865f5ef7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/cleanup_test_users.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure no users are created" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"DELETE FROM mysql_users" + + - name: "{{ role_name }} | {{ current_test }} | ensure no users are saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL USERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure no users are loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL USERS TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/main.yml new file mode 100644 index 00000000..d4d9320b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/main.yml @@ -0,0 +1,83 @@ +--- +### tests + +- name: "{{ role_name }} | test_create_using_check_mode | test create mysql user using check mode" + import_tasks: test_create_using_check_mode.yml + vars: + test_proxysql_mysql_users_check_mode: true + +- name: "{{ role_name }} | test_delete_using_check_mode | test delete mysql user using check mode" + import_tasks: 
test_delete_using_check_mode.yml + vars: + test_proxysql_mysql_users_check_mode: true + +- name: "{{ role_name }} | test_create_mysql_user | test create mysql user" + import_tasks: test_create_mysql_user.yml + vars: + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_create_mysql_user | test idempotence of create mysql user" + import_tasks: test_create_mysql_user.yml + vars: + test_proxysql_mysql_users_check_idempotence: true + +- name: "{{ role_name }} | test_delete_mysql_user | test delete mysql user" + import_tasks: test_delete_mysql_user.yml + vars: + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_mysql_user | test idempotence of delete mysql user" + import_tasks: test_delete_mysql_user.yml + vars: + test_proxysql_mysql_users_check_idempotence: true + +- name: "{{ role_name }} | test_create_mysql_user_in_memory_only | test create mysql user in memory" + import_tasks: test_create_mysql_user_in_memory_only.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_create_mysql_user_in_memory_only | test idempotence of create mysql user in memory" + import_tasks: test_create_mysql_user_in_memory_only.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_check_idempotence: true + +- name: "{{ role_name }} | test_delete_mysql_user_in_memory_only | test delete mysql user in memory" + import_tasks: test_delete_mysql_user_in_memory_only.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_mysql_user_in_memory_only | test idempotence of delete mysql user in memory" + import_tasks: test_delete_mysql_user_in_memory_only.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_check_idempotence: true + +- name: "{{ role_name }} | 
test_create_mysql_user_with_delayed_persist | test create mysql user with delayed save to disk/load to runtime" + import_tasks: test_create_mysql_user_with_delayed_persist.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_with_delayed_persist: true + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_create_mysql_user_with_delayed_persist | test idempotence of create mysql user with delayed save to disk/load to runtime" + import_tasks: test_create_mysql_user_with_delayed_persist.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_with_delayed_persist: true + test_proxysql_mysql_users_check_idempotence: true + +- name: "{{ role_name }} | test_delete_mysql_user_with_delayed_persist | test delete mysql user with delayed save to disk/load to runtime" + import_tasks: test_delete_mysql_user_with_delayed_persist.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_with_delayed_persist: true + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_mysql_user_with_delayed_persist | test idempotence of delete mysql user with delayed save to disk/load to runtime" + import_tasks: test_delete_mysql_user_with_delayed_persist.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_with_delayed_persist: true + test_proxysql_mysql_users_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/setup_test_user.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/setup_test_user.yml new file mode 100644 index 00000000..9ad43c98 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/setup_test_user.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test mysql user is created when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test mysql user is created in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"INSERT OR REPLACE INTO mysql_users (username) VALUES ('{{ test_user }}')" + + - name: "{{ role_name }} | {{ current_test }} | ensure test mysql user is created on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL USERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test mysql user is created in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL USERS TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/teardown.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/teardown.yml new file mode 100644 index 00000000..0cb5ae1e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user.yml new file mode 100644 index 00000000..81616b48 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_mysql_user | set current test" + set_fact: + current_test: test_create_mysql_user + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }}\n{{ test_user }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_in_memory_only.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_in_memory_only.yml new file mode 100644 index 00000000..6abd5553 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_mysql_user_in_memory_only | set current test" + set_fact: + current_test: 
test_create_mysql_user_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create user didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create user didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_with_delayed_persist.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_with_delayed_persist.yml new file mode 100644 index 00000000..5a202822 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_mysql_user_with_delayed_persist | set current test" + set_fact: + current_test: test_create_mysql_user_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 
'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }}\n{{ test_user }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_using_check_mode.yml new file mode 100644 index 00000000..314d6307 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_create_using_check_mode | set current test" + set_fact: + current_test: test_create_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create user in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create user in check mode didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create user in check mode didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} 
| confirm create user in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user.yml new file mode 100644 index 00000000..48827968 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_mysql_user | set current test" + set_fact: + current_test: test_delete_mysql_user + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git 
a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_in_memory_only.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_in_memory_only.yml new file mode 100644 index 00000000..7967bf01 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_mysql_user_in_memory_only | set current test" + set_fact: + current_test: test_delete_mysql_user_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }}\n{{ test_user }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_with_delayed_persist.yml 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_with_delayed_persist.yml new file mode 100644 index 00000000..4239b6b4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_mysql_user_with_delayed_persist | set current test" + set_fact: + current_test: test_delete_mysql_user_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_using_check_mode.yml new file mode 100644 index 00000000..a2001540 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_using_check_mode | set current test" + set_fact: + current_test: test_delete_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete user in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user in check mode didn't make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user in check mode didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }}\n{{ test_user }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/defaults/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/defaults/main.yml new file mode 100644 index 00000000..ffcd581f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/defaults/main.yml @@ -0,0 +1,12 @@ +--- +test_user: 'guest_ro' +test_match_pattern: "^SELECT.*" +test_destination_hostgroup: 1 +test_active: 1 +test_retries: 3 + +test_proxysql_query_rules_check_mode: false +test_proxysql_query_rules_in_memory_only: false +test_proxysql_query_rules_with_delayed_persist: false 
+test_proxysql_query_rules_check_idempotence: false +test_proxysql_query_rules_cleanup_after_test: true diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/meta/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/meta/main.yml new file mode 100644 index 00000000..2023b8da --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/base_test.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/base_test.yml new file mode 100644 index 00000000..af830bc7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/base_test.yml @@ -0,0 +1,61 @@ +--- +### prepare +- name: "{{ role_name }} | {{ current_test }} | are we performing a delete" + set_fact: + test_delete: "{{ current_test | regex_search('^test_delete') | ternary(true, false) }}" + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: "{{ test_delete|ternary('setup_test_query_rule', 'cleanup_test_query_rules') }}.yml" + when: not test_proxysql_query_rules_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | {{ test_delete|ternary('delete','create') }} test query rule" + proxysql_query_rules: + login_user: admin + login_password: admin + username: '{{ test_user }}' + match_pattern: '{{ test_match_pattern }}' + destination_hostgroup: '{{ test_destination_hostgroup }}' + active: '{{ test_active }}' + retries: '{{ test_retries }}' + state: "{{ 
test_delete|ternary('absent', 'present') }}" + save_to_disk: "{{ not test_proxysql_query_rules_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_query_rules_in_memory_only }}" + check_mode: "{{ test_proxysql_query_rules_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save the query rules config from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: MYSQL QUERY RULES + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load the query rules config from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: MYSQL QUERY RULES + direction: TO + config_layer: RUNTIME + + when: test_proxysql_query_rules_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if test query rule exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username || ',' || match_pattern || ',' || destination_hostgroup || ',' || active || ',' || retries FROM mysql_query_rules where username = '{{ test_user }}' and match_pattern = '{{ test_match_pattern }}' and destination_hostgroup and '{{ test_destination_hostgroup }}' and active = '{{ test_active }}' and retries = '{{ test_retries }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if test query rule exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username || ',' || match_pattern || ',' || destination_hostgroup || ',' || active || ',' || retries FROM disk.mysql_query_rules where username = '{{ test_user }}' and match_pattern = '{{ test_match_pattern }}' and destination_hostgroup and '{{ test_destination_hostgroup }}' and active = '{{ test_active }}' and retries = '{{ test_retries }}'" + register: disk_result 
+ +- name: "{{ role_name }} | {{ current_test }} | check if test query rule exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username || ',' || match_pattern || ',' || destination_hostgroup || ',' || active || ',' || retries FROM runtime_mysql_query_rules where username = '{{ test_user }}' and match_pattern = '{{ test_match_pattern }}' and destination_hostgroup and '{{ test_destination_hostgroup }}' and active = '{{ test_active }}' and retries = '{{ test_retries }}'" + register: runtime_result diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/cleanup_test_query_rules.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/cleanup_test_query_rules.yml new file mode 100644 index 00000000..3f233b7f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/cleanup_test_query_rules.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure no query rules are created" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"DELETE FROM mysql_query_rules" + + - name: "{{ role_name }} | {{ current_test }} | ensure no query rules are saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL QUERY RULES TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure no query rules are loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL QUERY RULES TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/main.yml 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/main.yml new file mode 100644 index 00000000..bd18b831 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/main.yml @@ -0,0 +1,83 @@ +--- +### tests + +- name: "{{ role_name }} | test_create_using_check_mode | test create query rule using check mode" + import_tasks: test_create_using_check_mode.yml + vars: + test_proxysql_query_rules_check_mode: true + +- name: "{{ role_name }} | test_delete_using_check_mode | test delete query rule using check mode" + import_tasks: test_delete_using_check_mode.yml + vars: + test_proxysql_query_rules_check_mode: true + +- name: "{{ role_name }} | test_create_query_rule | test create query rule" + import_tasks: test_create_query_rule.yml + vars: + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_create_query_rule | test idempotence of create query rule" + import_tasks: test_create_query_rule.yml + vars: + test_proxysql_query_rules_check_idempotence: true + +- name: "{{ role_name }} | test_delete_query_rule | test delete query rule" + import_tasks: test_delete_query_rule.yml + vars: + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_query_rule | test idempotence of delete query rule" + import_tasks: test_delete_query_rule.yml + vars: + test_proxysql_query_rules_check_idempotence: true + +- name: "{{ role_name }} | test_create_query_rule_in_memory_only | test create query rule in memory" + import_tasks: test_create_query_rule_in_memory_only.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_create_query_rule_in_memory_only | test idempotence of create query rule in memory" + import_tasks: test_create_query_rule_in_memory_only.yml + vars: + 
test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_check_idempotence: true + +- name: "{{ role_name }} | test_delete_query_rule_in_memory_only | test delete query rule in memory" + import_tasks: test_delete_query_rule_in_memory_only.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_query_rule_in_memory_only | test idempotence of delete query rule in memory" + import_tasks: test_delete_query_rule_in_memory_only.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_check_idempotence: true + +- name: "{{ role_name }} | test_create_query_rule_with_delayed_persist | test create query rule with delayed save to disk/load to runtime" + import_tasks: test_create_query_rule_with_delayed_persist.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_with_delayed_persist: true + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_create_query_rule_with_delayed_persist | test idempotence of create query rule with delayed save to disk/load to runtime" + import_tasks: test_create_query_rule_with_delayed_persist.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_with_delayed_persist: true + test_proxysql_query_rules_check_idempotence: true + +- name: "{{ role_name }} | test_delete_query_rule_with_delayed_persist | test delete query rule with delayed save to disk/load to runtime" + import_tasks: test_delete_query_rule_with_delayed_persist.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_with_delayed_persist: true + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_query_rule_with_delayed_persist | test idempotence of delete query rule with delayed save to disk/load to runtime" + import_tasks: 
test_delete_query_rule_with_delayed_persist.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_with_delayed_persist: true + test_proxysql_query_rules_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/setup_test_query_rule.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/setup_test_query_rule.yml new file mode 100644 index 00000000..0af83654 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/setup_test_query_rule.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test query rule is created when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test query rule is created in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"INSERT OR REPLACE INTO mysql_query_rules (username, match_pattern, destination_hostgroup, active, retries) VALUES ('{{ test_user }}', '{{ test_match_pattern}}', '{{ test_destination_hostgroup }}', '{{ test_active }}', '{{ test_retries }}')" + + - name: "{{ role_name }} | {{ current_test }} | ensure test query rule is created on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL QUERY RULES TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test query rule is created in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL QUERY RULES TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/teardown.yml 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/teardown.yml new file mode 100644 index 00000000..0cb5ae1e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule.yml new file mode 100644 index 00000000..f6fcfe45 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_query_rule | set current test" + set_fact: + current_test: test_create_query_rule + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create query 
rule did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_in_memory_only.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_in_memory_only.yml new file mode 100644 index 00000000..4f0e5f64 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_query_rule_in_memory_only | set current test" + set_fact: + current_test: test_create_query_rule_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 
+ +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_with_delayed_persist.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_with_delayed_persist.yml new file mode 100644 index 00000000..2914ebce --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_query_rule_with_delayed_persist | set current test" + set_fact: + current_test: test_create_query_rule_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup 
}},{{ test_active }},{{ test_retries }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_using_check_mode.yml new file mode 100644 index 00000000..6e217bc6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_create_using_check_mode | set current test" + set_fact: + current_test: test_create_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create query rule in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule in check mode didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule in check mode didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml diff --git 
a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule.yml new file mode 100644 index 00000000..c17cb995 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_query_rule | set current test" + set_fact: + current_test: test_delete_query_rule + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_in_memory_only.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_in_memory_only.yml new file mode 100644 index 
00000000..a2e7b6a1 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_in_memory_only.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_query_rule_in_memory_only | set current test" + set_fact: + current_test: test_delete_query_rule_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_with_delayed_persist.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_with_delayed_persist.yml new file mode 100644 index 00000000..5ef8c47b --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_query_rule_with_delayed_persist | set current test" + set_fact: + current_test: test_delete_query_rule_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_using_check_mode.yml new file mode 100644 index 00000000..4efbf440 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_using_check_mode | set current test" + set_fact: + current_test: 
test_delete_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete query rule in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule in check mode didn't make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule in check mode didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/defaults/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/defaults/main.yml new file mode 100644 index 00000000..4e381d45 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/defaults/main.yml @@ -0,0 +1,9 @@ +--- +test_writer_hostgroup: 1 +test_reader_hostgroup: 2 + +test_proxysql_replication_hostgroups_check_mode: false +test_proxysql_replication_hostgroups_in_memory_only: false +test_proxysql_replication_hostgroups_with_delayed_persist: false 
+test_proxysql_replication_hostgroups_check_idempotence: false +test_proxysql_replication_hostgroups_cleanup_after_test: true diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/meta/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/meta/main.yml new file mode 100644 index 00000000..2023b8da --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/base_test.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/base_test.yml new file mode 100644 index 00000000..466a7c6b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/base_test.yml @@ -0,0 +1,58 @@ +--- +### prepare +- name: "{{ role_name }} | {{ current_test }} | are we performing a delete" + set_fact: + test_delete: "{{ current_test | regex_search('^test_delete') | ternary(true, false) }}" + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: "{{ test_delete|ternary('setup_test_replication_hostgroups', 'cleanup_test_replication_hostgroups') }}.yml" + when: not test_proxysql_replication_hostgroups_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | {{ test_delete|ternary('delete','create') }} test mysql replication hostgroup" + proxysql_replication_hostgroups: + login_user: admin + login_password: admin + writer_hostgroup: '{{ test_writer_hostgroup }}' + 
reader_hostgroup: '{{ test_reader_hostgroup }}' + state: "{{ test_delete|ternary('absent', 'present') }}" + save_to_disk: "{{ not test_proxysql_replication_hostgroups_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_replication_hostgroups_in_memory_only }}" + check_mode: "{{ test_proxysql_replication_hostgroups_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save the replication hostgroups config from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: MYSQL SERVERS + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load the replication hostgroups config from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: MYSQL SERVERS + direction: TO + config_layer: RUNTIME + + when: test_proxysql_replication_hostgroups_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if test replication hostgroups exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT writer_hostgroup || ',' || reader_hostgroup FROM mysql_replication_hostgroups where writer_hostgroup = '{{ test_writer_hostgroup }}' and reader_hostgroup = '{{ test_reader_hostgroup }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if test replication hostgroups exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT writer_hostgroup || ',' || reader_hostgroup FROM disk.mysql_replication_hostgroups where writer_hostgroup = '{{ test_writer_hostgroup }}' and reader_hostgroup = '{{ test_reader_hostgroup }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if test replication hostgroups exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 
-BNe"SELECT writer_hostgroup || ',' || reader_hostgroup FROM runtime_mysql_replication_hostgroups where writer_hostgroup = '{{ test_writer_hostgroup }}' and reader_hostgroup = '{{ test_reader_hostgroup }}'" + register: runtime_result diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/cleanup_test_replication_hostgroups.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/cleanup_test_replication_hostgroups.yml new file mode 100644 index 00000000..d32262a3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/cleanup_test_replication_hostgroups.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure no replication hostgroups are created" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"DELETE FROM mysql_replication_hostgroups" + + - name: "{{ role_name }} | {{ current_test }} | ensure no replication hostgroups are saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL SERVERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure no replication hostgroups are loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL SERVERS TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/main.yml new file mode 100644 index 00000000..de5f8131 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/main.yml @@ -0,0 +1,83 @@ +--- +### tests + +- name: "{{ role_name }} | test_create_using_check_mode | test create replication hostgroups using check mode" + import_tasks: test_create_using_check_mode.yml + vars: + test_proxysql_replication_hostgroups_check_mode: true + +- name: "{{ role_name }} | test_delete_using_check_mode | test delete replication hostgroups using check mode" + import_tasks: test_delete_using_check_mode.yml + vars: + test_proxysql_replication_hostgroups_check_mode: true + +- name: "{{ role_name }} | test_create_replication_hostgroups | test create replication hostgroups" + import_tasks: test_create_replication_hostgroups.yml + vars: + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_create_replication_hostgroups | test idempotence of create replication hostgroups" + import_tasks: test_create_replication_hostgroups.yml + vars: + test_proxysql_replication_hostgroups_check_idempotence: true + +- name: "{{ role_name }} | test_delete_replication_hostgroups | test delete replication hostgroups" + import_tasks: test_delete_replication_hostgroups.yml + vars: + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_replication_hostgroups | test idempotence of delete replication hostgroups" + import_tasks: test_delete_replication_hostgroups.yml + vars: + test_proxysql_replication_hostgroups_check_idempotence: true + +- name: "{{ role_name }} | test_create_replication_hostgroups_in_memory_only | test create replication hostgroups in memory" + import_tasks: test_create_replication_hostgroups_in_memory_only.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_create_replication_hostgroups_in_memory_only | test 
idempotence of create replication hostgroups in memory" + import_tasks: test_create_replication_hostgroups_in_memory_only.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_check_idempotence: true + +- name: "{{ role_name }} | test_delete_replication_hostgroups_in_memory_only | test delete replication hostgroups in memory" + import_tasks: test_delete_replication_hostgroups_in_memory_only.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_replication_hostgroups_in_memory_only | test idempotence of delete replication hostgroups in memory" + import_tasks: test_delete_replication_hostgroups_in_memory_only.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_check_idempotence: true + +- name: "{{ role_name }} | test_create_replication_hostgroups_with_delayed_persist | test create replication hostgroups with delayed save to disk/load to runtime" + import_tasks: test_create_replication_hostgroups_with_delayed_persist.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_with_delayed_persist: true + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_create_replication_hostgroups_with_delayed_persist | test idempotence of create replication hostgroups with delayed save to disk/load to runtime" + import_tasks: test_create_replication_hostgroups_with_delayed_persist.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_with_delayed_persist: true + test_proxysql_replication_hostgroups_check_idempotence: true + +- name: "{{ role_name }} | test_delete_replication_hostgroups_with_delayed_persist | test delete replication hostgroups with delayed save to disk/load to runtime" + 
import_tasks: test_delete_replication_hostgroups_with_delayed_persist.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_with_delayed_persist: true + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_replication_hostgroups_with_delayed_persist | test idempotence of delete replication hostgroups with delayed save to disk/load to runtime" + import_tasks: test_delete_replication_hostgroups_with_delayed_persist.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_with_delayed_persist: true + test_proxysql_replication_hostgroups_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/setup_test_replication_hostgroups.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/setup_test_replication_hostgroups.yml new file mode 100644 index 00000000..03f759dc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/setup_test_replication_hostgroups.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test replication hostgroups is created when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test replication hostgroups is created in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"INSERT OR REPLACE INTO mysql_replication_hostgroups (writer_hostgroup, reader_hostgroup) VALUES ('{{ test_writer_hostgroup }}', '{{ test_reader_hostgroup }}')" + + - name: "{{ role_name }} | {{ current_test }} | ensure test replication hostgroups is created on disk" + 
shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL SERVERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test replication hostgroups is created in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL SERVERS TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/teardown.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/teardown.yml new file mode 100644 index 00000000..0cb5ae1e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups.yml new file mode 100644 index 00000000..43bfb9d7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_replication_hostgroups | set current test" + set_fact: + current_test: test_create_replication_hostgroups + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create replication hostgroups reported a change" + assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ 
role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_in_memory_only.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_in_memory_only.yml new file mode 100644 index 00000000..66d26c32 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_replication_hostgroups_in_memory_only | set current test" + set_fact: + current_test: test_create_replication_hostgroups_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create replication hostgroups reported a change" + assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') 
}}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_with_delayed_persist.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_with_delayed_persist.yml new file mode 100644 index 00000000..cd649a51 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_replication_hostgroups_with_delayed_persist | set current test" + set_fact: + current_test: test_create_replication_hostgroups_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create replication hostgroups reported a change" + assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ 
current_test }} | confirm create replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_using_check_mode.yml new file mode 100644 index 00000000..b9a2fb2f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_create_using_check_mode | set current test" + set_fact: + current_test: test_create_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create replication hostgroups in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups in check mode didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- 
name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups in check mode didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups.yml new file mode 100644 index 00000000..ad3a189f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_replication_hostgroups | set current test" + set_fact: + current_test: test_delete_replication_hostgroups + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete replication hostgroups reported a change" + assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete 
replication hostgroups did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_in_memory_only.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_in_memory_only.yml new file mode 100644 index 00000000..edd089ac --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_in_memory_only.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_replication_hostgroups_in_memory_only | set current test" + set_fact: + current_test: test_delete_replication_hostgroups_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete replication hostgroups reported a change" + assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change to runtime" + assert: + that: 
runtime_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_with_delayed_persist.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_with_delayed_persist.yml new file mode 100644 index 00000000..03a19889 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_replication_hostgroups_with_delayed_persist | set current test" + set_fact: + current_test: test_delete_replication_hostgroups_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete replication hostgroups reported a change" + assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform 
cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_using_check_mode.yml new file mode 100644 index 00000000..1fb58792 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_using_check_mode | set current test" + set_fact: + current_test: test_delete_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete replication hostgroups in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups in check mode didn't make a change in memory" + assert: + that: memory_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups in check mode didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + 
import_tasks: cleanup_test_replication_hostgroups.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/defaults/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/defaults/main.yml new file mode 100644 index 00000000..d7c00743 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/defaults/main.yml @@ -0,0 +1,9 @@ +--- +test_interval_ms: 1000 +test_filename: '/opt/maintenance.py' + +test_proxysql_scheduler_check_mode: false +test_proxysql_scheduler_in_memory_only: false +test_proxysql_scheduler_with_delayed_persist: false +test_proxysql_scheduler_check_idempotence: false +test_proxysql_scheduler_cleanup_after_test: true diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/meta/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/meta/main.yml new file mode 100644 index 00000000..2023b8da --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/base_test.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/base_test.yml new file mode 100644 index 00000000..e1abf853 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/base_test.yml @@ -0,0 +1,58 @@ +--- +### prepare +- name: "{{ role_name }} | {{ current_test }} | are we performing a delete" + set_fact: + 
test_delete: "{{ current_test | regex_search('^test_delete') | ternary(true, false) }}" + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: "{{ test_delete|ternary('setup_test_scheduler', 'cleanup_test_schedulers') }}.yml" + when: not test_proxysql_scheduler_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | {{ test_delete|ternary('delete','create') }} test scheduler" + proxysql_scheduler: + login_user: admin + login_password: admin + interval_ms: '{{ test_interval_ms }}' + filename: '{{ test_filename }}' + state: "{{ test_delete|ternary('absent', 'present') }}" + save_to_disk: "{{ not test_proxysql_scheduler_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_scheduler_in_memory_only }}" + check_mode: "{{ test_proxysql_scheduler_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save the scheduler config from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: SCHEDULER + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load the scheduler config from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: SCHEDULER + direction: TO + config_layer: RUNTIME + + when: test_proxysql_scheduler_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if test scheduler exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT interval_ms || ',' || filename FROM scheduler where interval_ms = '{{ test_interval_ms }}' and filename = '{{ test_filename }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if test scheduler exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 
-BNe"SELECT interval_ms || ',' || filename FROM disk.scheduler where interval_ms = '{{ test_interval_ms }}' and filename = '{{ test_filename }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if test scheduler exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT interval_ms || ',' || filename FROM runtime_scheduler where interval_ms = '{{ test_interval_ms }}' and filename = '{{ test_filename }}'" + register: runtime_result diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/cleanup_test_schedulers.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/cleanup_test_schedulers.yml new file mode 100644 index 00000000..396ab836 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/cleanup_test_schedulers.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure no schedulers are created" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"DELETE FROM scheduler" + + - name: "{{ role_name }} | {{ current_test }} | ensure no schedulers are saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE SCHEDULER TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure no schedulers are loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD SCHEDULER TO RUNTIME" diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/main.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/main.yml new file mode 100644 index 00000000..071a4d05 --- /dev/null 
+++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/main.yml @@ -0,0 +1,83 @@ +--- +### tests + +- name: "{{ role_name }} | test_create_using_check_mode | test create scheduler using check mode" + import_tasks: test_create_using_check_mode.yml + vars: + test_proxysql_scheduler_check_mode: true + +- name: "{{ role_name }} | test_delete_using_check_mode | test delete scheduler using check mode" + import_tasks: test_delete_using_check_mode.yml + vars: + test_proxysql_scheduler_check_mode: true + +- name: "{{ role_name }} | test_create_scheduler | test create scheduler" + import_tasks: test_create_scheduler.yml + vars: + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_create_scheduler | test idempotence of create scheduler" + import_tasks: test_create_scheduler.yml + vars: + test_proxysql_scheduler_check_idempotence: true + +- name: "{{ role_name }} | test_delete_scheduler | test delete scheduler" + import_tasks: test_delete_scheduler.yml + vars: + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_scheduler | test idempotence of delete scheduler" + import_tasks: test_delete_scheduler.yml + vars: + test_proxysql_scheduler_check_idempotence: true + +- name: "{{ role_name }} | test_create_scheduler_in_memory_only | test create scheduler in memory" + import_tasks: test_create_scheduler_in_memory_only.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_create_scheduler_in_memory_only | test idempotence of create scheduler in memory" + import_tasks: test_create_scheduler_in_memory_only.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_check_idempotence: true + +- name: "{{ role_name }} | test_delete_scheduler_in_memory_only | test delete scheduler in memory" + import_tasks: 
test_delete_scheduler_in_memory_only.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_scheduler_in_memory_only | test idempotence of delete scheduler in memory" + import_tasks: test_delete_scheduler_in_memory_only.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_check_idempotence: true + +- name: "{{ role_name }} | test_create_scheduler_with_delayed_persist | test create scheduler with delayed save to disk/load to runtime" + import_tasks: test_create_scheduler_with_delayed_persist.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_with_delayed_persist: true + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_create_scheduler_with_delayed_persist | test idempotence of create scheduler with delayed save to disk/load to runtime" + import_tasks: test_create_scheduler_with_delayed_persist.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_with_delayed_persist: true + test_proxysql_scheduler_check_idempotence: true + +- name: "{{ role_name }} | test_delete_scheduler_with_delayed_persist | test delete scheduler with delayed save to disk/load to runtime" + import_tasks: test_delete_scheduler_with_delayed_persist.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_with_delayed_persist: true + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_scheduler_with_delayed_persist | test idempotence of delete scheduler with delayed save to disk/load to runtime" + import_tasks: test_delete_scheduler_with_delayed_persist.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_with_delayed_persist: true + test_proxysql_scheduler_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: 
teardown.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/setup_test_scheduler.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/setup_test_scheduler.yml new file mode 100644 index 00000000..da12b255 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/setup_test_scheduler.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test scheduler is created when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test scheduler is created in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"INSERT OR REPLACE INTO scheduler (interval_ms, filename) VALUES ('{{ test_interval_ms }}', '{{ test_filename }}')" + + - name: "{{ role_name }} | {{ current_test }} | ensure test scheduler is created on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE SCHEDULER TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test scheduler is created in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD SCHEDULER TO RUNTIME" diff --git 
a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler.yml new file mode 100644 index 00000000..e290d266 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_scheduler | set current test" + set_fact: + current_test: test_create_scheduler + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_in_memory_only.yml 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_in_memory_only.yml new file mode 100644 index 00000000..d03cb77f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_scheduler_in_memory_only | set current test" + set_fact: + current_test: test_create_scheduler_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_with_delayed_persist.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_with_delayed_persist.yml new file mode 100644 index 00000000..ba7eec57 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_scheduler_with_delayed_persist | set current test" + set_fact: + current_test: test_create_scheduler_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_using_check_mode.yml new file mode 100644 index 00000000..bbb5de4d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | 
test_create_using_check_mode | set current test" + set_fact: + current_test: test_create_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create scheduler in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler in check mode didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler in check mode didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler.yml new file mode 100644 index 00000000..b136fec0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_scheduler | set current test" + set_fact: + current_test: test_delete_scheduler + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change 
in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_in_memory_only.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_in_memory_only.yml new file mode 100644 index 00000000..50c58e1a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_in_memory_only.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_scheduler_in_memory_only | set current test" + set_fact: + current_test: test_delete_scheduler_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete 
scheduler did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_with_delayed_persist.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_with_delayed_persist.yml new file mode 100644 index 00000000..bdea20cd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_scheduler_with_delayed_persist | set current test" + set_fact: + current_test: test_delete_scheduler_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + 
when: test_proxysql_scheduler_cleanup_after_test diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_using_check_mode.yml b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_using_check_mode.yml new file mode 100644 index 00000000..96e5133f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_using_check_mode | set current test" + set_fact: + current_test: test_delete_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete scheduler in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler in check mode didn't make a change in memory" + assert: + that: memory_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler in check mode didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/community/proxysql/tests/sanity/ignore-2.10.txt new file mode 100644 index 00000000..4b958cf0 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/proxysql/tests/sanity/ignore-2.10.txt @@ -0,0 +1,2 @@ +roles/proxysql/molecule/default/tests/test_default.py future-import-boilerplate +roles/proxysql/molecule/default/tests/test_default.py metaclass-boilerplate diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/sanity/ignore-2.11.txt b/collections-debian-merged/ansible_collections/community/proxysql/tests/sanity/ignore-2.11.txt new file mode 100644 index 00000000..4b958cf0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/sanity/ignore-2.11.txt @@ -0,0 +1,2 @@ +roles/proxysql/molecule/default/tests/test_default.py future-import-boilerplate +roles/proxysql/molecule/default/tests/test_default.py metaclass-boilerplate diff --git a/collections-debian-merged/ansible_collections/community/proxysql/tests/sanity/ignore-2.9.txt b/collections-debian-merged/ansible_collections/community/proxysql/tests/sanity/ignore-2.9.txt new file mode 100644 index 00000000..4b958cf0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/proxysql/tests/sanity/ignore-2.9.txt @@ -0,0 +1,2 @@ +roles/proxysql/molecule/default/tests/test_default.py future-import-boilerplate +roles/proxysql/molecule/default/tests/test_default.py metaclass-boilerplate |