author      Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-18 05:52:22 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-18 05:52:22 +0000
commit      38b7c80217c4e72b1d8988eb1e60bb6e77334114 (patch)
tree        356e9fd3762877d07cde52d21e77070aeff7e789 /ansible_collections/community/postgresql
parent      Adding upstream version 7.7.0+dfsg. (diff)
download    ansible-38b7c80217c4e72b1d8988eb1e60bb6e77334114.tar.xz
            ansible-38b7c80217c4e72b1d8988eb1e60bb6e77334114.zip
Adding upstream version 9.4.0+dfsg. (upstream/9.4.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/community/postgresql')
123 files changed, 6698 insertions, 3147 deletions
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml b/ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml index 4a34c9edf..28f8b2497 100644 --- a/ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml +++ b/ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml @@ -36,11 +36,30 @@ variables: resources: containers: - container: default - image: quay.io/ansible/azure-pipelines-test-container:3.0.0 + image: quay.io/ansible/azure-pipelines-test-container:4.0.1 pool: Standard stages: + - stage: Python_qualtiy_tools + displayName: Python quality + dependsOn: [] + jobs: + - job: 'Test' + pool: + vmImage: 'ubuntu-latest' + steps: + - task: UsePythonVersion@0 + displayName: Get Python for Python tools. + inputs: + versionSpec: '3.11' + addToPath: false + name: pyTools + - script: $(pyTools.pythonLocation)/bin/pip install --upgrade tox + displayName: Upgrade/Install tox. + - script: $(pyTools.pythonLocation)/bin/tox -e lint + displayName: Run tox -e lint + ## Sanity & units - stage: Ansible_devel displayName: Sanity & Units devel @@ -56,69 +75,74 @@ stages: - name: Units test: 'devel/units/1' - - stage: Ansible_2_15 - displayName: Sanity & Units 2.15 + - stage: Ansible_2_16 + displayName: Sanity & Units 2.16 dependsOn: [] jobs: - template: templates/matrix.yml parameters: targets: - name: Sanity - test: '2.15/sanity/1' + test: '2.16/sanity/1' - name: Units - test: '2.15/units/1' + test: '2.16/units/1' - - stage: Ansible_2_14 - displayName: Sanity & Units 2.14 + - stage: Ansible_2_15 + displayName: Sanity & Units 2.15 dependsOn: [] jobs: - template: templates/matrix.yml parameters: targets: - name: Sanity - test: '2.14/sanity/1' + test: '2.15/sanity/1' - name: Units - test: '2.14/units/1' + test: '2.15/units/1' - - stage: Ansible_2_13 - displayName: Sanity & Units 2.13 + - stage: Ansible_2_14 + displayName: Sanity & Units 2.14 dependsOn: [] jobs: - template: templates/matrix.yml parameters: targets: - name: Sanity - test: '2.13/sanity/1' + test: '2.14/sanity/1' - name: Units - test: '2.13/units/1' + test: '2.14/units/1' - - stage: Ansible_2_12 - displayName: Sanity & Units 2.12 +## Docker + - stage: Docker_devel + displayName: Docker devel dependsOn: [] jobs: - template: templates/matrix.yml parameters: + testFormat: devel/linux/{0}/1 targets: - - name: Sanity - test: '2.12/sanity/1' - - name: Units - test: '2.12/units/1' + - name: Fedora 39 + test: fedora39 + - name: Ubuntu 20.04 + test: ubuntu2004 + - name: Ubuntu 22.04 + test: ubuntu2204 -## Docker - - stage: Docker_devel - displayName: Docker devel + - stage: Docker_2_16 + displayName: Docker 2.16 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: devel/linux/{0}/1 + testFormat: 2.16/linux/{0}/1 targets: - name: CentOS 7 test: centos7 - - name: Fedora 37 - test: fedora37 + - name: Fedora 38 + test: fedora38 - name: Ubuntu 20.04 test: ubuntu2004 + - name: Ubuntu 22.04 + test: ubuntu2204 - stage: Docker_2_15 displayName: Docker 2.15 @@ -134,6 +158,8 @@ stages: test: fedora37 - name: Ubuntu 20.04 test: ubuntu2004 + - name: Ubuntu 22.04 + test: ubuntu2204 - stage: Docker_2_14 displayName: Docker 2.14 @@ -145,52 +171,31 @@ stages: targets: - name: CentOS 7 test: centos7 - - name: Fedora 36 - test: fedora36 - name: Ubuntu 20.04 test: ubuntu2004 - - stage: Docker_2_13 - displayName: Docker 2.13 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.13/linux/{0}/1 - targets: - - name: 
CentOS 7 - test: centos7 - - name: Fedora 35 - test: fedora35 - - name: Ubuntu 20.04 - test: ubuntu2004 - - - stage: Docker_2_12 - displayName: Docker 2.12 +## Remote + - stage: Remote_devel + displayName: Remote devel dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.12/linux/{0}/1 + testFormat: devel/{0}/1 targets: - - name: CentOS 7 - test: centos7 - - name: Fedora 34 - test: fedora34 - - name: Ubuntu 20.04 - test: ubuntu2004 + - name: RHEL 9.3 + test: rhel/9.3 -## Remote - - stage: Remote_devel - displayName: Remote devel + - stage: Remote_2_16 + displayName: Remote 2.16 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: devel/{0}/1 + testFormat: 2.16/{0}/1 targets: - - name: RHEL 8.7 - test: rhel/8.7 + - name: RHEL 8.8 + test: rhel/8.8 - stage: Remote_2_15 displayName: Remote 2.15 @@ -214,47 +219,22 @@ stages: - name: RHEL 8.6 test: rhel/8.6 - - stage: Remote_2_13 - displayName: Remote 2.13 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.13/{0}/1 - targets: - - name: RHEL 8.5 - test: rhel/8.5 - - - stage: Remote_2_12 - displayName: Remote 2.12 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.12/{0}/1 - targets: - - name: RHEL 8.4 - test: rhel/8.4 - ## Finally - stage: Summary condition: succeededOrFailed() dependsOn: - Ansible_devel + - Ansible_2_16 - Ansible_2_15 - Ansible_2_14 - - Ansible_2_13 - - Ansible_2_12 - Docker_devel + - Docker_2_16 - Docker_2_15 - Docker_2_14 - - Docker_2_13 - - Docker_2_12 - Remote_devel + - Remote_2_16 - Remote_2_15 - Remote_2_14 - - Remote_2_13 - - Remote_2_12 jobs: - template: templates/coverage.yml diff --git a/ansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py b/ansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py index 506ade646..3c3a7ecea 100755 --- a/ansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py +++ b/ansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py @@ -7,7 +7,8 @@ Keep in mind that Azure Pipelines does not enforce unique job display names (onl It is up to pipeline authors to avoid name collisions when deviating from the recommended format. 
""" -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import os diff --git a/ansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py b/ansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py index 5e8eb8d4c..400528798 100755 --- a/ansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py +++ b/ansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py @@ -1,7 +1,8 @@ #!/usr/bin/env python """Prepends a relative timestamp to each input line from stdin and writes it to stdout.""" -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import sys diff --git a/ansible_collections/community/postgresql/.codespell-exclude-words b/ansible_collections/community/postgresql/.codespell-exclude-words new file mode 100644 index 000000000..85dc2b77c --- /dev/null +++ b/ansible_collections/community/postgresql/.codespell-exclude-words @@ -0,0 +1,3 @@ +astroid +hart +brin diff --git a/ansible_collections/community/postgresql/.codespellrc b/ansible_collections/community/postgresql/.codespellrc new file mode 100644 index 000000000..0c61c0b6f --- /dev/null +++ b/ansible_collections/community/postgresql/.codespellrc @@ -0,0 +1,2 @@ +[codespell] +ignore-words = .codespell-exclude-words diff --git a/ansible_collections/community/postgresql/.flake8 b/ansible_collections/community/postgresql/.flake8 new file mode 100644 index 000000000..33b26e792 --- /dev/null +++ b/ansible_collections/community/postgresql/.flake8 @@ -0,0 +1,7 @@ +[flake8] +ignore = + E203, + E402, + E501, + W503, +max-line-length = 120 diff --git a/ansible_collections/community/postgresql/.pre-commit-config.yaml b/ansible_collections/community/postgresql/.pre-commit-config.yaml new file mode 100644 index 000000000..13ad43477 --- /dev/null +++ b/ansible_collections/community/postgresql/.pre-commit-config.yaml @@ -0,0 +1,36 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +# +# Should more or less follow lint and typing settings as found is tox.ini +# +# Once pre-commit package is installed in your environnement, install hooks +# with `pre-commit install` +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-added-large-files +- repo: local + hooks: + - id: autoflake + name: autoflake + entry: autoflake --check-diff + language: system + types: [python] + - id: flake8 + name: flake8 + entry: flake8 . + language: system + types: [python] + - id: isort + name: isort + entry: isort --check --diff . + language: system + types: [python] + - id: codespell + name: codespell + entry: codespell + language: system + types: [file] diff --git a/ansible_collections/community/postgresql/CHANGELOG.rst b/ansible_collections/community/postgresql/CHANGELOG.rst index 5730f6108..d9990ab6a 100644 --- a/ansible_collections/community/postgresql/CHANGELOG.rst +++ b/ansible_collections/community/postgresql/CHANGELOG.rst @@ -5,6 +5,136 @@ Community PostgreSQL Collection Release Notes .. contents:: Topics +v3.4.0 +====== + +Release Summary +--------------- + +This is a minor release of the ``community.postgresql`` collection. 
+This changelog contains all changes to the modules and plugins in this collection +that have been made after the previous release. + +Minor Changes +------------- + +- postgresql_db - add the ``icu_locale`` argument (https://github.com/ansible-collections/community.postgresql/issues/666). +- postgresql_db - add the ``locale_provider`` argument (https://github.com/ansible-collections/community.postgresql/issues/666). + +Bugfixes +-------- + +- postgresql_privs - fix a failure when altering privileges with ``grant_option: true`` (https://github.com/ansible-collections/community.postgresql/issues/668). + +v3.3.0 +====== + +Release Summary +--------------- + +This is the minor release of the ``community.postgresql`` collection. +This changelog contains all changes to the modules and plugins in this collection +that have been made after the previous release. + +Minor Changes +------------- + +- postgresql_db - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/614). +- postgresql_ext - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/354). +- postgresql_publication - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/354). +- postgresql_schema - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/354). +- postgresql_subscription - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/354). +- postgresql_tablespace - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/354). + +Bugfixes +-------- + +- postgresql_query - now reports not changed for queries starting with "SHOW" (https://github.com/ansible-collections/community.postgresql/pull/592). +- postgresql_user - module failed when running against an SQL_ASCII encoded database as the user's current password was returned as bytes as opposed to a str. Fix now checks for this case and decodes the bytes as an ascii encoded string. (https://github.com/ansible-collections/community.postgresql/issues/584). + +v3.2.0 +====== + +Release Summary +--------------- + +This is the minor release of the ``community.postgresql`` collection. +This changelog contains all changes to the modules and plugins in this collection +that have been made after the previous release. + +Minor Changes +------------- + +- postgres modules - added support for Psycopg 3 library (https://github.com/ansible-collections/community.postgresql/pull/517). +- postgresql_owner - added support at new object types (https://github.com/ansible-collections/community.postgresql/pull/555). + +Bugfixes +-------- + +- postgresql_info - fix SQL syntax issue (https://github.com/ansible-collections/community.postgresql/issues/570). + +v3.1.0 +====== + +Release Summary +--------------- + +This is the minor release of the ``community.postgresql`` collection. +This changelog contains all changes to the modules and plugins in this collection +that have been made after the previous release. + +Major Changes +------------- + +- postgres modules - the minimum version of psycopg2 library the collection supports is 2.5.1 (https://github.com/ansible-collections/community.postgresql/pull/556). + +Minor Changes +------------- + +- Collection core functions - use ``get_server_version`` in all modules (https://github.com/ansible-collections/community.postgresql/pull/518)." 
+- Collection core functions - use common cursor arguments in all modules (https://github.com/ansible-collections/community.postgresql/pull/522)." +- postgresql_ext - added idempotence always both in standard and in check mode (https://github.com/ansible-collections/community.postgresql/pull/545). +- postgresql_ext - added idempotence when version=latest (https://github.com/ansible-collections/community.postgresql/pull/504). +- postgresql_ext - added prev_version and version return values (https://github.com/ansible-collections/community.postgresql/pull/545). +- postgresql_ext - added queries in module output also in check mode (https://github.com/ansible-collections/community.postgresql/pull/545). +- postgresql_ext - improved error messages (https://github.com/ansible-collections/community.postgresql/pull/545). +- postgresql_privs - added idempotence when roles=PUBLIC (https://github.com/ansible-collections/community.postgresql/pull/502). +- postgresql_privs - added parameters privileges support for PostgreSQL 15 or higher (https://github.com/ansible-collections/community.postgresql/issues/481). +- postgresql_privs - added support for implicit roles CURRENT_ROLE, CURRENT_USER, and SESSION_USER (https://github.com/ansible-collections/community.postgresql/pull/502). +- postgresql_tablespace - added idempotence when dropping a non-existing tablespace (https://github.com/ansible-collections/community.postgresql/pull/554). + +Deprecated Features +------------------- + +- postgresql_lang - the module has been deprecated and will be removed in ``community.postgresql 4.0.0``. Please use the ``postgresql_ext`` module instead (https://github.com/ansible-collections/community.postgresql/issues/559). + +Bugfixes +-------- + +- postgresql_ext - fixed queries return value name in documentation (https://github.com/ansible-collections/community.postgresql/pull/545). +- postgresql_privs - fixed error message and documentation (https://github.com/ansible-collections/community.postgresql/pull/510). +- postgresql_set - fixed GUC_LIST_QUOTE parameters (https://github.com/ansible-collections/community.postgresql/pull/521). +- postgresql_set - fixed error message in param_set function (https://github.com/ansible-collections/community.postgresql/pull/505). + +v3.0.0 +====== + +Release Summary +--------------- + +This is a major release of the ``community.postgresql`` collection. +This changelog contains all changes to the modules in this collection that +have been added after the release of ``community.postgresql`` 2.4.2. + +Major Changes +------------- + +- postgresql_pg_hba - remove the deprecated ``order`` argument. The sortorder ``sdu`` is hardcoded (https://github.com/ansible-collections/community.postgresql/pull/496). +- postgresql_privs - remove the deprecated ``usage_on_types`` argument. Use the ``type`` option of the ``type`` argument to explicitly manipulate privileges on PG types (https://github.com/ansible-collections/community.postgresql/issues/208). +- postgresql_query - remove the deprecated ``path_to_script`` and ``as_single_query`` arguments. Use the ``postgresql_script`` module to run queries from scripts (https://github.com/ansible-collections/community.postgresql/issues/189). +- postgresql_user - move the deprecated ``privs`` argument removal to community.postgresql 4.0.0 (https://github.com/ansible-collections/community.postgresql/issues/493). +- postgresql_user - remove the deprecated ``groups`` argument. 
Use the ``postgresql_membership`` module instead (https://github.com/ansible-collections/community.postgresql/issues/300). + v2.4.2 ====== @@ -64,7 +194,7 @@ Minor Changes Bugfixes -------- -- postgresql_info - add support for non numeric extenstion version (https://github.com/ansible-collections/community.postgresql/issues/428). +- postgresql_info - add support for non numeric extension version (https://github.com/ansible-collections/community.postgresql/issues/428). - postgresql_info - when getting information about subscriptions, check the list of available columns in the pg_subscription table (https://github.com/ansible-collections/community.postgresql/issues/429). - postgresql_privs - fix connect_params being ignored (https://github.com/ansible-collections/community.postgresql/issues/450). - postgresql_query - could crash under certain conditions because of a missing import to `psycopg2.extras` (https://github.com/ansible-collections/community.postgresql/issues/283). diff --git a/ansible_collections/community/postgresql/CONTRIBUTING.md b/ansible_collections/community/postgresql/CONTRIBUTING.md index 70cd5557e..ecb18f74a 100644 --- a/ansible_collections/community/postgresql/CONTRIBUTING.md +++ b/ansible_collections/community/postgresql/CONTRIBUTING.md @@ -3,3 +3,47 @@ Refer to the [Ansible Contributing guidelines](https://docs.ansible.com/ansible/devel/community/index.html) to learn how to contribute to this collection. Refer to the [review checklist](https://docs.ansible.com/ansible/devel/community/collection_contributors/collection_reviewing.html) when triaging issues or reviewing PRs. + +## Checking your code locally + +### By hand + +You can run flake8 with tox to verify the quality of your code. For that you +can simply call tox with that command: +``` console +$ tox -e lint +``` + +If you tox is missing on your environment you can probably install it through +your package manager (Eg: `sudo apt install tox`) or with pip (within a +virtualenv): + +``` console +$ python3 -m venv .venv +$ source .venv +$ pip install tox +``` + +### Automatically for each commit + +This repo contains some pre-commit configuration to automatically check your +code foreach commit. To use that configuration you should "install" it by +running: + +``` console +$ pre-commit install +``` + +Then autoflake, flake8, isort and codespell must run when you add some commits. 
+You can also force them to run with this command: + +``` console +$ pre-commit run --all-file +``` + +If pre-commit is missing on your system, you can install it (on Debian based +system) with `apt`: + +``` console +$ sudo apt install pre-commit +``` diff --git a/ansible_collections/community/postgresql/CONTRIBUTORS b/ansible_collections/community/postgresql/CONTRIBUTORS deleted file mode 100644 index 419cd9915..000000000 --- a/ansible_collections/community/postgresql/CONTRIBUTORS +++ /dev/null @@ -1,230 +0,0 @@ -4n70w4 -abadger -abguy -abompard -acasademont -AceSlash -acozine -aioue -Akasurde -alanfairless -aleksandr-vin -Alexhha -AlexTaran -amarao -amenonsen -aminvakil -amossc -anasbouzid -Andersson007 -andreaso -andreyfedoseev -andytom -anis016 -ansibot -antoinell -arbazkhan002 -arkag -artursvonda -AsgerPetersen -asifiqbal -atombrella -b6d -balonik -bcoca -bearrito -benformosa -betanummeric -billietl -binf -blackstar257 -bladypirat -blindrood -Boosai -braderhart -brophyja -btoussaint -cans -caseyandgina -chamini2 -Changaco -char543 -cjewo -cocoy -codrinh -CoffeDriven -Cohedrin -coopengo-glecomte -csamarajeewa -cThrice -czenderink -dagwieers -dan-mcdonald -darklajid -davetapley -DEvil0000 -d-fence -dgalpaj -Dorn- -drob -drrtuy -drybjed -dschep -dukex -ECRR -elventear -Ernest0x -EvanDotPro -F1rst-Unicorn -Fale -faruqisan -feikesteenbergen -felixfontein -fessmage -fix -frittentheke -gearoidibm -geekq -ghost -Glandos -gordonbondon -gotmax23 -grasum -gsauthof -gsauthor -gundalow -Habbie -herrewig -hezbucho -hunleyd -IgorOhrimenko -ilicmilan -indreek -inertialbit -iragsdale -Iridescens -jacekjaros -jamescassell -jamesRUS52 -jborean93 -jbscalia -jchancojr -jd-boyd -jegj -jensdepuydt -jerri -Jhiliano -jinnko -jkman340 -jmcginn13 -jmighion -jnv -joaocc -jochu -johnjelinek -joshmoore -jzielke84 -k3rni -keitalbame -keithf4 -klando -kostiantyn-nemchenko -kustodian -landryb -le9i0nx -legrostdg -leroyguillaume -lichensky -loop-evgeny -lorin -LostInTheWoods -MaayanMordehai -maletin -marcflausino -marcosdiez -markwort -matburt -matonb -mator -mattclay -mattupstate -maxamillion -mguillaume -michael-dev2rights -MichaelDBA -mjrepo2 -mkrizek -mnietz -mohangk -monkz -mribeiro -mspanc -mullaiarasu -nbw74 -nergdron -nerzhul -nh2 -nodiscc -nskalis -ojc97 -pbaisla -perezjasonr -PeteDevoy -phemmer -pierot -Piknik1990 -pilou- -placaze -pmauduit -raneq -raymondroelands -replaced -rgl -rightaway -rmfitzpatrick -rosowiecki -rouge8 -rtsisyk -russoz -sahapasci -saito-hideki -samccann -samdoran -SantiRaposo -saxus -sbulage -ScottSturdivant -seanknox -sebasmannem -set-db-id -sfilipov -SHUFIL -silvio -skilyazhnev -snopoke -strk -tartansandal -Tas-sos -tcraxs -tedder -tiggi -till -tinproject -TJEvans -tom-clx -tomscytale -Trikke76 -truki -tYYGH -Vanav -veger -vfoucault -vmalloc -vosmax -willthames -wrosario -wvidana -yteraoka -zikalino -zswanson -zyitingftnt diff --git a/ansible_collections/community/postgresql/FILES.json b/ansible_collections/community/postgresql/FILES.json index 59d622bc0..4302f209c 100644 --- a/ansible_collections/community/postgresql/FILES.json +++ b/ansible_collections/community/postgresql/FILES.json @@ -32,7 +32,7 @@ "name": ".azure-pipelines/scripts/combine-coverage.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e34d4e863a65b9f53c4ca8ae37655858969898a949e050e9cb3cb0d5f02342d0", + "chksum_sha256": "3311b626f96165acfea0b76c4f6488867f7c9c4e67163f09b8618d4faa914cb0", "format": 1 }, { @@ -67,7 +67,7 @@ "name": ".azure-pipelines/scripts/time-command.py", "ftype": "file", 
"chksum_type": "sha256", - "chksum_sha256": "0232f415efeb583ddff907c058986963b775441eaf129d7162aee0acb0d36834", + "chksum_sha256": "fcf41d12a2ed5304e9946f9cb7a0350db169f66a60ebd8e4063708e512dd334a", "format": 1 }, { @@ -109,7 +109,7 @@ "name": ".azure-pipelines/azure-pipelines.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "41d298e610516fda313e5bfa8de7bbd1be63ccb302c42df4f21a1dc025a0d0ac", + "chksum_sha256": "d51571bbcb9d9cd269b46558fbd86071be6a450a189a5383a38b2da0b14b1c86", "format": 1 }, { @@ -158,7 +158,7 @@ "name": "changelogs/changelog.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c3e11aa29315386df71622ac86144d7ede576f54dadc87a179d96916fb844f32", + "chksum_sha256": "b9669874dadda6fc914c52186ac175b590b7c2c4481da2896dbfaad9d36ea263", "format": 1 }, { @@ -200,7 +200,7 @@ "name": "meta/runtime.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b42a308d6db3b0062a9cb8490d522b2369a1b2f502103f2a1188ecad45078f44", + "chksum_sha256": "c64894306db0b7108ddb9ee565813b867547f728cfb8b00807e2e14dbda5c7d4", "format": 1 }, { @@ -221,7 +221,7 @@ "name": "plugins/doc_fragments/postgres.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a5e6d7613c0b367a3cb1f0094a0ae8ecbb72f7dbbd96dadc5bf59c7e385f2fc9", + "chksum_sha256": "c7a33a7d1660cd9b6a60cc5adac9a53e515dcf2cd7851938e4da7625b29c692b", "format": 1 }, { @@ -235,35 +235,35 @@ "name": "plugins/module_utils/_version.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ae6c984a7e9dd51753ea7fcb5995d5655016dd5dc187cd9be7216ef7045f220b", + "chksum_sha256": "7a281b1b906a3421e2b101932abb684c6d4725d28873ea6cc38b6f030abb3e4b", "format": 1 }, { "name": "plugins/module_utils/database.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ae0bbf1af2bec24b4393e731ad6688e452c9ddaef4bf37925f24d935aa3ce3a7", + "chksum_sha256": "6633839a483b33df8118d8f316a21d182e83d65a92bc019682c10d5737fc250b", "format": 1 }, { "name": "plugins/module_utils/postgres.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bdb9a1aa846495b8dfe3dc8a908ad8208463dab66dd10351f0c3b76114ff18af", + "chksum_sha256": "6c0c09c587577a08bc190c2f3cbdebabfffa0f2678ba6fb1535124748f75cad4", "format": 1 }, { "name": "plugins/module_utils/saslprep.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c55980dbc037b35033ead955787ca7660e52d2502b09701394940d7c27e63590", + "chksum_sha256": "d999997e9b4319da69df919726afbc57215457ed9faff174a0571d681cfd6760", "format": 1 }, { "name": "plugins/module_utils/version.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "475a5d990c7314a0ddd22f024b7eaefd2a0f04cbf5dc1543b79b7a3fc7920f4f", + "chksum_sha256": "f76414cd8893472f3349c885a184ed066e806b65a856080bd7e8e78d320d28e0", "format": 1 }, { @@ -277,161 +277,161 @@ "name": "plugins/modules/postgresql_copy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a18b00eb5e4c3b6602379c605f8529a434451bcaf7aa238bddafeacd0463c362", + "chksum_sha256": "5706a8c4b183ceb4c55f492d2421344b31e8ea9354700a440d1d14373285e10d", "format": 1 }, { "name": "plugins/modules/postgresql_db.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8c409cdb6d6dacceda3848e6172d607fcaa7f04395fdc40f9198df9ed5be2f30", + "chksum_sha256": "ee2ae220616261dee6e44204168ec6e67e5cdb89d5fb8cb8a79a3d9969800219", "format": 1 }, { "name": "plugins/modules/postgresql_ext.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3e22f3438fd9f169b8762a8cba2113a824cdd4239bcf9bf6a82fcf0a73cb568c", + 
"chksum_sha256": "4e715015d05e5273e6a5c78720e837a31d0b65d8c059242391602d7258401248", "format": 1 }, { "name": "plugins/modules/postgresql_idx.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7527a54f1aff01168c0687fca7554d4840a24b431b62c75adf21625ee76d307c", + "chksum_sha256": "3113e2c7531aaf65c8c5996cdd0906d4a3a4ad603a7eb90ec6c2a61011ef8dbb", "format": 1 }, { "name": "plugins/modules/postgresql_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e9220a9def51d2549f791b20925ade84bac4b1f6b11d8e9ea3301abbeabd45e6", + "chksum_sha256": "3709f46d48e00f548fbb352ef30a7cc6530ebff7fd67a08723c1e70018a5059c", "format": 1 }, { "name": "plugins/modules/postgresql_lang.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bc59aa40e8ce754b1c36793037f1db5f40bdcd240a1a79db03d17f2a532c53f7", + "chksum_sha256": "e82581b2d818da4f6dc92816f20d465617553157829e5fddc4ededc03d0a8013", "format": 1 }, { "name": "plugins/modules/postgresql_membership.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b3aba16398d37253ebe13585766b4296a63597fb4d5f6f6c4107f305e848d778", + "chksum_sha256": "7e0320411240c7b39540357ad4c5a0bb7a336a4aefddeca6b97abf4e5873cd4b", "format": 1 }, { "name": "plugins/modules/postgresql_owner.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "de4c4d00203e534c8153a9b89100c52e4fffe53716534bcc6381370960df6bea", + "chksum_sha256": "4c10ea66f5e06030b891349042607ceaea7ca961ef8af5c607600372483b9a67", "format": 1 }, { "name": "plugins/modules/postgresql_pg_hba.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6eb9946b5dbf34c28c4df8a7560e360a7d0deb52e8219d334bbaee20b5958f60", + "chksum_sha256": "e2cc5d06bd1e7eb4662cb5183b0e5ce7af9ce5ce5b6861fa877e7fdcda358406", "format": 1 }, { "name": "plugins/modules/postgresql_ping.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5214f3a6fade4670db83abfca1d53cc2e096f8f1c6f9f130c2cead028f526f7d", + "chksum_sha256": "c640dc9d74ec6cc08742905b5fa60b6c401e09f3f137976b3281ed6fbed7bab0", "format": 1 }, { "name": "plugins/modules/postgresql_privs.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "09a8b6d073b82e735cfb171bc94259cfffc83be36fdf9d21c9bed8d061d87625", + "chksum_sha256": "84b979c32e98cbe03a96ff07b009e5c53a24910289498dd2ba15e850417bbe66", "format": 1 }, { "name": "plugins/modules/postgresql_publication.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4c99476190a90b080425dbc4d51aacca6a9596ff83eb106cef3b65ebbbaa027f", + "chksum_sha256": "ebd97ee3fb70fcf4b0847fb4b27b7b8e8777b001e9a35356553c214d170ea281", "format": 1 }, { "name": "plugins/modules/postgresql_query.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0f1b18a99606eb8f1a6b429902a285b469cc587aba98054b8802e4fd80bbd519", + "chksum_sha256": "96fecb7c3958176f98c7197b6978d0c15ef2201e87b8fe88cbcb156b947a8a56", "format": 1 }, { "name": "plugins/modules/postgresql_schema.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8087b11aac6f47620807bd31d7b88fa6e1370dcb177beb31d14f1d1b9b239c34", + "chksum_sha256": "c14a105c2f0524d668e3592332ad1f90ff70653d354cc47cf486b9d447e2fdef", "format": 1 }, { "name": "plugins/modules/postgresql_script.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6b7c75cfc2254d91fdf1949ef37548fd3b053cf431ea80d26f557d11b36331f9", + "chksum_sha256": "c1f5fe2a12e04c6d60575247b0829a0ab2a91c4bc548f3da08b815a15b1795fb", "format": 1 }, { "name": "plugins/modules/postgresql_sequence.py", "ftype": "file", "chksum_type": 
"sha256", - "chksum_sha256": "a37fa35888dab62628d7c95bd0a6eb2f8fa0eeb8e45df63dfd1a757d58016929", + "chksum_sha256": "7c7b935501f035fdc762201ded31e1dc1a83cb2fce067a4826d8994d721454cd", "format": 1 }, { "name": "plugins/modules/postgresql_set.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "52d343f5f3d581072060b4ed6a76f685f75d551f0dc71e5ca06b3b66b5b586fe", + "chksum_sha256": "5619f70e1d2d7fd7cb54d6c67e1b77c98c4e1715dcb22083ea7596eef43fa2ee", "format": 1 }, { "name": "plugins/modules/postgresql_slot.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aefaa8ec911503aced0a01eed6d0af599a86107e679e427ed0a92a64746275f7", + "chksum_sha256": "49fde6fafd0e87b849aacede12188b7cc9b18897fc050b4923e275b24b756990", "format": 1 }, { "name": "plugins/modules/postgresql_subscription.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f27e097da06f819773eb75fa2ce05ad1300882ceb082914354082a2480aa9554", + "chksum_sha256": "175e8d28444494201bbf0587ff03f66dad92edeb1a13570e46a86830d947251e", "format": 1 }, { "name": "plugins/modules/postgresql_table.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cddbf54618e293cfed69350a05ca311f38a644d45e8e0f5be83a2ceffe1a8a72", + "chksum_sha256": "b17fcb050967d6548695bbfc9c93f3eba9e1624bee59484a67372d3002c21691", "format": 1 }, { "name": "plugins/modules/postgresql_tablespace.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "526804aa5f0d4a2aa07853ae4cfc6b728e547b71598f996da94b68e7e9dcfcb4", + "chksum_sha256": "73ea596342bae900f214ff9ce2ae338dc31b6d8049288ef9c0f10eef51e4e108", "format": 1 }, { "name": "plugins/modules/postgresql_user.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f50548605c180c66f01364dc59dd6ea3f7fbb01028b8a4bb7193b6a9c78f1582", + "chksum_sha256": "dfd8848754d58778de8d6e26631fb46b2945f1e899840e96b935f5636467bfd5", "format": 1 }, { "name": "plugins/modules/postgresql_user_obj_stat_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1cefc5b44bbd893bad22aa2952699a94aded1e3aba28092a4bad8a1949252c37", + "chksum_sha256": "e0aa5b156bb66d91e18fd38e547cadee0b3ca131f318663b491070f4d3463d43", "format": 1 }, { @@ -522,7 +522,7 @@ "name": "tests/integration/targets/postgresql_db/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "11d903913550d93aaffcda9458d70349ce6e703cf071e922c8124dc24b7f9cdb", + "chksum_sha256": "8e0c549afe5fdabc6ecdb462d1dd9383856a9de08c7b1befcee28bb1cfa34c4d", "format": 1 }, { @@ -550,7 +550,7 @@ "name": "tests/integration/targets/postgresql_db/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7d950960129fed71c1e93223b6236cd693c66d8bccba1154f62201f1db287c17", + "chksum_sha256": "610d83c94a518765ad79b957b76e20a751952ed15d02c2b8ce5c7910fe8f38dc", "format": 1 }, { @@ -561,6 +561,13 @@ "format": 1 }, { + "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_comment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8620099ddd001e78c893dee552f013e34c1b424dc4fa70e7393f271159106c74", + "format": 1 + }, + { "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml", "ftype": "file", "chksum_type": "sha256", @@ -571,7 +578,7 @@ "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6b8632b1b5caaadee859948072f7083858b39c5399278b7df135daa2bc3b13bd", + "chksum_sha256": "c4916aa2f1a918146e862c556a40f9a28fd0eedb50ddc0cd273f1f9a2ed62a23", 
"format": 1 }, { @@ -655,7 +662,7 @@ "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "15f44b7169d69b892328252ba599949389ca63c427cd4302c6ef399bb4036b98", + "chksum_sha256": "7aca5747d3f4dfa0ba65307fad479db92fb6f3fe16d01fa81d6f4d6d1daaf147", "format": 1 }, { @@ -669,7 +676,7 @@ "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cfd80e8736e13290c4dc2c4e192feb9709044d3d4a627c487e242a751c046143", + "chksum_sha256": "dc0b23755c5eb52354cabdbdf7bb9a5a1b8d786154a7b129ab5d224502b100e8", "format": 1 }, { @@ -746,7 +753,7 @@ "name": "tests/integration/targets/postgresql_info/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a3506fe6d008386fb0503c072d97973117bb79ad293ad91e000a9c3ce4c3ba7d", + "chksum_sha256": "adcabc9849ea03d5b053158644cfb68d528eff27d6088ca5caa16928d06fc6f5", "format": 1 }, { @@ -760,7 +767,7 @@ "name": "tests/integration/targets/postgresql_info/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ab541e45bbbeb211496e76434bd09715e9a541449c267f449d4625b044465286", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", "format": 1 }, { @@ -774,14 +781,14 @@ "name": "tests/integration/targets/postgresql_info/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "41aee9caccefdd5eec4b8fd1eeaea84532cd4402095e8742d0e866cc8139b5b4", + "chksum_sha256": "f9daa952ebb493e71dfb75b448b3bb1552c66674aa3fd1ef0d9b73020280f05a", "format": 1 }, { "name": "tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fbd44018a0af971de6081938ab2d4da7c321776747fd32370bc649077bdde035", + "chksum_sha256": "35ebf3faf8705bdb5ab925214f91f84d3efbbafd8165302c5ef7826f29214943", "format": 1 }, { @@ -799,90 +806,6 @@ "format": 1 }, { - "name": "tests/integration/targets/postgresql_lang", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_lang/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_lang/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_lang/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_lang/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b75ee9a6bf02c5ff2f9e629a35e588e5297d1bca6463f5fc69a06aa27735d96f", - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "948ea39535bf70b471e77c1cbcd13c6a4b7d5e6e4bfa6e2437622c3ba8e16f29", - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e4584a76cf4cf30ac1358827b4cbe45dfcb5b72bcb135e2ee54ea51bd171ad06", - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_lang/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 
- }, - { - "name": "tests/integration/targets/postgresql_lang/vars/CentOS-7.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a2395b707eb60229acffb97605104820a7f00a23d83d63d90e353929e97fb4e9", - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_lang/vars/CentOS-8.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a5b45ee4b79b491c7c057d1c4c940df1ef7fa8e7fa6e1d006cbb1f839eeca40d", - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_lang/vars/default.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_lang/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", - "format": 1 - }, - { "name": "tests/integration/targets/postgresql_membership", "ftype": "dir", "chksum_type": null, @@ -998,7 +921,7 @@ "name": "tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dec6d60389b250e99e4e921f63194be772a2f7ca18db43cb560fb1af40336a9f", + "chksum_sha256": "c2976b218eb2aaf5a038ead9936c63528e4b9f7f4de5ffdb86e0be278f8f49f9", "format": 1 }, { @@ -1068,7 +991,7 @@ "name": "tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e101fbb7da23884928d57b0d1db0f05e3161eea29e04aa93a972703fac28ddf9", + "chksum_sha256": "ddaf5fcd9ad9f687e31a6924101ce51ac0692bfe4930f4b6bf6f82968e797c3f", "format": 1 }, { @@ -1086,17 +1009,17 @@ "format": 1 }, { - "name": "tests/integration/targets/postgresql_ping/defaults", + "name": "tests/integration/targets/postgresql_ping/handlers", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/postgresql_ping/defaults/main.yml", + "name": "tests/integration/targets/postgresql_ping/handlers/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ee4c4c179b3e7fdde23d8cf3a4855188e229f2a4ba2828bb3dd8b0a6a1365aea", + "chksum_sha256": "f0e424b6d9aa41327dac421608071f2cdba5f812140930ee5493a73763f6ece4", "format": 1 }, { @@ -1124,14 +1047,28 @@ "name": "tests/integration/targets/postgresql_ping/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "115c090f1029bac19af8970417d5e50b212a935c55a2d23059175fdb308f3b92", + "chksum_sha256": "4bea40334e7a95f3a11239803cbf27ec53a1f8b9543df1e6f34d1b4cd649f34a", "format": 1 }, { "name": "tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f2b1244d57e5b287a6ae73864218c3ed352db0d947ad6bfd4a68a5691a557ebc", + "chksum_sha256": "36c47bfcf55153dc6dea950e51c6f8595038475b01dc02d983a1cd23cca8c5f7", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ping/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ping/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bb217ec9eb9db6a12457c99a06f9f9d510526cd167ce8d884267792b0669f51", "format": 1 }, { @@ -1201,14 +1138,14 @@ "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"96211bd4e4c063d0ff264f11759bf60bf6a7d788eac99829a6c36788e683e06c", + "chksum_sha256": "e6d065e8cb0fbfc8b06b4ef8c4bcb3dd7b36154d406f1c6e36bb006105c83c1b", "format": 1 }, { "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5f1f4ba86290047039bb1562f7bd2171508f5103c06513132c22fdaa833461e0", + "chksum_sha256": "981192ad174194d54af959c4cc6354688c4ecff0497198588d2dd50253e8350d", "format": 1 }, { @@ -1222,7 +1159,7 @@ "name": "tests/integration/targets/postgresql_privs/tasks/test_target_role.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c9e749fe3d9b2172ca69efe4bf6072d0fcaebd5b8d291684471af479e0502a5d", + "chksum_sha256": "26f34a0ecf7dfc9b33a51c430e57c51341f02ff28326a8a475f2d686816ef92b", "format": 1 }, { @@ -1271,7 +1208,7 @@ "name": "tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "22d46a9a422663a28385e66709700895dceb8c31f6ff5b621c1d055f54e83703", + "chksum_sha256": "661eef67dc43cfa3df87e5dbcc8703f7e62e1c62d0fb1ae23681cefeb68db262", "format": 1 }, { @@ -1289,27 +1226,6 @@ "format": 1 }, { - "name": "tests/integration/targets/postgresql_query/files", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_query/files/test0.sql", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b2dc19f190b86228709bced10068509a40c6141f8347a3c0cce52b3f787e1876", - "format": 1 - }, - { - "name": "tests/integration/targets/postgresql_query/files/test1.sql", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8de29e382b1af32c9352400d107e16d232405cc50c126ae7f99a5a0879f34320", - "format": 1 - }, - { "name": "tests/integration/targets/postgresql_query/meta", "ftype": "dir", "chksum_type": null, @@ -1341,7 +1257,7 @@ "name": "tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f63e7d48deecded04b2273fddd1c4199c315ab88b00447c94ea7064c0369cd5c", + "chksum_sha256": "e15f63b055d4271cca4f76f743fdd0c566c12bb46c67d955288dd15855c8943a", "format": 1 }, { @@ -1404,7 +1320,7 @@ "name": "tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5be03157af9f62f7caa17e5ad3dc289cf7b59289fa700cc0d9b930d3cbbdc1d5", + "chksum_sha256": "ddbc191e9aec6315913bf97268f5ab2c8d338e07333c960a60ac01a8b04d0d10", "format": 1 }, { @@ -1684,7 +1600,7 @@ "name": "tests/integration/targets/postgresql_set/tasks/options_coverage.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f312eb39567039be39982c9ce88aef490b1935622bcb05b056d3ba49b6f4f5e1", + "chksum_sha256": "ec75b6318b9d58b65d206c7b49dc6faa4752db7ced30463ba7514d179d374a61", "format": 1 }, { @@ -1768,7 +1684,7 @@ "name": "tests/integration/targets/postgresql_subscription/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8104db292a951ca57fe3a0321e894ff18aad3e88d58981e3db7936f5f1d1bb34", + "chksum_sha256": "7549ee4e7072899f84ca40f779b02d321974d884938cfceb7243a9c85a37155a", "format": 1 }, { @@ -1782,7 +1698,7 @@ "name": "tests/integration/targets/postgresql_subscription/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ab541e45bbbeb211496e76434bd09715e9a541449c267f449d4625b044465286", + "chksum_sha256": 
"cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", "format": 1 }, { @@ -1796,14 +1712,14 @@ "name": "tests/integration/targets/postgresql_subscription/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "eca673fddaf151a108873b2567fc9e40fb19ec24f6559355a602e983fdcf5495", + "chksum_sha256": "94b3acc771c7e46f783969e10f054d1b229c4897ec8e5e4bef200a5865e12d16", "format": 1 }, { "name": "tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6dbb3ba2ca853ffd06bd6883fa5d23e0517237d5a27edc38bc8300fb6b33dc2f", + "chksum_sha256": "79b70cc9f28f38637875a1c06f229d713582169a6a420c027a322b0c92d1f75b", "format": 1 }, { @@ -1922,7 +1838,7 @@ "name": "tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aff3b89f4673ee60e03ac6e27421682dc328a04fcc5fabc1f477c13a27d89db5", + "chksum_sha256": "a07c48bb2567e354c894246d0bfcdea46695ce4f8ca275a4da3313bf578f9ef0", "format": 1 }, { @@ -1950,7 +1866,7 @@ "name": "tests/integration/targets/postgresql_user/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b14eed97d15235e2056971743de23709e18c00b91c358fe1e79002c98ece9d60", + "chksum_sha256": "d78ac5931ff0383bb350aa6a2c530e0fb853eb699ac2a800ab8979bbd069a5c4", "format": 1 }, { @@ -1978,14 +1894,14 @@ "name": "tests/integration/targets/postgresql_user/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0472c601119258ae89952a06b702c47ec3832b55fccc952ec8f1548a006d0f37", + "chksum_sha256": "6bb8f3286ac2112268a767d5f6709e291d3e238f0c820dacdfe50596119e1097", "format": 1 }, { "name": "tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7481301d5bc8dd240f78002e63d47b3e79ed995a6632918ae15d4b31b84650e6", + "chksum_sha256": "64a3301468c4970452a5c81838f6e818c62bc2fa563e920dc459ba943eae9eea", "format": 1 }, { @@ -1996,6 +1912,13 @@ "format": 1 }, { + "name": "tests/integration/targets/postgresql_user/tasks/postgresql_user_sql_ascii_db.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a05a112efc3c9e46f56a530684bb3660be389f05a916a171074d7265e26b7bc", + "format": 1 + }, + { "name": "tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml", "ftype": "file", "chksum_type": "sha256", @@ -2069,7 +1992,7 @@ "name": "tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dacef0aa7902a944e61a6bfb5bd1d4f13a065e63a18d271bfd1099e78df2637f", + "chksum_sha256": "cb7a981fa53ff3a756d9fa21e5770b9c2781f7cd85f6298761da61e4e368f25e", "format": 1 }, { @@ -2118,7 +2041,7 @@ "name": "tests/integration/targets/setup_postgresql_db/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "254d5cd2cd0525f306778e08c6d9b445e21558fc4f7ecfb20fc86df61969e8da", + "chksum_sha256": "38ef024a32b46ef9d0c1d85fed01ae7bec95a70079179eab12d8ca25d3844575", "format": 1 }, { @@ -2251,147 +2174,98 @@ "name": "tests/integration/targets/setup_postgresql_db/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8e061afedc2f4520674a2a3136e9c15cfca871eaef9a1d4a2beed9609352be08", + "chksum_sha256": "bc4909649a39466b2f04e5aa252c5e06ca5d811b5d570cd6d348fab35d14a385", "format": 1 }, { - "name": 
"tests/integration/targets/setup_postgresql_db/tasks/ssl.yml", + "name": "tests/integration/targets/setup_postgresql_db/tasks/replica.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fb482e7ee6912b74be31f9fe6b254a87d1717f9d40ae8823e2913331b9586ad7", + "chksum_sha256": "1a66dfc51cae0323a69b33ecf901d4892f5c27b78b896ceb602c7d7024c417f7", "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_db/vars", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ea1cab96532e023ca4622a31488dd6226c60eb755817868f874c083f8e991eb8", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml", + "name": "tests/integration/targets/setup_postgresql_db/tasks/sql_ascii.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "150c49cf5d8c40a44f33a543d4fb288a952517bbebd367ca223f068e3417c5e1", + "chksum_sha256": "36f1490ca93218409cc8027a2b2ab4e0906ff03f2d6b8d32a582bf4c41467948", "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat.yml", + "name": "tests/integration/targets/setup_postgresql_db/tasks/ssl.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5252a9c72186f877dcc99a3b66a053474f180248404a39f55da78b1546c95ee2", + "chksum_sha256": "fb482e7ee6912b74be31f9fe6b254a87d1717f9d40ae8823e2913331b9586ad7", "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "94dc4796606b2bcdca95aba5eefe79a1b4e36cb5c600b15badbc1673340a5ecd", + "name": "tests/integration/targets/setup_postgresql_db/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_db/vars/default-py3.yml", + "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3a88e7f3bdc87e6e9f0a5d02df8672112dba29116fc4ce278eecabc2906f786d", + "chksum_sha256": "5bba8a6c488923638e8a1a2b3f4f7445c27c94e5304a01f714095b0dcafea6dd", "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_db/vars/default.yml", + "name": "tests/integration/targets/setup_postgresql_db/vars/Fedora-36-py3.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "577e120caebb25a1c1fcafe4991d83e838ca9ef1b18301e2a66e4565057af8e9", + "chksum_sha256": "f30fad67c6bae9b0e8118ac1b3b4017352765929a676a91188ed909a59ae4c3e", "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_replication", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_replication/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_replication/defaults/main.yml", + "name": "tests/integration/targets/setup_postgresql_db/vars/Fedora-38-py3.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "15c12558d8848d3b44b4f14324e6b02888ed48fa609fee2329180b92e59d3fe1", - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_replication/handlers", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "e1b2168aa6e8ed71a0e798d242fcb4ee512246eefa54a913423287b910436381", 
"format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_replication/handlers/main.yml", + "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8a6de4eb2d6d7b2702fe7d970e239c4f4a4f8f31643a18628f9363e63bce4cb6", + "chksum_sha256": "ff19411262382d7c97cbb6e6b653fb781e8291d752e0816006ea948c610b9da9", "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_replication/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_replication/tasks/main.yml", + "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "332fef4bb8a02f7be01b7dea5632ce4608b2aabe87c40464048aa3e172804108", + "chksum_sha256": "963f35abad92c7ef89c14dd107f9a34e40bab06200d7d4fe6fd822021db9b69f", "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml", + "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "926f8633822359ea7a106ede40c8913368ed8420c70abccf9ddf623aab6b057e", + "chksum_sha256": "ae02d1fc9a308fb0b4643a3200a4712622c09bef0df46ccfcd9b7dbbb0144d1a", "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_replication/templates", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2", + "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-22-py3.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9b3e39d80a8a59947b5fba38e8db942a1933ffefcef368cd13a5594fc2f65668", + "chksum_sha256": "c897bcf7d1e0116c886423886d8a875b2cf8f3c0e8cdb6111b2b3e75945f2b9a", "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_replication/templates/primary_postgresql.conf.j2", + "name": "tests/integration/targets/setup_postgresql_db/vars/default-py3.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4e40eae546ddaeff2de7356ece590b70001f976feb6c07b0c83a9fd1b86ea23d", + "chksum_sha256": "a66ab99eac91dfeed26beb4980cd2116e3033857acc6ad7b63b820e8372d5544", "format": 1 }, { - "name": "tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2", + "name": "tests/integration/targets/setup_postgresql_db/vars/default.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2a70a2f2e5beaa52cefcc93461ca067ef2b665b859a468cbe74d27464253bc6e", + "chksum_sha256": "a74f2e663ed988e7c19149e3746beec16adb2fe48582fcb267002b0450e4782d", "format": 1 }, { @@ -2419,42 +2293,49 @@ "name": "tests/sanity/extra/no-unwanted-files.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f1468e7b22ba353d18fcf2f5b18607873f792de629f887798f081eb6e2cd54fc", + "chksum_sha256": "71dadb137e8d21b7ed325205def87767bd39308398f399b1b328c3af3f788a7d", "format": 1 }, { "name": "tests/sanity/ignore-2.12.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0a7c2f4de6f288675dfebc1f6fbb808728c3ef1bec1a29fe2adb80199372621f", + "chksum_sha256": "a856b96e287379255c9014609dfe9c7b6f5205f4631fc45dbf93be935c5bba26", "format": 1 }, { "name": "tests/sanity/ignore-2.13.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0a7c2f4de6f288675dfebc1f6fbb808728c3ef1bec1a29fe2adb80199372621f", + 
"chksum_sha256": "a856b96e287379255c9014609dfe9c7b6f5205f4631fc45dbf93be935c5bba26", "format": 1 }, { "name": "tests/sanity/ignore-2.14.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0a7c2f4de6f288675dfebc1f6fbb808728c3ef1bec1a29fe2adb80199372621f", + "chksum_sha256": "a856b96e287379255c9014609dfe9c7b6f5205f4631fc45dbf93be935c5bba26", "format": 1 }, { "name": "tests/sanity/ignore-2.15.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "788d3f00aec392d2c4740329b80911a6b2621e975148d07c2cb9c53d3f736783", + "chksum_sha256": "c9621b54c948cd3473ba21add8cbff189ccd914fc00aec1dfdd19cbdb5ce7eb6", "format": 1 }, { "name": "tests/sanity/ignore-2.16.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "788d3f00aec392d2c4740329b80911a6b2621e975148d07c2cb9c53d3f736783", + "chksum_sha256": "c9621b54c948cd3473ba21add8cbff189ccd914fc00aec1dfdd19cbdb5ce7eb6", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.17.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1e2e4351ae7e8b366042aef4f75e1a4df73a64203296484944df297ddbfbdf0", "format": 1 }, { @@ -2489,14 +2370,14 @@ "name": "tests/unit/plugins/module_utils/test_postgres.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5c08f2ecc41169ebd451837abd543a5102ece6befcff0e70f9ad06acd4d6ee5c", + "chksum_sha256": "d1cdf011913cf9b2056bfa4982cde3856f400fea81f6a1c3444a10c32e27270a", "format": 1 }, { "name": "tests/unit/plugins/module_utils/test_saslprep.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f5e044a935e091aaf52115d3f3238bcd32b3627f9cebac26bd1d6d52aa339953", + "chksum_sha256": "bf0be697f88d5a76edba65f28f158b0fb74534ec245e0c855ae1f0e83f61f73c", "format": 1 }, { @@ -2517,7 +2398,7 @@ "name": "tests/unit/plugins/modules/test_postgresql_set.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1a9478ec6a1548cb1ddf26fea59ef25dea5755340634495d9128f55c22aafce3", + "chksum_sha256": "7dc3ce190a662d7b14459ff100ca119164b9f13eaf11713d4dcf46858af6d899", "format": 1 }, { @@ -2559,7 +2440,7 @@ "name": "tests/utils/shippable/check_matrix.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "163dc2e4b0fb28faa6a03c02c7a5b2e470ca156e119943bf1d8bbf5efff02c18", + "chksum_sha256": "5a6f19f42a2051a181675c8d407c99f1bcf6ac9a3449d4fe51a5552e2582343f", "format": 1 }, { @@ -2587,14 +2468,14 @@ "name": "tests/utils/shippable/shippable.sh", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d1ea59d27bbe21954ece52642108b4fd10d3f526d8efa25de875b61cdea180a3", + "chksum_sha256": "1e5643958e0f7cea4a3189ac7184d66a174c61b81fa04bc7452d173a2366c2b0", "format": 1 }, { "name": "tests/utils/shippable/timing.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ebb7d3553349747ad41d80899ed353e13cf32fcbecbb6566cf36e9d2bc33703e", + "chksum_sha256": "fae44c549a138aa795cc2bf757cc812fa7969b914fc5f79fb42530c48c11052a", "format": 1 }, { @@ -2626,24 +2507,45 @@ "format": 1 }, { - "name": "CHANGELOG.rst", + "name": ".codespell-exclude-words", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2be3a33e5081dc3262b52ce0600fac4942cac650e3ccde5ba1f4aaeee59077a9", + "chksum_sha256": "7896ec8e2f0c01ed844b1e1227decd106137960d931248a54ef005021e7d63c8", "format": 1 }, { - "name": "CONTRIBUTING.md", + "name": ".codespellrc", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b0c50cf3715d59964a341dc651a6f626322209ef9fa8c0d03047d3a2b2e420a4", + "chksum_sha256": "746de3b92c186ceb2a9f20921a53b24523414c0af7441849e2018f14c16c3139", "format": 1 }, { 
- "name": "CONTRIBUTORS", + "name": ".flake8", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f43fe39043d7329c341144c785599aa3dd3d262ae6876ef257c7a49547151ae4", + "chksum_sha256": "4e97598aa712d2251fd3e645f9bc94be719c72a5ca2a61a21b3474b5e128a51d", + "format": 1 + }, + { + "name": ".pre-commit-config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d69be06ce29c020f1f64f67d13d7c7b4867b630c1470f6a7286785571e46f74d", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02924d737dff1a6dd5b7e9a137a2c426526ec976780e141bfdfb3262e29557b6", + "format": 1 + }, + { + "name": "CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1b3cbadfbb785585ca2c984365edef37a0d813798ad5c8ea7c296135045f9ee", "format": 1 }, { @@ -2664,7 +2566,7 @@ "name": "MAINTAINING.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2435665a6562d5f3841fff1631970f95f0466c498e949d2b8579ccc2a0b810ad", + "chksum_sha256": "8b3d09e5ba80c8515ad6c99d36bf59324f9e44e27613a510faacb457d0d06685", "format": 1 }, { @@ -2678,7 +2580,7 @@ "name": "README.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c7ef341ae1187f4ad2b6b2505899075ab5dbe06ebd138058055c59f1ef1ebffc", + "chksum_sha256": "522dc34d303faf3a818d0a57b1a7b9a7338f3acdadd55de2226ef7720765246f", "format": 1 }, { @@ -2701,6 +2603,13 @@ "chksum_type": "sha256", "chksum_sha256": "f6036f79d054f42e11f2dd52458b4d2282e901d197955e598bf1a23600280cf0", "format": 1 + }, + { + "name": "tox.ini", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "24fc228f3c3d845aae0da238b99db74d24f5914be86245b3a04897367aa3ac31", + "format": 1 } ], "format": 1 diff --git a/ansible_collections/community/postgresql/MAINTAINING.md b/ansible_collections/community/postgresql/MAINTAINING.md index 9fad0d343..1e511e6dd 100644 --- a/ansible_collections/community/postgresql/MAINTAINING.md +++ b/ansible_collections/community/postgresql/MAINTAINING.md @@ -1,3 +1,3 @@ # Maintaining this collection -Refer to the [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst). +Refer to the [Maintainer guidelines](https://docs.ansible.com/ansible/devel/community/maintainers.html). diff --git a/ansible_collections/community/postgresql/MANIFEST.json b/ansible_collections/community/postgresql/MANIFEST.json index 98c8a04e6..b16ee0f4d 100644 --- a/ansible_collections/community/postgresql/MANIFEST.json +++ b/ansible_collections/community/postgresql/MANIFEST.json @@ -2,7 +2,7 @@ "collection_info": { "namespace": "community", "name": "postgresql", - "version": "2.4.2", + "version": "3.4.0", "authors": [ "Ansible PostgreSQL community" ], @@ -25,7 +25,7 @@ "name": "FILES.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d7e71b7ad0987031481832f772a46ae4e27dbb433409e374dca4668c74260ebd", + "chksum_sha256": "87d0437e40df1ddaf3161d1baaf5340d03e0c8983c7326257fab831e5f47e682", "format": 1 }, "format": 1 diff --git a/ansible_collections/community/postgresql/README.md b/ansible_collections/community/postgresql/README.md index 076239137..fde669888 100644 --- a/ansible_collections/community/postgresql/README.md +++ b/ansible_collections/community/postgresql/README.md @@ -5,6 +5,19 @@ This collection is a part of the Ansible package. 
+## Our mission + +At the `community.postgresql` Ansible collection project, +our mission is to produce and maintain simple, flexible, +and powerful open-source software tailored to automating PostgreSQL-related tasks. + +We welcome members from all skill levels to participate actively in our open, inclusive, and vibrant community. +Whether you are an expert or just beginning your journey with Ansible and PostgreSQL, +you are encouraged to contribute, share insights, and collaborate with fellow enthusiasts. + +We strive to make managing PostgreSQL deployments as effortless and efficient as possible with automation, +enabling users to focus on their core objectives. + ## Code of Conduct We follow the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project. @@ -13,11 +26,9 @@ If you encounter abusive behavior violating the [Ansible Code of Conduct](https: ## Contributing to this collection -The content of this collection is made by [people](https://github.com/ansible-collections/community.postgresql/blob/main/CONTRIBUTORS) just like you, a community of individuals collaborating on making the world better through developing automation software. - -We are actively accepting new contributors. +The content of this collection is made by [people](https://github.com/ansible-collections/community.postgresql/graphs/contributors) just like you; a community of individuals collaborating on making the world better through developing automation software. -All types of contributions are very welcome. +We are actively accepting new contributors and all types of contributions are very welcome. You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.postgresql/blob/main/CONTRIBUTING.md)! @@ -37,20 +48,27 @@ To learn how to maintain / become a maintainer of this collection, refer to the It is necessary for maintainers of this collection to be subscribed to: * The collection itself (the `Watch` button -> `All Activity` in the upper right corner of the repository's homepage). -* The "Changes Impacting Collection Contributors and Maintainers" [issue](https://github.com/ansible-collections/overview/issues/45). +* The [news-for-maintainers repository](https://github.com/ansible-collections/news-for-maintainers). They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn). ## Communication -We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn). If you are a collection developer, be sure you are subscribed. +> `GitHub Discussions` feature is disabled in this repository. Use the `postgresql` tag on the forum in the [Project Discussions](https://forum.ansible.com/new-topic?title=topic%20title&body=topic%20body&category=project&tags=postgresql) or [Get Help](https://forum.ansible.com/new-topic?title=topic%20title&body=topic%20body&category=help&tags=postgresql) category instead. -Join us on Matrix in: +We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn). If you are a collection developer, be sure you are subscribed. 
-* `#postgresql:ansible.com` [room](https://matrix.to/#/#postgresql:ansible.com): questions on how to contribute and use this collection. -* `#users:ansible.com` [room](https://matrix.to/#/#users:ansible.com): general use questions and support. -* `#ansible-community:ansible.com` [room](https://matrix.to/#/#community:ansible.com): community and collection development questions. -* other Matrix rooms or corresponding bridged Libera.Chat channels. See the [Ansible Communication Guide](https://docs.ansible.com/ansible/devel/community/communication.html) for details. +Join [our team](https://forum.ansible.com/g/PostgreSQLTeam?asc=true&order=) on: +* The Ansible forums: + * [News & Announcements](https://forum.ansible.com/c/news/5/none) + * [Get Help](https://forum.ansible.com/c/help/6/none) + * [Social Spaces](https://forum.ansible.com/c/chat/4) + * [Posts tagged 'postgresql'](https://forum.ansible.com/tag/postgresql) +* Matrix: + * `#postgresql:ansible.com` [room](https://matrix.to/#/#postgresql:ansible.com): questions on how to contribute and use this collection. + * `#users:ansible.com` [room](https://matrix.to/#/#users:ansible.com): general use questions and support. + * `#ansible-community:ansible.com` [room](https://matrix.to/#/#community:ansible.com): community and collection development questions. + * other Matrix rooms or corresponding bridged Libera.Chat channels. See the [Ansible Communication Guide](https://docs.ansible.com/ansible/devel/community/communication.html) for details. We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn) and join us. @@ -58,31 +76,48 @@ For more information about communication, refer to the [Ansible Communication gu ## Governance -We, [the PostgreSQL working group](https://github.com/ansible-collections/community.postgresql/wiki/PostgreSQL-Working-Group), use [the community pinboard](https://github.com/ansible-collections/community.postgresql/issues/30) for general announcements and discussions. +We, [the PostgreSQL working group](https://forum.ansible.com/g/PostgreSQLTeam), use [the forum](https://forum.ansible.com/tag/postgresql) posts tagged with `postgresql` for general announcements and discussions. The process of decision making in this collection is based on discussing and finding consensus among participants. -Every voice is important and every idea is valuable. If you have something on your mind, create an issue or dedicated discussion and let's discuss it! +Every voice is important and every idea is valuable. If you have something on your mind, create an issue or dedicated forum [discussion](https://forum.ansible.com/new-topic?title=topic%20title&body=topic%20body&category=project&tags=postgresql) and let's discuss it! ## External requirements -The PostgreSQL modules rely on the [Psycopg2](https://www.psycopg.org/docs/) PostgreSQL database adapter. +The PostgreSQL modules rely on the [Psycopg](https://www.psycopg.org/) PostgreSQL database adapter. +Both versions [Psycopg2](https://www.psycopg.org/docs/) and [Psycopg3](https://www.psycopg.org/psycopg3/docs/) are supported. +The minimum supported and tested versions of Psycopg are 2.5.1 and 3.1.8 respectively. + +## Releases Support Timeline + +We maintain each major release version (1.x.y, 2.x.y, ...) for two years after the next major version is released. 
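A quick way to confirm which driver from the External requirements section above a managed host would end up using is a probe like the one below. It follows the same preference order (Psycopg 3 first, then Psycopg2) but is only a sketch: the collection's real detection, including the extra fallback to psycopg2 when Psycopg 3 is older than 3.1, lives in plugins/module_utils/postgres.py further down in this diff.

```python
# Rough availability check, not the collection's own logic.
def detect_psycopg():
    """Return (driver_name, version), preferring Psycopg 3 over psycopg2."""
    try:
        import psycopg
        return "psycopg", psycopg.__version__
    except ImportError:
        try:
            import psycopg2
            return "psycopg2", psycopg2.__version__
        except ImportError:
            return None, None


driver, version = detect_psycopg()
if driver is None:
    print("No Psycopg driver found; install python3-psycopg2 or 'psycopg[binary]'.")
else:
    print("The collection would use %s %s" % (driver, version))
```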
+ +Here is the table for the support timeline: +- 1.x.y: released 2020-11-17, supported until 2024-02-10 +- 2.x.y: released 2022-02-10, supported until 2025-06-09 +- 3.x.y: released 2023-06-09, current +- 4.x.y: to be released; not earlier than after Ansible 10 release (~May 2024) ## Tested with ansible-core Tested with the following `ansible-core` releases: -- 2.12 -- 2.13 - 2.14 +- 2.15 +- 2.16 - current development version Ansible-core versions before 2.12.0 are not supported. Our AZP CI includes testing with the following docker images / PostgreSQL versions: -- CentOS 7: 9.2 -- RHEL 8.3 / 8.4: 10 -- Fedora 34: 13 -- Ubuntu 20.04: 14 +| Docker image | Psycopg version | PostgreSQL version | +|--------------|-----------------|--------------------| +| CentOS 7 | 2.5.1 | 9.2 | +| RHEL 8 | 2.7.5 | 10 | +| Fedora 37 | 2.9.6 | 14 | +| Fedora 38 | 2.9.6 | 15 | +| Fedora 39 | 2.9.6 | 15 | +| Ubuntu 20.04 | 2.8.6 | 15 | +| Ubuntu 22.04 | 3.1.9 | 15 | ## Included content @@ -132,7 +167,7 @@ collections: - name: community.postgresql ``` -You can also download the tarball from [Ansible Galaxy](https://galaxy.ansible.com/community/postgresql) and install the collection manually wherever you need. +You can also download the tarball from [Ansible Galaxy](https://galaxy.ansible.com/ui/repo/published/community/postgresql) and install the collection manually wherever you need. Note that if you install the collection from Ansible Galaxy with the command-line tool or tarball, it will not be upgraded automatically when you upgrade the Ansible package. To upgrade the collection to the latest available version, run the following command: @@ -152,10 +187,6 @@ See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_gui See the [changelog](https://github.com/ansible-collections/community.postgresql/blob/main/CHANGELOG.rst). -## Roadmap - -See the [release plan](https://github.com/ansible-collections/community.postgresql/issues/13). - ## More information - [Ansible Collection overview](https://github.com/ansible-collections/overview) diff --git a/ansible_collections/community/postgresql/changelogs/changelog.yaml b/ansible_collections/community/postgresql/changelogs/changelog.yaml index 45e4461b2..bacf00e7a 100644 --- a/ansible_collections/community/postgresql/changelogs/changelog.yaml +++ b/ansible_collections/community/postgresql/changelogs/changelog.yaml @@ -446,7 +446,7 @@ releases: 2.4.0: changes: bugfixes: - - postgresql_info - add support for non numeric extenstion version (https://github.com/ansible-collections/community.postgresql/issues/428). + - postgresql_info - add support for non numeric extension version (https://github.com/ansible-collections/community.postgresql/issues/428). - postgresql_info - when getting information about subscriptions, check the list of available columns in the pg_subscription table (https://github.com/ansible-collections/community.postgresql/issues/429). - postgresql_privs - fix connect_params being ignored (https://github.com/ansible-collections/community.postgresql/issues/450). @@ -529,3 +529,152 @@ releases: - 0-postgresql_info.yml - 2.4.2.yml release_date: '2023-06-09' + 3.0.0: + changes: + major_changes: + - postgresql_pg_hba - remove the deprecated ``order`` argument. The sortorder + ``sdu`` is hardcoded (https://github.com/ansible-collections/community.postgresql/pull/496). + - postgresql_privs - remove the deprecated ``usage_on_types`` argument. 
Use + the ``type`` option of the ``type`` argument to explicitly manipulate privileges + on PG types (https://github.com/ansible-collections/community.postgresql/issues/208). + - postgresql_query - remove the deprecated ``path_to_script`` and ``as_single_query`` + arguments. Use the ``postgresql_script`` module to run queries from scripts + (https://github.com/ansible-collections/community.postgresql/issues/189). + - postgresql_user - move the deprecated ``privs`` argument removal to community.postgresql + 4.0.0 (https://github.com/ansible-collections/community.postgresql/issues/493). + - postgresql_user - remove the deprecated ``groups`` argument. Use the ``postgresql_membership`` + module instead (https://github.com/ansible-collections/community.postgresql/issues/300). + release_summary: 'This is a major release of the ``community.postgresql`` collection. + + This changelog contains all changes to the modules in this collection that + + have been added after the release of ``community.postgresql`` 2.4.2.' + fragments: + - 0-postgresql_query.yml + - 1-postgresql_privs.yml + - 3-postgresql_user.yml + - 3.0.0.yml + - 4-postgresql_user.yml + - 5-postgresql_pg_hba.yml + release_date: '2023-06-09' + 3.1.0: + changes: + bugfixes: + - postgresql_ext - fixed queries return value name in documentation (https://github.com/ansible-collections/community.postgresql/pull/545). + - postgresql_privs - fixed error message and documentation (https://github.com/ansible-collections/community.postgresql/pull/510). + - postgresql_set - fixed GUC_LIST_QUOTE parameters (https://github.com/ansible-collections/community.postgresql/pull/521). + - postgresql_set - fixed error message in param_set function (https://github.com/ansible-collections/community.postgresql/pull/505). + deprecated_features: + - postgresql_lang - the module has been deprecated and will be removed in ``community.postgresql + 4.0.0``. Please use the ``postgresql_ext`` module instead (https://github.com/ansible-collections/community.postgresql/issues/559). + major_changes: + - postgres modules - the minimum version of psycopg2 library the collection + supports is 2.5.1 (https://github.com/ansible-collections/community.postgresql/pull/556). + minor_changes: + - Collection core functions - use ``get_server_version`` in all modules (https://github.com/ansible-collections/community.postgresql/pull/518)." + - Collection core functions - use common cursor arguments in all modules (https://github.com/ansible-collections/community.postgresql/pull/522)." + - postgresql_ext - added idempotence always both in standard and in check mode + (https://github.com/ansible-collections/community.postgresql/pull/545). + - postgresql_ext - added idempotence when version=latest (https://github.com/ansible-collections/community.postgresql/pull/504). + - postgresql_ext - added prev_version and version return values (https://github.com/ansible-collections/community.postgresql/pull/545). + - postgresql_ext - added queries in module output also in check mode (https://github.com/ansible-collections/community.postgresql/pull/545). + - postgresql_ext - improved error messages (https://github.com/ansible-collections/community.postgresql/pull/545). + - postgresql_privs - added idempotence when roles=PUBLIC (https://github.com/ansible-collections/community.postgresql/pull/502). + - postgresql_privs - added parameters privileges support for PostgreSQL 15 or + higher (https://github.com/ansible-collections/community.postgresql/issues/481). 
+ - postgresql_privs - added support for implicit roles CURRENT_ROLE, CURRENT_USER, + and SESSION_USER (https://github.com/ansible-collections/community.postgresql/pull/502). + - postgresql_tablespace - added idempotence when dropping a non-existing tablespace + (https://github.com/ansible-collections/community.postgresql/pull/554). + release_summary: 'This is the minor release of the ``community.postgresql`` + collection. + + This changelog contains all changes to the modules and plugins in this collection + + that have been made after the previous release.' + fragments: + - 3.1.0.yml + - 481-postgresql_privs.yml + - 502_postgresql_privs.yml + - 504_postgresql_ext.yml + - 505-postgresql_set.yml + - 510-postgresql_privs.yml + - 518-psycopg-server_version.yml + - 521-postgresql_set.yml + - 522-psycopg-cursor_args.yml + - 545_postgresql_ext.yml + - 554_postgresql_tablespace.yml + - 556_psycopg251.yml + - 559-postgresql_lang-deprecate.yml + release_date: '2023-08-14' + 3.2.0: + changes: + bugfixes: + - postgresql_info - fix SQL syntax issue (https://github.com/ansible-collections/community.postgresql/issues/570). + minor_changes: + - postgres modules - added support for Psycopg 3 library (https://github.com/ansible-collections/community.postgresql/pull/517). + - postgresql_owner - added support at new object types (https://github.com/ansible-collections/community.postgresql/pull/555). + release_summary: 'This is the minor release of the ``community.postgresql`` + collection. + + This changelog contains all changes to the modules and plugins in this collection + + that have been made after the previous release.' + fragments: + - 3.2.0.yml + - 517_psycopg3.yml + - 555_postgresql_owner.yml + - 570_postgresql_info.yml + - nuke_contributors.yml + release_date: '2023-08-22' + 3.3.0: + changes: + bugfixes: + - postgresql_query - now reports not changed for queries starting with "SHOW" + (https://github.com/ansible-collections/community.postgresql/pull/592). + - postgresql_user - module failed when running against an SQL_ASCII encoded + database as the user's current password was returned as bytes as opposed to + a str. Fix now checks for this case and decodes the bytes as an ascii encoded + string. (https://github.com/ansible-collections/community.postgresql/issues/584). + minor_changes: + - postgresql_db - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/614). + - postgresql_ext - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/354). + - postgresql_publication - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/354). + - postgresql_schema - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/354). + - postgresql_subscription - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/354). + - postgresql_tablespace - add the ``comment`` argument (https://github.com/ansible-collections/community.postgresql/issues/354). + release_summary: 'This is the minor release of the ``community.postgresql`` + collection. + + This changelog contains all changes to the modules and plugins in this collection + + that have been made after the previous release.' 
+ fragments: + - 0-postgresql_db.yml + - 1-postgresql_schema.yml + - 3-postgresql_tablespace.yml + - 3.3.0.yml + - 4-postgresql_ext.yml + - 5-postgresql_publication.yml + - 585-decode-data-from-sql_ascii-databases.yml + - 592-return-not-changed-for-show.yml + - 6-postgresql_subscription.yml + release_date: '2023-12-20' + 3.4.0: + changes: + bugfixes: + - 'postgresql_privs - fix a failure when altering privileges with ``grant_option: + true`` (https://github.com/ansible-collections/community.postgresql/issues/668).' + minor_changes: + - postgresql_db - add the ``icu_locale`` argument (https://github.com/ansible-collections/community.postgresql/issues/666). + - postgresql_db - add the ``locale_provider`` argument (https://github.com/ansible-collections/community.postgresql/issues/666). + release_summary: 'This is a minor release of the ``community.postgresql`` collection. + + This changelog contains all changes to the modules and plugins in this collection + + that have been made after the previous release.' + fragments: + - 0-privs.yml + - 3.4.0.yml + - 667-db_icu_provider.yml + release_date: '2024-02-09' diff --git a/ansible_collections/community/postgresql/meta/runtime.yml b/ansible_collections/community/postgresql/meta/runtime.yml index f7fa752ae..0cb35f9c8 100644 --- a/ansible_collections/community/postgresql/meta/runtime.yml +++ b/ansible_collections/community/postgresql/meta/runtime.yml @@ -25,3 +25,9 @@ action_groups: - postgresql_tablespace - postgresql_user - postgresql_user_obj_stat_info +plugin_routing: + modules: + postgresql_lang: + deprecation: + removal_version: 4.0.0 + warning_text: Use postgresql_ext instead. diff --git a/ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py b/ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py index be74a4552..08b23988c 100644 --- a/ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py +++ b/ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py @@ -2,7 +2,8 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type @@ -80,13 +81,12 @@ notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host. - To avoid "Peer authentication failed for user postgres" error, use postgres user as a I(become_user). -- This module uses C(psycopg2), a Python PostgreSQL database adapter. You must - ensure that C(psycopg2) is installed on the host before using this module. +- This module uses C(psycopg), a Python PostgreSQL database adapter. You must + ensure that C(psycopg2 >= 2.5.1) or C(psycopg3 >= 3.1.8) is installed on the host before using this module. - If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. -- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages +- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python3-psycopg2) packages on the remote host before using this module. -- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3. 
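The updated note above tells users to install either driver; on the code side the collection obtains dict-like rows from whichever driver is present (via the new pg_cursor_args mapping added to module_utils/postgres.py later in this diff). The following standalone sketch shows that difference in plain Psycopg terms; the DSN is a placeholder and none of this is the collection's own connection handling.

```python
# Standalone sketch of dict-style row access with either driver.
DSN = "dbname=acme user=postgres"  # placeholder connection string

try:
    import psycopg  # Psycopg 3
    from psycopg.rows import dict_row

    with psycopg.connect(DSN, row_factory=dict_row) as conn:
        with conn.cursor() as cur:
            cur.execute("SELECT current_database() AS db")
            print(cur.fetchone()["db"])  # rows come back as dicts
except ImportError:
    import psycopg2
    from psycopg2.extras import DictCursor

    conn = psycopg2.connect(DSN)
    try:
        with conn.cursor(cursor_factory=DictCursor) as cur:
            cur.execute("SELECT current_database() AS db")
            print(cur.fetchone()["db"])  # DictRow also supports key access
    finally:
        conn.close()
```

In the modules touched by this diff that branching is reduced to a single `db_connection.cursor(**pg_cursor_args)` call.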
-requirements: [ psycopg2 ] +requirements: [ 'psycopg2 >= 2.5.1' ] ''' diff --git a/ansible_collections/community/postgresql/plugins/module_utils/_version.py b/ansible_collections/community/postgresql/plugins/module_utils/_version.py index 0a34929e9..391138bd5 100644 --- a/ansible_collections/community/postgresql/plugins/module_utils/_version.py +++ b/ansible_collections/community/postgresql/plugins/module_utils/_version.py @@ -24,7 +24,8 @@ Every version number class implements the following interface: of the same class, thus must follow the same rules) """ -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import re diff --git a/ansible_collections/community/postgresql/plugins/module_utils/database.py b/ansible_collections/community/postgresql/plugins/module_utils/database.py index 8aba6aad8..fe09a1cc0 100644 --- a/ansible_collections/community/postgresql/plugins/module_utils/database.py +++ b/ansible_collections/community/postgresql/plugins/module_utils/database.py @@ -8,12 +8,13 @@ # # Simplified BSD License (see simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import re -from ansible.module_utils._text import to_native +from ansible.module_utils._text import to_native # Input patterns for is_input_dangerous function: # @@ -148,7 +149,7 @@ def is_input_dangerous(string): Can be used to prevent SQL injections. Note: use this function only when you can't use - psycopg2's cursor.execute method parametrized + psycopg's cursor.execute method parametrized (typically with DDL queries). 
""" if not string: diff --git a/ansible_collections/community/postgresql/plugins/module_utils/postgres.py b/ansible_collections/community/postgresql/plugins/module_utils/postgres.py index e4a44df56..06eb157fb 100644 --- a/ansible_collections/community/postgresql/plugins/module_utils/postgres.py +++ b/ansible_collections/community/postgresql/plugins/module_utils/postgres.py @@ -9,25 +9,47 @@ # # Simplified BSD License (see simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from datetime import timedelta from decimal import Decimal from os import environ -psycopg2 = None # This line needs for unit tests -try: - import psycopg2 - import psycopg2.extras - HAS_PSYCOPG2 = True -except ImportError: - HAS_PSYCOPG2 = False - -from ansible.module_utils.basic import missing_required_lib from ansible.module_utils._text import to_native +from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.six import iteritems -from ansible_collections.community.postgresql.plugins.module_utils.version import LooseVersion +from ansible_collections.community.postgresql.plugins.module_utils.version import \ + LooseVersion + +psycopg = None # This line is needed for unit tests +psycopg2 = None # This line is needed for unit tests +pg_cursor_args = None # This line is needed for unit tests +PSYCOPG_VERSION = LooseVersion("0.0") # This line is needed for unit tests + +try: + import psycopg + from psycopg import ClientCursor + from psycopg.rows import dict_row + + # We need Psycopg 3 to be at least 3.1.0 because we need Client-side-binding cursors + # When a Linux distribution provides both Psycopg2 and Psycopg 3.0 we will use Psycopg2 + PSYCOPG_VERSION = LooseVersion(psycopg.__version__) + if PSYCOPG_VERSION < LooseVersion("3.1"): + raise ImportError + HAS_PSYCOPG = True + pg_cursor_args = {"row_factory": psycopg.rows.dict_row} +except ImportError: + try: + import psycopg2 + psycopg = psycopg2 + from psycopg2.extras import DictCursor + PSYCOPG_VERSION = LooseVersion(psycopg2.__version__) + HAS_PSYCOPG = True + pg_cursor_args = {"cursor_factory": DictCursor} + except ImportError: + HAS_PSYCOPG = False TYPES_NEED_TO_CONVERT = (Decimal, timedelta) @@ -51,7 +73,7 @@ def postgres_common_argument_spec(): login_unix_socket=dict(default='', aliases=['unix_socket']), port=dict( type='int', - default=5432 if not env_vars.get("PGPORT") else int(env_vars.get("PGPORT")), + default=int(env_vars.get("PGPORT", 5432)), aliases=['login_port'] ), ssl_mode=dict( @@ -74,17 +96,21 @@ def postgres_common_argument_spec(): def ensure_required_libs(module): """Check required libraries.""" - if not HAS_PSYCOPG2: + if not HAS_PSYCOPG: + # TODO: Should we raise it as psycopg? That will be a breaking change module.fail_json(msg=missing_required_lib('psycopg2')) - if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'): + elif PSYCOPG_VERSION < LooseVersion("2.5.1"): + module.warn("psycopg should be at least 2.5.1 to support all modules functionality") + + if module.params.get('ca_cert') and PSYCOPG_VERSION < LooseVersion('2.4.3'): module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter') def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True): """Connect to a PostgreSQL database. 
- Return a tuple containing a psycopg2 connection object and error message / None. + Return a tuple containing a psycopg connection object and error message / None. Args: module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class @@ -98,16 +124,25 @@ def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True): db_connection = None conn_err = None try: - db_connection = psycopg2.connect(**conn_params) - if autocommit: - if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'): - db_connection.set_session(autocommit=True) - else: - db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + if PSYCOPG_VERSION >= LooseVersion("3.0"): + conn_params["autocommit"] = autocommit + conn_params["cursor_factory"] = ClientCursor + conn_params["row_factory"] = dict_row + db_connection = psycopg.connect(**conn_params) + else: + db_connection = psycopg2.connect(**conn_params) + if autocommit: + if PSYCOPG_VERSION >= LooseVersion("2.4.2"): + db_connection.set_session(autocommit=True) + else: + db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) # Switch role, if specified: if module.params.get('session_role'): - cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) + if PSYCOPG_VERSION >= LooseVersion("3.0"): + cursor = db_connection.cursor(row_factory=psycopg.rows.dict_row) + else: + cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) try: cursor.execute('SET ROLE "%s"' % module.params['session_role']) @@ -216,7 +251,7 @@ def get_conn_params(module, params_dict, warn_db_default=True): } # Might be different in the modules: - if LooseVersion(psycopg2.__version__) >= LooseVersion('2.7.0'): + if PSYCOPG_VERSION >= LooseVersion("2.7.0"): if params_dict.get('db'): params_map['db'] = 'dbname' elif params_dict.get('database'): @@ -275,7 +310,7 @@ class PgRole(): res = exec_sql(self, query, query_params={'dst_role': self.name}, add_to_executed=False) if res: - return res[0][0] + return res[0]["array"] else: return [] @@ -397,14 +432,14 @@ class PgMembership(object): def __roles_exist(self, roles): tmp = ["'" + x + "'" for x in roles] query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp) - return [x[0] for x in exec_sql(self, query, add_to_executed=False)] + return [x["rolname"] for x in exec_sql(self, query, add_to_executed=False)] def set_search_path(cursor, search_path): """Set session's search_path. Args: - cursor (Psycopg2 cursor): Database cursor object. + cursor (Psycopg cursor): Database cursor object. search_path (str): String containing comma-separated schema names. """ cursor.execute('SET search_path TO %s' % search_path) @@ -471,7 +506,68 @@ def get_server_version(conn): Returns server version (int). """ - if LooseVersion(psycopg2.__version__) >= LooseVersion('3.0.0'): + if PSYCOPG_VERSION >= LooseVersion("3.0.0"): return conn.info.server_version else: return conn.server_version + + +def set_autocommit(conn, autocommit): + """Set autocommit. + + Args: + conn (psycopg.Connection) -- Psycopg connection object. + autocommit -- bool. + """ + if PSYCOPG_VERSION >= LooseVersion("2.4.2"): + conn.autocommit = autocommit + else: + if autocommit: + conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + else: + conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED) + + +def get_comment(cursor, obj_type, obj_name): + """Get DB object's comment. + + Args: + cursor (Psycopg cursor) -- Database cursor object. 
+ obj_name (str) -- DB object name to get comment from. + obj_type (str) -- Object type. + + Returns object's comment (str) if present or None. + """ + query = '' + if obj_type == 'role': + query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment " + "FROM pg_catalog.pg_roles AS r " + "WHERE r.rolname = %(obj_name)s") + elif obj_type == 'extension': + query = ("SELECT pg_catalog.obj_description(e.oid, 'pg_extension') AS comment " + "FROM pg_catalog.pg_extension AS e " + "WHERE e.extname = %(obj_name)s") + + cursor.execute(query, {'obj_name': obj_name}) + return cursor.fetchone()['comment'] + + +def set_comment(cursor, comment, obj_type, obj_name, check_mode=True, executed_queries=None): + """Get DB object's comment. + + Args: + cursor (Psycopg cursor) -- Database cursor object. + comment(str) -- Comment to set on object. + obj_name (str) -- DB object name to set comment on. + obj_type (str) -- Object type. + executed_statements (list) -- List of executed state-modifying statements. + """ + query = 'COMMENT ON %s "%s" IS ' % (obj_type.upper(), obj_name) + + if not check_mode: + cursor.execute(query + '%(comment)s', {'comment': comment}) + + if executed_queries is not None: + executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment})) + + return True diff --git a/ansible_collections/community/postgresql/plugins/module_utils/saslprep.py b/ansible_collections/community/postgresql/plugins/module_utils/saslprep.py index 804200c37..124858c5f 100644 --- a/ansible_collections/community/postgresql/plugins/module_utils/saslprep.py +++ b/ansible_collections/community/postgresql/plugins/module_utils/saslprep.py @@ -10,24 +10,14 @@ # # Simplified BSD License (see simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -from stringprep import ( - in_table_a1, - in_table_b1, - in_table_c3, - in_table_c4, - in_table_c5, - in_table_c6, - in_table_c7, - in_table_c8, - in_table_c9, - in_table_c12, - in_table_c21_c22, - in_table_d1, - in_table_d2, -) +from stringprep import (in_table_a1, in_table_b1, in_table_c3, in_table_c4, + in_table_c5, in_table_c6, in_table_c7, in_table_c8, + in_table_c9, in_table_c12, in_table_c21_c22, + in_table_d1, in_table_d2) from unicodedata import normalize from ansible.module_utils.six import text_type diff --git a/ansible_collections/community/postgresql/plugins/module_utils/version.py b/ansible_collections/community/postgresql/plugins/module_utils/version.py index 6afaca75e..578032a42 100644 --- a/ansible_collections/community/postgresql/plugins/module_utils/version.py +++ b/ansible_collections/community/postgresql/plugins/module_utils/version.py @@ -1,11 +1,10 @@ # -*- coding: utf-8 -*- - # Copyright: (c) 2021, Felix Fontein <felix@fontein.de> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - """Provide version object to compare version numbers.""" from __future__ import absolute_import, division, print_function + __metaclass__ = type # Once we drop support for Ansible 2.11, we can @@ -13,4 +12,4 @@ __metaclass__ = type # # from ansible.module_utils.compat.version import LooseVersion -from ._version import LooseVersion +from ._version import LooseVersion # noqa diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py 
b/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py index 37ee9b80f..81e0dba12 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py @@ -4,7 +4,8 @@ # Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -163,41 +164,34 @@ EXAMPLES = r''' RETURN = r''' queries: description: List of executed queries. - returned: always + returned: success type: str sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ] src: description: Data source. - returned: always + returned: success type: str sample: "mytable" dst: description: Data destination. - returned: always + returned: success type: str sample: "/tmp/data.csv" ''' -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems from ansible_collections.community.postgresql.plugins.module_utils.database import ( check_input, pg_quote_identifier, ) from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, - get_conn_params, + exec_sql, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, ) -from ansible.module_utils.six import iteritems class PgCopyData(object): @@ -206,11 +200,11 @@ class PgCopyData(object): Arguments: module (AnsibleModule) -- object of AnsibleModule class - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library Attributes: module (AnsibleModule) -- object of AnsibleModule class - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library changed (bool) -- something was changed after execution or not executed_queries (list) -- executed queries dst (str) -- data destination table (when copy_from) @@ -383,12 +377,12 @@ def main(): elif module.params.get('copy_to') and not module.params.get('src'): module.fail_json(msg='src param is necessary with copy_to') - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) # Connect to DB and make cursor object: conn_params = get_conn_params(module, module.params) db_connection, dummy = connect_to_db(module, conn_params, autocommit=False) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) ############## # Create the object and do main job: diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py index e45d9b769..ae2faa007 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = 
type DOCUMENTATION = r''' @@ -47,6 +48,21 @@ options: - Must match LC_CTYPE of template database unless C(template0) is used as template. type: str default: '' + icu_locale: + description: + - Specifies the ICU locale (ICU_LOCALE) for the database default collation order and character classification, overriding the setting locale. + - The locale provider must be ICU. The default is the setting of locale if specified; otherwise the same setting as the template database. + type: str + default: '' + version_added: '3.4.0' + locale_provider: + description: + - Specifies the provider to use for the default collation in this database (LOCALE_PROVIDER). + - Possible values are icu (if the server was built with ICU support) or libc. + - By default, the provider is the same as that of the template. + type: str + default: '' + version_added: '3.4.0' session_role: description: - Switch to session_role after connecting. @@ -130,6 +146,12 @@ options: type: bool default: true version_added: '0.2.0' + comment: + description: + - Sets a comment on the database. + - To reset the comment, pass an empty string. + type: str + version_added: '3.3.0' seealso: - name: CREATE DATABASE reference description: Complete reference of the CREATE DATABASE command documentation. @@ -148,7 +170,7 @@ seealso: - module: community.postgresql.postgresql_ping notes: -- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8. +- State C(dump) and C(restore) don't require I(psycopg) since ansible version 2.8. attributes: check_mode: @@ -164,6 +186,7 @@ EXAMPLES = r''' - name: Create a new database with name "acme" community.postgresql.postgresql_db: name: acme + comment: My test DB # Note: If a template different from "template0" is specified, # encoding and locale settings must match those of the template. @@ -173,6 +196,8 @@ EXAMPLES = r''' encoding: UTF-8 lc_collate: de_DE.UTF-8 lc_ctype: de_DE.UTF-8 + locale_provider: icu + icu_locale: de-DE-x-icu template: template0 # Note: Default limit for the number of concurrent connections to @@ -257,7 +282,7 @@ EXAMPLES = r''' RETURN = r''' executed_commands: description: List of commands which tried to run. 
- returned: always + returned: success type: list sample: ["CREATE DATABASE acme"] version_added: '0.2.0' @@ -268,26 +293,22 @@ import os import subprocess import traceback -try: - from psycopg2.extras import DictCursor -except ImportError: - HAS_PSYCOPG2 = False -else: - HAS_PSYCOPG2 = True - -from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( - connect_to_db, - get_conn_params, - ensure_required_libs, - postgres_common_argument_spec -) +from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import shlex_quote from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, SQLParseError, + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + ensure_required_libs, + get_conn_params, + get_server_version, + pg_cursor_args, + postgres_common_argument_spec, + set_comment, ) -from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils._text import to_native executed_commands = [] @@ -321,16 +342,32 @@ def get_encoding_id(cursor, encoding): def get_db_info(cursor, db): - query = """ - SELECT rolname AS owner, - pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id, - datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit, - spcname AS tablespace - FROM pg_database - JOIN pg_roles ON pg_roles.oid = pg_database.datdba - JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace - WHERE datname = %(db)s - """ + if get_server_version(cursor.connection) >= 150000: + query = """ + SELECT rolname AS owner, + pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id, + datcollate AS lc_collate, datctype AS lc_ctype, daticulocale AS icu_locale, + CASE datlocprovider WHEN 'c' THEN 'libc' WHEN 'i' THEN 'icu' END AS locale_provider, + pg_database.datconnlimit AS conn_limit, spcname AS tablespace, + pg_catalog.shobj_description(pg_database.oid, 'pg_database') AS comment + FROM pg_database + JOIN pg_roles ON pg_roles.oid = pg_database.datdba + JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace + WHERE datname = %(db)s + """ + else: + query = """ + SELECT rolname AS owner, + pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id, + datcollate AS lc_collate, datctype AS lc_ctype, + null::char AS icu_locale, null::text AS locale_provider, + pg_database.datconnlimit AS conn_limit, spcname AS tablespace, + pg_catalog.shobj_description(pg_database.oid, 'pg_database') AS comment + FROM pg_database + JOIN pg_roles ON pg_roles.oid = pg_database.datdba + JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace + WHERE datname = %(db)s + """ cursor.execute(query, {'db': db}) return cursor.fetchone() @@ -342,7 +379,7 @@ def db_exists(cursor, db): def db_dropconns(cursor, db): - if cursor.connection.server_version >= 90200: + if get_server_version(cursor.connection) >= 90200: """ Drop DB connections in Postgres 9.2 and above """ query_terminate = ("SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity " "WHERE pg_stat_activity.datname=%(db)s AND pid <> pg_backend_pid()") @@ -360,7 +397,7 @@ def db_delete(cursor, db, force=False): if db_exists(cursor, db): query = 'DROP DATABASE "%s"' % db if force: - if cursor.connection.server_version >= 130000: + if get_server_version(cursor.connection) >= 130000: query = ('DROP DATABASE "%s" WITH (FORCE)' % db) else: 
db_dropconns(cursor, db) @@ -371,8 +408,10 @@ def db_delete(cursor, db, force=False): return False -def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace): - params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace) +def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, icu_locale, locale_provider, conn_limit, tablespace, comment, check_mode): + params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, iculocale=icu_locale, localeprovider=locale_provider, conn_limit=conn_limit, + tablespace=tablespace) + icu_supported = get_server_version(cursor.connection) >= 150000 if not db_exists(cursor, db): query_fragments = ['CREATE DATABASE "%s"' % db] if owner: @@ -385,6 +424,10 @@ def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_ query_fragments.append('LC_COLLATE %(collate)s') if lc_ctype: query_fragments.append('LC_CTYPE %(ctype)s') + if icu_locale and icu_supported: + query_fragments.append('ICU_LOCALE %(iculocale)s') + if locale_provider and icu_supported: + query_fragments.append('LOCALE_PROVIDER %(localeprovider)s') if tablespace: query_fragments.append('TABLESPACE "%s"' % tablespace) if conn_limit: @@ -392,6 +435,8 @@ def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_ query = ' '.join(query_fragments) executed_commands.append(cursor.mogrify(query, params)) cursor.execute(query, params) + if comment: + set_comment(cursor, comment, 'database', db, check_mode, executed_commands) return True else: db_info = get_db_info(cursor, db) @@ -410,9 +455,23 @@ def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_ 'Changing LC_CTYPE is not supported.' 'Current LC_CTYPE: %s' % db_info['lc_ctype'] ) + elif icu_locale and icu_locale != db_info['icu_locale']: + raise NotSupportedError( + 'Changing ICU_LOCALE is not supported.' + 'Current ICU_LOCALE: %s' % db_info['icu_locale'] + ) + elif locale_provider and locale_provider != db_info['locale_provider']: + raise NotSupportedError( + 'Changing LOCALE_PROVIDER is not supported.' 
+ 'Current LOCALE_PROVIDER: %s' % db_info['locale_provider'] + ) else: changed = False + if db_info['comment'] is None: + # For the resetting comment feature (comment: '') to work correctly + db_info['comment'] = '' + if owner and owner != db_info['owner']: changed = set_owner(cursor, db, owner) @@ -422,26 +481,40 @@ def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_ if tablespace and tablespace != db_info['tablespace']: changed = set_tablespace(cursor, db, tablespace) + if comment is not None and comment != db_info['comment']: + changed = set_comment(cursor, comment, 'database', db, check_mode, executed_commands) + return changed -def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace): +def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, icu_locale, locale_provider, conn_limit, tablespace, comment): if not db_exists(cursor, db): return False else: db_info = get_db_info(cursor, db) + + if db_info['comment'] is None: + # For the resetting comment feature (comment: '') to work correctly + db_info['comment'] = '' + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): return False elif lc_collate and lc_collate != db_info['lc_collate']: return False elif lc_ctype and lc_ctype != db_info['lc_ctype']: return False + elif icu_locale and icu_locale != db_info['icu_locale']: + return False + elif locale_provider and locale_provider != db_info['locale_provider']: + return False elif owner and owner != db_info['owner']: return False elif conn_limit and conn_limit != str(db_info['conn_limit']): return False elif tablespace and tablespace != db_info['tablespace']: return False + elif comment is not None and comment != db_info['comment']: + return False else: return True @@ -643,6 +716,8 @@ def main(): encoding=dict(type='str', default=''), lc_collate=dict(type='str', default=''), lc_ctype=dict(type='str', default=''), + icu_locale=dict(type='str', default=''), + locale_provider=dict(type='str', default=''), state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'rename', 'restore']), target=dict(type='path', default=''), @@ -654,6 +729,7 @@ def main(): dump_extra_args=dict(type='str', default=None), trust_input=dict(type='bool', default=True), force=dict(type='bool', default=False), + comment=dict(type='str', default=None), ) module = AnsibleModule( @@ -667,6 +743,8 @@ def main(): encoding = module.params["encoding"] lc_collate = module.params["lc_collate"] lc_ctype = module.params["lc_ctype"] + icu_locale = module.params["icu_locale"] + locale_provider = module.params["locale_provider"] target = module.params["target"] target_opts = module.params["target_opts"] state = module.params["state"] @@ -678,6 +756,7 @@ def main(): dump_extra_args = module.params['dump_extra_args'] trust_input = module.params['trust_input'] force = module.params['force'] + comment = module.params['comment'] if state == 'rename': if not target: @@ -692,7 +771,8 @@ def main(): # Check input if not trust_input: # Check input for potentially dangerous elements: - check_input(module, owner, conn_limit, encoding, db, template, tablespace, session_role) + check_input(module, owner, conn_limit, encoding, db, + template, tablespace, session_role, comment) raw_connection = state in ("dump", "restore") @@ -712,7 +792,7 @@ def main(): if not raw_connection: db_connection, dummy = connect_to_db(module, conn_params, autocommit=True) - cursor = db_connection.cursor(cursor_factory=DictCursor) + 
cursor = db_connection.cursor(**pg_cursor_args) if session_role: try: @@ -726,7 +806,8 @@ def main(): changed = db_exists(cursor, db) elif state == "present": - changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace) + changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, + icu_locale, locale_provider, conn_limit, tablespace, comment) elif state == "rename": changed = rename_db(module, cursor, db, target, check_mode=True) @@ -741,7 +822,9 @@ def main(): elif state == "present": try: - changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace) + changed = db_create(cursor, db, owner, template, encoding, lc_collate, + lc_ctype, icu_locale, locale_provider, conn_limit, + tablespace, comment, module.check_mode) except SQLParseError as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py index e9f9e46b7..7dfc95ac4 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -77,7 +78,7 @@ options: path exists. - Downgrading is only supported if the extension provides a downgrade path otherwise the extension must be removed and a lower version of the extension must be made available. - - Set I(version=latest) to always update the extension to the latest available version. + - Set I(version=latest) to update the extension to the latest available version. type: str trust_input: description: @@ -87,6 +88,13 @@ options: type: bool default: true version_added: '0.2.0' + comment: + description: + - Sets a comment on the extension. + - To reset the comment, pass an empty string. + type: str + version_added: '3.3.0' + seealso: - name: PostgreSQL extensions description: General information about PostgreSQL extensions. @@ -114,6 +122,7 @@ author: - Sandro Santilli (@strk) - Andrew Klychkov (@Andersson007) - Keith Fiske (@keithf4) +- Daniele Giudice (@RealGreenDragon) extends_documentation_fragment: - community.postgresql.postgres @@ -125,6 +134,7 @@ EXAMPLES = r''' name: postgis db: acme schema: foo + comment: Test extension - name: Removes postgis extension to the database acme community.postgresql.postgresql_ext: @@ -161,34 +171,40 @@ EXAMPLES = r''' ''' RETURN = r''' -query: +queries: description: List of executed queries. - returned: always + returned: success type: list sample: ["DROP EXTENSION \"acme\""] - +prev_version: + description: Previous installed extension version or empty string if the extension was not installed. + returned: success + type: str + sample: '1.0' + version_added: '3.1.0' +version: + description: Current installed extension version or empty string if the extension is not installed. 
+ returned: success + type: str + sample: '2.0' + version_added: '3.1.0' ''' import traceback -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - +from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, -) +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, ensure_required_libs, + get_comment, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, + set_comment, ) -from ansible.module_utils._text import to_native executed_queries = [] @@ -198,37 +214,35 @@ executed_queries = [] # -def ext_delete(cursor, ext, current_version, cascade): +def ext_delete(check_mode, cursor, ext, cascade): """Remove the extension from the database. Return True if success. Args: - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library ext (str) -- extension name - current_version (str) -- installed version of the extension. - Value obtained from ext_get_versions and used to - determine if the extension was installed. - cascade (boolean) -- Pass the CASCADE flag to the DROP commmand + cascade (boolean) -- Pass the CASCADE flag to the DROP command """ - if current_version: - query = "DROP EXTENSION \"%s\"" % ext - if cascade: - query += " CASCADE" + query = "DROP EXTENSION \"%s\"" % ext + + if cascade: + query += " CASCADE" + + if not check_mode: cursor.execute(query) - executed_queries.append(cursor.mogrify(query)) - return True - else: - return False + executed_queries.append(cursor.mogrify(query)) + + return True -def ext_update_version(cursor, ext, version): +def ext_update_version(check_mode, cursor, ext, version): """Update extension version. Return True if success. Args: - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library ext (str) -- extension name version (str) -- extension version """ @@ -239,22 +253,24 @@ def ext_update_version(cursor, ext, version): query += " TO %(ver)s" params['ver'] = version - cursor.execute(query, params) + if not check_mode: + cursor.execute(query, params) executed_queries.append(cursor.mogrify(query, params)) return True -def ext_create(cursor, ext, schema, cascade, version): +def ext_create(check_mode, cursor, ext, schema, cascade, version): """ Create the extension objects inside the database. Return True if success. Args: - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library ext (str) -- extension name schema (str) -- target schema for extension objects + cascade (boolean) -- Pass the CASCADE flag to the CREATE command version (str) -- extension version """ query = "CREATE EXTENSION \"%s\"" % ext @@ -268,18 +284,20 @@ def ext_create(cursor, ext, schema, cascade, version): if cascade: query += " CASCADE" - cursor.execute(query, params) + if not check_mode: + cursor.execute(query, params) executed_queries.append(cursor.mogrify(query, params)) + return True def ext_get_versions(cursor, ext): """ Get the currently created extension version if it is installed - in the database and versions that are available if it is - installed on the system. 
+ in the database, its default version (used to update to 'latest'), + and versions that are available if it is installed on the system. - Return tuple (current_version, [list of available versions]). + Return tuple (current_version, default_version, [list of available versions]). Note: the list of available versions contains only versions that higher than the current created version. @@ -287,11 +305,12 @@ def ext_get_versions(cursor, ext): available versions. Args: - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library ext (str) -- extension name """ current_version = None + default_version = None params = {} params['ext'] = ext @@ -303,54 +322,64 @@ def ext_get_versions(cursor, ext): res = cursor.fetchone() if res: - current_version = res[0] + current_version = res["extversion"] + + # 2. Get the extension default version: + query = ("SELECT default_version FROM pg_catalog.pg_available_extensions " + "WHERE name = %(ext)s") + + cursor.execute(query, params) + + res = cursor.fetchone() + if res: + default_version = res["default_version"] - # 2. Get available versions: - query = ("SELECT version FROM pg_available_extension_versions " + # 3. Get extension available versions: + query = ("SELECT version FROM pg_catalog.pg_available_extension_versions " "WHERE name = %(ext)s") cursor.execute(query, params) - available_versions = set(r[0] for r in cursor.fetchall()) + available_versions = set(r["version"] for r in cursor.fetchall()) if current_version is None: current_version = False + if default_version is None: + default_version = False - return (current_version, available_versions) + return (current_version, default_version, available_versions) def ext_valid_update_path(cursor, ext, current_version, version): """ Check to see if the installed extension version has a valid update - path to the given version. A version of 'latest' is always a valid path. + path to the given version. Return True if a valid path exists. Otherwise return False. + Note: 'latest' is not a valid value for version here as it can be + replaced with default_version specified in extension control file. + Args: - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library ext (str) -- extension name current_version (str) -- installed version of the extension. version (str) -- target extension version to update to. - A value of 'latest' is always a valid path and will result - in the extension update command always being run. 
""" valid_path = False params = {} - if version != 'latest': - query = ("SELECT path FROM pg_extension_update_paths(%(ext)s) " - "WHERE source = %(cv)s " - "AND target = %(ver)s") + query = ("SELECT path FROM pg_extension_update_paths(%(ext)s) " + "WHERE source = %(cv)s " + "AND target = %(ver)s") - params['ext'] = ext - params['cv'] = current_version - params['ver'] = version + params['ext'] = ext + params['cv'] = current_version + params['ver'] = version - cursor.execute(query, params) - res = cursor.fetchone() - if res is not None: - valid_path = True - else: + cursor.execute(query, params) + res = cursor.fetchone() + if res is not None: valid_path = True return (valid_path) @@ -372,6 +401,7 @@ def main(): session_role=dict(type="str"), version=dict(type="str"), trust_input=dict(type="bool", default=True), + comment=dict(type="str", default=None), ) module = AnsibleModule( @@ -386,56 +416,82 @@ def main(): version = module.params["version"] session_role = module.params["session_role"] trust_input = module.params["trust_input"] + comment = module.params["comment"] + changed = False if not trust_input: - check_input(module, ext, schema, version, session_role) + check_input(module, ext, schema, version, session_role, comment) if version and state == 'absent': module.warn("Parameter version is ignored when state=absent") - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params) db_connection, dummy = connect_to_db(module, conn_params, autocommit=True) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) try: # Get extension info and available versions: - curr_version, available_versions = ext_get_versions(cursor, ext) + curr_version, default_version, available_versions = ext_get_versions(cursor, ext) + + # Decode version 'latest' when passed (if version is not passed 'latest' is assumed) + # Note: real_version used for checks but not in CREATE/DROP/ALTER EXTENSION commands, + # as the correct way to obtain 'latest' version is not specify the version + if not version or version == 'latest': + # If there are not available versions the extension is not available + if not available_versions: + module.fail_json(msg="Extension %s is not available" % ext) + # Check default_version is available + if default_version: + # 'latest' version matches default_version specified in extension control file + real_version = default_version + else: + # Passed version is 'latest', versions are available, but no default_version is specified + # in extension control file. In this situation CREATE/ALTER EXTENSION commands fail if + # a specific version is not passed ('latest' cannot be determined). 
+ module.fail_json(msg="Passed version 'latest' but no default_version available " + "in extension control file") + else: + real_version = version if state == "present": - # If version passed + # If version passed: if version: # If extension is installed, update to passed version if a valid path exists if curr_version: - # Given version already installed - if curr_version == version: + # Given/Latest version already installed + if curr_version == real_version: changed = False - # Attempt to update to given version or latest version defined in extension control file - # ALTER EXTENSION is actually run if valid, so 'changed' will be true even if nothing updated + # Attempt to update to given/latest version else: - valid_update_path = ext_valid_update_path(cursor, ext, curr_version, version) + valid_update_path = ext_valid_update_path(cursor, ext, curr_version, real_version) if valid_update_path: - if module.check_mode: - changed = True - else: - changed = ext_update_version(cursor, ext, version) + changed = ext_update_version(module.check_mode, cursor, ext, version) else: - module.fail_json(msg="Passed version '%s' has no valid update path from " - "the currently installed version '%s' or " - "the passed version is not available" % (version, curr_version)) + if version == 'latest': + # No valid update path from curr_version to latest extension version + # (extension is buggy or no direct update supported) + module.fail_json(msg="Latest version '%s' has no valid update path from " + "the currently installed version '%s'" % (real_version, curr_version)) + else: + module.fail_json(msg="Passed version '%s' has no valid update path from " + "the currently installed version '%s' or " + "the passed version is not available" % (version, curr_version)) + # If extension is not installed, install passed version else: - # If not requesting latest version and passed version not available - if version != 'latest' and version not in available_versions: - module.fail_json(msg="Passed version '%s' is not available" % version) - # Else install the passed version when available - else: - if module.check_mode: - changed = True + # If passed version not available fail + if real_version not in available_versions: + if version == 'latest': + # Latest version not available (extension is buggy) + module.fail_json(msg="Latest version '%s' is not available" % real_version) else: - changed = ext_create(cursor, ext, schema, cascade, version) + module.fail_json(msg="Passed version '%s' is not available" % real_version) + # Else install the passed version + else: + changed = ext_create(module.check_mode, cursor, ext, schema, cascade, version) # If version is not passed: else: @@ -445,30 +501,51 @@ def main(): else: # If the ext doesn't exist and is available: if available_versions: - if module.check_mode: - changed = True - else: - changed = ext_create(cursor, ext, schema, cascade, 'latest') - + # 'latest' version installed by default if version not passed + changed = ext_create(module.check_mode, cursor, ext, schema, cascade, 'latest') # If the ext doesn't exist and is not available: else: module.fail_json(msg="Extension %s is not available" % ext) + if comment is not None: + current_comment = get_comment(cursor, 'extension', ext) + # For the resetting comment feature (comment: '') to work correctly + current_comment = current_comment if current_comment is not None else '' + if comment != current_comment: + changed = set_comment(cursor, comment, 'extension', ext, module.check_mode, executed_queries) + elif state == 
"absent": if curr_version: - if module.check_mode: - changed = True - else: - changed = ext_delete(cursor, ext, curr_version, cascade) + changed = ext_delete(module.check_mode, cursor, ext, cascade) else: changed = False + # Get extension info again: + new_version, new_default_version, new_available_versions = ext_get_versions(cursor, ext) + + # Parse previous and current version for module output + out_prev_version = curr_version if curr_version else '' + if module.check_mode and changed: + if state == "present": + out_version = real_version + elif state == "absent": + out_version = '' + else: + out_version = new_version if new_version else '' + except Exception as e: db_connection.close() module.fail_json(msg="Management of PostgreSQL extension failed: %s" % to_native(e), exception=traceback.format_exc()) db_connection.close() - module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries) + module.exit_json( + changed=changed, + db=module.params["db"], + ext=ext, + prev_version=out_prev_version, + version=out_version, + queries=executed_queries, + ) if __name__ == '__main__': diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py index 2ffb33a8c..7eb35de40 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -222,59 +223,53 @@ EXAMPLES = r''' RETURN = r''' name: description: Index name. - returned: always + returned: success type: str sample: 'foo_idx' state: description: Index state. - returned: always + returned: success type: str sample: 'present' schema: description: Schema where index exists. - returned: always + returned: success type: str sample: 'public' tablespace: description: Tablespace where index exists. - returned: always + returned: success type: str sample: 'ssd' query: description: Query that was tried to be executed. - returned: always + returned: success type: str sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)' storage_params: description: Index storage parameters. - returned: always + returned: success type: list sample: [ "fillfactor=90" ] valid: description: Index validity. 
- returned: always + returned: success type: bool sample: true ''' -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import check_input +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, + exec_sql, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, ) - VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN') @@ -294,13 +289,13 @@ class Index(object): Args: module (AnsibleModule) -- object of AnsibleModule class - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library schema (str) -- name of the index schema name (str) -- name of the index Attrs: module (AnsibleModule) -- object of AnsibleModule class - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library schema (str) -- name of the index schema name (str) -- name of the index exists (bool) -- flag the index exists in the DB or not @@ -357,11 +352,11 @@ class Index(object): self.info = dict( name=self.name, state='present', - schema=res[0][0], - tblname=res[0][1], - tblspace=res[0][2] if res[0][2] else '', - valid=res[0][3], - storage_params=res[0][4] if res[0][4] else [], + schema=res[0]["schemaname"], + tblname=res[0]["tablename"], + tblspace=res[0]["tablespace"] if res[0]["tablespace"] else '', + valid=res[0]["indisvalid"], + storage_params=res[0]["reloptions"] if res[0]["reloptions"] else [], ) return True @@ -520,11 +515,11 @@ def main(): if cascade and state != 'absent': module.fail_json(msg="cascade parameter used only with state=absent") - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params) db_connection, dummy = connect_to_db(module, conn_params, autocommit=True) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) # Set defaults: changed = False diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py index 55bb6ebd8..f21bfcdc1 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -109,18 +110,18 @@ EXAMPLES = r''' RETURN = r''' version: description: Database server version U(https://www.postgresql.org/support/versioning/). - returned: always + returned: success type: dict sample: { "version": { "major": 10, "minor": 6 } } contains: major: description: Major server version. - returned: always + returned: success type: int sample: 11 minor: description: Minor server version. 
- returned: always + returned: success type: int sample: 1 patch: @@ -131,24 +132,24 @@ version: version_added: '1.2.0' full: description: Full server version. - returned: always + returned: success type: str sample: '13.2' version_added: '1.2.0' raw: description: Full output returned by ``SELECT version()``. - returned: always + returned: success type: str sample: 'PostgreSQL 13.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 10.2.1 20201125 (Red Hat 10.2.1-9), 64-bit' version_added: '1.2.0' in_recovery: description: Indicates if the service is in recovery mode or not. - returned: always + returned: success type: bool sample: false databases: description: Information about databases. - returned: always + returned: success type: dict sample: - { "postgres": { "access_priv": "", "collate": "en_US.UTF-8", @@ -156,48 +157,60 @@ databases: contains: database_name: description: Database name. - returned: always + returned: success type: dict sample: template1 contains: access_priv: description: Database access privileges. - returned: always + returned: success type: str sample: "=c/postgres_npostgres=CTc/postgres" collate: description: - Database collation U(https://www.postgresql.org/docs/current/collation.html). - returned: always + returned: success type: str sample: en_US.UTF-8 ctype: description: - Database LC_CTYPE U(https://www.postgresql.org/docs/current/multibyte.html). - returned: always + returned: success + type: str + sample: en_US.UTF-8 + icu_locale: + description: + - Database ICU_LOCALE U(https://www.postgresql.org/docs/current/locale.html#ICU-LOCALES). + returned: success + type: str + sample: en_US.UTF-8 + locale_provider: + description: + - Database LOCALE_PROVIDER U(https://www.postgresql.org/docs/current/locale.html#LOCALE-PROVIDERS). + returned: success type: str sample: en_US.UTF-8 encoding: description: - Database encoding U(https://www.postgresql.org/docs/current/multibyte.html). - returned: always + returned: success type: str sample: UTF8 owner: description: - Database owner U(https://www.postgresql.org/docs/current/sql-createdatabase.html). - returned: always + returned: success type: str sample: postgres size: description: Database size in bytes. - returned: always + returned: success type: str sample: 8189415 extensions: description: - Extensions U(https://www.postgresql.org/docs/current/sql-createextension.html). - returned: always + returned: success type: dict sample: - { "plpgsql": { "description": "PL/pgSQL procedural language", @@ -210,32 +223,32 @@ databases: sample: PL/pgSQL procedural language extversion: description: Extension description. - returned: always + returned: success type: dict contains: major: description: Extension major version. - returned: always + returned: success type: int sample: 1 minor: description: Extension minor version. - returned: always + returned: success type: int sample: 0 raw: description: Extension full version. - returned: always + returned: success type: str sample: '1.0' nspname: description: Namespace where the extension is. - returned: always + returned: success type: str sample: pg_catalog languages: description: Procedural languages U(https://www.postgresql.org/docs/current/xplang.html). - returned: always + returned: success type: dict sample: { "sql": { "lanacl": "", "lanowner": "postgres" } } contains: @@ -243,32 +256,32 @@ databases: description: - Language access privileges U(https://www.postgresql.org/docs/current/catalog-pg-language.html). 
- returned: always + returned: success type: str sample: "{postgres=UC/postgres,=U/postgres}" lanowner: description: - Language owner U(https://www.postgresql.org/docs/current/catalog-pg-language.html). - returned: always + returned: success type: str sample: postgres namespaces: description: - Namespaces (schema) U(https://www.postgresql.org/docs/current/sql-createschema.html). - returned: always + returned: success type: dict sample: { "pg_catalog": { "nspacl": "{postgres=UC/postgres,=U/postgres}", "nspowner": "postgres" } } contains: nspacl: description: - Access privileges U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html). - returned: always + returned: success type: str sample: "{postgres=UC/postgres,=U/postgres}" nspowner: description: - Schema owner U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html). - returned: always + returned: success type: str sample: postgres publications: @@ -303,24 +316,24 @@ repl_slots: active: description: - True means that a receiver has connected to it, and it is currently reserving archives. - returned: always + returned: success type: bool sample: true database: description: Database name this slot is associated with, or null. - returned: always + returned: success type: str sample: acme plugin: description: - Base name of the shared object containing the output plugin this logical slot is using, or null for physical slots. - returned: always + returned: success type: str sample: pgoutput slot_type: description: The slot type - physical or logical. - returned: always + returned: success type: str sample: logical replications: @@ -336,7 +349,7 @@ replications: usename: description: - Name of the user logged into this WAL sender process ('usename' is a column name in pg_stat_replication view). - returned: always + returned: success type: str sample: replication_user app_name: @@ -349,30 +362,30 @@ replications: - IP address of the client connected to this WAL sender. - If this field is null, it indicates that the client is connected via a Unix socket on the server machine. - returned: always + returned: success type: str sample: 10.0.0.101 client_hostname: description: - Host name of the connected client, as reported by a reverse DNS lookup of client_addr. - This field will only be non-null for IP connections, and only when log_hostname is enabled. - returned: always + returned: success type: str sample: dbsrv1 backend_start: description: Time when this process was started, i.e., when the client connected to this WAL sender. - returned: always + returned: success type: str sample: "2019-02-03 00:14:33.908593+03" state: description: Current WAL sender state. - returned: always + returned: success type: str sample: streaming tablespaces: description: - Information about tablespaces U(https://www.postgresql.org/docs/current/catalog-pg-tablespace.html). - returned: always + returned: success type: dict sample: - { "test": { "spcacl": "{postgres=C/postgres,andreyk=C/postgres}", "spcoptions": [ "seq_page_cost=1" ], @@ -380,23 +393,23 @@ tablespaces: contains: spcacl: description: Tablespace access privileges. - returned: always + returned: success type: str sample: "{postgres=C/postgres,andreyk=C/postgres}" spcoptions: description: Tablespace-level options. - returned: always + returned: success type: list sample: [ "seq_page_cost=1" ] spcowner: description: Owner of the tablespace. 
- returned: always + returned: success type: str sample: test_user roles: description: - Information about roles U(https://www.postgresql.org/docs/current/user-manag.html). - returned: always + returned: success type: dict sample: - { "test_role": { "canlogin": true, "member_of": [ "user_ro" ], "superuser": false, @@ -404,37 +417,37 @@ roles: contains: canlogin: description: Login privilege U(https://www.postgresql.org/docs/current/role-attributes.html). - returned: always + returned: success type: bool sample: true member_of: description: - Role membership U(https://www.postgresql.org/docs/current/role-membership.html). - returned: always + returned: success type: list sample: [ "read_only_users" ] superuser: description: User is a superuser or not. - returned: always + returned: success type: bool sample: false valid_until: description: - Password expiration date U(https://www.postgresql.org/docs/current/sql-alterrole.html). - returned: always + returned: success type: str sample: "9999-12-31T23:59:59.999999+00:00" pending_restart_settings: description: - List of settings that are pending restart to be set. - returned: always + returned: success type: list sample: [ "shared_buffers" ] settings: description: - Information about run-time server parameters U(https://www.postgresql.org/docs/current/view-pg-settings.html). - returned: always + returned: success type: dict sample: - { "work_mem": { "boot_val": "4096", "context": "user", "max_val": "2147483647", @@ -443,30 +456,30 @@ settings: contains: setting: description: Current value of the parameter. - returned: always + returned: success type: str sample: 49152 unit: description: Implicit unit of the parameter. - returned: always + returned: success type: str sample: kB boot_val: description: - Parameter value assumed at server startup if the parameter is not otherwise set. - returned: always + returned: success type: str sample: 4096 min_val: description: - Minimum allowed value of the parameter (null for non-numeric values). - returned: always + returned: success type: str sample: 64 max_val: description: - Maximum allowed value of the parameter (null for non-numeric values). - returned: always + returned: success type: str sample: 2147483647 sourcefile: @@ -475,20 +488,20 @@ settings: - Null for values set from sources other than configuration files, or when examined by a user who is neither a superuser or a member of pg_read_all_settings. - Helpful when using include directives in configuration files. - returned: always + returned: success type: str sample: /var/lib/pgsql/10/data/postgresql.auto.conf context: description: - Context required to set the parameter's value. - For more information see U(https://www.postgresql.org/docs/current/view-pg-settings.html). - returned: always + returned: success type: str sample: user vartype: description: - Parameter type (bool, enum, integer, real, or string). - returned: always + returned: success type: str sample: integer val_in_bytes: @@ -500,14 +513,14 @@ settings: pretty_val: description: - Value presented in the pretty form. - returned: always + returned: success type: str sample: 2MB pending_restart: description: - True if the value has been changed in the configuration file but needs a restart; or false otherwise. - Returns only if C(settings) is passed. 
- returned: always + returned: success type: bool sample: false ''' @@ -515,31 +528,25 @@ settings: import re from fnmatch import fnmatch -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - +from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, -) +from ansible.module_utils.six import iteritems +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, ensure_required_libs, get_conn_params, + get_server_version, + pg_cursor_args, postgres_common_argument_spec, ) -from ansible.module_utils.six import iteritems -from ansible.module_utils._text import to_native - # =========================================== # PostgreSQL module specific support methods. # + class PgDbConn(object): """Auxiliary class for working with PostgreSQL connection objects. @@ -558,14 +565,14 @@ class PgDbConn(object): Note: connection parameters are passed by self.module object. """ - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(self.module) conn_params = get_conn_params(self.module, self.module.params, warn_db_default=False) self.db_conn, dummy = connect_to_db(self.module, conn_params, fail_on_conn=fail_on_conn) if self.db_conn is None: # only happens if fail_on_conn is False and there actually was an issue connecting to the DB return None - return self.db_conn.cursor(cursor_factory=DictCursor) + return self.db_conn.cursor(**pg_cursor_args) def reconnect(self, dbname): """Reconnect to another database and return a PostgreSQL cursor object. @@ -588,7 +595,7 @@ class PgClusterInfo(object): Arguments: module (AnsibleModule): Object of AnsibleModule class. - db_conn_obj (psycopg2.connect): PostgreSQL connection object. + db_conn_obj (psycopg.connect): PostgreSQL connection object. 
""" def __init__(self, module, db_conn_obj): @@ -696,7 +703,7 @@ class PgClusterInfo(object): "WHERE table_schema = 'pg_catalog' " "AND table_name = 'pg_subscription'") columns_result = self.__exec_sql(columns_sub_table) - columns = ", ".join(["s.%s" % column[0] for column in columns_result]) + columns = ", ".join(["s.%s" % column["column_name"] for column in columns_result]) query = ("SELECT %s, r.rolname AS ownername, d.datname AS dbname " "FROM pg_catalog.pg_subscription s " @@ -736,22 +743,22 @@ class PgClusterInfo(object): "AND column_name = 'spcoptions'") if not opt: - query = ("SELECT s.spcname, pg_catalog.pg_get_userbyid(s.spcowner) as rolname, s.spcacl " + query = ("SELECT s.spcname, pg_catalog.pg_get_userbyid(s.spcowner) as rolname, s.spcacl::text " "FROM pg_tablespace AS s ") else: - query = ("SELECT s.spcname, pg_catalog.pg_get_userbyid(s.spcowner) as rolname, s.spcacl, s.spcoptions " + query = ("SELECT s.spcname, pg_catalog.pg_get_userbyid(s.spcowner) as rolname, s.spcacl::text, s.spcoptions " "FROM pg_tablespace AS s ") res = self.__exec_sql(query) ts_dict = {} for i in res: - ts_name = i[0] + ts_name = i["spcname"] ts_info = dict( - spcowner=i[1], - spcacl=i[2] if i[2] else '', + spcowner=i["rolname"], + spcacl=i["spcacl"] if i["spcacl"] else '', ) if opt: - ts_info['spcoptions'] = i[3] if i[3] else [] + ts_info["spcoptions"] = i["spcoptions"] if i["spcoptions"] else [] ts_dict[ts_name] = ts_info @@ -763,7 +770,7 @@ class PgClusterInfo(object): res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM " "information_schema.tables " "WHERE table_name = 'pg_extension')") - if not res[0][0]: + if not res[0]["exists"]: return True query = ("SELECT e.extname, e.extversion, n.nspname, c.description " @@ -776,12 +783,12 @@ class PgClusterInfo(object): res = self.__exec_sql(query) ext_dict = {} for i in res: - ext_ver_raw = i[1] + ext_ver_raw = i["extversion"] - if re.search(r'^([0-9]+([\-]*[0-9]+)?\.)*[0-9]+([\-]*[0-9]+)?$', i[1]) is None: + if re.search(r'^([0-9]+([\-]*[0-9]+)?\.)*[0-9]+([\-]*[0-9]+)?$', i["extversion"]) is None: ext_ver = [None, None] else: - ext_ver = i[1].split('.') + ext_ver = i["extversion"].split('.') if re.search(r'-', ext_ver[0]) is not None: ext_ver = ext_ver[0].split('-') else: @@ -791,14 +798,14 @@ class PgClusterInfo(object): except IndexError: ext_ver.append(None) - ext_dict[i[0]] = dict( + ext_dict[i["extname"]] = dict( extversion=dict( major=int(ext_ver[0]) if ext_ver[0] else None, minor=int(ext_ver[1]) if ext_ver[1] else None, raw=ext_ver_raw, ), - nspname=i[2], - description=i[3], + nspname=i["nspname"], + description=i["description"], ) return ext_dict @@ -817,11 +824,11 @@ class PgClusterInfo(object): res = self.__exec_sql(query) rol_dict = {} for i in res: - rol_dict[i[0]] = dict( - superuser=i[1], - canlogin=i[2], - valid_until=i[3] if i[3] else '', - member_of=i[4] if i[4] else [], + rol_dict[i["rolname"]] = dict( + superuser=i["rolsuper"], + canlogin=i["rolcanlogin"], + valid_until=i["rolvaliduntil"] if i["rolvaliduntil"] else '', + member_of=i["memberof"] if i["memberof"] else [], ) self.pg_info["roles"] = rol_dict @@ -832,7 +839,7 @@ class PgClusterInfo(object): res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM " "information_schema.tables " "WHERE table_name = 'pg_replication_slots')") - if not res[0][0]: + if not res[0]["exists"]: return True query = ("SELECT slot_name, plugin, slot_type, database, " @@ -845,11 +852,11 @@ class PgClusterInfo(object): rslot_dict = {} for i in res: - rslot_dict[i[0]] = dict( - plugin=i[1], - slot_type=i[2], - 
database=i[3], - active=i[4], + rslot_dict[i["slot_name"]] = dict( + plugin=i["plugin"], + slot_type=i["slot_type"], + database=i["database"], + active=i["active"], ) self.pg_info["repl_slots"] = rslot_dict @@ -874,9 +881,9 @@ class PgClusterInfo(object): set_dict = {} for i in res: val_in_bytes = None - setting = i[1] - if i[2]: - unit = i[2] + setting = i["setting"] + if i["unit"]: + unit = i["unit"] else: unit = '' @@ -892,22 +899,22 @@ class PgClusterInfo(object): if val_in_bytes is not None and val_in_bytes < 0: val_in_bytes = 0 - setting_name = i[0] + setting_name = i["name"] pretty_val = self.__get_pretty_val(setting_name) pending_restart = None if pend_rest_col_exists: - pending_restart = i[9] + pending_restart = i["pending_restart"] set_dict[setting_name] = dict( setting=setting, unit=unit, - context=i[3], - vartype=i[4], - boot_val=i[5] if i[5] else '', - min_val=i[6] if i[6] else '', - max_val=i[7] if i[7] else '', - sourcefile=i[8] if i[8] else '', + context=i["context"], + vartype=i["vartype"], + boot_val=i["boot_val"] if i["boot_val"] else '', + min_val=i["min_val"] if i["min_val"] else '', + max_val=i["max_val"] if i["max_val"] else '', + sourcefile=i["sourcefile"] if i["sourcefile"] else '', pretty_val=pretty_val, ) if val_in_bytes is not None: @@ -926,10 +933,10 @@ class PgClusterInfo(object): res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM " "information_schema.tables " "WHERE table_name = 'pg_stat_replication')") - if not res[0][0]: + if not res[0]["exists"]: return True - query = ("SELECT r.pid, pg_catalog.pg_get_userbyid(r.usesysid) AS rolname, r.application_name, r.client_addr, " + query = ("SELECT r.pid, pg_catalog.pg_get_userbyid(r.usesysid) AS rolname, r.application_name, r.client_addr::text, " "r.client_hostname, r.backend_start::text, r.state " "FROM pg_stat_replication AS r ") res = self.__exec_sql(query) @@ -940,42 +947,42 @@ class PgClusterInfo(object): repl_dict = {} for i in res: - repl_dict[i[0]] = dict( - usename=i[1], - app_name=i[2] if i[2] else '', - client_addr=i[3], - client_hostname=i[4] if i[4] else '', - backend_start=i[5], - state=i[6], + repl_dict[i["pid"]] = dict( + usename=i["rolname"], + app_name=i["application_name"] if i["application_name"] else '', + client_addr=i["client_addr"], + client_hostname=i["client_hostname"] if i["client_hostname"] else '', + backend_start=i["backend_start"], + state=i["state"], ) self.pg_info["replications"] = repl_dict def get_lang_info(self): """Get information about current supported languages.""" - query = ("SELECT l.lanname, pg_catalog.pg_get_userbyid(l.lanowner) AS rolname, l.lanacl " + query = ("SELECT l.lanname, pg_catalog.pg_get_userbyid(l.lanowner) AS rolname, l.lanacl::text " "FROM pg_language AS l ") res = self.__exec_sql(query) lang_dict = {} for i in res: - lang_dict[i[0]] = dict( - lanowner=i[1], - lanacl=i[2] if i[2] else '', + lang_dict[i["lanname"]] = dict( + lanowner=i["rolname"], + lanacl=i["lanacl"] if i["lanacl"] else '', ) return lang_dict def get_namespaces(self): """Get information about namespaces.""" - query = ("SELECT n.nspname, pg_catalog.pg_get_userbyid(n.nspowner) AS rolname, n.nspacl " + query = ("SELECT n.nspname, pg_catalog.pg_get_userbyid(n.nspowner) AS rolname, n.nspacl::text " "FROM pg_catalog.pg_namespace AS n ") res = self.__exec_sql(query) nsp_dict = {} for i in res: - nsp_dict[i[0]] = dict( - nspowner=i[1], - nspacl=i[2] if i[2] else '', + nsp_dict[i["nspname"]] = dict( + nspowner=i["rolname"], + nspacl=i["nspacl"] if i["nspacl"] else '', ) return nsp_dict @@ -983,7 
+990,7 @@ class PgClusterInfo(object): def get_pg_version(self): """Get major and minor PostgreSQL server version.""" query = "SELECT version()" - raw = self.__exec_sql(query)[0][0] + raw = self.__exec_sql(query)[0]["version"] full = raw.split()[1] m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", full) @@ -1005,21 +1012,21 @@ class PgClusterInfo(object): def get_recovery_state(self): """Get if the service is in recovery mode.""" - self.pg_info["in_recovery"] = self.__exec_sql("SELECT pg_is_in_recovery()")[0][0] + self.pg_info["in_recovery"] = self.__exec_sql("SELECT pg_is_in_recovery()")[0]["pg_is_in_recovery"] def get_db_info(self): """Get information about the current database.""" # Following query returns: # Name, Owner, Encoding, Collate, Ctype, Access Priv, Size query = ("SELECT d.datname, " - "pg_catalog.pg_get_userbyid(d.datdba), " - "pg_catalog.pg_encoding_to_char(d.encoding), " + "pg_catalog.pg_get_userbyid(d.datdba) AS username, " + "pg_catalog.pg_encoding_to_char(d.encoding) AS encoding, " "d.datcollate, " "d.datctype, " - "pg_catalog.array_to_string(d.datacl, E'\n'), " + "pg_catalog.array_to_string(d.datacl, E'\n') aclstring, " "CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') " "THEN pg_catalog.pg_database_size(d.datname)::text " - "ELSE 'No Access' END, " + "ELSE 'No Access' END as dbsize, " "t.spcname " "FROM pg_catalog.pg_database AS d " "JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid " @@ -1029,16 +1036,16 @@ class PgClusterInfo(object): db_dict = {} for i in res: - db_dict[i[0]] = dict( - owner=i[1], - encoding=i[2], - collate=i[3], - ctype=i[4], - access_priv=i[5] if i[5] else '', - size=i[6], + db_dict[i["datname"]] = dict( + owner=i["username"], + encoding=i["encoding"], + collate=i["datcollate"], + ctype=i["datctype"], + access_priv=i["aclstring"] if i["aclstring"] else '', + size=i["dbsize"], ) - if self.cursor.connection.server_version >= 100000: + if get_server_version(self.cursor.connection) >= 100000: subscr_info = self.get_subscr_info() for datname in db_dict: @@ -1053,7 +1060,7 @@ class PgClusterInfo(object): db_dict[datname]['namespaces'] = self.get_namespaces() db_dict[datname]['extensions'] = self.get_ext_info() db_dict[datname]['languages'] = self.get_lang_info() - if self.cursor.connection.server_version >= 100000: + if get_server_version(self.cursor.connection) >= 100000: db_dict[datname]['publications'] = self.get_pub_info() db_dict[datname]['subscriptions'] = subscr_info.get(datname, {}) @@ -1061,7 +1068,7 @@ class PgClusterInfo(object): def __get_pretty_val(self, setting): """Get setting's value represented by SHOW command.""" - return self.__exec_sql('SHOW "%s"' % setting)[0][0] + return self.__exec_sql('SHOW "%s"' % setting)[0][setting] def __exec_sql(self, query): """Execute SQL and return the result.""" diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py index 3d696dba6..2e9ac5b1b 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type @@ -12,6 +13,10 @@ DOCUMENTATION = r''' --- module: postgresql_lang short_description: Adds, removes or changes procedural languages with a PostgreSQL database +deprecated: + 
removed_in: "4.0.0" + why: As of PostgreSQL 9.1, most procedural languages have been made into extensions. + alternative: Use M(community.postgresql.postgresql_ext) instead. description: - Adds, removes or changes procedural languages with a PostgreSQL database. - This module allows you to add a language, remote a language or change the trust @@ -163,17 +168,21 @@ EXAMPLES = r''' RETURN = r''' queries: description: List of executed queries. - returned: always + returned: success type: list sample: ['CREATE LANGUAGE "acme"'] ''' +# WARNING - The postgresql_lang module has been deprecated and will be removed in community.postgresql 4.0.0. + from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import check_input +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, ensure_required_libs, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, ) @@ -191,7 +200,7 @@ def lang_istrusted(cursor, lang): """Checks if language is trusted for db""" query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s" cursor.execute(query, {'lang': lang}) - return cursor.fetchone()[0] + return cursor.fetchone()["lanpltrusted"] def lang_altertrust(cursor, lang, trust): @@ -235,21 +244,21 @@ def get_lang_owner(cursor, lang): """Get language owner. Args: - cursor (cursor): psycopg2 cursor object. + cursor (cursor): psycopg cursor object. lang (str): language name. """ query = ("SELECT r.rolname FROM pg_language l " "JOIN pg_roles r ON l.lanowner = r.oid " "WHERE l.lanname = %(lang)s") cursor.execute(query, {'lang': lang}) - return cursor.fetchone()[0] + return cursor.fetchone()["rolname"] def set_lang_owner(cursor, lang, owner): """Set language owner. Args: - cursor (cursor): psycopg2 cursor object. + cursor (cursor): psycopg cursor object. lang (str): language name. owner (str): name of new owner. """ @@ -294,11 +303,11 @@ def main(): # Check input for potentially dangerous elements: check_input(module, lang, session_role, owner) - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params) db_connection, dummy = connect_to_db(module, conn_params, autocommit=False) - cursor = db_connection.cursor() + cursor = db_connection.cursor(**pg_cursor_args) changed = False kw = {'db': db, 'lang': lang, 'trust': trust} diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py index 68d7db2ef..e3b72d20a 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py @@ -4,7 +4,8 @@ # Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -143,7 +144,7 @@ EXAMPLES = r''' RETURN = r''' queries: description: List of executed queries. 
- returned: always + returned: success type: str sample: [ "GRANT \"user_ro\" TO \"alice\"" ] granted: @@ -158,33 +159,28 @@ revoked: sample: { "ro_group": [ "alice", "bob" ] } state: description: Membership state that tried to be set. - returned: always + returned: success type: str sample: "present" ''' -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import check_input +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + PgMembership, connect_to_db, ensure_required_libs, get_conn_params, - PgMembership, + pg_cursor_args, postgres_common_argument_spec, ) - # =========================================== # Module execution. # + def main(): argument_spec = postgres_common_argument_spec() argument_spec.update( @@ -212,11 +208,11 @@ def main(): # Check input for potentially dangerous elements: check_input(module, groups, target_roles, session_role) - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params, warn_db_default=False) db_connection, dummy = connect_to_db(module, conn_params, autocommit=False) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) ############## # Create the object and do main job: diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py index 934e3b957..35e960fac 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py @@ -4,7 +4,8 @@ # Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -30,8 +31,14 @@ options: description: - Type of a database object. - Mutually exclusive with I(reassign_owned_by). + - I(obj_type=matview) is available since PostgreSQL 9.3. + - I(obj_type=event_trigger), I(obj_type=procedure), I(obj_type=publication), + I(obj_type=statistics), and I(obj_type=routine) are available since PostgreSQL 11. type: str - choices: [ database, function, matview, sequence, schema, table, tablespace, view ] + choices: [ aggregate, collation, conversion, database, domain, event_trigger, foreign_data_wrapper, + foreign_table, function, language, large_object, matview, procedure, publication, routine, + schema, sequence, server, statistics, table, tablespace, text_search_configuration, + text_search_dictionary, type, view ] aliases: - type reassign_owned_by: @@ -74,10 +81,20 @@ options: type: bool default: true version_added: '0.2.0' + +notes: +- Function Overloading is not supported so when I(obj_type) is C(aggregate), C(function), C(routine), or C(procedure) + I(obj_name) is considered the only object of same type with this name. 
+- Despite Function Overloading is not supported, when I(obj_type=aggregate) I(obj_name) must contain also aggregate + signature because it is required by SQL syntax. +- I(new_owner) must be a superuser if I(obj_type) is C(event_type) or C(foreign_data_wrapper). +- To manage subscriptions ownership use C(community.postgresql.postgresql_subscription) module. + seealso: - module: community.postgresql.postgresql_user - module: community.postgresql.postgresql_privs - module: community.postgresql.postgresql_membership +- module: community.postgresql.postgresql_subscription - name: PostgreSQL REASSIGN OWNED command reference description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation. link: https://www.postgresql.org/docs/current/sql-reassign-owned.html @@ -88,6 +105,7 @@ attributes: author: - Andrew Klychkov (@Andersson007) +- Daniele Giudice (@RealGreenDragon) extends_documentation_fragment: - community.postgresql.postgres @@ -143,18 +161,11 @@ EXAMPLES = r''' RETURN = r''' queries: description: List of executed queries. - returned: always + returned: success type: str sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ] ''' -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.postgresql.plugins.module_utils.database import ( check_input, @@ -162,12 +173,19 @@ from ansible_collections.community.postgresql.plugins.module_utils.database impo ) from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, + exec_sql, get_conn_params, + get_server_version, + pg_cursor_args, postgres_common_argument_spec, ) +VALID_OBJ_TYPES = ('aggregate', 'collation', 'conversion', 'database', 'domain', 'event_trigger', 'foreign_data_wrapper', + 'foreign_table', 'function', 'language', 'large_object', 'matview', 'procedure', 'publication', + 'routine', 'schema', 'sequence', 'server', 'statistics', 'table', 'tablespace', 'text_search_configuration', + 'text_search_dictionary', 'type', 'view') + class PgOwnership(object): @@ -175,7 +193,7 @@ class PgOwnership(object): Arguments: module (AnsibleModule): Object of Ansible module class. - cursor (psycopg2.connect.cursor): Cursor object for interaction with the database. + cursor (psycopg.connect.cursor): Cursor object for interaction with the database. role (str): Role name to set as a new owner of objects. Important: @@ -187,9 +205,10 @@ class PgOwnership(object): That's all. 
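The expanded obj_type choices and the note above that aggregates must be named together with their signature can be exercised as in this minimal sketch; the database acme, the role alice and the object names are illustrative placeholders:

- name: Make alice the owner of the aggregate my_sum(integer)
  community.postgresql.postgresql_owner:
    db: acme
    new_owner: alice
    obj_name: my_sum(integer)
    obj_type: aggregate

- name: Make alice the owner of the publication acme_pub
  community.postgresql.postgresql_owner:
    db: acme
    new_owner: alice
    obj_name: acme_pub
    obj_type: publication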
""" - def __init__(self, module, cursor, role): + def __init__(self, module, cursor, pg_version, role): self.module = module self.cursor = cursor + self.pg_version = pg_version self.check_role_exists(role) self.role = role self.changed = False @@ -285,6 +304,57 @@ class PgOwnership(object): elif obj_type == 'matview': self.__set_mat_view_owner() + elif obj_type == 'procedure': + self.__set_procedure_owner() + + elif obj_type == 'type': + self.__set_type_owner() + + elif obj_type == 'aggregate': + self.__set_aggregate_owner() + + elif obj_type == 'routine': + self.__set_routine_owner() + + elif obj_type == 'language': + self.__set_language_owner() + + elif obj_type == 'domain': + self.__set_domain_owner() + + elif obj_type == 'collation': + self.__set_collation_owner() + + elif obj_type == 'conversion': + self.__set_conversion_owner() + + elif obj_type == 'text_search_configuration': + self.__set_text_search_configuration_owner() + + elif obj_type == 'text_search_dictionary': + self.__set_text_search_dictionary_owner() + + elif obj_type == 'foreign_data_wrapper': + self.__set_foreign_data_wrapper_owner() + + elif obj_type == 'server': + self.__set_server_owner() + + elif obj_type == 'foreign_table': + self.__set_foreign_table_owner() + + elif obj_type == 'event_trigger': + self.__set_event_trigger_owner() + + elif obj_type == 'large_object': + self.__set_large_object_owner() + + elif obj_type == 'publication': + self.__set_publication_owner() + + elif obj_type == 'statistics': + self.__set_statistics_owner() + def __is_owner(self): """Return True if self.role is the current object owner.""" if self.obj_type == 'table': @@ -298,7 +368,11 @@ class PgOwnership(object): "WHERE d.datname = %(obj_name)s " "AND r.rolname = %(role)s") - elif self.obj_type == 'function': + elif self.obj_type in ('aggregate', 'function', 'routine', 'procedure'): + if self.obj_type == 'routine' and self.pg_version < 110000: + self.module.fail_json(msg="PostgreSQL version must be >= 11 for obj_type=routine.") + if self.obj_type == 'procedure' and self.pg_version < 110000: + self.module.fail_json(msg="PostgreSQL version must be >= 11 for obj_type=procedure.") query = ("SELECT 1 FROM pg_proc AS f " "JOIN pg_roles AS r ON f.proowner = r.oid " "WHERE f.proname = %(obj_name)s " @@ -327,11 +401,101 @@ class PgOwnership(object): "AND viewowner = %(role)s") elif self.obj_type == 'matview': + if self.pg_version < 90300: + self.module.fail_json(msg="PostgreSQL version must be >= 9.3 for obj_type=matview.") query = ("SELECT 1 FROM pg_matviews " "WHERE matviewname = %(obj_name)s " "AND matviewowner = %(role)s") - query_params = {'obj_name': self.obj_name, 'role': self.role} + elif self.obj_type in ('domain', 'type'): + query = ("SELECT 1 FROM pg_type AS t " + "JOIN pg_roles AS r ON t.typowner = r.oid " + "WHERE t.typname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'language': + query = ("SELECT 1 FROM pg_language AS l " + "JOIN pg_roles AS r ON l.lanowner = r.oid " + "WHERE l.lanname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'collation': + query = ("SELECT 1 FROM pg_collation AS c " + "JOIN pg_roles AS r ON c.collowner = r.oid " + "WHERE c.collname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'conversion': + query = ("SELECT 1 FROM pg_conversion AS c " + "JOIN pg_roles AS r ON c.conowner = r.oid " + "WHERE c.conname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'text_search_configuration': + query = ("SELECT 1 FROM 
pg_ts_config AS t " + "JOIN pg_roles AS r ON t.cfgowner = r.oid " + "WHERE t.cfgname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'text_search_dictionary': + query = ("SELECT 1 FROM pg_ts_dict AS t " + "JOIN pg_roles AS r ON t.dictowner = r.oid " + "WHERE t.dictname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'foreign_data_wrapper': + query = ("SELECT 1 FROM pg_foreign_data_wrapper AS f " + "JOIN pg_roles AS r ON f.fdwowner = r.oid " + "WHERE f.fdwname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'server': + query = ("SELECT 1 FROM pg_foreign_server AS f " + "JOIN pg_roles AS r ON f.srvowner = r.oid " + "WHERE f.srvname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'foreign_table': + query = ("SELECT 1 FROM pg_class AS c " + "JOIN pg_roles AS r ON c.relowner = r.oid " + "WHERE c.relkind = 'f' AND c.relname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'event_trigger': + if self.pg_version < 110000: + self.module.fail_json(msg="PostgreSQL version must be >= 11 for obj_type=event_trigger.") + query = ("SELECT 1 FROM pg_event_trigger AS e " + "JOIN pg_roles AS r ON e.evtowner = r.oid " + "WHERE e.evtname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'large_object': + query = ("SELECT 1 FROM pg_largeobject_metadata AS l " + "JOIN pg_roles AS r ON l.lomowner = r.oid " + "WHERE l.oid = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'publication': + if self.pg_version < 110000: + self.module.fail_json(msg="PostgreSQL version must be >= 11 for obj_type=publication.") + query = ("SELECT 1 FROM pg_publication AS p " + "JOIN pg_roles AS r ON p.pubowner = r.oid " + "WHERE p.pubname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'statistics': + if self.pg_version < 110000: + self.module.fail_json(msg="PostgreSQL version must be >= 11 for obj_type=statistics.") + query = ("SELECT 1 FROM pg_statistic_ext AS s " + "JOIN pg_roles AS r ON s.stxowner = r.oid " + "WHERE s.stxname = %(obj_name)s " + "AND r.rolname = %(role)s") + + if self.obj_type in ('function', 'aggregate', 'procedure', 'routine'): + query_params = {'obj_name': self.obj_name.split('(')[0], 'role': self.role} + else: + query_params = {'obj_name': self.obj_name, 'role': self.role} + return exec_sql(self, query, query_params, add_to_executed=False) def __set_db_owner(self): @@ -346,7 +510,7 @@ class PgOwnership(object): def __set_seq_owner(self): """Set the sequence owner.""" - query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'sequence'), self.role) self.changed = exec_sql(self, query, return_bool=True) @@ -375,10 +539,121 @@ class PgOwnership(object): def __set_mat_view_owner(self): """Set the materialized view owner.""" + if self.pg_version < 90300: + self.module.fail_json(msg="PostgreSQL version must be >= 9.3 for obj_type=matview.") + query = 'ALTER MATERIALIZED VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), self.role) self.changed = exec_sql(self, query, return_bool=True) + def __set_procedure_owner(self): + """Set the procedure owner.""" + if self.pg_version < 110000: + self.module.fail_json(msg="PostgreSQL version must be >= 11 for obj_type=procedure.") + + query = 'ALTER PROCEDURE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = 
exec_sql(self, query, return_bool=True) + + def __set_type_owner(self): + """Set the type owner.""" + query = 'ALTER TYPE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_aggregate_owner(self): + """Set the aggregate owner.""" + query = 'ALTER AGGREGATE %s OWNER TO "%s"' % (self.obj_name, self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_routine_owner(self): + """Set the routine owner.""" + if self.pg_version < 110000: + self.module.fail_json(msg="PostgreSQL version must be >= 11 for obj_type=routine.") + query = 'ALTER ROUTINE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_language_owner(self): + """Set the language owner.""" + query = 'ALTER LANGUAGE %s OWNER TO "%s"' % (self.obj_name, self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_domain_owner(self): + """Set the domain owner.""" + query = 'ALTER DOMAIN %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_collation_owner(self): + """Set the collation owner.""" + query = 'ALTER COLLATION %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_conversion_owner(self): + """Set the conversion owner.""" + query = 'ALTER CONVERSION %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_text_search_configuration_owner(self): + """Set the text search configuration owner.""" + query = 'ALTER TEXT SEARCH CONFIGURATION %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_text_search_dictionary_owner(self): + """Set the text search dictionary owner.""" + query = 'ALTER TEXT SEARCH DICTIONARY %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_foreign_data_wrapper_owner(self): + """Set the foreign data wrapper owner.""" + query = 'ALTER FOREIGN DATA WRAPPER %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_server_owner(self): + """Set the server owner.""" + query = 'ALTER SERVER %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_foreign_table_owner(self): + """Set the foreign table owner.""" + query = 'ALTER FOREIGN TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_event_trigger_owner(self): + """Set the event trigger owner.""" + query = 'ALTER EVENT TRIGGER %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_large_object_owner(self): + """Set the large object owner.""" + query = 'ALTER LARGE OBJECT %s OWNER TO "%s"' % (self.obj_name, self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_publication_owner(self): + """Set the publication owner.""" + if self.pg_version < 110000: + 
self.module.fail_json(msg="PostgreSQL version must be >= 11 for obj_type=publication.") + query = 'ALTER PUBLICATION %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'publication'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_statistics_owner(self): + """Set the statistics owner.""" + if self.pg_version < 110000: + self.module.fail_json(msg="PostgreSQL version must be >= 11 for obj_type=statistics.") + query = 'ALTER STATISTICS %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + def __role_exists(self, role): """Return True if role exists, otherwise return False.""" query_params = {'role': role} @@ -390,14 +665,12 @@ class PgOwnership(object): # Module execution. # - def main(): argument_spec = postgres_common_argument_spec() argument_spec.update( new_owner=dict(type='str', required=True), obj_name=dict(type='str'), - obj_type=dict(type='str', aliases=['type'], choices=[ - 'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']), + obj_type=dict(type='str', aliases=['type'], choices=VALID_OBJ_TYPES), reassign_owned_by=dict(type='list', elements='str'), fail_on_role=dict(type='bool', default=True), db=dict(type='str', aliases=['login_db']), @@ -426,15 +699,16 @@ def main(): # Check input for potentially dangerous elements: check_input(module, new_owner, obj_name, reassign_owned_by, session_role) - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params) db_connection, dummy = connect_to_db(module, conn_params, autocommit=False) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) + pg_version = get_server_version(db_connection) ############## # Create the object and do main job: - pg_ownership = PgOwnership(module, cursor, new_owner) + pg_ownership = PgOwnership(module, cursor, pg_version, new_owner) # if we want to change ownership: if obj_name: diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py index 002e7817d..82f5df173 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py @@ -80,16 +80,6 @@ options: description: - Additional options for the authentication I(method). type: str - order: - description: - - The entries will be written out in a specific order. - With this option you can control by which field they are ordered first, second and last. - s=source, d=databases, u=users. - This option is deprecated since 2.9 and will be removed in community.postgresql 3.0.0. - Sortorder is now hardcoded to sdu. - type: str - default: sdu - choices: [ sdu, sud, dsu, dus, usd, uds ] overwrite: description: - Remove all existing rules before adding rules. (Like I(state=absent) for all pre-existing rules.) @@ -143,7 +133,6 @@ notes: In that situation, the 'ip specific rule' will never hit, it is in the C(pg_hba) file obsolete. After the C(pg_hba) file is rewritten by the M(community.postgresql.postgresql_pg_hba) module, the ip specific rule will be sorted above the range rule. And then it will hit, which will give unexpected results. 
- - With the 'order' parameter you can control which field is used to sort first, next and last. seealso: - name: PostgreSQL pg_hba.conf file reference @@ -232,7 +221,7 @@ EXAMPLES = ''' RETURN = r''' msgs: description: List of textual messages what was done. - returned: always + returned: success type: list sample: "msgs": [ @@ -247,7 +236,7 @@ backup_file: sample: /tmp/pg_hba_jxobj_p pg_hba: description: List of the pg_hba rules as they are configured in the specified hba file. - returned: always + returned: success type: list sample: "pg_hba": [ @@ -271,8 +260,9 @@ try: except ImportError: IPADDRESS_IMP_ERR = traceback.format_exc() -import tempfile import shutil +import tempfile + from ansible.module_utils.basic import AnsibleModule, missing_required_lib # from ansible.module_utils.postgres import postgres_common_argument_spec @@ -280,7 +270,6 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer", "ldap", "radius", "cert", "pam", "scram-sha-256"] PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl", "hostgssenc", "hostnogssenc"] -PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"] PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options'] WHITESPACES_RE = re.compile(r'\s+') @@ -322,14 +311,10 @@ class PgHba(object): pg_hba_file - the pg_hba file almost always /etc/pg_hba """ - def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False, keep_comments_at_rules=False): - if order not in PG_HBA_ORDERS: - msg = "invalid order setting {0} (should be one of '{1}')." - raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS))) + def __init__(self, pg_hba_file=None, backup=False, create=False, keep_comments_at_rules=False): self.pg_hba_file = pg_hba_file self.rules = None self.comment = None - self.order = order self.backup = backup self.last_backup = None self.create = create @@ -512,7 +497,7 @@ class PgHbaRule(dict): def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None, method=None, options=None, line=None, comment=None): ''' - This function can be called with a comma seperated list of databases and a comma seperated + This function can be called with a comma separated list of databases and a comma separated list of users and it will act as a generator that returns a expanded list of rules one by one. ''' @@ -535,11 +520,11 @@ class PgHbaRule(dict): # Some sanity checks for key in ['method', 'type']: if key not in self: - raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self)) + raise PgHbaRuleError('Missing {method} in rule {rule}'.format(method=key, rule=self)) if self['method'] not in PG_HBA_METHODS: - msg = "invalid method {0} (should be one of '{1}')." - raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS))) + msg = "invalid method {method} (should be one of '{valid_methods}')." + raise PgHbaRuleValueError(msg.format(method=self['method'], valid_methods="', '".join(PG_HBA_METHODS))) if self['type'] not in PG_HBA_TYPES: msg = "invalid connection type {0} (should be one of '{1}')." 
@@ -549,7 +534,7 @@ class PgHbaRule(dict): self.unset('src') self.unset('mask') elif 'src' not in self: - raise PgHbaRuleError('Missing src in rule {1}'.format(self)) + raise PgHbaRuleError('Missing src in rule {rule}'.format(rule=self)) elif '/' in self['src']: self.unset('mask') else: @@ -722,7 +707,7 @@ class PgHbaRule(dict): # hostname, let's assume only one host matches, which is # IPv4/32 or IPv6/128 (both have weight 128) return 128 - raise PgHbaValueError('Cannot deduct the source weight of this source {1}'.format(sourceobj)) + raise PgHbaValueError('Cannot deduct the source weight of this source {sourceobj}'.format(sourceobj=sourceobj)) def source_type_weight(self): """Give a weight on the type of this source. @@ -780,8 +765,6 @@ def main(): method=dict(type='str', default='md5', choices=PG_HBA_METHODS), netmask=dict(type='str'), options=dict(type='str'), - order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS, - removed_in_version='3.0.0', removed_from_collection='community.postgresql'), keep_comments_at_rules=dict(type='bool', default=False), state=dict(type='str', default="present", choices=["absent", "present"]), users=dict(type='str', default='all'), @@ -803,7 +786,6 @@ def main(): else: backup = module.params['backup'] dest = module.params["dest"] - order = module.params["order"] keep_comments_at_rules = module.params["keep_comments_at_rules"] rules = module.params["rules"] rules_behavior = module.params["rules_behavior"] @@ -811,7 +793,7 @@ def main(): ret = {'msgs': []} try: - pg_hba = PgHba(dest, order, backup=backup, create=create, keep_comments_at_rules=keep_comments_at_rules) + pg_hba = PgHba(dest, backup=backup, create=create, keep_comments_at_rules=keep_comments_at_rules) except PgHbaError as error: module.fail_json(msg='Error reading file:\n{0}'.format(error)) diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py index fd104022b..e04f45376 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -76,17 +77,17 @@ EXAMPLES = r''' RETURN = r''' is_available: description: PostgreSQL server availability. - returned: always + returned: success type: bool sample: true server_version: description: PostgreSQL server version. - returned: always + returned: success type: dict sample: { major: 13, minor: 2, full: '13.2', raw: 'PostgreSQL 13.2 on x86_64-pc-linux-gnu' } conn_err_msg: description: Connection error message. 
- returned: always + returned: success type: str sample: '' version_added: 1.7.0 @@ -94,26 +95,18 @@ conn_err_msg: import re -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, -) +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, + exec_sql, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, ) - # =========================================== # PostgreSQL module specific support methods. # @@ -132,7 +125,7 @@ class PgPing(object): def get_pg_version(self): query = "SELECT version()" - raw = exec_sql(self, query, add_to_executed=False)[0][0] + raw = exec_sql(self, query, add_to_executed=False)[0]["version"] if not raw: return @@ -190,7 +183,7 @@ def main(): conn_err_msg='', ) - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params, warn_db_default=False) db_connection, err = connect_to_db(module, conn_params, fail_on_conn=False) @@ -198,7 +191,7 @@ def main(): result['conn_err_msg'] = err if db_connection is not None: - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) # Do job: pg_ping = PgPing(module, cursor) diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py index 44aaeba3b..2c4ca7086 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py @@ -6,6 +6,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -17,9 +18,6 @@ description: - This module is basically a wrapper around most of the functionality of PostgreSQL's GRANT and REVOKE statements with detection of changes (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)). -- B(WARNING) The C(usage_on_types) option has been B(deprecated) and will be removed in - community.postgresql 3.0.0, please use the C(type) option with value C(type) to - GRANT/REVOKE permissions on types explicitly. options: database: description: @@ -48,10 +46,11 @@ options: - The C(foreign_data_wrapper) and C(foreign_server) object types are available since Ansible version 2.8. - The C(type) choice is available since Ansible version 2.10. - The C(procedure) is supported since collection version 1.3.0 and PostgreSQL 11. + - The C(parameter) is supported since collection version 3.1.0 and PostgreSQL 15. type: str default: table choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function, - group, language, table, tablespace, schema, sequence, type , procedure] + group, language, table, tablespace, schema, sequence, type, procedure, parameter ] objs: description: - Comma separated list of database objects to set privileges on. 
@@ -61,6 +60,7 @@ options: (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available for C(function) and C(partition table) since Ansible 2.8). - C(procedure) is supported since PostgreSQL 11 and community.postgresql collection 1.3.0. + - C(parameter) is supported since PostgreSQL 15 and community.postgresql collection 3.1.0. - If I(type) is C(database), this parameter can be omitted, in which case privileges are set for the database specified via I(database). - If I(type) is C(function) or C(procedure), colons (":") in object names will be @@ -80,8 +80,9 @@ options: roles: description: - Comma separated list of role (user/group) names to set permissions for. - - The special value C(PUBLIC) can be provided instead to set permissions - for the implicitly defined PUBLIC group. + - Roles C(PUBLIC), C(CURRENT_ROLE), C(CURRENT_USER), C(SESSION_USER) are implicitly defined in PostgreSQL. + - C(CURRENT_USER) and C(SESSION_USER) implicit roles are supported since collection version 3.1.0 and PostgreSQL 9.5. + - C(CURRENT_ROLE) implicit role is supported since collection version 3.1.0 and PostgreSQL 14. type: str required: true aliases: @@ -128,18 +129,6 @@ options: type: bool default: true version_added: '0.2.0' - usage_on_types: - description: - - This option has been B(deprecated) and will be removed in community.postgresql 3.0.0, - please use the I(type) option with value C(type) to GRANT/REVOKE permissions on types - explicitly. - - When adding default privileges, the module always implicitly adds ``USAGE ON TYPES``. - - To avoid this behavior, set I(usage_on_types) to C(false). - - Added to save backwards compatibility. - - Used only when adding default privileges, ignored otherwise. - type: bool - default: true - version_added: '1.2.0' notes: - Parameters that accept comma separated lists (I(privs), I(objs), I(roles)) @@ -148,7 +137,6 @@ notes: C(present) and I(grant_option) to C(false) (see examples). - Note that when revoking privileges from a role R, this role may still have access via privileges granted to any role R is a member of including C(PUBLIC). -- Note that when you use C(PUBLIC) role, the module always reports that the state has been changed. - Note that when revoking privileges from a role R, you do so as the user specified via I(login_user). If R has been granted the same privileges by another user also, R can still access database objects via these privileges. @@ -178,6 +166,7 @@ extends_documentation_fragment: author: - Bernhard Weitzhofer (@b6d) - Tobias Birkefeld (@tcraxs) +- Daniele Giudice (@RealGreenDragon) ''' EXAMPLES = r''' @@ -402,43 +391,74 @@ EXAMPLES = r''' privs: usage objs: schemas role: datascience + +# Available since community.postgresql 3.1.0 +# Needs PostgreSQL 15 or higher +- name: GRANT SET ON PARAMETER log_destination,log_line_prefix TO logtest + community.postgresql.postgresql_privs: + database: logtest + state: present + privs: SET + type: parameter + objs: log_destination,log_line_prefix + roles: logtest + +- name: GRANT ALTER SYSTEM ON PARAMETER primary_conninfo,synchronous_standby_names TO replicamgr + community.postgresql.postgresql_privs: + database: replicamgr + state: present + privs: ALTER_SYSTEM + type: parameter + objs: primary_conninfo,synchronous_standby_names + roles: replicamgr ''' RETURN = r''' queries: description: List of executed queries. 
- returned: always + returned: success type: list sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";'] ''' import traceback -PSYCOPG2_IMP_ERR = None -try: - import psycopg2 - import psycopg2.extensions -except ImportError: - PSYCOPG2_IMP_ERR = traceback.format_exc() - psycopg2 = None - +from ansible.module_utils._text import to_native # import module snippets -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.postgresql.plugins.module_utils.database import ( - pg_quote_identifier, check_input, + pg_quote_identifier, ) -from ansible_collections.community.postgresql.plugins.module_utils.postgres import postgres_common_argument_spec, get_conn_params -from ansible.module_utils._text import to_native +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + HAS_PSYCOPG, + PSYCOPG_VERSION, + connect_to_db, + ensure_required_libs, + get_conn_params, + get_server_version, + pg_cursor_args, + postgres_common_argument_spec, +) +from ansible_collections.community.postgresql.plugins.module_utils.version import \ + LooseVersion -VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', - 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT', - 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL')) +if HAS_PSYCOPG and PSYCOPG_VERSION < LooseVersion("3.0"): + from psycopg2 import Error as PsycopgError +elif HAS_PSYCOPG: + from psycopg import Error as PsycopgError + +VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'CREATE', + 'CONNECT', 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL', 'SET', 'ALTER_SYSTEM')) VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'), 'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'), 'FUNCTIONS': ('ALL', 'EXECUTE'), 'TYPES': ('ALL', 'USAGE'), 'SCHEMAS': ('CREATE', 'USAGE'), } +VALID_IMPLICIT_ROLES = {'PUBLIC': 0, + 'CURRENT_USER': 95000, + 'SESSION_USER': 95000, + 'CURRENT_ROLE': 140000, } executed_queries = [] @@ -447,19 +467,6 @@ class Error(Exception): pass -def role_exists(module, cursor, rolname): - """Check user exists or not""" - query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname - try: - cursor.execute(query) - return cursor.rowcount > 0 - - except Exception as e: - module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e))) - - return False - - # We don't have functools.partial in Python < 2.5 def partial(f, *args, **kwargs): """Partial function application""" @@ -476,21 +483,30 @@ def partial(f, *args, **kwargs): class Connection(object): - """Wrapper around a psycopg2 connection with some convenience methods""" + """Wrapper around a psycopg connection with some convenience methods""" def __init__(self, params, module): self.database = params.database self.module = module + # Ensure psycopg libraries are available before connecting to DB: + ensure_required_libs(module) conn_params = get_conn_params(module, params.__dict__, warn_db_default=False) - sslrootcert = params.ca_cert - if psycopg2.__version__ < '2.4.3' and sslrootcert is not None: - raise ValueError('psycopg2 must be at least 2.4.3 in order to user the ca_cert parameter') + self.connection, dummy = connect_to_db(module, conn_params, autocommit=False) + self.cursor = self.connection.cursor(**pg_cursor_args) + self.pg_version = get_server_version(self.connection) + + # implicit roles in current pg 
version + self.pg_implicit_roles = tuple( + implicit_role for implicit_role, version_min in VALID_IMPLICIT_ROLES.items() if self.pg_version >= version_min + ) - self.connection = psycopg2.connect(**conn_params) - self.cursor = self.connection.cursor() - self.pg_version = self.connection.server_version + def execute(self, query, input_vars=None): + try: + self.cursor.execute(query, input_vars) + except Exception as e: + self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e))) def commit(self): self.connection.commit() @@ -498,22 +514,32 @@ class Connection(object): def rollback(self): self.connection.rollback() - @property - def encoding(self): - """Connection encoding in Python-compatible form""" - return psycopg2.extensions.encodings[self.connection.encoding] + # Methods for implicit roles managements + + def is_implicit_role(self, rolname): + return rolname.upper() in self.pg_implicit_roles # Methods for querying database objects + def role_exists(self, rolname): + # check if rolname is a implicit role + if self.is_implicit_role(rolname): + return True + + # check if rolname is present in pg_catalog.pg_roles + query = "SELECT 1 FROM pg_catalog.pg_roles WHERE rolname = %s" + self.execute(query, (rolname,)) + return self.cursor.rowcount > 0 + # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like # phrases in GRANT or REVOKE statements, therefore alternative methods are # provided here. def schema_exists(self, schema): - query = """SELECT count(*) + query = """SELECT count(*) c FROM pg_catalog.pg_namespace WHERE nspname = %s""" - self.cursor.execute(query, (schema,)) - return self.cursor.fetchone()[0] > 0 + self.execute(query, (schema,)) + return self.cursor.fetchone()["c"] > 0 def get_all_tables_in_schema(self, schema): if schema: @@ -524,12 +550,12 @@ class Connection(object): FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')""" - self.cursor.execute(query, (schema,)) + self.execute(query, (schema,)) else: query = ("SELECT relname FROM pg_catalog.pg_class " "WHERE relkind in ('r', 'v', 'm', 'p')") - self.cursor.execute(query) - return [t[0] for t in self.cursor.fetchall()] + self.execute(query) + return [t["relname"] for t in self.cursor.fetchall()] def get_all_sequences_in_schema(self, schema): if schema: @@ -539,17 +565,17 @@ class Connection(object): FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'S'""" - self.cursor.execute(query, (schema,)) + self.execute(query, (schema,)) else: - self.cursor.execute("SELECT relname FROM pg_catalog.pg_class WHERE relkind = 'S'") - return [t[0] for t in self.cursor.fetchall()] + self.execute("SELECT relname FROM pg_catalog.pg_class WHERE relkind = 'S'") + return [t["relname"] for t in self.cursor.fetchall()] def get_all_functions_in_schema(self, schema): if schema: if not self.schema_exists(schema): raise Error('Schema "%s" does not exist.' 
% schema) - query = ("SELECT p.proname, oidvectortypes(p.proargtypes) " + query = ("SELECT p.proname, oidvectortypes(p.proargtypes) ptypes " "FROM pg_catalog.pg_proc p " "JOIN pg_namespace n ON n.oid = p.pronamespace " "WHERE nspname = %s") @@ -557,30 +583,30 @@ class Connection(object): if self.pg_version >= 110000: query += " and p.prokind = 'f'" - self.cursor.execute(query, (schema,)) + self.execute(query, (schema,)) else: - self.cursor.execute("SELECT p.proname, oidvectortypes(p.proargtypes) FROM pg_catalog.pg_proc p") - return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()] + self.execute("SELECT p.proname, oidvectortypes(p.proargtypes) ptypes FROM pg_catalog.pg_proc p") + return ["%s(%s)" % (t["proname"], t["ptypes"]) for t in self.cursor.fetchall()] def get_all_procedures_in_schema(self, schema): if self.pg_version < 110000: - raise Error("PostgreSQL verion must be >= 11 for type=procedure. Exit") + raise Error("PostgreSQL version must be >= 11 for type=procedure. Exit") if schema: if not self.schema_exists(schema): raise Error('Schema "%s" does not exist.' % schema) - query = ("SELECT p.proname, oidvectortypes(p.proargtypes) " + query = ("SELECT p.proname, oidvectortypes(p.proargtypes) ptypes " "FROM pg_catalog.pg_proc p " "JOIN pg_namespace n ON n.oid = p.pronamespace " "WHERE nspname = %s and p.prokind = 'p'") - self.cursor.execute(query, (schema,)) + self.execute(query, (schema,)) else: - query = ("SELECT p.proname, oidvectortypes(p.proargtypes) " + query = ("SELECT p.proname, oidvectortypes(p.proargtypes) ptypes " "FROM pg_catalog.pg_proc p WHERE p.prokind = 'p'") - self.cursor.execute(query) - return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()] + self.execute(query) + return ["%s(%s)" % (t["proname"], t["ptypes"]) for t in self.cursor.fetchall()] # Methods for getting access control lists and group membership info @@ -592,71 +618,71 @@ class Connection(object): def get_table_acls(self, schema, tables): if schema: - query = """SELECT relacl + query = """SELECT relacl::text FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s) ORDER BY relname""" - self.cursor.execute(query, (schema, tables)) + self.execute(query, (schema, tables)) else: - query = ("SELECT relacl FROM pg_catalog.pg_class " + query = ("SELECT relacl::text FROM pg_catalog.pg_class " "WHERE relkind in ('r','p','v','m') AND relname = ANY (%s) " "ORDER BY relname") - self.cursor.execute(query) - return [t[0] for t in self.cursor.fetchall()] + self.execute(query) + return [t["relacl"] for t in self.cursor.fetchall()] def get_sequence_acls(self, schema, sequences): if schema: - query = """SELECT relacl + query = """SELECT relacl::text FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s) ORDER BY relname""" - self.cursor.execute(query, (schema, sequences)) + self.execute(query, (schema, sequences)) else: - query = ("SELECT relacl FROM pg_catalog.pg_class " + query = ("SELECT relacl::text FROM pg_catalog.pg_class " "WHERE relkind = 'S' AND relname = ANY (%s) ORDER BY relname") - self.cursor.execute(query) - return [t[0] for t in self.cursor.fetchall()] + self.execute(query) + return [t["relacl"] for t in self.cursor.fetchall()] def get_function_acls(self, schema, function_signatures): funcnames = [f.split('(', 1)[0] for f in function_signatures] if schema: - query = """SELECT proacl + query = """SELECT 
proacl::text FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace WHERE nspname = %s AND proname = ANY (%s) ORDER BY proname, proargtypes""" - self.cursor.execute(query, (schema, funcnames)) + self.execute(query, (schema, funcnames)) else: - query = ("SELECT proacl FROM pg_catalog.pg_proc WHERE proname = ANY (%s) " + query = ("SELECT proacl::text FROM pg_catalog.pg_proc WHERE proname = ANY (%s) " "ORDER BY proname, proargtypes") - self.cursor.execute(query) - return [t[0] for t in self.cursor.fetchall()] + self.execute(query) + return [t["proacl"] for t in self.cursor.fetchall()] def get_schema_acls(self, schemas): - query = """SELECT nspacl FROM pg_catalog.pg_namespace + query = """SELECT nspacl::text FROM pg_catalog.pg_namespace WHERE nspname = ANY (%s) ORDER BY nspname""" - self.cursor.execute(query, (schemas,)) - return [t[0] for t in self.cursor.fetchall()] + self.execute(query, (schemas,)) + return [t["nspacl"] for t in self.cursor.fetchall()] def get_language_acls(self, languages): - query = """SELECT lanacl FROM pg_catalog.pg_language + query = """SELECT lanacl::text FROM pg_catalog.pg_language WHERE lanname = ANY (%s) ORDER BY lanname""" - self.cursor.execute(query, (languages,)) - return [t[0] for t in self.cursor.fetchall()] + self.execute(query, (languages,)) + return [t["lanacl"] for t in self.cursor.fetchall()] def get_tablespace_acls(self, tablespaces): - query = """SELECT spcacl FROM pg_catalog.pg_tablespace + query = """SELECT spcacl::text FROM pg_catalog.pg_tablespace WHERE spcname = ANY (%s) ORDER BY spcname""" - self.cursor.execute(query, (tablespaces,)) - return [t[0] for t in self.cursor.fetchall()] + self.execute(query, (tablespaces,)) + return [t["spcacl"] for t in self.cursor.fetchall()] def get_database_acls(self, databases): - query = """SELECT datacl FROM pg_catalog.pg_database + query = """SELECT datacl::text FROM pg_catalog.pg_database WHERE datname = ANY (%s) ORDER BY datname""" - self.cursor.execute(query, (databases,)) - return [t[0] for t in self.cursor.fetchall()] + self.execute(query, (databases,)) + return [t["datacl"] for t in self.cursor.fetchall()] def get_group_memberships(self, groups): query = """SELECT roleid, grantor, member, admin_option @@ -664,48 +690,56 @@ class Connection(object): JOIN pg_catalog.pg_roles r ON r.oid = am.roleid WHERE r.rolname = ANY(%s) ORDER BY roleid, grantor, member""" - self.cursor.execute(query, (groups,)) + self.execute(query, (groups,)) return self.cursor.fetchall() def get_default_privs(self, schema, *args): if schema: - query = """SELECT defaclacl + query = """SELECT defaclacl::text FROM pg_default_acl a JOIN pg_namespace b ON a.defaclnamespace=b.oid WHERE b.nspname = %s;""" - self.cursor.execute(query, (schema,)) + self.execute(query, (schema,)) else: - self.cursor.execute("SELECT defaclacl FROM pg_default_acl;") - return [t[0] for t in self.cursor.fetchall()] + self.execute("SELECT defaclacl::text FROM pg_default_acl;") + return [t["defaclacl"] for t in self.cursor.fetchall()] def get_foreign_data_wrapper_acls(self, fdws): - query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper + query = """SELECT fdwacl::text FROM pg_catalog.pg_foreign_data_wrapper WHERE fdwname = ANY (%s) ORDER BY fdwname""" - self.cursor.execute(query, (fdws,)) - return [t[0] for t in self.cursor.fetchall()] + self.execute(query, (fdws,)) + return [t["fdwacl"] for t in self.cursor.fetchall()] def get_foreign_server_acls(self, fs): - query = """SELECT srvacl FROM pg_catalog.pg_foreign_server + query = 
"""SELECT srvacl::text FROM pg_catalog.pg_foreign_server WHERE srvname = ANY (%s) ORDER BY srvname""" - self.cursor.execute(query, (fs,)) - return [t[0] for t in self.cursor.fetchall()] + self.execute(query, (fs,)) + return [t["srvacl"] for t in self.cursor.fetchall()] def get_type_acls(self, schema, types): if schema: - query = """SELECT t.typacl FROM pg_catalog.pg_type t + query = """SELECT t.typacl::text FROM pg_catalog.pg_type t JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname""" - self.cursor.execute(query, (schema, types)) + self.execute(query, (schema, types)) else: - query = "SELECT typacl FROM pg_catalog.pg_type WHERE typname = ANY (%s) ORDER BY typname" - self.cursor.execute(query) - return [t[0] for t in self.cursor.fetchall()] + query = "SELECT typacl::text FROM pg_catalog.pg_type WHERE typname = ANY (%s) ORDER BY typname" + self.execute(query) + return [t["typacl"] for t in self.cursor.fetchall()] + + def get_parameter_acls(self, parameters): + if self.pg_version < 150000: + raise Error("PostgreSQL version must be >= 15 for type=parameter. Exit") + + query = """SELECT paracl::text FROM pg_catalog.pg_parameter_acl + WHERE parname = ANY (%s) ORDER BY parname""" + self.cursor.execute(query, (parameters,)) + return [t["paracl"] for t in self.cursor.fetchall()] # Manipulating privileges - # WARNING: usage_on_types has been deprecated and will be removed in community.postgresql 3.0.0, please use an obj_type of 'type' instead. def manipulate_privs(self, obj_type, privs, objs, orig_objs, roles, target_roles, - state, grant_option, schema_qualifier=None, fail_on_role=True, usage_on_types=True): + state, grant_option, schema_qualifier=None, fail_on_role=True): """Manipulate database object privileges. :param obj_type: Type of database object to grant/revoke @@ -714,9 +748,8 @@ class Connection(object): or None if type is "group". :param objs: List of database objects to grant/revoke privileges for. - :param orig_objs: ALL_IN_SCHEMA or None - :param roles: Either a list of role names or "PUBLIC" - for the implicitly defined "PUBLIC" group + :param orig_objs: ALL_IN_SCHEMA or None. + :param roles: List of role names. :param target_roles: List of role names to grant/revoke default privileges as. :param state: "present" to grant privileges, "absent" to revoke. @@ -752,6 +785,8 @@ class Connection(object): get_status = self.get_foreign_server_acls elif obj_type == 'type': get_status = partial(self.get_type_acls, schema_qualifier) + elif obj_type == 'parameter': + get_status = self.get_parameter_acls else: raise Error('Unsupported database object type "%s".' 
% obj_type) @@ -787,33 +822,18 @@ class Connection(object): obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids] # Note: obj_type has been checked against a set of string literals # and privs was escaped when it was parsed - # Note: Underscores are replaced with spaces to support multi-word obj_type + # Note: Underscores are replaced with spaces to support multi-word privs and obj_type if orig_objs is not None: - set_what = '%s ON %s %s' % (','.join(privs), orig_objs, quoted_schema_qualifier) + set_what = '%s ON %s %s' % (','.join(privs).replace('_', ' '), orig_objs, quoted_schema_qualifier) else: - set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '), ','.join(obj_ids)) + set_what = '%s ON %s %s' % (','.join(privs).replace('_', ' '), obj_type.replace('_', ' '), ','.join(obj_ids)) # for_whom: SQL-fragment specifying for whom to set the above - if roles == 'PUBLIC': - for_whom = 'PUBLIC' - else: - for_whom = [] - for r in roles: - if not role_exists(self.module, self.cursor, r): - if fail_on_role: - self.module.fail_json(msg="Role '%s' does not exist" % r.strip()) - - else: - self.module.warn("Role '%s' does not exist, pass it" % r.strip()) - else: - for_whom.append('"%s"' % r) - - if not for_whom: - return False - - for_whom = ','.join(for_whom) + if not roles: + return False + for_whom = ','.join(roles) - # as_who: + # as_who: SQL-fragment specifying to who to set the above as_who = None if target_roles: as_who = ','.join('"%s"' % r for r in target_roles) @@ -828,13 +848,10 @@ class Connection(object): .for_schema(quoted_schema_qualifier) \ .set_what(set_what) \ .for_objs(objs) \ - .usage_on_types(usage_on_types) \ .build() executed_queries.append(query) - self.cursor.execute(query) - if roles == 'PUBLIC': - return True + self.execute(query) status_after = get_status(objs) @@ -843,7 +860,8 @@ class Connection(object): # to compare NoneType elements by sort method. 
if e is None: return '' - return e + # With Psycopg 3 we get a list of dicts, it is easier to sort it as strings + return str(e) status_before.sort(key=nonesorted) status_after.sort(key=nonesorted) @@ -860,7 +878,6 @@ class QueryBuilder(object): self._state = state self._schema = None self._objs = None - self._usage_on_types = None self.query = [] def for_objs(self, objs): @@ -879,10 +896,6 @@ class QueryBuilder(object): self._for_whom = who return self - def usage_on_types(self, usage_on_types): - self._usage_on_types = usage_on_types - return self - def as_who(self, target_roles): self._as_who = target_roles return self @@ -948,18 +961,6 @@ class QueryBuilder(object): self._for_whom)) self.add_grant_option() - if self._usage_on_types: - - if self._as_who: - self.query.append( - 'ALTER DEFAULT PRIVILEGES FOR ROLE {0}{1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who, - self._schema, - self._for_whom)) - else: - self.query.append( - 'ALTER DEFAULT PRIVILEGES{0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom)) - self.add_grant_option() - def build_present(self): if self._obj_type == 'default_privs': self.add_default_revoke() @@ -1004,7 +1005,8 @@ def main(): 'default_privs', 'foreign_data_wrapper', 'foreign_server', - 'type', ]), + 'type', + 'parameter', ]), objs=dict(required=False, aliases=['obj']), schema=dict(required=False), roles=dict(required=True, aliases=['role']), @@ -1019,9 +1021,6 @@ def main(): removed_from_collection='community.postgreql'), fail_on_role=dict(type='bool', default=True), trust_input=dict(type='bool', default=True), - usage_on_types=dict(type='bool', default=True, - removed_in_version='3.0.0', - removed_from_collection='community.postgresql'), ) module = AnsibleModule( @@ -1030,7 +1029,6 @@ def main(): ) fail_on_role = module.params['fail_on_role'] - usage_on_types = module.params['usage_on_types'] # Create type object as namespace for module params p = type('Params', (), module.params) @@ -1083,19 +1081,7 @@ def main(): check_input(module, p.roles, p.target_roles, p.session_role, p.schema) # Connect to Database - if not psycopg2: - module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR) - try: - conn = Connection(p, module) - except psycopg2.Error as e: - module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc()) - except TypeError as e: - if 'sslrootcert' in e.args[0]: - module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert') - module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) - except ValueError as e: - # We raise this when the psycopg library is too old - module.fail_json(msg=to_native(e)) + conn = Connection(p, module) if p.session_role: try: @@ -1156,17 +1142,25 @@ def main(): objs = [obj.replace(':', ',') for obj in objs] # roles - if p.roles.upper() == 'PUBLIC': - roles = 'PUBLIC' - else: - roles = p.roles.split(',') - - if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]): + roles = [] + roles_raw = p.roles.split(',') + for r in roles_raw: + if conn.role_exists(r): + if conn.is_implicit_role(r): + # Some implicit roles (as PUBLIC) works in uppercase without double quotes and in lowercase with double quotes. + # Other implicit roles (as SESSION_USER) works only in uppercase without double quotes. + # So the approach that works for all implicit roles is uppercase without double quotes. 
+ roles.append('%s' % r.upper()) + else: + roles.append('"%s"' % r.replace('"', '""')) + else: if fail_on_role: - module.fail_json(msg="Role '%s' does not exist" % roles[0].strip()) + module.fail_json(msg="Role '%s' does not exist" % r) else: - module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip()) - module.exit_json(changed=False, queries=executed_queries) + module.warn("Role '%s' does not exist, pass it" % r) + if not roles: + module.warn("No valid roles provided, nothing to do") + module.exit_json(changed=False, queries=executed_queries) # check if target_roles is set with type: default_privs if p.target_roles and not p.type == 'default_privs': @@ -1190,14 +1184,13 @@ def main(): grant_option=p.grant_option, schema_qualifier=p.schema, fail_on_role=fail_on_role, - usage_on_types=usage_on_types, ) except Error as e: conn.rollback() module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - except psycopg2.Error as e: + except PsycopgError as e: conn.rollback() module.fail_json(msg=to_native(e)) diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py index 5edfc2abb..0ab26e7a5 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py @@ -6,6 +6,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type @@ -74,6 +75,12 @@ options: type: bool default: true version_added: '0.2.0' + comment: + description: + - Sets a comment on the publication. + - To reset the comment, pass an empty string. + type: str + version_added: '3.3.0' notes: - PostgreSQL version must be 10 or greater. @@ -104,6 +111,7 @@ EXAMPLES = r''' community.postgresql.postgresql_publication: db: test name: acme + comment: Made by Ansible - name: Create publication "acme" publishing only prices and vehicles tables community.postgresql.postgresql_publication: @@ -142,12 +150,12 @@ RETURN = r''' exists: description: - Flag indicates the publication exists or not at the end of runtime. - returned: always + returned: success type: bool sample: true queries: description: List of executed queries. - returned: always + returned: success type: str sample: [ 'DROP PUBLICATION "acme" CASCADE' ] owner: @@ -176,26 +184,22 @@ parameters: ''' -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems from ansible_collections.community.postgresql.plugins.module_utils.database import ( check_input, pg_quote_identifier, ) from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, + exec_sql, get_conn_params, + get_server_version, + pg_cursor_args, postgres_common_argument_spec, + set_comment, ) -from ansible.module_utils.six import iteritems SUPPORTED_PG_VERSION = 10000 @@ -228,12 +232,12 @@ class PgPublication(): Args: module (AnsibleModule): Object of AnsibleModule class. - cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + cursor (cursor): Cursor object of psycopg library to work with PostgreSQL. name (str): The name of the publication. 
Attributes: module (AnsibleModule): Object of AnsibleModule class. - cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + cursor (cursor): Cursor object of psycopg library to work with PostgreSQL. name (str): Name of the publication. executed_queries (list): List of executed queries. attrs (dict): Dict with publication attributes. @@ -276,6 +280,7 @@ class PgPublication(): return False self.attrs['owner'] = pub_info.get('pubowner') + self.attrs['comment'] = pub_info.get('comment') if pub_info.get('comment') is not None else '' # Publication DML operations: self.attrs['parameters']['publish'] = {} @@ -288,10 +293,8 @@ class PgPublication(): # If alltables flag is False, get the list of targeted tables: if not pub_info.get('puballtables'): table_info = self.__get_tables_pub_info() - # Join sublists [['schema', 'table'], ...] to ['schema.table', ...] - # for better representation: for i, schema_and_table in enumerate(table_info): - table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table') + table_info[i] = pg_quote_identifier(schema_and_table["schema_dot_table"], 'table') self.attrs['tables'] = table_info else: @@ -300,13 +303,14 @@ class PgPublication(): # Publication exists: return True - def create(self, tables, params, owner, check_mode=True): + def create(self, tables, params, owner, comment, check_mode=True): """Create the publication. Args: tables (list): List with names of the tables that need to be added to the publication. params (dict): Dict contains optional publication parameters and their values. owner (str): Name of the publication owner. + comment (str): Comment on the publication. Kwargs: check_mode (bool): If True, don't actually change anything, @@ -340,15 +344,20 @@ class PgPublication(): # executed_queries and return: self.__pub_set_owner(owner, check_mode=check_mode) + if comment is not None: + set_comment(self.cursor, comment, 'publication', + self.name, check_mode, self.executed_queries) + return changed - def update(self, tables, params, owner, check_mode=True): + def update(self, tables, params, owner, comment, check_mode=True): """Update the publication. Args: tables (list): List with names of the tables that need to be presented in the publication. params (dict): Dict contains optional publication parameters and their values. owner (str): Name of the publication owner. + comment (str): Comment on the publication. 
Kwargs: check_mode (bool): If True, don't actually change anything, @@ -414,6 +423,10 @@ class PgPublication(): if owner != self.attrs['owner']: changed = self.__pub_set_owner(owner, check_mode=check_mode) + if comment is not None and comment != self.attrs['comment']: + changed = set_comment(self.cursor, comment, 'publication', + self.name, check_mode, self.executed_queries) + return changed def drop(self, cascade=False, check_mode=True): @@ -448,13 +461,15 @@ class PgPublication(): "AND column_name = 'pubtruncate'"), add_to_executed=False) if pgtrunc_sup: - query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, " + query = ("SELECT obj_description(p.oid, 'pg_publication') AS comment, " + "r.rolname AS pubowner, p.puballtables, p.pubinsert, " "p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p " "JOIN pg_catalog.pg_roles AS r " "ON p.pubowner = r.oid " "WHERE p.pubname = %(pname)s") else: - query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, " + query = ("SELECT obj_description(p.oid, 'pg_publication') AS comment, " + "r.rolname AS pubowner, p.puballtables, p.pubinsert, " "p.pubupdate , p.pubdelete FROM pg_publication AS p " "JOIN pg_catalog.pg_roles AS r " "ON p.pubowner = r.oid " @@ -472,7 +487,7 @@ class PgPublication(): Returns: List of dicts with published tables. """ - query = ("SELECT schemaname, tablename " + query = ("SELECT schemaname || '.' || tablename as schema_dot_table " "FROM pg_publication_tables WHERE pubname = %(pname)s") return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False) @@ -603,6 +618,7 @@ def main(): cascade=dict(type='bool', default=False), session_role=dict(type='str'), trust_input=dict(type='bool', default=True), + comment=dict(type='str', default=None), ) module = AnsibleModule( argument_spec=argument_spec, @@ -618,6 +634,7 @@ def main(): cascade = module.params['cascade'] session_role = module.params['session_role'] trust_input = module.params['trust_input'] + comment = module.params['comment'] if not trust_input: # Check input for potentially dangerous elements: @@ -626,7 +643,8 @@ def main(): else: params_list = ['%s = %s' % (k, v) for k, v in iteritems(params)] - check_input(module, name, tables, owner, session_role, params_list) + check_input(module, name, tables, owner, + session_role, params_list, comment) if state == 'absent': if tables: @@ -639,16 +657,16 @@ def main(): if state == 'present' and cascade: module.warn('parameter "cascade" is ignored when "state=present"') - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) # Connect to DB and make cursor object: conn_params = get_conn_params(module, module.params) # We check publication state without DML queries execution, so set autocommit: db_connection, dummy = connect_to_db(module, conn_params, autocommit=True) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) # Check version: - if cursor.connection.server_version < SUPPORTED_PG_VERSION: + if get_server_version(cursor.connection) < SUPPORTED_PG_VERSION: module.fail_json(msg="PostgreSQL server version should be 10.0 or greater") # Nothing was changed by default: @@ -664,10 +682,12 @@ def main(): # If module.check_mode=True, nothing will be changed: if state == 'present': if not publication.exists: - changed = publication.create(tables, params, owner, check_mode=module.check_mode) + changed = 
publication.create(tables, params, owner, comment, + check_mode=module.check_mode) else: - changed = publication.update(tables, params, owner, check_mode=module.check_mode) + changed = publication.update(tables, params, owner, comment, + check_mode=module.check_mode) elif state == 'absent': changed = publication.drop(cascade=cascade, check_mode=module.check_mode) diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py index 83f1665ee..2c219f4fa 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py @@ -5,7 +5,8 @@ # Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -14,17 +15,11 @@ module: postgresql_query short_description: Run PostgreSQL queries description: - Runs arbitrary PostgreSQL queries. -- B(WARNING) The C(path_to_script) and C(as_single_query) options as well as - the C(query_list) and C(query_all_results) return values have been B(deprecated) and - will be removed in community.postgresql 3.0.0, please use the - M(community.postgresql.postgresql_script) module to execute statements from scripts. -- Does not run against backup files. Use M(community.postgresql.postgresql_db) with I(state=restore) - to run queries on files made by pg_dump/pg_dumpall utilities. options: query: description: - - SQL query string or list of queries to run. Variables can be escaped with psycopg2 syntax - U(http://initd.org/psycopg/docs/usage.html). + - SQL query string or list of queries to run. Variables can be escaped with psycopg syntax + U(https://www.psycopg.org/psycopg3/docs/basic/params.html). type: raw positional_args: description: @@ -39,19 +34,6 @@ options: When the value is a list, it will be converted to PostgreSQL array. - Mutually exclusive with I(positional_args). type: dict - path_to_script: - description: - - This option has been B(deprecated) and will be removed in community.postgresql 3.0.0, - please use the M(community.postgresql.postgresql_script) module to execute - statements from scripts. - - Path to a SQL script on the target machine. - - If the script contains several queries, they must be semicolon-separated. - - To run scripts containing objects with semicolons - (for example, function and procedure definitions), use I(as_single_query=true). - - To upload dumps or to execute other complex scripts, the preferable way - is to use the M(community.postgresql.postgresql_db) module with I(state=restore). - - Mutually exclusive with I(query). - type: path session_role: description: - Switch to session_role after connecting. The specified session_role must @@ -91,25 +73,6 @@ options: type: list elements: str version_added: '1.0.0' - as_single_query: - description: - - This option has been B(deprecated) and will be removed in community.postgresql 3.0.0, - please use the M(community.postgresql.postgresql_script) module to execute - statements from scripts. - - If C(true), when reading from the I(path_to_script) file, - executes its whole content in a single query (not splitting it up - into separate queries by semicolons). 
It brings the following changes in - the module's behavior. - - When C(true), the C(query_all_results) return value - contains only the result of the last statement. - - Whether the state is reported as changed or not - is determined by the last statement of the file. - - Used only when I(path_to_script) is specified, otherwise ignored. - - If set to C(false), the script can contain only semicolon-separated queries. - (see the I(path_to_script) option documentation). - type: bool - default: true - version_added: '1.1.0' seealso: - module: community.postgresql.postgresql_script - module: community.postgresql.postgresql_db @@ -178,20 +141,6 @@ EXAMPLES = r''' db: 'test' query: 'insert into test (test) values (now())' - -# WARNING: The path_to_script and as_single_query options have been deprecated -# and will be removed in community.postgresql 3.0.0, please -# use the community.postgresql.postgresql_script module instead. -# If your script contains semicolons as parts of separate objects -# like functions, procedures, and so on, use "as_single_query: true" -- name: Run queries from SQL script using UTF-8 client encoding for session - community.postgresql.postgresql_query: - db: test_db - path_to_script: /var/lib/pgsql/test.sql - positional_args: - - 1 - encoding: UTF-8 - - name: Example of using autocommit parameter community.postgresql.postgresql_query: db: test_db @@ -260,37 +209,35 @@ query: description: - Executed query. - When reading several queries from a file, it contains only the last one. - returned: always + returned: success type: str sample: 'SELECT * FROM bar' statusmessage: description: - Attribute containing the message returned by the command. - When reading several queries from a file, it contains a message of the last one. - returned: always + returned: success type: str sample: 'INSERT 0 1' query_result: description: - List of dictionaries in column:value form representing returned rows. - When running queries from a file, returns result of the last query. - returned: always + returned: success type: list elements: dict sample: [{"Column": "Value1"},{"Column": "Value2"}] query_list: description: - List of executed queries. - Useful when reading several queries from a file. - returned: always + returned: success type: list elements: str sample: ['SELECT * FROM foo', 'SELECT * FROM bar'] query_all_results: description: - List containing results of all queries executed (one sublist for every query). - Useful when running a list of queries. 
- returned: always + returned: success type: list elements: list sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"Column": "Value1"},{"Column": "Value2"}]] @@ -304,33 +251,34 @@ rowcount: sample: 5 ''' -try: - from psycopg2 import ProgrammingError as Psycopg2ProgrammingError - from psycopg2.extras import DictCursor -except ImportError: - # it is needed for checking 'no result to fetch' in main(), - # psycopg2 availability will be checked by connect_to_db() into - # ansible.module_utils.postgres - pass - import re +from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, -) +from ansible.module_utils.six import iteritems +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + HAS_PSYCOPG, + PSYCOPG_VERSION, + TYPES_NEED_TO_CONVERT, connect_to_db, convert_elements_to_pg_arrays, convert_to_supported, ensure_required_libs, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, set_search_path, - TYPES_NEED_TO_CONVERT, ) -from ansible.module_utils._text import to_native -from ansible.module_utils.six import iteritems +from ansible_collections.community.postgresql.plugins.module_utils.version import \ + LooseVersion + +if HAS_PSYCOPG and PSYCOPG_VERSION < LooseVersion("3.0"): + from psycopg2 import ProgrammingError as PsycopgProgrammingError +elif HAS_PSYCOPG: + from psycopg import ProgrammingError as PsycopgProgrammingError + # =========================================== # Module execution. @@ -353,12 +301,10 @@ def main(): positional_args=dict(type='list', elements='raw'), named_args=dict(type='dict'), session_role=dict(type='str'), - path_to_script=dict(type='path'), autocommit=dict(type='bool', default=False), encoding=dict(type='str'), trust_input=dict(type='bool', default=True), search_path=dict(type='list', elements='str'), - as_single_query=dict(type='bool', default=True), ) module = AnsibleModule( @@ -370,13 +316,11 @@ def main(): query = module.params["query"] positional_args = module.params["positional_args"] named_args = module.params["named_args"] - path_to_script = module.params["path_to_script"] autocommit = module.params["autocommit"] encoding = module.params["encoding"] session_role = module.params["session_role"] trust_input = module.params["trust_input"] search_path = module.params["search_path"] - as_single_query = module.params["as_single_query"] if query and not isinstance(query, (str, list)): module.fail_json(msg="query argument must be of type string or list") @@ -388,52 +332,20 @@ def main(): if autocommit and module.check_mode: module.fail_json(msg="Using autocommit is mutually exclusive with check_mode") - if path_to_script and query: - module.fail_json(msg="path_to_script is mutually exclusive with query") - query_list = [] - if path_to_script: - depr_msg = ("The 'path_to_script' option is deprecated. Please use the " - "'community.postgresql.postgresql_script' module to execute " - "statements from scripts") - module.deprecate(msg=depr_msg, version="3.0.0", collection_name="community.postgresql") - try: - with open(path_to_script, 'rb') as f: - query = to_native(f.read()) - - if not as_single_query: - depr_msg = ("The 'as_single_query' option is deprecated. 
Please use the " - "'community.postgresql.postgresql_script' module to execute " - "statements from scripts") - module.deprecate(msg=depr_msg, version="3.0.0", collection_name="community.postgresql") - - if ';' in query: - for q in query.split(';'): - if insane_query(q): - continue - else: - query_list.append(q) - else: - query_list.append(query) - else: - query_list.append(query) - - except Exception as e: - module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e))) - else: - if isinstance(query, str): - query_list.append(query) - else: # if it's a list - query_list = query + if isinstance(query, str): + query_list.append(query) + else: # if it's a list + query_list = query - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params) - db_connection, dummy = connect_to_db(module, conn_params, autocommit=autocommit) if encoding is not None: - db_connection.set_client_encoding(encoding) - cursor = db_connection.cursor(cursor_factory=DictCursor) + conn_params["client_encoding"] = encoding + db_connection, dummy = connect_to_db(module, conn_params, autocommit=autocommit) + cursor = db_connection.cursor(**pg_cursor_args) if search_path: set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path])) @@ -461,6 +373,7 @@ def main(): # Execute query: for query in query_list: try: + current_query_txt = cursor.mogrify(query, args) cursor.execute(query, args) statusmessage = cursor.statusmessage if cursor.rowcount > 0: @@ -478,16 +391,21 @@ def main(): query_result.append(row) - except Psycopg2ProgrammingError as e: + # Psycopg 3 doesn't fail with 'no results to fetch' + # This exception will be triggered only in Psycopg 2 + except PsycopgProgrammingError as e: if to_native(e) == 'no results to fetch': query_result = {} except Exception as e: module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e)) + if query_result == []: + query_result = {} + query_all_results.append(query_result) - if 'SELECT' not in statusmessage: + if 'SELECT' not in statusmessage and 'SHOW' not in statusmessage: if re.search(re.compile(r'(UPDATE|INSERT|DELETE)'), statusmessage): s = statusmessage.split() if len(s) == 3: @@ -520,7 +438,7 @@ def main(): kw = dict( changed=changed, - query=cursor.query, + query=current_query_txt, query_list=query_list, statusmessage=statusmessage, query_result=query_result, diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py index f107e1aa0..bfacde6f8 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -59,6 +60,12 @@ options: type: bool default: true version_added: '0.2.0' + comment: + description: + - Sets a comment on the schema. + - To reset the comment, pass an empty string. + type: str + version_added: '3.3.0' seealso: - name: PostgreSQL schemas description: General information about PostgreSQL schemas. 
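A minimal, self-contained sketch of the psycopg 2/3 compatibility pattern used in the postgresql_query hunks above, assuming only that one of the two drivers is installed. The try/except ImportError form and the fetch_rows_or_empty helper are illustrative simplifications of the collection's HAS_PSYCOPG / PSYCOPG_VERSION gate, not part of the diff:

try:
    from psycopg import ProgrammingError as PsycopgProgrammingError   # psycopg 3
except ImportError:
    from psycopg2 import ProgrammingError as PsycopgProgrammingError  # psycopg 2


def fetch_rows_or_empty(cursor):
    """Return all rows as dicts, or {} when the statement produced no result set."""
    try:
        rows = [dict(row) for row in cursor.fetchall()]
    except PsycopgProgrammingError as exc:
        # psycopg2 raises this for statements without a result set;
        # psycopg 3 does not, so this branch is effectively 2.x-only.
        if 'no results to fetch' in str(exc):
            return {}
        raise
    # Mirror the modules' convention of returning {} instead of [].
    return rows if rows else {}
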
@@ -90,6 +97,7 @@ EXAMPLES = r''' community.postgresql.postgresql_schema: db: test name: acme + comment: 'My test schema' - name: Create a new schema acme with a user bob who will own it community.postgresql.postgresql_schema: @@ -106,38 +114,33 @@ EXAMPLES = r''' RETURN = r''' schema: description: Name of the schema. - returned: success, changed + returned: success type: str sample: "acme" queries: description: List of executed queries. - returned: always + returned: success type: list sample: ["CREATE SCHEMA \"acme\""] ''' import traceback -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - +from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + SQLParseError, + check_input, + pg_quote_identifier, +) from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, ensure_required_libs, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, + set_comment, ) -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, - pg_quote_identifier, - SQLParseError, -) -from ansible.module_utils._text import to_native executed_queries = [] @@ -159,7 +162,9 @@ def set_owner(cursor, schema, owner): def get_schema_info(cursor, schema): - query = ("SELECT schema_owner AS owner " + query = ("SELECT obj_description((SELECT oid " + "FROM pg_namespace WHERE nspname = %(schema)s), 'pg_namespace') " + "AS comment, schema_owner AS owner " "FROM information_schema.schemata " "WHERE schema_name = %(schema)s") cursor.execute(query, {'schema': schema}) @@ -185,7 +190,7 @@ def schema_delete(cursor, schema, cascade): return False -def schema_create(cursor, schema, owner): +def schema_create(cursor, schema, owner, comment): if not schema_exists(cursor, schema): query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')] if owner: @@ -193,24 +198,37 @@ def schema_create(cursor, schema, owner): query = ' '.join(query_fragments) cursor.execute(query) executed_queries.append(query) + if comment is not None: + set_comment(cursor, comment, 'schema', schema, False, executed_queries) return True else: schema_info = get_schema_info(cursor, schema) + changed = False if owner and owner != schema_info['owner']: - return set_owner(cursor, schema, owner) - else: - return False + changed = set_owner(cursor, schema, owner) + + if comment is not None: + current_comment = schema_info['comment'] if schema_info['comment'] is not None else '' + if comment != current_comment: + changed = set_comment(cursor, comment, 'schema', schema, False, executed_queries) or changed + + return changed -def schema_matches(cursor, schema, owner): +def schema_matches(cursor, schema, owner, comment): if not schema_exists(cursor, schema): return False else: schema_info = get_schema_info(cursor, schema) if owner and owner != schema_info['owner']: return False - else: - return True + if comment is not None: + # For the resetting comment feature (comment: '') to work correctly + current_comment = schema_info['comment'] if schema_info['comment'] is not None else '' + if comment != current_comment: + return False + + return True # =========================================== # Module execution. 
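The comment support added to schema_create() and schema_matches() above relies on one normalisation rule: obj_description() returns NULL when no comment is set, and the module treats that as an empty string, so comment: '' resets an existing comment and repeated runs stay idempotent. An illustrative sketch of that check, where comment_needs_update is a hypothetical helper rather than anything in the module:

def comment_needs_update(cursor, schema, desired_comment):
    # obj_description() returns NULL when no comment is set; normalise to ''
    # so that desired_comment == '' only reports a change while a comment exists.
    cursor.execute(
        "SELECT obj_description((SELECT oid FROM pg_namespace "
        "WHERE nspname = %(schema)s), 'pg_namespace') AS comment",
        {'schema': schema},
    )
    row = cursor.fetchone()
    current = row['comment'] if row and row['comment'] is not None else ''
    return desired_comment is not None and desired_comment != current
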
@@ -227,6 +245,7 @@ def main(): state=dict(type="str", default="present", choices=["absent", "present"]), session_role=dict(type="str"), trust_input=dict(type="bool", default=True), + comment=dict(type="str", default=None), ) module = AnsibleModule( @@ -240,25 +259,26 @@ def main(): cascade_drop = module.params["cascade_drop"] session_role = module.params["session_role"] trust_input = module.params["trust_input"] + comment = module.params["comment"] if not trust_input: # Check input for potentially dangerous elements: - check_input(module, schema, owner, session_role) + check_input(module, schema, owner, session_role, comment) changed = False - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params) db_connection, dummy = connect_to_db(module, conn_params, autocommit=True) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) try: if module.check_mode: if state == "absent": changed = not schema_exists(cursor, schema) elif state == "present": - changed = not schema_matches(cursor, schema, owner) + changed = not schema_matches(cursor, schema, owner, comment) module.exit_json(changed=changed, schema=schema) if state == "absent": @@ -269,7 +289,7 @@ def main(): elif state == "present": try: - changed = schema_create(cursor, schema, owner) + changed = schema_create(cursor, schema, owner, comment) except SQLParseError as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) except NotSupportedError as e: diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_script.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_script.py index acd97f4d2..02a32c2ac 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_script.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_script.py @@ -4,7 +4,8 @@ # Copyright: (c) 2022, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -180,7 +181,7 @@ query: - When the C(positional_args) or C(named_args) options are used, the query contains all variables that were substituted inside the database connector. - returned: always + returned: success type: str sample: 'SELECT * FROM bar' statusmessage: @@ -189,7 +190,7 @@ statusmessage: after executing the script content. - When there are several statements in the script, returns a message related to the last statement. - returned: always + returned: success type: str sample: 'INSERT 0 1' query_result: @@ -197,7 +198,7 @@ query_result: - List of dictionaries in the column:value form representing returned rows. - When there are several statements in the script, returns result of the last statement. 
- returned: always + returned: success type: list elements: dict sample: [{"Column": "Value1"},{"Column": "Value2"}] @@ -211,31 +212,31 @@ rowcount: sample: 5 ''' -try: - from psycopg2 import ProgrammingError as Psycopg2ProgrammingError - from psycopg2.extras import DictCursor -except ImportError: - # it is needed for checking 'no result to fetch' in main(), - # psycopg2 availability will be checked by connect_to_db() into - # ansible.module_utils.postgres - pass - +from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, -) +from ansible.module_utils.six import iteritems +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + HAS_PSYCOPG, + PSYCOPG_VERSION, + TYPES_NEED_TO_CONVERT, connect_to_db, convert_elements_to_pg_arrays, convert_to_supported, ensure_required_libs, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, set_search_path, - TYPES_NEED_TO_CONVERT, ) -from ansible.module_utils._text import to_native -from ansible.module_utils.six import iteritems +from ansible_collections.community.postgresql.plugins.module_utils.version import \ + LooseVersion + +if HAS_PSYCOPG and PSYCOPG_VERSION < LooseVersion("3.0"): + from psycopg2 import ProgrammingError as PsycopgProgrammingError +elif HAS_PSYCOPG: + from psycopg import ProgrammingError as PsycopgProgrammingError # =========================================== # Module execution. @@ -280,13 +281,13 @@ def main(): except Exception as e: module.fail_json(msg="Cannot read file '%s' : %s" % (path, to_native(e))) - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params) - db_connection, dummy = connect_to_db(module, conn_params, autocommit=True) if encoding is not None: - db_connection.set_client_encoding(encoding) - cursor = db_connection.cursor(cursor_factory=DictCursor) + conn_params["client_encoding"] = encoding + db_connection, dummy = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(**pg_cursor_args) if search_path: set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path])) @@ -306,6 +307,7 @@ def main(): # Execute script content: try: + current_query_txt = cursor.mogrify(script_content, args) cursor.execute(script_content, args) except Exception as e: cursor.close() @@ -316,9 +318,16 @@ def main(): rowcount = cursor.rowcount + # In Psycopg 2, only the result of the last statement is returned. + # In Psycopg 3, all the results are available. + # https://www.psycopg.org/psycopg3/docs/basic/from_pg2.html#multiple-results-returned-from-multiple-statements query_result = [] try: - for row in cursor.fetchall(): + result_set = cursor.fetchall() + if PSYCOPG_VERSION >= LooseVersion("3.0"): + while cursor.nextset() is not None: + result_set = cursor.fetchall() + for row in result_set: # Ansible engine does not support decimals. 
# An explicit conversion is required on the module's side row = dict(row) @@ -328,16 +337,21 @@ def main(): query_result.append(row) - except Psycopg2ProgrammingError as e: + # Psycopg 3 doesn't fail with 'no results to fetch' + # This exception will be triggered only in Psycopg 2 + except PsycopgProgrammingError as e: if to_native(e) == "no results to fetch": query_result = {} except Exception as e: module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e)) + if query_result == []: + query_result = {} + kw = dict( changed=True, - query=cursor.query, + query=current_query_txt, statusmessage=statusmessage, query_result=query_result, rowcount=rowcount, diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py index c874cb970..25b17f26b 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py @@ -4,7 +4,8 @@ # Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -232,91 +233,84 @@ EXAMPLES = r''' RETURN = r''' state: description: Sequence state at the end of execution. - returned: always + returned: success type: str sample: 'present' sequence: description: Sequence name. - returned: always + returned: success type: str sample: 'foobar' queries: description: List of queries that was tried to be executed. - returned: always + returned: success type: str sample: [ "CREATE SEQUENCE \"foo\"" ] schema: description: Name of the schema of the sequence. - returned: always + returned: success type: str sample: 'foo' data_type: description: Shows the current data type of the sequence. - returned: always + returned: success type: str sample: 'bigint' increment: description: The value of increment of the sequence. A positive value will make an ascending sequence, a negative one a descending sequence. - returned: always + returned: success type: int sample: -1 minvalue: description: The value of minvalue of the sequence. - returned: always + returned: success type: int sample: 1 maxvalue: description: The value of maxvalue of the sequence. - returned: always + returned: success type: int sample: 9223372036854775807 start: description: The value of start of the sequence. - returned: always + returned: success type: int sample: 12 cycle: description: Shows if the sequence cycle or not. - returned: always + returned: success type: bool sample: false owner: description: Shows the current owner of the sequence after the successful run of the task. - returned: always + returned: success type: str sample: 'postgres' newname: description: Shows the new sequence name after rename. - returned: on success + returned: success type: str sample: 'barfoo' newschema: description: Shows the new schema of the sequence after schema change. 
- returned: on success + returned: success type: str sample: 'foobar' ''' -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, -) +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, + exec_sql, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, ) @@ -326,11 +320,11 @@ class Sequence(object): Arguments: module (AnsibleModule) -- object of AnsibleModule class - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library Attributes: module (AnsibleModule) -- object of AnsibleModule class - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library changed (bool) -- something was changed after execution or not executed_queries (list) -- executed queries name (str) -- name of the sequence @@ -539,12 +533,12 @@ def main(): # Change autocommit to False if check_mode: autocommit = not module.check_mode - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) # Connect to DB and make cursor object: conn_params = get_conn_params(module, module.params) db_connection, dummy = connect_to_db(module, conn_params, autocommit=autocommit) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) ############## # Create the object and do main job: diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py index 966aeb004..d8d196acf 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -60,6 +61,7 @@ options: type: bool default: true version_added: '0.2.0' + notes: - Supported version of PostgreSQL is 9.4 and later. - Pay attention, change setting with 'postmaster' context can return changed is true @@ -86,8 +88,11 @@ seealso: - name: PostgreSQL ALTER SYSTEM command reference description: Complete reference of the ALTER SYSTEM command documentation. link: https://www.postgresql.org/docs/current/sql-altersystem.html + author: - Andrew Klychkov (@Andersson007) +- Daniele Giudice (@RealGreenDragon) + extends_documentation_fragment: - community.postgresql.postgres ''' @@ -134,22 +139,22 @@ EXAMPLES = r''' RETURN = r''' name: description: Name of PostgreSQL server parameter. - returned: always + returned: success type: str sample: 'shared_buffers' restart_required: description: Information about parameter current state. - returned: always + returned: success type: bool sample: true prev_val_pretty: description: Information about previous state of the parameter. 
- returned: always + returned: success type: str sample: '4MB' value_pretty: description: Information about current state of the parameter. - returned: always + returned: success type: str sample: '64MB' value: @@ -157,49 +162,77 @@ value: - Dictionary that contains the current parameter value (at the time of playbook finish). - Pay attention that for real change some parameters restart of PostgreSQL server is required. - Returns the current value in the check mode. - returned: always + returned: success type: dict sample: { "value": 67108864, "unit": "b" } context: description: - PostgreSQL setting context. - returned: always + returned: success type: str sample: user ''' -try: - from psycopg2.extras import DictCursor -except Exception: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from copy import deepcopy +from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, -) +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, ensure_required_libs, get_conn_params, + get_server_version, + pg_cursor_args, postgres_common_argument_spec, ) -from ansible.module_utils._text import to_native PG_REQ_VER = 90400 # To allow to set value like 1mb instead of 1MB, etc: LOWERCASE_SIZE_UNITS = ("mb", "gb", "tb") +# GUC_LIST_QUOTE parameters list for each version where they changed (from PG_REQ_VER). +# It is a tuple of tuples as we need to iterate it in order. +PARAMETERS_GUC_LIST_QUOTE = ( + (140000, ( + 'local_preload_libraries', + 'search_path', + 'session_preload_libraries', + 'shared_preload_libraries', + 'temp_tablespaces', + 'unix_socket_directories' + )), + (90400, ( + 'local_preload_libraries', + 'search_path', + 'session_preload_libraries', + 'shared_preload_libraries', + 'temp_tablespaces' + )), +) + + # =========================================== # PostgreSQL module specific support methods. 
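The PARAMETERS_GUC_LIST_QUOTE table above is deliberately ordered from the newest threshold down: the lookup returns the parameter set of the first entry whose version the server meets, which is how unix_socket_directories only counts as GUC_LIST_QUOTE from PostgreSQL 14 on. The unquoting this enables is shown in the functions just below; a small illustration of why it matters for idempotency, with unquote_guc_list as a hypothetical stand-in for param_guc_list_unquote:

def unquote_guc_list(value):
    # pg_settings can report GUC_LIST_QUOTE values with quoted elements,
    # e.g. '"$user", public'; normalise to '$user, public' so a value the
    # user writes without quotes compares equal and the task is not
    # reported as changed on every run.
    return ', '.join(elem.strip('" ') for elem in value.split(','))


assert unquote_guc_list('"$user", public') == '$user, public'
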
# -def param_get(cursor, module, name): +def param_is_guc_list_quote(server_version, name): + for guc_list_quote_ver, guc_list_quote_params in PARAMETERS_GUC_LIST_QUOTE: + if server_version >= guc_list_quote_ver: + return name in guc_list_quote_params + return False + + +def param_guc_list_unquote(value): + # Unquote GUC_LIST_QUOTE parameter (each element can be quoted or not) + # Assume the parameter is GUC_LIST_QUOTE (check in param_is_guc_list_quote function) + return ', '.join([v.strip('" ') for v in value.split(',')]) + + +def param_get(cursor, module, name, is_guc_list_quote): query = ("SELECT name, setting, unit, context, boot_val " "FROM pg_settings WHERE name = %(name)s") try: @@ -216,15 +249,16 @@ def param_get(cursor, module, name): "Please check its spelling or presence in your PostgreSQL version " "(https://www.postgresql.org/docs/current/runtime-config.html)" % name) + current_val = val[name] raw_val = info['setting'] unit = info['unit'] context = info['context'] boot_val = info['boot_val'] - if val[name] == 'True': - val[name] = 'on' - elif val[name] == 'False': - val[name] = 'off' + if current_val == 'True': + current_val = 'on' + elif current_val == 'False': + current_val = 'off' if unit == 'kB': if int(raw_val) > 0: @@ -242,8 +276,12 @@ def param_get(cursor, module, name): unit = 'b' + if is_guc_list_quote: + current_val = param_guc_list_unquote(current_val) + raw_val = param_guc_list_unquote(raw_val) + return { - 'current_val': val[name], + 'current_val': current_val, 'raw_val': raw_val, 'unit': unit, 'boot_val': boot_val, @@ -317,14 +355,22 @@ def pretty_to_bytes(pretty_val): return pretty_val -def param_set(cursor, module, name, value, context): +def param_set(cursor, module, name, value, context, server_version): try: if str(value).lower() == 'default': query = "ALTER SYSTEM SET %s = DEFAULT" % name else: - if isinstance(value, str) and ',' in value and not name.endswith(('_command', '_prefix')): + if isinstance(value, str) and \ + ',' in value and \ + not name.endswith(('_command', '_prefix')) and \ + not (server_version < 140000 and name == 'unix_socket_directories'): # Issue https://github.com/ansible-collections/community.postgresql/issues/78 # Change value from 'one, two, three' -> "'one','two','three'" + # PR https://github.com/ansible-collections/community.postgresql/pull/400 + # Parameter names ends with '_command' or '_prefix' can contains commas but are not lists + # PR https://github.com/ansible-collections/community.postgresql/pull/521 + # unix_socket_directories up to PostgreSQL 13 lacks GUC_LIST_INPUT and + # GUC_LIST_QUOTE options so it is a single value parameter value = ','.join(["'" + elem.strip() + "'" for elem in value.split(',')]) query = "ALTER SYSTEM SET %s = %s" % (name, value) else: @@ -335,7 +381,7 @@ def param_set(cursor, module, name, value, context): cursor.execute("SELECT pg_reload_conf()") except Exception as e: - module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e))) + module.fail_json(msg="Unable to set %s value due to : %s" % (name, to_native(e))) return True @@ -385,15 +431,15 @@ def main(): if value is None and not reset: module.fail_json(msg="%s: at least one of value or reset param must be specified" % name) - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params, warn_db_default=False) db_connection, dummy = connect_to_db(module, conn_params, 
autocommit=True) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) kw = {} # Check server version (needs 9.4 or later): - ver = db_connection.server_version + ver = get_server_version(db_connection) if ver < PG_REQ_VER: module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER)) kw = dict( @@ -407,6 +453,9 @@ def main(): db_connection.close() module.exit_json(**kw) + # Check parameter is GUC_LIST_QUOTE (done once as depend only on server version) + is_guc_list_quote = param_is_guc_list_quote(ver, name) + # Set default returned values: restart_required = False changed = False @@ -414,7 +463,7 @@ def main(): kw['restart_required'] = False # Get info about param state: - res = param_get(cursor, module, name) + res = param_get(cursor, module, name, is_guc_list_quote) current_val = res['current_val'] raw_val = res['raw_val'] unit = res['unit'] @@ -457,7 +506,7 @@ def main(): # Set param (value can be an empty string): if value is not None and value != current_val: - changed = param_set(cursor, module, name, value, context) + changed = param_set(cursor, module, name, value, context, ver) kw['value_pretty'] = value @@ -471,7 +520,7 @@ def main(): ) module.exit_json(**kw) - changed = param_set(cursor, module, name, boot_val, context) + changed = param_set(cursor, module, name, boot_val, context, ver) cursor.close() db_connection.close() @@ -479,9 +528,9 @@ def main(): # Reconnect and recheck current value: if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'): db_connection, dummy = connect_to_db(module, conn_params, autocommit=True) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) - res = param_get(cursor, module, name) + res = param_get(cursor, module, name, is_guc_list_quote) # f_ means 'final' f_value = res['current_val'] f_raw_val = res['raw_val'] diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py index b863784af..9c6a5d04d 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -136,40 +137,34 @@ EXAMPLES = r''' RETURN = r''' name: description: Name of the slot. - returned: always + returned: success type: str sample: "physical_one" queries: description: List of executed queries. 
- returned: always + returned: success type: str sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ] ''' -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, -) +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, + exec_sql, get_conn_params, + get_server_version, + pg_cursor_args, postgres_common_argument_spec, ) - # =========================================== # PostgreSQL module specific support methods. # + class PgSlot(object): def __init__(self, module, cursor, name): self.module = module @@ -195,7 +190,7 @@ class PgSlot(object): if kind == 'physical': # Check server version (immediately_reserved needs 9.6+): - if self.cursor.connection.server_version < 90600: + if get_server_version(self.cursor.connection) < 90600: query = "SELECT pg_create_physical_replication_slot(%(name)s)" else: @@ -222,7 +217,7 @@ class PgSlot(object): res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False) if res: self.exists = True - self.kind = res[0][0] + self.kind = res[0]["slot_type"] # =========================================== @@ -271,11 +266,11 @@ def main(): else: warn_db_default = False - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default) db_connection, dummy = connect_to_db(module, conn_params, autocommit=True) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) ################################## # Create an object and do main job diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py index ae46a0dea..a7d30631b 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type @@ -92,6 +93,12 @@ options: type: bool default: true version_added: '0.2.0' + comment: + description: + - Sets a comment on the subscription. + - To reset the comment, pass an empty string. + type: str + version_added: '3.3.0' notes: - PostgreSQL version must be 10 or greater. @@ -137,6 +144,7 @@ EXAMPLES = r''' user: repl password: replpass dbname: mydb + comment: Made by Ansible - name: Assuming that acme subscription exists, try to change conn parameters community.postgresql.postgresql_subscription: @@ -175,51 +183,48 @@ RETURN = r''' name: description: - Name of the subscription. - returned: always + returned: success type: str sample: acme exists: description: - Flag indicates the subscription exists or not at the end of runtime. - returned: always + returned: success type: bool sample: true queries: description: List of executed queries. 
- returned: always + returned: success type: str sample: [ 'DROP SUBSCRIPTION "mysubscription"' ] initial_state: description: Subscription configuration at the beginning of runtime. - returned: always + returned: success type: dict sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true} final_state: description: Subscription configuration at the end of runtime. - returned: always + returned: success type: dict sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true} ''' from copy import deepcopy -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import check_input +from ansible.module_utils.six import iteritems +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, + exec_sql, get_conn_params, + get_server_version, + pg_cursor_args, postgres_common_argument_spec, + set_comment, ) -from ansible.module_utils.six import iteritems SUPPORTED_PG_VERSION = 10000 @@ -287,13 +292,13 @@ class PgSubscription(): Args: module (AnsibleModule): Object of AnsibleModule class. - cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + cursor (cursor): Cursor object of psycopg library to work with PostgreSQL. name (str): The name of the subscription. db (str): The database name the subscription will be associated with. Attributes: module (AnsibleModule): Object of AnsibleModule class. - cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + cursor (cursor): Cursor object of psycopg library to work with PostgreSQL. name (str): Name of subscription. executed_queries (list): List of executed queries. attrs (dict): Dict with subscription attributes. @@ -313,6 +318,7 @@ class PgSubscription(): 'conninfo': {}, 'slotname': None, 'publications': [], + 'comment': None, } self.empty_attrs = deepcopy(self.attrs) self.exists = self.check_subscr() @@ -345,6 +351,11 @@ class PgSubscription(): self.attrs['synccommit'] = subscr_info.get('subenabled') self.attrs['slotname'] = subscr_info.get('subslotname') self.attrs['publications'] = subscr_info.get('subpublications') + if subscr_info.get('comment') is not None: + self.attrs['comment'] = subscr_info.get('comment') + else: + # To support the comment resetting functionality + self.attrs['comment'] = '' if subscr_info.get('subconninfo'): for param in subscr_info['subconninfo'].split(' '): tmp = param.split('=') @@ -471,6 +482,22 @@ class PgSubscription(): query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role) return self.__exec_sql(query, check_mode=check_mode) + def set_comment(self, comment, check_mode=True): + """Set a subscription comment. + + Args: + comment (str): Comment to set on the subscription. + + Kwargs: + check_mode (bool): If True, don not change anything. + + Returns: + True if success, False otherwise. + """ + set_comment(self.cursor, comment, 'subscription', self.name, check_mode, self.executed_queries) + + return True + def refresh(self, check_mode=True): """Refresh publication. @@ -559,7 +586,8 @@ class PgSubscription(): Returns: Dict with subscription information if successful, False otherwise. 
""" - query = ("SELECT d.datname, r.rolname, s.subenabled, " + query = ("SELECT obj_description(s.oid, 'pg_subscription') AS comment, " + "d.datname, r.rolname, s.subenabled, " "s.subconninfo, s.subslotname, s.subsynccommit, " "s.subpublications FROM pg_catalog.pg_subscription s " "JOIN pg_catalog.pg_database d " @@ -615,6 +643,7 @@ def main(): subsparams=dict(type='dict'), session_role=dict(type='str'), trust_input=dict(type='bool', default=True), + comment=dict(type='str', default=None), ) module = AnsibleModule( argument_spec=argument_spec, @@ -632,6 +661,7 @@ def main(): connparams = module.params['connparams'] session_role = module.params['session_role'] trust_input = module.params['trust_input'] + comment = module.params['comment'] if not trust_input: # Check input for potentially dangerous elements: @@ -646,7 +676,7 @@ def main(): connparams_str = convert_conn_params(connparams) check_input(module, name, publications, owner, session_role, - connparams_str, subsparams_str) + connparams_str, subsparams_str, comment) if state == 'present' and cascade: module.warn('parameter "cascade" is ignored when state is not absent') @@ -660,17 +690,19 @@ def main(): module.warn("parameter 'connparams' is ignored when state is not 'present'") if subsparams: module.warn("parameter 'subsparams' is ignored when state is not 'present'") + if comment is not None: + module.warn("parameter 'comment' is ignored when state is not 'present'") - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) # Connect to DB and make cursor object: pg_conn_params = get_conn_params(module, module.params) # We check subscription state without DML queries execution, so set autocommit: db_connection, dummy = connect_to_db(module, pg_conn_params, autocommit=True) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) # Check version: - if cursor.connection.server_version < SUPPORTED_PG_VERSION: + if get_server_version(cursor.connection) < SUPPORTED_PG_VERSION: module.fail_json(msg="PostgreSQL server version should be 10.0 or greater") # Set defaults: @@ -711,6 +743,9 @@ def main(): if owner and subscription.attrs['owner'] != owner: changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed + if comment is not None and comment != subscription.attrs['comment']: + changed = subscription.set_comment(comment, check_mode=module.check_mode) or changed + elif state == 'absent': changed = subscription.drop(cascade, check_mode=module.check_mode) diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py index 33f1c752f..1e25c8f23 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -212,43 +213,36 @@ EXAMPLES = r''' RETURN = r''' table: description: Name of a table. - returned: always + returned: success type: str sample: 'foo' state: description: Table state. - returned: always + returned: success type: str sample: 'present' owner: description: Table owner. 
- returned: always + returned: success type: str sample: 'postgres' tablespace: description: Tablespace. - returned: always + returned: success type: str sample: 'ssd_tablespace' queries: description: List of executed queries. - returned: always + returned: success type: str sample: [ 'CREATE TABLE "test_table" (id bigint)' ] storage_params: description: Storage parameters. - returned: always + returned: success type: list sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ] ''' -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.postgresql.plugins.module_utils.database import ( check_input, @@ -256,17 +250,18 @@ from ansible_collections.community.postgresql.plugins.module_utils.database impo ) from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, + exec_sql, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, ) - # =========================================== # PostgreSQL module specific support methods. # + class Table(object): def __init__(self, name, module, cursor): self.name = name @@ -305,9 +300,9 @@ class Table(object): if res: self.exists = True self.info = dict( - owner=res[0][0], - tblspace=res[0][1] if res[0][1] else '', - storage_params=res[0][2] if res[0][2] else [], + owner=res[0]["tableowner"], + tblspace=res[0]["tablespace"] if res[0]["tablespace"] else '', + storage_params=res[0]["reloptions"] if res[0]["reloptions"] else [], ) return True @@ -536,11 +531,11 @@ def main(): if including and not like: module.fail_json(msg="%s: including param needs like param specified" % table) - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params) db_connection, dummy = connect_to_db(module, conn_params, autocommit=False) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) if storage_params: storage_params = ','.join(storage_params) diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py index 243005733..7556370aa 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py @@ -6,7 +6,8 @@ # Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -77,6 +78,12 @@ options: type: bool default: true version_added: '0.2.0' + comment: + description: + - Sets a comment on the tablespace. + - To reset the comment, pass an empty string. 
+ type: str + version_added: '3.3.0' attributes: check_mode: @@ -104,6 +111,7 @@ author: - Flavien Chantelot (@Dorn-) - Antoine Levy-Lambert (@antoinell) - Andrew Klychkov (@Andersson007) +- Daniele Giudice (@RealGreenDragon) extends_documentation_fragment: - community.postgresql.postgres @@ -115,6 +123,7 @@ EXAMPLES = r''' name: acme owner: bob location: /data/foo + comment: "Bob's tablespace" - name: Create a new tablespace called bar with tablespace options community.postgresql.postgresql_tablespace: @@ -143,27 +152,32 @@ EXAMPLES = r''' RETURN = r''' queries: description: List of queries that was tried to be executed. - returned: always + returned: success type: str sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ] tablespace: description: Tablespace name. - returned: always + returned: success type: str sample: 'ssd' owner: description: Tablespace owner. - returned: always + returned: success type: str sample: 'Bob' +comment: + description: Tablespace comment. + returned: success + type: str + sample: 'Test tablespace' options: description: Tablespace options. - returned: always + returned: success type: dict sample: { 'random_page_cost': 1, 'seq_page_cost': 1 } location: description: Path to the tablespace in the file system. - returned: always + returned: success type: str sample: '/incredible/fast/ssd' newname: @@ -173,33 +187,24 @@ newname: sample: new_ssd state: description: Tablespace state at the end of execution. - returned: always + returned: success type: str sample: 'present' ''' -try: - from psycopg2 import __version__ as PSYCOPG2_VERSION - from psycopg2.extras import DictCursor - from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT - from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems - -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, -) +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, + exec_sql, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, + set_autocommit, + set_comment, ) @@ -209,12 +214,12 @@ class PgTablespace(object): Args: module (AnsibleModule) -- object of AnsibleModule class - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library name (str) -- name of the tablespace Attrs: module (AnsibleModule) -- object of AnsibleModule class - cursor (cursor) -- cursor object of psycopg2 library + cursor (cursor) -- cursor object of psycopg library name (str) -- name of the tablespace exists (bool) -- flag the tablespace exists in the DB or not owner (str) -- tablespace owner @@ -235,6 +240,7 @@ class PgTablespace(object): self.executed_queries = [] self.new_name = '' self.opt_not_supported = False + self.comment = None # Collect info: self.get_info() @@ -256,12 +262,14 @@ class PgTablespace(object): if not opt: self.opt_not_supported = True - query = ("SELECT r.rolname, (SELECT Null), %s " + query = ("SELECT shobj_description(t.oid, 'pg_tablespace') AS comment, " + "r.rolname, (SELECT Null) spcoptions, %s loc_string " "FROM pg_catalog.pg_tablespace AS t " "JOIN pg_catalog.pg_roles AS r " "ON 
t.spcowner = r.oid " % location) else: - query = ("SELECT r.rolname, t.spcoptions, %s " + query = ("SELECT shobj_description(t.oid, 'pg_tablespace') AS comment, " + "r.rolname, t.spcoptions, %s loc_string " "FROM pg_catalog.pg_tablespace AS t " "JOIN pg_catalog.pg_roles AS r " "ON t.spcowner = r.oid " % location) @@ -273,19 +281,21 @@ class PgTablespace(object): self.exists = False return False - if res[0][0]: + if res[0]["rolname"]: self.exists = True - self.owner = res[0][0] + self.owner = res[0]["rolname"] - if res[0][1]: + if res[0]["spcoptions"]: # Options exist: - for i in res[0][1]: + for i in res[0]["spcoptions"]: i = i.split('=') self.settings[i[0]] = i[1] - if res[0][2]: + if res[0]["loc_string"]: # Location exists: - self.location = res[0][2] + self.location = res[0]["loc_string"] + + self.comment = res[0]["comment"] if res[0]["comment"] is not None else '' def create(self, location): """Create tablespace. @@ -319,6 +329,20 @@ class PgTablespace(object): query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.name, new_owner) return exec_sql(self, query, return_bool=True) + def set_comment(self, comment, check_mode): + """Set tablespace comment. + + Return True if success, otherwise, return False. + + args: + comment (str) -- comment to set for the tablespace" + """ + if comment == self.comment: + return False + + return set_comment(self.cursor, comment, 'tablespace', self.name, + check_mode, self.executed_queries) + def rename(self, newname): """Rename tablespace. @@ -398,11 +422,11 @@ def main(): db=dict(type='str', aliases=['login_db']), session_role=dict(type='str'), trust_input=dict(type='bool', default=True), + comment=dict(type='str', default=None), ) module = AnsibleModule( argument_spec=argument_spec, - mutually_exclusive=(('positional_args', 'named_args'),), supports_check_mode=True, ) @@ -414,6 +438,7 @@ def main(): settings = module.params["set"] session_role = module.params["session_role"] trust_input = module.params["trust_input"] + comment = module.params["comment"] if state == 'absent' and (location or owner or rename_to or settings): module.fail_json(msg="state=absent is mutually exclusive location, " @@ -427,20 +452,13 @@ def main(): settings_list = ['%s = %s' % (k, v) for k, v in iteritems(settings)] check_input(module, tablespace, location, owner, - rename_to, session_role, settings_list) + rename_to, session_role, settings_list, comment) - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params, warn_db_default=False) - db_connection, dummy = connect_to_db(module, conn_params, autocommit=True) - cursor = db_connection.cursor(cursor_factory=DictCursor) - - # Change autocommit to False if check_mode: - if module.check_mode: - if PSYCOPG2_VERSION >= '2.4.2': - db_connection.set_session(autocommit=False) - else: - db_connection.set_isolation_level(READ_COMMITTED) + db_connection, dummy = connect_to_db(module, conn_params, autocommit=False if module.check_mode else True) + cursor = db_connection.cursor(**pg_cursor_args) # Set defaults: autocommit = False @@ -466,26 +484,15 @@ def main(): # Because CREATE TABLESPACE can not be run inside the transaction block: autocommit = True - if PSYCOPG2_VERSION >= '2.4.2': - db_connection.set_session(autocommit=True) - else: - db_connection.set_isolation_level(AUTOCOMMIT) + set_autocommit(db_connection, True) changed = tblspace.create(location) - # Drop non-existing 
tablespace: - elif not tblspace.exists and state == 'absent': - # Nothing to do: - module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name) - # Drop existing tablespace: elif tblspace.exists and state == 'absent': # Because DROP TABLESPACE can not be run inside the transaction block: autocommit = True - if PSYCOPG2_VERSION >= '2.4.2': - db_connection.set_session(autocommit=True) - else: - db_connection.set_isolation_level(AUTOCOMMIT) + set_autocommit(db_connection, True) changed = tblspace.drop() @@ -498,14 +505,18 @@ def main(): # Refresh information: tblspace.get_info() - # Change owner and settings: + # Change owner, comment and settings: if state == 'present' and tblspace.exists: if owner: - changed = tblspace.set_owner(owner) + changed = tblspace.set_owner(owner) or changed if settings: - changed = tblspace.set_settings(settings) + changed = tblspace.set_settings(settings) or changed + + if comment is not None: + changed = tblspace.set_comment(comment, module.check_mode) or changed + # Update tablespace information in the class tblspace.get_info() # Rollback if it's possible and check_mode: @@ -527,6 +538,7 @@ def main(): queries=tblspace.executed_queries, options=tblspace.settings, location=tblspace.location, + comment=tblspace.comment, ) if state == 'present': diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py index 594e0f1ae..8192a5480 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -21,10 +22,8 @@ description: - Set I(fail_on_user) to C(false) to make the module ignore failures when trying to remove a user. In this case, the module reports if changes happened as usual and separately reports whether the user has been removed or not. -- B(WARNING) The I(priv) option has been B(deprecated) and will be removed in community.postgresql 3.0.0. Please use the +- B(WARNING) The I(priv) option has been B(deprecated) and will be removed in community.postgresql 4.0.0. Please use the M(community.postgresql.postgresql_privs) module instead. -- B(WARNING) The I(groups) option has been B(deprecated) ans will be removed in community.postgresql 3.0.0. - Please use the M(community.postgresql.postgresql_membership) module instead. options: name: description: @@ -64,7 +63,7 @@ options: priv: description: - This option has been B(deprecated) and will be removed in - community.postgresql 3.0.0. Please use the M(community.postgresql.postgresql_privs) module to + community.postgresql 4.0.0. Please use the M(community.postgresql.postgresql_privs) module to GRANT/REVOKE permissions instead. - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where you can define the user's privileges for the database ( allowed options - 'CREATE', @@ -141,23 +140,16 @@ options: - If the file exists, verifies that the server's certificate is signed by one of these authorities. type: str aliases: [ ssl_rootcert ] - groups: - description: - - This option has been B(deprecated) and will be removed in community.postgresql 3.0.0. - Please use the I(postgresql_membership) module to GRANT/REVOKE group/role memberships - instead. 
- - The list of groups (roles) that you want to grant to the user. - type: list - elements: str comment: description: - Adds a comment on the user (equivalent to the C(COMMENT ON ROLE) statement). + - To reset the comment, pass an empty string. type: str version_added: '0.2.0' trust_input: description: - If C(false), checks whether values of options I(name), I(password), I(privs), I(expires), - I(role_attr_flags), I(groups), I(comment), I(session_role) are potentially dangerous. + I(role_attr_flags), I(comment), I(session_role) are potentially dangerous. - It makes sense to use C(false) only when SQL injections through the options are possible. type: bool default: true @@ -267,15 +259,6 @@ EXAMPLES = r''' user: test password: "" -# This example uses the `group` argument which is deprecated. -# You should use the `postgresql_membership` module instead. -- name: Create user test and grant group user_ro and user_rw to it - community.postgresql.postgresql_user: - name: test - groups: - - user_ro - - user_rw - # Create user with a cleartext password if it does not exist or update its password. # The password will be encrypted with SCRAM algorithm (available since PostgreSQL 10) - name: Create appclient user with SCRAM-hashed password @@ -296,43 +279,47 @@ EXAMPLES = r''' RETURN = r''' queries: description: List of executed queries. - returned: always + returned: success type: list sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"'] ''' +import hmac import itertools import re import traceback -from hashlib import md5, sha256 -import hmac from base64 import b64decode +from hashlib import md5, sha256 -try: - import psycopg2 - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - +from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems +from ansible_collections.community.postgresql.plugins.module_utils import \ + saslprep from ansible_collections.community.postgresql.plugins.module_utils.database import ( - pg_quote_identifier, SQLParseError, check_input, + pg_quote_identifier, ) from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + HAS_PSYCOPG, + PSYCOPG_VERSION, connect_to_db, ensure_required_libs, + get_comment, get_conn_params, get_server_version, - PgMembership, + pg_cursor_args, postgres_common_argument_spec, + set_comment, ) -from ansible.module_utils._text import to_bytes, to_native, to_text -from ansible.module_utils.six import iteritems -from ansible_collections.community.postgresql.plugins.module_utils import saslprep +from ansible_collections.community.postgresql.plugins.module_utils.version import \ + LooseVersion + +if HAS_PSYCOPG and PSYCOPG_VERSION < LooseVersion("3.0"): + import psycopg2 as psycopg +elif HAS_PSYCOPG: + import psycopg try: # pbkdf2_hmac is missing on python 2.6, we can safely assume, @@ -348,7 +335,7 @@ FLAGS_BY_VERSION = {'BYPASSRLS': 90500} SCRAM_SHA256_REGEX = r'^SCRAM-SHA-256\$(\d+):([A-Za-z0-9+\/=]+)\$([A-Za-z0-9+\/=]+):([A-Za-z0-9+\/=]+)$' -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')), database=frozenset( ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')), @@ 
-428,24 +415,29 @@ def user_should_we_change_password(current_role_attrs, user, password, encrypted # Do we actually need to do anything? pwchanging = False if password is not None: + current_password = current_role_attrs['rolpassword'] + # Handle SQL_ASCII encoded databases + if isinstance(current_password, bytes): + current_password = current_password.decode('ascii') + # Empty password means that the role shouldn't have a password, which # means we need to check if the current password is None. if password == '': - if current_role_attrs['rolpassword'] is not None: + if current_password is not None: pwchanging = True # If the provided password is a SCRAM hash, compare it directly to the current password elif re.match(SCRAM_SHA256_REGEX, password): - if password != current_role_attrs['rolpassword']: + if password != current_password: pwchanging = True # SCRAM hashes are represented as a special object, containing hash data: # `SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey>` # for reference, see https://www.postgresql.org/docs/current/catalog-pg-authid.html - elif current_role_attrs['rolpassword'] is not None \ + elif current_password is not None \ and pbkdf2_found \ - and re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']): + and re.match(SCRAM_SHA256_REGEX, current_password): - r = re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']) + r = re.match(SCRAM_SHA256_REGEX, current_password) try: # extract SCRAM params from rolpassword it = int(r.group(1)) @@ -475,11 +467,11 @@ def user_should_we_change_password(current_role_attrs, user, password, encrypted # When the provided password looks like a MD5-hash, value of # 'encrypted' is ignored. elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED': - if password != current_role_attrs['rolpassword']: + if password != current_password: pwchanging = True elif encrypted == 'ENCRYPTED': hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest()) - if hashed_password != current_role_attrs['rolpassword']: + if hashed_password != current_password: pwchanging = True return pwchanging @@ -489,7 +481,7 @@ def user_alter(db_connection, module, user, password, role_attr_flags, encrypted """Change user password and/or attributes. Return True if changed, False otherwise.""" changed = False - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a # literal if user == 'PUBLIC': @@ -508,7 +500,7 @@ def user_alter(db_connection, module, user, password, role_attr_flags, encrypted cursor.execute(select, {"user": user}) # Grab current role attributes. 
current_role_attrs = cursor.fetchone() - except psycopg2.ProgrammingError: + except psycopg.ProgrammingError: current_role_attrs = None db_connection.rollback() @@ -522,7 +514,7 @@ def user_alter(db_connection, module, user, password, role_attr_flags, encrypted cursor.execute(select, {"user": user}) # Grab current role attributes from pg_roles current_role_attrs = cursor.fetchone() - except psycopg2.ProgrammingError as e: + except psycopg.ProgrammingError as e: db_connection.rollback() module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e)) @@ -540,8 +532,8 @@ def user_alter(db_connection, module, user, password, role_attr_flags, encrypted role_attr_flags_changing = True if expires is not None: - cursor.execute("SELECT %s::timestamptz;", (expires,)) - expires_with_tz = cursor.fetchone()[0] + cursor.execute("SELECT %s::timestamptz exp_timestamp", (expires,)) + expires_with_tz = cursor.fetchone()["exp_timestamp"] expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil') else: expires_changing = False @@ -572,17 +564,19 @@ def user_alter(db_connection, module, user, password, role_attr_flags, encrypted cursor.execute(statement, query_password_data) changed = True executed_queries.append(statement) - except psycopg2.InternalError as e: - if e.pgcode == '25006': + # We could catch psycopg.errors.ReadOnlySqlTransaction directly, + # but that was added only in Psycopg 2.8 + except psycopg.InternalError as e: + if e.diag.sqlstate == "25006": # Handle errors due to read-only transactions indicated by pgcode 25006 # ERROR: cannot execute ALTER ROLE in a read-only transaction changed = False - module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + module.fail_json(msg=e.diag.message_primary, exception=traceback.format_exc()) return changed else: - raise psycopg2.InternalError(e) - except psycopg2.NotSupportedError as e: - module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + raise psycopg.InternalError(e) + except psycopg.NotSupportedError as e: + module.fail_json(msg=e.diag.message_primary, exception=traceback.format_exc()) elif no_password_changes and role_attr_flags != '': # Grab role information from pg_roles instead of pg_authid @@ -617,15 +611,16 @@ def user_alter(db_connection, module, user, password, role_attr_flags, encrypted statement = ' '.join(alter) cursor.execute(statement) executed_queries.append(statement) - except psycopg2.InternalError as e: - if e.pgcode == '25006': + + except psycopg.InternalError as e: + if e.diag.sqlstate == "25006": # Handle errors due to read-only transactions indicated by pgcode 25006 # ERROR: cannot execute ALTER ROLE in a read-only transaction changed = False - module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + module.fail_json(msg=e.diag.message_primary, exception=traceback.format_exc()) return changed else: - raise psycopg2.InternalError(e) + raise psycopg.InternalError(e) # Grab new role attributes. 
cursor.execute(select, {"user": user}) @@ -653,7 +648,7 @@ def user_delete(cursor, user): return True -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def has_table_privileges(cursor, user, table, privs): """ Return the difference between the privileges that a user already has and @@ -671,7 +666,7 @@ def has_table_privileges(cursor, user, table, privs): return (have_currently, other_current, desired) -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def get_table_privileges(cursor, user, table): if '.' in table: schema, table = table.split('.', 1) @@ -680,10 +675,10 @@ def get_table_privileges(cursor, user, table): query = ("SELECT privilege_type FROM information_schema.role_table_grants " "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s") cursor.execute(query, {'user': user, 'table': table, 'schema': schema}) - return frozenset([x[0] for x in cursor.fetchall()]) + return frozenset([x["privilege_type"] for x in cursor.fetchall()]) -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def grant_table_privileges(cursor, user, table, privs): # Note: priv escaped by parse_privs privs = ', '.join(privs) @@ -693,7 +688,7 @@ def grant_table_privileges(cursor, user, table, privs): cursor.execute(query) -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def revoke_table_privileges(cursor, user, table, privs): # Note: priv escaped by parse_privs privs = ', '.join(privs) @@ -703,16 +698,16 @@ def revoke_table_privileges(cursor, user, table, privs): cursor.execute(query) -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def get_database_privileges(cursor, user, db): priv_map = { 'C': 'CREATE', 'T': 'TEMPORARY', 'c': 'CONNECT', } - query = 'SELECT datacl FROM pg_database WHERE datname = %s' + query = 'SELECT datacl::text FROM pg_database WHERE datname = %s' cursor.execute(query, (db,)) - datacl = cursor.fetchone()[0] + datacl = cursor.fetchone()["datacl"] if datacl is None: return set() r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' 
% user, datacl) @@ -724,7 +719,7 @@ def get_database_privileges(cursor, user, db): return normalize_privileges(o, 'database') -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def has_database_privileges(cursor, user, db, privs): """ Return the difference between the privileges that a user already has and @@ -742,7 +737,7 @@ def has_database_privileges(cursor, user, db, privs): return (have_currently, other_current, desired) -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def grant_database_privileges(cursor, user, db, privs): # Note: priv escaped by parse_privs privs = ', '.join(privs) @@ -757,7 +752,7 @@ def grant_database_privileges(cursor, user, db, privs): cursor.execute(query) -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def revoke_database_privileges(cursor, user, db, privs): # Note: priv escaped by parse_privs privs = ', '.join(privs) @@ -772,7 +767,7 @@ def revoke_database_privileges(cursor, user, db, privs): cursor.execute(query) -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def revoke_privileges(cursor, user, privs): if privs is None: return False @@ -794,7 +789,7 @@ def revoke_privileges(cursor, user, privs): return changed -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def grant_privileges(cursor, user, privs): if privs is None: return False @@ -846,7 +841,7 @@ def parse_role_attrs(role_attr_flags, srv_version): return ' '.join(flags) -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def normalize_privileges(privs, type_): new_privs = set(privs) if 'ALL' in new_privs: @@ -859,7 +854,7 @@ def normalize_privileges(privs, type_): return new_privs -# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 +# WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 def parse_privs(privs, db): """ Parse privilege string to determine permissions for database db. 
@@ -913,21 +908,13 @@ def get_valid_flags_by_version(srv_version): ] -def get_comment(cursor, user): - """Get user's comment.""" - query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') " - "FROM pg_catalog.pg_roles r " - "WHERE r.rolname = %(user)s") - cursor.execute(query, {'user': user}) - return cursor.fetchone()[0] - - -def add_comment(cursor, user, comment): +def add_comment(cursor, user, comment, check_mode): """Add comment on user.""" - if comment != get_comment(cursor, user): - query = 'COMMENT ON ROLE "%s" IS ' % user - cursor.execute(query + '%(comment)s', {'comment': comment}) - executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment})) + current_comment = get_comment(cursor, 'role', user) + # For the resetting comment feature (comment: '') to work correctly + current_comment = current_comment if current_comment is not None else '' + if comment != current_comment: + set_comment(cursor, comment, 'role', user, check_mode, executed_queries) return True else: return False @@ -943,7 +930,7 @@ def main(): user=dict(type='str', required=True, aliases=['name']), password=dict(type='str', default=None, no_log=True), state=dict(type='str', default='present', choices=['absent', 'present']), - priv=dict(type='str', default=None, removed_in_version='3.0.0', removed_from_collection='community.postgreql'), + priv=dict(type='str', default=None, removed_in_version='4.0.0', removed_from_collection='community.postgreql'), db=dict(type='str', default='', aliases=['login_db']), fail_on_user=dict(type='bool', default=True, aliases=['fail_on_role']), role_attr_flags=dict(type='str', default=''), @@ -952,8 +939,6 @@ def main(): expires=dict(type='str', default=None), conn_limit=dict(type='int', default=None), session_role=dict(type='str'), - # WARNING: groups are deprecated and will be removed in community.postgresql 3.0.0 - groups=dict(type='list', elements='str', removed_in_version='3.0.0', removed_from_collection='community.postgreql'), comment=dict(type='str', default=None), trust_input=dict(type='bool', default=True), ) @@ -966,10 +951,10 @@ def main(): password = module.params["password"] state = module.params["state"] fail_on_user = module.params["fail_on_user"] - # WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 + # WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 if module.params['db'] == '' and module.params["priv"] is not None: module.fail_json(msg="privileges require a database to be specified") - # WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 + # WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 privs = parse_privs(module.params["priv"], module.params["db"]) no_password_changes = module.params["no_password_changes"] if module.params["encrypted"]: @@ -979,25 +964,20 @@ def main(): expires = module.params["expires"] conn_limit = module.params["conn_limit"] role_attr_flags = module.params["role_attr_flags"] - # WARNING: groups are deprecated and will be removed in community.postgresql 3.0.0 - groups = module.params["groups"] - if groups: - groups = [e.strip() for e in groups] comment = module.params["comment"] session_role = module.params['session_role'] trust_input = module.params['trust_input'] if not trust_input: # Check input for potentially dangerous elements: - # WARNING: groups are deprecated and will be removed in community.postgresql 3.0.0 check_input(module, user, password, privs, expires, - role_attr_flags, groups, 
comment, session_role) + role_attr_flags, comment, session_role, comment) - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) conn_params = get_conn_params(module, module.params, warn_db_default=False) db_connection, dummy = connect_to_db(module, conn_params) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) srv_version = get_server_version(db_connection) @@ -1021,29 +1001,21 @@ def main(): try: changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit) - except psycopg2.ProgrammingError as e: + except psycopg.ProgrammingError as e: module.fail_json(msg="Unable to add user with given requirement " "due to : %s" % to_native(e), exception=traceback.format_exc()) except SQLParseError as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - # WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 + # WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 try: changed = grant_privileges(cursor, user, privs) or changed except SQLParseError as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) - # WARNING: groups are deprecated and will be removed in community.postgresql 3.0.0 - if groups: - target_roles = [] - target_roles.append(user) - pg_membership = PgMembership(module, cursor, groups, target_roles) - changed = pg_membership.grant() or changed - executed_queries.extend(pg_membership.executed_queries) - if comment is not None: try: - changed = add_comment(cursor, user, comment) or changed + changed = add_comment(cursor, user, comment, module.check_mode) or changed except Exception as e: module.fail_json(msg='Unable to add comment on role: %s' % to_native(e), exception=traceback.format_exc()) @@ -1054,7 +1026,7 @@ def main(): changed = True kw['user_removed'] = True else: - # WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0 + # WARNING: privs are deprecated and will be removed in community.postgresql 4.0.0 try: changed = revoke_privileges(cursor, user, privs) user_removed = user_delete(cursor, user) diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py index f443d50c3..35a5b1fd0 100644 --- a/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py +++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r''' @@ -89,41 +90,33 @@ EXAMPLES = r''' RETURN = r''' indexes: description: User index statistics. - returned: always + returned: success type: dict sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}} tables: description: User table statistics. - returned: always + returned: success type: dict sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}} functions: description: User function statistics. 
- returned: always + returned: success type: dict sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}} ''' -try: - from psycopg2.extras import DictCursor -except ImportError: - # psycopg2 is checked by connect_to_db() - # from ansible.module_utils.postgres - pass - from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.postgresql.plugins.module_utils.database import ( - check_input, -) +from ansible.module_utils.six import iteritems +from ansible_collections.community.postgresql.plugins.module_utils.database import \ + check_input from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( connect_to_db, - exec_sql, ensure_required_libs, + exec_sql, get_conn_params, + pg_cursor_args, postgres_common_argument_spec, ) -from ansible.module_utils.six import iteritems - # =========================================== # PostgreSQL module specific support methods. @@ -135,11 +128,11 @@ class PgUserObjStatInfo(): Args: module (AnsibleModule): Object of AnsibleModule class. - cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + cursor (cursor): Cursor object of psycopg library to work with PostgreSQL. Attributes: module (AnsibleModule): Object of AnsibleModule class. - cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + cursor (cursor): Cursor object of psycopg library to work with PostgreSQL. executed_queries (list): List of executed queries. info (dict): Statistics dictionary. obj_func_mapping (dict): Mapping of object types to corresponding functions. @@ -193,11 +186,12 @@ class PgUserObjStatInfo(): def get_func_stat(self): """Get function statistics and fill out self.info dictionary.""" query = "SELECT * FROM pg_stat_user_functions" + qp = None if self.schema: query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s" + qp = (self.schema,) - result = exec_sql(self, query, query_params=(self.schema,), - add_to_executed=False) + result = exec_sql(self, query, query_params=qp, add_to_executed=False) if not result: return @@ -210,11 +204,12 @@ class PgUserObjStatInfo(): def get_idx_stat(self): """Get index statistics and fill out self.info dictionary.""" query = "SELECT * FROM pg_stat_user_indexes" + qp = None if self.schema: query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s" + qp = (self.schema,) - result = exec_sql(self, query, query_params=(self.schema,), - add_to_executed=False) + result = exec_sql(self, query, query_params=qp, add_to_executed=False) if not result: return @@ -227,11 +222,12 @@ class PgUserObjStatInfo(): def get_tbl_stat(self): """Get table statistics and fill out self.info dictionary.""" query = "SELECT * FROM pg_stat_user_tables" + qp = None if self.schema: query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s" + qp = (self.schema,) - result = exec_sql(self, query, query_params=(self.schema,), - add_to_executed=False) + result = exec_sql(self, query, query_params=qp, add_to_executed=False) if not result: return @@ -270,23 +266,23 @@ class PgUserObjStatInfo(): query_params=(relname,), add_to_executed=False) - self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0] + self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0]["pg_relation_size"] if info_key == 'tables': result = exec_sql(self, "SELECT pg_total_relation_size (%s)", query_params=(relname,), add_to_executed=False) - self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = 
result[0][0] + self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0]["pg_total_relation_size"] def set_schema(self, schema): """If schema exists, sets self.schema, otherwise fails.""" - query = ("SELECT 1 FROM information_schema.schemata " + query = ("SELECT 1 as schema_exists FROM information_schema.schemata " "WHERE schema_name = %s") result = exec_sql(self, query, query_params=(schema,), add_to_executed=False) - if result and result[0][0]: + if result and result[0]["schema_exists"]: self.schema = schema else: self.module.fail_json(msg="Schema '%s' does not exist" % (schema)) @@ -316,13 +312,13 @@ def main(): if not module.params["trust_input"]: check_input(module, module.params['session_role']) - # Ensure psycopg2 libraries are available before connecting to DB: + # Ensure psycopg libraries are available before connecting to DB: ensure_required_libs(module) # Connect to DB and make cursor object: pg_conn_params = get_conn_params(module, module.params) # We don't need to commit anything, so, set it to False: db_connection, dummy = connect_to_db(module, pg_conn_params, autocommit=False) - cursor = db_connection.cursor(cursor_factory=DictCursor) + cursor = db_connection.cursor(**pg_cursor_args) ############################ # Create object and do work: diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml index 766feeecc..d5c9deb57 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml @@ -1,4 +1,5 @@ db_name: 'ansible_db' +db_name_icu: 'ansible_db_icu' db_user1: 'ansible.db.user1' db_user2: 'ansible.db.user2' tmp_dir: '/tmp' diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml index dd55c3f98..2c5fc7c95 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml @@ -45,3 +45,6 @@ # Simple test to create and then drop with force - import_tasks: manage_database.yml + +# Test the comment feature +- import_tasks: postgresql_db_comment.yml diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_comment.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_comment.yml new file mode 100644 index 000000000..1e4d8426c --- /dev/null +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_comment.yml @@ -0,0 +1,189 @@ +# Test code for the postgresql_db comment module feature +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <andrew.a.klychkov@gmail.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: Set parameters we use with most of tasks + ansible.builtin.set_fact: + task_parameters: &task_parameters + become_user: "{{ pg_user }}" + become: true + register: result + +- name: Create DB with comment + <<: *task_parameters + postgresql_db: + state: present + name: comment_db + trust_input: false + login_user: "{{ pg_user }}" + comment: Test 
DB comment 1 + +- name: Assert the executed commands + assert: + that: + - result is changed + - result.db == "comment_db" + - result.executed_commands == ['CREATE DATABASE "comment_db"', "COMMENT ON DATABASE \"comment_db\" IS 'Test DB comment 1'"] + +- name: Get the DB comment + <<: *task_parameters + postgresql_query: + login_user: "{{ pg_user }}" + query: "SELECT pg_catalog.shobj_description(d.oid, 'pg_database') AS comment FROM pg_catalog.pg_database d WHERE datname = 'comment_db'" + +- name: Check the comments match + assert: + that: + - result.query_result[0]['comment'] == "Test DB comment 1" + + +- name: Create DB with another comment in check mode + <<: *task_parameters + postgresql_db: + state: present + name: comment_db + login_user: "{{ pg_user }}" + comment: Another comment + check_mode: true + +- name: Assert the result + assert: + that: + - result is changed + +- name: Check the comment + <<: *task_parameters + postgresql_query: + login_user: "{{ pg_user }}" + query: "SELECT pg_catalog.shobj_description(d.oid, 'pg_database') AS comment FROM pg_catalog.pg_database d WHERE datname = 'comment_db'" + +- name: Check the comment hasn't changed + assert: + that: + - result.query_result[0]['comment'] == "Test DB comment 1" + + +- name: Create DB with another comment in real mode + <<: *task_parameters + postgresql_db: + state: present + name: comment_db + login_user: "{{ pg_user }}" + comment: Another comment + +- name: Assert the result + assert: + that: + - result is changed + - result.executed_commands == ["COMMENT ON DATABASE \"comment_db\" IS 'Another comment'"] + +- name: Check the comment + <<: *task_parameters + postgresql_query: + login_user: "{{ pg_user }}" + query: "SELECT pg_catalog.shobj_description(d.oid, 'pg_database') AS comment FROM pg_catalog.pg_database d WHERE datname = 'comment_db'" + +- name: Check the comments match + assert: + that: + - result.query_result[0]['comment'] == "Another comment" + + +- name: Create DB with the same comment in real mode + <<: *task_parameters + postgresql_db: + state: present + name: comment_db + login_user: "{{ pg_user }}" + comment: Another comment + +- name: Assert the result + assert: + that: + - result is not changed + - result.executed_commands == [] + +- name: Check the comment + <<: *task_parameters + postgresql_query: + login_user: "{{ pg_user }}" + query: "SELECT pg_catalog.shobj_description(d.oid, 'pg_database') AS comment FROM pg_catalog.pg_database d WHERE datname = 'comment_db'" + +- name: Check the comments match + assert: + that: + - result.query_result[0]['comment'] == "Another comment" + + +- name: Not specifying the comment will not erase it + <<: *task_parameters + postgresql_db: + state: present + name: comment_db + login_user: "{{ pg_user }}" + +- name: Assert the result + assert: + that: + - result is not changed + - result.executed_commands == [] + +- name: Check the comment + <<: *task_parameters + postgresql_query: + login_user: "{{ pg_user }}" + query: "SELECT pg_catalog.shobj_description(d.oid, 'pg_database') AS comment FROM pg_catalog.pg_database d WHERE datname = 'comment_db'" + +- name: Check the comments match + assert: + that: + - result.query_result[0]['comment'] == "Another comment" + + +- name: Reset the comment + <<: *task_parameters + postgresql_db: + state: present + name: comment_db + login_user: "{{ pg_user }}" + comment: '' + +- name: Assert the result + assert: + that: + - result is changed + - result.executed_commands == ["COMMENT ON DATABASE \"comment_db\" IS ''"] + +- name: Check the 
comment + <<: *task_parameters + postgresql_query: + login_user: "{{ pg_user }}" + query: "SELECT pg_catalog.shobj_description(d.oid, 'pg_database') AS comment FROM pg_catalog.pg_database d WHERE datname = 'comment_db'" + +- name: Check the comments match + assert: + that: + - result.query_result[0]['comment'] == None + + +- name: Reset the comment again + <<: *task_parameters + postgresql_db: + state: present + name: comment_db + login_user: "{{ pg_user }}" + comment: '' + +- name: Assert the result + assert: + that: + - result is not changed + - result.executed_commands == [] + + +- name: Clean up + <<: *task_parameters + postgresql_db: + state: absent + name: comment_db + login_user: "{{ pg_user }}" diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml index 472524a23..e87b4c3da 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml @@ -105,7 +105,7 @@ # # Test conn_limit, encoding, collate, ctype, template options # -- name: Create a DB with conn_limit, encoding, collate, ctype, and template options +- name: Create a DB with conn_limit, encoding, collate, ctype and template options become_user: "{{ pg_user }}" become: true postgresql_db: @@ -124,6 +124,35 @@ - result is changed - result.executed_commands == ["CREATE DATABASE \"{{ db_name }}\" TEMPLATE \"template0\" ENCODING 'LATIN1' LC_COLLATE 'pt_BR{{ locale_latin_suffix }}' LC_CTYPE 'es_ES{{ locale_latin_suffix }}' CONNECTION LIMIT 100"] or result.executed_commands == ["CREATE DATABASE \"{{ db_name }}\" TEMPLATE \"template0\" ENCODING E'LATIN1' LC_COLLATE E'pt_BR{{ locale_latin_suffix }}' LC_CTYPE E'es_ES{{ locale_latin_suffix }}' CONNECTION LIMIT 100"] + +# +# Test conn_limit, encoding, collate, ctype, icu_locale, icu_provider, template options +# +- block: + + - name: Create a DB with conn_limit, encoding, collate, ctype, icu_locale, locale_provider and template options + become_user: "{{ pg_user }}" + become: true + postgresql_db: + name: '{{ db_name_icu }}' + state: 'present' + conn_limit: '100' + encoding: 'LATIN1' + lc_collate: 'pt_BR{{ locale_latin_suffix }}' + lc_ctype: 'es_ES{{ locale_latin_suffix }}' + icu_locale: 'es_ES-x-icu' + locale_provider: 'icu' + template: 'template0' + login_user: "{{ pg_user }}" + register: result + + - assert: + that: + - result is changed + - result.executed_commands == ["CREATE DATABASE \"{{ db_name_icu }}\" TEMPLATE \"template0\" ENCODING 'LATIN1' LC_COLLATE 'pt_BR{{ locale_latin_suffix }}' LC_CTYPE 'es_ES{{ locale_latin_suffix }}' ICU_LOCALE 'es_ES-x-icu' LOCALE_PROVIDER 'icu' CONNECTION LIMIT 100"] or result.executed_commands == ["CREATE DATABASE \"{{ db_name_icu }}\" TEMPLATE \"template0\" ENCODING E'LATIN1' LC_COLLATE E'pt_BR{{ locale_latin_suffix }}' LC_CTYPE E'es_ES{{ locale_latin_suffix }}' ICU_LOCALE E'es_ES-x-icu' LOCALE_PROVIDER 'icu' CONNECTION LIMIT 100"] + + when: postgres_version_resp.stdout is version('15.0', '>=') + - name: Check that the DB has all of our options become_user: "{{ pg_user }}" become: true @@ -140,6 +169,29 @@ - "'en_US' not in result.stdout_lines[-2]" - "'100' in result.stdout_lines[-2]" +- block: + + - name: Check that the DB has all of our options including icu + become_user: "{{ pg_user 
}}" + become: true + shell: echo "select datname, datconnlimit, pg_encoding_to_char(encoding), datcollate, datctype, daticulocale, CASE datlocprovider WHEN 'i' THEN 'lib_icu' WHEN 'c' THEN 'libc' END AS localeprovider from pg_database where datname = '{{ db_name_icu }}';" | psql -d postgres + register: result + + - assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'LATIN1' in result.stdout_lines[-2]" + - "'pt_BR' in result.stdout_lines[-2]" + - "'es_ES' in result.stdout_lines[-2]" + - "'es_ES-x-icu' in result.stdout_lines[-2]" + - "'lib_icu' in result.stdout_lines[-2]" + - "'UTF8' not in result.stdout_lines[-2]" + - "'en_US' not in result.stdout_lines[-2]" + - "'100' in result.stdout_lines[-2]" + + when: postgres_version_resp.stdout is version('15.0', '>=') + + - name: Check that running db creation with options a second time does nothing become_user: "{{ pg_user }}" become: true @@ -159,6 +211,30 @@ - result is not changed +- block: + + - name: Check that running db creation with icu options a second time does nothing + become_user: "{{ pg_user }}" + become: true + postgresql_db: + name: '{{ db_name_icu }}' + state: 'present' + conn_limit: '100' + encoding: 'LATIN1' + lc_collate: 'pt_BR{{ locale_latin_suffix }}' + lc_ctype: 'es_ES{{ locale_latin_suffix }}' + icu_locale: 'es_ES-x-icu' + locale_provider: 'icu' + template: 'template0' + login_user: "{{ pg_user }}" + register: result + + - assert: + that: + - result is not changed + + when: postgres_version_resp.stdout is version('15.0', '>=') + - name: Check that attempting to change encoding returns an error become_user: "{{ pg_user }}" become: true @@ -177,6 +253,50 @@ that: - result is failed +- block: + + - name: Check that attempting to change icu collate returns an error + become_user: "{{ pg_user }}" + become: true + postgresql_db: + name: '{{ db_name_icu }}' + state: 'present' + encoding: 'LATIN1' + lc_collate: 'pt_BR{{ locale_utf8_suffix }}' + lc_ctype: 'es_ES{{ locale_utf8_suffix }}' + icu_locale: 'en_US-x-icu' + locale_provider: 'icu' + template: 'template0' + login_user: "{{ pg_user }}" + register: result + ignore_errors: true + + - assert: + that: + - result is failed + + - name: Check that attempting to change locale provider returns an error + become_user: "{{ pg_user }}" + become: true + postgresql_db: + name: '{{ db_name_icu }}' + state: 'present' + encoding: 'LATIN1' + lc_collate: 'pt_BR{{ locale_utf8_suffix }}' + lc_ctype: 'es_ES{{ locale_utf8_suffix }}' + icu_locale: 'es_ES-x-icu' + locale_provider: 'libc' + template: 'template0' + login_user: "{{ pg_user }}" + register: result + ignore_errors: true + + - assert: + that: + - result is failed + + when: postgres_version_resp.stdout is version('15.0', '>=') + - name: Check that changing the conn_limit actually works become_user: "{{ pg_user }}" become: true @@ -199,13 +319,15 @@ - name: Check that conn_limit has actually been set / updated to 200 become_user: "{{ pg_user }}" become: true - shell: echo "SELECT datconnlimit AS conn_limit FROM pg_database WHERE datname = '{{ db_name }}';" | psql -d postgres + postgresql_query: + login_db: postgres + query: "SELECT datconnlimit AS conn_limit FROM pg_database WHERE datname = '{{ db_name }}'" register: result - assert: that: - - "result.stdout_lines[-1] == '(1 row)'" - - "'200' == '{{ result.stdout_lines[-2] | trim }}'" + - result.rowcount == 1 + - result.query_result[0]['conn_limit'] == 200 - name: Cleanup test DB become_user: "{{ pg_user }}" @@ -215,14 +337,43 @@ state: 'absent' login_user: "{{ pg_user }}" -- 
shell: echo "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql -d postgres +- name: Check become_user: "{{ pg_user }}" become: true + postgresql_query: + login_db: postgres + query: "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}'" register: result - assert: that: - - "result.stdout_lines[-1] == '(0 rows)'" + - result.rowcount == 0 + +- block: + + - name: Cleanup icu test DB + become_user: "{{ pg_user }}" + become: true + postgresql_db: + name: '{{ db_name_icu }}' + state: 'absent' + login_user: "{{ pg_user }}" + + - name: Check icu test DB was removed + become_user: "{{ pg_user }}" + become: true + postgresql_query: + login_db: postgres + query: "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name_icu }}'" + register: result + + - assert: + that: + - result.rowcount == 0 + + when: postgres_version_resp.stdout is version('15.0', '>=') + + # # Test db ownership @@ -320,13 +471,15 @@ - name: Check that the user owns the newly created DB become_user: "{{ pg_user }}" become: true - shell: echo "select pg_catalog.pg_get_userbyid(datdba) from pg_catalog.pg_database where datname = '{{ db_name }}';" | psql -d postgres + postgresql_query: + login_db: postgres + query: "select pg_catalog.pg_get_userbyid(datdba) from pg_catalog.pg_database where datname = '{{ db_name }}'" register: result - assert: that: - - "result.stdout_lines[-1] == '(1 row)'" - - "'{{ pg_user }}' == '{{ result.stdout_lines[-2] | trim }}'" + - result.rowcount == 1 + - result.query_result[0]['pg_get_userbyid'] == '{{ pg_user }}' - name: Cleanup db become_user: "{{ pg_user }}" diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml index 3e3eeda83..8a7afa8ca 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml @@ -33,7 +33,7 @@ - assert: that: - result is changed - - result.queries == [] + - result.queries == ['CREATE EXTENSION "postgis"'] - name: postgresql_ext - check that extension doesn't exist after the previous step become_user: '{{ pg_user }}' @@ -55,13 +55,14 @@ login_db: postgres login_port: 5432 name: postgis + comment: Test comment 1 ignore_errors: true register: result - assert: that: - result is changed - - result.queries == ['CREATE EXTENSION "postgis"'] + - result.queries == ['CREATE EXTENSION "postgis"', "COMMENT ON EXTENSION \"postgis\" IS 'Test comment 1'"] - name: postgresql_ext - check that extension exists after the previous step become_user: '{{ pg_user }}' @@ -76,6 +77,127 @@ that: - result.rowcount == 1 +- name: Check the comment + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: "SELECT obj_description((SELECT oid FROM pg_catalog.pg_extension WHERE extname = 'postgis'), 'pg_extension') AS comment" + register: result + +- name: Check the comments match + assert: + that: + - result.query_result[0]['comment'] == "Test comment 1" + + +- name: Now after the comment was set, invoke again not pass the comment explicitly + become_user: '{{ pg_user }}' + become: true + postgresql_ext: + login_db: 
postgres + login_port: 5432 + name: postgis + ignore_errors: true + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: Check the comment didn't change + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: "SELECT obj_description((SELECT oid FROM pg_catalog.pg_extension WHERE extname = 'postgis'), 'pg_extension') AS comment" + register: result + +- name: Check the comments match + assert: + that: + - result.query_result[0]['comment'] == "Test comment 1" + + +- name: Reset the comment in check mode + become_user: '{{ pg_user }}' + become: true + postgresql_ext: + login_db: postgres + login_port: 5432 + name: postgis + comment: '' + ignore_errors: true + register: result + check_mode: true + +- assert: + that: + - result is changed + - result.queries == ["COMMENT ON EXTENSION \"postgis\" IS ''"] + +- name: Check the comment didn't change + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: "SELECT obj_description((SELECT oid FROM pg_catalog.pg_extension WHERE extname = 'postgis'), 'pg_extension') AS comment" + register: result + +- name: Check the comments match + assert: + that: + - result.query_result[0]['comment'] == "Test comment 1" + + +- name: Reset the comment in real mode + become_user: '{{ pg_user }}' + become: true + postgresql_ext: + login_db: postgres + login_port: 5432 + name: postgis + comment: '' + ignore_errors: true + register: result + +- assert: + that: + - result is changed + - result.queries == ["COMMENT ON EXTENSION \"postgis\" IS ''"] + +- name: Check the comment changed + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: "SELECT obj_description((SELECT oid FROM pg_catalog.pg_extension WHERE extname = 'postgis'), 'pg_extension') AS comment" + register: result + +- name: Check the comments match + assert: + that: + - result.query_result[0]['comment'] == None + + +- name: Reset the comment again + become_user: '{{ pg_user }}' + become: true + postgresql_ext: + login_db: postgres + login_port: 5432 + name: postgis + comment: '' + ignore_errors: true + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + + - name: postgresql_ext - drop extension postgis become_user: '{{ pg_user }}' become: true diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml index 2443fe785..ca255a87a 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml @@ -14,15 +14,19 @@ login_db: postgres block: - # Preparation: + ######## Preparation ######## + - name: postgresql_ext_version - create schema schema1 <<: *task_parameters postgresql_schema: <<: *pg_parameters name: "{{ test_schema }}" - # Do tests: - - name: postgresql_ext_version - create extension of specific version, check mode + ######## Do tests ######## + + #### create extension with specific version #### + + - name: postgresql_ext_version - create extension of specific version in check_mode <<: *task_parameters postgresql_ext: <<: *pg_parameters @@ -35,6 +39,9 @@ - assert: that: - result is changed + - result.queries == ["CREATE EXTENSION \"{{ test_ext 
}}\" WITH SCHEMA \"{{ test_schema }}\" VERSION '1.0'"] + - result.prev_version == '' + - result.version == '1.0' - name: postgresql_ext_version - check that nothing was actually changed <<: *task_parameters @@ -59,6 +66,8 @@ that: - result is changed - result.queries == ["CREATE EXTENSION \"{{ test_ext }}\" WITH SCHEMA \"{{ test_schema }}\" VERSION '1.0'"] + - result.prev_version == '' + - result.version == '1.0' - name: postgresql_ext_version - check <<: *task_parameters @@ -83,6 +92,9 @@ - assert: that: - result is not changed + - result.queries == [] + - result.prev_version == '1.0' + - result.version == result.prev_version - name: postgresql_ext_version - check <<: *task_parameters @@ -106,6 +118,9 @@ - assert: that: - result is not changed + - result.queries == [] + - result.prev_version == '1.0' + - result.version == result.prev_version - name: postgresql_ext_version - check <<: *task_parameters @@ -117,6 +132,8 @@ that: - result.rowcount == 1 + #### update the extension to the next version #### + - name: postgresql_ext_version - update the extension to the next version in check_mode <<: *task_parameters postgresql_ext: @@ -130,6 +147,9 @@ - assert: that: - result is changed + - result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE TO '2.0'"] + - result.prev_version == '1.0' + - result.version == '2.0' - name: postgresql_ext_version - check, the version must be 1.0 <<: *task_parameters @@ -154,6 +174,8 @@ that: - result is changed - result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE TO '2.0'"] + - result.prev_version == '1.0' + - result.version == '2.0' - name: postgresql_ext_version - check, the version must be 2.0 <<: *task_parameters @@ -165,6 +187,24 @@ that: - result.rowcount == 1 + #### check no change if extension installed but no version specified #### + + - name: postgresql_ext_version - check that version won't be changed if version won't be passed in check_mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + trust_input: false + check_mode: true + + - assert: + that: + - result is not changed + - result.queries == [] + - result.prev_version == '2.0' + - result.version == result.prev_version + - name: postgresql_ext_version - check that version won't be changed if version won't be passed <<: *task_parameters postgresql_ext: @@ -176,6 +216,38 @@ - assert: that: - result is not changed + - result.queries == [] + - result.prev_version == '2.0' + - result.version == result.prev_version + + - name: postgresql_ext_version - check, the version must be 2.0 + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '2.0'" + + - assert: + that: + - result.rowcount == 1 + + #### update the extension to the latest version #### + + - name: postgresql_ext_version - update the extension to the latest version in check_mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: latest + trust_input: false + check_mode: true + + - assert: + that: + - result is changed + - result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE"] + - result.prev_version == '2.0' + - result.version == '4.0' - name: postgresql_ext_version - check, the version must be 2.0 <<: *task_parameters @@ -200,6 +272,8 @@ that: - result is changed - result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE"] + - result.prev_version == '2.0' + - result.version == 
'4.0' - name: postgresql_ext_version - check <<: *task_parameters @@ -211,7 +285,7 @@ that: - result.rowcount == 1 - - name: postgresql_ext_version - try to update the extension to the latest version again which always runs an update. + - name: postgresql_ext_version - update the extension to the latest version again in check_mode <<: *task_parameters postgresql_ext: <<: *pg_parameters @@ -219,12 +293,16 @@ schema: "{{ test_schema }}" version: latest trust_input: false + check_mode: true - assert: that: - - result is changed + - result is not changed + - result.queries == [] + - result.prev_version == '4.0' + - result.version == result.prev_version - - name: postgresql_ext_version - check that version number did not change even though update ran + - name: postgresql_ext_version - check, the version must be 4.0 (latest) <<: *task_parameters postgresql_query: <<: *pg_parameters @@ -234,6 +312,34 @@ that: - result.rowcount == 1 + - name: postgresql_ext_version - update the extension to the latest version again + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: latest + trust_input: false + + - assert: + that: + - result is not changed + - result.queries == [] + - result.prev_version == '4.0' + - result.version == result.prev_version + + - name: postgresql_ext_version - check, the version must be 4.0 (latest) + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '4.0'" + + - assert: + that: + - result.rowcount == 1 + + #### downgrade the extension version #### + - name: postgresql_ext_version - try to downgrade the extension version, must fail <<: *task_parameters postgresql_ext: @@ -248,6 +354,8 @@ that: - result.failed == true + #### drop extension #### + - name: postgresql_ext_version - drop the extension in check_mode <<: *task_parameters postgresql_ext: @@ -260,6 +368,9 @@ - assert: that: - result is changed + - result.queries == ["DROP EXTENSION \"{{ test_ext }}\""] + - result.prev_version == '4.0' + - result.version == '' - name: postgresql_ext_version - check that extension exists <<: *task_parameters @@ -282,6 +393,9 @@ - assert: that: - result is changed + - result.queries == ["DROP EXTENSION \"{{ test_ext }}\""] + - result.prev_version == '4.0' + - result.version == '' - name: postgresql_ext_version - check that extension doesn't exist after the prev step <<: *task_parameters @@ -293,6 +407,22 @@ that: - result.rowcount == 0 + - name: postgresql_ext_version - try to drop the non-existent extension again in check_mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + state: absent + trust_input: false + check_mode: true + + - assert: + that: + - result is not changed + - result.queries == [] + - result.prev_version == '' + - result.version == result.prev_version + - name: postgresql_ext_version - try to drop the non-existent extension again <<: *task_parameters postgresql_ext: @@ -304,6 +434,26 @@ - assert: that: - result is not changed + - result.queries == [] + - result.prev_version == '' + - result.version == result.prev_version + + #### create extension without specify version #### + + - name: postgresql_ext_version - create the extension without passing version in check_mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + trust_input: false + check_mode: true + + - assert: + that: + - result is changed + - result.queries == 
["CREATE EXTENSION \"{{ test_ext }}\""] + - result.prev_version == '' + - result.version == '4.0' - name: postgresql_ext_version - create the extension without passing version <<: *task_parameters @@ -316,6 +466,8 @@ that: - result is changed - result.queries == ["CREATE EXTENSION \"{{ test_ext }}\""] + - result.prev_version == '' + - result.version == '4.0' - name: postgresql_ext_version - check <<: *task_parameters @@ -327,6 +479,37 @@ that: - result.rowcount == 1 + - name: postgresql_ext_version - create the extension without passing version again in check_mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + trust_input: false + check_mode: true + + - assert: + that: + - result is not changed + - result.queries == [] + - result.prev_version == '4.0' + - result.version == result.prev_version + + - name: postgresql_ext_version - create the extension without passing version again + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + trust_input: false + + - assert: + that: + - result is not changed + - result.queries == [] + - result.prev_version == '4.0' + - result.version == result.prev_version + + #### create non existent extension #### + - name: postgresql_ext_version - try to install non-existent extension <<: *task_parameters postgresql_ext: diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml index 7a8fe2a37..af2269ffe 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml @@ -11,3 +11,8 @@ test_db: acme_db test_subscription: test test_subscription2: test2 conn_timeout: 100 + +primary_port: 5432 + +# The info module tests require a replica db to test subscriptions info +replica_db_required: true diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml index d72e4d23c..4ce5a5837 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml @@ -1,2 +1,2 @@ dependencies: - - setup_postgresql_replication + - setup_postgresql_db diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml index 04c7788ad..0e3d5d163 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml @@ -5,8 +5,10 @@ # For testing getting publication and subscription info - import_tasks: setup_publication.yml - when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18' + when: + - ansible_distribution_major_version != "7" # CentOS 7 with Postgres 9.2 doesn't support logical replication # Initial CI tests of postgresql_info module - import_tasks: postgresql_info_initial.yml - when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18' + when: + - 
ansible_distribution_major_version != "7" # CentOS 7 with Postgres 9.2 doesn't support logical replication diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml index 6dfe50542..be110ff9c 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml @@ -126,16 +126,24 @@ - result.tablespaces - result.roles + - name: Set full server version as X.Y.Z + set_fact: + version_full: '{{ result.version.major }}.{{ result.version.minor }}.{{ result.version.patch }}' + when: result.version.major == 9 + + - name: Set full server version as X.Y + set_fact: + version_full: '{{ result.version.major }}.{{ result.version.minor }}' + when: result.version.major >= 10 + - assert: that: - result.version.patch != {} - - result.version.full == '{{ result.version.major }}.{{ result.version.minor }}.{{ result.version.patch }}' when: result.version.major == 9 - assert: that: - - result.version.full == '{{ result.version.major }}.{{ result.version.minor }}' - when: result.version.major >= 10 + - result.version.full == version_full - name: postgresql_info - check filter param passed by list <<: *task_parameters diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/aliases deleted file mode 100644 index a4c92ef85..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/aliases +++ /dev/null @@ -1,2 +0,0 @@ -destructive -shippable/posix/group1 diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/meta/main.yml deleted file mode 100644 index 4ce5a5837..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - setup_postgresql_db diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/main.yml deleted file mode 100644 index 799501432..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ -#################################################################### -# WARNING: These are designed specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # -#################################################################### - -- name: Include distribution specific variables - include_vars: "{{ lookup('first_found', params) }}" - vars: - params: - files: - - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml" - - default.yml - paths: - - vars - -# Only run on CentOS 7 because there is a stack trace on CentOS 8 because the module -# is looking for the incorrect version of plpython. 
-# https://gist.github.com/samdoran/8fc1b4ae834d3e66d1895d087419b8d8 -- name: Initial CI tests of postgresql_lang module - when: - - ansible_facts.distribution == 'CentOS' - - ansible_facts.distribution_major_version is version ('7', '==') - block: - - include_tasks: postgresql_lang_initial.yml - - include_tasks: postgresql_lang_add_owner_param.yml diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml deleted file mode 100644 index a08ff82f2..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -- vars: - test_user1: alice - test_user2: bob - test_lang: plperl - non_existent_role: fake_role - task_parameters: &task_parameters - become_user: '{{ pg_user }}' - become: true - register: result - pg_parameters: &pg_parameters - login_user: '{{ pg_user }}' - login_db: postgres - - block: - - name: Create roles for tests - <<: *task_parameters - postgresql_user: - <<: *pg_parameters - name: '{{ item }}' - loop: - - '{{ test_user1 }}' - - '{{ test_user2 }}' - - - name: Create lang with owner in check_mode - <<: *task_parameters - postgresql_lang: - <<: *pg_parameters - name: '{{ test_lang }}' - owner: '{{ test_user1 }}' - trust_input: false - check_mode: true - - - assert: - that: - - result is changed - - result.queries == [] - - - name: Check that nothing was actually changed - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: > - SELECT r.rolname FROM pg_language l - JOIN pg_roles r ON l.lanowner = r.oid - WHERE l.lanname = '{{ test_lang }}' - AND r.rolname = '{{ test_user1 }}' - - - assert: - that: - - result.rowcount == 0 - - - name: Create lang with owner - <<: *task_parameters - postgresql_lang: - <<: *pg_parameters - name: '{{ test_lang }}' - owner: '{{ test_user1 }}' - trust_input: false - - - assert: - that: - - result is changed - - result.queries == ['CREATE LANGUAGE "{{ test_lang }}"', 'ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user1 }}"'] - - - name: Check - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: > - SELECT r.rolname FROM pg_language l - JOIN pg_roles r ON l.lanowner = r.oid - WHERE l.lanname = '{{ test_lang }}' - AND r.rolname = '{{ test_user1 }}' - - - assert: - that: - - result.rowcount == 1 - - - name: Change lang owner in check_mode - <<: *task_parameters - postgresql_lang: - <<: *pg_parameters - name: '{{ test_lang }}' - owner: '{{ test_user2 }}' - trust_input: true - check_mode: true - - - assert: - that: - - result is changed - - result.queries == ['ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user2 }}"'] - - - name: Check that nothing was actually changed - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: > - SELECT r.rolname FROM pg_language l - JOIN pg_roles r ON l.lanowner = r.oid - WHERE l.lanname = '{{ test_lang }}' - AND r.rolname = '{{ test_user2 }}' - - - assert: - that: - - result.rowcount == 0 - - - name: Change lang owner - <<: *task_parameters - postgresql_lang: - <<: *pg_parameters - name: '{{ test_lang }}' - owner: '{{ test_user2 }}' - - - assert: - that: - - result is changed - 
# TODO: the first elem of the returned list below - # looks like a bug, not related with the option owner, needs to be checked - - result.queries == ["UPDATE pg_language SET lanpltrusted = false WHERE lanname = '{{ test_lang }}'", 'ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user2 }}"'] - - - name: Check - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: > - SELECT r.rolname FROM pg_language l - JOIN pg_roles r ON l.lanowner = r.oid - WHERE l.lanname = '{{ test_lang }}' - AND r.rolname = '{{ test_user2 }}' - - - assert: - that: - - result.rowcount == 1 - - - name: Try to change lang owner again to the same role - <<: *task_parameters - postgresql_lang: - <<: *pg_parameters - name: '{{ test_lang }}' - owner: '{{ test_user2 }}' - - - assert: - that: - - result is not changed - - result.queries == [] - - - name: Check - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: > - SELECT r.rolname FROM pg_language l - JOIN pg_roles r ON l.lanowner = r.oid - WHERE l.lanname = '{{ test_lang }}' - AND r.rolname = '{{ test_user2 }}' - - - assert: - that: - - result.rowcount == 1 - - - name: Drop test lang with owner, must ignore - <<: *task_parameters - postgresql_lang: - <<: *pg_parameters - name: '{{ test_lang }}' - state: absent - owner: '{{ non_existent_role }}' - - - assert: - that: - - result is changed - - result.queries == ["DROP LANGUAGE \"{{ test_lang }}\""] - - - name: Check - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: > - SELECT r.rolname FROM pg_language l - JOIN pg_roles r ON l.lanowner = r.oid - WHERE l.lanname = '{{ test_lang }}' - - - assert: - that: - - result.rowcount == 0 - - # Clean up - - name: Drop test roles - <<: *task_parameters - postgresql_user: - <<: *pg_parameters - name: '{{ item }}' - state: absent - loop: - - '{{ test_user1 }}' - - '{{ test_user2 }}' diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml deleted file mode 100644 index 1d24778b4..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Preparation for tests: -- name: Install PostgreSQL support packages - become: true - action: "{{ ansible_facts.pkg_mgr }}" - args: - name: "{{ postgresql_lang_packages }}" - state: present - -############### -# Do main tests -# - -# Create language in check_mode: -- name: postgresql_lang - create plperl in check_mode - become_user: "{{ pg_user }}" - become: true - postgresql_lang: - db: postgres - login_user: "{{ pg_user }}" - name: plperl - register: result - ignore_errors: true - check_mode: true - -- assert: - that: - - result is changed - - result.queries == [] - -- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0 - become_user: "{{ pg_user }}" - become: true - postgresql_query: - db: postgres - login_user: "{{ pg_user }}" - query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'" - register: result - -- assert: - that: - - result.rowcount == 0 - -# Create language: -- name: postgresql_lang - create plperl - become_user: "{{ pg_user }}" - become: true - postgresql_lang: - db: 
postgres - login_user: "{{ pg_user }}" - name: plperl - register: result - ignore_errors: true - -- assert: - that: - - result is changed - - result.queries == ['CREATE LANGUAGE "plperl"'] - -- name: postgresql_lang - check that lang exists after previous step - become_user: "{{ pg_user }}" - become: true - postgresql_query: - db: postgres - login_user: "{{ pg_user }}" - query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'" - register: result - -- assert: - that: - - result.rowcount == 1 - -# Drop language in check_mode: -- name: postgresql_lang - drop plperl in check_mode - become_user: "{{ pg_user }}" - become: true - postgresql_lang: - db: postgres - login_user: "{{ pg_user }}" - name: plperl - state: absent - register: result - ignore_errors: true - check_mode: true - -- assert: - that: - - result is changed - - result.queries == [] - -- name: postgresql_lang - check that lang exists after previous step, rowcount must be 1 - become_user: "{{ pg_user }}" - become: true - postgresql_query: - db: postgres - login_user: "{{ pg_user }}" - query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'" - register: result - -- assert: - that: - - result.rowcount == 1 - -# Drop language: -- name: postgresql_lang - drop plperl - become_user: "{{ pg_user }}" - become: true - postgresql_lang: - db: postgres - login_user: "{{ pg_user }}" - name: plperl - state: absent - register: result - ignore_errors: true - -- assert: - that: - - result is changed - - result.queries == ['DROP LANGUAGE "plperl"'] - -- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0 - become_user: "{{ pg_user }}" - become: true - postgresql_query: - db: postgres - login_user: "{{ pg_user }}" - query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'" - register: result - -- assert: - that: - - result.rowcount == 0 - -# Check fail_on_drop true -- name: postgresql_lang - drop c language to check fail_on_drop true - become_user: "{{ pg_user }}" - become: true - postgresql_lang: - db: postgres - login_user: "{{ pg_user }}" - name: c - state: absent - fail_on_drop: true - register: result - ignore_errors: true - -- assert: - that: - - result.failed == true - -# Check fail_on_drop no -- name: postgresql_lang - drop c language to check fail_on_drop no - become_user: "{{ pg_user }}" - become: true - postgresql_lang: - db: postgres - login_user: "{{ pg_user }}" - name: c - state: absent - fail_on_drop: false - register: result - ignore_errors: true - -- assert: - that: - - result.failed == false - -# Create trusted language: -- name: postgresql_lang - create plpythonu - become_user: "{{ pg_user }}" - become: true - postgresql_lang: - db: postgres - login_user: "{{ pg_user }}" - name: plpythonu - trust: true - force_trust: true - register: result - ignore_errors: true - -- assert: - that: - - result is changed - - result.queries == ['CREATE TRUSTED LANGUAGE "plpythonu"', "UPDATE pg_language SET lanpltrusted = true WHERE lanname = 'plpythonu'"] - -- name: postgresql_lang - check that lang exists and it's trusted after previous step - become_user: "{{ pg_user }}" - become: true - postgresql_query: - db: postgres - login_user: "{{ pg_user }}" - query: "SELECT 1 FROM pg_language WHERE lanname = 'plpythonu' AND lanpltrusted = 't'" - register: result - -- assert: - that: - - result.rowcount == 1 - -# Drop language cascade, tests of aliases: -- name: postgresql_lang - drop plpythonu cascade - become_user: "{{ pg_user }}" - become: true - postgresql_lang: - login_db: postgres - login_user: "{{ 
pg_user }}" - login_port: 5432 - lang: plpythonu - state: absent - cascade: true - register: result - ignore_errors: true - -- assert: - that: - - result is changed - - result.queries == ['DROP LANGUAGE "plpythonu" CASCADE'] - -- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0 - become_user: "{{ pg_user }}" - become: true - postgresql_query: - db: postgres - login_user: "{{ pg_user }}" - query: "SELECT 1 FROM pg_language WHERE lanname = 'plpythonu'" - register: result - -- assert: - that: - - result.rowcount == 0 diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml deleted file mode 100644 index 8d4bcc7e2..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml +++ /dev/null @@ -1,3 +0,0 @@ -postgresql_lang_packages: - - postgresql-plperl - - postgresql-plpython diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml deleted file mode 100644 index 5da004c8f..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml +++ /dev/null @@ -1,3 +0,0 @@ -postgresql_lang_packages: - - postgresql-plperl - - postgresql-plpython3 diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml index a21160282..a8e1a9173 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml @@ -2,9 +2,13 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) #################### -# Prepare for tests: -# Create test roles: +# +# Prepare for tests +# + +# Create test roles + - name: postgresql_owner - create test roles become_user: '{{ pg_user }}' become: true @@ -17,6 +21,21 @@ - alice - bob +- name: postgresql_owner - create test roles superuser + become_user: '{{ pg_user }}' + become: true + postgresql_user: + login_user: '{{ pg_user }}' + db: postgres + name: '{{ item }}' + role_attr_flags: SUPERUSER + ignore_errors: true + with_items: + - alice_super + - bob_super + +# Create test database + - name: postgresql_owner - create test database become_user: '{{ pg_user }}' become: true @@ -24,13 +43,15 @@ login_user: '{{ pg_user }}' db: acme +# Create test table + - name: postgresql_owner - create test table become_user: '{{ pg_user }}' become: true postgresql_query: login_user: '{{ pg_user }}' db: acme - query: CREATE TABLE my_table (id int) + query: CREATE TABLE test_table (id int) - name: postgresql_owner - set owner become_user: '{{ pg_user }}' @@ -39,9 +60,11 @@ login_user: '{{ pg_user }}' db: acme new_owner: bob - obj_name: my_table + obj_name: test_table obj_type: table +# Create test sequence + - name: postgresql_owner - create test sequence become_user: '{{ pg_user }}' become: true @@ -50,6 +73,8 @@ db: acme query: CREATE SEQUENCE test_seq +# Create test function + - name: postgresql_owner - create test 
function become_user: '{{ pg_user }}' become: true @@ -57,9 +82,11 @@ login_user: '{{ pg_user }}' db: acme query: > - CREATE FUNCTION increment(integer) RETURNS integer AS 'select $1 + 1;' + CREATE FUNCTION test_function(integer) RETURNS integer AS 'select $1 + 1;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; +# Create test schema + - name: postgresql_owner - create test schema become_user: '{{ pg_user }}' become: true @@ -68,13 +95,17 @@ db: acme query: CREATE SCHEMA test_schema +# Create test view + - name: postgresql_owner - create test view become_user: '{{ pg_user }}' become: true postgresql_query: login_user: '{{ pg_user }}' db: acme - query: CREATE VIEW test_view AS SELECT * FROM my_table + query: CREATE VIEW test_view AS SELECT * FROM test_table + +# Create test materialized view - name: postgresql_owner - create test materialized view become_user: '{{ pg_user }}' @@ -82,9 +113,11 @@ postgresql_query: login_user: '{{ pg_user }}' db: acme - query: CREATE MATERIALIZED VIEW test_mat_view AS SELECT * FROM my_table + query: CREATE MATERIALIZED VIEW test_mat_view AS SELECT * FROM test_table when: postgres_version_resp.stdout is version('9.4', '>=') +# Create test materialized tablespace + - name: postgresql_owner - drop dir for test tablespace become: true file: @@ -119,12 +152,212 @@ owner: alice location: '{{ test_tablespace_path }}' +# Create test procedure + +- name: postgresql_owner - create test procedure + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE OR REPLACE PROCEDURE test_procedure(id integer) + LANGUAGE SQL + AS $$ + INSERT INTO test_table VALUES (id); + $$ + when: postgres_version_resp.stdout is version('11', '>=') + +# Create test type + +- name: postgresql_owner - create test type + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: CREATE TYPE test_type AS ENUM ('new', 'open', 'closed') + +# Create test aggregate + +- name: postgresql_owner - create test aggregate + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE AGGREGATE test_aggregate (float8) + ( + sfunc = float8_accum, + stype = float8[], + finalfunc = float8_avg, + initcond = '{0,0,0}' + ) + +# Create test routine + +- name: postgresql_owner - create test routine + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE FUNCTION test_routine(integer) RETURNS integer AS 'select $1 + 1;' + LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT + +# No need to create test language as 'plpgsql' is present by default + +# Create test domain + +- name: postgresql_owner - create test domain + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE DOMAIN test_domain AS TEXT + CHECK( + VALUE ~ '^\d{5}$' + OR VALUE ~ '^\d{5}-\d{4}$' + ) + +# Create test collation + +- name: postgresql_owner - create test collation + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE COLLATION test_collation (locale = 'en_US.utf8') + +# No need to create test conversion as 'windows_1256_to_utf8' is present by default + +# No need to create test test search configuration as 'simple' is present by default + +# No need to create test test search dict as 'simple' is present by default + +# Create test foreign data wrapper 
+ +- name: postgresql_owner - create test foreign data wrapper + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE FOREIGN DATA WRAPPER test_foreign_data_wrapper + +# Create test server + +- name: postgresql_owner - create test server + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE SERVER test_server FOREIGN DATA WRAPPER test_foreign_data_wrapper + +# Create test foreign table + +- name: postgresql_owner - create test foreign table + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE FOREIGN TABLE test_foreign_table (id int) SERVER test_server + +# Create test event trigger + +- name: postgresql_owner - create test event trigger function + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE FUNCTION test_event_trigger_function() + RETURNS event_trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_is_in_recovery(); + END; + $$ + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - create test event trigger + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE EVENT TRIGGER test_event_trigger ON ddl_command_start + EXECUTE FUNCTION test_event_trigger_function() + when: postgres_version_resp.stdout is version('11', '>=') + +# Create test large object + +- name: postgresql_owner - create test large object + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + SELECT lo_creat(-1); + register: result_large_object + +- name: postgresql_owner - parse test large object OID + set_fact: + test_large_object: "{{ result_large_object.query_result[0]['lo_creat'] }}" + +# Create test publication + +- name: postgresql_owner - create test publication + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE PUBLICATION test_publication FOR TABLE test_table + +# Create test statistics + +- name: postgresql_owner - create test statistics table + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE TABLE test_statistics_table (a int, b int) + +- name: postgresql_owner - create test statistics + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE STATISTICS test_statistics (dependencies) ON a, b FROM test_statistics_table + ################ # Do main tests: # # check reassign_owned_by param # + # try to reassign ownership to non existent user: - name: postgresql_owner - reassign_owned_by to non existent user become_user: '{{ pg_user }}' @@ -179,7 +412,7 @@ db: acme login_user: '{{ pg_user }}' query: > - SELECT 1 FROM pg_tables WHERE tablename = 'my_table' + SELECT 1 FROM pg_tables WHERE tablename = 'test_table' AND tableowner = 'alice' ignore_errors: true register: result @@ -210,7 +443,7 @@ postgresql_query: db: acme login_user: '{{ pg_user }}' - query: SELECT 1 FROM pg_tables WHERE tablename = 'my_table' AND tableowner = 'alice' + query: SELECT 1 FROM pg_tables WHERE tablename = 'test_table' AND tableowner = 'alice' ignore_errors: true register: result @@ -260,7 +493,9 @@ # # ############################# -# check_mode obj_type: 
database + +# Test obj_type: database + - name: postgresql_owner - set db owner in check_mode become_user: '{{ pg_user }}' become: true @@ -358,6 +593,8 @@ that: - result.rowcount == 1 +# Test obj_type: table + - name: postgresql_owner - set table owner in check_mode become_user: '{{ pg_user }}' become: true @@ -365,7 +602,7 @@ login_user: '{{ pg_user }}' db: acme new_owner: bob - obj_name: my_table + obj_name: test_table obj_type: table check_mode: true register: result @@ -373,7 +610,7 @@ - assert: that: - result is changed - - result.queries == ['ALTER TABLE "my_table" OWNER TO "bob"'] + - result.queries == ['ALTER TABLE "test_table" OWNER TO "bob"'] - name: postgresql_owner - check that nothing changed after the previous step become_user: '{{ pg_user }}' @@ -382,7 +619,7 @@ db: acme login_user: '{{ pg_user }}' query: > - SELECT 1 FROM pg_tables WHERE tablename = 'my_table' + SELECT 1 FROM pg_tables WHERE tablename = 'test_table' AND tableowner = 'bob' ignore_errors: true register: result @@ -391,21 +628,21 @@ that: - result.rowcount == 0 -- name: postgresql_owner - set db owner +- name: postgresql_owner - set table owner become_user: '{{ pg_user }}' become: true postgresql_owner: login_user: '{{ pg_user }}' db: acme new_owner: bob - obj_name: my_table + obj_name: test_table obj_type: table register: result - assert: that: - result is changed - - result.queries == ['ALTER TABLE "my_table" OWNER TO "bob"'] + - result.queries == ['ALTER TABLE "test_table" OWNER TO "bob"'] - name: postgresql_owner - check that table owner has been changed after the previous step become_user: '{{ pg_user }}' @@ -414,7 +651,7 @@ db: acme login_user: '{{ pg_user }}' query: > - SELECT 1 FROM pg_tables WHERE tablename = 'my_table' + SELECT 1 FROM pg_tables WHERE tablename = 'test_table' AND tableowner = 'bob' ignore_errors: true register: result @@ -423,14 +660,14 @@ that: - result.rowcount == 1 -- name: postgresql_owner - set db owner again +- name: postgresql_owner - set table owner again become_user: '{{ pg_user }}' become: true postgresql_owner: login_user: '{{ pg_user }}' db: acme new_owner: bob - obj_name: my_table + obj_name: test_table obj_type: table register: result @@ -446,7 +683,7 @@ db: acme login_user: '{{ pg_user }}' query: > - SELECT 1 FROM pg_tables WHERE tablename = 'my_table' + SELECT 1 FROM pg_tables WHERE tablename = 'test_table' AND tableowner = 'bob' ignore_errors: true register: result @@ -455,6 +692,8 @@ that: - result.rowcount == 1 +# Test obj_type: sequence + - name: postgresql_owner - set sequence owner in check_mode become_user: '{{ pg_user }}' become: true @@ -489,7 +728,7 @@ that: - result.rowcount == 0 -- name: postgresql_owner - set db owner +- name: postgresql_owner - set sequence owner become_user: '{{ pg_user }}' become: true postgresql_owner: @@ -522,7 +761,7 @@ that: - result.rowcount == 1 -- name: postgresql_owner - set db owner again +- name: postgresql_owner - set sequence owner again become_user: '{{ pg_user }}' become: true postgresql_owner: @@ -555,6 +794,8 @@ that: - result.rowcount == 1 +# Test obj_type: function + - name: postgresql_owner - set function owner in check_mode become_user: '{{ pg_user }}' become: true @@ -562,7 +803,7 @@ login_user: '{{ pg_user }}' db: acme new_owner: bob - obj_name: increment + obj_name: test_function obj_type: function check_mode: true register: result @@ -571,7 +812,7 @@ - assert: that: - result is changed - - result.queries == ['ALTER FUNCTION increment OWNER TO "bob"'] + - result.queries == ['ALTER FUNCTION test_function OWNER TO 
"bob"'] when: postgres_version_resp.stdout is version('10', '>=') - name: postgresql_owner - check that nothing changed after the previous step @@ -582,7 +823,7 @@ login_user: '{{ pg_user }}' query: > SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r - ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob' + ON f.proowner = r.oid WHERE f.proname = 'test_function' AND r.rolname = 'bob' ignore_errors: true register: result when: postgres_version_resp.stdout is version('10', '>=') @@ -592,14 +833,14 @@ - result.rowcount == 0 when: postgres_version_resp.stdout is version('10', '>=') -- name: postgresql_owner - set func owner +- name: postgresql_owner - set function owner become_user: '{{ pg_user }}' become: true postgresql_owner: login_user: '{{ pg_user }}' db: acme new_owner: bob - obj_name: increment + obj_name: test_function obj_type: function register: result when: postgres_version_resp.stdout is version('10', '>=') @@ -607,10 +848,10 @@ - assert: that: - result is changed - - result.queries == ['ALTER FUNCTION increment OWNER TO "bob"'] + - result.queries == ['ALTER FUNCTION test_function OWNER TO "bob"'] when: postgres_version_resp.stdout is version('10', '>=') -- name: postgresql_owner - check that func owner has been changed after the previous step +- name: postgresql_owner - check that function owner has been changed after the previous step become_user: '{{ pg_user }}' become: true postgresql_query: @@ -618,7 +859,7 @@ login_user: '{{ pg_user }}' query: > SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r - ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob' + ON f.proowner = r.oid WHERE f.proname = 'test_function' AND r.rolname = 'bob' ignore_errors: true register: result when: postgres_version_resp.stdout is version('10', '>=') @@ -628,14 +869,14 @@ - result.rowcount == 1 when: postgres_version_resp.stdout is version('10', '>=') -- name: postgresql_owner - set func owner again +- name: postgresql_owner - set function owner again become_user: '{{ pg_user }}' become: true postgresql_owner: login_user: '{{ pg_user }}' db: acme new_owner: bob - obj_name: increment + obj_name: test_function obj_type: function register: result when: postgres_version_resp.stdout is version('10', '>=') @@ -654,7 +895,7 @@ login_user: '{{ pg_user }}' query: > SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r - ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob' + ON f.proowner = r.oid WHERE f.proname = 'test_function' AND r.rolname = 'bob' ignore_errors: true register: result when: postgres_version_resp.stdout is version('10', '>=') @@ -664,6 +905,8 @@ - result.rowcount == 1 when: postgres_version_resp.stdout is version('10', '>=') +# Test obj_type: schema + - name: postgresql_owner - set schema owner in check_mode become_user: '{{ pg_user }}' become: true @@ -761,6 +1004,8 @@ that: - result.rowcount == 1 +# Test obj_type: view + - name: postgresql_owner - set view owner in check_mode become_user: '{{ pg_user }}' become: true @@ -852,6 +1097,8 @@ that: - result.rowcount == 1 +# Test obj_type: matview + - name: postgresql_owner - set matview owner in check_mode become_user: '{{ pg_user }}' become: true @@ -877,7 +1124,7 @@ postgresql_query: db: acme login_user: '{{ pg_user }}' - query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_view' AND matviewowner = 'bob' + query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_mat_view' AND matviewowner = 'bob' ignore_errors: true register: result when: postgres_version_resp.stdout is version('9.4', '>=') @@ 
-955,6 +1202,8 @@ - result.rowcount == 1 when: postgres_version_resp.stdout is version('9.4', '>=') +# Test obj_type: tablespace + - name: postgresql_owner - set tablespace owner in check_mode become_user: '{{ pg_user }}' become: true @@ -1052,9 +1301,1761 @@ that: - result.rowcount == 1 +# Test obj_type: procedure + +- name: postgresql_owner - set procedure owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_procedure + obj_type: procedure + check_mode: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER PROCEDURE "test_procedure" OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'test_procedure' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - set procedure owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_procedure + obj_type: procedure + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER PROCEDURE "test_procedure" OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that procedure owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'test_procedure' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - set procedure owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_procedure + obj_type: procedure + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that procedure owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'test_procedure' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('11', '>=') + +# Test obj_type: type + +- name: postgresql_owner - set type owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + 
new_owner: bob + obj_name: test_type + obj_type: type + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TYPE "test_type" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_type AS t JOIN pg_roles AS r + ON t.typowner = r.oid WHERE t.typname = 'test_type' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set type owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_type + obj_type: type + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TYPE "test_type" OWNER TO "bob"'] + +- name: postgresql_owner - check that type owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_type AS t JOIN pg_roles AS r + ON t.typowner = r.oid WHERE t.typname = 'test_type' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set type owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_type + obj_type: type + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that type owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_type AS t JOIN pg_roles AS r + ON t.typowner = r.oid WHERE t.typname = 'test_type' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: aggregate + +- name: postgresql_owner - set aggregate owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_aggregate(float8) + obj_type: aggregate + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER AGGREGATE test_aggregate(float8) OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'test_aggregate' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set aggregate owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_aggregate(float8) + obj_type: aggregate + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER AGGREGATE test_aggregate(float8) OWNER TO "bob"'] + +- name: postgresql_owner - check that aggregate owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON 
f.proowner = r.oid WHERE f.proname = 'test_aggregate' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set aggregate owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_aggregate(float8) + obj_type: aggregate + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that aggregate owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'test_aggregate' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: routine + +- name: postgresql_owner - set routine owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_routine + obj_type: routine + check_mode: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER ROUTINE "test_routine" OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'test_routine' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - set routine owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_routine + obj_type: routine + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER ROUTINE "test_routine" OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that routine owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'test_routine' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - set routine owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_routine + obj_type: routine + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that routine owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + 
login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'test_routine' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('11', '>=') + +# Test obj_type: language + +- name: postgresql_owner - set language owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: plpgsql + obj_type: language + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER LANGUAGE plpgsql OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_language AS l JOIN pg_roles AS r + ON l.lanowner = r.oid WHERE l.lanname = 'plpgsql' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set language owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: plpgsql + obj_type: language + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER LANGUAGE plpgsql OWNER TO "bob"'] + +- name: postgresql_owner - check that language owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_language AS l JOIN pg_roles AS r + ON l.lanowner = r.oid WHERE l.lanname = 'plpgsql' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set language owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: plpgsql + obj_type: language + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that language owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_language AS l JOIN pg_roles AS r + ON l.lanowner = r.oid WHERE l.lanname = 'plpgsql' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: domain + +- name: postgresql_owner - set domain owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_domain + obj_type: domain + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER DOMAIN "test_domain" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_type AS t JOIN pg_roles AS r + ON t.typowner = r.oid WHERE t.typname = 'test_domain' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set domain owner + become_user: '{{ pg_user }}' 
+ become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_domain + obj_type: domain + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER DOMAIN "test_domain" OWNER TO "bob"'] + +- name: postgresql_owner - check that domain owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_type AS t JOIN pg_roles AS r + ON t.typowner = r.oid WHERE t.typname = 'test_domain' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set domain owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_domain + obj_type: domain + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that domain owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_type AS t JOIN pg_roles AS r + ON t.typowner = r.oid WHERE t.typname = 'test_domain' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: collation + +- name: postgresql_owner - set collation owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_collation + obj_type: collation + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER COLLATION "test_collation" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_collation AS c JOIN pg_roles AS r + ON c.collowner = r.oid WHERE c.collname = 'test_collation' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set collation owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_collation + obj_type: collation + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER COLLATION "test_collation" OWNER TO "bob"'] + +- name: postgresql_owner - check that collation owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_collation AS c JOIN pg_roles AS r + ON c.collowner = r.oid WHERE c.collname = 'test_collation' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set collation owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_collation + obj_type: collation + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that collation owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM 
pg_collation AS c JOIN pg_roles AS r + ON c.collowner = r.oid WHERE c.collname = 'test_collation' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: conversion + +- name: postgresql_owner - set conversion owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: windows_1256_to_utf8 + obj_type: conversion + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER CONVERSION "windows_1256_to_utf8" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_conversion AS c JOIN pg_roles AS r + ON c.conowner = r.oid WHERE c.conname = 'windows_1256_to_utf8' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set conversion owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: windows_1256_to_utf8 + obj_type: conversion + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER CONVERSION "windows_1256_to_utf8" OWNER TO "bob"'] + +- name: postgresql_owner - check that conversion owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_conversion AS c JOIN pg_roles AS r + ON c.conowner = r.oid WHERE c.conname = 'windows_1256_to_utf8' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set conversion owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: windows_1256_to_utf8 + obj_type: conversion + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that conversion owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_conversion AS c JOIN pg_roles AS r + ON c.conowner = r.oid WHERE c.conname = 'windows_1256_to_utf8' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: text_search_configuration + +- name: postgresql_owner - set text search configuration owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: simple + obj_type: text_search_configuration + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TEXT SEARCH CONFIGURATION "simple" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_ts_config AS t JOIN pg_roles AS r + ON t.cfgowner = r.oid WHERE t.cfgname = 'simple' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set text 
search configuration owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: simple + obj_type: text_search_configuration + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TEXT SEARCH CONFIGURATION "simple" OWNER TO "bob"'] + +- name: postgresql_owner - check that text search configuration owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_ts_config AS t JOIN pg_roles AS r + ON t.cfgowner = r.oid WHERE t.cfgname = 'simple' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set text search configuration owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: simple + obj_type: text_search_configuration + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that text search configuration owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_ts_config AS t JOIN pg_roles AS r + ON t.cfgowner = r.oid WHERE t.cfgname = 'simple' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: text_search_dictionary + +- name: postgresql_owner - set text search dict owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: simple + obj_type: text_search_dictionary + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TEXT SEARCH DICTIONARY "simple" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_ts_dict AS t JOIN pg_roles AS r + ON t.dictowner = r.oid WHERE t.dictname = 'simple' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set text search dict owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: simple + obj_type: text_search_dictionary + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TEXT SEARCH DICTIONARY "simple" OWNER TO "bob"'] + +- name: postgresql_owner - check that text search dict owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_ts_dict AS t JOIN pg_roles AS r + ON t.dictowner = r.oid WHERE t.dictname = 'simple' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set text search dict owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: simple + obj_type: text_search_dictionary + register: result + +- assert: + that: + - result is not changed + - result.queries 
== [] + +- name: postgresql_owner - check that text search dict owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_ts_dict AS t JOIN pg_roles AS r + ON t.dictowner = r.oid WHERE t.dictname = 'simple' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: foreign_data_wrapper + +- name: postgresql_owner - set foreign_data_wrapper owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob_super + obj_name: test_foreign_data_wrapper + obj_type: foreign_data_wrapper + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER FOREIGN DATA WRAPPER "test_foreign_data_wrapper" OWNER TO "bob_super"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_foreign_data_wrapper AS f JOIN pg_roles AS r + ON f.fdwowner = r.oid WHERE f.fdwname = 'test_foreign_data_wrapper' AND r.rolname = 'bob_super' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set foreign_data_wrapper owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob_super + obj_name: test_foreign_data_wrapper + obj_type: foreign_data_wrapper + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER FOREIGN DATA WRAPPER "test_foreign_data_wrapper" OWNER TO "bob_super"'] + +- name: postgresql_owner - check that foreign_data_wrapper owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_foreign_data_wrapper AS f JOIN pg_roles AS r + ON f.fdwowner = r.oid WHERE f.fdwname = 'test_foreign_data_wrapper' AND r.rolname = 'bob_super' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set foreign_data_wrapper owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob_super + obj_name: test_foreign_data_wrapper + obj_type: foreign_data_wrapper + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that foreign_data_wrapper owner is bob_super + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_foreign_data_wrapper AS f JOIN pg_roles AS r + ON f.fdwowner = r.oid WHERE f.fdwname = 'test_foreign_data_wrapper' AND r.rolname = 'bob_super' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: server + +- name: postgresql_owner - set server owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_server + obj_type: server + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER SERVER "test_server" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + 
become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_foreign_server AS f JOIN pg_roles AS r + ON f.srvowner = r.oid WHERE f.srvname = 'test_server' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set server owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_server + obj_type: server + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER SERVER "test_server" OWNER TO "bob"'] + +- name: postgresql_owner - check that server owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_foreign_server AS f JOIN pg_roles AS r + ON f.srvowner = r.oid WHERE f.srvname = 'test_server' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set server owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_server + obj_type: server + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that server owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_foreign_server AS f JOIN pg_roles AS r + ON f.srvowner = r.oid WHERE f.srvname = 'test_server' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: foreign_table + +- name: postgresql_owner - set foreign_table owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_foreign_table + obj_type: foreign_table + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER FOREIGN TABLE "test_foreign_table" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_class AS c JOIN pg_roles AS r + ON c.relowner = r.oid WHERE c.relkind = 'f' + AND c.relname = 'test_foreign_table' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set foreign_table owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_foreign_table + obj_type: foreign_table + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER FOREIGN TABLE "test_foreign_table" OWNER TO "bob"'] + +- name: postgresql_owner - check that foreign_table owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_class AS c JOIN pg_roles AS r + ON c.relowner = r.oid WHERE c.relkind = 'f' + AND c.relname = 'test_foreign_table' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - 
result.rowcount == 1 + +- name: postgresql_owner - set foreign_table owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_foreign_table + obj_type: foreign_table + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that foreign_table owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_class AS c JOIN pg_roles AS r + ON c.relowner = r.oid WHERE c.relkind = 'f' + AND c.relname = 'test_foreign_table' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: event_trigger + +- name: postgresql_owner - set event_trigger owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob_super + obj_name: test_event_trigger + obj_type: event_trigger + check_mode: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER EVENT TRIGGER "test_event_trigger" OWNER TO "bob_super"'] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_event_trigger AS e JOIN pg_roles AS r + ON e.evtowner = r.oid WHERE e.evtname = 'test_event_trigger' AND r.rolname = 'bob_super' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - set event_trigger owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob_super + obj_name: test_event_trigger + obj_type: event_trigger + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER EVENT TRIGGER "test_event_trigger" OWNER TO "bob_super"'] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that event_trigger owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_event_trigger AS e JOIN pg_roles AS r + ON e.evtowner = r.oid WHERE e.evtname = 'test_event_trigger' AND r.rolname = 'bob_super' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - set event_trigger owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob_super + obj_name: test_event_trigger + obj_type: event_trigger + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that event_trigger owner is bob_super + become_user: '{{ 
pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_event_trigger AS e JOIN pg_roles AS r + ON e.evtowner = r.oid WHERE e.evtname = 'test_event_trigger' AND r.rolname = 'bob_super' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('11', '>=') + +# Test obj_type: large_object + +- name: postgresql_owner - set large_object owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: '{{ test_large_object }}' + obj_type: large_object + check_mode: true + register: result + +- set_fact: + query: 'ALTER LARGE OBJECT {{ test_large_object }} OWNER TO "bob"' + +- assert: + that: + - result is changed + - result.queries == [query] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_largeobject_metadata AS l JOIN pg_roles AS r + ON l.lomowner = r.oid WHERE l.oid = '{{ test_large_object }}' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set large_object owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: '{{ test_large_object }}' + obj_type: large_object + register: result + +- assert: + that: + - result is changed + - result.queries == [query] + +- name: postgresql_owner - check that large_object owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_largeobject_metadata AS l JOIN pg_roles AS r + ON l.lomowner = r.oid WHERE l.oid = '{{ test_large_object }}' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set large_object owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: '{{ test_large_object }}' + obj_type: large_object + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that large_object owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_largeobject_metadata AS l JOIN pg_roles AS r + ON l.lomowner = r.oid WHERE l.oid = '{{ test_large_object }}' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test obj_type: publication + +- name: postgresql_owner - set publication owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_publication + obj_type: publication + check_mode: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER PUBLICATION "test_publication" OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that nothing changed after the previous step 
+ become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_publication AS p JOIN pg_roles AS r + ON p.pubowner = r.oid WHERE p.pubname = 'test_publication' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - set publication owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_publication + obj_type: publication + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER PUBLICATION "test_publication" OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that publication owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_publication AS p JOIN pg_roles AS r + ON p.pubowner = r.oid WHERE p.pubname = 'test_publication' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - set publication owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_publication + obj_type: publication + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that publication owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_publication AS p JOIN pg_roles AS r + ON p.pubowner = r.oid WHERE p.pubname = 'test_publication' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('11', '>=') + +# Test obj_type: statistics + +- name: postgresql_owner - set statistics owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_statistics + obj_type: statistics + check_mode: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER STATISTICS "test_statistics" OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_statistic_ext AS s JOIN pg_roles AS r + ON s.stxowner = r.oid WHERE s.stxname = 'test_statistics' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 0 + when: 
postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - set statistics owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_statistics + obj_type: statistics + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER STATISTICS "test_statistics" OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that statistics owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_statistic_ext AS s JOIN pg_roles AS r + ON s.stxowner = r.oid WHERE s.stxname = 'test_statistics' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - set statistics owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_statistics + obj_type: statistics + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('11', '>=') + +- name: postgresql_owner - check that statistics owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_statistic_ext AS s JOIN pg_roles AS r + ON s.stxowner = r.oid WHERE s.stxname = 'test_statistics' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('11', '>=') + +# ############################# + # -# Crean up +# Clean up # + - name: postgresql_owner - drop test database become_user: '{{ pg_user }}' become: true diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml index 2a9505a5b..01d98cd5f 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml @@ -11,7 +11,6 @@ netmask: 'ffff:fff0::' method: md5 backup: 'True' - order: sud state: "{{item}}" check_mode: true with_items: @@ -30,7 +29,6 @@ dest: /tmp/pg_hba.conf method: "{{item.method|default('md5')}}" netmask: "{{item.netmask|default('')}}" - order: sud source: "{{item.source|default('')}}" state: absent users: "{{item.users|default('all')}}" @@ -51,7 +49,6 @@ dest: /tmp/pg_hba.conf method: "{{item.method|default('md5')}}" netmask: "{{item.netmask|default('')}}" - order: sud source: "{{item.source|default('')}}" state: present users: "{{item.users|default('all')}}" @@ -62,7 +59,6 @@ postgresql_pg_hba: dest: "/tmp/pg_hba.conf" users: "+some" - order: "sud" state: "present" contype: "local" method: "cert" @@ -76,7 +72,6 @@ postgresql_pg_hba: dest: "/tmp/pg_hba.conf" 
users: "+some" - order: "sud" state: "present" contype: "{{ item.contype }}" method: "{{ item.method }}" @@ -100,7 +95,6 @@ dest: /tmp/pg_hba.conf method: "{{item.method|default('md5')}}" netmask: "{{item.netmask|default('')}}" - order: sud source: "{{item.source|default('')}}" state: present users: "{{item.users|default('all')}}" @@ -119,7 +113,6 @@ dest: /tmp/pg_hba.conf method: md5 netmask: 255.255.255.0 - order: sud source: '172.21.0.0' state: present register: pg_hba_backup_check2 @@ -130,7 +123,6 @@ contype: host dest: /tmp/pg_hba.conf method: md5 - order: sud source: '172.21.0.0/24' state: present register: netmask_sameas_prefix_check @@ -146,7 +138,6 @@ dest: /tmp/pg_hba.conf method: md5 netmask: '255.255.255.255' - order: sud source: all state: present register: pg_hba_fail_src_all_with_netmask @@ -196,7 +187,6 @@ create: true method: md5 address: "2001:db8::1/128" - order: sud state: present comment: "comment1" @@ -220,7 +210,6 @@ dest: /tmp/pg_hba2.conf method: md5 address: "2001:db8::2/128" - order: sud state: present comment: "comment2" @@ -244,7 +233,6 @@ dest: /tmp/pg_hba2.conf method: md5 address: "2001:db8::3/128" - order: sud state: present comment: "comment3" keep_comments_at_rules: true diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/defaults/main.yml deleted file mode 100644 index 73eb55ae2..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -db_default: postgres diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/handlers/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/handlers/main.yml new file mode 100644 index 000000000..535dd6467 --- /dev/null +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/handlers/main.yml @@ -0,0 +1,6 @@ +- name: Drop test user + become: true + become_user: "{{ pg_user }}" + community.postgresql.postgresql_user: + name: "{{ ping_test_user }}" + state: absent diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml index bcb18d2fe..73f3e189a 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml @@ -4,6 +4,5 @@ #################################################################### # Initial CI tests of postgresql_ping module -- import_tasks: postgresql_ping_initial.yml - vars: - db_name_nonexist: fake_db +- name: Import the original test task file + ansible.builtin.import_tasks: postgresql_ping_initial.yml diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml index 218ae9fd7..be48065df 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml @@ -2,186 +2,198 @@ # Copyright: (c) 
2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -- name: postgresql_ping - test return values - become_user: "{{ pg_user }}" - become: true - postgresql_ping: +- name: Set parameters we use with most of tasks + ansible.builtin.set_fact: + task_parameters: &task_parameters + become_user: "{{ pg_user }}" + become: true + register: result + + +- name: Test return values + <<: *task_parameters + ignore_errors: true + community.postgresql.postgresql_ping: db: "{{ db_default }}" login_user: "{{ pg_user }}" - register: result - ignore_errors: true -- assert: +- name: Assert return values + ansible.builtin.assert: that: - - result.is_available == true - - result.server_version != {} - - result.server_version.raw is search('PostgreSQL') - - result.server_version.major != '' - - result.server_version.minor != '' - - result is not changed + - result.is_available == true + - result.server_version != {} + - result.server_version.raw is search("PostgreSQL") + - result.server_version.major != "" + - result.server_version.minor != "" + - result is not changed + +- name: Set full server version as X.Y.Z + set_fact: + version_full: '{{ result.server_version.major }}.{{ result.server_version.minor }}.{{ result.server_version.patch }}' + when: result.server_version.major == 9 + +- name: Set full server version as X.Y + set_fact: + version_full: '{{ result.server_version.major }}.{{ result.server_version.minor }}' + when: result.server_version.major >= 10 - assert: that: - result.server_version.patch != {} - - result.server_version.full == '{{ result.server_version.major }}.{{ result.server_version.minor }}.{{ result.server_version.patch }}' when: result.server_version.major == 9 - assert: that: - - result.server_version.full == '{{ result.server_version.major }}.{{ result.server_version.minor }}' - when: result.server_version.major >= 10 + - result.server_version.full == version_full + -- name: postgresql_ping - check ping of non-existing database doesn't return anything - become_user: "{{ pg_user }}" - become: true - postgresql_ping: +- name: Test ping of non-existing database returns nothing + <<: *task_parameters + ignore_errors: true + community.postgresql.postgresql_ping: db: "{{ db_name_nonexist }}" login_user: "{{ pg_user }}" - register: result - ignore_errors: true -- assert: +- name: Assert that ping of non-existing database returns nothing + ansible.builtin.assert: that: - - result.is_available == false - - result.server_version == {} - - result is not changed + - result.is_available == false + - result.server_version == {} + - result is not changed + -- name: postgresql_ping - check ping of the database on non-existent port does not return anything - become_user: "{{ pg_user }}" - become: true +- name: Test ping of the database on non-existent port returns nothing + <<: *task_parameters environment: PGPORT: 5435 ignore_errors: true - postgresql_ping: + community.postgresql.postgresql_ping: db: "{{ db_default }}" login_user: "{{ pg_user }}" - register: result -- assert: +- name: Assert that ping of the database on non-existent port returns nothing + ansible.builtin.assert: that: - - result.is_available == false - - result.server_version == {} - - result is not changed + - result.is_available == false + - result.server_version == {} + - result is not changed + -- name: postgresql_ping - check ping of the database by a non-existent user does not return anything - become_user: "{{ pg_user }}" - 
become: true +- name: Test ping of the database by a non-existent user returns nothing + <<: *task_parameters environment: - PGUSER: 'test_user' + PGUSER: "none_existent_test_user" ignore_errors: true - postgresql_ping: + community.postgresql.postgresql_ping: db: "{{ db_default }}" - register: result -- assert: +- name: Assert that ping of the database by a non-existent user returns nothing + ansible.builtin.assert: that: - - result.is_available == false - - result.server_version == {} - - result is not changed + - result.is_available == false + - result.server_version == {} + - result is not changed + -- name: Creating a "test_user" in postresql - shell: - cmd: psql -U "{{ pg_user }}" -c "CREATE ROLE test_user WITH LOGIN PASSWORD 'TEST_PASSWORD';" +- name: Create a {{ ping_test_user }} + <<: *task_parameters + notify: Drop test user + community.postgresql.postgresql_user: + name: "{{ ping_test_user }}" + role_attr_flags: LOGIN + password: "{{ ping_test_user_pass }}" -- name: postgresql_ping - check ping of the database by a existent user - become_user: "{{ pg_user }}" - become: true +- name: Test ping of the database by existent user + <<: *task_parameters environment: - PGUSER: 'test_user' + PGUSER: "{{ ping_test_user }}" ignore_errors: true - postgresql_ping: + community.postgresql.postgresql_ping: db: "{{ db_default }}" - login_password: "TEST_PASSWORD" - register: result + login_password: "{{ ping_test_user_pass }}" -- assert: +- name: Assert ping of the database by existent user + ansible.builtin.assert: that: - - result.is_available == true - - result.server_version != {} - - result.server_version.raw is search('PostgreSQL') - - result.server_version.major != '' - - result.server_version.minor != '' - - result is not changed - -- name: postgresql_ping - ping DB with SSL 1 - become_user: "{{ pg_user }}" - become: true - postgresql_ping: - db: "{{ ssl_db }}" - login_user: "{{ ssl_user }}" - login_password: "{{ ssl_pass }}" - login_host: 127.0.0.1 - login_port: 5432 - ssl_mode: require - ca_cert: '{{ ssl_rootcert }}' - trust_input: true - register: result - when: - - ansible_os_family == 'Debian' - - postgres_version_resp.stdout is version('9.4', '>=') - -- assert: - that: - result.is_available == true - - result.conn_err_msg == '' - when: - - ansible_os_family == 'Debian' - - postgres_version_resp.stdout is version('9.4', '>=') - -- name: postgresql_ping - ping DB with SSL 2 - become_user: "{{ pg_user }}" - become: true - postgresql_ping: - db: "{{ ssl_db }}" - login_user: "{{ ssl_user }}" - login_password: "{{ ssl_pass }}" - login_host: 127.0.0.1 - login_port: 5432 - ssl_mode: verify-full - ca_cert: '{{ ssl_rootcert }}' - ssl_cert: '{{ ssl_cert }}' - ssl_key: '{{ ssl_key }}' - trust_input: true - register: result - when: - - ansible_os_family == 'Debian' - - postgres_version_resp.stdout is version('9.4', '>=') + - result.server_version != {} + - result.server_version.raw is search("PostgreSQL") + - result.server_version.major != "" + - result.server_version.minor != "" + - result is not changed -- assert: - that: - - result.is_available == true - - result.conn_err_msg == '' - when: - - ansible_os_family == 'Debian' - - postgres_version_resp.stdout is version('9.4', '>=') -- name: postgresql_ping - check trust_input - become_user: "{{ pg_user }}" - become: true - postgresql_ping: +- name: Test SSL block + when: + - ansible_os_family == "Debian" + - postgres_version_resp.stdout is version("9.4", ">=") + block: + + - name: Test ping DB with SSL 1 + <<: *task_parameters + 
community.postgresql.postgresql_ping: + db: "{{ ssl_db }}" + login_user: "{{ ssl_user }}" + login_password: "{{ ssl_pass }}" + login_host: 127.0.0.1 + login_port: 5432 + ssl_mode: require + ca_cert: "{{ ssl_rootcert }}" + trust_input: true + + - name: Assert ping DB with SSL 1 + ansible.builtin.assert: + that: + - result.is_available == true + - result.conn_err_msg == "" + + + - name: Test ping DB with SSL 2 + <<: *task_parameters + community.postgresql.postgresql_ping: + db: "{{ ssl_db }}" + login_user: "{{ ssl_user }}" + login_password: "{{ ssl_pass }}" + login_host: 127.0.0.1 + login_port: 5432 + ssl_mode: verify-full + ca_cert: "{{ ssl_rootcert }}" + ssl_cert: "{{ ssl_cert }}" + ssl_key: "{{ ssl_key }}" + trust_input: true + + - name: Assert ping DB with SSL 2 + ansible.builtin.assert: + that: + - result.is_available == true + - result.conn_err_msg == "" + + +- name: Test trust_input is false and input looks suspicious + <<: *task_parameters + ignore_errors: true + community.postgresql.postgresql_ping: db: "{{ db_default }}" login_user: "{{ pg_user }}" trust_input: false session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' - register: result - ignore_errors: true -- assert: +- name: Assert result when trust_input is false and input looks suspicious + ansible.builtin.assert: that: - result is failed - - result.msg is search('is potentially dangerous') + - result.msg is search("is potentially dangerous") + # Check conn_err_msg return value - name: Try to connect to non-existent DB - become_user: "{{ pg_user }}" - become: true - postgresql_ping: - db: blahblah + <<: *task_parameters + community.postgresql.postgresql_ping: + db: "{{ db_name_nonexist }}" login_user: "{{ pg_user }}" - register: result -- name: Check conn_err_msg return value - assert: +- name: Assert connection to non-existent DB + ansible.builtin.assert: that: - result is succeeded - - result.conn_err_msg is search("database \"blahblah\" does not exist") + - result.conn_err_msg is search("database \"{{ db_name_nonexist }}\" does not exist") diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/vars/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/vars/main.yml new file mode 100644 index 000000000..8b7a34379 --- /dev/null +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/vars/main.yml @@ -0,0 +1,5 @@ +db_default: postgres +db_name_nonexist: fake_db + +ping_test_user: ping_test_user +ping_test_user_pass: ping_test_user_pass diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml index 4b4621010..d83472ba1 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml @@ -73,7 +73,7 @@ # Grant rights to the public schema, since in PostgreSQL 15 # the rights to this schema are taken away from all users except the owner -- name: GRANT ALL PRIVILEGES ON SCHEMA public TO ansible_db_user1,2,3 +- name: GRANT ALL PRIVILEGES ON SCHEMA public TO ansible_db_user2,3 community.postgresql.postgresql_privs: db: "{{ db_name }}" privs: ALL @@ -96,14 +96,12 @@ obj: TABLES privs: all state: present - 
usage_on_types: true register: result check_mode: true - assert: that: - result is changed - - result.queries is search('ON TYPES') # Also covers https://github.com/ansible-collections/community.general/issues/884 - name: Set table default privs on the schema with hyphen in the name @@ -117,13 +115,11 @@ obj: TABLES privs: all state: present - usage_on_types: false register: result - assert: that: - result is changed - - result.queries is not search('ON TYPES') - name: Delete table default privs on the schema with hyphen in the name postgresql_privs: @@ -597,7 +593,7 @@ register: result - assert: - that: "'{{ db_user2 }}=X/{{ db_user3 }}' in '{{ result.stdout_lines[0] }}'" + that: result.stdout_lines[0] is search('{{ db_user2 }}=X/{{ db_user3 }}') # Test - name: Grant execute to all functions again @@ -839,16 +835,16 @@ - result is changed when: postgres_version_resp.stdout is version('11', '>=') -################################################# -# Test ALL_IN_SCHEMA for 'partioned tables type # -################################################# +################################################### +# Test ALL_IN_SCHEMA for 'partitioned tables type # +################################################### # Partitioning tables is a feature introduced in Postgresql 10. # (see https://www.postgresql.org/docs/10/ddl-partitioning.html ) # The test below check for this version # Function ALL_IN_SCHEMA Setup -- name: Create partioned table for test purpose +- name: Create partitioned table for test purpose postgresql_query: query: CREATE TABLE public.testpt (id int not null, logdate date not null) PARTITION BY RANGE (logdate); db: "{{ db_name }}" @@ -958,6 +954,22 @@ - assert: that: "'{{ db_user2 }}=X*/{{ pg_user }}' in result.query_result[0].proacl" +# Test https://github.com/ansible-collections/community.postgresql/issues/668 +- name: Issue 688 + become: true + become_user: "{{ pg_user }}" + register: result + postgresql_privs: + privs: ALL + type: default_privs + schema: pg_catalog + obj: ALL_DEFAULT + db: "{{ db_name }}" + roles: "{{ db_user2 }}" + login_user: "{{ pg_user }}" + grant_option: true + state: present + # Test - name: Revoke grant option on pg_create_restore_point function postgresql_privs: @@ -1190,7 +1202,7 @@ postgresql_query: login_user: "{{ pg_user }}" login_db: "{{ db_name }}" - query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric'; + query: SELECT typacl::varchar FROM pg_catalog.pg_type WHERE typname = 'numeric'; register: typ_result when: postgres_version_resp.stdout is version('10', '>=') @@ -1228,7 +1240,7 @@ postgresql_query: login_user: "{{ pg_user }}" login_db: "{{ db_name }}" - query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric'; + query: SELECT typacl::varchar FROM pg_catalog.pg_type WHERE typname = 'numeric'; register: typ_result when: postgres_version_resp.stdout is version('10', '>=') @@ -1265,7 +1277,7 @@ postgresql_query: login_user: "{{ pg_user }}" login_db: "{{ db_name }}" - query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric'; + query: SELECT typacl::varchar FROM pg_catalog.pg_type WHERE typname = 'numeric'; register: typ_result when: postgres_version_resp.stdout is version('10', '>=') @@ -1303,7 +1315,7 @@ postgresql_query: login_user: "{{ pg_user }}" login_db: "{{ db_name }}" - query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric'; + query: SELECT typacl::varchar FROM pg_catalog.pg_type WHERE typname = 'numeric'; register: typ_result when: postgres_version_resp.stdout is version('10', 
'>=') @@ -1340,7 +1352,7 @@ postgresql_query: login_user: "{{ pg_user }}" login_db: "{{ db_name }}" - query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric'; + query: SELECT typacl::varchar FROM pg_catalog.pg_type WHERE typname = 'numeric'; register: typ_result when: postgres_version_resp.stdout is version('10', '>=') @@ -1388,7 +1400,7 @@ login_user: "{{ pg_user }}" login_db: "{{ db_name }}" query: > - SELECT t.typacl FROM pg_catalog.pg_type t JOIN pg_catalog.pg_namespace n + SELECT t.typacl::varchar FROM pg_catalog.pg_type t JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace WHERE t.typname = 'compfoo' AND n.nspname = 'public'; register: typ_result when: postgres_version_resp.stdout is version('10', '>=') @@ -1558,7 +1570,7 @@ register: result - assert: that: - - result is changed + - result is not changed - name: check permissions on tables in schemas with special names become: true become_user: "{{ pg_user }}" @@ -1700,6 +1712,132 @@ - result is failed - result.msg is search('ALL_IN_SCHEMA can be used only for type') +########################################### +# Test for 'parameter' value of type parameter # +########################################### + +- when: postgres_version_resp.stdout is version('15', '>=') + block: + #### GRANT ALTER_SYSTEM/SET - test #### + - name: GRANT ALTER SYSTEM ON PARAMETER primary_conninfo,synchronous_standby_names,log_destination TO db_user3 + community.postgresql.postgresql_privs: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + state: present + privs: ALTER_SYSTEM + type: parameter + objs: primary_conninfo,synchronous_standby_names,log_destination + roles: "{{ db_user3 }}" + register: result + + - assert: + that: + - result is changed + + - name: GRANT SET ON PARAMETER log_destination,log_line_prefix,synchronous_standby_names TO db_user3 + community.postgresql.postgresql_privs: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + state: present + privs: SET + type: parameter + objs: log_destination,log_line_prefix,synchronous_standby_names + roles: "{{ db_user3 }}" + + - assert: + that: + - result is changed + + #### GRANT ALTER_SYSTEM/SET - idempotence #### + - name: GRANT ALTER SYSTEM ON PARAMETER primary_conninfo,synchronous_standby_names,log_destination TO db_user3 + community.postgresql.postgresql_privs: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + state: present + privs: ALTER_SYSTEM + type: parameter + objs: primary_conninfo,synchronous_standby_names,log_destination + roles: "{{ db_user3 }}" + register: result + + - assert: + that: + - result is not changed + + - name: GRANT SET ON PARAMETER log_destination,log_line_prefix,synchronous_standby_names TO db_user3 + community.postgresql.postgresql_privs: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + state: present + privs: SET + type: parameter + objs: log_destination,log_line_prefix,synchronous_standby_names + roles: "{{ db_user3 }}" + + - assert: + that: + - result is not changed + + #### REVOKE ALTER_SYSTEM/SET - test #### + - name: REVOKE ALTER SYSTEM ON PARAMETER primary_conninfo,synchronous_standby_names,log_destination FROM db_user3 + community.postgresql.postgresql_privs: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + state: absent + privs: ALTER_SYSTEM + type: parameter + objs: primary_conninfo,synchronous_standby_names,log_destination + roles: "{{ db_user3 }}" + register: result + + - assert: + that: + - result is changed + + - name: REVOKE SET ON PARAMETER 
log_destination,log_line_prefix,synchronous_standby_names FROM db_user3 + community.postgresql.postgresql_privs: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + state: absent + privs: SET + type: parameter + objs: log_destination,log_line_prefix,synchronous_standby_names + roles: "{{ db_user3 }}" + + - assert: + that: + - result is changed + + #### REVOKE ALTER_SYSTEM/SET - idempotence #### + - name: REVOKE ALTER SYSTEM ON PARAMETER primary_conninfo,synchronous_standby_names,log_destination FROM db_user3 + community.postgresql.postgresql_privs: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + state: absent + privs: ALTER_SYSTEM + type: parameter + objs: primary_conninfo,synchronous_standby_names,log_destination + roles: "{{ db_user3 }}" + register: result + + - assert: + that: + - result is not changed + + - name: REVOKE SET ON PARAMETER log_destination,log_line_prefix,synchronous_standby_names FROM db_user3 + community.postgresql.postgresql_privs: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + state: absent + privs: SET + type: parameter + objs: log_destination,log_line_prefix,synchronous_standby_names + roles: "{{ db_user3 }}" + + - assert: + that: + - result is not changed + # Cleanup - name: Remove privs become: true diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml index 814bc348d..081b13773 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml @@ -41,35 +41,34 @@ - name: Check that the user has the requested permissions (table1) become_user: "{{ pg_user }}" become: true - shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select array_agg(privilege_type::TEXT ORDER BY privilege_type ASC) as privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1'" register: result_table1 - name: Check that the user has the requested permissions (table2) become_user: "{{ pg_user }}" become: true - shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2'" register: result_table2 - name: Check that the user has the requested permissions (database) become_user: "{{ pg_user }}" become: true - shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select datacl from pg_database where datname='{{ db_name }}'" register: result_database - assert: that: - - "result_table1.stdout_lines[-1] == '(7 rows)'" - - "'INSERT' in result_table1.stdout" - - "'SELECT' in result_table1.stdout" - - "'UPDATE' in result_table1.stdout" - - "'DELETE' in result_table1.stdout" - - "'TRUNCATE' in result_table1.stdout" - - "'REFERENCES' in result_table1.stdout" - - "'TRIGGER' in 
result_table1.stdout" - - "result_table2.stdout_lines[-1] == '(1 row)'" - - "'INSERT' == '{{ result_table2.stdout_lines[-2] | trim }}'" - - "result_database.stdout_lines[-1] == '(1 row)'" - - "'{{ db_user1 }}=CTc/{{ pg_user }}' in result_database.stdout_lines[-2]" + - result_table1.query_result[0]["privilege_type"] == ["DELETE", "INSERT", "REFERENCES", "SELECT", "TRIGGER", "TRUNCATE", "UPDATE"] + - result_table2.rowcount == 1 + - result_table2.query_result[0]['privilege_type'] == 'INSERT' + - result_database.rowcount == 1 + - "'{{ db_user1 }}=CTc/{{ pg_user }}' in result_database.query_result[0]['datacl']" - name: Add another permission for the user become_user: "{{ pg_user }}" @@ -91,14 +90,14 @@ - name: Check that the user has the requested permissions (table2) become_user: "{{ pg_user }}" become: true - shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select array_agg(privilege_type::TEXT ORDER BY privilege_type ASC) as privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2'" register: result_table2 - assert: that: - - "result_table2.stdout_lines[-1] == '(2 rows)'" - - "'INSERT' in result_table2.stdout" - - "'SELECT' in result_table2.stdout" + - result_table2.query_result[0]['privilege_type'] == ['INSERT', 'SELECT'] # # Test priv setting via postgresql_privs module @@ -127,13 +126,15 @@ - name: Check that the user has the requested permissions (table2) become_user: "{{ pg_user }}" become: true - shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2'" register: result_table2 - assert: that: - - "result_table2.stdout_lines[-1] == '(1 row)'" - - "'SELECT' == '{{ result_table2.stdout_lines[-2] | trim }}'" + - result_table2.rowcount == 1 + - result_table2.query_result[0]['privilege_type'] == 'SELECT' - name: Revoke many privileges on multiple tables become_user: "{{ pg_user }}" @@ -156,19 +157,23 @@ - name: Check that permissions were revoked (table1) become_user: "{{ pg_user }}" become: true - shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1'" register: result_table1 - name: Check that permissions were revoked (table2) become_user: "{{ pg_user }}" become: true - shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2'" register: result_table2 - assert: that: - - "result_table1.stdout_lines[-1] == '(0 rows)'" - - "result_table2.stdout_lines[-1] == '(0 rows)'" + - result_table1.rowcount == 0 + - result_table2.rowcount == 0 - name: Revoke database privileges become_user: "{{ pg_user }}" @@ -186,13 +191,15 @@ - name: 
Check that the user has the requested permissions (database) become_user: "{{ pg_user }}" become: true - shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select datacl from pg_database where datname='{{ db_name }}'" register: result_database - assert: that: - - "result_database.stdout_lines[-1] == '(1 row)'" - - "'{{ db_user1 }}' not in result_database.stdout" + - result_database.rowcount == 1 + - "'{{ db_user1 }}' not in result_database.query_result[0]['datacl']" - name: Grant database privileges become_user: "{{ pg_user }}" @@ -216,13 +223,15 @@ - name: Check that the user has the requested permissions (database) become_user: "{{ pg_user }}" become: true - shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select datacl from pg_database where datname='{{ db_name }}'" register: result_database - assert: that: - - "result_database.stdout_lines[-1] == '(1 row)'" - - "'{{ db_user1 }}=Cc' in result_database.stdout" + - result_database.rowcount == 1 + - result_database.query_result[0]['datacl'] is search("{{ db_user1 }}=Cc") - name: Grant a single privilege on a table become_user: "{{ pg_user }}" @@ -239,13 +248,15 @@ - name: Check that permissions were added (table1) become_user: "{{ pg_user }}" become: true - shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1'" register: result_table1 - assert: that: - - "result_table1.stdout_lines[-1] == '(1 row)'" - - "'{{ result_table1.stdout_lines[-2] | trim }}' == 'INSERT'" + - result_table1.rowcount == 1 + - result_table1.query_result[0]['privilege_type'] == 'INSERT' - name: Grant many privileges on multiple tables become_user: "{{ pg_user }}" @@ -262,33 +273,23 @@ - name: Check that permissions were added (table1) become_user: "{{ pg_user }}" become: true - shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select array_agg(privilege_type::TEXT ORDER BY privilege_type ASC) as privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1'" register: result_table1 - name: Check that permissions were added (table2) become_user: "{{ pg_user }}" become: true - shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + postgresql_query: + login_db: '{{ db_name }}' + query: "select array_agg(privilege_type::TEXT ORDER BY privilege_type ASC) as privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2'" register: result_table2 - assert: that: - - "result_table1.stdout_lines[-1] == '(7 rows)'" - - "'INSERT' in result_table1.stdout" - - "'SELECT' in result_table1.stdout" - - "'UPDATE' in result_table1.stdout" - - "'DELETE' in result_table1.stdout" - - "'TRUNCATE' in result_table1.stdout" - - "'REFERENCES' in result_table1.stdout" - - "'TRIGGER' in result_table1.stdout" - - 
"result_table2.stdout_lines[-1] == '(7 rows)'" - - "'INSERT' in result_table2.stdout" - - "'SELECT' in result_table2.stdout" - - "'UPDATE' in result_table2.stdout" - - "'DELETE' in result_table2.stdout" - - "'TRUNCATE' in result_table2.stdout" - - "'REFERENCES' in result_table2.stdout" - - "'TRIGGER' in result_table2.stdout" + - result_table1.query_result[0]["privilege_type"] == ["DELETE", "INSERT", "REFERENCES", "SELECT", "TRIGGER", "TRUNCATE", "UPDATE"] + - result_table2.query_result[0]["privilege_type"] == ["DELETE", "INSERT", "REFERENCES", "SELECT", "TRIGGER", "TRUNCATE", "UPDATE"] # Check passing roles with dots # https://github.com/ansible/ansible/issues/63204 @@ -343,22 +344,144 @@ target_roles: "{{ db_user_with_dots2 }}" trust_input: false -# Bugfix for https://github.com/ansible-collections/community.general/issues/857 -- name: Test passing lowercase PUBLIC role +# https://github.com/ansible-collections/community.postgresql/pull/502 - role PUBLIC +- name: Test passing lowercase PUBLIC role - Grant CREATE ON DATABASE - Test + become_user: "{{ pg_user }}" + become: true + postgresql_privs: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + state: present + type: 'database' + privs: 'create' + role: 'public' + register: result + +- assert: + that: + - result is changed + - result.queries == ["GRANT CREATE ON database \"{{ db_name }}\" TO PUBLIC;"] + +- name: Test passing lowercase PUBLIC role - Grant CREATE ON DATABASE - Idempotence + become_user: "{{ pg_user }}" + become: true + postgresql_privs: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + state: present + type: 'database' + privs: 'create' + role: 'public' + register: result + +- assert: + that: + - result is not changed + - result.queries == ["GRANT CREATE ON database \"{{ db_name }}\" TO PUBLIC;"] + +- name: Test passing lowercase PUBLIC role - Revoke CREATE ON DATABASE - Test become_user: "{{ pg_user }}" become: true postgresql_privs: db: "{{ db_name }}" login_user: "{{ pg_user }}" + state: absent type: 'database' - privs: 'connect' + privs: 'create' role: 'public' register: result - assert: that: - result is changed - - result.queries == ["GRANT CONNECT ON database \"{{ db_name }}\" TO PUBLIC;"] + - result.queries == ["REVOKE CREATE ON database \"{{ db_name }}\" FROM PUBLIC;"] + +- name: Test passing lowercase PUBLIC role - Revoke CREATE ON DATABASE - Test + become_user: "{{ pg_user }}" + become: true + postgresql_privs: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + state: absent + type: 'database' + privs: 'create' + role: 'public' + register: result + +- assert: + that: + - result is not changed + - result.queries == ["REVOKE CREATE ON database \"{{ db_name }}\" FROM PUBLIC;"] + +# https://github.com/ansible-collections/community.postgresql/pull/502 - role SESSION_USER +# first revoke after grant, as the privilege is already granted +- name: Test passing lowercase SESSION_USER role - Revoke CREATE ON DATABASE - Test + become_user: "{{ pg_user }}" + become: true + postgresql_privs: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + state: absent + type: 'database' + privs: 'create' + role: 'session_user' + register: result + +- assert: + that: + - result is changed + - result.queries == ["REVOKE CREATE ON database \"{{ db_name }}\" FROM SESSION_USER;"] + +- name: Test passing lowercase SESSION_USER role - Revoke CREATE ON DATABASE - Test + become_user: "{{ pg_user }}" + become: true + postgresql_privs: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + state: absent + type: 'database' + privs: 
'create' + role: 'session_user' + register: result + +- assert: + that: + - result is not changed + - result.queries == ["REVOKE CREATE ON database \"{{ db_name }}\" FROM SESSION_USER;"] + +- name: Test passing lowercase SESSION_USER role - Grant CREATE ON DATABASE - Test + become_user: "{{ pg_user }}" + become: true + postgresql_privs: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + state: present + type: 'database' + privs: 'create' + role: 'session_user' + register: result + +- assert: + that: + - result is changed + - result.queries == ["GRANT CREATE ON database \"{{ db_name }}\" TO SESSION_USER;"] + +- name: Test passing lowercase SESSION_USER role - Grant CREATE ON DATABASE - Idempotence + become_user: "{{ pg_user }}" + become: true + postgresql_privs: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + state: present + type: 'database' + privs: 'create' + role: 'session_user' + register: result + +- assert: + that: + - result is not changed + - result.queries == ["GRANT CREATE ON database \"{{ db_name }}\" TO SESSION_USER;"] # # Cleanup diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml index 42ece0bad..bf8c76744 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml @@ -55,7 +55,7 @@ register: result - assert: - that: "'{{ db_user2 }}=r/{{ db_user1 }}' in '{{ result.stdout_lines[0] }}'" + that: result.stdout_lines[0] is search('{{ db_user2 }}=r/{{ db_user1 }}') # Test - name: Revoke default privileges for new table objects diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml index 584a4848b..ac7563c2d 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml @@ -90,20 +90,21 @@ postgresql_publication: <<: *pg_parameters name: '{{ test_pub }}' + comment: 'Made by Ansible' trust_input: false - assert: that: - result is changed - result.exists == true - - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES"] + - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES", "COMMENT ON PUBLICATION \"{{ test_pub }}\" IS 'Made by Ansible'"] - result.owner == '{{ pg_user }}' - result.alltables == true - result.tables == [] - result.parameters.publish != {} # Check - - name: postgresql_publication - check that nothing has been changed + - name: postgresql_publication - check the publication was created <<: *task_parameters postgresql_query: <<: *pg_parameters @@ -115,6 +116,85 @@ that: - result.rowcount == 1 + - name: Check the comment + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT obj_description(p.oid, 'pg_publication') AS comment FROM pg_publication AS p WHERE p.pubname = '{{ test_pub }}'" + + - assert: + that: + - result.query_result[0]['comment'] == 'Made by Ansible' + + - name: Not specify the comment 
explicitly + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + + - assert: + that: + - result is not changed + - result.queries == [] + + - name: Change the comment in check mode + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + comment: 'Made by me' + check_mode: true + + - assert: + that: + - result is changed + - result.queries == ["COMMENT ON PUBLICATION \"{{ test_pub }}\" IS 'Made by me'"] + + - name: Check the comment didn't change + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT obj_description(p.oid, 'pg_publication') AS comment FROM pg_publication AS p WHERE p.pubname = '{{ test_pub }}'" + + - assert: + that: + - result.query_result[0]['comment'] == 'Made by Ansible' + + - name: Reset the comment in real mode + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + comment: '' + + - assert: + that: + - result is changed + - result.queries == ["COMMENT ON PUBLICATION \"{{ test_pub }}\" IS ''"] + + - name: Check the comment was reset + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT obj_description(p.oid, 'pg_publication') AS comment FROM pg_publication AS p WHERE p.pubname = '{{ test_pub }}'" + + - assert: + that: + - result.query_result[0]['comment'] == None + + - name: Reset the comment again in check mode + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + comment: '' + check_mode: true + + - assert: + that: + - result is not changed + - result.queries == [] + # Test - name: postgresql_publication - drop publication, check_mode <<: *task_parameters diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test0.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test0.sql deleted file mode 100644 index e8a5ca03d..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test0.sql +++ /dev/null @@ -1,6 +0,0 @@ -SELECT version(); - -SELECT story FROM test_table - WHERE id = %s OR story = 'Данные'; - - diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test1.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test1.sql deleted file mode 100644 index 028c192d7..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test1.sql +++ /dev/null @@ -1,10 +0,0 @@ -CREATE FUNCTION add(integer, integer) RETURNS integer - AS 'select $1 + $2;' - LANGUAGE SQL - IMMUTABLE - RETURNS NULL ON NULL INPUT; - -SELECT story FROM test_table - WHERE id = %s OR story = 'Данные'; - -SELECT version(); diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml index 5d447d608..b784955a4 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml @@ -25,19 +25,6 @@ shell: psql postgres -U "{{ pg_user }}" -t -c "INSERT INTO test_table (id, story) VALUES (1, 'first'), (2, 'second'), (3, 'third');" ignore_errors: 
true - - name: Copy script files - become: true - copy: - src: '{{ item }}' - dest: '~{{ pg_user }}/{{ item }}' - owner: '{{ pg_user }}' - force: true - loop: - - test0.sql - - test1.sql - register: sql_file_created - ignore_errors: true - - name: postgresql_query - analyze test_table become_user: '{{ pg_user }}' become: true @@ -57,29 +44,6 @@ - result.query_result == {} - result.query_all_results == [{}] - - name: postgresql_query - run queries from SQL script - become_user: '{{ pg_user }}' - become: true - postgresql_query: - <<: *pg_parameters - path_to_script: ~{{ pg_user }}/test0.sql - positional_args: - - 1 - encoding: UTF-8 - as_single_query: false - register: result - ignore_errors: true - when: sql_file_created - - - assert: - that: - - result is not changed - - result.query == "\n\nSELECT story FROM test_table\n WHERE id = 1 OR story = 'Данные'" - - result.query_result[0].story == 'first' - - result.rowcount == 2 - - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT' - when: sql_file_created - - name: postgresql_query - simple select query to test_table become_user: '{{ pg_user }}' become: true @@ -270,6 +234,37 @@ - result.rowcount == 0 - result.statusmessage == 'ALTER TABLE' + - name: postgresql_query - alter test_table using encoding + become_user: '{{ pg_user }}' + become: true + postgresql_query: + <<: *pg_parameters + query: ALTER TABLE test_table ADD COLUMN foo2 int + encoding: 'UTF-8' + register: result + ignore_errors: true + + - assert: + that: + - result is changed + - result.query == "ALTER TABLE test_table ADD COLUMN foo2 int" + - result.rowcount == 0 + - result.statusmessage == 'ALTER TABLE' + + - name: postgresql_query - alter test_table using bad encoding + become_user: '{{ pg_user }}' + become: true + postgresql_query: + <<: *pg_parameters + query: ALTER TABLE test_table ADD COLUMN foo888 int + encoding: 'UTF-888-bad' + register: result + ignore_errors: true + + - assert: + that: + - result.failed == true + - name: postgresql_query - vacuum without autocommit must fail become_user: '{{ pg_user }}' become: true @@ -502,28 +497,6 @@ that: - result is failed - # Tests for the as_single_query option - - name: Run queries from SQL script as a single query - become_user: '{{ pg_user }}' - become: true - postgresql_query: - <<: *pg_parameters - path_to_script: ~{{ pg_user }}/test1.sql - positional_args: - - 1 - encoding: UTF-8 - as_single_query: true - register: result - - - name: > - Must pass. Not changed because we can only - check statusmessage of the last query - assert: - that: - - result is not changed - - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT' - - result.query_list[0] == "CREATE FUNCTION add(integer, integer) RETURNS integer\n AS 'select $1 + $2;'\n LANGUAGE SQL\n IMMUTABLE\n RETURNS NULL ON NULL INPUT;\n\nSELECT story FROM test_table\n WHERE id = %s OR story = 'Данные';\n\nSELECT version();\n" - ############################################################################# # Issue https://github.com/ansible-collections/community.postgresql/issues/45 - name: Create table containing a decimal value @@ -602,3 +575,15 @@ - result.rowcount == 3 - result.query_result == [{"?column?": 1}] - 'result.query_all_results == [[{"?column?": 1}], [{"?column?": 1}], [{"?column?": 1}]]' + + - name: Run SHOW query + become_user: '{{ pg_user }}' + become: true + postgresql_query: + <<: *pg_parameters + query: "SHOW hba_file" + register: result + + - assert: + that: + - result is not changed
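For reference, the script-file coverage removed above (path_to_script / as_single_query) appears to be handled by the collection's dedicated postgresql_script module rather than postgresql_query. A minimal sketch of such a task follows (an illustration only, not part of this commit; the postgresql_script module options shown and the fixture path are assumptions):

- name: Run queries from a SQL script (illustrative sketch)
  become_user: '{{ pg_user }}'
  become: true
  community.postgresql.postgresql_script:
    db: postgres
    login_user: '{{ pg_user }}'
    # Hypothetical fixture path, mirroring the test0.sql file the removed tasks copied.
    path: '~{{ pg_user }}/test0.sql'
    # Assumes postgresql_script accepts positional_args the same way postgresql_query did.
    positional_args:
      - 1
  register: result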
\ No newline at end of file
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml
index 58832f049..625fb45e2 100644
--- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml
@@ -314,6 +314,205 @@
     - result.rowcount == 0
+# Test the comment argument
+- name: Create schema with comment
+  become_user: "{{ pg_user }}"
+  become: true
+  postgresql_schema:
+    login_user: "{{ pg_user }}"
+    database: "{{ db_name }}"
+    name: comment_schema
+    comment: Test schema 1
+  register: result
+
+- name: Check return values
+  assert:
+    that:
+      - result is changed
+      - result.queries == ['CREATE SCHEMA "comment_schema"', "COMMENT ON SCHEMA \"comment_schema\" IS 'Test schema 1'"]
+
+- name: Check the comment
+  become: true
+  become_user: "{{ pg_user }}"
+  postgresql_query:
+    db: "{{ db_name }}"
+    login_user: "{{ pg_user }}"
+    query: "SELECT obj_description((SELECT oid FROM pg_namespace WHERE nspname = 'comment_schema'), 'pg_namespace') AS comment"
+  register: result
+
+- assert:
+    that:
+      - result.rowcount == 1
+      - result.query_result[0]['comment'] == 'Test schema 1'
+
+
+- name: Set the same comment in check mode
+  become_user: "{{ pg_user }}"
+  become: true
+  postgresql_schema:
+    login_user: "{{ pg_user }}"
+    database: "{{ db_name }}"
+    name: comment_schema
+    comment: Test schema 1
+  register: result
+  check_mode: true
+
+- name: Check return values
+  assert:
+    that:
+      - result is not changed
+
+
+- name: Set another comment in check mode
+  become_user: "{{ pg_user }}"
+  become: true
+  postgresql_schema:
+    login_user: "{{ pg_user }}"
+    database: "{{ db_name }}"
+    name: comment_schema
+    comment: Test schema 2
+  register: result
+  check_mode: true
+
+- name: Check return values
+  assert:
+    that:
+      - result is changed
+
+- name: Check the comment didn't change
+  become: true
+  become_user: "{{ pg_user }}"
+  postgresql_query:
+    db: "{{ db_name }}"
+    login_user: "{{ pg_user }}"
+    query: "SELECT obj_description((SELECT oid FROM pg_namespace WHERE nspname = 'comment_schema'), 'pg_namespace') AS comment"
+  register: result
+
+- assert:
+    that:
+      - result.rowcount == 1
+      - result.query_result[0]['comment'] == 'Test schema 1'
+
+
+- name: Set another comment in real mode
+  become_user: "{{ pg_user }}"
+  become: true
+  postgresql_schema:
+    login_user: "{{ pg_user }}"
+    database: "{{ db_name }}"
+    name: comment_schema
+    comment: Test schema 2
+  register: result
+
+- name: Check return values
+  assert:
+    that:
+      - result is changed
+      - result.queries == ["COMMENT ON SCHEMA \"comment_schema\" IS 'Test schema 2'"]
+
+- name: Check the comment changed
+  become: true
+  become_user: "{{ pg_user }}"
+  postgresql_query:
+    db: "{{ db_name }}"
+    login_user: "{{ pg_user }}"
+    query: "SELECT obj_description((SELECT oid FROM pg_namespace WHERE nspname = 'comment_schema'), 'pg_namespace') AS comment"
+  register: result
+
+- assert:
+    that:
+      - result.rowcount == 1
+      - result.query_result[0]['comment'] == 'Test schema 2'
+
+
+- name: Don't specify the comment
Check return values + assert: + that: + - result is not changed + - result.queries == [] + +- name: Check the comment didn't change + become: true + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT obj_description((SELECT oid FROM pg_namespace WHERE nspname = 'comment_schema'), 'pg_namespace') AS comment" + register: result + +- assert: + that: + - result.rowcount == 1 + - result.query_result[0]['comment'] == 'Test schema 2' + + +- name: Reset the comment + become_user: "{{ pg_user }}" + become: true + postgresql_schema: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + name: comment_schema + comment: '' + register: result + +- name: Check return values + assert: + that: + - result is changed + - result.queries == ["COMMENT ON SCHEMA \"comment_schema\" IS ''"] + +- name: Check the comment is None + become: true + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT obj_description((SELECT oid FROM pg_namespace WHERE nspname = 'comment_schema'), 'pg_namespace') AS comment" + register: result + +- assert: + that: + - result.rowcount == 1 + - result.query_result[0]['comment'] == None + + +- name: Reset the comment again + become_user: "{{ pg_user }}" + become: true + postgresql_schema: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + name: comment_schema + comment: '' + register: result + +- name: Check return values + assert: + that: + - result is not changed + - result.queries == [] + + +- name: Drop schema + become_user: "{{ pg_user }}" + become: true + postgresql_schema: + login_user: "{{ pg_user }}" + database: "{{ db_name }}" + name: comment_schema + state: absent + + # Cleanup - name: Remove user postgresql_user: diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml index c4598d2a9..19622d8af 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml @@ -28,8 +28,19 @@ wal_level: replica log_statement: mod track_functions: none - shared_preload_libraries: 'pg_stat_statements, pgaudit' + shared_preload_libraries: 'pg_stat_statements, pgcrypto' log_line_prefix: 'db=%d,user=%u,app=%a,client=%h ' + unix_socket_directories: '/var/run/postgresql, /var/run/postgresql2' + + - name: Ensure all unix_socket_directories directories exist + file: + state: directory + path: "{{ item }}" + owner: "{{ pg_user }}" + group: "{{ pg_user }}" + mode: '0777' + become: true + with_list: "{{ setting_map['unix_socket_directories'].split(',') | map('trim') | list }}" # Check mode: - name: Set settings in check mode @@ -51,16 +62,33 @@ with_dict: '{{ setting_map }}' # https://github.com/ansible-collections/community.postgresql/issues/78 - - name: Test param with comma containing values + - name: Test param with comma containing values but no quotes <<: *task_parameters shell: "grep shared_preload_libraries {{ pg_auto_conf }}" register: result - assert: that: - - result.stdout == "shared_preload_libraries = 'pg_stat_statements, pgaudit'" - - # Test for single-value params with commas and spaces in value + - result.stdout == "shared_preload_libraries = 'pg_stat_statements, pgcrypto'" + + # 
https://github.com/ansible-collections/community.postgresql/pull/521 + # unix_socket_directories is a GUC_LIST_QUOTE parameter only from PostgreSQL 14 + - name: Test param with comma containing values and quotes + <<: *task_parameters + shell: "grep unix_socket_directories {{ pg_auto_conf }}" + register: result + + - assert: + that: + - result.stdout == "unix_socket_directories = '/var/run/postgresql, /var/run/postgresql2'" + when: postgres_version_resp.stdout is version('14', '<') + + - assert: + that: + - result.stdout == "unix_socket_directories = '\"/var/run/postgresql\", \"/var/run/postgresql2\"'" + when: postgres_version_resp.stdout is version('14', '>=') + + # https://github.com/ansible-collections/community.postgresql/pull/400 - name: Test single-value param with commas and spaces in value <<: *task_parameters shell: "grep log_line_prefix {{ pg_auto_conf }}" @@ -69,3 +97,25 @@ - assert: that: - result.stdout == "log_line_prefix = 'db=%d,user=%u,app=%a,client=%h '" + + # Restart PostgreSQL: + - name: Restart PostgreSQL + become: true + service: + name: "{{ postgresql_service }}" + state: restarted + + # Idempotence: + - name: Set settings in actual mode again after restart for idempotence + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: '{{ item.key }}' + value: '{{ item.value }}' + register: test_idempotence + with_dict: '{{ setting_map }}' + + - name: Check idempotence after restart + assert: + that: not item.changed + with_items: '{{ test_idempotence.results }}' diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml index 8709694ba..ba4513248 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml @@ -11,3 +11,7 @@ test_subscription: test test_role1: alice test_role2: bob conn_timeout: 100 + +primary_port: 5432 + +replica_db_required: true diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml index d72e4d23c..4ce5a5837 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml @@ -1,2 +1,2 @@ dependencies: - - setup_postgresql_replication + - setup_postgresql_db diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml index e440e8c80..e9cfb4172 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml @@ -6,7 +6,9 @@ # Initial tests of postgresql_subscription module: - import_tasks: setup_publication.yml - when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18' + when: + - ansible_distribution_major_version != "7" # CentOS 7 with Postgres 9.2 doesn't support logical replication - import_tasks: 
postgresql_subscription_initial.yml - when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18' + when: + - ansible_distribution_major_version != "7" # CentOS 7 with Postgres 9.2 doesn't support logical replication diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml index b464c3dbe..d2997b480 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml @@ -35,6 +35,7 @@ name: '{{ test_subscription }}' state: present publications: '{{ test_pub }}' + comment: Made by Ansible connparams: host: 127.0.0.1 port: '{{ primary_port }}' @@ -47,7 +48,7 @@ that: - result is changed - result.name == '{{ test_subscription }}' - - result.queries == ["CREATE SUBSCRIPTION test CONNECTION 'host=127.0.0.1 port={{ primary_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }}' PUBLICATION {{ test_pub }}"] + - result.queries == ["CREATE SUBSCRIPTION test CONNECTION 'host=127.0.0.1 port={{ primary_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }}' PUBLICATION {{ test_pub }}", "COMMENT ON SUBSCRIPTION \"test\" IS 'Made by Ansible'"] - result.exists == true - result.initial_state == {} - result.final_state.owner == '{{ pg_user }}' @@ -72,6 +73,116 @@ that: - result.rowcount == 1 + - name: Check the comment + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: "SELECT obj_description(s.oid, 'pg_subscription') AS comment FROM pg_subscription AS s WHERE s.subname = 'test'" + + - assert: + that: + - result.query_result[0]['comment'] == 'Made by Ansible' + + - name: Not specify the comment explicitly + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + + - assert: + that: + - result is not changed + - result.queries == [] + + - name: Check the comment is the same + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: "SELECT obj_description(s.oid, 'pg_subscription') AS comment FROM pg_subscription AS s WHERE s.subname = 'test'" + + - assert: + that: + - result.query_result[0]['comment'] == 'Made by Ansible' + + - name: Reset the comment in check mode + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + comment: '' + check_mode: true + + - assert: + that: + - result is changed + - result.queries == ["COMMENT ON SUBSCRIPTION \"test\" IS ''"] + + - name: Check the comment is the same + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: "SELECT obj_description(s.oid, 'pg_subscription') AS comment FROM pg_subscription AS s WHERE s.subname = 'test'" + + - assert: + that: + - result.query_result[0]['comment'] == 'Made by Ansible' + + - name: Reset the comment in real mode + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + comment: '' + + - assert: + that: + - 
result is changed + - result.queries == ["COMMENT ON SUBSCRIPTION \"test\" IS ''"] + + - name: Check the comment was reset + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: "SELECT obj_description(s.oid, 'pg_subscription') AS comment FROM pg_subscription AS s WHERE s.subname = 'test'" + + - assert: + that: + - result.query_result[0]['comment'] == None + + - name: Reset the comment again + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + comment: '' + + - assert: + that: + - result is not changed + - result.queries == [] + + - name: Reset the comment again in check mode + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + comment: '' + check_mode: true + + - assert: + that: + - result is not changed + - result.queries == [] + ################### # Test mode: absent ################### diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml index b8e6d0b54..a89509af4 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml @@ -237,9 +237,205 @@ name: foo state: absent register: result - ignore_errors: true - assert: that: - result is not changed - - result.msg == "Tries to drop nonexistent tablespace 'foo'" + - result.state == 'absent' + - result.queries == [] + + +# Testing comment argument +- name: Create tablespace with comment + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + location: /ssd + comment: Test comment 1 + register: result + +- assert: + that: + - result is changed + - result.queries == ["CREATE TABLESPACE \"acme\" LOCATION '/ssd'", "COMMENT ON TABLESPACE \"acme\" IS 'Test comment 1'"] + +- name: Check comment + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + query: "SELECT shobj_description((SELECT oid FROM pg_catalog.pg_tablespace WHERE spcname = 'acme'), 'pg_tablespace') AS comment" + register: result + +- assert: + that: + - result.query_result[0]['comment'] == "Test comment 1" + + +- name: Try to create tablespace with same comment + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + location: /ssd + comment: Test comment 1 + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + + +- name: Now try not to pass the comment explicitly + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + location: /ssd + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: Check comment didn't change + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + query: "SELECT shobj_description((SELECT oid FROM pg_catalog.pg_tablespace WHERE spcname = 'acme'), 'pg_tablespace') AS comment" + register: 
result + +- assert: + that: + - result.query_result[0]['comment'] == "Test comment 1" + + +- name: Set another comment in check mode + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + location: /ssd + comment: Test comment 2 + register: result + check_mode: true + +- assert: + that: + - result is changed + - result.queries == ["COMMENT ON TABLESPACE \"acme\" IS 'Test comment 2'"] + +- name: Check the comment didn't change + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + query: "SELECT shobj_description((SELECT oid FROM pg_catalog.pg_tablespace WHERE spcname = 'acme'), 'pg_tablespace') AS comment" + register: result + +- assert: + that: + - result.query_result[0]['comment'] == "Test comment 1" + + +- name: Set another comment in real mode + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + location: /ssd + comment: Test comment 2 + register: result + +- assert: + that: + - result is changed + - result.queries == ["COMMENT ON TABLESPACE \"acme\" IS 'Test comment 2'"] + +- name: Check the comment changed + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + query: "SELECT shobj_description((SELECT oid FROM pg_catalog.pg_tablespace WHERE spcname = 'acme'), 'pg_tablespace') AS comment" + register: result + +- assert: + that: + - result.query_result[0]['comment'] == "Test comment 2" + + +- name: Reset the comment + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + location: /ssd + comment: '' + register: result + +- assert: + that: + - result is changed + - result.queries == ["COMMENT ON TABLESPACE \"acme\" IS ''"] + +- name: Check the comment changed + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + query: "SELECT shobj_description((SELECT oid FROM pg_catalog.pg_tablespace WHERE spcname = 'acme'), 'pg_tablespace') AS comment" + register: result + +- assert: + that: + - result.query_result[0]['comment'] == None + + +- name: Reset the comment again + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + location: /ssd + comment: '' + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + + +# Clean up +- name: Drop tablespace + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + state: absent diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml index dbcbea120..5da901258 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml @@ -2,3 +2,6 @@ db_name: 'ansible_db' db_user1: 'ansible_db_user1' db_user2: 'ansible_db_user2' dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + +# The user module tests require an sql_ascii encoded db to test client decoding +sql_ascii_db_required: true diff --git 
a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml index 150d26efd..8d0c8b489 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml @@ -10,3 +10,7 @@ # General tests: - import_tasks: postgresql_user_general.yml when: postgres_version_resp.stdout is version('9.4', '>=') + +# SQL_ASCII database tests: +- import_tasks: postgresql_user_sql_ascii_db.yml + when: postgres_version_resp.stdout is version('9.4', '>=') diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml index cde95b0c6..73db14149 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml @@ -5,8 +5,6 @@ - vars: test_user: hello.user.with.dots test_user2: hello - test_group1: group1 - test_group2: group2 test_table: test test_comment1: 'comment1' test_comment2: 'comment2' @@ -66,6 +64,33 @@ that: - result.rowcount == 1 +# Check comment argument: + - name: Add a comment on the user in check mode + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + comment: '{{ test_comment1 }}' + check_mode: true + + - assert: + that: + - result is changed + - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment1 }}'"] + + - name: check the comment didn't change + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment + FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}' + + - assert: + that: + - result.rowcount == 1 + - result.query_result[0].comment == None + - name: Add a comment on the user <<: *task_parameters postgresql_user: @@ -103,7 +128,33 @@ that: - result is not changed - - name: Try to add another comment on the user + - name: Try to add another comment on the user in check mode + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + comment: '{{ test_comment2 }}' + check_mode: true + + - assert: + that: + - result is changed + - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment2 }}'"] + + - name: check the comment didn't change + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment + FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}' + + - assert: + that: + - result.rowcount == 1 + - result.query_result[0].comment == '{{ test_comment1 }}' + + - name: Try to add another comment on the user in real mode <<: *task_parameters postgresql_user: <<: *pg_parameters @@ -128,6 +179,44 @@ - result.rowcount == 1 - result.query_result[0].comment == '{{ test_comment2 }}' + - name: Reset the comment + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + comment: '' + + - assert: + that: + - result is changed + - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS ''"] + + - name: check the comment + <<: 
*task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment + FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}' + + - assert: + that: + - result.rowcount == 1 + - result.query_result[0].comment == None + + - name: Reset the comment again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + comment: '' + + - assert: + that: + - result is not changed + - result.queries == [] +# End comment argument testing + - name: Try to create role again in check_mode <<: *task_parameters check_mode: true @@ -596,134 +685,6 @@ that: - result is not changed - # - # Test groups parameter - # - - name: Create test group - <<: *task_parameters - postgresql_user: - <<: *pg_parameters - name: '{{ test_group2 }}' - role_attr_flags: NOLOGIN - - - name: Create role test_group1 and grant test_group2 to test_group1 in check_mode - <<: *task_parameters - postgresql_user: - <<: *pg_parameters - name: '{{ test_group1 }}' - groups: '{{ test_group2 }}' - role_attr_flags: NOLOGIN - check_mode: true - - - assert: - that: - - result is changed - - result.user == '{{ test_group1 }}' - - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"'] - - - name: check that the user doesn't exist - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'" - - - assert: - that: - - result.rowcount == 0 - - - name: check membership - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'" - - - assert: - that: - - result.rowcount == 0 - - - name: Create role test_group1 and grant test_group2 to test_group1 - <<: *task_parameters - postgresql_user: - <<: *pg_parameters - name: '{{ test_group1 }}' - groups: '{{ test_group2 }}' - role_attr_flags: NOLOGIN - trust_input: false - - - assert: - that: - - result is changed - - result.user == '{{ test_group1 }}' - - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"'] - - - name: check that the user exists - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'" - - - assert: - that: - - result.rowcount == 1 - - - name: check membership - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'" - - - assert: - that: - - result.rowcount == 1 - - - name: Grant test_group2 to test_group1 again - <<: *task_parameters - postgresql_user: - <<: *pg_parameters - name: '{{ test_group1 }}' - groups: '{{ test_group2 }}' - - - assert: - that: - - result is not changed - - result.user == '{{ test_group1 }}' - - - name: check membership - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'" - - - assert: - that: - - result.rowcount == 1 - - - name: Grant groups to existent role - <<: *task_parameters - postgresql_user: - <<: *pg_parameters - name: '{{ test_user }}' - groups: - - '{{ test_group1 }}' - - '{{ test_group2 }}' - trust_input: false - - - assert: - that: - - result is changed - - result.user == '{{ test_user }}' - - result.queries == ['GRANT "{{ test_group1 }}" TO "{{ test_user }}"', 'GRANT "{{ 
test_group2 }}" TO "{{ test_user }}"'] - - - name: check membership - <<: *task_parameters - postgresql_query: - <<: *pg_parameters - query: "SELECT * FROM pg_group WHERE groname in ('{{ test_group1 }}', '{{ test_group2 }}') AND grolist != '{}'" - - - assert: - that: - - result.rowcount == 2 - ######################## # Test trust_input param @@ -777,6 +738,35 @@ that: - result is not changed +##### Test error handling when the database is read-only + + - name: Set database as read-only + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: 'default_transaction_read_only' + value: 'on' + + - name: Try to alter role in read-only database + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + role_attr_flags: 'CREATEDB' + register: result + ignore_errors: true + + - assert: + that: + - result.msg == 'ERROR: cannot execute ALTER ROLE in a read-only transaction\n' + + - name: Set database as read-write + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: 'default_transaction_read_only' + value: 'off' + always: # # Clean up @@ -797,6 +787,4 @@ loop: - '{{ test_user }}' - '{{ test_user2 }}' - - '{{ test_group1 }}' - - '{{ test_group2 }}' - '{{ dangerous_name }}' diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_sql_ascii_db.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_sql_ascii_db.yml new file mode 100644 index 000000000..4f322f152 --- /dev/null +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_sql_ascii_db.yml @@ -0,0 +1,8 @@ +- name: Execute module with no changes + become_user: '{{ pg_user }}' + become: true + postgresql_user: + name: '{{ sql_ascii_user }}' + db: '{{ sql_ascii_db }}' + role_attr_flags: SUPERUSER + password: '{{ sql_ascii_pass }}' diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml index 62f72d9ec..48b6208e0 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml @@ -75,7 +75,7 @@ state: stopped when: (ansible_facts.distribution_major_version != '8' and ansible_facts.distribution == 'CentOS') or ansible_facts.distribution != 'CentOS' - - name: Pause between stop and start PosgreSQL + - name: Pause between stop and start PostgreSQL pause: seconds: 5 diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml index 973d41591..823b6561a 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml @@ -4,9 +4,16 @@ postgresql_packages: - postgresql-server - python-psycopg2 +pip_packages: [] + pg_user: postgres pg_group: root +pg_dir: "/var/lib/pgsql/data" +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_conf_location: "{{ pg_dir 
}}/postgresql.conf" +pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf" + locale_latin_suffix: locale_utf8_suffix: @@ -18,4 +25,13 @@ ssl_user: 'ssl_user' ssl_pass: 'ssl_pass' ssl_rootcert: '/etc/server-ca.crt' ssl_cert: '/etc/client.crt' -ssl_key: '/etc/client.key'
\ No newline at end of file +ssl_key: '/etc/client.key' + +# second database, for logical replication testing +replica_data_dir: "/var/lib/pgsql_replica" +replica_port: 5533 + +# defaults for test sql_ascii database +sql_ascii_db: 'sql_ascii' +sql_ascii_user: 'sql_ascii_user' +sql_ascii_pass: 'sql_ascii_pass' diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml index 80bb3c4d4..062fadef7 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml @@ -66,20 +66,15 @@ loop_var: loop_item # -# Install PostgreSQL 15 on Ubuntu 20.04 -- name: Install PostgreSQL 15 on Ubuntu 20.04 +# Prepare Ubuntu for PostgreSQL install +- name: Prepare Ubuntu for PostgreSQL when: - ansible_facts.distribution == 'Ubuntu' - - ansible_facts.distribution_major_version == '20' block: - name: Run autoremove become: true apt: autoremove: true - - - name: Install wget - package: - name: wget - name: Create the file repository configuration lineinfile: @@ -108,7 +103,7 @@ ## # -- name: install dependencies for postgresql test +- name: Install required OS packages package: name: '{{ postgresql_package_item }}' state: present @@ -116,6 +111,13 @@ loop_control: loop_var: postgresql_package_item +- name: Install required Pip packages + pip: + name: "{{ pip_package_item }}" + with_items: "{{ pip_packages }}" + loop_control: + loop_var: pip_package_item + - name: Initialize postgres (RedHat systemd) command: postgresql-setup initdb when: ansible_os_family == "RedHat" and ansible_service_mgr == "systemd" @@ -125,7 +127,7 @@ when: ansible_os_family == "RedHat" and ansible_service_mgr != "systemd" - name: Initialize postgres (Debian) - shell: . /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main + shell: . 
/usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main args: creates: /etc/postgresql/{{ pg_ver }}/ when: ansible_os_family == 'Debian' @@ -208,7 +210,15 @@ - name: Stop postgresql service service: name={{ postgresql_service }} state=stopped - when: terminate is not succeeded + +- name: Configure postgresql.conf + ansible.builtin.lineinfile: + path: '{{ pg_conf_location }}' + regexp: '^wal_level ' + line: 'wal_level = logical' + when: + - replica_db_required is defined and replica_db_required + - ansible_distribution_major_version != "7" # CentOS 7 with Postgres 9.2 doesn't support 'logical' - name: Pause between stop and start pause: @@ -277,3 +287,14 @@ when: - ansible_os_family == 'Debian' - postgres_version_resp.stdout is version('9.4', '>=') + +# Create a second database +- import_tasks: replica.yml + when: + - replica_db_required is defined and replica_db_required + - ansible_distribution_major_version != "7" # CentOS 7 with Postgres 9.2 doesn't support 'logical' + +# Create an SQL_ASCII encoded database +- import_tasks: sql_ascii.yml + when: + - sql_ascii_db_required is defined and sql_ascii_db_required diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/replica.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/replica.yml new file mode 100644 index 000000000..0570db9bb --- /dev/null +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/replica.yml @@ -0,0 +1,59 @@ +- name: Replica - find pg_ctl + shell: find /usr/lib /usr/bin -type f -name "pg_ctl" + register: result + +- name: Replica - set path to pg_ctl + set_fact: + pg_ctl: '{{ result.stdout }}' + +- name: Replica - stop database + become: true + become_user: '{{ pg_user }}' + shell: '{{ pg_ctl }} -D {{ replica_data_dir }} stop' + ignore_errors: true + +- name: Replica - remove old db + file: + path: '{{ replica_data_dir }}' + state: absent + ignore_errors: true + +- name: Replica - create data dir + file: + state: directory + recurse: true + path: "{{ replica_data_dir }}" + owner: "{{ pg_user }}" + mode: "0700" + +- name: Replica - find initdb + shell: find /usr/lib /usr/bin -type f -name "initdb" + register: result + +- name: Replica - set path to initdb + set_fact: + initdb: '{{ result.stdout }}' + +- name: Replica - initialize database + become: true + become_user: '{{ pg_user }}' + shell: '{{ initdb }} --pgdata {{ replica_data_dir }}' + +- name: Replica - configure postgresql.conf + ansible.builtin.lineinfile: + path: '{{ replica_data_dir }}/postgresql.conf' + regexp: '^wal_level ' + line: 'wal_level = logical' + +- name: Replica - start database + become: true + become_user: '{{ pg_user }}' + shell: '{{ pg_ctl }} -D {{ replica_data_dir }} -o "-p {{ replica_port }}" -l {{ replica_data_dir }}/replica.log start' + +- name: Replica - check connectivity + become: true + become_user: '{{ pg_user }}' + postgresql_ping: + db: '{{ pg_user }}' + login_user: '{{ pg_user }}' + login_port: '{{ replica_port }}' diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/sql_ascii.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/sql_ascii.yml new file mode 100644 index 000000000..d6641412a --- /dev/null +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/sql_ascii.yml @@ -0,0 
+1,36 @@ +- name: postgresql SQL_ASCII - create database + become_user: '{{ pg_user }}' + become: true + postgresql_db: + name: '{{ sql_ascii_db }}' + encoding: 'SQL_ASCII' + template: 'template0' + +- name: postgresql SQL_ASCII - ensure db exists with proper encoding + become_user: '{{ pg_user }}' + become: true + shell: "psql -c 'SHOW SERVER_ENCODING' --tuples-only --no-align --dbname {{ sql_ascii_db }}" + register: sql_ascii_db_encoding + +- ansible.builtin.assert: + that: + - sql_ascii_db_encoding.stdout == 'SQL_ASCII' + +- name: postgresql SQL_ASCII - create role + become_user: '{{ pg_user }}' + become: true + postgresql_user: + name: '{{ sql_ascii_user }}' + db: '{{ sql_ascii_db }}' + role_attr_flags: SUPERUSER + password: '{{ sql_ascii_pass }}' + +- name: postgresql SQL_ASCII - ensure role was created + become: true + become_user: "{{ pg_user }}" + shell: "psql -c \"select * from pg_authid where rolname='{{ sql_ascii_user }}';\" -d {{ sql_ascii_db }}" + register: result + +- ansible.builtin.assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml index 932738d39..8e16579ad 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml @@ -3,7 +3,6 @@ postgresql_packages: - "postgresql-common" - "python-psycopg2" -pg_hba_location: "/etc/postgresql/9.4/main/pg_hba.conf" -pg_dir: "/var/lib/postgresql/9.4/main" -pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf" pg_ver: 9.4 +pg_hba_location: "/etc/postgresql/{{ pg_ver }}/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/{{ pg_ver }}/main" diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Fedora-36-py3.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Fedora-36-py3.yml new file mode 100644 index 000000000..e67a4ac4f --- /dev/null +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Fedora-36-py3.yml @@ -0,0 +1,7 @@ +# We install both psycopg2 and psycopg3. +# As psycopg3 is only 3.0, the modules should use psycopg2. 
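The Fedora vars above install both drivers and note that, while the packaged psycopg 3 is still 3.0, the modules are expected to keep using psycopg2; the Ubuntu 22.04 vars further down pin psycopg==3.1.9 because 3.1 is the first release with client-side-binding cursors. A minimal standalone sketch of that preference follows, assuming a 3.1 cutoff and a psycopg2 fallback; it is only an illustration, not the collection's actual driver-selection code in module_utils.

# Sketch: probe which psycopg driver a host would end up using, preferring
# psycopg 3 only when it is at least 3.1 and falling back to psycopg2.
# The cutoff and the fallback order are assumptions for this example.
def _vtuple(version):
    # "3.1.9" -> (3, 1); "2.9.6 (dt dec pq3 ext lt64)" -> (2, 9)
    return tuple(int(part) for part in version.split(".")[:2] if part.isdigit())

def pick_driver(minimum=(3, 1)):
    try:
        import psycopg  # psycopg 3
        if _vtuple(psycopg.__version__) >= minimum:
            return "psycopg", psycopg.__version__
    except ImportError:
        pass
    import psycopg2  # raises ImportError itself if neither driver is installed
    return "psycopg2", psycopg2.__version__

if __name__ == "__main__":
    print(pick_driver())
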
+postgresql_packages: + - "postgresql-contrib" + - "postgresql-server" + - "python3-psycopg2" + - "python3-psycopg3" # psycopg 3.0.16 diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Fedora-38-py3.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Fedora-38-py3.yml new file mode 100644 index 000000000..e233746ed --- /dev/null +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Fedora-38-py3.yml @@ -0,0 +1,5 @@ +postgresql_packages: + - "postgresql-contrib" + - "postgresql-server" + - "python3-psycopg2" + - "python3-psycopg3" diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml index 72041a3d7..08d2adb8a 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml @@ -1,9 +1,6 @@ postgresql_packages: - "postgresql-server" + - "postgresql-contrib" - "python3-psycopg2" - "bzip2" - "xz" - -pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" -pg_dir: "/var/lib/pgsql/data" -pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf" diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml index 30720f8fe..2d82eaefd 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml @@ -1,8 +1,5 @@ postgresql_packages: - "postgresql-server" + - "postgresql-contrib" - "python-psycopg2" - "bzip2" - -pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" -pg_dir: "/var/lib/pgsql/data" -pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf" diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml index ff543a385..15c8b4c2b 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml @@ -1,13 +1,15 @@ +pg_ver: 15 + postgresql_packages: - "apt-utils" - - "postgresql" + - "postgresql-{{ pg_ver }}" - "postgresql-common" - "python3-psycopg2" - "postgresql-client" -pg_hba_location: "/etc/postgresql/15/main/pg_hba.conf" -pg_dir: "/var/lib/postgresql/15/main" +pg_conf_location: "/etc/postgresql/{{ pg_ver }}/main/postgresql.conf" +pg_hba_location: "/etc/postgresql/{{ pg_ver }}/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/{{ pg_ver }}/main" pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf" -pg_ver: 15 -postgis: postgresql-15-postgis-3 +postgis: "postgresql-{{ pg_ver }}-postgis-3" diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-22-py3.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-22-py3.yml new file mode 100644 index 000000000..334ad27aa --- /dev/null +++ 
b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-22-py3.yml @@ -0,0 +1,18 @@ +pg_ver: 15 + +postgresql_packages: + - "apt-utils" + - "postgresql-{{ pg_ver }}" + - "postgresql-common" + # - "python3-psycopg2" + - "postgresql-client" + +pip_packages: + - "psycopg==3.1.9" # We need at least 3.1 for Client-side-binding cursors + +pg_conf_location: "/etc/postgresql/{{ pg_ver }}/main/postgresql.conf" +pg_hba_location: "/etc/postgresql/{{ pg_ver }}/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/{{ pg_ver }}/main" +pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf" + +postgis: postgresql-{{ pg_ver }}-postgis-3 diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml index 3ff3e0de5..c5fa981e7 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml @@ -1,7 +1,3 @@ postgresql_packages: - "postgresql-server" - "python3-psycopg2" - -pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" -pg_dir: "/var/lib/pgsql/data" -pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf" diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml index 71f1cd46e..af7dfe475 100644 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml +++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml @@ -1,7 +1,3 @@ postgresql_packages: - "postgresql-server" - "python-psycopg2" - -pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" -pg_dir: "/var/lib/pgsql/data" -pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf" diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/defaults/main.yml deleted file mode 100644 index 5ac314c4b..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/defaults/main.yml +++ /dev/null @@ -1,26 +0,0 @@ -# General: -pg_user: postgres -db_default: postgres - -pg_package_list: -- postgresql -- postgresql-client -- python3-psycopg2 - -packages_to_remove: -- postgresql -- postgresql-client - -# Master specific defaults: -primary_root_dir: '/var/lib/pgsql/primary' -primary_data_dir: '{{ primary_root_dir }}/data' -primary_postgresql_conf: '{{ primary_data_dir }}/postgresql.conf' -primary_pg_hba_conf: '{{ primary_data_dir }}/pg_hba.conf' -primary_port: 5431 - -# Replica specific defaults: -replica_root_dir: '/var/lib/pgsql/replica' -replica_data_dir: '{{ replica_root_dir }}/data' -replica_postgresql_conf: '{{ replica_data_dir }}/postgresql.conf' -replica_pg_hba_conf: '{{ replica_data_dir }}/pg_hba.conf' -replica_port: 5434 diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/handlers/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/handlers/main.yml deleted file mode 100644 index ea230c778..000000000 --- 
a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/handlers/main.yml +++ /dev/null @@ -1,24 +0,0 @@ -- name: Stop services - become: true - become_user: '{{ pg_user }}' - shell: '{{ pg_ctl }} -D {{ item.datadir }} -o "-p {{ item.port }}" -m immediate stop' - loop: - - { datadir: '{{ primary_data_dir }}', port: '{{ primary_port }}' } - - { datadir: '{{ replica_data_dir }}', port: '{{ replica_port }}' } - listen: stop postgresql - -- name: Remove packages - apt: - name: '{{ packages_to_remove }}' - state: absent - listen: cleanup postgresql - -- name: Remove FS objects - file: - state: absent - path: "{{ item }}" - force: true - loop: - - "{{ primary_root_dir }}" - - "{{ replica_root_dir }}" - listen: cleanup postgresql diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/main.yml deleted file mode 100644 index 4c6421a18..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/main.yml +++ /dev/null @@ -1,13 +0,0 @@ -#################################################################### -# WARNING: These are designed specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # -#################################################################### - -# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# Setup PostgreSQL primary-standby replication into one container: -- import_tasks: setup_postgresql_cluster.yml - when: - - ansible_distribution == 'Ubuntu' - - ansible_distribution_major_version >= '18' diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml deleted file mode 100644 index 2bff42e78..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml +++ /dev/null @@ -1,149 +0,0 @@ -- name: Remove preinstalled packages - apt: - name: '{{ packages_to_remove }}' - state: absent - become: true - -- name: Run autoremove - become: true - apt: - autoremove: true - -- name: Configure Ubuntu 20 for PostgreSQL - when: - - ansible_facts['distribution'] == 'Ubuntu' - - ansible_facts['distribution_major_version'] is version('20', 'ge') - block: - - name: Install wget - package: - name: wget - - - name: Add PGDG repository - lineinfile: - create: true - line: "deb http://apt.postgresql.org/pub/repos/apt {{ ansible_facts['distribution_release'] }}-pgdg main" - path: '/etc/apt/sources.list.d/pgdg.list' - state: 'present' - - - name: Add PGDG GPG key - ansible.builtin.apt_key: - state: present - url: https://www.postgresql.org/media/keys/ACCC4CF8.asc - - - name: Update apt cache - apt: - update_cache: true - -- name: Install apt-utils - apt: - name: apt-utils - -- name: Install packages - apt: - name: '{{ pg_package_list }}' - policy_rc_d: 101 # prevent the service from starting - notify: cleanup postgresql - -- name: Delete postgresql related files - file: - state: absent - path: '{{ item }}' - force: true - loop: - - '{{ primary_root_dir }}' - - '{{ 
replica_root_dir }}' - - /etc/postgresql - - /var/lib/postgresql - -- name: Create dirs needed - file: - state: directory - recurse: true - path: '{{ item }}' - owner: postgres - group: postgres - mode: '0700' - loop: - - '{{ primary_data_dir }}' - - '{{ replica_data_dir }}' - - /var/lib/postgresql - notify: cleanup postgresql - -- name: Find initdb - shell: find /usr/lib -type f -name "initdb" - register: result - -- name: Set path to initdb - set_fact: - initdb: '{{ result.stdout }}' - -- name: Initialize databases - become: true - become_user: '{{ pg_user }}' - shell: '{{ initdb }} --pgdata {{ item }}' - loop: - - '{{ primary_data_dir }}' - - '{{ replica_data_dir }}' - -- name: Copy config templates - template: - src: '{{ item.conf_templ }}' - dest: '{{ item.conf_dest }}' - owner: postgres - group: postgres - force: true - loop: - - conf_templ: primary_postgresql.conf.j2 - conf_dest: '{{ primary_postgresql_conf }}' - - conf_templ: replica_postgresql.conf.j2 - conf_dest: '{{ replica_postgresql_conf }}' - - conf_templ: pg_hba.conf.j2 - conf_dest: '{{ primary_pg_hba_conf }}' - - conf_templ: pg_hba.conf.j2 - conf_dest: '{{ replica_pg_hba_conf }}' - -- name: Find pg_ctl - shell: find /usr/lib -type f -name "pg_ctl" - register: result - -- name: Set path to initdb - set_fact: - pg_ctl: '{{ result.stdout }}' - -- name: Start primary - become: true - become_user: '{{ pg_user }}' - shell: '{{ pg_ctl }} -D {{ primary_data_dir }} -o "-p {{ primary_port }}" -l {{ primary_data_dir }}/primary.log start' - notify: - - stop postgresql - -- name: Start replica - become: true - become_user: '{{ pg_user }}' - shell: '{{ pg_ctl }} -D {{ replica_data_dir }} -o "-p {{ replica_port }}" -l {{ replica_data_dir }}/replica.log start' - -- name: Check connectivity to the primary and get PostgreSQL version - become: true - become_user: '{{ pg_user }}' - postgresql_ping: - db: '{{ db_default }}' - login_user: '{{ pg_user }}' - login_port: '{{ primary_port }}' - register: result - -- name: Check connectivity to the replica and get PostgreSQL version - become: true - become_user: '{{ pg_user }}' - postgresql_ping: - db: '{{ db_default }}' - login_user: '{{ pg_user }}' - login_port: '{{ replica_port }}' - -- name: Define server version - set_fact: - pg_major_version: '{{ result.server_version.major }}' - pg_minor_version: '{{ result.server_version.minor }}' - -- name: Print PostgreSQL version - debug: - msg: PostgreSQL version is {{ pg_major_version }}.{{ pg_minor_version }} diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2 b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2 deleted file mode 100644 index 62e05ffc8..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2 +++ /dev/null @@ -1,7 +0,0 @@ -local all all trust -local replication logical_replication trust -host replication logical_replication 127.0.0.1/32 trust -host replication logical_replication 0.0.0.0/0 trust -local all logical_replication trust -host all logical_replication 127.0.0.1/32 trust -host all logical_replication 0.0.0.0/0 trust diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/primary_postgresql.conf.j2 b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/primary_postgresql.conf.j2 deleted 
file mode 100644 index 545769f35..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/primary_postgresql.conf.j2 +++ /dev/null @@ -1,28 +0,0 @@ -# Important parameters: -listen_addresses='*' -port = {{ primary_port }} -wal_level = logical -max_wal_senders = 8 -track_commit_timestamp = on -max_replication_slots = 10 - -# Unimportant parameters: -max_connections=10 -shared_buffers=8MB -dynamic_shared_memory_type=posix -log_destination='stderr' -logging_collector=on -log_directory='log' -log_filename='postgresql-%a.log' -log_truncate_on_rotation=on -log_rotation_age=1d -log_rotation_size=0 -log_line_prefix='%m[%p]' -log_timezone='W-SU' -datestyle='iso,mdy' -timezone='W-SU' -lc_messages='en_US.UTF-8' -lc_monetary='en_US.UTF-8' -lc_numeric='en_US.UTF-8' -lc_time='en_US.UTF-8' -default_text_search_config='pg_catalog.english' diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2 b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2 deleted file mode 100644 index 206ab2eb3..000000000 --- a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2 +++ /dev/null @@ -1,28 +0,0 @@ -# Important parameters: -listen_addresses='*' -port = {{ replica_port }} -wal_level = logical -max_wal_senders = 8 -track_commit_timestamp = on -max_replication_slots = 10 - -# Unimportant parameters: -max_connections=10 -shared_buffers=8MB -dynamic_shared_memory_type=posix -log_destination='stderr' -logging_collector=on -log_directory='log' -log_filename='postgresql-%a.log' -log_truncate_on_rotation=on -log_rotation_age=1d -log_rotation_size=0 -log_line_prefix='%m[%p]' -log_timezone='W-SU' -datestyle='iso,mdy' -timezone='W-SU' -lc_messages='en_US.UTF-8' -lc_monetary='en_US.UTF-8' -lc_numeric='en_US.UTF-8' -lc_time='en_US.UTF-8' -default_text_search_config='pg_catalog.english' diff --git a/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py b/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py index 49806f2e2..85d693c61 100755 --- a/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py +++ b/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py @@ -2,7 +2,8 @@ # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """Prevent unwanted files from being added to the source tree.""" -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import os diff --git a/ansible_collections/community/postgresql/tests/sanity/ignore-2.12.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.12.txt index b9cd1303f..d188d3d68 100644 --- a/ansible_collections/community/postgresql/tests/sanity/ignore-2.12.txt +++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.12.txt @@ -1,5 +1,3 @@ -tests/utils/shippable/check_matrix.py replace-urlopen tests/utils/shippable/timing.py shebang plugins/modules/postgresql_db.py use-argspec-type-path plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen -plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown diff --git 
a/ansible_collections/community/postgresql/tests/sanity/ignore-2.13.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.13.txt index b9cd1303f..d188d3d68 100644 --- a/ansible_collections/community/postgresql/tests/sanity/ignore-2.13.txt +++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.13.txt @@ -1,5 +1,3 @@ -tests/utils/shippable/check_matrix.py replace-urlopen tests/utils/shippable/timing.py shebang plugins/modules/postgresql_db.py use-argspec-type-path plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen -plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown diff --git a/ansible_collections/community/postgresql/tests/sanity/ignore-2.14.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.14.txt index b9cd1303f..d188d3d68 100644 --- a/ansible_collections/community/postgresql/tests/sanity/ignore-2.14.txt +++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.14.txt @@ -1,5 +1,3 @@ -tests/utils/shippable/check_matrix.py replace-urlopen tests/utils/shippable/timing.py shebang plugins/modules/postgresql_db.py use-argspec-type-path plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen -plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown diff --git a/ansible_collections/community/postgresql/tests/sanity/ignore-2.15.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.15.txt index 58b57c247..c71a79a50 100644 --- a/ansible_collections/community/postgresql/tests/sanity/ignore-2.15.txt +++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.15.txt @@ -1,6 +1,4 @@ -tests/utils/shippable/check_matrix.py replace-urlopen tests/utils/shippable/timing.py shebang plugins/modules/postgresql_db.py use-argspec-type-path plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen -plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown plugins/module_utils/version.py pylint:unused-import diff --git a/ansible_collections/community/postgresql/tests/sanity/ignore-2.16.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.16.txt index 58b57c247..c71a79a50 100644 --- a/ansible_collections/community/postgresql/tests/sanity/ignore-2.16.txt +++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.16.txt @@ -1,6 +1,4 @@ -tests/utils/shippable/check_matrix.py replace-urlopen tests/utils/shippable/timing.py shebang plugins/modules/postgresql_db.py use-argspec-type-path plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen -plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown plugins/module_utils/version.py pylint:unused-import diff --git a/ansible_collections/community/postgresql/tests/sanity/ignore-2.17.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.17.txt new file mode 100644 index 000000000..230ec3421 --- /dev/null +++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.17.txt @@ -0,0 +1,5 @@ +plugins/modules/postgresql_db.py use-argspec-type-path +plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen +plugins/module_utils/version.py pylint:unused-import +tests/utils/shippable/timing.py shebang +tests/unit/plugins/module_utils/test_postgres.py pylint:unidiomatic-typecheck diff --git a/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py 
diff --git a/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py
index 975542446..566c78851 100644
--- a/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py
+++ b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py
@@ -1,14 +1,15 @@
 # Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
 __metaclass__ = type
 
 from os import environ
 
-import pytest
-
 import ansible_collections.community.postgresql.plugins.module_utils.postgres as pg
-
+import pytest
+from ansible_collections.community.postgresql.plugins.module_utils.version import \
+    LooseVersion
 
 INPUT_DICT = dict(
     session_role=dict(default=''),
@@ -124,7 +125,7 @@ def m_psycopg2():
 
     class DummyPsycopg2():
         def __init__(self):
-            self.__version__ = '2.4.3'
+            self.__version__ = "2.9.6"
             self.extras = Extras()
             self.extensions = Extensions()
 
@@ -155,8 +156,12 @@ class TestEnsureReqLibs():
     class Dummym_ansible_module():
         def __init__(self):
             self.params = {'ca_cert': False}
+            self.warn_msg = ''
             self.err_msg = ''
 
+        def warn(self, msg):
+            self.warn_msg = msg
+
         def fail_json(self, msg):
             self.err_msg = msg
 
@@ -164,13 +169,14 @@ class TestEnsureReqLibs():
 
     def test_ensure_req_libs_has_not_psycopg2(self, m_ansible_module):
         """Test ensure_required_libs() with psycopg2 is None."""
-        # HAS_PSYCOPG2 is False by default
+        # HAS_PSYCOPG is False by default
        pg.ensure_required_libs(m_ansible_module)
         assert 'Failed to import the required Python library (psycopg2)' in m_ansible_module.err_msg
 
     def test_ensure_req_libs_has_psycopg2(self, m_ansible_module, monkeypatch):
         """Test ensure_required_libs() with psycopg2 is not None."""
-        monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+        monkeypatch.setattr(pg, 'HAS_PSYCOPG', True)
+        monkeypatch.setattr(pg, 'PSYCOPG_VERSION', "2.9")
         pg.ensure_required_libs(m_ansible_module)
         assert m_ansible_module.err_msg == ''
 
@@ -180,8 +186,9 @@ class TestEnsureReqLibs():
         Test with module.params['ca_cert'], psycopg2 version is suitable.
         """
         m_ansible_module.params['ca_cert'] = True
-        monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
-        monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+        monkeypatch.setattr(pg, 'HAS_PSYCOPG', True)
+        monkeypatch.setattr(pg, 'PSYCOPG_VERSION', LooseVersion("2.9.6"))
+        monkeypatch.setattr(pg, 'psycopg', m_psycopg2)
 
         pg.ensure_required_libs(m_ansible_module)
         assert m_ansible_module.err_msg == ''
@@ -191,11 +198,10 @@ class TestEnsureReqLibs():
         Test with module.params['ca_cert'], psycopg2 version is wrong.
         """
         m_ansible_module.params['ca_cert'] = True
-        monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+        psycopg = m_psycopg2
+        monkeypatch.setattr(pg, 'psycopg', psycopg)
         # Set wrong psycopg2 version number:
-        psycopg2 = m_psycopg2
-        psycopg2.__version__ = '2.4.2'
-        monkeypatch.setattr(pg, 'psycopg2', psycopg2)
+        monkeypatch.setattr(pg, 'PSYCOPG_VERSION', LooseVersion("2.4.2"))
 
         pg.ensure_required_libs(m_ansible_module)
         assert 'psycopg2 must be at least 2.4.3' in m_ansible_module.err_msg
@@ -231,10 +237,10 @@ class TestConnectToDb():
     """
     Namespace for testing connect_to_db() function.
 
-    When some connection errors occure connect_to_db() caught any of them
+    When some connection errors occur connect_to_db() caught any of them
     and invoke fail_json() or warn() methods of AnsibleModule object depending on the passed parameters.
-    connect_to_db may return db_connection object or None if errors occured.
+    connect_to_db may return db_connection object or None if errors occurred.
 
     Therefore we must check:
     1. Values of err_msg and warn_msg attributes of m_ansible_module mock object.
    2. Types of return objects (db_connection and cursor).
@@ -242,22 +248,22 @@ def test_connect_to_db(self, m_ansible_module, monkeypatch, m_psycopg2):
         """Test connect_to_db(), common test."""
-        monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+        monkeypatch.setattr(pg, 'psycopg', m_psycopg2)
         monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
 
         conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
         db_connection, dummy = pg.connect_to_db(m_ansible_module, conn_params)
         cursor = db_connection.cursor()
 
         # if errors, db_connection returned as None:
-        assert type(db_connection) == DbConnection
-        assert type(cursor) == Cursor
+        assert type(db_connection) is DbConnection
+        assert type(cursor) is Cursor
 
         assert m_ansible_module.err_msg == ''
         # The default behaviour, normal in this case:
         assert 'Database name has not been passed' in m_ansible_module.warn_msg
 
     def test_session_role(self, m_ansible_module, monkeypatch, m_psycopg2):
         """Test connect_to_db(), switch on session_role."""
-        monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+        monkeypatch.setattr(pg, 'psycopg', m_psycopg2)
         monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
 
         m_ansible_module.params['session_role'] = 'test_role'
@@ -265,8 +271,8 @@
         db_connection, dummy = pg.connect_to_db(m_ansible_module, conn_params)
         cursor = db_connection.cursor()
         # if errors, db_connection returned as None:
-        assert type(db_connection) == DbConnection
-        assert type(cursor) == Cursor
+        assert type(db_connection) is DbConnection
+        assert type(cursor) is Cursor
         assert m_ansible_module.err_msg == ''
         # The default behaviour, normal in this case:
         assert 'Database name has not been passed' in m_ansible_module.warn_msg
@@ -275,8 +281,7 @@
         """
         Test connect_to_db(), fail_on_conn arg passed as True (the default behavior).
         """
-        monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
-        monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+        monkeypatch.setattr(pg, 'psycopg', m_psycopg2)
 
         m_ansible_module.params['login_user'] = 'Exception'  # causes Exception
 
@@ -290,8 +295,7 @@
         """
         Test connect_to_db(), fail_on_conn arg passed as False.
         """
-        monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
-        monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+        monkeypatch.setattr(pg, 'psycopg', m_psycopg2)
 
         m_ansible_module.params['login_user'] = 'Exception'  # causes Exception
 
@@ -306,9 +310,9 @@
         """
         Test connect_to_db(), autocommit arg passed as True (the default is False).
         """
-        monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
 
         # case 1: psycopg2.__version >= 2.4.2 (the default in m_psycopg2)
+        monkeypatch.setattr(pg, 'psycopg', m_psycopg2)
         monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
 
         conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
@@ -316,8 +320,8 @@
         cursor = db_connection.cursor()
 
         # if errors, db_connection returned as None:
-        assert type(db_connection) == DbConnection
-        assert type(cursor) == Cursor
+        assert type(db_connection) is DbConnection
+        assert type(cursor) is Cursor
 
         assert m_ansible_module.err_msg == ''
@@ -327,12 +331,12 @@ class TestGetConnParams():
 
     def test_get_conn_params_def(self, m_ansible_module, m_psycopg2, monkeypatch):
         """Test get_conn_params(), warn_db_default kwarg is default."""
-        monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+        monkeypatch.setattr(pg, 'psycopg', m_psycopg2)
         assert pg.get_conn_params(m_ansible_module, INPUT_DICT) == EXPECTED_DICT
         assert m_ansible_module.warn_msg == 'Database name has not been passed, used default database to connect to.'
 
     def test_get_conn_params_warn_db_def_false(self, m_ansible_module, m_psycopg2, monkeypatch):
         """Test get_conn_params(), warn_db_default kwarg is False."""
-        monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+        monkeypatch.setattr(pg, 'psycopg', m_psycopg2)
         assert pg.get_conn_params(m_ansible_module, INPUT_DICT, warn_db_default=False) == EXPECTED_DICT
         assert m_ansible_module.warn_msg == ''
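
Note: the test_postgres.py changes follow the module_utils move from psycopg2-only globals (HAS_PSYCOPG2, psycopg2) to driver-neutral ones (HAS_PSYCOPG, psycopg, PSYCOPG_VERSION compared as a LooseVersion), and the tests now monkeypatch that version gate directly. The following is a minimal, self-contained sketch of the same pattern; the names drv, ensure_required_libs, and DummyModule are stand-ins for illustration, not the collection's real API, and versions are plain tuples instead of LooseVersion to keep it dependency-free.

    # Minimal sketch of a monkeypatched driver/version gate, runnable with pytest.
    class drv:
        """Stand-in for a module_utils namespace holding detected driver state."""
        HAS_PSYCOPG = False
        PSYCOPG_VERSION = (0, 0, 0)
        MINIMUM = (2, 4, 3)


    def ensure_required_libs(module):
        """Fail the module unless a new-enough psycopg2/psycopg was imported."""
        if not drv.HAS_PSYCOPG:
            module.fail_json(msg='Failed to import the required Python library (psycopg2)')
        elif drv.PSYCOPG_VERSION < drv.MINIMUM:
            module.fail_json(msg='psycopg2 must be at least 2.4.3')


    class DummyModule:
        """Collects fail_json()/warn() output, like the tests' mock module object."""
        def __init__(self):
            self.err_msg = ''
            self.warn_msg = ''

        def fail_json(self, msg):
            self.err_msg = msg

        def warn(self, msg):
            self.warn_msg = msg


    def test_version_too_old(monkeypatch):
        # monkeypatch restores the original attributes after the test.
        monkeypatch.setattr(drv, 'HAS_PSYCOPG', True)
        monkeypatch.setattr(drv, 'PSYCOPG_VERSION', (2, 4, 2))
        module = DummyModule()
        ensure_required_libs(module)
        assert 'must be at least 2.4.3' in module.err_msg
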
diff --git a/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py
index 62a1704ad..a93bf3f79 100644
--- a/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py
+++ b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py
@@ -2,13 +2,13 @@
 # Copyright: (c) 2019, Andrey Tuzhilin <andrei.tuzhilin@gmail.com>
 # Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
 
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
 __metaclass__ = type
 
 import pytest
-
-from ansible_collections.community.postgresql.plugins.module_utils.saslprep import saslprep
-
+from ansible_collections.community.postgresql.plugins.module_utils.saslprep import \
+    saslprep
 
 VALID = [
     (u'', u''),
@@ -51,5 +51,5 @@ def test_saslprep_conversions(source, target):
 
 @pytest.mark.parametrize('source,exception', INVALID)
 def test_saslprep_exceptions(source, exception):
-    with pytest.raises(exception) as ex:
+    with pytest.raises(exception):
         saslprep(source)
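
Note: the test_saslprep.py change drops an ExceptionInfo binding that was never used (the kind of dead assignment flake8 reports as F841). A generic illustration of when to keep or drop the binding, unrelated to saslprep itself:

    # Bind the ExceptionInfo only when the test actually inspects it.
    import pytest


    def divide(a, b):
        return a / b


    def test_divide_by_zero():
        # No need for "as excinfo" when the exception object is not examined.
        with pytest.raises(ZeroDivisionError):
            divide(1, 0)


    def test_divide_by_zero_message():
        # Keep the binding when the test asserts on the exception itself.
        with pytest.raises(ZeroDivisionError) as excinfo:
            divide(1, 0)
        assert 'division' in str(excinfo.value)
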
""" - monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True) # case 1: psycopg2.__version >= 2.4.2 (the default in m_psycopg2) + monkeypatch.setattr(pg, 'psycopg', m_psycopg2) monkeypatch.setattr(pg, 'psycopg2', m_psycopg2) conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params) @@ -316,8 +320,8 @@ class TestConnectToDb(): cursor = db_connection.cursor() # if errors, db_connection returned as None: - assert type(db_connection) == DbConnection - assert type(cursor) == Cursor + assert type(db_connection) is DbConnection + assert type(cursor) is Cursor assert m_ansible_module.err_msg == '' @@ -327,12 +331,12 @@ class TestGetConnParams(): def test_get_conn_params_def(self, m_ansible_module, m_psycopg2, monkeypatch): """Test get_conn_params(), warn_db_default kwarg is default.""" - monkeypatch.setattr(pg, 'psycopg2', m_psycopg2) + monkeypatch.setattr(pg, 'psycopg', m_psycopg2) assert pg.get_conn_params(m_ansible_module, INPUT_DICT) == EXPECTED_DICT assert m_ansible_module.warn_msg == 'Database name has not been passed, used default database to connect to.' def test_get_conn_params_warn_db_def_false(self, m_ansible_module, m_psycopg2, monkeypatch): """Test get_conn_params(), warn_db_default kwarg is False.""" - monkeypatch.setattr(pg, 'psycopg2', m_psycopg2) + monkeypatch.setattr(pg, 'psycopg', m_psycopg2) assert pg.get_conn_params(m_ansible_module, INPUT_DICT, warn_db_default=False) == EXPECTED_DICT assert m_ansible_module.warn_msg == '' diff --git a/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py index 62a1704ad..a93bf3f79 100644 --- a/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py +++ b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py @@ -2,13 +2,13 @@ # Copyright: (c) 2019, Andrey Tuzhilin <andrei.tuzhilin@gmail.com> # Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import pytest - -from ansible_collections.community.postgresql.plugins.module_utils.saslprep import saslprep - +from ansible_collections.community.postgresql.plugins.module_utils.saslprep import \ + saslprep VALID = [ (u'', u''), @@ -51,5 +51,5 @@ def test_saslprep_conversions(source, target): @pytest.mark.parametrize('source,exception', INVALID) def test_saslprep_exceptions(source, exception): - with pytest.raises(exception) as ex: + with pytest.raises(exception): saslprep(source) diff --git a/ansible_collections/community/postgresql/tests/unit/plugins/modules/test_postgresql_set.py b/ansible_collections/community/postgresql/tests/unit/plugins/modules/test_postgresql_set.py index a10678202..8850df251 100644 --- a/ansible_collections/community/postgresql/tests/unit/plugins/modules/test_postgresql_set.py +++ b/ansible_collections/community/postgresql/tests/unit/plugins/modules/test_postgresql_set.py @@ -1,12 +1,13 @@ # -*- coding: utf-8 -*- # Copyright: (c) 2021, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import pytest - -from ansible_collections.community.postgresql.plugins.modules.postgresql_set import pretty_to_bytes +from 
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py b/ansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py
index 608db6923..e0115c124 100755
--- a/ansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 """Verify the currently executing Shippable test matrix matches the one defined in the "shippable.yml" file."""
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
 __metaclass__ = type
 
 import datetime
@@ -15,12 +16,7 @@ try:
 except ImportError:
     NoReturn = None
 
-try:
-    # noinspection PyCompatibility
-    from urllib2 import urlopen  # pylint: disable=ansible-bad-import-from
-except ImportError:
-    # noinspection PyCompatibility
-    from urllib.request import urlopen
+from ansible.module_utils.urls import open_url
 
 
 def main():  # type: () -> None
@@ -47,7 +43,7 @@ def main():  # type: () -> None
 
     for attempts_remaining in range(4, -1, -1):
         try:
-            jobs = json.loads(urlopen('https://api.shippable.com/jobs?runIds=%s' % run_id).read())
+            jobs = json.loads(open_url('https://api.shippable.com/jobs?runIds=%s' % run_id).read())
 
             if not isinstance(jobs, list):
                 raise Exception('Shippable run %s data is not a list.' % run_id)
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/shippable.sh b/ansible_collections/community/postgresql/tests/utils/shippable/shippable.sh
index b181297f9..818324edd 100755
--- a/ansible_collections/community/postgresql/tests/utils/shippable/shippable.sh
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/shippable.sh
@@ -59,6 +59,7 @@ else
     retry pip install "https://github.com/ansible/ansible/archive/stable-${ansible_version}.tar.gz" --disable-pip-version-check
 fi
 
+# shellcheck disable=SC2153
 if [ "${SHIPPABLE_BUILD_ID:-}" ]; then
     export ANSIBLE_COLLECTIONS_PATHS="${HOME}/.ansible"
     SHIPPABLE_RESULT_DIR="$(pwd)/shippable"
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/timing.py b/ansible_collections/community/postgresql/tests/utils/shippable/timing.py
index fb538271b..c6bf15532 100755
--- a/ansible_collections/community/postgresql/tests/utils/shippable/timing.py
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/timing.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python3.7
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
+
 __metaclass__ = type
 
 import sys
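
Note: check_matrix.py drops the urllib2/urllib.request fallback in favour of open_url() from ansible.module_utils.urls, which is what lets the replace-urlopen entries disappear from the ignore files earlier in this diff; the shellcheck directive only silences SC2153 for the externally provided SHIPPABLE_BUILD_ID variable. The sketch below shows the open_url() retry pattern in isolation; the helper name fetch_jobs and the retry/sleep values are illustrative, the real loop is the one in the hunk above.

    # Retrying JSON fetch built on ansible.module_utils.urls.open_url().
    import json
    import time

    from ansible.module_utils.urls import open_url


    def fetch_jobs(run_id, attempts=5, delay=30):
        """Fetch the Shippable job list, retrying on transient failures."""
        url = 'https://api.shippable.com/jobs?runIds=%s' % run_id
        for attempts_remaining in range(attempts - 1, -1, -1):
            try:
                jobs = json.loads(open_url(url).read())
                if not isinstance(jobs, list):
                    raise Exception('Shippable run %s data is not a list.' % run_id)
                return jobs
            except Exception:
                if not attempts_remaining:
                    raise
                time.sleep(delay)
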
diff --git a/ansible_collections/community/postgresql/tox.ini b/ansible_collections/community/postgresql/tox.ini
new file mode 100644
index 000000000..48a491cd5
--- /dev/null
+++ b/ansible_collections/community/postgresql/tox.ini
@@ -0,0 +1,14 @@
+[tox]
+envlist = lint
+isolated_build = true
+[testenv:lint]
+skip_install = true
+commands =
+    flake8 .
+    codespell
+deps =
+    flake8
+    codespell
+
+[pycodestyle]
+ignore = E226,E302,E71
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/default.yml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bfd/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/default.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bfd/__init__.py