summaryrefslogtreecommitdiffstats
path: root/ansible_collections/community/postgresql
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 12:04:41 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-13 12:04:41 +0000
commit975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/community/postgresql
parentInitial commit. (diff)
downloadansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.tar.xz
ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.zip
Adding upstream version 7.7.0+dfsg.upstream/7.7.0+dfsg
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--ansible_collections/community/postgresql/.azure-pipelines/README.md3
-rw-r--r--ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml260
-rwxr-xr-xansible_collections/community/postgresql/.azure-pipelines/scripts/aggregate-coverage.sh20
-rwxr-xr-xansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py60
-rwxr-xr-xansible_collections/community/postgresql/.azure-pipelines/scripts/process-results.sh24
-rwxr-xr-xansible_collections/community/postgresql/.azure-pipelines/scripts/publish-codecov.sh27
-rwxr-xr-xansible_collections/community/postgresql/.azure-pipelines/scripts/report-coverage.sh15
-rwxr-xr-xansible_collections/community/postgresql/.azure-pipelines/scripts/run-tests.sh34
-rwxr-xr-xansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py25
-rw-r--r--ansible_collections/community/postgresql/.azure-pipelines/templates/coverage.yml39
-rw-r--r--ansible_collections/community/postgresql/.azure-pipelines/templates/matrix.yml55
-rw-r--r--ansible_collections/community/postgresql/.azure-pipelines/templates/test.yml45
-rw-r--r--ansible_collections/community/postgresql/.github/CODEOWNERS8
-rw-r--r--ansible_collections/community/postgresql/.github/patchback.yml5
-rw-r--r--ansible_collections/community/postgresql/CHANGELOG.rst461
-rw-r--r--ansible_collections/community/postgresql/CONTRIBUTING.md5
-rw-r--r--ansible_collections/community/postgresql/CONTRIBUTORS230
-rw-r--r--ansible_collections/community/postgresql/COPYING674
-rw-r--r--ansible_collections/community/postgresql/FILES.json2707
-rw-r--r--ansible_collections/community/postgresql/MAINTAINERS6
-rw-r--r--ansible_collections/community/postgresql/MAINTAINING.md3
-rw-r--r--ansible_collections/community/postgresql/MANIFEST.json32
-rw-r--r--ansible_collections/community/postgresql/PSF-license.txt48
-rw-r--r--ansible_collections/community/postgresql/README.md170
-rw-r--r--ansible_collections/community/postgresql/changelogs/changelog.yaml531
-rw-r--r--ansible_collections/community/postgresql/changelogs/config.yaml29
-rw-r--r--ansible_collections/community/postgresql/changelogs/fragments/.keep0
-rw-r--r--ansible_collections/community/postgresql/docs/docsite/links.yml45
-rw-r--r--ansible_collections/community/postgresql/meta/runtime.yml27
-rw-r--r--ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py92
-rw-r--r--ansible_collections/community/postgresql/plugins/module_utils/_version.py335
-rw-r--r--ansible_collections/community/postgresql/plugins/module_utils/database.py193
-rw-r--r--ansible_collections/community/postgresql/plugins/module_utils/postgres.py477
-rw-r--r--ansible_collections/community/postgresql/plugins/module_utils/saslprep.py178
-rw-r--r--ansible_collections/community/postgresql/plugins/module_utils/version.py16
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py427
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_db.py786
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py475
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py594
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_info.py1111
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py353
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py265
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py463
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py907
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py215
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py1216
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py691
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_query.py538
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py288
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_script.py353
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py637
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_set.py514
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py310
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py741
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_table.py619
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py545
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_user.py1085
-rw-r--r--ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py342
-rw-r--r--ansible_collections/community/postgresql/requirements.txt1
-rw-r--r--ansible_collections/community/postgresql/shippable.yml60
-rw-r--r--ansible_collections/community/postgresql/simplified_bsd.txt7
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/main.yml8
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml278
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/aliases3
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml11
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml47
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/manage_database.yml9
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml152
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml366
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml80
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml235
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_rename.yml261
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/aliases3
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/defaults/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/meta/main.yml3
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/main.yml26
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml208
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml114
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml554
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/main.yml7
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml377
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/aliases4
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml13
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml12
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml243
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/setup_publication.yml64
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/main.yml25
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml199
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml231
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml3
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml3
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/default.yml0
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/defaults/main.yml6
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/main.yml7
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml736
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/defaults/main.yml3
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/main.yml9
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml1073
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/defaults/main.yml29
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/main.yml8
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_bulk_rules.yml136
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml264
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/defaults/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml9
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml187
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/defaults/main.yml14
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/main.yml19
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml50
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml1767
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml407
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml102
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml120
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/aliases3
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/main.yml8
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml436
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test0.sql6
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test1.sql10
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/main.yml7
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml604
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/defaults/main.yml7
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/main.yml9
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml331
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml78
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/defaults/main.yml1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test0.sql4
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test1.sql10
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test10.sql1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test11.sql1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test12.sql1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test2.sql1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test3.sql4
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test4.sql1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test5.sql1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test6.sql1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test7.sql1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test8.sql1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test9.sql1
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/tasks/main.yml7
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/tasks/postgresql_script_initial.yml311
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/defaults/main.yml5
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/main.yml8
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml730
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/main.yml11
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml71
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml442
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/main.yml9
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml735
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/aliases4
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml13
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml12
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml672
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml85
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/main.yml7
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml899
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/defaults/main.yml3
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/main.yml7
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml245
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/aliases2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml4
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml12
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml802
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml156
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml167
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_password.yml429
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/aliases4
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml12
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml8
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml222
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_pkg_mgr/tasks/main.yml17
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml21
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--0.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.0-1.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.0.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.foo.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0-1.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0-foo.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.beta.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--4.0.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--v4.sql2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy.control3
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf10
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/meta/main.yml2
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml279
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml108
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml9
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml9
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml8
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml13
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml7
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml7
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/defaults/main.yml26
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/handlers/main.yml24
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/main.yml13
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml149
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j27
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/primary_postgresql.conf.j228
-rw-r--r--ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j228
-rw-r--r--ansible_collections/community/postgresql/tests/requirements.yml3
-rw-r--r--ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.json7
-rwxr-xr-xansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py43
-rw-r--r--ansible_collections/community/postgresql/tests/sanity/ignore-2.12.txt5
-rw-r--r--ansible_collections/community/postgresql/tests/sanity/ignore-2.13.txt5
-rw-r--r--ansible_collections/community/postgresql/tests/sanity/ignore-2.14.txt5
-rw-r--r--ansible_collections/community/postgresql/tests/sanity/ignore-2.15.txt6
-rw-r--r--ansible_collections/community/postgresql/tests/sanity/ignore-2.16.txt6
-rw-r--r--ansible_collections/community/postgresql/tests/unit/plugins/module_utils/__init__.py0
-rw-r--r--ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py338
-rw-r--r--ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py55
-rw-r--r--ansible_collections/community/postgresql/tests/unit/plugins/modules/__init__.py0
-rw-r--r--ansible_collections/community/postgresql/tests/unit/plugins/modules/test_postgresql_set.py28
-rw-r--r--ansible_collections/community/postgresql/tests/utils/constraints.txt52
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/aix.sh22
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py120
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/freebsd.sh22
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/linux.sh18
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/remote.sh22
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/rhel.sh22
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/sanity.sh27
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/shippable.sh208
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/timing.py16
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/timing.sh5
-rwxr-xr-xansible_collections/community/postgresql/tests/utils/shippable/units.sh26
263 files changed, 38462 insertions, 0 deletions
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/README.md b/ansible_collections/community/postgresql/.azure-pipelines/README.md
new file mode 100644
index 000000000..385e70bac
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/README.md
@@ -0,0 +1,3 @@
+## Azure Pipelines Configuration
+
+Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml b/ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml
new file mode 100644
index 000000000..4a34c9edf
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml
@@ -0,0 +1,260 @@
+trigger:
+ batch: true
+ branches:
+ include:
+ - main
+ - stable-*
+
+pr:
+ autoCancel: true
+ branches:
+ include:
+ - main
+ - stable-*
+
+schedules:
+ - cron: 0 9 * * *
+ displayName: Nightly
+ always: true
+ branches:
+ include:
+ - main
+ - stable-*
+
+variables:
+ - name: checkoutPath
+ value: ansible_collections/community/postgresql
+ - name: coverageBranches
+ value: main
+ - name: pipelinesCoverage
+ value: coverage
+ - name: entryPoint
+ value: tests/utils/shippable/shippable.sh
+ - name: fetchDepth
+ value: 0
+
+resources:
+ containers:
+ - container: default
+ image: quay.io/ansible/azure-pipelines-test-container:3.0.0
+
+pool: Standard
+
+stages:
+## Sanity & units
+ - stage: Ansible_devel
+ displayName: Sanity & Units devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ targets:
+ - name: Sanity
+ test: 'devel/sanity/1'
+ - name: Sanity Extra # Only on devel
+ test: 'devel/sanity/extra'
+ - name: Units
+ test: 'devel/units/1'
+
+ - stage: Ansible_2_15
+ displayName: Sanity & Units 2.15
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ targets:
+ - name: Sanity
+ test: '2.15/sanity/1'
+ - name: Units
+ test: '2.15/units/1'
+
+ - stage: Ansible_2_14
+ displayName: Sanity & Units 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ targets:
+ - name: Sanity
+ test: '2.14/sanity/1'
+ - name: Units
+ test: '2.14/units/1'
+
+ - stage: Ansible_2_13
+ displayName: Sanity & Units 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ targets:
+ - name: Sanity
+ test: '2.13/sanity/1'
+ - name: Units
+ test: '2.13/units/1'
+
+ - stage: Ansible_2_12
+ displayName: Sanity & Units 2.12
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ targets:
+ - name: Sanity
+ test: '2.12/sanity/1'
+ - name: Units
+ test: '2.12/units/1'
+
+## Docker
+ - stage: Docker_devel
+ displayName: Docker devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/linux/{0}/1
+ targets:
+ - name: CentOS 7
+ test: centos7
+ - name: Fedora 37
+ test: fedora37
+ - name: Ubuntu 20.04
+ test: ubuntu2004
+
+ - stage: Docker_2_15
+ displayName: Docker 2.15
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.15/linux/{0}/1
+ targets:
+ - name: CentOS 7
+ test: centos7
+ - name: Fedora 37
+ test: fedora37
+ - name: Ubuntu 20.04
+ test: ubuntu2004
+
+ - stage: Docker_2_14
+ displayName: Docker 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.14/linux/{0}/1
+ targets:
+ - name: CentOS 7
+ test: centos7
+ - name: Fedora 36
+ test: fedora36
+ - name: Ubuntu 20.04
+ test: ubuntu2004
+
+ - stage: Docker_2_13
+ displayName: Docker 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.13/linux/{0}/1
+ targets:
+ - name: CentOS 7
+ test: centos7
+ - name: Fedora 35
+ test: fedora35
+ - name: Ubuntu 20.04
+ test: ubuntu2004
+
+ - stage: Docker_2_12
+ displayName: Docker 2.12
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.12/linux/{0}/1
+ targets:
+ - name: CentOS 7
+ test: centos7
+ - name: Fedora 34
+ test: fedora34
+ - name: Ubuntu 20.04
+ test: ubuntu2004
+
+## Remote
+ - stage: Remote_devel
+ displayName: Remote devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/{0}/1
+ targets:
+ - name: RHEL 8.7
+ test: rhel/8.7
+
+ - stage: Remote_2_15
+ displayName: Remote 2.15
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.15/{0}/1
+ targets:
+ - name: RHEL 8.7
+ test: rhel/8.7
+
+ - stage: Remote_2_14
+ displayName: Remote 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.14/{0}/1
+ targets:
+ - name: RHEL 8.6
+ test: rhel/8.6
+
+ - stage: Remote_2_13
+ displayName: Remote 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.13/{0}/1
+ targets:
+ - name: RHEL 8.5
+ test: rhel/8.5
+
+ - stage: Remote_2_12
+ displayName: Remote 2.12
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.12/{0}/1
+ targets:
+ - name: RHEL 8.4
+ test: rhel/8.4
+
+## Finally
+
+ - stage: Summary
+ condition: succeededOrFailed()
+ dependsOn:
+ - Ansible_devel
+ - Ansible_2_15
+ - Ansible_2_14
+ - Ansible_2_13
+ - Ansible_2_12
+ - Docker_devel
+ - Docker_2_15
+ - Docker_2_14
+ - Docker_2_13
+ - Docker_2_12
+ - Remote_devel
+ - Remote_2_15
+ - Remote_2_14
+ - Remote_2_13
+ - Remote_2_12
+ jobs:
+ - template: templates/coverage.yml
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/scripts/aggregate-coverage.sh b/ansible_collections/community/postgresql/.azure-pipelines/scripts/aggregate-coverage.sh
new file mode 100755
index 000000000..f3113dd0a
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/scripts/aggregate-coverage.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Aggregate code coverage results for later processing.
+
+set -o pipefail -eu
+
+agent_temp_directory="$1"
+
+PATH="${PWD}/bin:${PATH}"
+
+mkdir "${agent_temp_directory}/coverage/"
+
+options=(--venv --venv-system-site-packages --color -v)
+
+ansible-test coverage combine --export "${agent_temp_directory}/coverage/" "${options[@]}"
+
+if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
+ # Only analyze coverage if the installed version of ansible-test supports it.
+ # Doing so allows this script to work unmodified for multiple Ansible versions.
+ ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
+fi
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py b/ansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py
new file mode 100755
index 000000000..506ade646
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+"""
+Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
+Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
+The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
+Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
+It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import shutil
+import sys
+
+
+def main():
+ """Main program entry point."""
+ source_directory = sys.argv[1]
+
+ if '/ansible_collections/' in os.getcwd():
+ output_path = "tests/output"
+ else:
+ output_path = "test/results"
+
+ destination_directory = os.path.join(output_path, 'coverage')
+
+ if not os.path.exists(destination_directory):
+ os.makedirs(destination_directory)
+
+ jobs = {}
+ count = 0
+
+ for name in os.listdir(source_directory):
+ match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
+ label = match.group('label')
+ attempt = int(match.group('attempt'))
+ jobs[label] = max(attempt, jobs.get(label, 0))
+
+ for label, attempt in jobs.items():
+ name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
+ source = os.path.join(source_directory, name)
+ source_files = os.listdir(source)
+
+ for source_file in source_files:
+ source_path = os.path.join(source, source_file)
+ destination_path = os.path.join(destination_directory, source_file + '.' + label)
+ print('"%s" -> "%s"' % (source_path, destination_path))
+ shutil.copyfile(source_path, destination_path)
+ count += 1
+
+ print('Coverage file count: %d' % count)
+ print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
+ print('##vso[task.setVariable variable=outputPath]%s' % output_path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/scripts/process-results.sh b/ansible_collections/community/postgresql/.azure-pipelines/scripts/process-results.sh
new file mode 100755
index 000000000..f3f1d1bae
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/scripts/process-results.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Check the test results and set variables for use in later steps.
+
+set -o pipefail -eu
+
+if [[ "$PWD" =~ /ansible_collections/ ]]; then
+ output_path="tests/output"
+else
+ output_path="test/results"
+fi
+
+echo "##vso[task.setVariable variable=outputPath]${output_path}"
+
+if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveTestResults]true"
+fi
+
+if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveBotResults]true"
+fi
+
+if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveCoverageData]true"
+fi
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/scripts/publish-codecov.sh b/ansible_collections/community/postgresql/.azure-pipelines/scripts/publish-codecov.sh
new file mode 100755
index 000000000..6d184f0b8
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/scripts/publish-codecov.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# Upload code coverage reports to codecov.io.
+# Multiple coverage files from multiple languages are accepted and aggregated after upload.
+# Python coverage, as well as PowerShell and Python stubs can all be uploaded.
+
+set -o pipefail -eu
+
+output_path="$1"
+
+curl --silent --show-error https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh > codecov.sh
+
+for file in "${output_path}"/reports/coverage*.xml; do
+ name="${file}"
+ name="${name##*/}" # remove path
+ name="${name##coverage=}" # remove 'coverage=' prefix if present
+ name="${name%.xml}" # remove '.xml' suffix
+
+ bash codecov.sh \
+ -f "${file}" \
+ -n "${name}" \
+ -X coveragepy \
+ -X gcov \
+ -X fix \
+ -X search \
+ -X xcode \
+ || echo "Failed to upload code coverage report to codecov.io: ${file}"
+done
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/scripts/report-coverage.sh b/ansible_collections/community/postgresql/.azure-pipelines/scripts/report-coverage.sh
new file mode 100755
index 000000000..1bd91bdc9
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/scripts/report-coverage.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.
+
+set -o pipefail -eu
+
+PATH="${PWD}/bin:${PATH}"
+
+if ! ansible-test --help >/dev/null 2>&1; then
+ # Install the devel version of ansible-test for generating code coverage reports.
+ # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
+ # Since a version of ansible-test is required that can work with the output from multiple older releases, the devel version is used.
+ pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
+fi
+
+ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/scripts/run-tests.sh b/ansible_collections/community/postgresql/.azure-pipelines/scripts/run-tests.sh
new file mode 100755
index 000000000..a947fdf01
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/scripts/run-tests.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+# Configure the test environment and run the tests.
+
+set -o pipefail -eu
+
+entry_point="$1"
+test="$2"
+read -r -a coverage_branches <<< "$3" # space separated list of branches to run code coverage on for scheduled builds
+
+export COMMIT_MESSAGE
+export COMPLETE
+export COVERAGE
+export IS_PULL_REQUEST
+
+if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
+ IS_PULL_REQUEST=true
+ COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
+else
+ IS_PULL_REQUEST=
+ COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
+fi
+
+COMPLETE=
+COVERAGE=
+
+if [ "${BUILD_REASON}" = "Schedule" ]; then
+ COMPLETE=yes
+
+ if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
+ COVERAGE=yes
+ fi
+fi
+
+"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py b/ansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py
new file mode 100755
index 000000000..5e8eb8d4c
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+
+def main():
+ """Main program entry point."""
+ start = time.time()
+
+ sys.stdin.reconfigure(errors='surrogateescape')
+ sys.stdout.reconfigure(errors='surrogateescape')
+
+ for line in sys.stdin:
+ seconds = time.time() - start
+ sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
+ sys.stdout.flush()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/templates/coverage.yml b/ansible_collections/community/postgresql/.azure-pipelines/templates/coverage.yml
new file mode 100644
index 000000000..1864e4441
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/templates/coverage.yml
@@ -0,0 +1,39 @@
+# This template adds a job for processing code coverage data.
+# It will upload results to Azure Pipelines and codecov.io.
+# Use it from a job stage that completes after all other jobs have completed.
+# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.
+
+jobs:
+ - job: Coverage
+ displayName: Code Coverage
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - task: DownloadPipelineArtifact@2
+ displayName: Download Coverage Data
+ inputs:
+ path: coverage/
+ patterns: "Coverage */*=coverage.combined"
+ - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
+ displayName: Combine Coverage Data
+ - bash: .azure-pipelines/scripts/report-coverage.sh
+ displayName: Generate Coverage Report
+ condition: gt(variables.coverageFileCount, 0)
+ - task: PublishCodeCoverageResults@1
+ inputs:
+ codeCoverageTool: Cobertura
+ # Azure Pipelines only accepts a single coverage data file.
+ # That means only Python or PowerShell coverage can be uploaded, but not both.
+ # Set the "pipelinesCoverage" variable to determine which type is uploaded.
+ # Use "coverage" for Python and "coverage-powershell" for PowerShell.
+ summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
+ displayName: Publish to Azure Pipelines
+ condition: gt(variables.coverageFileCount, 0)
+ - bash: .azure-pipelines/scripts/publish-codecov.sh "$(outputPath)"
+ displayName: Publish to codecov.io
+ condition: gt(variables.coverageFileCount, 0)
+ continueOnError: true
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/templates/matrix.yml b/ansible_collections/community/postgresql/.azure-pipelines/templates/matrix.yml
new file mode 100644
index 000000000..4e9555dd3
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/templates/matrix.yml
@@ -0,0 +1,55 @@
+# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
+# If this matrix template does not provide the required functionality, consider using the test template directly instead.
+
+parameters:
+ # A required list of dictionaries, one per test target.
+ # Each item in the list must contain a "test" or "name" key.
+ # Both may be provided. If one is omitted, the other will be used.
+ - name: targets
+ type: object
+
+ # An optional list of values which will be used to multiply the targets list into a matrix.
+ # Values can be strings or numbers.
+ - name: groups
+ type: object
+ default: []
+
+ # An optional format string used to generate the job name.
+ # - {0} is the name of an item in the targets list.
+ - name: nameFormat
+ type: string
+ default: "{0}"
+
+ # An optional format string used to generate the test name.
+ # - {0} is the name of an item in the targets list.
+ - name: testFormat
+ type: string
+ default: "{0}"
+
+ # An optional format string used to add the group to the job name.
+ # {0} is the formatted name of an item in the targets list.
+ # {{1}} is the group -- be sure to include the double "{{" and "}}".
+ - name: nameGroupFormat
+ type: string
+ default: "{0} - {{1}}"
+
+ # An optional format string used to add the group to the test name.
+ # {0} is the formatted test of an item in the targets list.
+ # {{1}} is the group -- be sure to include the double "{{" and "}}".
+ - name: testGroupFormat
+ type: string
+ default: "{0}/{{1}}"
+
+jobs:
+ - template: test.yml
+ parameters:
+ jobs:
+ - ${{ if eq(length(parameters.groups), 0) }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
+ test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
+ - ${{ if not(eq(length(parameters.groups), 0)) }}:
+ - ${{ each group in parameters.groups }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
+ test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
diff --git a/ansible_collections/community/postgresql/.azure-pipelines/templates/test.yml b/ansible_collections/community/postgresql/.azure-pipelines/templates/test.yml
new file mode 100644
index 000000000..5250ed802
--- /dev/null
+++ b/ansible_collections/community/postgresql/.azure-pipelines/templates/test.yml
@@ -0,0 +1,45 @@
+# This template uses the provided list of jobs to create one or more test jobs.
+# It can be used directly if needed, or through the matrix template.
+
+parameters:
+ # A required list of dictionaries, one per test job.
+ # Each item in the list must contain a "test" and "name" key.
+ - name: jobs
+ type: object
+
+jobs:
+ - ${{ each job in parameters.jobs }}:
+ - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
+ displayName: ${{ job.name }}
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
+ displayName: Run Tests
+ - bash: .azure-pipelines/scripts/process-results.sh
+ condition: succeededOrFailed()
+ displayName: Process Results
+ - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Aggregate Coverage Data
+ - task: PublishTestResults@2
+ condition: eq(variables.haveTestResults, 'true')
+ inputs:
+ testResultsFiles: "$(outputPath)/junit/*.xml"
+ displayName: Publish Test Results
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveBotResults, 'true')
+ displayName: Publish Bot Results
+ inputs:
+ targetPath: "$(outputPath)/bot/"
+ artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Publish Coverage Data
+ inputs:
+ targetPath: "$(Agent.TempDirectory)/coverage/"
+ artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
diff --git a/ansible_collections/community/postgresql/.github/CODEOWNERS b/ansible_collections/community/postgresql/.github/CODEOWNERS
new file mode 100644
index 000000000..e2fd0cfa3
--- /dev/null
+++ b/ansible_collections/community/postgresql/.github/CODEOWNERS
@@ -0,0 +1,8 @@
+# This is a comment.
+# Each line is a file pattern followed by one or more owners.
+
+# These owners will be the default owners for everything in
+# the repo. Unless a later match takes precedence,
+# @global-owner1 and @global-owner2 will be requested for
+# review when someone opens a pull request.
+* @Andersson007 @hunleyd @jchancojr
diff --git a/ansible_collections/community/postgresql/.github/patchback.yml b/ansible_collections/community/postgresql/.github/patchback.yml
new file mode 100644
index 000000000..33ad6e84a
--- /dev/null
+++ b/ansible_collections/community/postgresql/.github/patchback.yml
@@ -0,0 +1,5 @@
+---
+backport_branch_prefix: patchback/backports/
+backport_label_prefix: backport-
+target_branch_prefix: stable-
+...
diff --git a/ansible_collections/community/postgresql/CHANGELOG.rst b/ansible_collections/community/postgresql/CHANGELOG.rst
new file mode 100644
index 000000000..5730f6108
--- /dev/null
+++ b/ansible_collections/community/postgresql/CHANGELOG.rst
@@ -0,0 +1,461 @@
+=============================================
+Community PostgreSQL Collection Release Notes
+=============================================
+
+.. contents:: Topics
+
+
+v2.4.2
+======
+
+Release Summary
+---------------
+
+This is a bugfix release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after release 2.4.1.
+
+Bugfixes
+--------
+
+- postgresql_db - when the task is completed successfully, close the database connection (https://github.com/ansible-collections/community.postgresql/issues/465).
+- postgresql_info - when the task is completed successfully, close the database connection (https://github.com/ansible-collections/community.postgresql/issues/465).
+- postgresql_ping - when the task is completed successfully, close the database connection (https://github.com/ansible-collections/community.postgresql/issues/465).
+- postgresql_privs - when the task is completed successfully, close the database connection (https://github.com/ansible-collections/community.postgresql/issues/465).
+
+v2.4.1
+======
+
+Release Summary
+---------------
+
+This is the bugfix release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after release 2.4.0.
+
+Bugfixes
+--------
+
+- postgresql_privs - fix a breaking change related to handling the ``password`` argument (https://github.com/ansible-collections/community.postgresql/pull/463).
+
+v2.4.0
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 2.3.2.
+
+Major Changes
+-------------
+
+- postgresql_privs - the ``password`` argument is deprecated and will be removed in community.postgresql 4.0.0, use the ``login_password`` argument instead (https://github.com/ansible-collections/community.postgresql/issues/406).
+
+Minor Changes
+-------------
+
+- Add support for module_defaults with action_group ``all`` (https://github.com/ansible-collections/community.postgresql/pull/430).
+- postgresql - added new parameters ``ssl_cert`` and ``ssl_key`` for ssl connection (https://github.com/ansible-collections/community.postgresql/issues/424).
+- postgresql - when receiving the connection parameters, the ``PGPORT`` and ``PGUSER`` environment variables are checked. The order of assigning values ``environment variables`` -> ``default values`` -> ``set values`` (https://github.com/ansible-collections/community.postgresql/issues/311).
+- postgresql_query - a list of queries can be passed as the ``query`` argument's value, the results will be stored in the ``query_all_results`` return value (is not deprecated anymore, as well as ``query_list``) (https://github.com/ansible-collections/community.postgresql/issues/312).
+
+Bugfixes
+--------
+
+- postgresql_info - add support for non-numeric extension version (https://github.com/ansible-collections/community.postgresql/issues/428).
+- postgresql_info - when getting information about subscriptions, check the list of available columns in the pg_subscription table (https://github.com/ansible-collections/community.postgresql/issues/429).
+- postgresql_privs - fix connect_params being ignored (https://github.com/ansible-collections/community.postgresql/issues/450).
+- postgresql_query - could crash under certain conditions because of a missing import to ``psycopg2.extras`` (https://github.com/ansible-collections/community.postgresql/issues/283).
+- postgresql_set - avoid throwing ValueError for IP addresses and other values that may look like a number, but which are not (https://github.com/ansible-collections/community.postgresql/pull/422).
+- postgresql_set - avoid wrong values for single-value parameters containing commas (https://github.com/ansible-collections/community.postgresql/pull/400).
+- postgresql_user - properly close DB connections to prevent possible connection limit exhaustion (https://github.com/ansible-collections/community.postgresql/issues/431).
+
+v2.3.2
+======
+
+Release Summary
+---------------
+
+This is the bugfix release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after release 2.3.1.
+
+Bugfixes
+--------
+
+- postgresql_pg_hba - fix ``changed`` return value for when ``overwrite`` is enabled (https://github.com/ansible-collections/community.postgresql/pull/378).
+- postgresql_privs - fix quoting of the ``schema`` parameter in SQL statements (https://github.com/ansible-collections/community.postgresql/pull/382).
+- postgresql_privs - raise an error when the ``objs: ALL_IN_SCHEMA`` is used with a value of ``type`` that is not ``table``, ``sequence``, ``function`` or ``procedure`` (https://github.com/ansible-collections/community.postgresql/issues/379).
+
+v2.3.1
+======
+
+Release Summary
+---------------
+
+This is the bugfix release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after release 2.3.0.
+
+Bugfixes
+--------
+
+- postgresql_privs - fails with ``type=default_privs``, ``privs=ALL``, ``objs=ALL_DEFAULT`` (https://github.com/ansible-collections/community.postgresql/issues/373).
+
+v2.3.0
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 2.2.0.
+
+Minor Changes
+-------------
+
+- postgresql_* - add the ``connect_params`` parameter dict to allow any additional ``libpg`` connection parameters (https://github.com/ansible-collections/community.postgresql/pull/329).
+
+Bugfixes
+--------
+
+- postgresql_info - make arguments passed to SHOW command properly quoted to prevent the interpreter evaluating them (https://github.com/ansible-collections/community.postgresql/issues/314).
+- postgresql_pg_hba - support the connection types ``hostgssenc`` and ``hostnogssenc`` (https://github.com/ansible-collections/community.postgresql/pull/351).
+- postgresql_privs - add support for alter default privileges grant usage on schemas (https://github.com/ansible-collections/community.postgresql/issues/332).
+- postgresql_privs - cannot grant select on objects in all schemas; add the ``not-specified`` value to the ``schema`` parameter to make this possible (https://github.com/ansible-collections/community.postgresql/issues/332).
+- postgresql_set - avoid postgres puts extra quotes when passing values containing commas (https://github.com/ansible-collections/community.postgresql/issues/78).
+- postgresql_user - make the module idempotent when password is scram hashed (https://github.com/ansible-collections/community.postgresql/issues/301).
+
+v2.2.0
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 2.1.5.
+
+Major Changes
+-------------
+
+- postgresql_user - the ``groups`` argument has been deprecated and will be removed in ``community.postgresql 3.0.0``. Please use the ``postgresql_membership`` module to specify group/role memberships instead (https://github.com/ansible-collections/community.postgresql/issues/277).
+
+Minor Changes
+-------------
+
+- postgresql_membership - add the ``exact`` state value to be able to specify a list of only groups a user must be a member of (https://github.com/ansible-collections/community.postgresql/issues/277).
+- postgresql_pg_hba - add argument ``overwrite`` (bool, default: false) to remove unmanaged rules (https://github.com/ansible-collections/community.postgresql/issues/297).
+- postgresql_pg_hba - add argument ``rules_behavior`` (choices: conflict (default), combine) to fail when ``rules`` and normal rule-specific arguments are given or, when ``combine``, use them as defaults for the ``rules`` items (https://github.com/ansible-collections/community.postgresql/issues/297).
+- postgresql_pg_hba - add argument ``rules`` to specify a list of rules using the normal rule-specific argument in each item (https://github.com/ansible-collections/community.postgresql/issues/297).
+
+Bugfixes
+--------
+
+- Include ``simplified_bsd.txt`` license file for various module utils.
+- postgresql_info - fix pg version parsing (https://github.com/ansible-collections/community.postgresql/issues/315).
+- postgresql_ping - fix pg version parsing (https://github.com/ansible-collections/community.postgresql/issues/315).
+- postgresql_privs.py - add functionality when the PostgreSQL version is 9.0.0 or greater to incorporate ``ALL x IN SCHEMA`` syntax (https://github.com/ansible-collections/community.postgresql/pull/282). Please see the official documentation for details regarding grants (https://www.postgresql.org/docs/9.0/sql-grant.html).
+- postgresql_subscription - fix idempotence by casting the ``connparams`` dict variable (https://github.com/ansible-collections/community.postgresql/issues/280).
+- postgresql_user - add ``alter user``-statements in the return value ``queries`` (https://github.com/ansible-collections/community.postgresql/issues/307).
+
+v2.1.5
+======
+
+Release Summary
+---------------
+
+This is the bugfix release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 2.1.4.
+
+Bugfixes
+--------
+
+- Include ``PSF-license.txt`` file for ``plugins/module_utils/_version.py``.
+- collection core functions - fix attribute error `nonetype` by always calling `ensure_required_libs` (https://github.com/ansible-collections/community.postgresql/issues/252).
+
+v2.1.4
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 2.1.3.
+
+Major Changes
+-------------
+
+- The community.postgresql collection no longer supports ``Ansible 2.9`` and ``ansible-base 2.10``. While we take no active measures to prevent usage and there are no plans to introduce incompatible code to the modules, we will stop testing against ``Ansible 2.9`` and ``ansible-base 2.10``. Both will very soon be End of Life and if you are still using them, you should consider upgrading to the ``latest Ansible / ansible-core 2.11 or later`` as soon as possible (https://github.com/ansible-collections/community.postgresql/pull/245).
+
+v2.1.3
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 2.1.2.
+
+Major Changes
+-------------
+
+- postgresql_user - the ``priv`` argument has been deprecated and will be removed in ``community.postgresql 3.0.0``. Please use the ``postgresql_privs`` module to grant/revoke privileges instead (https://github.com/ansible-collections/community.postgresql/issues/212).
+
+Bugfixes
+--------
+
+- postgresql_db - get rid of the deprecated psycopg2 connection alias ``database`` in favor of ``dbname`` when psycopg2 is 2.7+ is used (https://github.com/ansible-collections/community.postgresql/issues/194, https://github.com/ansible-collections/community.postgresql/pull/196).
+
+v2.1.2
+======
+
+Release Summary
+---------------
+
+This is the patch release of the `community.postgresql` collection. This changelog contains all changes to the modules in this collection that have been added after the release of `community.postgresql` 2.1.1.
+
+Major Changes
+-------------
+
+- postgresql_privs - the ``usage_on_types`` feature has been deprecated and will be removed in ``community.postgresql 3.0.0``. Please use the ``type`` option with the ``type`` value to explicitly grant/revoke privileges on types (https://github.com/ansible-collections/community.postgresql/issues/207).
+
+v2.1.1
+======
+
+Release Summary
+---------------
+
+This is the bugfix release of the community.postgresql collection.
+This changelog contains all changes to the modules in this collection that have been added after the release of community.postgresql 2.1.0.
+
+Bugfixes
+--------
+
+- module core functions - get rid of the deprecated psycopg2 connection alias ``database`` in favor of ``dbname`` when psycopg2 is 2.7+ (https://github.com/ansible-collections/community.postgresql/pull/196).
+- postgresql_query - cannot handle .sql file with \\n at end of file (https://github.com/ansible-collections/community.postgresql/issues/180).
+
+v2.1.0
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 2.0.0.
+
+Major Changes
+-------------
+
+- postgresql_query - the ``path_to_script`` and ``as_single_query`` options as well as the ``query_list`` and ``query_all_results`` return values have been deprecated and will be removed in ``community.postgresql 3.0.0``. Please use the ``community.postgresql.postgresql_script`` module to execute statements from scripts (https://github.com/ansible-collections/community.postgresql/issues/189).
+
+New Modules
+-----------
+
+- postgresql_script - Run PostgreSQL statements from a file
+
+v2.0.0
+======
+
+Release Summary
+---------------
+
+This is the major release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 1.7.0.
+
+Major Changes
+-------------
+
+- postgresql_query - the default value of the ``as_single_query`` option changes to ``yes``. If the related behavior of your tasks where the module is involved changes, please adjust the parameter's value correspondingly (https://github.com/ansible-collections/community.postgresql/issues/85).
+
+v1.6.1
+======
+
+Release Summary
+---------------
+
+This is the bugfix release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 1.6.0.
+
+Bugfixes
+--------
+
+- Collection core functions - use vendored version of ``distutils.version`` instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.postgresql/pull/179).
+- postgresql_info - It now works on AWS RDS Postgres.
+- postgresql_info - Specific info (namespaces, extensions, languages) of each database was not being shown properly. Instead, the info from the DB that was connected was always being shown (https://github.com/ansible-collections/community.postgresql/issues/172).
+
+v1.6.0
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 1.5.0.
+
+Bugfixes
+--------
+
+- postgresql_ext - Handle postgresql extension updates through path validation instead of version comparison (https://github.com/ansible-collections/community.postgresql/issues/129).
+
+v1.5.0
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 1.4.0.
+
+Minor Changes
+-------------
+
+- postgresql_db - Add the ``force`` boolean option to drop active connections first and then remove the database (https://github.com/ansible-collections/community.postgresql/issues/109).
+- postgresql_info - Add the ``raw`` return value for extension version (https://github.com/ansible-collections/community.postgresql/pull/138).
+- postgresql_pg_hba - Add the parameters ``keep_comments_at_rules`` and ``comment`` (https://github.com/ansible-collections/community.postgresql/issues/134).
+
+Bugfixes
+--------
+
+- postgresql_ext - Fix extension version handling when it has 0 value (https://github.com/ansible-collections/community.postgresql/issues/136).
+- postgresql_info - Fix extension version handling when it has 0 value (https://github.com/ansible-collections/community.postgresql/issues/137).
+- postgresql_set - Fix wrong numerical value conversion (https://github.com/ansible-collections/community.postgresql/issues/110).
+- postgresql_slot - Correct the server_version check for PG 9.6 (https://github.com/ansible-collections/community.postgresql/issues/120).
+
+v1.4.0
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 1.3.0.
+
+Minor Changes
+-------------
+
+- postgresql_db - add support for the ``directory`` format when the ``state`` option is ``dump`` or ``restore`` (https://github.com/ansible-collections/community.postgresql/pull/108).
+- postgresql_db - add the ``rename`` value to the ``state`` option (https://github.com/ansible-collections/community.postgresql/pull/107).
+
+v1.3.0
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 1.2.0.
+
+Major Changes
+-------------
+
+- postgresql_query - the default value of the ``as_single_query`` option will be changed to ``yes`` in community.postgresql 2.0.0 (https://github.com/ansible-collections/community.postgresql/issues/85).
+
+Bugfixes
+--------
+
+- postgresql_privs - fix ``fail_on_role`` check (https://github.com/ansible-collections/community.postgresql/pull/82).
+
+v1.2.0
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 1.1.1.
+
+Minor Changes
+-------------
+
+- postgresql_info - add the ``patch``, ``full``, and ``raw`` values of the ``version`` return value (https://github.com/ansible-collections/community.postgresql/pull/68).
+- postgresql_ping - add the ``patch``, ``full``, and ``raw`` values of the ``server_version`` return value (https://github.com/ansible-collections/community.postgresql/pull/70).
+
+v1.1.1
+======
+
+Release Summary
+---------------
+
+This is the patch release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 1.1.0.
+
+Bugfixes
+--------
+
+- postgresql_query - add a warning to set ``as_single_query`` option explicitly (https://github.com/ansible-collections/community.postgresql/pull/54).
+- postgresql_query - fix datetime.timedelta type handling (https://github.com/ansible-collections/community.postgresql/issues/47).
+- postgresql_query - fix decimal handling (https://github.com/ansible-collections/community.postgresql/issues/45).
+- postgresql_set - fails in check_mode on non-numeric values containing `B` (https://github.com/ansible-collections/community.postgresql/issues/48).
+
+v1.1.0
+======
+
+Release Summary
+---------------
+
+This is the minor release of the ``community.postgresql`` collection.
+This changelog contains all changes to the modules in this collection that
+have been added after the release of ``community.postgresql`` 1.0.0.
+
+Minor Changes
+-------------
+
+- postgresql_query - add ``as_single_query`` option to execute a script content as a single query to avoid semicolon related errors (https://github.com/ansible-collections/community.postgresql/pull/37).
+
+Bugfixes
+--------
+
+- postgresql_info - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40).
+- postgresql_ping - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40).
+- postgresql_set - return a message instead of traceback when a passed parameter has not been found (https://github.com/ansible-collections/community.postgresql/issues/41).
+
+v1.0.0
+======
+
+Release Summary
+---------------
+
+This is the first proper release of the ``community.postgresql`` collection which is needed to include the collection in Ansible.
+This changelog does not contain any changes because there are no changes made since release 0.1.0.
+
+
+v0.1.0
+======
+
+Release Summary
+---------------
+
+The ``community.postgresql`` continues the work on the Ansible PostgreSQL
+modules from their state in ``community.general`` 1.2.0.
+The changes listed here are thus relative to the modules ``community.general.postgresql_*``.
+
+
+Minor Changes
+-------------
+
+- postgresql_info - add ``in_recovery`` return value to show if a service in recovery mode or not (https://github.com/ansible-collections/community.general/issues/1068).
+- postgresql_privs - add ``procedure`` type support (https://github.com/ansible-collections/community.general/issues/1002).
+- postgresql_query - add ``query_list`` and ``query_all_results`` return values (https://github.com/ansible-collections/community.general/issues/838).
+
+Bugfixes
+--------
+
+- postgresql_ext - fix the module crashes when available ext versions cannot be compared with current version (https://github.com/ansible-collections/community.general/issues/1095).
+- postgresql_ext - fix version selection when ``version=latest`` (https://github.com/ansible-collections/community.general/pull/1078).
+- postgresql_privs - fix module fails when ``type`` group and passing ``objs`` value containing hyphens (https://github.com/ansible-collections/community.general/issues/1058).
diff --git a/ansible_collections/community/postgresql/CONTRIBUTING.md b/ansible_collections/community/postgresql/CONTRIBUTING.md
new file mode 100644
index 000000000..70cd5557e
--- /dev/null
+++ b/ansible_collections/community/postgresql/CONTRIBUTING.md
@@ -0,0 +1,5 @@
+# Contributing
+
+Refer to the [Ansible Contributing guidelines](https://docs.ansible.com/ansible/devel/community/index.html) to learn how to contribute to this collection.
+
+Refer to the [review checklist](https://docs.ansible.com/ansible/devel/community/collection_contributors/collection_reviewing.html) when triaging issues or reviewing PRs.
diff --git a/ansible_collections/community/postgresql/CONTRIBUTORS b/ansible_collections/community/postgresql/CONTRIBUTORS
new file mode 100644
index 000000000..419cd9915
--- /dev/null
+++ b/ansible_collections/community/postgresql/CONTRIBUTORS
@@ -0,0 +1,230 @@
+4n70w4
+abadger
+abguy
+abompard
+acasademont
+AceSlash
+acozine
+aioue
+Akasurde
+alanfairless
+aleksandr-vin
+Alexhha
+AlexTaran
+amarao
+amenonsen
+aminvakil
+amossc
+anasbouzid
+Andersson007
+andreaso
+andreyfedoseev
+andytom
+anis016
+ansibot
+antoinell
+arbazkhan002
+arkag
+artursvonda
+AsgerPetersen
+asifiqbal
+atombrella
+b6d
+balonik
+bcoca
+bearrito
+benformosa
+betanummeric
+billietl
+binf
+blackstar257
+bladypirat
+blindrood
+Boosai
+braderhart
+brophyja
+btoussaint
+cans
+caseyandgina
+chamini2
+Changaco
+char543
+cjewo
+cocoy
+codrinh
+CoffeDriven
+Cohedrin
+coopengo-glecomte
+csamarajeewa
+cThrice
+czenderink
+dagwieers
+dan-mcdonald
+darklajid
+davetapley
+DEvil0000
+d-fence
+dgalpaj
+Dorn-
+drob
+drrtuy
+drybjed
+dschep
+dukex
+ECRR
+elventear
+Ernest0x
+EvanDotPro
+F1rst-Unicorn
+Fale
+faruqisan
+feikesteenbergen
+felixfontein
+fessmage
+fix
+frittentheke
+gearoidibm
+geekq
+ghost
+Glandos
+gordonbondon
+gotmax23
+grasum
+gsauthof
+gsauthor
+gundalow
+Habbie
+herrewig
+hezbucho
+hunleyd
+IgorOhrimenko
+ilicmilan
+indreek
+inertialbit
+iragsdale
+Iridescens
+jacekjaros
+jamescassell
+jamesRUS52
+jborean93
+jbscalia
+jchancojr
+jd-boyd
+jegj
+jensdepuydt
+jerri
+Jhiliano
+jinnko
+jkman340
+jmcginn13
+jmighion
+jnv
+joaocc
+jochu
+johnjelinek
+joshmoore
+jzielke84
+k3rni
+keitalbame
+keithf4
+klando
+kostiantyn-nemchenko
+kustodian
+landryb
+le9i0nx
+legrostdg
+leroyguillaume
+lichensky
+loop-evgeny
+lorin
+LostInTheWoods
+MaayanMordehai
+maletin
+marcflausino
+marcosdiez
+markwort
+matburt
+matonb
+mator
+mattclay
+mattupstate
+maxamillion
+mguillaume
+michael-dev2rights
+MichaelDBA
+mjrepo2
+mkrizek
+mnietz
+mohangk
+monkz
+mribeiro
+mspanc
+mullaiarasu
+nbw74
+nergdron
+nerzhul
+nh2
+nodiscc
+nskalis
+ojc97
+pbaisla
+perezjasonr
+PeteDevoy
+phemmer
+pierot
+Piknik1990
+pilou-
+placaze
+pmauduit
+raneq
+raymondroelands
+replaced
+rgl
+rightaway
+rmfitzpatrick
+rosowiecki
+rouge8
+rtsisyk
+russoz
+sahapasci
+saito-hideki
+samccann
+samdoran
+SantiRaposo
+saxus
+sbulage
+ScottSturdivant
+seanknox
+sebasmannem
+set-db-id
+sfilipov
+SHUFIL
+silvio
+skilyazhnev
+snopoke
+strk
+tartansandal
+Tas-sos
+tcraxs
+tedder
+tiggi
+till
+tinproject
+TJEvans
+tom-clx
+tomscytale
+Trikke76
+truki
+tYYGH
+Vanav
+veger
+vfoucault
+vmalloc
+vosmax
+willthames
+wrosario
+wvidana
+yteraoka
+zikalino
+zswanson
+zyitingftnt
diff --git a/ansible_collections/community/postgresql/COPYING b/ansible_collections/community/postgresql/COPYING
new file mode 100644
index 000000000..f288702d2
--- /dev/null
+++ b/ansible_collections/community/postgresql/COPYING
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/community/postgresql/FILES.json b/ansible_collections/community/postgresql/FILES.json
new file mode 100644
index 000000000..59d622bc0
--- /dev/null
+++ b/ansible_collections/community/postgresql/FILES.json
@@ -0,0 +1,2707 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/aggregate-coverage.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "820353ffde6fd3ad655118772547549d84ccf0a7ba951e8fb1325f912ef640a0",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/combine-coverage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e34d4e863a65b9f53c4ca8ae37655858969898a949e050e9cb3cb0d5f02342d0",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/process-results.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c03d7273fe58882a439b6723e92ab89f1e127772b5ce35aa67c546dd62659741",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/publish-codecov.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "70c795c8dbca2534b7909b17911630b7afaa693bbd7154e63a51340bc8b28dad",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/report-coverage.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6a373322759ccc2736fb25d25d8c402dfe16b5d9a57cfccb1ca8cb136e09663",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/run-tests.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb08a3ec5715b00d476ae6d63ca22e11a9ad8887239439937d2a7ea342e5a623",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/time-command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0232f415efeb583ddff907c058986963b775441eaf129d7162aee0acb0d36834",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/coverage.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "daf1930264760d47b54588f05c6339fd69ca2d239c77c44bc4cee3c4e9f76447",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/matrix.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4fb0d3ffb2125d5806c7597e4f9d4b2af69cf8c337e9d57803081eddd4a6b081",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2cfa1271f94c71f05ffa0b1f763d8946394b5636e14579cda8ee14bb38bbcf1c",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61f20decd3c8fb34ac2cc6ff79f598fc5136e642130a7ba065ccc5aa37960cd2",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/azure-pipelines.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41d298e610516fda313e5bfa8de7bbd1be63ccb302c42df4f21a1dc025a0d0ac",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/CODEOWNERS",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8057bf580df20c1254537a0a8715ba4fb830d359a914257542705460163f8d2",
+ "format": 1
+ },
+ {
+ "name": ".github/patchback.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f28653c2f8d2965a55f76092049c4205a9c7f828e4edbd1cd089f7dd2685f93a",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3e11aa29315386df71622ac86144d7ede576f54dadc87a179d96916fb844f32",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc4c2808161bd591e443ffd315931ec82d014ee09698caf078ca953e8689a8ce",
+ "format": 1
+ },
+ {
+ "name": "docs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/links.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c71502b898e0666305c4259147b54a2d5232d68ccd9cf56b3b51066f9f44b078",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b42a308d6db3b0062a9cb8490d522b2369a1b2f502103f2a1188ecad45078f44",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/postgres.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5e6d7613c0b367a3cb1f0094a0ae8ecbb72f7dbbd96dadc5bf59c7e385f2fc9",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_version.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae6c984a7e9dd51753ea7fcb5995d5655016dd5dc187cd9be7216ef7045f220b",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae0bbf1af2bec24b4393e731ad6688e452c9ddaef4bf37925f24d935aa3ce3a7",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/postgres.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdb9a1aa846495b8dfe3dc8a908ad8208463dab66dd10351f0c3b76114ff18af",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/saslprep.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c55980dbc037b35033ead955787ca7660e52d2502b09701394940d7c27e63590",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/version.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "475a5d990c7314a0ddd22f024b7eaefd2a0f04cbf5dc1543b79b7a3fc7920f4f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_copy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a18b00eb5e4c3b6602379c605f8529a434451bcaf7aa238bddafeacd0463c362",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c409cdb6d6dacceda3848e6172d607fcaa7f04395fdc40f9198df9ed5be2f30",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_ext.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e22f3438fd9f169b8762a8cba2113a824cdd4239bcf9bf6a82fcf0a73cb568c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_idx.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7527a54f1aff01168c0687fca7554d4840a24b431b62c75adf21625ee76d307c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9220a9def51d2549f791b20925ade84bac4b1f6b11d8e9ea3301abbeabd45e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_lang.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc59aa40e8ce754b1c36793037f1db5f40bdcd240a1a79db03d17f2a532c53f7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_membership.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3aba16398d37253ebe13585766b4296a63597fb4d5f6f6c4107f305e848d778",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_owner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de4c4d00203e534c8153a9b89100c52e4fffe53716534bcc6381370960df6bea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_pg_hba.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb9946b5dbf34c28c4df8a7560e360a7d0deb52e8219d334bbaee20b5958f60",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_ping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5214f3a6fade4670db83abfca1d53cc2e096f8f1c6f9f130c2cead028f526f7d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_privs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09a8b6d073b82e735cfb171bc94259cfffc83be36fdf9d21c9bed8d061d87625",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_publication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c99476190a90b080425dbc4d51aacca6a9596ff83eb106cef3b65ebbbaa027f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f1b18a99606eb8f1a6b429902a285b469cc587aba98054b8802e4fd80bbd519",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_schema.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8087b11aac6f47620807bd31d7b88fa6e1370dcb177beb31d14f1d1b9b239c34",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_script.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b7c75cfc2254d91fdf1949ef37548fd3b053cf431ea80d26f557d11b36331f9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_sequence.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a37fa35888dab62628d7c95bd0a6eb2f8fa0eeb8e45df63dfd1a757d58016929",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52d343f5f3d581072060b4ed6a76f685f75d551f0dc71e5ca06b3b66b5b586fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_slot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aefaa8ec911503aced0a01eed6d0af599a86107e679e427ed0a92a64746275f7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f27e097da06f819773eb75fa2ce05ad1300882ceb082914354082a2480aa9554",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_table.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cddbf54618e293cfed69350a05ca311f38a644d45e8e0f5be83a2ceffe1a8a72",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_tablespace.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "526804aa5f0d4a2aa07853ae4cfc6b728e547b71598f996da94b68e7e9dcfcb4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f50548605c180c66f01364dc59dd6ea3f7fbb01028b8a4bb7193b6a9c78f1582",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_user_obj_stat_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cefc5b44bbd893bad22aa2952699a94aded1e3aba28092a4bad8a1949252c37",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb8afe7f5d60c2d26691849facab97a0cabdf2113dff7963c9dcf8ea399d0354",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "227bcfffff0e41b75d40da9b35af9bb682269ce1c7908fe9a369ffd7355e00e9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11d903913550d93aaffcda9458d70349ce6e703cf071e922c8124dc24b7f9cdb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d950960129fed71c1e93223b6236cd693c66d8bccba1154f62201f1db287c17",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/manage_database.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b45b2984ce7cfca91b43b2ce102b27ee809305671b8a9f907477db459b768818",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "061558b412eb0734d680cc4498c45dacb89222065c292631fe4f9351d9f74eca",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b8632b1b5caaadee859948072f7083858b39c5399278b7df135daa2bc3b13bd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e0c9de1f687503901a185e8f3edd0f037d2faac2b3f6b497a4c90776bcc5f48",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1563c9f75c359b901c083a669015054dd95511cdd62b1b26ea1ac7b4f6ba9082",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/state_rename.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5da2a2655e2085e5af640657d7a914f911d4f093c6bbbe432ac3ffcfc7f2bf4e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3dbb7181be6f88eb1887cc28be153f197c76ccdb0512af52f311816658b1e9e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0825fa293c525220ac6e3f52f1fbd7464a1ea91281eda9fb3dc3d614ba77f815",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62bf42b117b74dcc39491f0501575a090971dd200281a3d7b1323601f8429365",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69406a13751afa1a236adfa8a4f6d029018ee42986ab25676885bb54a0d5adc5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15f44b7169d69b892328252ba599949389ca63c427cd4302c6ef399bb4036b98",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38cc648421d276f756eb7c8c1d3c13338b265bde8f0555f37c8193edbe749efc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfd80e8736e13290c4dc2c4e192feb9709044d3d4a627c487e242a751c046143",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d1d066196bc922c2a80e67260f93a69bd805233b4f4b60baed742ef0e68e8e2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be09f7acd3c03ff517ade5601bc6a55aafe3c0d8e60fc37133f1e24d69935f8c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f464ed52348054f1db0bb18f4bfa94ac0814195044eaa4daf5415b549b8e2e55",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3506fe6d008386fb0503c072d97973117bb79ad293ad91e000a9c3ce4c3ba7d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab541e45bbbeb211496e76434bd09715e9a541449c267f449d4625b044465286",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41aee9caccefdd5eec4b8fd1eeaea84532cd4402095e8742d0e866cc8139b5b4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbd44018a0af971de6081938ab2d4da7c321776747fd32370bc649077bdde035",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/tasks/setup_publication.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14dc41160c4a299c14764e61709c05ed30d1f600261d6ec8aeb9f10b5ef8172f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcd58a1a5188a145948f0d2cd67f7d46900d949fcb7f3b28e7731b0a062df66e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b75ee9a6bf02c5ff2f9e629a35e588e5297d1bca6463f5fc69a06aa27735d96f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "948ea39535bf70b471e77c1cbcd13c6a4b7d5e6e4bfa6e2437622c3ba8e16f29",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4584a76cf4cf30ac1358827b4cbe45dfcb5b72bcb135e2ee54ea51bd171ad06",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/vars/CentOS-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2395b707eb60229acffb97605104820a7f00a23d83d63d90e353929e97fb4e9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/vars/CentOS-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5b45ee4b79b491c7c057d1c4c940df1ef7fa8e7fa6e1d006cbb1f839eeca40d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b96d9358354b72ca27b4079b748fc140086aa57b553209aa83887e15ee219420",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5aca162654e8110b7883ff6ce7517273d7b49688a05d8331b0d39f5321b4d8a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2c694201bc582cf356b88cf232784b18ad024e63b7aa321c8ffda153914d440",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c1aa84798ed23a23135fbf53a881b42e2c1edc79f8edc7509829125fefa1a05",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be8c3305d4986b588578932b0b29cf10d6c85909adf2a31afeaa8fa4c9d9d946",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dec6d60389b250e99e4e921f63194be772a2f7ca18db43cb560fb1af40336a9f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa3d3f8e3ee368e1eecc94e7f4dcd115d8f94bc74fc00786722f0ddee3da5885",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5db0027e05a16df3c5037e7cb43f5c375a0a2ab9e1a80c505e87a53d3681243e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_bulk_rules.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34cbf3cfa8a38e8b1d90cbe58b1b9daf6282389f92eb2cc0f9d6429337baae09",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e101fbb7da23884928d57b0d1db0f05e3161eea29e04aa93a972703fac28ddf9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee4c4c179b3e7fdde23d8cf3a4855188e229f2a4ba2828bb3dd8b0a6a1365aea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "115c090f1029bac19af8970417d5e50b212a935c55a2d23059175fdb308f3b92",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2b1244d57e5b287a6ae73864218c3ed352db0d947ad6bfd4a68a5691a557ebc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a159d14c43bbb47db5d8ff342f41eaef05d84b54cebb4d47c790f809d463358c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52d9ea758f3abea032d42028fe55c7373ac250cc5285a23e22f49229a14831f9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25f712deb1fb6c9d6dbf171a66dc37fbbb9cbf74cdec4de062d0ac940617f256",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96211bd4e4c063d0ff264f11759bf60bf6a7d788eac99829a6c36788e683e06c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f1f4ba86290047039bb1562f7bd2171508f5103c06513132c22fdaa833461e0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6cda241469eebab9d252f4f14d4535d5bd8fd8733c7a7451c4ecf2dc5361db9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/test_target_role.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9e749fe3d9b2172ca69efe4bf6072d0fcaebd5b8d291684471af479e0502a5d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "663e2ade4e3c5f2b5e2c75da96b8f596196a21fe8bb4ce5fcecb63e2f664d0b7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22d46a9a422663a28385e66709700895dceb8c31f6ff5b621c1d055f54e83703",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d1d066196bc922c2a80e67260f93a69bd805233b4f4b60baed742ef0e68e8e2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/files/test0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2dc19f190b86228709bced10068509a40c6141f8347a3c0cce52b3f787e1876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/files/test1.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8de29e382b1af32c9352400d107e16d232405cc50c126ae7f99a5a0879f34320",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a77f5f53953beb0037561ef70130651715fe677f0428a3f56e9facc00dcc5eb5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f63e7d48deecded04b2273fddd1c4199c315ab88b00447c94ea7064c0369cd5c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db462dadd7106222bf4e2f1beab06e522172bd5ba2502cb702aaead6a06ac716",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01c1f7f0d634cabfe23bdbaac0559d84c25265bafeebf3ffba6175a6e90f837a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5be03157af9f62f7caa17e5ad3dc289cf7b59289fa700cc0d9b930d3cbbdc1d5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49cb0f6fa9c65df21299de881cbca9ce1d3ac306ef3948b29364a95e91707ed5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95690021ab6d3c369fb257d78ca76858121de3c2c4c82a3a1dfb8bc496e21d52",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3c59720ac1314a814f51dced181f9d624d11b0087f095cf75dc97a90b6cd777",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test1.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8de29e382b1af32c9352400d107e16d232405cc50c126ae7f99a5a0879f34320",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test10.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb9bddc60d75facc146d9bff4019fe67ae27b01bc9a94b5707d317a006e1ce68",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test11.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef27458f43bb4a04ed4ecad679886bdd10742ea734dad2317b0e09868ac34dc9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test12.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13f59ea335d9268a21f0c86f7ea06d0715414b2e880b34e913b240efa80f2419",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test2.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d032b5b14771abd0c6daff370337dbbf6c3ea7e40539b64ec43b85403100d17",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test3.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee743c2fb1ba25bebb863125e8020ef3f73eca659a0db2f4fc19f9cd23313b84",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test4.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "138ce2e680197629658ce320a4cbda8c6f1a3e9b29a73eb54593620e798c05d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test5.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42ac6db917b330f10abf3614bc9789be5644c862fd53db785d20d4133201f740",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test6.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3447c9af2c7529d6a0e328fb91a7ede629693b77d9edc0ad1ac89bf64620e785",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test7.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac2080f46c70950b11763ce82d491d3f26390db7933e71088f40653d96cf8b55",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test8.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9d11fa7778ec4785bc87c801e80326505ecfcdd01bf0154e5f2c78e6b7949da",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/files/test9.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9c257ab7645fc5e2236403fbba58f5774e3b2b0fcd01b6d2f42d8d422461d3f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65301419dea70131e2c9989083f5ccccb2d19ae5046ca2aa725a25ab0dec0050",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/tasks/postgresql_script_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c46121b0a3e766cfe9e5084020153a73e15f0f9c88d513c651b607b792068411",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_script/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d437a0ee3b271c8934e68d14684931a56a398d633c6b549e74ee3a9f17f9bc46",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e2b8a8ad0b65cf738ea7fdcb6b18f528a6bcfffa1981f91838fc284ac4ddb6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30974061626e679d175d757c5a5cfbf33fb53e53bb97e68539b332a0fe4904aa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "955f64aa9a8675f80d702ab39436a2b236d1adb7b9c89a4dfe313fe1e0c51594",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/tasks/options_coverage.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f312eb39567039be39982c9ce88aef490b1935622bcb05b056d3ba49b6f4f5e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e249199ac6585a92d65893cf7770cce14009edd32c452114d585b9f09a55a91",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5419b276f7c47acaf995267ce3acbb56a0922a4f715db0df27cf0e535cd0220d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c472534156211fe22c58e9224346d4a839951318551f0d0b739a904c78369a7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8104db292a951ca57fe3a0321e894ff18aad3e88d58981e3db7936f5f1d1bb34",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab541e45bbbeb211496e76434bd09715e9a541449c267f449d4625b044465286",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eca673fddaf151a108873b2567fc9e40fb19ec24f6559355a602e983fdcf5495",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6dbb3ba2ca853ffd06bd6883fa5d23e0517237d5a27edc38bc8300fb6b33dc2f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "918cdbb3ae36aa7d4dfb2d40bd80603c89b564a84b27208d17d97908b0ed6b40",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcd58a1a5188a145948f0d2cd67f7d46900d949fcb7f3b28e7731b0a062df66e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "472e36bde52e14d2f401739cdbcaf09c945bafaec718ff016890616b6bc3c118",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a15fff8baf923b80ab404995350a79aab6ed8485909e461c06b00bc242071963",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01f5b747e4b37d8d14ee65bf72bc036da73766464ae02af174f5b6d41394aa87",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "589d9dd6e690018d918be7f002c310d40aa4632f100d32008dbd7db685d47ecb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "188470559381756e939df9d964d444ff47e7df613c77b6cc2653627bab65df69",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aff3b89f4673ee60e03ac6e27421682dc328a04fcc5fabc1f477c13a27d89db5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b14eed97d15235e2056971743de23709e18c00b91c358fe1e79002c98ece9d60",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0472c601119258ae89952a06b702c47ec3832b55fccc952ec8f1548a006d0f37",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7481301d5bc8dd240f78002e63d47b3e79ed995a6632918ae15d4b31b84650e6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87c87a59f114652085d8a03cc13a6c0d7ed08568a29e2d971f7019dd8e6c72ab",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb25d012b3d69ac106313904d1e373e47d728f2c819c9c7e875012c1313ace42",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks/test_password.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39e99bd0fae186faf4dc75d509281377a767d2e40418091d6831ffbf33248e74",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd425d7bbca57edbcc6883e032b1d00ecdc9b2cc1d75b0be4fda3e488fed7053",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72441f6b0cc63e71b547b18314bc8b45bb8a792ba466d75c8a4b2c63c7bdecc2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dacef0aa7902a944e61a6bfb5bd1d4f13a065e63a18d271bfd1099e78df2637f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcd58a1a5188a145948f0d2cd67f7d46900d949fcb7f3b28e7731b0a062df66e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6b87087025315be7a9fc906605f75fb4eae352d9171caddd5819c1edb406476",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "254d5cd2cd0525f306778e08c6d9b445e21558fc4f7ecfb20fc86df61969e8da",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17870abf494f66e301533b74bf9365202240597248988bc69fa72756e92c63c3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4e7839720cd873fbfbe855a61c55f4d69bf5154c420a5a776daccba0db0326e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be49da51a69d0f8af9ad8bfd120189b95aa9feb2ea00be9e2f6e06af3a5c754b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3-1.0-1.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5516cb6238870997d822d802686f9eac0956ad53bd4e5b3959d6b2e5fcb36eef",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3-1.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0d4df7c2947d5ac3a178e163609a85d34060b58b5e8e31a801cf8ab92387d1f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3-1.foo.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f51f61a2dc69d5919cee8eda723140b2d38f37035c30557e10db51ce082f9463",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3.0-1.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3a5dc4725ad7827ff524fbdc4db3b242206fba361c457add86e644b35e50d20",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3.0-foo.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c2f09148859e64cfcc42e0a3e2445c8517926f57ad76cd64867a1988f4242761",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a16cb164d32705033b9e7a7c4e9b8050de79c561deddbcc8603e8d0d59cb563e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3.beta.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "723e906514a4bdc5647866b0c37abd08b668894ef31801a6c27c57fcfd553523",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--4.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a49f62cb78167ef96fe33e7c642db2efe8f24839d09a016d0b000155c0181b9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--v4.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74a05cf1c260e40504cbde4dfa176159ae2af69e6a1c1f59754368624d660062",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy.control",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6cd70d95050891a3652aa70d210742617655e5cd09cba7bf8abbe943b392dc3a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/pg_hba.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1d8fd0e6645d939cf0fc5a67738039e036f06c540efeb8a18bf9fed779ddb40",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e061afedc2f4520674a2a3136e9c15cfca871eaef9a1d4a2beed9609352be08",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/tasks/ssl.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb482e7ee6912b74be31f9fe6b254a87d1717f9d40ae8823e2913331b9586ad7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea1cab96532e023ca4622a31488dd6226c60eb755817868f874c083f8e991eb8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "150c49cf5d8c40a44f33a543d4fb288a952517bbebd367ca223f068e3417c5e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5252a9c72186f877dcc99a3b66a053474f180248404a39f55da78b1546c95ee2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94dc4796606b2bcdca95aba5eefe79a1b4e36cb5c600b15badbc1673340a5ecd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/default-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a88e7f3bdc87e6e9f0a5d02df8672112dba29116fc4ce278eecabc2906f786d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "577e120caebb25a1c1fcafe4991d83e838ca9ef1b18301e2a66e4565057af8e9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15c12558d8848d3b44b4f14324e6b02888ed48fa609fee2329180b92e59d3fe1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a6de4eb2d6d7b2702fe7d970e239c4f4a4f8f31643a18628f9363e63bce4cb6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "332fef4bb8a02f7be01b7dea5632ce4608b2aabe87c40464048aa3e172804108",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "926f8633822359ea7a106ede40c8913368ed8420c70abccf9ddf623aab6b057e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b3e39d80a8a59947b5fba38e8db942a1933ffefcef368cd13a5594fc2f65668",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/templates/primary_postgresql.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e40eae546ddaeff2de7356ece590b70001f976feb6c07b0c83a9fd1b86ea23d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a70a2f2e5beaa52cefcc93461ca067ef2b665b859a468cbe74d27464253bc6e",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/no-unwanted-files.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3d3b17f699b042958c7cd845a9d685bc935d83062e0bcf077f2c7200e2c0bac",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/no-unwanted-files.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1468e7b22ba353d18fcf2f5b18607873f792de629f887798f081eb6e2cd54fc",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.12.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a7c2f4de6f288675dfebc1f6fbb808728c3ef1bec1a29fe2adb80199372621f",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.13.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a7c2f4de6f288675dfebc1f6fbb808728c3ef1bec1a29fe2adb80199372621f",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.14.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a7c2f4de6f288675dfebc1f6fbb808728c3ef1bec1a29fe2adb80199372621f",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.15.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "788d3f00aec392d2c4740329b80911a6b2621e975148d07c2cb9c53d3f736783",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.16.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "788d3f00aec392d2c4740329b80911a6b2621e975148d07c2cb9c53d3f736783",
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_postgres.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c08f2ecc41169ebd451837abd543a5102ece6befcff0e70f9ad06acd4d6ee5c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_saslprep.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5e044a935e091aaf52115d3f3238bcd32b3627f9cebac26bd1d6d52aa339953",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_postgresql_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a9478ec6a1548cb1ddf26fea59ef25dea5755340634495d9128f55c22aafce3",
+ "format": 1
+ },
+ {
+ "name": "tests/utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/aix.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/freebsd.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/rhel.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/check_matrix.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "163dc2e4b0fb28faa6a03c02c7a5b2e470ca156e119943bf1d8bbf5efff02c18",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/linux.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07aa5e07a0b732a671bf9fdadfe073dd310b81857b897328ce2fa829e2c76315",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/remote.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/sanity.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a01d92ca36ea457c0e7032ece03a0b485377eef8c8598d8f7c04a185fba279ed",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/shippable.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1ea59d27bbe21954ece52642108b4fd10d3f526d8efa25de875b61cdea180a3",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/timing.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebb7d3553349747ad41d80899ed353e13cf32fcbecbb6566cf36e9d2bc33703e",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/timing.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3f3cc03a997cdba719b0542fe668fc612451841cbe840ab36865f30aa54a1bd",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/units.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a712977b416e5b93325b40d0bf855e4817e597076552f79777aeca8d2fa192bd",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/constraints.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5beb3383ef5546038c2c00ea8e5f438e607d91828ce2259594fd8fbaea003ec9",
+ "format": 1
+ },
+ {
+ "name": "tests/requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "309eab525a905a19f04aa713a60a344b64f115cbdece10d237ddd35f5fc76311",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2be3a33e5081dc3262b52ce0600fac4942cac650e3ccde5ba1f4aaeee59077a9",
+ "format": 1
+ },
+ {
+ "name": "CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0c50cf3715d59964a341dc651a6f626322209ef9fa8c0d03047d3a2b2e420a4",
+ "format": 1
+ },
+ {
+ "name": "CONTRIBUTORS",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f43fe39043d7329c341144c785599aa3dd3d262ae6876ef257c7a49547151ae4",
+ "format": 1
+ },
+ {
+ "name": "COPYING",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": "MAINTAINERS",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fe7a1f63961de06fdcde8ecf4cc744daa41b9dba2e010a578f3e356fa9e37b3",
+ "format": 1
+ },
+ {
+ "name": "MAINTAINING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2435665a6562d5f3841fff1631970f95f0466c498e949d2b8579ccc2a0b810ad",
+ "format": 1
+ },
+ {
+ "name": "PSF-license.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83b042fc7d6aca0f10d68e45efa56b9bc0a1496608e7e7728fe09d1a534a054a",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7ef341ae1187f4ad2b6b2505899075ab5dbe06ebd138058055c59f1ef1ebffc",
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff1b863d960920e09f3c3e4b8dffcb581359b71498fe620e2e3cc0393ad64550",
+ "format": 1
+ },
+ {
+ "name": "shippable.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0554a7502c0a59a09f9ae26e8f64a44192bd56b40088cfb5fc3dbb93db74554c",
+ "format": 1
+ },
+ {
+ "name": "simplified_bsd.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6036f79d054f42e11f2dd52458b4d2282e901d197955e598bf1a23600280cf0",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/community/postgresql/MAINTAINERS b/ansible_collections/community/postgresql/MAINTAINERS
new file mode 100644
index 000000000..ac6d7b3fa
--- /dev/null
+++ b/ansible_collections/community/postgresql/MAINTAINERS
@@ -0,0 +1,6 @@
+aleksvagachev (aleks_vagachev on Matrix)
+Andersson007 (andersson007_ on Matrix)
+betanummeric
+hunleyd
+jchancojr
+tcraxs
diff --git a/ansible_collections/community/postgresql/MAINTAINING.md b/ansible_collections/community/postgresql/MAINTAINING.md
new file mode 100644
index 000000000..9fad0d343
--- /dev/null
+++ b/ansible_collections/community/postgresql/MAINTAINING.md
@@ -0,0 +1,3 @@
+# Maintaining this collection
+
+Refer to the [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).
diff --git a/ansible_collections/community/postgresql/MANIFEST.json b/ansible_collections/community/postgresql/MANIFEST.json
new file mode 100644
index 000000000..98c8a04e6
--- /dev/null
+++ b/ansible_collections/community/postgresql/MANIFEST.json
@@ -0,0 +1,32 @@
+{
+ "collection_info": {
+ "namespace": "community",
+ "name": "postgresql",
+ "version": "2.4.2",
+ "authors": [
+ "Ansible PostgreSQL community"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "database",
+ "postgres",
+ "postgresql"
+ ],
+ "description": null,
+ "license": [],
+ "license_file": "COPYING",
+ "dependencies": {},
+ "repository": "https://github.com/ansible-collections/community.postgresql",
+ "documentation": "https://docs.ansible.com/ansible/latest/collections/community/postgresql",
+ "homepage": "https://github.com/ansible-collections/community.postgresql",
+ "issues": "https://github.com/ansible-collections/community.postgresql/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7e71b7ad0987031481832f772a46ae4e27dbb433409e374dca4668c74260ebd",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/community/postgresql/PSF-license.txt b/ansible_collections/community/postgresql/PSF-license.txt
new file mode 100644
index 000000000..35acd7fb5
--- /dev/null
+++ b/ansible_collections/community/postgresql/PSF-license.txt
@@ -0,0 +1,48 @@
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation;
+All Rights Reserved" are retained in Python alone or in any derivative version
+prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
diff --git a/ansible_collections/community/postgresql/README.md b/ansible_collections/community/postgresql/README.md
new file mode 100644
index 000000000..076239137
--- /dev/null
+++ b/ansible_collections/community/postgresql/README.md
@@ -0,0 +1,170 @@
+# PostgreSQL collection for Ansible
+| | | | |
+|--|--|--|--|
+|[![Build Status](https://dev.azure.com/ansible/community.postgres/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.postgres/_build?definitionId=28)|[![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.postgresql)](https://codecov.io/gh/ansible-collections/community.postgresql)| |[![Discuss on Matrix at #postgresql:ansible.com](https://img.shields.io/matrix/postgresql:ansible.com.svg?server_fqdn=ansible-accounts.ems.host&label=Discuss%20on%20Matrix%20at%20%23postgresql:ansible.com&logo=matrix)](https://matrix.to/#/#postgresql:ansible.com)|
+
+This collection is a part of the Ansible package.
+
+## Code of Conduct
+
+We follow the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project.
+
+If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint.
+
+## Contributing to this collection
+
+The content of this collection is made by [people](https://github.com/ansible-collections/community.postgresql/blob/main/CONTRIBUTORS) just like you, a community of individuals collaborating on making the world better through developing automation software.
+
+We are actively accepting new contributors.
+
+All types of contributions are very welcome.
+
+You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.postgresql/blob/main/CONTRIBUTING.md)!
+
+We use the following guidelines:
+
+* [CONTRIBUTING.md](https://github.com/ansible-collections/community.postgresql/blob/main/CONTRIBUTING.md)
+* [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html)
+* [Ansible Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/index.html)
+* [Ansible Collection Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections)
+
+## Collection maintenance
+
+The current maintainers (contributors with `write` or higher access) are listed in the [MAINTAINERS](https://github.com/ansible-collections/community.postgresql/blob/main/MAINTAINERS) file. If you have questions or need help, feel free to mention them in the proposals.
+
+To learn how to maintain / become a maintainer of this collection, refer to the [Maintainer guidelines](https://github.com/ansible-collections/community.postgresql/blob/main/MAINTAINING.md).
+
+It is necessary for maintainers of this collection to be subscribed to:
+
+* The collection itself (the `Watch` button -> `All Activity` in the upper right corner of the repository's homepage).
+* The "Changes Impacting Collection Contributors and Maintainers" [issue](https://github.com/ansible-collections/overview/issues/45).
+
+They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn).
+
+## Communication
+
+We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn). If you are a collection developer, be sure you are subscribed.
+
+Join us on Matrix in:
+
+* `#postgresql:ansible.com` [room](https://matrix.to/#/#postgresql:ansible.com): questions on how to contribute and use this collection.
+* `#users:ansible.com` [room](https://matrix.to/#/#users:ansible.com): general use questions and support.
+* `#ansible-community:ansible.com` [room](https://matrix.to/#/#community:ansible.com): community and collection development questions.
+* other Matrix rooms or corresponding bridged Libera.Chat channels. See the [Ansible Communication Guide](https://docs.ansible.com/ansible/devel/community/communication.html) for details.
+
+We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn) and join us.
+
+For more information about communication, refer to the [Ansible Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
+
+## Governance
+
+We, [the PostgreSQL working group](https://github.com/ansible-collections/community.postgresql/wiki/PostgreSQL-Working-Group), use [the community pinboard](https://github.com/ansible-collections/community.postgresql/issues/30) for general announcements and discussions.
+
+The process of decision making in this collection is based on discussing and finding consensus among participants.
+
+Every voice is important and every idea is valuable. If you have something on your mind, create an issue or dedicated discussion and let's discuss it!
+
+## External requirements
+
+The PostgreSQL modules rely on the [Psycopg2](https://www.psycopg.org/docs/) PostgreSQL database adapter.
+
+## Tested with ansible-core
+
+Tested with the following `ansible-core` releases:
+- 2.12
+- 2.13
+- 2.14
+- current development version
+
+Ansible-core versions before 2.12.0 are not supported.
+Our AZP CI includes testing with the following docker images / PostgreSQL versions:
+
+- CentOS 7: 9.2
+- RHEL 8.3 / 8.4: 10
+- Fedora 34: 13
+- Ubuntu 20.04: 14
+
+## Included content
+
+- **Info modules**:
+ - [postgresql_info](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_info_module.html)
+ - [postgresql_ping](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_ping_module.html)
+ - [postgresql_user_obj_stat_info](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_user_obj_stat_info_module.html)
+
+- **Basic modules**:
+ - [postgresql_db](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_db_module.html)
+ - [postgresql_ext](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_ext_module.html)
+ - [postgresql_lang](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_lang_module.html)
+ - [postgresql_pg_hba](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_pg_hba_module.html)
+ - [postgresql_privs](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_privs_module.html)
+ - [postgresql_set](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_set_module.html)
+ - [postgresql_schema](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_schema_module.html)
+ - [postgresql_tablespace](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_tablespace_module.html)
+ - [postgresql_query](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_query_module.html)
+ - [postgresql_user](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_user_module.html)
+
+- **Other modules**:
+ - [postgresql_copy](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_copy_module.html)
+ - [postgresql_idx](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_idx_module.html)
+ - [postgresql_membership](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_membership_module.html)
+ - [postgresql_owner](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_owner_module.html)
+ - [postgresql_publication](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_publication_module.html)
+ - [postgresql_sequence](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_sequence_module.html)
+ - [postgresql_slot](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_slot_module.html)
+ - [postgresql_subscription](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_subscription_module.html)
+ - [postgresql_table](https://docs.ansible.com/ansible/latest/collections/community/postgresql/postgresql_table_module.html)
+
+## Using this collection
+
+### Installing the Collection from Ansible Galaxy
+
+Before using the PostgreSQL collection, you need to install it with the Ansible Galaxy command-line tool:
+
+```bash
+ansible-galaxy collection install community.postgresql
+```
+
+You can include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml`, using the format:
+
+```yaml
+---
+collections:
+ - name: community.postgresql
+```
+
+You can also download the tarball from [Ansible Galaxy](https://galaxy.ansible.com/community/postgresql) and install the collection manually wherever you need.
+
+Note that if you install the collection from Ansible Galaxy with the command-line tool or tarball, it will not be upgraded automatically when you upgrade the Ansible package. To upgrade the collection to the latest available version, run the following command:
+
+```bash
+ansible-galaxy collection install community.postgresql --upgrade
+```
+
+You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax:
+
+```bash
+ansible-galaxy collection install community.postgresql:==X.Y.Z
+```
+
+See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
+
+## Release notes
+
+See the [changelog](https://github.com/ansible-collections/community.postgresql/blob/main/CHANGELOG.rst).
+
+## Roadmap
+
+See the [release plan](https://github.com/ansible-collections/community.postgresql/issues/13).
+
+## More information
+
+- [Ansible Collection overview](https://github.com/ansible-collections/overview)
+- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
+- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
+- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
+
+## Licensing
+
+GNU General Public License v3.0 or later.
+
+See [LICENSE](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text.
diff --git a/ansible_collections/community/postgresql/changelogs/changelog.yaml b/ansible_collections/community/postgresql/changelogs/changelog.yaml
new file mode 100644
index 000000000..45e4461b2
--- /dev/null
+++ b/ansible_collections/community/postgresql/changelogs/changelog.yaml
@@ -0,0 +1,531 @@
+ancestor: null
+releases:
+ 0.1.0:
+ changes:
+ bugfixes:
+ - postgresql_ext - fix the module crashes when available ext versions cannot
+ be compared with current version (https://github.com/ansible-collections/community.general/issues/1095).
+ - postgresql_ext - fix version selection when ``version=latest`` (https://github.com/ansible-collections/community.general/pull/1078).
+ - postgresql_privs - fix module fails when ``type`` group and passing ``objs``
+ value containing hyphens (https://github.com/ansible-collections/community.general/issues/1058).
+ minor_changes:
+ - postgresql_info - add ``in_recovery`` return value to show if a service in
+ recovery mode or not (https://github.com/ansible-collections/community.general/issues/1068).
+ - postgresql_privs - add ``procedure`` type support (https://github.com/ansible-collections/community.general/issues/1002).
+ - postgresql_query - add ``query_list`` and ``query_all_results`` return values
+ (https://github.com/ansible-collections/community.general/issues/838).
+ release_summary: 'The ``community.postgresql`` continues the work on the Ansible
+ PostgreSQL
+
+ modules from their state in ``community.general`` 1.2.0.
+
+ The changes listed here are thus relative to the modules ``community.general.postgresql_*``.
+
+ '
+ fragments:
+ - 0.1.0.yml
+ - 1048-postgresql_privs_add_procedure_type.yml
+ - 1059-postgresql_privs_fix_failings_when_using_roles_with_hyphen.yml
+ - 1078-postgresql_ext_fix_version_selection_when_version_is_latest.yml
+ - 1091-postgresql_info_add_in_recovery_ret_val.yml
+ - 1099-postgresql_ext_fix_failing_when_version_cannot_be_compared.yml
+ - 886-postgresql_query_add_ret_vals.yml
+ release_date: '2020-10-29'
+ 1.0.0:
+ changes:
+ release_summary: 'This is the first proper release of the ``community.postgresql``
+ collection which is needed to include the collection in Ansible.
+
+ This changelog does not contain any changes because there are no changes made
+ since release 0.1.0.
+
+ '
+ fragments:
+ - 1.0.0.yml
+ release_date: '2020-11-17'
+ 1.1.0:
+ changes:
+ bugfixes:
+ - postgresql_info - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40).
+ - postgresql_ping - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40).
+ - postgresql_set - return a message instead of traceback when a passed parameter
+ has not been found (https://github.com/ansible-collections/community.postgresql/issues/41).
+ minor_changes:
+ - postgresql_query - add ``as_single_query`` option to execute a script content
+ as a single query to avoid semicolon related errors (https://github.com/ansible-collections/community.postgresql/pull/37).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 1.0.0.'
+ fragments:
+ - 1.1.0.yml
+ - 37-postgresql_query_add_single_query_opt.yml
+ - 42-postgresql_set_add_message_when_parameter_not_found.yml
+ - 43-modules_fix_version_parsing.yml
+ release_date: '2021-01-18'
+ 1.1.1:
+ changes:
+ bugfixes:
+ - postgresql_query - add a warning to set ``as_single_query`` option explicitly
+ (https://github.com/ansible-collections/community.postgresql/pull/54).
+ - postgresql_query - fix datetime.timedelta type handling (https://github.com/ansible-collections/community.postgresql/issues/47).
+ - postgresql_query - fix decimal handling (https://github.com/ansible-collections/community.postgresql/issues/45).
+ - postgresql_set - fails in check_mode on non-numeric values containing `B`
+ (https://github.com/ansible-collections/community.postgresql/issues/48).
+ release_summary: 'This is the patch release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 1.1.0.'
+ fragments:
+ - 1.1.1.yml
+ - 46-postgresql_query_fix_decimal_handling.yml
+ - 51-postgresql_query_fix_datetime_timedelta_type_handling.yml
+ - 52-postgresql_set_fix_b_values_handling.yml
+ - 54-postgresql_query_add_warning_as_single_query.yml
+ release_date: '2021-02-09'
+ 1.2.0:
+ changes:
+ minor_changes:
+ - postgresql_info - add the ``patch``, ``full``, and ``raw`` values of the ``version``
+ return value (https://github.com/ansible-collections/community.postgresql/pull/68).
+ - postgresql_ping - add the ``patch``, ``full``, and ``raw`` values of the ``server_version``
+ return value (https://github.com/ansible-collections/community.postgresql/pull/70).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 1.1.1.'
+ fragments:
+ - 1.2.0.yml
+ - 68-postgresql_info_add_ret_values.yml
+ - 70-postgresql_ping_add_ret_values.yml
+ release_date: '2021-03-30'
+ 1.3.0:
+ changes:
+ bugfixes:
+ - postgresql_privs - fix ``fail_on_role`` check (https://github.com/ansible-collections/community.postgresql/pull/82).
+ major_changes:
+ - postgresql_query - the default value of the ``as_single_query`` option will
+ be changed to ``yes`` in community.postgresql 2.0.0 (https://github.com/ansible-collections/community.postgresql/issues/85).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 1.2.0.'
+ fragments:
+ - 1.3.0.yml
+ - 82-fix-fail-on-role.yml
+ - 87-postgresql_query_add_warn_announcement.yml
+ release_date: '2021-05-03'
+ 1.4.0:
+ changes:
+ minor_changes:
+ - postgresql_db - add support for the ``directory`` format when the ``state``
+ option is ``dump`` or ``restore`` (https://github.com/ansible-collections/community.postgresql/pull/108).
+ - postgresql_db - add the ``rename`` value to the ``state`` option (https://github.com/ansible-collections/community.postgresql/pull/107).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 1.3.0.'
+ fragments:
+ - 1.4.0.yml
+ - 107-postgresql_db_add_rename_state_value.yml
+ - 108-postgresql_db_directory_format.yml
+ release_date: '2021-07-13'
+ 1.5.0:
+ changes:
+ bugfixes:
+ - postgresql_ext - Fix extension version handling when it has 0 value (https://github.com/ansible-collections/community.postgresql/issues/136).
+ - postgresql_info - Fix extension version handling when it has 0 value (https://github.com/ansible-collections/community.postgresql/issues/137).
+ - postgresql_set - Fix wrong numerical value conversion (https://github.com/ansible-collections/community.postgresql/issues/110).
+    - postgresql_slot - Correct the server_version check for PG 9.6 (https://github.com/ansible-collections/community.postgresql/issues/120).
+ minor_changes:
+ - postgresql_db - Add the ``force`` boolean option to drop active connections
+ first and then remove the database (https://github.com/ansible-collections/community.postgresql/issues/109).
+ - postgresql_info - Add the ``raw`` return value for extension version (https://github.com/ansible-collections/community.postgresql/pull/138).
+ - postgresql_pg_hba - Add the parameters ``keep_comments_at_rules`` and ``comment``
+ (https://github.com/ansible-collections/community.postgresql/issues/134).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 1.4.0.'
+ fragments:
+ - 1.5.0.yml
+ - 120-postgresql_correct_server_version_check.yml
+ - 134-postgresql_pg_hba-rule-specific-comments.yml
+ - 138-postgresql_info_fix_ver_handling.yml
+ - 145-postgresql_set_fix_wrong_value_conversion.yml
+ - drop_db_with_force.yml
+ release_date: '2021-09-30'
+ 1.6.0:
+ changes:
+ bugfixes:
+ - postgresql_ext - Handle postgresql extension updates through path validation
+ instead of version comparison (https://github.com/ansible-collections/community.postgresql/issues/129).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 1.5.0.'
+ fragments:
+ - 1.6.0.yml
+ - 163-better_handling_of_postgresql_extensions.yml
+ release_date: '2021-11-24'
+ 1.6.1:
+ changes:
+ bugfixes:
+ - Collection core functions - use vendored version of ``distutils.version``
+ instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.postgresql/pull/179).
+ - postgres_info - It now works on AWS RDS Postgres.
+ - postgres_info - Specific info (namespaces, extensions, languages) of each
+ database was not being shown properly. Instead, the info from the DB that
+ was connected was always being shown (https://github.com/ansible-collections/community.postgresql/issues/172).
+ release_summary: 'This is the bugfix release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+      have been added after the release of ``community.postgresql`` 1.6.0.'
+ fragments:
+ - 1.6.1.yml
+ - 171-postgres_info__was_not_working_on_aws_rds_postgres.yml
+ - 173-postgres_info_now_shows_the_info_of_each_specific_database.yml
+ - 178-prepare_for_distutils_be_removed.yml
+ release_date: '2022-01-17'
+ 2.0.0:
+ changes:
+ major_changes:
+ - postgresql_query - the default value of the ``as_single_query`` option changes
+ to ``yes``. If the related behavior of your tasks where the module is involved
+ changes, please adjust the parameter's value correspondingly (https://github.com/ansible-collections/community.postgresql/issues/85).
+ release_summary: 'This is the major release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 1.7.0.'
+ fragments:
+ - 185-postgresql_query_as_single_query_true.yml
+ - 2.0.0.yml
+ release_date: '2022-02-10'
+ 2.1.0:
+ changes:
+ major_changes:
+ - postgresql_query - the ``path_to_script`` and ``as_single_query`` options
+ as well as the ``query_list`` and ``query_all_results`` return values have
+ been deprecated and will be removed in ``community.postgresql 3.0.0``. Please
+ use the ``community.postgresql.postgresql_script`` module to execute statements
+ from scripts (https://github.com/ansible-collections/community.postgresql/issues/189).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 2.0.0.'
+ fragments:
+ - 0_deprecation_announcement.yml
+ - 2.1.0.yml
+ modules:
+ - description: Run PostgreSQL statements from a file
+ name: postgresql_script
+ namespace: ''
+ release_date: '2022-02-17'
+ 2.1.1:
+ changes:
+ bugfixes:
+ - module core functions - get rid of the deprecated psycopg2 connection alias
+ ``database`` in favor of ``dbname`` when psycopg2 is 2.7+ (https://github.com/ansible-collections/community.postgresql/pull/196).
+ - postgresql_query - cannot handle .sql file with \\n at end of file (https://github.com/ansible-collections/community.postgresql/issues/180).
+ release_summary: 'This is the bugfix release of the community.postgresql collection.
+
+ This changelog contains all changes to the modules in this collection that
+ have been added after the release of community.postgresql 2.1.0.'
+ fragments:
+ - 0-postgresql_query_fix.yml
+ - 1-postgresq_connection_fix.yml
+ - 2.1.1.yml
+ release_date: '2022-02-28'
+ 2.1.2:
+ changes:
+ major_changes:
+    - postgresql_privs - the ``usage_on_types`` feature has been deprecated and
+ will be removed in ``community.postgresql 3.0.0``. Please use the ``type``
+ option with the ``type`` value to explicitly grant/revoke privileges on types
+ (https://github.com/ansible-collections/community.postgresql/issues/207).
+ release_summary: This is the patch release of the `community.postgresql` collection.
+ This changelog contains all changes to the modules in this collection that
+ have been added after the release of `community.postgresql` 2.1.1.
+ fragments:
+ - 0_deprecation_announcement.yml
+ - 2.1.2.yml
+ release_date: '2022-03-16'
+ 2.1.3:
+ changes:
+ bugfixes:
+ - postgresql_db - get rid of the deprecated psycopg2 connection alias ``database``
+ in favor of ``dbname`` when psycopg2 is 2.7+ is used (https://github.com/ansible-collections/community.postgresql/issues/194,
+ https://github.com/ansible-collections/community.postgresql/pull/196).
+ major_changes:
+ - postgresql_user - the ``priv`` argument has been deprecated and will be removed
+ in ``community.postgresql 3.0.0``. Please use the ``postgresql_privs`` module
+ to grant/revoke privileges instead (https://github.com/ansible-collections/community.postgresql/issues/212).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 2.1.2.'
+ fragments:
+ - 0-postgresql_db_deprecated_alias.yml
+ - 0-postgresql_user_depraction_of_privs.yml
+ - 2.1.3.yml
+ release_date: '2022-04-12'
+ 2.1.4:
+ changes:
+ major_changes:
+ - The community.postgresql collection no longer supports ``Ansible 2.9`` and
+ ``ansible-base 2.10``. While we take no active measures to prevent usage and
+ there are no plans to introduce incompatible code to the modules, we will
+ stop testing against ``Ansible 2.9`` and ``ansible-base 2.10``. Both will
+ very soon be End of Life and if you are still using them, you should consider
+ upgrading to the ``latest Ansible / ansible-core 2.11 or later`` as soon as
+ possible (https://github.com/ansible-collections/community.postgresql/pull/245).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 2.1.3.'
+ fragments:
+ - 2.1.4.yml
+ - remove-2.9-2.10-compatability.yml
+ release_date: '2022-04-28'
+ 2.1.5:
+ changes:
+ bugfixes:
+ - Include ``PSF-license.txt`` file for ``plugins/module_utils/_version.py``.
+ - collection core functions - fix attribute error `nonetype` by always calling
+ `ensure_required_libs` (https://github.com/ansible-collections/community.postgresql/issues/252).
+ release_summary: 'This is the bugfix release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+      have been added after the release of ``community.postgresql`` 2.1.4.'
+ fragments:
+ - 2.1.5.yml
+ - 252-fix-none-attribute-error.yml
+ - psf-license.yml
+ release_date: '2022-05-16'
+ 2.2.0:
+ changes:
+ bugfixes:
+ - Include ``simplified_bsd.txt`` license file for various module utils.
+ - postgresql_info - fix pg version parsing (https://github.com/ansible-collections/community.postgresql/issues/315).
+ - postgresql_ping - fix pg version parsing (https://github.com/ansible-collections/community.postgresql/issues/315).
+ - postgresql_privs.py - add functionality when the PostgreSQL version is 9.0.0
+ or greater to incorporate ``ALL x IN SCHEMA`` syntax (https://github.com/ansible-collections/community.postgresql/pull/282).
+ Please see the official documentation for details regarding grants (https://www.postgresql.org/docs/9.0/sql-grant.html).
+ - postgresql_subscription - fix idempotence by casting the ``connparams`` dict
+ variable (https://github.com/ansible-collections/community.postgresql/issues/280).
+ - postgresql_user - add ``alter user``-statements in the return value ``queries``
+ (https://github.com/ansible-collections/community.postgresql/issues/307).
+ major_changes:
+ - postgresql_user - the ``groups`` argument has been deprecated and will be
+ removed in ``community.postgresql 3.0.0``. Please use the ``postgresql_membership``
+ module to specify group/role memberships instead (https://github.com/ansible-collections/community.postgresql/issues/277).
+ minor_changes:
+ - postgresql_membership - add the ``exact`` state value to be able to specify
+ a list of only groups a user must be a member of (https://github.com/ansible-collections/community.postgresql/issues/277).
+ - 'postgresql_pg_hba - add argument ``overwrite`` (bool, default: false) to
+ remove unmanaged rules (https://github.com/ansible-collections/community.postgresql/issues/297).'
+ - 'postgresql_pg_hba - add argument ``rules_behavior`` (choices: conflict (default),
+ combine) to fail when ``rules`` and normal rule-specific arguments are given
+ or, when ``combine``, use them as defaults for the ``rules`` items (https://github.com/ansible-collections/community.postgresql/issues/297).'
+ - postgresql_pg_hba - add argument ``rules`` to specify a list of rules using
+ the normal rule-specific argument in each item (https://github.com/ansible-collections/community.postgresql/issues/297).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 2.1.5.'
+ fragments:
+ - 0-postgresql_user-deprecate-privs-manipulation.yml
+ - 2.2.0.yml
+ - 285-postgresql_subscription_fix_idempontece.yml
+ - 293-postgresql_membership_exact_value.yml
+ - 303-postgresql_pg_hba_add_bulk_rule_arguments.yml
+ - 308-postgresql_user_alter_statements_return.yml
+ - 316-postgresql_ping_fix_pg_version_parsing.yml
+ - all_in_schema.yml
+ - simplified-bsd-license.yml
+ release_date: '2022-07-27'
+ 2.3.0:
+ changes:
+ bugfixes:
+ - postgresql_info - make arguments passed to SHOW command properly quoted to
+ prevent the interpreter evaluating them (https://github.com/ansible-collections/community.postgresql/issues/314).
+ - postgresql_pg_hba - support the connection types ``hostgssenc`` and ``hostnogssenc``
+ (https://github.com/ansible-collections/community.postgresql/pull/351).
+ - postgresql_privs - add support for alter default privileges grant usage on
+ schemas (https://github.com/ansible-collections/community.postgresql/issues/332).
+ - postgresql_privs - cannot grant select on objects in all schemas; add the
+ ``not-specified`` value to the ``schema`` parameter to make this possible
+ (https://github.com/ansible-collections/community.postgresql/issues/332).
+ - postgresql_set - avoid postgres puts extra quotes when passing values containing
+ commas (https://github.com/ansible-collections/community.postgresql/issues/78).
+ - postgresql_user - make the module idempotent when password is scram hashed
+ (https://github.com/ansible-collections/community.postgresql/issues/301).
+ minor_changes:
+ - postgresql_* - add the ``connect_params`` parameter dict to allow any additional
+ ``libpg`` connection parameters (https://github.com/ansible-collections/community.postgresql/pull/329).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 2.2.0.'
+ fragments:
+ - 0-postgresql_set_avoid_quoting.yml
+ - 2.3.0.yml
+ - 301-postgresql_user_idempotent_scram.yml
+ - 314-postgresql_info-quote-show-args.yml
+ - 329-postgresql_add_connect_params_field.yml
+ - 332-postgresql_privs_def_privs_schemas.yml
+ - 351-postgresql_pg_hba-add-connection-types.yml
+ release_date: '2022-11-04'
+ 2.3.1:
+ changes:
+ bugfixes:
+ - postgresql_privs - fails with ``type=default_privs``, ``privs=ALL``, ``objs=ALL_DEFAULT``
+ (https://github.com/ansible-collections/community.postgresql/issues/373).
+ release_summary: 'This is the bugfix release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after release 2.3.0.'
+ fragments:
+ - 0-postgresql_privs.yml
+ - 2.3.1.yml
+ release_date: '2022-11-25'
+ 2.3.2:
+ changes:
+ bugfixes:
+ - postgresql_pg_hba - fix ``changed`` return value for when ``overwrite`` is
+ enabled (https://github.com/ansible-collections/community.postgresql/pull/378).
+ - postgresql_privs - fix quoting of the ``schema`` parameter in SQL statements
+ (https://github.com/ansible-collections/community.postgresql/pull/382).
+ - 'postgresql_privs - raise an error when the ``objs: ALL_IN_SCHEMA`` is used
+ with a value of ``type`` that is not ``table``, ``sequence``, ``function``
+ or ``procedure`` (https://github.com/ansible-collections/community.postgresql/issues/379).'
+ release_summary: 'This is the bugfix release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after release 2.3.1.'
+ fragments:
+ - 2.3.2.yml
+ - 378-postgresql_pg_hba_fix_change_detection.yml
+ - 379-postgresql_privs.yml
+ - 382-postgresql_privs_fix_schemas_with_special_names.yml
+ release_date: '2022-12-09'
+ 2.4.0:
+ changes:
+ bugfixes:
+    - postgresql_info - add support for non-numeric extension versions (https://github.com/ansible-collections/community.postgresql/issues/428).
+ - postgresql_info - when getting information about subscriptions, check the
+ list of available columns in the pg_subscription table (https://github.com/ansible-collections/community.postgresql/issues/429).
+ - postgresql_privs - fix connect_params being ignored (https://github.com/ansible-collections/community.postgresql/issues/450).
+ - postgresql_query - could crash under certain conditions because of a missing
+ import to `psycopg2.extras` (https://github.com/ansible-collections/community.postgresql/issues/283).
+ - postgresql_set - avoid throwing ValueError for IP addresses and other values
+ that may look like a number, but which are not (https://github.com/ansible-collections/community.postgresql/pull/422).
+ - postgresql_set - avoid wrong values for single-value parameters containing
+ commas (https://github.com/ansible-collections/community.postgresql/pull/400).
+ - postgresql_user - properly close DB connections to prevent possible connection
+ limit exhaustion (https://github.com/ansible-collections/community.postgresql/issues/431).
+ major_changes:
+ - postgresql_privs - the ``password`` argument is deprecated and will be removed
+ in community.postgresql 4.0.0, use the ``login_password`` argument instead
+ (https://github.com/ansible-collections/community.postgresql/issues/406).
+ minor_changes:
+ - Add support for module_defaults with action_group ``all`` (https://github.com/ansible-collections/community.postgresql/pull/430).
+ - postgresql - added new parameters ``ssl_cert`` and ``ssl_key`` for ssl connection
+ (https://github.com/ansible-collections/community.postgresql/issues/424).
+ - postgresql - when receiving the connection parameters, the ``PGPORT`` and
+ ``PGUSER`` environment variables are checked. The order of assigning values
+ ``environment variables`` -> ``default values`` -> ``set values`` (https://github.com/ansible-collections/community.postgresql/issues/311).
+ - postgresql_query - a list of queries can be passed as the ``query`` argument's
+ value, the results will be stored in the ``query_all_results`` return value
+ (is not deprecated anymore, as well as ``query_list``) (https://github.com/ansible-collections/community.postgresql/issues/312).
+ release_summary: 'This is the minor release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after the release of ``community.postgresql`` 2.3.2.'
+ fragments:
+ - 0-postgres.yml
+ - 0-postgresql_info.yml
+ - 0-postgresql_query.yml
+ - 0-postgresql_set_avoid_handle_ip_addresses.yml
+ - 0-postgresql_set_avoid_wrong_values.yml
+ - 0-postgresql_user.yml
+ - 1-postgres.yml
+ - 1-postgresql_privs_deprecate_password.yml
+ - 2.4.0.yml
+ - 399-missing-import.yml
+ - 428-postgres_info_support_non_numeric_extenstion_version.yml
+ - 430-action_group_all_for_module_defaults.yml
+ - 451-postgresql_privs_fix_connect_params_being_ignored.yml
+ release_date: '2023-05-04'
+ 2.4.1:
+ changes:
+ bugfixes:
+ - postgresql_privs - fix a breaking change related to handling the ``password``
+ argument (https://github.com/ansible-collections/community.postgresql/pull/463).
+ release_summary: 'This is the bugfix release of the ``community.postgresql``
+ collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after release 2.4.0.'
+ fragments:
+ - 0-postgresql_privs.yml
+ - 2.4.1.yml
+ release_date: '2023-05-05'
+ 2.4.2:
+ changes:
+ bugfixes:
+ - postgresql_db - when the task is completed successfully, close the database
+ connection (https://github.com/ansible-collections/community.postgresql/issues/465).
+ - postgresql_info - when the task is completed successfully, close the database
+ connection (https://github.com/ansible-collections/community.postgresql/issues/465).
+ - postgresql_ping - when the task is completed successfully, close the database
+ connection (https://github.com/ansible-collections/community.postgresql/issues/465).
+ - postgresql_privs - when the task is completed successfully, close the database
+ connection (https://github.com/ansible-collections/community.postgresql/issues/465).
+ release_summary: 'This is a bugfix release of the ``community.postgresql`` collection.
+
+ This changelog contains all changes to the modules in this collection that
+
+ have been added after release 2.4.1.'
+ fragments:
+ - 0-postgresql_db.yml
+ - 0-postgresql_info.yml
+ - 2.4.2.yml
+ release_date: '2023-06-09'
diff --git a/ansible_collections/community/postgresql/changelogs/config.yaml b/ansible_collections/community/postgresql/changelogs/config.yaml
new file mode 100644
index 000000000..70f0481a7
--- /dev/null
+++ b/ansible_collections/community/postgresql/changelogs/config.yaml
@@ -0,0 +1,29 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: Community PostgreSQL Collection
+trivial_section_name: trivial
diff --git a/ansible_collections/community/postgresql/changelogs/fragments/.keep b/ansible_collections/community/postgresql/changelogs/fragments/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/postgresql/changelogs/fragments/.keep
diff --git a/ansible_collections/community/postgresql/docs/docsite/links.yml b/ansible_collections/community/postgresql/docs/docsite/links.yml
new file mode 100644
index 000000000..6d694b7cc
--- /dev/null
+++ b/ansible_collections/community/postgresql/docs/docsite/links.yml
@@ -0,0 +1,45 @@
+---
+# This will make sure that plugin and module documentation gets Edit on GitHub links
+# that allow users to directly create a PR for this plugin or module in GitHub's UI.
+# Remove this section if the collection repository is not on GitHub, or if you do not want this
+# functionality for your collection.
+edit_on_github:
+ repository: ansible-collections/community.postgresql
+ branch: main
+ # If your collection root (the directory containing galaxy.yml) does not coincide with your
+ # repository's root, you have to specify the path to the collection root here. For example,
+ # if the collection root is in a subdirectory ansible_collections/community/postgresql
+ # in your repository, you have to set path_prefix to 'ansible_collections/community/postgresql'.
+ path_prefix: ''
+
+# Here you can add arbitrary extra links. Please keep the number of links down to a
+# minimum! Also please keep the description short, since this will be the text put on
+# a button.
+#
+# Also note that some links are automatically added from information in galaxy.yml.
+# The following are automatically added:
+# 1. A link to the issue tracker (if `issues` is specified);
+# 2. A link to the homepage (if `homepage` is specified and does not equal the
+# `documentation` or `repository` link);
+# 3. A link to the collection's repository (if `repository` is specified).
+
+extra_links:
+ - description: Report an issue
+ url: https://github.com/ansible-collections/community.postgresql/issues/new/choose
+
+# Specify communication channels for your collection. We suggest to not specify more
+# than one place for communication per communication tool to avoid confusion.
+communication:
+ matrix_rooms:
+ - topic: General usage and support questions
+ room: '#postgresql:ansible.com'
+ irc_channels:
+ - topic: General usage and support questions
+ network: Libera
+ channel: '#ansible'
+ mailing_lists:
+ - topic: Ansible Project List
+ url: https://groups.google.com/g/ansible-project
+ # You can also add a `subscribe` field with an URI that allows to subscribe
+ # to the mailing list. For lists on https://groups.google.com/ a subscribe link is
+ # automatically generated.
diff --git a/ansible_collections/community/postgresql/meta/runtime.yml b/ansible_collections/community/postgresql/meta/runtime.yml
new file mode 100644
index 000000000..f7fa752ae
--- /dev/null
+++ b/ansible_collections/community/postgresql/meta/runtime.yml
@@ -0,0 +1,27 @@
+---
+requires_ansible: '>=2.9.10'
+action_groups:
+ all:
+ - postgresql_copy
+ - postgresql_db
+ - postgresql_ext
+ - postgresql_idx
+ - postgresql_info
+ - postgresql_lang
+ - postgresql_membership
+ - postgresql_owner
+ - postgresql_pg_hba
+ - postgresql_ping
+ - postgresql_privs
+ - postgresql_publication
+ - postgresql_query
+ - postgresql_schema
+ - postgresql_script
+ - postgresql_sequence
+ - postgresql_set
+ - postgresql_slot
+ - postgresql_subscription
+ - postgresql_table
+ - postgresql_tablespace
+ - postgresql_user
+ - postgresql_user_obj_stat_info
diff --git a/ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py b/ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py
new file mode 100644
index 000000000..be74a4552
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
class ModuleDocFragment(object):
    """Shared documentation fragment for community.postgresql modules.

    Modules reference this fragment to inherit the common connection
    options, attributes, and usage notes.
    """

    # Postgres documentation fragment
    # Fix: "libpg" -> "libpq" (the PostgreSQL client library) in the
    # connect_params description.
    DOCUMENTATION = r'''
options:
  login_user:
    description:
    - The username this module should use to establish its PostgreSQL session.
    type: str
    default: postgres
    aliases: [ login ]
  login_password:
    description:
    - The password this module should use to establish its PostgreSQL session.
    type: str
    default: ''
  login_host:
    description:
    - Host running the database.
    - If you have connection issues when using C(localhost), try to use C(127.0.0.1) instead.
    default: ''
    type: str
    aliases: [ host ]
  login_unix_socket:
    description:
    - Path to a Unix domain socket for local connections.
    type: str
    default: ''
    aliases: [ unix_socket ]
  port:
    description:
    - Database port to connect to.
    type: int
    default: 5432
    aliases: [ login_port ]
  ssl_mode:
    description:
    - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
    - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
    - Default of C(prefer) matches libpq default.
    type: str
    default: prefer
    choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
  ca_cert:
    description:
    - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
    - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
    type: str
    aliases: [ ssl_rootcert ]
  ssl_cert:
    description:
    - Specifies the file name of the client SSL certificate.
    type: path
    version_added: '2.4.0'
  ssl_key:
    description:
    - Specifies the location for the secret key used for the client certificate.
    type: path
    version_added: '2.4.0'
  connect_params:
    description:
    - Any additional parameters to be passed to libpq.
    - These parameters take precedence.
    type: dict
    default: {}
    version_added: '2.3.0'

attributes:
  check_mode:
    description: Can run in check_mode and return changed status prediction without modifying target.

notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- To avoid "Peer authentication failed for user postgres" error,
  use postgres user as a I(become_user).
- This module uses C(psycopg2), a Python PostgreSQL database adapter. You must
  ensure that C(psycopg2) is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
  PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages
  on the remote host before using this module.
- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3.

requirements: [ psycopg2 ]
'''
diff --git a/ansible_collections/community/postgresql/plugins/module_utils/_version.py b/ansible_collections/community/postgresql/plugins/module_utils/_version.py
new file mode 100644
index 000000000..0a34929e9
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/module_utils/_version.py
@@ -0,0 +1,335 @@
+# Vendored copy of distutils/version.py from CPython 3.9.5
+#
+# Implements multiple version numbering conventions for the
+# Python Module Distribution Utilities.
+#
+# PSF License (see PSF-license.txt or https://opensource.org/licenses/Python-2.0)
+#
+
+"""Provides classes to represent module version numbers (one class for
+each style of version numbering). There are currently two such classes
+implemented: StrictVersion and LooseVersion.
+Every version number class implements the following interface:
+ * the 'parse' method takes a string and parses it to some internal
+ representation; if the string is an invalid version number,
+ 'parse' raises a ValueError exception
+ * the class constructor takes an optional string argument which,
+ if supplied, is passed to 'parse'
+ * __str__ reconstructs the string that was passed to 'parse' (or
+ an equivalent string -- ie. one that will generate an equivalent
+ version number instance)
+ * __repr__ generates Python code to recreate the version number instance
+ * _cmp compares the current instance with either another instance
+ of the same class or a string (which will be parsed to an instance
+ of the same class, thus must follow the same rules)
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+try:
+ RE_FLAGS = re.VERBOSE | re.ASCII
+except AttributeError:
+ RE_FLAGS = re.VERBOSE
+
+
class Version:
    """Abstract base for version-numbering classes.

    Supplies the constructor, ``__repr__``, and the rich-comparison
    operators; concrete subclasses implement ``parse`` and ``_cmp``.
    Each comparison delegates to ``_cmp`` and propagates
    ``NotImplemented`` untouched so Python can try the reflected
    operation.
    """

    def __init__(self, vstring=None):
        # A falsy vstring (None or '') leaves the instance unparsed.
        if vstring:
            self.parse(vstring)

    def __repr__(self):
        return "%s ('%s')" % (self.__class__.__name__, str(self))

    def __eq__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome == 0

    def __lt__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome < 0

    def __le__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome <= 0

    def __gt__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome > 0

    def __ge__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome >= 0
+
+
+# Interface for version-number classes -- must be implemented
+# by the following classes (the concrete ones -- Version should
+# be treated as an abstract class).
+# __init__ (string) - create and take same action as 'parse'
+# (string parameter is optional)
+# parse (string) - convert a string representation to whatever
+# internal representation is appropriate for
+# this style of version numbering
+# __str__ (self) - convert back to a string; should be very similar
+# (if not identical to) the string supplied to parse
+# __repr__ (self) - generate Python code to recreate
+# the instance
+# _cmp (self, other) - compare two version numbers ('other' may
+# be an unparsed version string, or another
+# instance of your version class)
+
+
class StrictVersion(Version):
    """Strict two/three-component version numbers with optional pre-release.

    A version is ``N.N[.N]`` optionally followed by ``a<num>`` or
    ``b<num>`` (alpha/beta pre-release).  A missing patch level counts
    as 0, so ``0.4 == 0.4.0``.  When numeric parts are equal, a version
    carrying a pre-release tag sorts before the plain release.

    Valid: ``0.4``, ``0.4.0``, ``0.5a1``, ``1.0.4b1``.
    Invalid: ``1``, ``2.7.2.2``, ``1.3.a4``, ``1.3pl1``, ``1.3c4``.
    """

    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
                            RE_FLAGS)

    def parse(self, vstring):
        """Parse *vstring* into ``self.version`` and ``self.prerelease``."""
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError("invalid version number '%s'" % vstring)

        major, minor, patch, prerelease, prerelease_num = \
            match.group(1, 2, 4, 5, 6)

        # Normalize a missing patch level to 0 so 0.4 == 0.4.0.
        self.version = (int(major), int(minor), int(patch) if patch else 0)

        # prerelease looks like 'a1' / 'b3'; store as ('a', 1), else None.
        self.prerelease = (prerelease[0], int(prerelease_num)) if prerelease else None

    def __str__(self):
        # A zero patch level is omitted on output.
        parts = self.version[0:2] if self.version[2] == 0 else self.version
        vstring = '.'.join(str(p) for p in parts)

        if self.prerelease:
            vstring += self.prerelease[0] + str(self.prerelease[1])

        return vstring

    def _cmp(self, other):
        """Three-way compare; plain strings are parsed first."""
        if isinstance(other, str):
            other = StrictVersion(other)
        elif not isinstance(other, StrictVersion):
            return NotImplemented

        # Numeric components decide first; pre-release only breaks ties.
        if self.version != other.version:
            return -1 if self.version < other.version else 1

        # Equal numeric parts:
        #  - same prerelease (both None or equal tuples): equal
        #  - only self has one: self is earlier
        #  - only other has one: self is later
        #  - both have one: compare the tuples
        if self.prerelease == other.prerelease:
            return 0
        if self.prerelease is None:
            return 1
        if other.prerelease is None:
            return -1
        return -1 if self.prerelease < other.prerelease else 1

# end class StrictVersion
+
+# end class StrictVersion
+
+# The rules according to Greg Stein:
+# 1) a version number has 1 or more numbers separated by a period or by
+# sequences of letters. If only periods, then these are compared
+# left-to-right to determine an ordering.
+# 2) sequences of letters are part of the tuple for comparison and are
+# compared lexicographically
+# 3) recognize the numeric components may have leading zeroes
+#
+# The LooseVersion class below implements these rules: a version number
+# string is split up into a tuple of integer and string components, and
+# comparison is a simple tuple comparison. This means that version
+# numbers behave in a predictable and obvious way, but a way that might
+# not necessarily be how people *want* version numbers to behave. There
+# wouldn't be a problem if people could stick to purely numeric version
+# numbers: just split on period and compare the numbers as tuples.
+# However, people insist on putting letters into their version numbers;
+# the most common purpose seems to be:
+# - indicating a "pre-release" version
+# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
+# - indicating a post-release patch ('p', 'pl', 'patch')
+# but of course this can't cover all version number schemes, and there's
+# no way to know what a programmer means without asking him.
+#
+# The problem is what to do with letters (and other non-numeric
+# characters) in a version number. The current implementation does the
+# obvious and predictable thing: keep them as strings and compare
+# lexically within a tuple comparison. This has the desired effect if
+# an appended letter sequence implies something "post-release":
+# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
+#
+# However, if letters in a version number imply a pre-release version,
+# the "obvious" thing isn't correct. Eg. you would expect that
+# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
+# implemented here, this just isn't so.
+#
+# Two possible solutions come to mind. The first is to tie the
+# comparison algorithm to a particular set of semantic rules, as has
+# been done in the StrictVersion class above. This works great as long
+# as everyone can go along with bondage and discipline. Hopefully a
+# (large) subset of Python module programmers will agree that the
+# particular flavour of bondage and discipline provided by StrictVersion
+# provides enough benefit to be worth using, and will submit their
+# version numbering scheme to its domination. The free-thinking
+# anarchists in the lot will never give in, though, and something needs
+# to be done to accommodate them.
+#
+# Perhaps a "moderately strict" version class could be implemented that
+# lets almost anything slide (syntactically), and makes some heuristic
+# assumptions about non-digits in version number strings. This could
+# sink into special-case-hell, though; if I was as talented and
+# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
+# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
+# just as happy dealing with things like "2g6" and "1.13++". I don't
+# think I'm smart enough to do it right though.
+#
+# In any case, I've coded the test suite for this module (see
+# ../test/test_version.py) specifically to fail on things like comparing
+# "1.2a2" and "1.2". That's not because the *code* is doing anything
+# wrong, it's because the simple, obvious design doesn't match my
+# complicated, hairy expectations for real-world version numbers. It
+# would be a snap to fix the test suite to say, "Yep, LooseVersion does
+# the Right Thing" (ie. the code matches the conception). But I'd rather
+# have a conception that matches common notions about version numbers.
+
+
class LooseVersion(Version):
    """Lenient version numbers: any mix of digits, letters, and dots.

    The string is split into integer and string components and stored as
    a list; comparison is a plain list comparison.  Every string is a
    valid version under this scheme — the ordering is predictable but
    not always what humans expect (e.g. ``1.5.2a2`` sorts after
    ``1.5.2``).
    """

    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)

    def __init__(self, vstring=None):
        if vstring:
            self.parse(vstring)

    def parse(self, vstring):
        """Split *vstring* into ``self.version`` components."""
        # The original string is kept verbatim for __str__.
        self.vstring = vstring

        parts = []
        for piece in self.component_re.split(vstring):
            if not piece or piece == '.':
                continue  # drop separators and empty split artifacts
            try:
                parts.append(int(piece))
            except ValueError:
                parts.append(piece)

        self.version = parts

    def __str__(self):
        return self.vstring

    def __repr__(self):
        return "LooseVersion ('%s')" % str(self)

    def _cmp(self, other):
        """Three-way compare; plain strings are parsed first."""
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented

        if self.version == other.version:
            return 0
        return -1 if self.version < other.version else 1

# end class LooseVersion
+
+# end class LooseVersion
diff --git a/ansible_collections/community/postgresql/plugins/module_utils/database.py b/ansible_collections/community/postgresql/plugins/module_utils/database.py
new file mode 100644
index 000000000..8aba6aad8
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/module_utils/database.py
@@ -0,0 +1,193 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# Simplified BSD License (see simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from ansible.module_utils._text import to_native
+
+
# Input patterns for is_input_dangerous function:
#
# 1. '"' in string and '--' in string or
# "'" in string and '--' in string
# (a quote character followed later by an SQL comment marker)
PATTERN_1 = re.compile(r'(\'|\").*--')

# 2. union \ intersect \ except + select
# (set-operation keyword followed by a SELECT, case-insensitive)
PATTERN_2 = re.compile(r'(UNION|INTERSECT|EXCEPT).*SELECT', re.IGNORECASE)

# 3. ';' and any KEY_WORDS
# (statement terminator followed by another SQL statement keyword)
PATTERN_3 = re.compile(r';.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)', re.IGNORECASE)
+
+
class SQLParseError(Exception):
    """Raised when an SQL identifier cannot be parsed or quoted safely."""
    pass
+
+
class UnclosedQuoteError(SQLParseError):
    """Raised when a quoted identifier has no matching closing quote."""
    pass
+
+
# maps a type of identifier to the maximum number of dot levels that are
# allowed to specify that identifier. For example, a database column can be
# specified by up to 4 levels: database.schema.table.column
_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
    database=1,
    schema=2,
    table=3,
    column=4,
    role=1,
    tablespace=1,
    sequence=3,
    publication=1,
)
# Same idea for MySQL identifiers (MySQL has no schema level between
# database and table).
_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
+
+
+def _find_end_quote(identifier, quote_char):
+ accumulate = 0
+ while True:
+ try:
+ quote = identifier.index(quote_char)
+ except ValueError:
+ raise UnclosedQuoteError
+ accumulate = accumulate + quote
+ try:
+ next_char = identifier[quote + 1]
+ except IndexError:
+ return accumulate
+ if next_char == quote_char:
+ try:
+ identifier = identifier[quote + 2:]
+ accumulate = accumulate + 2
+ except IndexError:
+ raise UnclosedQuoteError
+ else:
+ return accumulate
+
+
def _identifier_parse(identifier, quote_char):
    """Split a (possibly dotted, possibly pre-quoted) SQL identifier into
    its fragments, each quoted with *quote_char*.

    Returns a list of quoted fragments; raises SQLParseError on malformed
    input (empty name, unquoted trailing dot, or bad user escaping).
    """
    if not identifier:
        raise SQLParseError('Identifier name unspecified or unquoted trailing dot')

    # If the identifier starts with a quote, try to honor the user's own
    # quoting; fall back to treating it as unquoted if the quote is
    # never closed.
    already_quoted = False
    if identifier.startswith(quote_char):
        already_quoted = True
        try:
            end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
        except UnclosedQuoteError:
            already_quoted = False
        else:
            if end_quote < len(identifier) - 1:
                # Something follows the closing quote: it must be a dot
                # separating the next fragment, which is parsed recursively.
                if identifier[end_quote + 1] == '.':
                    dot = end_quote + 1
                    first_identifier = identifier[:dot]
                    next_identifier = identifier[dot + 1:]
                    further_identifiers = _identifier_parse(next_identifier, quote_char)
                    further_identifiers.insert(0, first_identifier)
                else:
                    raise SQLParseError('User escaped identifiers must escape extra quotes')
            else:
                # The whole identifier is one properly quoted fragment.
                further_identifiers = [identifier]

    if not already_quoted:
        try:
            dot = identifier.index('.')
        except ValueError:
            # No dot: quote the whole name, doubling any embedded quotes.
            identifier = identifier.replace(quote_char, quote_char * 2)
            identifier = ''.join((quote_char, identifier, quote_char))
            further_identifiers = [identifier]
        else:
            if dot == 0 or dot >= len(identifier) - 1:
                # A leading or trailing dot is part of the name itself,
                # not a fragment separator: quote the whole string.
                identifier = identifier.replace(quote_char, quote_char * 2)
                identifier = ''.join((quote_char, identifier, quote_char))
                further_identifiers = [identifier]
            else:
                # Split on the first dot, recurse on the remainder, and
                # prepend the quoted first fragment.
                first_identifier = identifier[:dot]
                next_identifier = identifier[dot + 1:]
                further_identifiers = _identifier_parse(next_identifier, quote_char)
                first_identifier = first_identifier.replace(quote_char, quote_char * 2)
                first_identifier = ''.join((quote_char, first_identifier, quote_char))
                further_identifiers.insert(0, first_identifier)

    return further_identifiers
+
+
def pg_quote_identifier(identifier, id_type):
    """Quote a (possibly dotted) PostgreSQL identifier with double quotes.

    Raises SQLParseError when *identifier* has more dot levels than
    PostgreSQL allows for *id_type* (see _PG_IDENTIFIER_TO_DOT_LEVEL).
    """
    fragments = _identifier_parse(identifier, quote_char='"')
    max_level = _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]
    if len(fragments) > max_level:
        raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, max_level))
    return '.'.join(fragments)
+
+
def mysql_quote_identifier(identifier, id_type):
    """Quote a (possibly dotted) MySQL identifier with backticks.

    Raises SQLParseError when *identifier* has more dot levels than
    MySQL allows for *id_type* (see _MYSQL_IDENTIFIER_TO_DOT_LEVEL).
    """
    fragments = _identifier_parse(identifier, quote_char='`')
    if (len(fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
        raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))

    # A quoted '*' fragment means "all columns" and must stay unquoted.
    return '.'.join('*' if fragment == '`*`' else fragment for fragment in fragments)
+
+
def is_input_dangerous(string):
    """Check if the passed string is potentially dangerous.
    Can be used to prevent SQL injections.

    Note: use this function only when you can't use
    psycopg2's cursor.execute method parametrized
    (typically with DDL queries).
    """
    # Empty / None input cannot carry an injection.
    if not string:
        return False

    # Dangerous as soon as any known injection pattern matches.
    return any(pattern.search(string) is not None
               for pattern in (PATTERN_1, PATTERN_2, PATTERN_3))
+
+
def check_input(module, *args):
    """Check all passed arguments for potentially dangerous input.

    Scans strings, list elements, and the string form of other values
    with is_input_dangerous; calls module.fail_json listing the
    offending values if any are found.
    """
    dangerous_elements = []

    for elem in args:
        try:
            if elem is None or isinstance(elem, bool):
                # Nothing to check for NoneType / bool values.
                continue

            if isinstance(elem, list):
                dangerous_elements.extend(e for e in elem if is_input_dangerous(e))
            else:
                # Strings are checked as-is; other values are stringified.
                candidate = elem if isinstance(elem, str) else str(elem)
                if is_input_dangerous(candidate):
                    dangerous_elements.append(candidate)
        except ValueError as e:
            module.fail_json(msg=to_native(e))

    if dangerous_elements:
        module.fail_json(msg="Passed input '%s' is "
                             "potentially dangerous" % ', '.join(dangerous_elements))
diff --git a/ansible_collections/community/postgresql/plugins/module_utils/postgres.py b/ansible_collections/community/postgresql/plugins/module_utils/postgres.py
new file mode 100644
index 000000000..e4a44df56
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/module_utils/postgres.py
@@ -0,0 +1,477 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
+# Most of this was originally added by other creators in the postgresql_user module.
+#
+# Simplified BSD License (see simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from datetime import timedelta
+from decimal import Decimal
+from os import environ
+
+psycopg2 = None # This line needs for unit tests
+try:
+ import psycopg2
+ import psycopg2.extras
+ HAS_PSYCOPG2 = True
+except ImportError:
+ HAS_PSYCOPG2 = False
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.postgresql.plugins.module_utils.version import LooseVersion
+
+TYPES_NEED_TO_CONVERT = (Decimal, timedelta)
+
+
def postgres_common_argument_spec():
    """
    Return a dictionary with connection options.

    The options are commonly used by most of PostgreSQL modules.
    """
    # PGUSER / PGPORT environment variables override the static defaults.
    pguser = environ.get("PGUSER")
    pgport = environ.get("PGPORT")

    return {
        'login_user': {
            'default': pguser if pguser else 'postgres',
            'aliases': ['login'],
        },
        'login_password': {'default': '', 'no_log': True},
        'login_host': {'default': '', 'aliases': ['host']},
        'login_unix_socket': {'default': '', 'aliases': ['unix_socket']},
        'port': {
            'type': 'int',
            'default': int(pgport) if pgport else 5432,
            'aliases': ['login_port'],
        },
        'ssl_mode': {
            'default': 'prefer',
            'choices': [
                'allow',
                'disable',
                'prefer',
                'require',
                'verify-ca',
                'verify-full',
            ],
        },
        'ca_cert': {'aliases': ['ssl_rootcert']},
        'ssl_cert': {'type': 'path'},
        'ssl_key': {'type': 'path'},
        'connect_params': {'default': {}, 'type': 'dict'},
    }
+
+
def ensure_required_libs(module):
    """Check required libraries.

    Fails the module when psycopg2 is missing and, if the ca_cert option
    is in use, when psycopg2 is older than 2.4.3 (required for the
    ca_cert/sslrootcert parameter per the error message below).
    """
    if not HAS_PSYCOPG2:
        module.fail_json(msg=missing_required_lib('psycopg2'))

    if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+
def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
    """Connect to a PostgreSQL database.

    Return a tuple containing a psycopg2 connection object and error message / None.

    Args:
        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
        conn_params (dict) -- dictionary with connection parameters

    Kwargs:
        autocommit (bool) -- commit automatically (default False)
        fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
    """

    db_connection = None
    conn_err = None
    try:
        db_connection = psycopg2.connect(**conn_params)
        if autocommit:
            # set_session(autocommit=...) exists from psycopg2 2.4.2 on;
            # older versions fall back to the isolation-level constant.
            if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
                db_connection.set_session(autocommit=True)
            else:
                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

        # Switch role, if specified:
        if module.params.get('session_role'):
            cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

            try:
                cursor.execute('SET ROLE "%s"' % module.params['session_role'])
            except Exception as e:
                module.fail_json(msg="Could not switch role: %s" % to_native(e))
            finally:
                # Close the cursor whether or not SET ROLE succeeded.
                cursor.close()

    except TypeError as e:
        # NOTE(review): presumably psycopg2 raises TypeError for unknown
        # connect() keywords; an 'sslrootcert' complaint is taken to mean
        # the server/driver combination is too old — confirm.
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least '
                             'version 8.4 to support sslrootcert')

        conn_err = to_native(e)

    except Exception as e:
        conn_err = to_native(e)

    if conn_err is not None:
        # Either abort the module or degrade to a warning and return
        # (None, error), depending on fail_on_conn.
        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % conn_err)
        else:
            module.warn("PostgreSQL server is unavailable: %s" % conn_err)
            db_connection = None

    return db_connection, conn_err
+
+
def exec_sql(obj, query, query_params=None, return_bool=False, add_to_executed=True, dont_exec=False):
    """Execute SQL.

    Auxiliary function for PostgreSQL user classes.

    Returns a query result if possible or a boolean value.

    Args:
        obj (obj) -- must be an object of a user class.
            The object must have module (AnsibleModule class object) and
            cursor (psycopg cursor object) attributes
        query (str) -- SQL query to execute

    Kwargs:
        query_params (dict or tuple) -- Query parameters to prevent SQL injections,
            could be a dict or tuple
        return_bool (bool) -- return True instead of rows if a query was successfully executed.
            It's necessary for statements that don't return any result like DDL queries (default False).
        add_to_executed (bool) -- append the query to obj.executed_queries attribute
        dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
            to obj.executed_queries list and return True (default False)
    """

    if dont_exec:
        # This is usually needed to return queries in check_mode
        # without execution
        query = obj.cursor.mogrify(query, query_params)
        if add_to_executed:
            obj.executed_queries.append(query)

        return True

    try:
        if query_params is not None:
            obj.cursor.execute(query, query_params)
        else:
            obj.cursor.execute(query)

        if add_to_executed:
            # Record the fully-interpolated statement (via mogrify) when
            # parameters were used, otherwise the raw query text.
            if query_params is not None:
                obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
            else:
                obj.executed_queries.append(query)

        if not return_bool:
            res = obj.cursor.fetchall()
            return res
        return True
    except Exception as e:
        obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
    # Only reachable if fail_json does not terminate (e.g. mocked in tests).
    return False
+
+
def get_conn_params(module, params_dict, warn_db_default=True):
    """Get connection parameters from the passed dictionary.

    Return a dictionary with parameters to connect to PostgreSQL server.

    Args:
        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
        params_dict (dict) -- dictionary with variables

    Kwargs:
        warn_db_default (bool) -- warn that the default DB is used (default True)
    """

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the return dictionary
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert",
        "ssl_cert": "sslcert",
        "ssl_key": "sslkey",
    }

    # Might be different in the modules:
    # The DB name option may arrive as db / database / login_db; map it to
    # 'dbname' on psycopg2 >= 2.7.0 and to 'database' on older versions.
    if LooseVersion(psycopg2.__version__) >= LooseVersion('2.7.0'):
        if params_dict.get('db'):
            params_map['db'] = 'dbname'
        elif params_dict.get('database'):
            params_map['database'] = 'dbname'
        elif params_dict.get('login_db'):
            params_map['login_db'] = 'dbname'
        else:
            if warn_db_default:
                module.warn('Database name has not been passed, '
                            'used default database to connect to.')
    else:
        if params_dict.get('db'):
            params_map['db'] = 'database'
        elif params_dict.get('database'):
            params_map['database'] = 'database'
        elif params_dict.get('login_db'):
            params_map['login_db'] = 'database'
        else:
            if warn_db_default:
                module.warn('Database name has not been passed, '
                            'used default database to connect to.')

    # Drop empty-string and None values so libpq defaults apply.
    kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
              if k in params_map and v != '' and v is not None)

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = False
    if 'host' not in kw or kw['host'] in [None, 'localhost']:
        is_localhost = True

    if is_localhost and params_dict["login_unix_socket"] != "":
        kw["host"] = params_dict["login_unix_socket"]

    # If connect_params is specified, merge it together
    # (explicit connect_params override everything computed above).
    if params_dict.get("connect_params"):
        kw.update(params_dict["connect_params"])

    return kw
+
+
class PgRole():
    """A PostgreSQL role together with the group roles it belongs to."""

    def __init__(self, module, cursor, name):
        self.module = module
        self.cursor = cursor
        self.name = name
        # Group roles this role is currently a member of.
        self.memberof = self.__fetch_members()

    def __fetch_members(self):
        """Return the list of group roles containing this role."""
        query = ("SELECT ARRAY(SELECT b.rolname FROM "
                 "pg_catalog.pg_auth_members m "
                 "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
                 "WHERE m.member = r.oid) "
                 "FROM pg_catalog.pg_roles r "
                 "WHERE r.rolname = %(dst_role)s")

        rows = exec_sql(self, query, query_params={'dst_role': self.name},
                        add_to_executed=False)
        # The single result row holds the aggregated name array.
        return rows[0][0] if rows else []
+
+
class PgMembership(object):
    """Manage membership of target roles in group roles.

    Tracks executed queries (executed_queries), which roles were
    granted/revoked per group (granted/revoked), and an overall changed
    flag.  Both role lists are validated against pg_roles on creation.
    """

    def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
        self.module = module
        self.cursor = cursor
        # Whitespace around names is stripped up front.
        self.target_roles = [r.strip() for r in target_roles]
        self.groups = [r.strip() for r in groups]
        self.executed_queries = []
        self.granted = {}   # group -> roles granted during this run
        self.revoked = {}   # group -> roles revoked during this run
        self.fail_on_role = fail_on_role
        self.non_existent_roles = []
        self.changed = False
        self.__check_roles_exist()

    def grant(self):
        """GRANT each group to every target role not already a member.

        Returns the changed flag (True if at least the last GRANT ran).
        """
        for group in self.groups:
            self.granted[group] = []

            for role in self.target_roles:
                role_obj = PgRole(self.module, self.cursor, role)
                # If role is in a group now, pass:
                if group in role_obj.memberof:
                    continue

                query = 'GRANT "%s" TO "%s"' % (group, role)
                self.changed = exec_sql(self, query, return_bool=True)

                if self.changed:
                    self.granted[group].append(role)

        return self.changed

    def revoke(self):
        """REVOKE each group from every target role that is a member."""
        for group in self.groups:
            self.revoked[group] = []

            for role in self.target_roles:
                role_obj = PgRole(self.module, self.cursor, role)
                # If role is not in a group now, pass:
                if group not in role_obj.memberof:
                    continue

                query = 'REVOKE "%s" FROM "%s"' % (group, role)
                self.changed = exec_sql(self, query, return_bool=True)

                if self.changed:
                    self.revoked[group].append(role)

        return self.changed

    def match(self):
        """Make each target role's memberships exactly equal self.groups."""
        for role in self.target_roles:
            role_obj = PgRole(self.module, self.cursor, role)

            desired_groups = set(self.groups)
            current_groups = set(role_obj.memberof)
            # 1. Get groups that the role is member of but not in self.groups and revoke them
            groups_to_revoke = current_groups - desired_groups
            for group in groups_to_revoke:
                query = 'REVOKE "%s" FROM "%s"' % (group, role)
                self.changed = exec_sql(self, query, return_bool=True)
                if group in self.revoked:
                    self.revoked[group].append(role)
                else:
                    self.revoked[group] = [role]

            # 2. Filter out groups that in self.groups and
            # the role is already member of and grant the rest
            groups_to_grant = desired_groups - current_groups
            for group in groups_to_grant:
                query = 'GRANT "%s" TO "%s"' % (group, role)
                self.changed = exec_sql(self, query, return_bool=True)
                if group in self.granted:
                    self.granted[group].append(role)
                else:
                    self.granted[group] = [role]

        return self.changed

    def __check_roles_exist(self):
        """Validate groups and target roles against pg_roles.

        Depending on fail_on_role, either fail the module or warn and
        collect non-existent names, which are then removed from both
        lists.
        """
        if self.groups:
            existent_groups = self.__roles_exist(self.groups)

            for group in self.groups:
                if group not in existent_groups:
                    if self.fail_on_role:
                        self.module.fail_json(msg="Role %s does not exist" % group)
                    else:
                        self.module.warn("Role %s does not exist, pass" % group)
                        self.non_existent_roles.append(group)

        existent_roles = self.__roles_exist(self.target_roles)
        for role in self.target_roles:
            if role not in existent_roles:
                if self.fail_on_role:
                    self.module.fail_json(msg="Role %s does not exist" % role)
                else:
                    self.module.warn("Role %s does not exist, pass" % role)

                if role not in self.groups:
                    self.non_existent_roles.append(role)

                else:
                    # NOTE(review): reached when a non-existent target role is
                    # also listed in groups; the self-membership message below
                    # looks inconsistent with that condition — confirm the
                    # intended semantics upstream.
                    if self.fail_on_role:
                        self.module.exit_json(msg="Role role '%s' is a member of role '%s'" % (role, role))
                    else:
                        self.module.warn("Role role '%s' is a member of role '%s', pass" % (role, role))

        # Update role lists, excluding non existent roles:
        if self.groups:
            self.groups = [g for g in self.groups if g not in self.non_existent_roles]

        self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]

    def __roles_exist(self, roles):
        """Return the subset of *roles* present in pg_roles."""
        tmp = ["'" + x + "'" for x in roles]
        query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
        return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
+
+
def set_search_path(cursor, search_path):
    """Set session's search_path.

    Args:
        cursor (Psycopg2 cursor): Database cursor object.
        search_path (str): String containing comma-separated schema names.
    """
    # NOTE(review): search_path is interpolated directly into the statement
    # (identifiers cannot be driver-parameterized); callers are expected to
    # validate the value (e.g. via check_input) — confirm call sites do so.
    cursor.execute('SET search_path TO %s' % search_path)
+
+
def convert_elements_to_pg_arrays(obj):
    """Convert list elements of the passed object
    to PostgreSQL arrays represented as strings.

    Args:
        obj (dict or list): Object whose elements need to be converted.

    Returns:
        obj (dict or list): The same object, mutated in place.
    """
    if isinstance(obj, dict):
        # Materialize the items first so reassignment during iteration is safe.
        for key, value in list(iteritems(obj)):
            if isinstance(value, list):
                obj[key] = list_to_pg_array(value)
    elif isinstance(obj, list):
        for idx in range(len(obj)):
            if isinstance(obj[idx], list):
                obj[idx] = list_to_pg_array(obj[idx])

    return obj
+
+
def list_to_pg_array(elem):
    """Convert the passed list to PostgreSQL array
    represented as a string.

    Args:
        elem (list): List that needs to be converted.

    Returns:
        elem (str): String representation of PostgreSQL array.
    """
    # str([1, 2]) == '[1, 2]'; swap the square brackets for braces.
    return '{%s}' % str(elem).strip('[]')
+
+
def convert_to_supported(val):
    """Convert unsupported type to appropriate.

    Decimal values become float, timedelta values become str; every
    other value is returned unchanged.

    Args:
        val (any) -- Any value fetched from database.
    Returns value of appropriate type.
    """
    if isinstance(val, Decimal):
        return float(val)
    if isinstance(val, timedelta):
        return str(val)
    return val
+
+
def get_server_version(conn):
    """Get server version.

    Args:
        conn (psycopg.Connection) -- Psycopg connection object.

    Returns server version (int).
    """
    # NOTE(review): this compares psycopg2.__version__ against 3.0.0,
    # presumably to handle psycopg 3 (where the attribute moved to
    # conn.info.server_version) when it is aliased as psycopg2 —
    # confirm against the package's psycopg import strategy.
    if LooseVersion(psycopg2.__version__) >= LooseVersion('3.0.0'):
        return conn.info.server_version
    else:
        return conn.server_version
diff --git a/ansible_collections/community/postgresql/plugins/module_utils/saslprep.py b/ansible_collections/community/postgresql/plugins/module_utils/saslprep.py
new file mode 100644
index 000000000..804200c37
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/module_utils/saslprep.py
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+#
+# Simplified BSD License (see simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from stringprep import (
+ in_table_a1,
+ in_table_b1,
+ in_table_c3,
+ in_table_c4,
+ in_table_c5,
+ in_table_c6,
+ in_table_c7,
+ in_table_c8,
+ in_table_c9,
+ in_table_c12,
+ in_table_c21_c22,
+ in_table_d1,
+ in_table_d2,
+)
+from unicodedata import normalize
+
+from ansible.module_utils.six import text_type
+
+
def is_unicode_str(string):
    """Return True if the passed object is a text (Unicode) string.

    text_type is str on Python 3 and unicode on Python 2.
    """
    # isinstance() already returns a bool; the original
    # "True if ... else False" wrapper was redundant.
    return isinstance(string, text_type)
+
+
def mapping_profile(string):
    """RFC4013 Mapping profile implementation.

    Per RFC4013 section 2.1, this profile specifies:
    - non-ASCII space characters [StringPrep, C.1.2] that can be
      mapped to SPACE (U+0020), and
    - the "commonly mapped to nothing" characters [StringPrep, B.1]
      that can be mapped to nothing.
    """
    return u''.join(
        # Map non-ASCII space characters (that can be mapped) to
        # the Unicode space, keep everything else as-is...
        u' ' if in_table_c12(char) else char
        for char in string
        # ...after dropping the "commonly mapped to nothing" characters.
        if not in_table_b1(char)
    )
+
+
def is_ral_string(string):
    """RFC3454 Check bidirectional category of the string.

    Table D.1 of RFC3454 lists the characters that belong to Unicode
    bidirectional categories "R" and "AL".  If a string contains any
    RandALCat character, a RandALCat character MUST be both the first
    and the last character of the string.

    Returns True for a valid RandALCat string, False for a string that
    does not start with a RandALCat character, and raises ValueError
    for a string that starts with one but does not end with one.
    """
    if not in_table_d1(string[0]):
        return False
    if in_table_d1(string[-1]):
        return True
    raise ValueError('RFC3454: incorrect bidirectional RandALCat string.')
+
+
def prohibited_output_profile(string):
    """RFC4013 Prohibited output profile implementation.

    Args:
        string (unicode string): String to check; expected to already be
            mapped (RFC4013 2.1) and NFKC-normalized (RFC4013 2.2).

    Raises:
        ValueError: If the string contains a prohibited, bidirectionally
            invalid, or unassigned code point.
    """
    # Implements:
    # RFC4013, 2.3. Prohibited Output.
    # This profile specifies the following characters as prohibited input:
    # - Non-ASCII space characters [StringPrep, C.1.2]
    # - ASCII control characters [StringPrep, C.2.1]
    # - Non-ASCII control characters [StringPrep, C.2.2]
    # - Private Use characters [StringPrep, C.3]
    # - Non-character code points [StringPrep, C.4]
    # - Surrogate code points [StringPrep, C.5]
    # - Inappropriate for plain text characters [StringPrep, C.6]
    # - Inappropriate for canonical representation characters [StringPrep, C.7]
    # - Change display properties or deprecated characters [StringPrep, C.8]
    # - Tagging characters [StringPrep, C.9]
    # RFC4013, 2.4. Bidirectional Characters.
    # RFC4013, 2.5. Unassigned Code Points.

    # Determine how to handle bidirectional characters (RFC3454):
    if is_ral_string(string):
        # If a string contains any RandALCat characters,
        # the string MUST NOT contain any LCat character:
        is_prohibited_bidi_ch = in_table_d2
        bidi_table = 'D.2'
    else:
        # Forbid RandALCat characters in LCat string:
        is_prohibited_bidi_ch = in_table_d1
        bidi_table = 'D.1'

    RFC = 'RFC4013'
    for c in string:
        # RFC4013 2.3. Prohibited Output:
        if in_table_c12(c):
            raise ValueError('%s: prohibited non-ASCII space characters '
                             'that cannot be replaced (C.1.2).' % RFC)
        if in_table_c21_c22(c):
            raise ValueError('%s: prohibited control characters (C.2.1).' % RFC)
        if in_table_c3(c):
            raise ValueError('%s: prohibited private Use characters (C.3).' % RFC)
        if in_table_c4(c):
            raise ValueError('%s: prohibited non-character code points (C.4).' % RFC)
        if in_table_c5(c):
            raise ValueError('%s: prohibited surrogate code points (C.5).' % RFC)
        if in_table_c6(c):
            raise ValueError('%s: prohibited inappropriate for plain text '
                             'characters (C.6).' % RFC)
        if in_table_c7(c):
            raise ValueError('%s: prohibited inappropriate for canonical '
                             'representation characters (C.7).' % RFC)
        if in_table_c8(c):
            raise ValueError('%s: prohibited change display properties / '
                             'deprecated characters (C.8).' % RFC)
        if in_table_c9(c):
            raise ValueError('%s: prohibited tagging characters (C.9).' % RFC)

        # RFC4013, 2.4. Bidirectional Characters:
        if is_prohibited_bidi_ch(c):
            raise ValueError('%s: prohibited bidi characters (%s).' % (RFC, bidi_table))

        # RFC4013, 2.5. Unassigned Code Points:
        if in_table_a1(c):
            raise ValueError('%s: prohibited unassigned code points (A.1).' % RFC)
+
+
def saslprep(string):
    """RFC4013 implementation.

    Implements "SASLprep" profile (RFC4013) of the "stringprep" algorithm (RFC3454)
    to prepare Unicode strings representing user names and passwords for comparison.
    Regarding the RFC4013, the "SASLprep" profile is intended to be used by
    Simple Authentication and Security Layer (SASL) mechanisms
    (such as PLAIN, CRAM-MD5, and DIGEST-MD5), as well as other protocols
    exchanging simple user names and/or passwords.

    Args:
        string (unicode string): Unicode string to validate and prepare.

    Returns:
        Prepared unicode string.

    Raises:
        TypeError: If the input is not a Unicode string.
        ValueError: If the string contains prohibited, bidirectionally
            invalid, or unassigned code points (raised from
            prohibited_output_profile()).
    """
    # RFC4013: "The algorithm assumes all strings are
    # comprised of characters from the Unicode [Unicode] character set."
    # Validate the string is a Unicode string
    # (text_type is the string type if PY3 and unicode otherwise):
    if not is_unicode_str(string):
        raise TypeError('input must be of type %s, not %s' % (text_type, type(string)))

    # RFC4013: 2.1. Mapping.
    string = mapping_profile(string)

    # RFC4013: 2.2. Normalization.
    # "This profile specifies using Unicode normalization form KC."
    string = normalize('NFKC', string)
    if not string:
        # Normalization may have removed everything; nothing left to check.
        return u''

    # RFC4013: 2.3. Prohibited Output.
    # RFC4013: 2.4. Bidirectional Characters.
    # RFC4013: 2.5. Unassigned Code Points.
    prohibited_output_profile(string)

    return string
diff --git a/ansible_collections/community/postgresql/plugins/module_utils/version.py b/ansible_collections/community/postgresql/plugins/module_utils/version.py
new file mode 100644
index 000000000..6afaca75e
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/module_utils/version.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Provide version object to compare version numbers."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# Once we drop support for Ansible 2.11, we can
+# remove the _version.py file, and replace the following import by
+#
+# from ansible.module_utils.compat.version import LooseVersion
+
+from ._version import LooseVersion
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py
new file mode 100644
index 000000000..37ee9b80f
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_copy
+short_description: Copy data between a file/program and a PostgreSQL table
+description:
+- Copy data between a file/program and a PostgreSQL table.
+
+options:
+ copy_to:
+ description:
+ - Copy the contents of a table to a file.
+ - Can also copy the results of a SELECT query.
+ - Mutually exclusive with I(copy_from) and I(dst).
+ type: path
+ aliases: [ to ]
+ copy_from:
+ description:
+ - Copy data from a file to a table (appending the data to whatever is in the table already).
+ - Mutually exclusive with I(copy_to) and I(src).
+ type: path
+ aliases: [ from ]
+ src:
+ description:
+ - Copy data from I(copy_from) to I(src=tablename).
+ - Used with I(copy_to) only.
+ type: str
+ aliases: [ source ]
+ dst:
+ description:
+ - Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file).
+ - Used with I(copy_from) only.
+ type: str
+ aliases: [ destination ]
+ columns:
+ description:
+ - List of column names for the src/dst table to COPY FROM/TO.
+ type: list
+ elements: str
+ aliases: [ column ]
+ program:
+ description:
+ - Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
+ - See block Examples and PROGRAM arg description U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: bool
+ default: false
+ options:
+ description:
+ - Options of COPY command.
+ - See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: dict
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases: [ login_db ]
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+notes:
+- Supports PostgreSQL version 9.4+.
+- COPY command is only allowed to database superusers.
+
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - If I(check_mode=true), we just check the src/dst table availability
+ and return the COPY query that actually has not been executed.
+ - If I(check_mode=true) and the source has been passed as SQL, the module
+ will execute it and roll the transaction back, but pay attention
+ it can affect database performance (e.g., if SQL collects a lot of data).
+
+seealso:
+- name: COPY command reference
+ description: Complete reference of the COPY command documentation.
+ link: https://www.postgresql.org/docs/current/sql-copy.html
+
+author:
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
+ community.postgresql.postgresql_copy:
+ copy_from: /tmp/data.txt
+ dst: acme
+
+- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
+ community.postgresql.postgresql_copy:
+ copy_from: /tmp/data.csv
+ dst: acme
+ columns: id,name
+ options:
+ format: csv
+
+- name: >
+ Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
+ The NULL values are specified as N
+ community.postgresql.postgresql_copy:
+ copy_from: /tmp/data.txt
+ dst: bar
+ options:
+ delimiter: '|'
+ null: 'N'
+
+- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
+ community.postgresql.postgresql_copy:
+ src: acme
+ copy_to: /tmp/data.txt
+
+- name: Copy data from SELECT query to /tmp/data.csv in CSV format
+ community.postgresql.postgresql_copy:
+ src: 'SELECT * FROM acme'
+ copy_to: /tmp/data.csv
+ options:
+ format: csv
+
+- name: Copy CSV data from my_table to gzip
+ community.postgresql.postgresql_copy:
+ src: my_table
+ copy_to: 'gzip > /tmp/data.csv.gz'
+ program: true
+ options:
+ format: csv
+
+- name: >
+ Copy data from columns id, name of table bar to /tmp/data.txt.
+ Output format is text, vertical-bar-separated, NULL as N
+ community.postgresql.postgresql_copy:
+ src: bar
+ columns:
+ - id
+ - name
+ copy_to: /tmp/data.txt
+ options:
+ delimiter: '|'
+ null: 'N'
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
+src:
+ description: Data source.
+ returned: always
+ type: str
+ sample: "mytable"
+dst:
+ description: Data destination.
+ returned: always
+ type: str
+ sample: "/tmp/data.csv"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
class PgCopyData(object):

    """Implements behavior of COPY FROM, COPY TO PostgreSQL command.

    Arguments:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library

    Attributes:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        changed (bool) -- something was changed after execution or not
        executed_queries (list) -- executed queries
        dst (str) -- data destination table (when copy_from)
        src (str) -- data source table (when copy_to)
        opt_need_quotes (tuple) -- values of these options must be passed
            to SQL in quotes
    """

    def __init__(self, module, cursor):
        self.module = module
        self.cursor = cursor
        self.executed_queries = []
        self.changed = False
        self.dst = ''
        self.src = ''
        # COPY options that take string values and therefore must be
        # rendered single-quoted inside the options clause:
        self.opt_need_quotes = (
            'DELIMITER',
            'NULL',
            'QUOTE',
            'ESCAPE',
            'ENCODING',
        )

    def copy_from(self):
        """Implements COPY FROM command behavior.

        Builds COPY <dst> [(columns)] FROM [PROGRAM] '<copy_from>'
        [(options)] and executes it (or, in check mode, only validates
        the destination table and records the query).
        """
        self.src = self.module.params['copy_from']
        self.dst = self.module.params['dst']

        query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')]

        if self.module.params.get('columns'):
            query_fragments.append('(%s)' % ','.join(self.module.params['columns']))

        query_fragments.append('FROM')

        if self.module.params.get('program'):
            query_fragments.append('PROGRAM')

        # NOTE(review): the source path is string-interpolated into the SQL;
        # it is only screened by check_input() when trust_input=false.
        query_fragments.append("'%s'" % self.src)

        if self.module.params.get('options'):
            query_fragments.append(self.__transform_options())

        # Note: check mode is implemented here:
        if self.module.check_mode:
            self.changed = self.__check_table(self.dst)

            if self.changed:
                self.executed_queries.append(' '.join(query_fragments))

        else:
            if exec_sql(self, ' '.join(query_fragments), return_bool=True):
                self.changed = True

    def copy_to(self):
        """Implements COPY TO command behavior.

        Builds COPY <src>|(<SELECT ...>) [(columns)] TO [PROGRAM]
        '<copy_to>' [(options)] and executes it (or, in check mode,
        only validates the source and records the query).
        """
        self.src = self.module.params['src']
        self.dst = self.module.params['copy_to']

        if 'SELECT ' in self.src.upper():
            # If src is SQL SELECT statement:
            query_fragments = ['COPY (%s)' % self.src]
        else:
            # If src is a table:
            query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')]

        if self.module.params.get('columns'):
            query_fragments.append('(%s)' % ','.join(self.module.params['columns']))

        query_fragments.append('TO')

        if self.module.params.get('program'):
            query_fragments.append('PROGRAM')

        query_fragments.append("'%s'" % self.dst)

        if self.module.params.get('options'):
            query_fragments.append(self.__transform_options())

        # Note: check mode is implemented here:
        if self.module.check_mode:
            self.changed = self.__check_table(self.src)

            if self.changed:
                self.executed_queries.append(' '.join(query_fragments))

        else:
            if exec_sql(self, ' '.join(query_fragments), return_bool=True):
                self.changed = True

    def __transform_options(self):
        """Transform options dict into a suitable string."""
        # Quote the values of options that expect string literals
        # (see self.opt_need_quotes); note this mutates module.params.
        for (key, val) in iteritems(self.module.params['options']):
            if key.upper() in self.opt_need_quotes:
                self.module.params['options'][key] = "'%s'" % val

        opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])]
        return '(%s)' % ', '.join(opt)

    def __check_table(self, table):
        """Check table or SQL in transaction mode for check_mode.

        Return True if it is OK.

        Arguments:
            table (str) - Table name that needs to be checked.
                It can be SQL SELECT statement that was passed
                instead of the table name.
        """
        if 'SELECT ' in table.upper():
            # In this case table is actually SQL SELECT statement.
            # If SQL fails, it's handled by exec_sql():
            exec_sql(self, table, add_to_executed=False)
            # If exec_sql() did not fail the module, all is OK:
            return True

        exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
                 add_to_executed=False)
        # If SQL was executed successfully:
        return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_copy module.

    Parses arguments, optionally validates untrusted input, connects to
    the database, runs COPY TO or COPY FROM via PgCopyData, and exits
    with changed/queries/src/dst facts.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        copy_to=dict(type='path', aliases=['to']),
        copy_from=dict(type='path', aliases=['from']),
        src=dict(type='str', aliases=['source']),
        dst=dict(type='str', aliases=['destination']),
        columns=dict(type='list', elements='str', aliases=['column']),
        options=dict(type='dict'),
        program=dict(type='bool', default=False),
        db=dict(type='str', aliases=['login_db']),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['copy_from', 'copy_to'],
            ['copy_from', 'src'],
            ['copy_to', 'dst'],
        ]
    )

    if not module.params['trust_input']:
        # Check input for potentially dangerous elements:
        opt_list = None
        if module.params['options']:
            opt_list = ['%s %s' % (key, val) for (key, val) in iteritems(module.params['options'])]

        check_input(module,
                    module.params['copy_to'],
                    module.params['copy_from'],
                    module.params['src'],
                    module.params['dst'],
                    opt_list,
                    module.params['columns'],
                    module.params['session_role'])

    # Note: we don't need to check mutually exclusive params here, because they are
    # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
    if module.params.get('copy_from') and not module.params.get('dst'):
        module.fail_json(msg='dst param is necessary with copy_from')

    elif module.params.get('copy_to') and not module.params.get('src'):
        module.fail_json(msg='src param is necessary with copy_to')

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    # Connect to DB and make cursor object.
    # autocommit=False so check mode can roll the transaction back below:
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    ##############
    # Create the object and do main job:
    data = PgCopyData(module, cursor)

    # Note: parameters like dst, src, etc. are got
    # from module object into data object of PgCopyData class.
    # Therefore not need to pass args to the methods below.
    # Note: check mode is implemented inside the methods below
    # by checking passed module.check_mode arg.
    if module.params.get('copy_to'):
        data.copy_to()

    elif module.params.get('copy_from'):
        data.copy_from()

    # Finish:
    if module.check_mode:
        db_connection.rollback()
    else:
        db_connection.commit()

    cursor.close()
    db_connection.close()

    # Return some values:
    module.exit_json(
        changed=data.changed,
        queries=data.executed_queries,
        src=data.src,
        dst=data.dst,
    )
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py
new file mode 100644
index 000000000..e45d9b769
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py
@@ -0,0 +1,786 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_db
+short_description: Add or remove PostgreSQL databases from a remote host
+description:
+ - Add or remove PostgreSQL databases from a remote host.
+options:
+ name:
+ description:
+ - Name of the database to add or remove.
+ type: str
+ required: true
+ aliases: [ db ]
+ owner:
+ description:
+ - Name of the role to set as owner of the database.
+ type: str
+ default: ''
+ template:
+ description:
+ - Template used to create the database.
+ type: str
+ default: ''
+ encoding:
+ description:
+ - Encoding of the database.
+ type: str
+ default: ''
+ lc_collate:
+ description:
+ - Collation order (LC_COLLATE) to use in the database
+ must match collation order of template database unless C(template0) is used as template.
+ type: str
+ default: ''
+ lc_ctype:
+ description:
+ - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...).
+ - Must match LC_CTYPE of template database unless C(template0) is used as template.
+ type: str
+ default: ''
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database state.
+ - C(present) implies that the database should be created if necessary.
+ - C(absent) implies that the database should be removed if present.
+ - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+ Note that in some PostgreSQL versions of pg_dump, which is an embedded PostgreSQL utility and is used by the module,
+ returns rc 0 even when errors occurred (e.g. the connection is forbidden by pg_hba.conf, etc.),
+ so the module returns changed=True but the dump has not actually been done. Please, be sure that your version of
+ pg_dump returns rc 1 in this case.
+ - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4).
+ - The format of the backup will be detected based on the target name.
+ - Supported compression formats for dump and restore are determined by the target file format C(.pgc) (custom), C(.bz2) (bzip2), C(.gz) (gzip/pigz) and C(.xz) (xz).
+ - Supported formats for dump and restore are determined by the target file format C(.sql) (plain), C(.tar) (tar), C(.pgc) (custom) and C(.dir) (directory).
+ The directory format is supported since collection version 1.4.0.
+ - "Restore program is selected by target file format: C(.tar), C(.pgc), and C(.dir) are handled by pg_restore, others with psql."
+ - C(rename) is used to rename the database C(name) to C(target).
+ - If the database C(name) exists, it will be renamed to C(target).
+ - If the database C(name) does not exist and the C(target) database exists,
+ the module will report that nothing has changed.
+ - If both the databases exist as well as when they have the same value, an error will be raised.
+ - When I(state=rename), in addition to the C(name) option, the module requires the C(target) option. Other options are ignored.
+ Supported since collection version 1.4.0.
+ type: str
+ choices: [ absent, dump, present, rename, restore ]
+ default: present
+ force:
+ description:
+ - Used to forcefully drop a database when the I(state) is C(absent), ignored otherwise.
+ type: bool
+ default: False
+ target:
+ description:
+ - File to back up or restore from.
+ - Used when I(state) is C(dump) or C(restore).
+ type: path
+ default: ''
+ target_opts:
+ description:
+ - Additional arguments for pg_dump or restore program (pg_restore or psql, depending on target's format).
+ - Used when I(state) is C(dump) or C(restore).
+ type: str
+ default: ''
+ maintenance_db:
+ description:
+ - The value specifies the initial database (which is also called as maintenance DB) that Ansible connects to.
+ type: str
+ default: postgres
+ conn_limit:
+ description:
+ - Specifies the database connection limit.
+ type: str
+ default: ''
+ tablespace:
+ description:
+ - The tablespace to set for the database
+ U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+ - If you want to move the database back to the default tablespace,
+ explicitly set this to pg_default.
+ type: path
+ default: ''
+ dump_extra_args:
+ description:
+ - Provides additional arguments when I(state) is C(dump).
+ - Cannot be used with dump-file-format-related arguments like ``--format=d``.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(owner), I(conn_limit), I(encoding),
+ I(db), I(template), I(tablespace), I(session_role) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+seealso:
+- name: CREATE DATABASE reference
+ description: Complete reference of the CREATE DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createdatabase.html
+- name: DROP DATABASE reference
+ description: Complete reference of the DROP DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
+- name: pg_dump reference
+ description: Complete reference of pg_dump documentation.
+ link: https://www.postgresql.org/docs/current/app-pgdump.html
+- name: pg_restore reference
+ description: Complete reference of pg_restore documentation.
+ link: https://www.postgresql.org/docs/current/app-pgrestore.html
+- module: community.postgresql.postgresql_tablespace
+- module: community.postgresql.postgresql_info
+- module: community.postgresql.postgresql_ping
+
+notes:
+- States C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
+
+attributes:
+ check_mode:
+ support: full
+
+author: "Ansible Core Team"
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name "acme"
+ community.postgresql.postgresql_db:
+ name: acme
+
+# Note: If a template different from "template0" is specified,
+# encoding and locale settings must match those of the template.
+- name: Create a new database with name "acme" and specific encoding and locale settings
+ community.postgresql.postgresql_db:
+ name: acme
+ encoding: UTF-8
+ lc_collate: de_DE.UTF-8
+ lc_ctype: de_DE.UTF-8
+ template: template0
+
+# Note: Default limit for the number of concurrent connections to
+# a specific database is "-1", which means "unlimited"
+- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
+ community.postgresql.postgresql_db:
+ name: acme
+ conn_limit: "100"
+
+- name: Dump an existing database to a file
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+
+- name: Dump an existing database to a file excluding the test table
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ dump_extra_args: --exclude-table=test
+
+- name: Dump an existing database to a file (with compression)
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql.gz
+
+- name: Dump a single schema for an existing database
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ target_opts: "-n public"
+
+- name: Dump only table1 and table2 from the acme database
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/table1_table2.sql
+ target_opts: "-t table1 -t table2"
+
+- name: Dump an existing database using the directory format
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.dir
+
+- name: Dump an existing database using the custom format
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.pgc
+
+# name: acme - the name of the database to connect through which the recovery will take place
+- name: Restore database using the tar format
+ community.postgresql.postgresql_db:
+ name: acme
+ state: restore
+ target: /tmp/acme.tar
+
+# Note: In the example below, if database foo exists and has another tablespace
+# the tablespace will be changed to foo. Access to the database will be locked
+# until the copying of database files is finished.
+- name: Create a new database called foo in tablespace bar
+ community.postgresql.postgresql_db:
+ name: foo
+ tablespace: bar
+
+# Rename the database foo to bar.
+# If the database foo exists, it will be renamed to bar.
+# If the database foo does not exist and the bar database exists,
+# the module will report that nothing has changed.
+# If both the databases exist, an error will be raised.
+- name: Rename the database foo to bar
+ community.postgresql.postgresql_db:
+ name: foo
+ state: rename
+ target: bar
+'''
+
+RETURN = r'''
+executed_commands:
+ description: List of commands which tried to run.
+ returned: always
+ type: list
+ sample: ["CREATE DATABASE acme"]
+ version_added: '0.2.0'
+'''
+
+
+import os
+import subprocess
+import traceback
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ HAS_PSYCOPG2 = False
+else:
+ HAS_PSYCOPG2 = True
+
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ ensure_required_libs,
+ postgres_common_argument_spec
+)
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ SQLParseError,
+)
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+
class NotSupportedError(Exception):
    """Raised when a requested change cannot be applied to an existing
    database (e.g. altering its encoding, LC_COLLATE or LC_CTYPE)."""
    pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
def set_owner(cursor, db, owner):
    """Make the given role the owner of the database; always returns True.

    The query is also recorded in the module-level executed_commands list.
    """
    query = 'ALTER DATABASE "{0}" OWNER TO "{1}"'.format(db, owner)
    executed_commands.append(query)
    cursor.execute(query)
    return True
+
+
def set_conn_limit(cursor, db, conn_limit):
    """Set the connection limit of the database; always returns True.

    The query is also recorded in the module-level executed_commands list.
    """
    query = 'ALTER DATABASE "{0}" CONNECTION LIMIT {1}'.format(db, conn_limit)
    executed_commands.append(query)
    cursor.execute(query)
    return True
+
+
def get_encoding_id(cursor, encoding):
    """Resolve an encoding name to PostgreSQL's numeric encoding id.

    Uses the server-side pg_char_to_encoding() function, so the mapping
    always matches the connected server.
    """
    cursor.execute("SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;",
                   {'encoding': encoding})
    return cursor.fetchone()['encoding_id']
+
+
def get_db_info(cursor, db):
    """Fetch owner, encoding (name and id), locales, connection limit
    and tablespace of the given database as one row.

    Args:
        cursor: Psycopg cursor object.
        db (str): Database name to look up.
    """
    # Join pg_roles/pg_tablespace to resolve the datdba and
    # dattablespace oids into readable names.
    query = """
    SELECT rolname AS owner,
    pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
    datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
    spcname AS tablespace
    FROM pg_database
    JOIN pg_roles ON pg_roles.oid = pg_database.datdba
    JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
    WHERE datname = %(db)s
    """
    cursor.execute(query, {'db': db})
    return cursor.fetchone()
+
+
def db_exists(cursor, db):
    """Return True if a database with the given name exists."""
    cursor.execute("SELECT * FROM pg_database WHERE datname=%(db)s", {'db': db})
    # pg_database holds exactly one row per database.
    return cursor.rowcount == 1
+
+
def db_dropconns(cursor, db):
    """Forbid new connections to the database and terminate existing ones.

    PostgreSQL 9.2 renamed pg_stat_activity.procpid to pid, so the
    terminate query is built with the column name matching the server.
    """
    if cursor.connection.server_version >= 90200:
        # Drop DB connections in Postgres 9.2 and above.
        pid_col = 'pid'
    else:
        # Drop DB connections in Postgres 9.1 and below.
        pid_col = 'procpid'

    query = (
        "UPDATE pg_database SET datallowconn = false WHERE datname=%(db)s; "
        "SELECT pg_terminate_backend(pg_stat_activity.{0}) FROM pg_stat_activity "
        "WHERE pg_stat_activity.datname=%(db)s AND {0} <> pg_backend_pid()".format(pid_col)
    )
    cursor.execute(query, {'db': db})
+
+
def db_delete(cursor, db, force=False):
    """Drop database *db* if it exists.

    With force=True, PostgreSQL 13+ uses DROP DATABASE ... WITH (FORCE);
    older servers get their connections terminated first via db_dropconns().
    Returns True when a DROP was issued, False when the db was absent.
    """
    if not db_exists(cursor, db):
        return False

    if force and cursor.connection.server_version >= 130000:
        query = 'DROP DATABASE "%s" WITH (FORCE)' % db
    else:
        if force:
            db_dropconns(cursor, db)
        query = 'DROP DATABASE "%s"' % db

    executed_commands.append(query)
    cursor.execute(query)
    return True
+
+
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
    """Create database *db*, or reconcile an existing one.

    When the database does not exist it is created with every requested
    attribute. When it already exists, immutable attributes (encoding,
    LC_COLLATE, LC_CTYPE) raise NotSupportedError on mismatch, while the
    mutable ones (owner, connection limit, tablespace) are altered in place.

    Returns True when something was created or changed, False otherwise.
    """
    # Locale/encoding values are passed as bound parameters; identifiers
    # (db, owner, template, tablespace) are interpolated directly and are
    # expected to have been validated by check_input() when trust_input=false.
    params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
    if not db_exists(cursor, db):
        query_fragments = ['CREATE DATABASE "%s"' % db]
        if owner:
            query_fragments.append('OWNER "%s"' % owner)
        if template:
            query_fragments.append('TEMPLATE "%s"' % template)
        if encoding:
            query_fragments.append('ENCODING %(enc)s')
        if lc_collate:
            query_fragments.append('LC_COLLATE %(collate)s')
        if lc_ctype:
            query_fragments.append('LC_CTYPE %(ctype)s')
        if tablespace:
            query_fragments.append('TABLESPACE "%s"' % tablespace)
        if conn_limit:
            query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
        query = ' '.join(query_fragments)
        # mogrify() renders the final SQL (with bound values) for reporting.
        executed_commands.append(cursor.mogrify(query, params))
        cursor.execute(query, params)
        return True
    else:
        db_info = get_db_info(cursor, db)
        # Immutable attributes: fail loudly rather than silently ignoring.
        if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
            raise NotSupportedError(
                'Changing database encoding is not supported. '
                'Current encoding: %s' % db_info['encoding']
            )
        elif lc_collate and lc_collate != db_info['lc_collate']:
            raise NotSupportedError(
                'Changing LC_COLLATE is not supported. '
                'Current LC_COLLATE: %s' % db_info['lc_collate']
            )
        elif lc_ctype and lc_ctype != db_info['lc_ctype']:
            raise NotSupportedError(
                'Changing LC_CTYPE is not supported.'
                'Current LC_CTYPE: %s' % db_info['lc_ctype']
            )
        else:
            changed = False

            # Mutable attributes: each setter returns True, updating 'changed'.
            if owner and owner != db_info['owner']:
                changed = set_owner(cursor, db, owner)

            # conn_limit is a string module option; compare against str().
            if conn_limit and conn_limit != str(db_info['conn_limit']):
                changed = set_conn_limit(cursor, db, conn_limit)

            if tablespace and tablespace != db_info['tablespace']:
                changed = set_tablespace(cursor, db, tablespace)

            return changed
+
+
def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
    """Return True when database *db* exists and already satisfies every
    requested attribute; falsy attributes are treated as "don't care".

    *template* is accepted for signature symmetry with db_create() but is
    never compared.
    """
    if not db_exists(cursor, db):
        return False

    db_info = get_db_info(cursor, db)

    # Encoding needs a catalog lookup, and only when it was requested.
    if encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']:
        return False

    # Remaining attributes compare directly against the catalog values.
    expected = (
        (lc_collate, db_info['lc_collate']),
        (lc_ctype, db_info['lc_ctype']),
        (owner, db_info['owner']),
        (conn_limit, str(db_info['conn_limit'])),
        (tablespace, db_info['tablespace']),
    )
    return all(not wanted or wanted == actual for wanted, actual in expected)
+
+
def db_dump(module, target, target_opts="",
            db=None,
            dump_extra_args=None,
            user=None,
            password=None,
            host=None,
            port=None,
            **kw):
    """Dump database *db* to *target* using pg_dump.

    The pg_dump output format and an optional compressor are chosen from the
    file extension of *target*: .tar/.pgc/.dir select the tar, custom and
    directory formats; .gz/.bz2/.xz pipe plain SQL output through
    pigz-or-gzip, bzip2 or xz.

    Returns the (rc, stdout, stderr, cmd) tuple from do_with_password().
    """
    flags = login_flags(db, host, port, user, db_prefix=False)
    cmd = module.get_bin_path('pg_dump', True)
    comp_prog_path = None

    if os.path.splitext(target)[-1] == '.tar':
        flags.append(' --format=t')
    elif os.path.splitext(target)[-1] == '.pgc':
        flags.append(' --format=c')
    elif os.path.splitext(target)[-1] == '.dir':
        flags.append(' --format=d')

    if os.path.splitext(target)[-1] == '.gz':
        # Prefer pigz (parallel gzip) when it is installed.
        if module.get_bin_path('pigz'):
            comp_prog_path = module.get_bin_path('pigz', True)
        else:
            comp_prog_path = module.get_bin_path('gzip', True)
    elif os.path.splitext(target)[-1] == '.bz2':
        comp_prog_path = module.get_bin_path('bzip2', True)
    elif os.path.splitext(target)[-1] == '.xz':
        comp_prog_path = module.get_bin_path('xz', True)

    cmd += "".join(flags)

    if dump_extra_args:
        cmd += " {0} ".format(dump_extra_args)

    if target_opts:
        cmd += " {0} ".format(target_opts)

    if comp_prog_path:
        # Use a fifo to be notified of an error in pg_dump
        # Using shell pipe has no way to return the code of the first command
        # in a portable way.
        fifo = os.path.join(module.tmpdir, 'pg_fifo')
        os.mkfifo(fifo)
        # pg_dump writes into the fifo in the background; the compressor
        # reads from it and writes the compressed stream to the target file.
        cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
    else:
        if ' --format=d' in cmd:
            # Directory format is written by pg_dump itself via -f.
            cmd = '{0} -f {1}'.format(cmd, shlex_quote(target))
        else:
            cmd = '{0} > {1}'.format(cmd, shlex_quote(target))

    return do_with_password(module, cmd, password)
+
+
def db_restore(module, target, target_opts="",
               db=None,
               user=None,
               password=None,
               host=None,
               port=None,
               **kw):
    """Restore *target* into database *db* using psql or pg_restore.

    The tool and its flags are chosen from the file extension of *target*:
    .sql uses psql, .tar/.pgc/.dir use pg_restore, and .gz/.bz2/.xz are
    streamed through the matching decompressor (zcat/bzcat/xzcat) into psql.

    Returns a (rc, stdout, stderr, cmd) tuple; the command is masked in the
    compressed-pipeline path so the password cannot leak into the result.
    """
    flags = login_flags(db, host, port, user)
    comp_prog_path = None
    cmd = module.get_bin_path('psql', True)

    if os.path.splitext(target)[-1] == '.sql':
        flags.append(' --file={0}'.format(target))

    elif os.path.splitext(target)[-1] == '.tar':
        flags.append(' --format=Tar')
        cmd = module.get_bin_path('pg_restore', True)

    elif os.path.splitext(target)[-1] == '.pgc':
        flags.append(' --format=Custom')
        cmd = module.get_bin_path('pg_restore', True)

    elif os.path.splitext(target)[-1] == '.dir':
        flags.append(' --format=Directory')
        cmd = module.get_bin_path('pg_restore', True)

    elif os.path.splitext(target)[-1] == '.gz':
        comp_prog_path = module.get_bin_path('zcat', True)

    elif os.path.splitext(target)[-1] == '.bz2':
        comp_prog_path = module.get_bin_path('bzcat', True)

    elif os.path.splitext(target)[-1] == '.xz':
        comp_prog_path = module.get_bin_path('xzcat', True)

    cmd += "".join(flags)
    if target_opts:
        cmd += " {0} ".format(target_opts)

    if comp_prog_path:
        # Keep the caller's environment (PATH, locale, libpq variables) and
        # only add PGPASSWORD on top of it. Previously the whole environment
        # was replaced with {"PGPASSWORD": ...}, dropping everything else,
        # which is also inconsistent with do_with_password()'s environ_update.
        env = os.environ.copy()
        if password:
            env["PGPASSWORD"] = password
        # Decompressor feeds psql via a pipe; pg_dump-style fifo tricks are
        # not needed here because p1's exit code is checked explicitly.
        p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
        (stdout2, stderr2) = p2.communicate()
        p1.stdout.close()
        p1.wait()
        if p1.returncode != 0:
            stderr1 = p1.stderr.read()
            return p1.returncode, '', stderr1, 'cmd: ****'
        else:
            return p2.returncode, '', stderr2, 'cmd: ****'
    else:
        if '--format=Directory' in cmd:
            # pg_restore reads a directory-format dump as a positional arg.
            cmd = '{0} {1}'.format(cmd, shlex_quote(target))
        else:
            cmd = '{0} < {1}'.format(cmd, shlex_quote(target))

    return do_with_password(module, cmd, password)
+
+
def login_flags(db, host, port, user, db_prefix=True):
    """
    Return a list of connection-argument strings, each prefixed with a
    space and quoted where necessary, to be combined later into a single
    shell string with `"".join(rv)`.

    db_prefix determines whether the db argument is passed via "--dbname"
    (the option only exists since PostgreSQL 9.3) or positionally.
    """
    flags = []
    if db:
        template = ' --dbname={0}' if db_prefix else ' {0}'
        flags.append(template.format(shlex_quote(db)))
    for option, value in (('--host', host), ('--port', port), ('--username', user)):
        if value:
            flags.append(' {0}={1}'.format(option, value))
    return flags
+
+
def do_with_password(module, cmd, password):
    """Run *cmd* through the shell, exporting PGPASSWORD when supplied.

    Returns (rc, stdout, stderr, cmd); the command is also recorded in
    executed_commands. (run_command yields rc, stdout, stderr in that
    order — the original local names had stdout/stderr swapped, though the
    returned values were correct.)
    """
    env = {"PGPASSWORD": password} if password else {}
    executed_commands.append(cmd)
    rc, out, err = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
    return rc, out, err, cmd
+
+
def set_tablespace(cursor, db, tablespace):
    """Move database *db* to *tablespace*.

    The statement is recorded in executed_commands and always returns True.
    """
    statement = 'ALTER DATABASE "%s" SET TABLESPACE "%s"' % (db, tablespace)
    executed_commands.append(statement)
    cursor.execute(statement)
    return True
+
+
def rename_db(module, cursor, db, target, check_mode=False):
    """Rename database *db* to *target*.

    Fails the module when both or neither database exists. Returns True
    when a rename was (or, in check mode, would be) performed; returns
    False when only the target exists, which is taken to mean the desired
    state was already reached by a previous run.
    """
    source_exists = db_exists(cursor, db)
    target_exists = db_exists(cursor, target)

    if source_exists and target_exists:
        module.fail_json(msg='Both the source and the target databases exist.')

    if not source_exists and target_exists:
        # Nothing to change: assume an earlier run already renamed it.
        return False

    if not source_exists and not target_exists:
        module.fail_json(msg='The source and the target databases do not exist.')

    if source_exists and not target_exists:
        if check_mode:
            return True

        query = 'ALTER DATABASE "%s" RENAME TO "%s"' % (db, target)
        executed_commands.append(query)
        cursor.execute(query)
        return True
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point: create/drop/rename PostgreSQL databases or dump/restore them."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type='str', required=True, aliases=['name']),
        owner=dict(type='str', default=''),
        template=dict(type='str', default=''),
        encoding=dict(type='str', default=''),
        lc_collate=dict(type='str', default=''),
        lc_ctype=dict(type='str', default=''),
        state=dict(type='str', default='present',
                   choices=['absent', 'dump', 'present', 'rename', 'restore']),
        target=dict(type='path', default=''),
        target_opts=dict(type='str', default=''),
        maintenance_db=dict(type='str', default="postgres"),
        session_role=dict(type='str'),
        conn_limit=dict(type='str', default=''),
        tablespace=dict(type='path', default=''),
        dump_extra_args=dict(type='str', default=None),
        trust_input=dict(type='bool', default=True),
        force=dict(type='bool', default=False),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    db = module.params["db"]
    owner = module.params["owner"]
    template = module.params["template"]
    encoding = module.params["encoding"]
    lc_collate = module.params["lc_collate"]
    lc_ctype = module.params["lc_ctype"]
    target = module.params["target"]
    target_opts = module.params["target_opts"]
    state = module.params["state"]
    changed = False
    maintenance_db = module.params['maintenance_db']
    session_role = module.params["session_role"]
    conn_limit = module.params['conn_limit']
    tablespace = module.params['tablespace']
    dump_extra_args = module.params['dump_extra_args']
    trust_input = module.params['trust_input']
    force = module.params['force']

    # Sanity checks that only apply to renaming.
    if state == 'rename':
        if not target:
            module.fail_json(msg='The "target" option must be defined when the "rename" option is used.')

        if db == target:
            module.fail_json(msg='The "name/db" option and the "target" option cannot be the same.')

        if maintenance_db == db:
            module.fail_json(msg='The "maintenance_db" option and the "name/db" option cannot be the same.')

    # Check input
    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, owner, conn_limit, encoding, db, template, tablespace, session_role)

    # dump/restore shell out to pg_dump/psql instead of using psycopg2.
    raw_connection = state in ("dump", "restore")

    if not raw_connection:
        ensure_required_libs(module)

    # Default dump/restore target: <cwd>/<db>.sql.
    if target == "":
        target = "{0}/{1}.sql".format(os.getcwd(), db)
        target = os.path.expanduser(target)

    # Such a transformation is used, since the connection should go to
    # 'maintenance_db' (the managed db may not exist yet or is being dropped).
    params_dict = module.params
    params_dict["db"] = module.params["maintenance_db"]

    # Parameters for connecting to the database
    conn_params = get_conn_params(module, params_dict, warn_db_default=False)

    if not raw_connection:
        db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
        cursor = db_connection.cursor(cursor_factory=DictCursor)

        if session_role:
            try:
                cursor.execute('SET ROLE "%s"' % session_role)
            except Exception as e:
                module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())

    try:
        if module.check_mode:
            if state == "absent":
                changed = db_exists(cursor, db)

            elif state == "present":
                changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)

            elif state == "rename":
                changed = rename_db(module, cursor, db, target, check_mode=True)

            # NOTE(review): dump/restore fall through here with changed=False.
            module.exit_json(changed=changed, db=db, executed_commands=executed_commands)

        if state == "absent":
            try:
                changed = db_delete(cursor, db, force)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state == "present":
            try:
                changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif raw_connection:
            # Parameters for performing dump/restore (use the real db name,
            # not maintenance_db).
            conn_params = get_conn_params(module, module.params, warn_db_default=False)

            method = state == "dump" and db_dump or db_restore
            try:
                if state == 'dump':
                    rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **conn_params)
                else:
                    rc, stdout, stderr, cmd = method(module, target, target_opts, db, **conn_params)

                if rc != 0:
                    module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
                else:
                    # dump/restore always report changed=True on success.
                    module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
                                     executed_commands=executed_commands)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state == 'rename':
            changed = rename_db(module, cursor, db, target)

    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except SystemExit:
        # Avoid catching this on Python 2.4
        raise
    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())

    if not raw_connection:
        cursor.close()
        db_connection.close()

    module.exit_json(changed=changed, db=db, executed_commands=executed_commands)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py
new file mode 100644
index 000000000..e9f9e46b7
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py
@@ -0,0 +1,475 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ext
+short_description: Add or remove PostgreSQL extensions from a database
+description:
+- Add or remove PostgreSQL extensions from a database.
+options:
+ name:
+ description:
+ - Name of the extension to add or remove.
+ required: true
+ type: str
+ aliases:
+ - ext
+ db:
+ description:
+ - Name of the database to add or remove the extension to/from.
+ required: true
+ type: str
+ aliases:
+ - login_db
+ schema:
+ description:
+ - Name of the schema to add the extension to.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database extension state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ cascade:
+ description:
+ - Automatically install/remove any extensions that this extension depends on
+ that are not already installed/removed (supported since PostgreSQL 9.6).
+ type: bool
+ default: false
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ version:
+ description:
+ - Extension version to add or update to. Has effect with I(state=present) only.
+ - If not specified and extension is not installed in the database,
+ the latest version available will be created.
+ - If extension is already installed, will update to the given version if a valid update
+ path exists.
+ - Downgrading is only supported if the extension provides a downgrade path otherwise
+ the extension must be removed and a lower version of the extension must be made available.
+ - Set I(version=latest) to always update the extension to the latest available version.
+ type: str
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(ext), I(schema),
+ I(version), I(session_role) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL extensions
+ description: General information about PostgreSQL extensions.
+ link: https://www.postgresql.org/docs/current/external-extensions.html
+- name: CREATE EXTENSION reference
+ description: Complete reference of the CREATE EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createextension.html
+- name: ALTER EXTENSION reference
+ description: Complete reference of the ALTER EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterextension.html
+- name: DROP EXTENSION reference
+ description: Complete reference of the DROP EXTENSION command documentation.
+  link: https://www.postgresql.org/docs/current/sql-dropextension.html
+
+notes:
+- Incomparable versions, for example PostGIS ``unpackaged``, cannot be installed.
+
+attributes:
+ check_mode:
+ support: full
+
+author:
+- Daniel Schep (@dschep)
+- Thomas O'Donnell (@andytom)
+- Sandro Santilli (@strk)
+- Andrew Klychkov (@Andersson007)
+- Keith Fiske (@keithf4)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Adds postgis extension to the database acme in the schema foo
+ community.postgresql.postgresql_ext:
+ name: postgis
+ db: acme
+ schema: foo
+
+- name: Removes postgis extension from the database acme
+ community.postgresql.postgresql_ext:
+ name: postgis
+ db: acme
+ state: absent
+
+- name: Adds earthdistance extension to the database template1 cascade
+ community.postgresql.postgresql_ext:
+ name: earthdistance
+ db: template1
+ cascade: true
+
+# In the example below, if earthdistance extension is installed,
+# it will be removed too because it depends on cube:
+- name: Removes cube extension from the database acme cascade
+ community.postgresql.postgresql_ext:
+ name: cube
+ db: acme
+ cascade: true
+ state: absent
+
+- name: Create extension foo of version 1.2 or update it to that version if it's already created and a valid update path exists
+ community.postgresql.postgresql_ext:
+ db: acme
+ name: foo
+ version: 1.2
+
+- name: Create the latest available version of extension foo. If already installed, update it to the latest version
+ community.postgresql.postgresql_ext:
+ db: acme
+ name: foo
+ version: latest
+'''
+
+RETURN = r'''
+query:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["DROP EXTENSION \"acme\""]
+
+'''
+
+import traceback
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
# SQL statements run by this module; returned to the user in the result.
executed_queries = []


# ===========================================
# PostgreSQL module specific support methods.
#
+
+
def ext_delete(cursor, ext, current_version, cascade):
    """Remove the extension from the database.

    Return True if success.

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
        current_version (str) -- installed version of the extension.
            Value obtained from ext_get_versions and used to
            determine if the extension was installed.
        cascade (boolean) -- Pass the CASCADE flag to the DROP command
    """
    if current_version:
        query = "DROP EXTENSION \"%s\"" % ext
        if cascade:
            query += " CASCADE"
        cursor.execute(query)
        # mogrify() renders the final SQL for reporting in the module output.
        executed_queries.append(cursor.mogrify(query))
        return True
    else:
        return False
+
+
def ext_update_version(cursor, ext, version):
    """Update extension *ext* to *version*.

    Return True if success.

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
        version (str) -- target version; 'latest' issues a plain
            ALTER EXTENSION ... UPDATE (default version from the
            extension control file)
    """
    if version == 'latest':
        query = "ALTER EXTENSION \"%s\" UPDATE" % ext
        params = {}
    else:
        query = "ALTER EXTENSION \"%s\" UPDATE TO %%(ver)s" % ext
        params = {'ver': version}

    cursor.execute(query, params)
    executed_queries.append(cursor.mogrify(query, params))

    return True
+
+
def ext_create(cursor, ext, schema, cascade, version):
    """
    Create the extension objects inside the database.

    Return True if success.

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
        schema (str) -- target schema for extension objects
        cascade (bool) -- pass the CASCADE flag to CREATE EXTENSION so
            extensions it depends on are installed too
        version (str) -- extension version; 'latest' installs the default
            version from the extension's control file
    """
    query = "CREATE EXTENSION \"%s\"" % ext
    params = {}

    if schema:
        query += " WITH SCHEMA \"%s\"" % schema
    if version != 'latest':
        query += " VERSION %(ver)s"
        params['ver'] = version
    if cascade:
        query += " CASCADE"

    cursor.execute(query, params)
    executed_queries.append(cursor.mogrify(query, params))
    return True
+
+
def ext_get_versions(cursor, ext):
    """
    Get the currently created extension version if it is installed
    in the database, and the versions that are available if the
    extension is installed on the system.

    Return tuple (current_version, available_versions) where
    current_version is the installed version string, or False when the
    extension is not created in the database, and available_versions is
    a set of every version reported by pg_available_extension_versions
    (all installable versions — it is NOT filtered relative to the
    current version).

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
    """

    current_version = None
    params = {}
    params['ext'] = ext

    # 1. Get the current extension version:
    query = ("SELECT extversion FROM pg_catalog.pg_extension "
             "WHERE extname = %(ext)s")

    cursor.execute(query, params)

    res = cursor.fetchone()
    if res:
        current_version = res[0]

    # 2. Get available versions:
    query = ("SELECT version FROM pg_available_extension_versions "
             "WHERE name = %(ext)s")

    cursor.execute(query, params)

    available_versions = set(r[0] for r in cursor.fetchall())

    # Normalize "not installed" to False for the callers' truthiness checks.
    if current_version is None:
        current_version = False

    return (current_version, available_versions)
+
+
def ext_valid_update_path(cursor, ext, current_version, version):
    """
    Check whether the installed extension version has a valid update
    path to the given version.

    Return True if a valid path exists, otherwise False. A *version* of
    'latest' is always considered a valid path (no catalog lookup is
    performed) and results in the update command always being run.

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
        current_version (str) -- installed version of the extension
        version (str) -- target extension version to update to
    """
    if version == 'latest':
        return True

    query = ("SELECT path FROM pg_extension_update_paths(%(ext)s) "
             "WHERE source = %(cv)s "
             "AND target = %(ver)s")
    cursor.execute(query, {'ext': ext, 'cv': current_version, 'ver': version})
    return cursor.fetchone() is not None
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point: create, update or remove a PostgreSQL extension."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type="str", required=True, aliases=["login_db"]),
        ext=dict(type="str", required=True, aliases=["name"]),
        schema=dict(type="str"),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        cascade=dict(type="bool", default=False),
        session_role=dict(type="str"),
        version=dict(type="str"),
        trust_input=dict(type="bool", default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    ext = module.params["ext"]
    schema = module.params["schema"]
    state = module.params["state"]
    cascade = module.params["cascade"]
    version = module.params["version"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]
    changed = False

    # Check input for potentially dangerous elements (SQL injection):
    if not trust_input:
        check_input(module, ext, schema, version, session_role)

    if version and state == 'absent':
        module.warn("Parameter version is ignored when state=absent")

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    try:
        # Get extension info and available versions:
        curr_version, available_versions = ext_get_versions(cursor, ext)

        if state == "present":

            # If version passed
            if version:
                # If extension is installed, update to passed version if a valid path exists
                if curr_version:
                    # Given version already installed
                    if curr_version == version:
                        changed = False
                    # Attempt to update to given version or latest version defined in extension control file
                    # ALTER EXTENSION is actually run if valid, so 'changed' will be true even if nothing updated
                    else:
                        valid_update_path = ext_valid_update_path(cursor, ext, curr_version, version)
                        if valid_update_path:
                            if module.check_mode:
                                changed = True
                            else:
                                changed = ext_update_version(cursor, ext, version)
                        else:
                            module.fail_json(msg="Passed version '%s' has no valid update path from "
                                                 "the currently installed version '%s' or "
                                                 "the passed version is not available" % (version, curr_version))
                else:
                    # If not requesting latest version and passed version not available
                    if version != 'latest' and version not in available_versions:
                        module.fail_json(msg="Passed version '%s' is not available" % version)
                    # Else install the passed version when available
                    else:
                        if module.check_mode:
                            changed = True
                        else:
                            changed = ext_create(cursor, ext, schema, cascade, version)

            # If version is not passed:
            else:
                # Extension exists, no request to update so no change
                if curr_version:
                    changed = False
                else:
                    # If the ext doesn't exist and is available:
                    if available_versions:
                        if module.check_mode:
                            changed = True
                        else:
                            changed = ext_create(cursor, ext, schema, cascade, 'latest')

                    # If the ext doesn't exist and is not available:
                    else:
                        module.fail_json(msg="Extension %s is not available" % ext)

        elif state == "absent":
            if curr_version:
                if module.check_mode:
                    changed = True
                else:
                    changed = ext_delete(cursor, ext, curr_version, cascade)
            else:
                changed = False

    except Exception as e:
        db_connection.close()
        module.fail_json(msg="Management of PostgreSQL extension failed: %s" % to_native(e), exception=traceback.format_exc())

    db_connection.close()
    module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py
new file mode 100644
index 000000000..2ffb33a8c
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py
@@ -0,0 +1,594 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_idx
+short_description: Create or drop indexes from a PostgreSQL database
+description:
+- Create or drop indexes from a PostgreSQL database.
+
+options:
+ idxname:
+ description:
+ - Name of the index to create or drop.
+ type: str
+ required: true
+ aliases:
+ - name
+ db:
+ description:
+ - Name of database to connect to and where the index will be created/dropped.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ schema:
+ description:
+ - Name of a database schema where the index will be created.
+ type: str
+ state:
+ description:
+ - Index state.
+ - C(present) implies the index will be created if it does not exist.
+ - C(absent) implies the index will be dropped if it exists.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ table:
+ description:
+    - Table to create the index on.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ columns:
+ description:
+ - List of index columns that need to be covered by index.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ aliases:
+ - column
+ cond:
+ description:
+ - Index conditions.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ idxtype:
+ description:
+ - Index type (like btree, gist, gin, etc.).
+ - Mutually exclusive with I(state=absent).
+ type: str
+ aliases:
+ - type
+ concurrent:
+ description:
+ - Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
+ - Pay attention, if I(concurrent=false), the table will be locked (ACCESS EXCLUSIVE) during the building process.
+ For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
+  - If the building process was interrupted for any reason when I(concurrent=true), the index becomes invalid.
+ In this case it should be dropped and created again.
+ - Mutually exclusive with I(cascade=true).
+ type: bool
+ default: true
+ unique:
+ description:
+ - Enable unique index.
+ - Only btree currently supports unique indexes.
+ type: bool
+ default: false
+ version_added: '0.2.0'
+ tablespace:
+ description:
+ - Set a tablespace for the index.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the index,
+ and in turn all objects that depend on those objects.
+    - It is used only with I(state=absent).
+ - Mutually exclusive with I(concurrent=true).
+ type: bool
+ default: false
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(idxname), I(session_role),
+ I(schema), I(table), I(columns), I(tablespace), I(storage_params),
+ I(cond) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+
+seealso:
+- module: community.postgresql.postgresql_table
+- module: community.postgresql.postgresql_tablespace
+- name: PostgreSQL indexes reference
+ description: General information about PostgreSQL indexes.
+ link: https://www.postgresql.org/docs/current/indexes.html
+- name: CREATE INDEX reference
+ description: Complete reference of the CREATE INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createindex.html
+- name: ALTER INDEX reference
+ description: Complete reference of the ALTER INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterindex.html
+- name: DROP INDEX reference
+ description: Complete reference of the DROP INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropindex.html
+
+notes:
+- The index building process can affect database performance.
+- To avoid table locks on production databases, use I(concurrent=true) (default behavior).
+
+attributes:
+ check_mode:
+ support: full
+
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
+ community.postgresql.postgresql_idx:
+ db: acme
+ table: products
+ columns: id,name
+ name: test_idx
+
+- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
+ community.postgresql.postgresql_idx:
+ db: acme
+ table: products
+ columns:
+ - id
+ - name
+ idxname: test_idx
+ tablespace: ssd
+ storage_params:
+ - fillfactor=90
+
+- name: Create gist index test_gist_idx concurrently on column geo_data of table map
+ community.postgresql.postgresql_idx:
+ db: somedb
+ table: map
+ idxtype: gist
+ columns: geo_data
+ idxname: test_gist_idx
+
+# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
+- name: Create gin index gin0_idx not concurrently on column comment of table test
+ community.postgresql.postgresql_idx:
+ idxname: gin0_idx
+ table: test
+ columns: comment gin_trgm_ops
+ concurrent: false
+ idxtype: gin
+
+- name: Drop btree test_idx concurrently
+ community.postgresql.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+
+- name: Drop test_idx cascade
+ community.postgresql.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+ cascade: true
+ concurrent: false
+
+- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
+ community.postgresql.postgresql_idx:
+ db: mydb
+ table: test
+ columns: id,comment
+ idxname: test_idx
+ cond: id > 1
+
+- name: Create unique btree index if not exists test_unique_idx on column name of table products
+ community.postgresql.postgresql_idx:
+ db: acme
+ table: products
+ columns: name
+ name: test_unique_idx
+ unique: true
+ concurrent: false
+'''
+
+RETURN = r'''
+name:
+ description: Index name.
+ returned: always
+ type: str
+ sample: 'foo_idx'
+state:
+ description: Index state.
+ returned: always
+ type: str
+ sample: 'present'
+schema:
+ description: Schema where index exists.
+ returned: always
+ type: str
+ sample: 'public'
+tablespace:
+ description: Tablespace where index exists.
+ returned: always
+ type: str
+ sample: 'ssd'
+query:
+ description: Query that was tried to be executed.
+ returned: always
+ type: str
+ sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
+storage_params:
+ description: Index storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=90" ]
+valid:
+ description: Index validity.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Index(object):
+
+ """Class for working with PostgreSQL indexes.
+
+ TODO:
+ 1. Add possibility to change ownership
+ 2. Add possibility to change tablespace
+ 3. Add list called executed_queries (executed_query should be left too)
+ 4. Use self.module instead of passing arguments to the methods whenever possible
+
+ Args:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ schema (str) -- name of the index schema
+ name (str) -- name of the index
+
+ Attrs:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ schema (str) -- name of the index schema
+ name (str) -- name of the index
+ exists (bool) -- flag the index exists in the DB or not
+ info (dict) -- dict that contents information about the index
+ executed_query (str) -- executed query
+ """
+
+ def __init__(self, module, cursor, schema, name):
+ self.name = name
+ if schema:
+ self.schema = schema
+ else:
+ self.schema = 'public'
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'name': self.name,
+ 'state': 'absent',
+ 'schema': '',
+ 'tblname': '',
+ 'tblspace': '',
+ 'valid': True,
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_query = ''
+
+ def get_info(self):
+ """Refresh index info.
+
+ Return self.info dict.
+ """
+ self.__exists_in_db()
+ return self.info
+
+ def __exists_in_db(self):
+ """Check index existence, collect info, add it to self.info dict.
+
+ Return True if the index exists, otherwise, return False.
+ """
+ query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
+ "pi.indisvalid, c.reloptions "
+ "FROM pg_catalog.pg_indexes AS i "
+ "JOIN pg_catalog.pg_class AS c "
+ "ON i.indexname = c.relname "
+ "JOIN pg_catalog.pg_index AS pi "
+ "ON c.oid = pi.indexrelid "
+ "WHERE i.indexname = %(name)s")
+
+ res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ name=self.name,
+ state='present',
+ schema=res[0][0],
+ tblname=res[0][1],
+ tblspace=res[0][2] if res[0][2] else '',
+ valid=res[0][3],
+ storage_params=res[0][4] if res[0][4] else [],
+ )
+ return True
+
+ else:
+ self.exists = False
+ return False
+
+ def create(self, tblname, idxtype, columns, cond, tblspace,
+ storage_params, concurrent=True, unique=False):
+ """Create PostgreSQL index.
+
+ Return True if success, otherwise, return False.
+
+ Args:
+ tblname (str) -- name of a table for the index
+ idxtype (str) -- type of the index like BTREE, BRIN, etc
+ columns (str) -- string of comma-separated columns that need to be covered by index
+ tblspace (str) -- tablespace for storing the index
+ storage_params (str) -- string of comma-separated storage parameters
+
+ Kwargs:
+ concurrent (bool) -- build index in concurrent mode, default True
+ """
+ if self.exists:
+ return False
+
+ if idxtype is None:
+ idxtype = "BTREE"
+
+ query = 'CREATE'
+
+ if unique:
+ query += ' UNIQUE'
+
+ query += ' INDEX'
+
+ if concurrent:
+ query += ' CONCURRENTLY'
+
+ query += ' "%s"' % self.name
+
+ query += ' ON "%s"."%s" ' % (self.schema, tblname)
+
+ query += 'USING %s (%s)' % (idxtype, columns)
+
+ if storage_params:
+ query += ' WITH (%s)' % storage_params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if cond:
+ query += ' WHERE %s' % cond
+
+ self.executed_query = query
+
+ return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+ def drop(self, cascade=False, concurrent=True):
+ """Drop PostgreSQL index.
+
+ Return True if success, otherwise, return False.
+
+ Args:
+ schema (str) -- name of the index schema
+
+ Kwargs:
+ cascade (bool) -- automatically drop objects that depend on the index,
+ default False
+ concurrent (bool) -- build index in concurrent mode, default True
+ """
+ if not self.exists:
+ return False
+
+ query = 'DROP INDEX'
+
+ if concurrent:
+ query += ' CONCURRENTLY'
+
+ query += ' "%s"."%s"' % (self.schema, self.name)
+
+ if cascade:
+ query += ' CASCADE'
+
+ self.executed_query = query
+
+ return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ idxname=dict(type='str', required=True, aliases=['name']),
+ db=dict(type='str', aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ concurrent=dict(type='bool', default=True),
+ unique=dict(type='bool', default=False),
+ table=dict(type='str'),
+ idxtype=dict(type='str', aliases=['type']),
+ columns=dict(type='list', elements='str', aliases=['column']),
+ cond=dict(type='str'),
+ session_role=dict(type='str'),
+ tablespace=dict(type='str'),
+ storage_params=dict(type='list', elements='str'),
+ cascade=dict(type='bool', default=False),
+ schema=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ idxname = module.params["idxname"]
+ state = module.params["state"]
+ concurrent = module.params["concurrent"]
+ unique = module.params["unique"]
+ table = module.params["table"]
+ idxtype = module.params["idxtype"]
+ columns = module.params["columns"]
+ cond = module.params["cond"]
+ tablespace = module.params["tablespace"]
+ storage_params = module.params["storage_params"]
+ cascade = module.params["cascade"]
+ schema = module.params["schema"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, idxname, session_role, schema, table, columns,
+ tablespace, storage_params, cond)
+
+ if concurrent and cascade:
+ module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive")
+
+ if unique and (idxtype and idxtype != 'btree'):
+ module.fail_json(msg="Only btree currently supports unique indexes")
+
+ if state == 'present':
+ if not table:
+ module.fail_json(msg="Table must be specified")
+ if not columns:
+ module.fail_json(msg="At least one column must be specified")
+ else:
+ if table or columns or cond or idxtype or tablespace:
+ module.fail_json(msg="Index %s is going to be removed, so it does not "
+ "make sense to pass a table name, columns, conditions, "
+ "index type, or tablespace" % idxname)
+
+ if cascade and state != 'absent':
+ module.fail_json(msg="cascade parameter used only with state=absent")
+
+ # Ensure psycopg2 libraries are available before connecting to DB:
+ ensure_required_libs(module)
+ conn_params = get_conn_params(module, module.params)
+ db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Set defaults:
+ changed = False
+
+ # Do job:
+ index = Index(module, cursor, schema, idxname)
+ kw = index.get_info()
+ kw['query'] = ''
+
+ #
+ # check_mode start
+ if module.check_mode:
+ if state == 'present' and index.exists:
+ kw['changed'] = False
+ module.exit_json(**kw)
+
+ elif state == 'present' and not index.exists:
+ kw['changed'] = True
+ module.exit_json(**kw)
+
+ elif state == 'absent' and not index.exists:
+ kw['changed'] = False
+ module.exit_json(**kw)
+
+ elif state == 'absent' and index.exists:
+ kw['changed'] = True
+ module.exit_json(**kw)
+ # check_mode end
+ #
+
+ if state == "present":
+ if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
+ module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
+
+ columns = ','.join(columns)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique)
+
+ if changed:
+ kw = index.get_info()
+ kw['state'] = 'present'
+ kw['query'] = index.executed_query
+
+ else:
+ changed = index.drop(cascade, concurrent)
+
+ if changed:
+ kw['state'] = 'absent'
+ kw['query'] = index.executed_query
+
+ if not kw['valid']:
+ db_connection.rollback()
+ module.warn("Index %s is invalid! ROLLBACK" % idxname)
+
+ if not concurrent:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py
new file mode 100644
index 000000000..55bb6ebd8
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py
@@ -0,0 +1,1111 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_info
+short_description: Gather information about PostgreSQL servers
+description:
+- Gathers information about PostgreSQL servers.
+options:
+ filter:
+ description:
+ - Limit the collected information by comma separated string or YAML list.
+ - Allowable values are C(version),
+ C(databases), C(in_recovery), C(settings), C(tablespaces), C(roles),
+ C(replications), C(repl_slots).
+ - By default, collects all subsets.
+ - You can use shell-style (fnmatch) wildcard to pass groups of values (see Examples).
+ - You can use '!' before value (for example, C(!settings)) to exclude it from the information.
+ - If you pass including and excluding values to the filter, for example, I(filter=!settings,ver),
+ the excluding values will be ignored.
+ type: list
+ elements: str
+ db:
+ description:
+ - Name of database to connect.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(false), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+
+attributes:
+ check_mode:
+ support: full
+
+seealso:
+- module: community.postgresql.postgresql_ping
+
+author:
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+# Display info from postgres hosts.
+# ansible postgres -m postgresql_info
+
+# Display only databases and roles info from all hosts using shell-style wildcards:
+# ansible all -m postgresql_info -a 'filter=dat*,rol*'
+
+# Display only replications and repl_slots info from standby hosts using shell-style wildcards:
+# ansible standby -m postgresql_info -a 'filter=repl*'
+
+# Display all info from databases hosts except settings:
+# ansible databases -m postgresql_info -a 'filter=!settings'
+
+- name: Collect PostgreSQL version and extensions
+ become: true
+ become_user: postgres
+ community.postgresql.postgresql_info:
+ filter: ver*,ext*
+
+- name: Collect all info except settings and roles
+ become: true
+ become_user: postgres
+ community.postgresql.postgresql_info:
+ filter: "!settings,!roles"
+
+# On FreeBSD with PostgreSQL 9.5 version and lower use pgsql user to become
+# and pass "postgres" as a database to connect to
+- name: Collect tablespaces and repl_slots info
+ become: true
+ become_user: pgsql
+ community.postgresql.postgresql_info:
+ db: postgres
+ filter:
+ - tablesp*
+ - repl_sl*
+
+- name: Collect all info except databases
+ become: true
+ become_user: postgres
+ community.postgresql.postgresql_info:
+ filter:
+ - "!databases"
+'''
+
+RETURN = r'''
+version:
+ description: Database server version U(https://www.postgresql.org/support/versioning/).
+ returned: always
+ type: dict
+ sample: { "version": { "major": 10, "minor": 6 } }
+ contains:
+ major:
+ description: Major server version.
+ returned: always
+ type: int
+ sample: 11
+ minor:
+ description: Minor server version.
+ returned: always
+ type: int
+ sample: 1
+ patch:
+ description: Patch server version.
+ returned: if supported
+ type: int
+ sample: 5
+ version_added: '1.2.0'
+ full:
+ description: Full server version.
+ returned: always
+ type: str
+ sample: '13.2'
+ version_added: '1.2.0'
+ raw:
+ description: Full output returned by ``SELECT version()``.
+ returned: always
+ type: str
+ sample: 'PostgreSQL 13.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 10.2.1 20201125 (Red Hat 10.2.1-9), 64-bit'
+ version_added: '1.2.0'
+in_recovery:
+ description: Indicates if the service is in recovery mode or not.
+ returned: always
+ type: bool
+ sample: false
+databases:
+ description: Information about databases.
+ returned: always
+ type: dict
+ sample:
+ - { "postgres": { "access_priv": "", "collate": "en_US.UTF-8",
+ "ctype": "en_US.UTF-8", "encoding": "UTF8", "owner": "postgres", "size": "7997 kB" } }
+ contains:
+ database_name:
+ description: Database name.
+ returned: always
+ type: dict
+ sample: template1
+ contains:
+ access_priv:
+ description: Database access privileges.
+ returned: always
+ type: str
+ sample: "=c/postgres_npostgres=CTc/postgres"
+ collate:
+ description:
+ - Database collation U(https://www.postgresql.org/docs/current/collation.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ ctype:
+ description:
+ - Database LC_CTYPE U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ encoding:
+ description:
+ - Database encoding U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: UTF8
+ owner:
+ description:
+ - Database owner U(https://www.postgresql.org/docs/current/sql-createdatabase.html).
+ returned: always
+ type: str
+ sample: postgres
+ size:
+ description: Database size in bytes.
+ returned: always
+ type: str
+ sample: 8189415
+ extensions:
+ description:
+ - Extensions U(https://www.postgresql.org/docs/current/sql-createextension.html).
+ returned: always
+ type: dict
+ sample:
+ - { "plpgsql": { "description": "PL/pgSQL procedural language",
+ "extversion": { "major": 1, "minor": 0, "raw": '1.0' } } }
+ contains:
+ extdescription:
+ description: Extension description.
+ returned: if existent
+ type: str
+ sample: PL/pgSQL procedural language
+ extversion:
+        description: Extension version.
+ returned: always
+ type: dict
+ contains:
+ major:
+ description: Extension major version.
+ returned: always
+ type: int
+ sample: 1
+ minor:
+ description: Extension minor version.
+ returned: always
+ type: int
+ sample: 0
+ raw:
+ description: Extension full version.
+ returned: always
+ type: str
+ sample: '1.0'
+ nspname:
+ description: Namespace where the extension is.
+ returned: always
+ type: str
+ sample: pg_catalog
+ languages:
+ description: Procedural languages U(https://www.postgresql.org/docs/current/xplang.html).
+ returned: always
+ type: dict
+ sample: { "sql": { "lanacl": "", "lanowner": "postgres" } }
+ contains:
+ lanacl:
+ description:
+ - Language access privileges
+ U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ lanowner:
+ description:
+ - Language owner U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: postgres
+ namespaces:
+ description:
+ - Namespaces (schema) U(https://www.postgresql.org/docs/current/sql-createschema.html).
+ returned: always
+ type: dict
+ sample: { "pg_catalog": { "nspacl": "{postgres=UC/postgres,=U/postgres}", "nspowner": "postgres" } }
+ contains:
+ nspacl:
+ description:
+ - Access privileges U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ nspowner:
+ description:
+ - Schema owner U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: postgres
+ publications:
+ description:
+ - Information about logical replication publications (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-publication.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample: { "pub1": { "ownername": "postgres", "puballtables": true, "pubinsert": true, "pubupdate": true } }
+ version_added: '0.2.0'
+ subscriptions:
+ description:
+ - Information about replication subscriptions (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-subscription.html).
+ - Content depends on PostgreSQL server version.
+ - The return values for the superuser and the normal user may differ
+ U(https://www.postgresql.org/docs/current/catalog-pg-subscription.html).
+ returned: if configured
+ type: dict
+ sample:
+ - { "my_subscription": {"ownername": "postgres", "subenabled": true, "subpublications": ["first_publication"] } }
+ version_added: '0.2.0'
+repl_slots:
+ description:
+ - Replication slots (available in 9.4 and later)
+ U(https://www.postgresql.org/docs/current/view-pg-replication-slots.html).
+ returned: if existent
+ type: dict
+ sample: { "slot0": { "active": false, "database": null, "plugin": null, "slot_type": "physical" } }
+ contains:
+ active:
+ description:
+ - True means that a receiver has connected to it, and it is currently reserving archives.
+ returned: always
+ type: bool
+ sample: true
+ database:
+ description: Database name this slot is associated with, or null.
+ returned: always
+ type: str
+ sample: acme
+ plugin:
+ description:
+ - Base name of the shared object containing the output plugin
+ this logical slot is using, or null for physical slots.
+ returned: always
+ type: str
+ sample: pgoutput
+ slot_type:
+ description: The slot type - physical or logical.
+ returned: always
+ type: str
+ sample: logical
+replications:
+ description:
+ - Information about the current replications by process PIDs
+ U(https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-STATS-VIEWS-TABLE).
+ returned: if pg_stat_replication view existent
+ type: dict
+ sample:
+ - { "76580": { "app_name": "standby1", "backend_start": "2019-02-03 00:14:33.908593+03",
+ "client_addr": "10.10.10.2", "client_hostname": "", "state": "streaming", "usename": "postgres" } }
+ contains:
+ usename:
+ description:
+ - Name of the user logged into this WAL sender process ('usename' is a column name in pg_stat_replication view).
+ returned: always
+ type: str
+ sample: replication_user
+ app_name:
+ description: Name of the application that is connected to this WAL sender.
+ returned: if existent
+ type: str
+ sample: acme_srv
+ client_addr:
+ description:
+ - IP address of the client connected to this WAL sender.
+ - If this field is null, it indicates that the client is connected
+ via a Unix socket on the server machine.
+ returned: always
+ type: str
+ sample: 10.0.0.101
+ client_hostname:
+ description:
+ - Host name of the connected client, as reported by a reverse DNS lookup of client_addr.
+ - This field will only be non-null for IP connections, and only when log_hostname is enabled.
+ returned: always
+ type: str
+ sample: dbsrv1
+ backend_start:
+ description: Time when this process was started, i.e., when the client connected to this WAL sender.
+ returned: always
+ type: str
+ sample: "2019-02-03 00:14:33.908593+03"
+ state:
+ description: Current WAL sender state.
+ returned: always
+ type: str
+ sample: streaming
+tablespaces:
+ description:
+ - Information about tablespaces U(https://www.postgresql.org/docs/current/catalog-pg-tablespace.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test": { "spcacl": "{postgres=C/postgres,andreyk=C/postgres}", "spcoptions": [ "seq_page_cost=1" ],
+ "spcowner": "postgres" } }
+ contains:
+ spcacl:
+ description: Tablespace access privileges.
+ returned: always
+ type: str
+ sample: "{postgres=C/postgres,andreyk=C/postgres}"
+ spcoptions:
+ description: Tablespace-level options.
+ returned: always
+ type: list
+ sample: [ "seq_page_cost=1" ]
+ spcowner:
+ description: Owner of the tablespace.
+ returned: always
+ type: str
+ sample: test_user
+roles:
+ description:
+ - Information about roles U(https://www.postgresql.org/docs/current/user-manag.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test_role": { "canlogin": true, "member_of": [ "user_ro" ], "superuser": false,
+ "valid_until": "9999-12-31T23:59:59.999999+00:00" } }
+ contains:
+ canlogin:
+ description: Login privilege U(https://www.postgresql.org/docs/current/role-attributes.html).
+ returned: always
+ type: bool
+ sample: true
+ member_of:
+ description:
+ - Role membership U(https://www.postgresql.org/docs/current/role-membership.html).
+ returned: always
+ type: list
+ sample: [ "read_only_users" ]
+ superuser:
+ description: User is a superuser or not.
+ returned: always
+ type: bool
+ sample: false
+ valid_until:
+ description:
+ - Password expiration date U(https://www.postgresql.org/docs/current/sql-alterrole.html).
+ returned: always
+ type: str
+ sample: "9999-12-31T23:59:59.999999+00:00"
+pending_restart_settings:
+ description:
+ - List of settings that are pending restart to be set.
+ returned: always
+ type: list
+ sample: [ "shared_buffers" ]
+settings:
+ description:
+ - Information about run-time server parameters
+ U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: dict
+ sample:
+ - { "work_mem": { "boot_val": "4096", "context": "user", "max_val": "2147483647",
+ "min_val": "64", "setting": "8192", "sourcefile": "/var/lib/pgsql/10/data/postgresql.auto.conf",
+ "unit": "kB", "vartype": "integer", "val_in_bytes": 4194304 } }
+ contains:
+ setting:
+ description: Current value of the parameter.
+ returned: always
+ type: str
+ sample: 49152
+ unit:
+ description: Implicit unit of the parameter.
+ returned: always
+ type: str
+ sample: kB
+ boot_val:
+ description:
+ - Parameter value assumed at server startup if the parameter is not otherwise set.
+ returned: always
+ type: str
+ sample: 4096
+ min_val:
+ description:
+ - Minimum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 64
+ max_val:
+ description:
+ - Maximum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 2147483647
+ sourcefile:
+ description:
+ - Configuration file the current value was set in.
+ - Null for values set from sources other than configuration files,
+        or when examined by a user who is neither a superuser nor a member of pg_read_all_settings.
+ - Helpful when using include directives in configuration files.
+ returned: always
+ type: str
+ sample: /var/lib/pgsql/10/data/postgresql.auto.conf
+ context:
+ description:
+ - Context required to set the parameter's value.
+ - For more information see U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: str
+ sample: user
+ vartype:
+ description:
+ - Parameter type (bool, enum, integer, real, or string).
+ returned: always
+ type: str
+ sample: integer
+ val_in_bytes:
+ description:
+ - Current value of the parameter in bytes.
+ returned: if supported
+ type: int
+ sample: 2147483647
+ pretty_val:
+ description:
+ - Value presented in the pretty form.
+ returned: always
+ type: str
+ sample: 2MB
+ pending_restart:
+ description:
+ - True if the value has been changed in the configuration file but needs a restart; or false otherwise.
+ - Returns only if C(settings) is passed.
+ returned: always
+ type: bool
+ sample: false
+'''
+
+import re
+from fnmatch import fnmatch
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
class PgDbConn(object):
    """Thin wrapper around a psycopg2 connection used by this module.

    Arguments:
        module (AnsibleModule): object that holds the connection
            parameters in ``module.params``.
    """

    def __init__(self, module):
        self.module = module
        self.db_conn = None
        self.cursor = None

    def connect(self, fail_on_conn=True):
        """Open a connection and return a DictCursor (or None on failure).

        Connection parameters are read from ``self.module``.  When
        ``fail_on_conn`` is False a failed connection yields None
        instead of failing the whole module run.
        """
        # psycopg2 must be importable before any connection attempt:
        ensure_required_libs(self.module)
        params = get_conn_params(self.module, self.module.params, warn_db_default=False)
        self.db_conn, dummy = connect_to_db(self.module, params, fail_on_conn=fail_on_conn)
        if self.db_conn is not None:
            return self.db_conn.cursor(cursor_factory=DictCursor)
        # Reached only when fail_on_conn is False and connecting failed:
        return None

    def reconnect(self, dbname):
        """Close the current connection and connect to ``dbname``.

        Arguments:
            dbname (str): database name to connect to.
        """
        if self.db_conn is not None:
            self.db_conn.close()

        # connect() reads the database name from module.params, so every
        # known alias must be updated before reconnecting:
        for key in ('db', 'database', 'login_db'):
            self.module.params[key] = dbname
        return self.connect(fail_on_conn=False)
+
+
+class PgClusterInfo(object):
+ """Class for collection information about a PostgreSQL instance.
+
+ Arguments:
+ module (AnsibleModule): Object of AnsibleModule class.
+ db_conn_obj (psycopg2.connect): PostgreSQL connection object.
+ """
+
    def __init__(self, module, db_conn_obj):
        """Connect to the configured database and prepare the result dict."""
        self.module = module
        # PgDbConn wrapper; kept around for per-database reconnects.
        self.db_obj = db_conn_obj
        # Cursor on the initially configured database.
        self.cursor = db_conn_obj.connect()
        # Aggregated result returned by collect(); each key is filled in
        # by the corresponding get_* method.
        self.pg_info = {
            "version": {},
            "in_recovery": None,
            "tablespaces": {},
            "databases": {},
            "replications": {},
            "repl_slots": {},
            "settings": {},
            "roles": {},
            "pending_restart_settings": [],
        }
+
    def collect(self, val_list=False):
        """Collect information based on 'filter' option.

        val_list is the list of requested subsets; items prefixed with
        '!' are exclusions.  Shell-style wildcards are honoured via
        fnmatch in both directions.
        """
        # Maps each supported subset name to the method that gathers it:
        subset_map = {
            "version": self.get_pg_version,
            "in_recovery": self.get_recovery_state,
            "tablespaces": self.get_tablespaces,
            "databases": self.get_db_info,
            "replications": self.get_repl_info,
            "repl_slots": self.get_rslot_info,
            "settings": self.get_settings,
            "roles": self.get_role_info,
        }

        incl_list = []
        excl_list = []
        # Notice: incl_list and excl_list
        # don't make sense together, therefore,
        # if incl_list is not empty, we collect
        # only values from it:
        if val_list:
            for i in val_list:
                if i[0] != '!':
                    incl_list.append(i)
                else:
                    excl_list.append(i.lstrip('!'))

        if incl_list:
            # Run only the subsets matching at least one include pattern:
            for s in subset_map:
                for i in incl_list:
                    if fnmatch(s, i):
                        subset_map[s]()
                        break
        elif excl_list:
            found = False
            # Collect info:
            for s in subset_map:
                for e in excl_list:
                    if fnmatch(s, e):
                        found = True

                if not found:
                    subset_map[s]()
                else:
                    # Subset excluded; reset the flag for the next subset:
                    found = False

        # Default behaviour, if include or exclude is not passed:
        else:
            # Just collect info for each item:
            for s in subset_map:
                subset_map[s]()

        # collect() owns the cursor/connection lifecycle:
        self.cursor.close()
        self.db_obj.db_conn.close()

        return self.pg_info
+
    def get_pub_info(self):
        """Get publication statistics.

        Returns a dict keyed by publication name; each value contains
        the remaining pg_publication columns plus the owner role name.
        """
        query = ("SELECT p.*, r.rolname AS ownername "
                 "FROM pg_catalog.pg_publication AS p "
                 "JOIN pg_catalog.pg_roles AS r "
                 "ON p.pubowner = r.oid")

        result = self.__exec_sql(query)

        if result:
            result = [dict(row) for row in result]
        else:
            # No publications defined in the current database:
            return {}

        publications = {}

        for elem in result:
            if not publications.get(elem['pubname']):
                publications[elem['pubname']] = {}

            # Copy every column except the one used as the dict key:
            for key, val in iteritems(elem):
                if key != 'pubname':
                    publications[elem['pubname']][key] = val

        return publications
+
    def get_subscr_info(self):
        """Get subscription statistics.

        Returns a nested dict of the form
        {dbname: {subname: {column: value}}}.
        """
        # The column list is fetched from the catalog rather than
        # hard-coded, so the query works regardless of which
        # pg_subscription columns the connected server provides:
        columns_sub_table = ("SELECT column_name "
                             "FROM information_schema.columns "
                             "WHERE table_schema = 'pg_catalog' "
                             "AND table_name = 'pg_subscription'")
        columns_result = self.__exec_sql(columns_sub_table)
        columns = ", ".join(["s.%s" % column[0] for column in columns_result])

        query = ("SELECT %s, r.rolname AS ownername, d.datname AS dbname "
                 "FROM pg_catalog.pg_subscription s "
                 "JOIN pg_catalog.pg_database d "
                 "ON s.subdbid = d.oid "
                 "JOIN pg_catalog.pg_roles AS r "
                 "ON s.subowner = r.oid" % columns)

        result = self.__exec_sql(query)

        if result:
            result = [dict(row) for row in result]
        else:
            # No subscriptions defined:
            return {}

        subscr_info = {}

        for elem in result:
            if not subscr_info.get(elem['dbname']):
                subscr_info[elem['dbname']] = {}

            if not subscr_info[elem['dbname']].get(elem['subname']):
                subscr_info[elem['dbname']][elem['subname']] = {}

            # Copy every column except the two used as dict keys:
            for key, val in iteritems(elem):
                if key not in ('subname', 'dbname'):
                    subscr_info[elem['dbname']][elem['subname']][key] = val

        return subscr_info
+
+ def get_tablespaces(self):
+ """Get information about tablespaces."""
+ # Check spcoption exists:
+ opt = self.__exec_sql("SELECT column_name "
+ "FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spcoptions'")
+
+ if not opt:
+ query = ("SELECT s.spcname, pg_catalog.pg_get_userbyid(s.spcowner) as rolname, s.spcacl "
+ "FROM pg_tablespace AS s ")
+ else:
+ query = ("SELECT s.spcname, pg_catalog.pg_get_userbyid(s.spcowner) as rolname, s.spcacl, s.spcoptions "
+ "FROM pg_tablespace AS s ")
+
+ res = self.__exec_sql(query)
+ ts_dict = {}
+ for i in res:
+ ts_name = i[0]
+ ts_info = dict(
+ spcowner=i[1],
+ spcacl=i[2] if i[2] else '',
+ )
+ if opt:
+ ts_info['spcoptions'] = i[3] if i[3] else []
+
+ ts_dict[ts_name] = ts_info
+
+ self.pg_info["tablespaces"] = ts_dict
+
    def get_ext_info(self):
        """Get information about existing extensions.

        Returns a dict keyed by extension name with parsed version,
        schema and description, or True when pg_extension is absent.
        """
        # Check that pg_extension exists:
        res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
                              "information_schema.tables "
                              "WHERE table_name = 'pg_extension')")
        if not res[0][0]:
            return True

        query = ("SELECT e.extname, e.extversion, n.nspname, c.description "
                 "FROM pg_catalog.pg_extension AS e "
                 "LEFT JOIN pg_catalog.pg_namespace AS n "
                 "ON n.oid = e.extnamespace "
                 "LEFT JOIN pg_catalog.pg_description AS c "
                 "ON c.objoid = e.oid "
                 "AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass")
        res = self.__exec_sql(query)
        ext_dict = {}
        for i in res:
            ext_ver_raw = i[1]

            # Versions that are not purely numeric/dotted (e.g. '1.2beta')
            # cannot be split into major/minor parts:
            if re.search(r'^([0-9]+([\-]*[0-9]+)?\.)*[0-9]+([\-]*[0-9]+)?$', i[1]) is None:
                ext_ver = [None, None]
            else:
                ext_ver = i[1].split('.')
                # Dash-suffixed components like '1-2' keep only the part
                # before the dash:
                if re.search(r'-', ext_ver[0]) is not None:
                    ext_ver = ext_ver[0].split('-')
                else:
                    try:
                        if re.search(r'-', ext_ver[1]) is not None:
                            ext_ver[1] = ext_ver[1].split('-')[0]
                    except IndexError:
                        # Single-component version, e.g. '2': no minor part.
                        ext_ver.append(None)

            ext_dict[i[0]] = dict(
                extversion=dict(
                    major=int(ext_ver[0]) if ext_ver[0] else None,
                    minor=int(ext_ver[1]) if ext_ver[1] else None,
                    raw=ext_ver_raw,
                ),
                nspname=i[2],
                description=i[3],
            )

        return ext_dict
+
+ def get_role_info(self):
+ """Get information about roles (in PgSQL groups and users are roles)."""
+ query = ("SELECT r.rolname, r.rolsuper, r.rolcanlogin, "
+ "r.rolvaliduntil, "
+ "ARRAY(SELECT b.rolname "
+ "FROM pg_catalog.pg_auth_members AS m "
+ "JOIN pg_catalog.pg_roles AS b ON (m.roleid = b.oid) "
+ "WHERE m.member = r.oid) AS memberof "
+ "FROM pg_catalog.pg_roles AS r "
+ "WHERE r.rolname !~ '^pg_'")
+
+ res = self.__exec_sql(query)
+ rol_dict = {}
+ for i in res:
+ rol_dict[i[0]] = dict(
+ superuser=i[1],
+ canlogin=i[2],
+ valid_until=i[3] if i[3] else '',
+ member_of=i[4] if i[4] else [],
+ )
+
+ self.pg_info["roles"] = rol_dict
+
+ def get_rslot_info(self):
+ """Get information about replication slots if exist."""
+ # Check that pg_replication_slots exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_replication_slots')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT slot_name, plugin, slot_type, database, "
+ "active FROM pg_replication_slots")
+ res = self.__exec_sql(query)
+
+ # If there is no replication:
+ if not res:
+ return True
+
+ rslot_dict = {}
+ for i in res:
+ rslot_dict[i[0]] = dict(
+ plugin=i[1],
+ slot_type=i[2],
+ database=i[3],
+ active=i[4],
+ )
+
+ self.pg_info["repl_slots"] = rslot_dict
+
    def get_settings(self):
        """Get server settings.

        Populates self.pg_info["settings"] and appends the names of
        settings awaiting a restart to "pending_restart_settings".
        """
        # The pending_restart column is not present on older servers;
        # check it exists before selecting it:
        pend_rest_col_exists = self.__exec_sql("SELECT 1 FROM information_schema.columns "
                                               "WHERE table_name = 'pg_settings' "
                                               "AND column_name = 'pending_restart'")
        if not pend_rest_col_exists:
            query = ("SELECT name, setting, unit, context, vartype, "
                     "boot_val, min_val, max_val, sourcefile "
                     "FROM pg_settings")
        else:
            query = ("SELECT name, setting, unit, context, vartype, "
                     "boot_val, min_val, max_val, sourcefile, pending_restart "
                     "FROM pg_settings")

        res = self.__exec_sql(query)

        set_dict = {}
        for i in res:
            val_in_bytes = None
            setting = i[1]
            if i[2]:
                unit = i[2]
            else:
                unit = ''

            # Normalize memory-sized settings to bytes:
            if unit == 'kB':
                val_in_bytes = int(setting) * 1024

            elif unit == '8kB':
                val_in_bytes = int(setting) * 1024 * 8

            elif unit == 'MB':
                val_in_bytes = int(setting) * 1024 * 1024

            # Negative settings (e.g. -1 sentinels) are clamped to 0:
            if val_in_bytes is not None and val_in_bytes < 0:
                val_in_bytes = 0

            setting_name = i[0]
            # SHOW renders the value in a human-friendly form:
            pretty_val = self.__get_pretty_val(setting_name)

            pending_restart = None
            if pend_rest_col_exists:
                pending_restart = i[9]

            set_dict[setting_name] = dict(
                setting=setting,
                unit=unit,
                context=i[3],
                vartype=i[4],
                boot_val=i[5] if i[5] else '',
                min_val=i[6] if i[6] else '',
                max_val=i[7] if i[7] else '',
                sourcefile=i[8] if i[8] else '',
                pretty_val=pretty_val,
            )
            if val_in_bytes is not None:
                set_dict[setting_name]['val_in_bytes'] = val_in_bytes

            if pending_restart is not None:
                set_dict[setting_name]['pending_restart'] = pending_restart
                if pending_restart:
                    self.pg_info["pending_restart_settings"].append(setting_name)

        self.pg_info["settings"] = set_dict
+
    def get_repl_info(self):
        """Get information about replication if the server is a primary."""
        # Check that pg_stat_replication exists:
        res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
                              "information_schema.tables "
                              "WHERE table_name = 'pg_stat_replication')")
        if not res[0][0]:
            return True

        query = ("SELECT r.pid, pg_catalog.pg_get_userbyid(r.usesysid) AS rolname, r.application_name, r.client_addr, "
                 "r.client_hostname, r.backend_start::text, r.state "
                 "FROM pg_stat_replication AS r ")
        res = self.__exec_sql(query)

        # If there is no replication:
        if not res:
            return True

        # Result is keyed by the walsender backend pid:
        repl_dict = {}
        for i in res:
            repl_dict[i[0]] = dict(
                usename=i[1],
                app_name=i[2] if i[2] else '',
                client_addr=i[3],
                client_hostname=i[4] if i[4] else '',
                backend_start=i[5],
                state=i[6],
            )

        self.pg_info["replications"] = repl_dict
+
+ def get_lang_info(self):
+ """Get information about current supported languages."""
+ query = ("SELECT l.lanname, pg_catalog.pg_get_userbyid(l.lanowner) AS rolname, l.lanacl "
+ "FROM pg_language AS l ")
+ res = self.__exec_sql(query)
+ lang_dict = {}
+ for i in res:
+ lang_dict[i[0]] = dict(
+ lanowner=i[1],
+ lanacl=i[2] if i[2] else '',
+ )
+
+ return lang_dict
+
+ def get_namespaces(self):
+ """Get information about namespaces."""
+ query = ("SELECT n.nspname, pg_catalog.pg_get_userbyid(n.nspowner) AS rolname, n.nspacl "
+ "FROM pg_catalog.pg_namespace AS n ")
+ res = self.__exec_sql(query)
+
+ nsp_dict = {}
+ for i in res:
+ nsp_dict[i[0]] = dict(
+ nspowner=i[1],
+ nspacl=i[2] if i[2] else '',
+ )
+
+ return nsp_dict
+
+ def get_pg_version(self):
+ """Get major and minor PostgreSQL server version."""
+ query = "SELECT version()"
+ raw = self.__exec_sql(query)[0][0]
+ full = raw.split()[1]
+ m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", full)
+
+ major = int(m.group(1))
+ minor = int(m.group(2))
+ patch = None
+ if m.group(3) is not None:
+ patch = int(m.group(3))
+
+ self.pg_info["version"] = dict(
+ major=major,
+ minor=minor,
+ full=full,
+ raw=raw,
+ )
+
+ if patch is not None:
+ self.pg_info["version"]["patch"] = patch
+
+ def get_recovery_state(self):
+ """Get if the service is in recovery mode."""
+ self.pg_info["in_recovery"] = self.__exec_sql("SELECT pg_is_in_recovery()")[0][0]
+
    def get_db_info(self):
        """Get information about every database on the server.

        Reconnects to each accessible database to gather its schemas,
        extensions, languages and (on 10+) publications/subscriptions.
        """
        # Following query returns:
        # Name, Owner, Encoding, Collate, Ctype, Access Priv, Size
        query = ("SELECT d.datname, "
                 "pg_catalog.pg_get_userbyid(d.datdba), "
                 "pg_catalog.pg_encoding_to_char(d.encoding), "
                 "d.datcollate, "
                 "d.datctype, "
                 "pg_catalog.array_to_string(d.datacl, E'\n'), "
                 "CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') "
                 "THEN pg_catalog.pg_database_size(d.datname)::text "
                 "ELSE 'No Access' END, "
                 "t.spcname "
                 "FROM pg_catalog.pg_database AS d "
                 "JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid "
                 "WHERE d.datname != 'template0'")

        res = self.__exec_sql(query)

        db_dict = {}
        for i in res:
            db_dict[i[0]] = dict(
                owner=i[1],
                encoding=i[2],
                collate=i[3],
                ctype=i[4],
                access_priv=i[5] if i[5] else '',
                size=i[6],
            )

        # pg_subscription is only queried on servers >= 10 (100000):
        if self.cursor.connection.server_version >= 100000:
            subscr_info = self.get_subscr_info()

        for datname in db_dict:
            # Per-database objects require connecting to that database:
            self.cursor = self.db_obj.reconnect(datname)
            if self.cursor is None:
                # that means we don't have permission to access these database
                db_dict[datname]['namespaces'] = {}
                db_dict[datname]['extensions'] = {}
                db_dict[datname]['languages'] = {}
                db_dict[datname]['error'] = "Could not connect to the database."
                continue
            db_dict[datname]['namespaces'] = self.get_namespaces()
            db_dict[datname]['extensions'] = self.get_ext_info()
            db_dict[datname]['languages'] = self.get_lang_info()
            if self.cursor.connection.server_version >= 100000:
                db_dict[datname]['publications'] = self.get_pub_info()
                db_dict[datname]['subscriptions'] = subscr_info.get(datname, {})

        self.pg_info["databases"] = db_dict
+
    def __get_pretty_val(self, setting):
        """Get setting's value represented by SHOW command.

        SHOW renders values in a human-friendly form (e.g. '2MB').
        """
        return self.__exec_sql('SHOW "%s"' % setting)[0][0]
+
+ def __exec_sql(self, query):
+ """Execute SQL and return the result."""
+ try:
+ self.cursor.execute(query)
+ res = self.cursor.fetchall()
+ if res:
+ return res
+ except Exception as e:
+ self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+ self.cursor.close()
+ return False
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point: gather PostgreSQL server facts and exit."""
    # Common PostgreSQL connection options plus module-specific ones:
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type='str', aliases=['login_db']),
        filter=dict(type='list', elements='str'),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # Trailing underscore avoids shadowing the builtin filter():
    filter_ = module.params['filter']

    if not module.params['trust_input']:
        # Check input for potentially dangerous elements:
        check_input(module, module.params['session_role'])

    db_conn_obj = PgDbConn(module)

    # Do job:
    pg_info = PgClusterInfo(module, db_conn_obj)

    module.exit_json(**pg_info.collect(filter_))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py
new file mode 100644
index 000000000..3d696dba6
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014, Jens Depuydt <http://www.jensd.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_lang
+short_description: Adds, removes or changes procedural languages with a PostgreSQL database
+description:
+- Adds, removes or changes procedural languages with a PostgreSQL database.
+- This module allows you to add a language, remove a language or change the trust
+ relationship with a PostgreSQL database.
+- The module can be used on the machine where executed or on a remote host.
+- When removing a language from a database, it is possible that dependencies prevent
+ the database from being removed. In that case, you can specify I(cascade=true) to
+ automatically drop objects that depend on the language (such as functions in the
+ language).
+- In case the language can't be deleted because it is required by the
+ database system, you can specify I(fail_on_drop=false) to ignore the error.
+- Be careful when marking a language as trusted since this could be a potential
+ security breach. Untrusted languages allow only users with the PostgreSQL superuser
+ privilege to use this language to create new functions.
+options:
+ lang:
+ description:
+ - Name of the procedural language to add, remove or change.
+ required: true
+ type: str
+ aliases:
+ - name
+ trust:
+ description:
+ - Make this language trusted for the selected db.
+ type: bool
+ default: 'false'
+ db:
+ description:
+ - Name of database to connect to and where the language will be added, removed or changed.
+ type: str
+ aliases:
+ - login_db
+ required: true
+ force_trust:
+ description:
+ - Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
+ - Use with care!
+ type: bool
+ default: 'false'
+ fail_on_drop:
+ description:
+ - If C(true), fail when removing a language. Otherwise just log and continue.
+ - In some cases, it is not possible to remove a language (used by the db-system).
+ - When dependencies block the removal, consider using I(cascade).
+ type: bool
+ default: 'true'
+ cascade:
+ description:
+    - When dropping a language, also delete objects that depend on this language.
+ - Only used when I(state=absent).
+ type: bool
+ default: 'false'
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified I(session_role) must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though the
+ I(session_role) were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The state of the language for the selected database.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ owner:
+ description:
+ - Set an owner for the language.
+ - Ignored when I(state=absent).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(lang), I(session_role),
+ I(owner) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL languages
+ description: General information about PostgreSQL languages.
+ link: https://www.postgresql.org/docs/current/xplang.html
+- name: CREATE LANGUAGE reference
+ description: Complete reference of the CREATE LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createlanguage.html
+- name: ALTER LANGUAGE reference
+ description: Complete reference of the ALTER LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterlanguage.html
+- name: DROP LANGUAGE reference
+ description: Complete reference of the DROP LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droplanguage.html
+
+attributes:
+ check_mode:
+ support: full
+
+author:
+- Jens Depuydt (@jensdepuydt)
+- Thomas O'Donnell (@andytom)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Add language pltclu to database testdb if it doesn't exist
+ community.postgresql.postgresql_lang: db=testdb lang=pltclu state=present
+
+# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
+# Marks the language as trusted if it exists but isn't trusted yet.
+# force_trust makes sure that the language will be marked as trusted
+- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
+ community.postgresql.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: present
+ trust: true
+ force_trust: true
+
+- name: Remove language pltclu from database testdb
+ community.postgresql.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+
+- name: Remove language pltclu from database testdb and remove all dependencies
+ community.postgresql.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ cascade: true
+
+- name: Remove language pltclu from database testdb but ignore errors if something prevents the removal
+ community.postgresql.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ fail_on_drop: false
+
+- name: In testdb change owner of mylang to alice
+ community.postgresql.postgresql_lang:
+ db: testdb
+ lang: mylang
+ owner: alice
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE LANGUAGE "acme"']
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+executed_queries = []
+
+
def lang_exists(cursor, lang):
    """Return True when the given language is installed in the db."""
    cursor.execute("SELECT lanname FROM pg_language WHERE lanname = %(lang)s",
                   {'lang': lang})
    return cursor.rowcount > 0
+
+
def lang_istrusted(cursor, lang):
    """Return the lanpltrusted flag of the given language."""
    cursor.execute("SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s",
                   {'lang': lang})
    row = cursor.fetchone()
    return row[0]
+
+
def lang_altertrust(cursor, lang, trust):
    """Changes if language is trusted for db.

    Appends the rendered statement to the module-level
    executed_queries list; always returns True.
    """
    query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s"
    cursor.execute(query, {'trust': trust, 'lang': lang})
    # mogrify() renders the query with its bound parameters for reporting:
    executed_queries.append(cursor.mogrify(query, {'trust': trust, 'lang': lang}))
    return True
+
+
def lang_add(cursor, lang, trust):
    """Adds language for db.

    Creates the language as TRUSTED when requested, records the
    statement in executed_queries and returns True.
    """
    if trust:
        query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
    else:
        query = 'CREATE LANGUAGE "%s"' % lang
    executed_queries.append(query)
    cursor.execute(query)
    return True
+
+
def lang_drop(cursor, lang, cascade):
    """Drops language for db.

    Wraps the DROP in a savepoint so a failure does not abort the
    surrounding transaction.  Returns True on success, False when the
    language could not be dropped.
    """
    cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
    try:
        if cascade:
            query = "DROP LANGUAGE \"%s\" CASCADE" % lang
        else:
            query = "DROP LANGUAGE \"%s\"" % lang
        executed_queries.append(query)
        cursor.execute(query)
    except Exception:
        # Roll back to the savepoint so the connection stays usable:
        cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
        cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
        return False
    cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
    return True
+
+
def get_lang_owner(cursor, lang):
    """Get language owner.

    Args:
        cursor (cursor): psycopg2 cursor object.
        lang (str): language name.
    """
    cursor.execute(("SELECT r.rolname FROM pg_language l "
                    "JOIN pg_roles r ON l.lanowner = r.oid "
                    "WHERE l.lanname = %(lang)s"),
                   {'lang': lang})
    owner_row = cursor.fetchone()
    return owner_row[0]
+
+
def set_lang_owner(cursor, lang, owner):
    """Set language owner.

    Args:
        cursor (cursor): psycopg2 cursor object.
        lang (str): language name.
        owner (str): name of new owner.

    Records the statement in executed_queries and returns True.
    """
    query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner)
    executed_queries.append(query)
    cursor.execute(query)
    return True
+
+
def main():
    """Entry point: add, remove or alter a procedural language."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type="str", required=True, aliases=["login_db"]),
        lang=dict(type="str", required=True, aliases=["name"]),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        # Declare boolean defaults as real booleans instead of the
        # strings "false"/"true".  AnsibleModule coerces such strings,
        # so behavior is unchanged, but plain booleans are the
        # idiomatic form and match the other modules in this
        # collection (e.g. trust_input in postgresql_membership).
        trust=dict(type="bool", default=False),
        force_trust=dict(type="bool", default=False),
        cascade=dict(type="bool", default=False),
        fail_on_drop=dict(type="bool", default=True),
        session_role=dict(type="str"),
        owner=dict(type="str"),
        trust_input=dict(type="bool", default=True)
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    db = module.params["db"]
    lang = module.params["lang"]
    state = module.params["state"]
    trust = module.params["trust"]
    force_trust = module.params["force_trust"]
    cascade = module.params["cascade"]
    fail_on_drop = module.params["fail_on_drop"]
    owner = module.params["owner"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, lang, session_role, owner)

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    # autocommit=False so check_mode changes can be rolled back:
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor()

    changed = False
    kw = {'db': db, 'lang': lang, 'trust': trust}

    if state == "present":
        if lang_exists(cursor, lang):
            lang_trusted = lang_istrusted(cursor, lang)
            # Flip the trust flag when it differs from the requested one:
            if (lang_trusted and not trust) or (not lang_trusted and trust):
                if module.check_mode:
                    changed = True
                else:
                    changed = lang_altertrust(cursor, lang, trust)
        else:
            if module.check_mode:
                changed = True
            else:
                changed = lang_add(cursor, lang, trust)
                # force_trust overrides the trust flag of the freshly
                # created language (see module documentation):
                if force_trust:
                    changed = lang_altertrust(cursor, lang, trust)

    else:
        if lang_exists(cursor, lang):
            if module.check_mode:
                changed = True
                kw['lang_dropped'] = True
            else:
                changed = lang_drop(cursor, lang, cascade)
                if fail_on_drop and not changed:
                    msg = ("unable to drop language, use cascade "
                           "to delete dependencies or fail_on_drop=false to ignore")
                    module.fail_json(msg=msg)
                kw['lang_dropped'] = changed

    if owner and state == 'present':
        if lang_exists(cursor, lang):
            if owner != get_lang_owner(cursor, lang):
                changed = set_lang_owner(cursor, lang, owner)

    if changed:
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()

    kw['changed'] = changed
    kw['queries'] = executed_queries
    db_connection.close()
    module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py
new file mode 100644
index 000000000..68d7db2ef
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_membership
+short_description: Add or remove PostgreSQL roles from groups
+description:
+- Adds or removes PostgreSQL roles from groups (other roles).
+- Users are roles with login privilege.
+- Groups are PostgreSQL roles usually without LOGIN privilege.
+- "Common use case:"
+- 1) add a new group (groups) by M(community.postgresql.postgresql_user) module with I(role_attr_flags=NOLOGIN)
+- 2) grant them desired privileges by M(community.postgresql.postgresql_privs) module
+- 3) add desired PostgreSQL users to the new group (groups) by this module
+options:
+ groups:
+ description:
+ - The list of groups (roles) that need to be granted to or revoked from I(target_roles).
+ required: true
+ type: list
+ elements: str
+ aliases:
+ - group
+ - source_role
+ - source_roles
+ target_roles:
+ description:
+ - The list of target roles (groups will be granted to them).
+ required: true
+ type: list
+ elements: str
+ aliases:
+ - target_role
+ - users
+ - user
+ fail_on_role:
+ description:
+ - If C(true), fail when group or target_role doesn't exist. If C(false), just warn and continue.
+ default: true
+ type: bool
+ state:
+ description:
+ - Membership state.
+    - I(state=present) implies the I(groups) must be granted to I(target_roles).
+ - I(state=absent) implies the I(groups) must be revoked from I(target_roles).
+ - I(state=exact) implies that I(target_roles) will be members of only the I(groups)
+ (available since community.postgresql 2.2.0).
+ Any other groups will be revoked from I(target_roles).
+ type: str
+ default: present
+ choices: [ absent, exact, present ]
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(groups),
+ I(target_roles), I(session_role) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+seealso:
+- module: community.postgresql.postgresql_user
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_owner
+- name: PostgreSQL role membership reference
+ description: Complete reference of the PostgreSQL role membership documentation.
+ link: https://www.postgresql.org/docs/current/role-membership.html
+- name: PostgreSQL role attributes reference
+ description: Complete reference of the PostgreSQL role attributes documentation.
+ link: https://www.postgresql.org/docs/current/role-attributes.html
+
+attributes:
+ check_mode:
+ support: full
+
+author:
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Grant role read_only to alice and bob
+ community.postgresql.postgresql_membership:
+ group: read_only
+ target_roles:
+ - alice
+ - bob
+ state: present
+
+# you can also use target_roles: alice,bob,etc to pass the role list
+
+- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist
+ community.postgresql.postgresql_membership:
+ groups:
+ - read_only
+ - exec_func
+ target_role: bob
+ fail_on_role: false
+ state: absent
+
+- name: >
+ Make sure alice and bob are members only of marketing and sales.
+ If they are members of other groups, they will be removed from those groups
+ community.postgresql.postgresql_membership:
+ group:
+ - marketing
+ - sales
+ target_roles:
+ - alice
+ - bob
+ state: exact
+
+- name: Make sure alice and bob do not belong to any groups
+ community.postgresql.postgresql_membership:
+ group: []
+ target_roles:
+ - alice
+ - bob
+ state: exact
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ "GRANT \"user_ro\" TO \"alice\"" ]
+granted:
+ description: Dict of granted groups and roles.
+ returned: if I(state=present)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+revoked:
+ description: Dict of revoked groups and roles.
+ returned: if I(state=absent)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+state:
+ description: Membership state that tried to be set.
+ returned: always
+ type: str
+ sample: "present"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ ensure_required_libs,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# Module execution.
+#
+
def main():
    """Entry point: grant/revoke/synchronize role memberships."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']),
        target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']),
        fail_on_role=dict(type='bool', default=True),
        state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
        db=dict(type='str', aliases=['login_db']),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    groups = module.params['groups']
    target_roles = module.params['target_roles']
    fail_on_role = module.params['fail_on_role']
    state = module.params['state']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']
    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, groups, target_roles, session_role)

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    # autocommit=False so check_mode can roll everything back:
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    ##############
    # Create the object and do main job:

    pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role)

    if state == 'present':
        pg_membership.grant()

    elif state == 'exact':
        # Grant the listed groups and revoke any others:
        pg_membership.match()

    elif state == 'absent':
        pg_membership.revoke()

    # Rollback if it's possible and check_mode:
    if module.check_mode:
        db_connection.rollback()
    else:
        db_connection.commit()

    cursor.close()
    db_connection.close()

    # Make return values:
    return_dict = dict(
        changed=pg_membership.changed,
        state=state,
        groups=pg_membership.groups,
        target_roles=pg_membership.target_roles,
        queries=pg_membership.executed_queries,
    )

    if state == 'present':
        return_dict['granted'] = pg_membership.granted
    elif state == 'absent':
        return_dict['revoked'] = pg_membership.revoked
    elif state == 'exact':
        return_dict['granted'] = pg_membership.granted
        return_dict['revoked'] = pg_membership.revoked

    module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py
new file mode 100644
index 000000000..934e3b957
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_owner
+short_description: Change an owner of PostgreSQL database object
+description:
+- Change an owner of PostgreSQL database object.
+- Also allows to reassign the ownership of database objects owned by a database role to another role.
+
+options:
+ new_owner:
+ description:
+ - Role (user/group) to set as an I(obj_name) owner.
+ type: str
+ required: true
+ obj_name:
+ description:
+ - Name of a database object to change ownership.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ obj_type:
+ description:
+ - Type of a database object.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ choices: [ database, function, matview, sequence, schema, table, tablespace, view ]
+ aliases:
+ - type
+ reassign_owned_by:
+ description:
+ - Caution - the ownership of all the objects within the specified I(db),
+ owned by this role(s) will be reassigned to I(new_owner).
+ - REASSIGN OWNED is often used to prepare for the removal of one or more roles.
+ - REASSIGN OWNED does not affect objects within other databases.
+ - Execute this command in each database that contains objects owned by a role that is to be removed.
+ - If role(s) exists, always returns changed True.
+ - Cannot reassign ownership of objects that are required by the database system.
+    - Mutually exclusive with I(obj_type).
+ type: list
+ elements: str
+ fail_on_role:
+ description:
+ - If C(true), fail when I(reassign_owned_by) role does not exist.
+ Otherwise just warn and continue.
+ - Mutually exclusive with I(obj_name) and I(obj_type).
+ default: true
+ type: bool
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(new_owner), I(obj_name),
+ I(reassign_owned_by), I(session_role) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+seealso:
+- module: community.postgresql.postgresql_user
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_membership
+- name: PostgreSQL REASSIGN OWNED command reference
+ description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation.
+ link: https://www.postgresql.org/docs/current/sql-reassign-owned.html
+
+attributes:
+ check_mode:
+ support: full
+
+author:
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+# Set owner as alice for function myfunc in database bar by ansible ad-hoc command:
+# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function"
+
+- name: The same as above by playbook
+ community.postgresql.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: myfunc
+ obj_type: function
+
+- name: Set owner as bob for table acme in database bar
+ community.postgresql.postgresql_owner:
+ db: bar
+ new_owner: bob
+ obj_name: acme
+ obj_type: table
+
+- name: Set owner as alice for view test_view in database bar
+ community.postgresql.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: test_view
+ obj_type: view
+
+- name: Set owner as bob for tablespace ssd in database foo
+ community.postgresql.postgresql_owner:
+ db: foo
+ new_owner: bob
+ obj_name: ssd
+ obj_type: tablespace
+
+- name: Reassign all databases owned by bob to alice and all objects in database bar owned by bob to alice
+ community.postgresql.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by: bob
+
+- name: Reassign all databases owned by bob or bill to alice and all objects in database bar owned by bob or bill to alice
+ community.postgresql.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by:
+ - bob
+ - bill
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgOwnership(object):
+
+    """Class for changing ownership of PostgreSQL objects.
+
+    Arguments:
+        module (AnsibleModule): Object of Ansible module class.
+        cursor (psycopg2.connect.cursor): Cursor object for interaction with the database.
+        role (str): Role name to set as a new owner of objects.
+
+    Important:
+        If you want to add handling of a new type of database objects:
+        1. Add a specific method for this like self.__set_db_owner(), etc.
+        2. Add a condition with a check of ownership for new type objects to self.__is_owner()
+        3. Add a condition with invocation of the specific method to self.set_owner()
+        4. Add the information to the module documentation
+        That's all.
+    """
+
+    def __init__(self, module, cursor, role):
+        self.module = module
+        self.cursor = cursor
+        # Fails the module early (via fail_json) if the new owner role does not exist.
+        self.check_role_exists(role)
+        self.role = role
+        self.changed = False
+        self.executed_queries = []
+        self.obj_name = ''
+        self.obj_type = ''
+
+    def check_role_exists(self, role, fail_on_role=True):
+        """Check the role exists or not.
+
+        Arguments:
+            role (str): Role name.
+            fail_on_role (bool): If True, fail when the role does not exist.
+                Otherwise just warn and continue.
+
+        Returns:
+            bool: True if the role exists, False otherwise (only reachable
+            when fail_on_role is False, since fail_json exits the module).
+        """
+        if not self.__role_exists(role):
+            if fail_on_role:
+                self.module.fail_json(msg="Role '%s' does not exist" % role)
+            else:
+                self.module.warn("Role '%s' does not exist, pass" % role)
+
+            return False
+
+        else:
+            return True
+
+    def reassign(self, old_owners, fail_on_role):
+        """Implements REASSIGN OWNED BY command.
+
+        If success, set self.changed as True.
+
+        Arguments:
+            old_owners (list): The ownership of all the objects within
+                the current database, and of all shared objects (databases, tablespaces),
+                owned by these roles will be reassigned to self.role.
+            fail_on_role (bool): If True, fail when a role from old_owners does not exist.
+                Otherwise just warn and continue.
+        """
+        roles = []
+        for r in old_owners:
+            # Double-quote each existing role name for safe use in the SQL statement below.
+            if self.check_role_exists(r, fail_on_role):
+                roles.append('"%s"' % r)
+
+        # Roles do not exist, nothing to do, exit:
+        if not roles:
+            return False
+
+        old_owners = ','.join(roles)
+
+        query = ['REASSIGN OWNED BY']
+        query.append(old_owners)
+        query.append('TO "%s"' % self.role)
+        query = ' '.join(query)
+
+        self.changed = exec_sql(self, query, return_bool=True)
+
+    def set_owner(self, obj_type, obj_name):
+        """Change owner of a database object.
+
+        Does nothing (returns False) when self.role already owns the object.
+
+        Arguments:
+            obj_type (str): Type of object (like database, table, view, etc.).
+            obj_name (str): Object name.
+        """
+        self.obj_name = obj_name
+        self.obj_type = obj_type
+
+        # if a new_owner is the object owner now,
+        # nothing to do:
+        if self.__is_owner():
+            return False
+
+        # Dispatch to the type-specific ALTER ... OWNER TO handler.
+        if obj_type == 'database':
+            self.__set_db_owner()
+
+        elif obj_type == 'function':
+            self.__set_func_owner()
+
+        elif obj_type == 'sequence':
+            self.__set_seq_owner()
+
+        elif obj_type == 'schema':
+            self.__set_schema_owner()
+
+        elif obj_type == 'table':
+            self.__set_table_owner()
+
+        elif obj_type == 'tablespace':
+            self.__set_tablespace_owner()
+
+        elif obj_type == 'view':
+            self.__set_view_owner()
+
+        elif obj_type == 'matview':
+            self.__set_mat_view_owner()
+
+    def __is_owner(self):
+        """Return True if self.role is the current object owner."""
+        if self.obj_type == 'table':
+            query = ("SELECT 1 FROM pg_tables "
+                     "WHERE tablename = %(obj_name)s "
+                     "AND tableowner = %(role)s")
+
+        elif self.obj_type == 'database':
+            query = ("SELECT 1 FROM pg_database AS d "
+                     "JOIN pg_roles AS r ON d.datdba = r.oid "
+                     "WHERE d.datname = %(obj_name)s "
+                     "AND r.rolname = %(role)s")
+
+        elif self.obj_type == 'function':
+            # NOTE: matches pg_proc by proname only, so overloaded functions
+            # sharing the same name are not distinguished here.
+            query = ("SELECT 1 FROM pg_proc AS f "
+                     "JOIN pg_roles AS r ON f.proowner = r.oid "
+                     "WHERE f.proname = %(obj_name)s "
+                     "AND r.rolname = %(role)s")
+
+        elif self.obj_type == 'sequence':
+            query = ("SELECT 1 FROM pg_class AS c "
+                     "JOIN pg_roles AS r ON c.relowner = r.oid "
+                     "WHERE c.relkind = 'S' AND c.relname = %(obj_name)s "
+                     "AND r.rolname = %(role)s")
+
+        elif self.obj_type == 'schema':
+            query = ("SELECT 1 FROM information_schema.schemata "
+                     "WHERE schema_name = %(obj_name)s "
+                     "AND schema_owner = %(role)s")
+
+        elif self.obj_type == 'tablespace':
+            query = ("SELECT 1 FROM pg_tablespace AS t "
+                     "JOIN pg_roles AS r ON t.spcowner = r.oid "
+                     "WHERE t.spcname = %(obj_name)s "
+                     "AND r.rolname = %(role)s")
+
+        elif self.obj_type == 'view':
+            query = ("SELECT 1 FROM pg_views "
+                     "WHERE viewname = %(obj_name)s "
+                     "AND viewowner = %(role)s")
+
+        elif self.obj_type == 'matview':
+            query = ("SELECT 1 FROM pg_matviews "
+                     "WHERE matviewname = %(obj_name)s "
+                     "AND matviewowner = %(role)s")
+
+        # 'query' is always bound here because obj_type is restricted by the
+        # module's argument 'choices' in main().
+        query_params = {'obj_name': self.obj_name, 'role': self.role}
+        return exec_sql(self, query, query_params, add_to_executed=False)
+
+    def __set_db_owner(self):
+        """Set the database owner."""
+        query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+        self.changed = exec_sql(self, query, return_bool=True)
+
+    def __set_func_owner(self):
+        """Set the function owner."""
+        # obj_name is interpolated unquoted here, unlike the other object types —
+        # presumably because it may carry an argument list (e.g. myfunc(int)); confirm.
+        query = 'ALTER FUNCTION %s OWNER TO "%s"' % (self.obj_name, self.role)
+        self.changed = exec_sql(self, query, return_bool=True)
+
+    def __set_seq_owner(self):
+        """Set the sequence owner."""
+        query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+                                                     self.role)
+        self.changed = exec_sql(self, query, return_bool=True)
+
+    def __set_schema_owner(self):
+        """Set the schema owner."""
+        query = 'ALTER SCHEMA %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'schema'),
+                                                   self.role)
+        self.changed = exec_sql(self, query, return_bool=True)
+
+    def __set_table_owner(self):
+        """Set the table owner."""
+        query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+                                                  self.role)
+        self.changed = exec_sql(self, query, return_bool=True)
+
+    def __set_tablespace_owner(self):
+        """Set the tablespace owner."""
+        query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+        self.changed = exec_sql(self, query, return_bool=True)
+
+    def __set_view_owner(self):
+        """Set the view owner."""
+        query = 'ALTER VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+                                                 self.role)
+        self.changed = exec_sql(self, query, return_bool=True)
+
+    def __set_mat_view_owner(self):
+        """Set the materialized view owner."""
+        query = 'ALTER MATERIALIZED VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+                                                              self.role)
+        self.changed = exec_sql(self, query, return_bool=True)
+
+    def __role_exists(self, role):
+        """Return True if role exists, otherwise return False."""
+        query_params = {'role': role}
+        query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s"
+        return exec_sql(self, query, query_params, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    """Module entry point: parse arguments, connect, and change or reassign ownership."""
+    argument_spec = postgres_common_argument_spec()
+    argument_spec.update(
+        new_owner=dict(type='str', required=True),
+        obj_name=dict(type='str'),
+        obj_type=dict(type='str', aliases=['type'], choices=[
+            'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']),
+        reassign_owned_by=dict(type='list', elements='str'),
+        fail_on_role=dict(type='bool', default=True),
+        db=dict(type='str', aliases=['login_db']),
+        session_role=dict(type='str'),
+        trust_input=dict(type='bool', default=True),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['obj_name', 'reassign_owned_by'],
+            ['obj_type', 'reassign_owned_by'],
+            ['obj_name', 'fail_on_role'],
+            ['obj_type', 'fail_on_role'],
+        ],
+        supports_check_mode=True,
+    )
+
+    new_owner = module.params['new_owner']
+    obj_name = module.params['obj_name']
+    obj_type = module.params['obj_type']
+    reassign_owned_by = module.params['reassign_owned_by']
+    fail_on_role = module.params['fail_on_role']
+    session_role = module.params['session_role']
+    trust_input = module.params['trust_input']
+    if not trust_input:
+        # Check input for potentially dangerous elements:
+        check_input(module, new_owner, obj_name, reassign_owned_by, session_role)
+
+    # Ensure psycopg2 libraries are available before connecting to DB:
+    ensure_required_libs(module)
+    conn_params = get_conn_params(module, module.params)
+    # autocommit=False so that check_mode can roll the transaction back below.
+    db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
+    cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+    ##############
+    # Create the object and do main job:
+    pg_ownership = PgOwnership(module, cursor, new_owner)
+
+    # if we want to change ownership:
+    if obj_name:
+        pg_ownership.set_owner(obj_type, obj_name)
+
+    # if we want to reassign objects owned by roles:
+    elif reassign_owned_by:
+        pg_ownership.reassign(reassign_owned_by, fail_on_role)
+
+    # Rollback if it's possible and check_mode:
+    if module.check_mode:
+        db_connection.rollback()
+    else:
+        db_connection.commit()
+
+    cursor.close()
+    db_connection.close()
+
+    module.exit_json(
+        changed=pg_ownership.changed,
+        queries=pg_ownership.executed_queries,
+    )
+
+
+# Standard Ansible module entry point: run main() only when executed directly.
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py
new file mode 100644
index 000000000..002e7817d
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py
@@ -0,0 +1,907 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) <sebastiaan.mannem@enterprisedb.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+'''
+This module is used to manage postgres pg_hba files with Ansible.
+'''
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_pg_hba
+short_description: Add, remove or modify a rule in a pg_hba file
+description:
+ - The fundamental function of the module is to create, or delete lines in pg_hba files.
+ - The lines in the file should be in a typical pg_hba form and lines should be unique per key (type, databases, users, source).
+ If they are not unique and the SID is 'the one to change', only one for I(state=present) or
+ none for I(state=absent) of the SID's will remain.
+extends_documentation_fragment: files
+options:
+ address:
+ description:
+ - The source address/net where the connections could come from.
+ - Will not be used for entries of I(type)=C(local).
+ - You can also use keywords C(all), C(samehost), and C(samenet).
+ default: samehost
+ type: str
+ aliases: [ source, src ]
+ backup:
+ description:
+ - If set, create a backup of the C(pg_hba) file before it is modified.
+ The location of the backup is returned in the (backup) variable by this module.
+ default: false
+ type: bool
+ backup_file:
+ description:
+ - Write backup to a specific backupfile rather than a temp file.
+ type: str
+ create:
+ description:
+ - Create an C(pg_hba) file if none exists.
+ - When set to false, an error is raised when the C(pg_hba) file doesn't exist.
+ default: false
+ type: bool
+ contype:
+ description:
+ - Type of the rule. If not set, C(postgresql_pg_hba) will only return contents.
+ type: str
+ choices: [ local, host, hostnossl, hostssl, hostgssenc, hostnogssenc ]
+ comment:
+ description:
+ - A comment that will be placed in the same line behind the rule. See also the I(keep_comments_at_rules) parameter.
+ type: str
+ version_added: '1.5.0'
+ databases:
+ description:
+ - Databases this line applies to.
+ default: all
+ type: str
+ dest:
+ description:
+ - Path to C(pg_hba) file to modify.
+ type: path
+ required: true
+ method:
+ description:
+ - Authentication method to be used.
+ type: str
+ choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256 , sspi, trust ]
+ default: md5
+ netmask:
+ description:
+ - The netmask of the source address.
+ type: str
+ options:
+ description:
+ - Additional options for the authentication I(method).
+ type: str
+ order:
+ description:
+ - The entries will be written out in a specific order.
+ With this option you can control by which field they are ordered first, second and last.
+ s=source, d=databases, u=users.
+ This option is deprecated since 2.9 and will be removed in community.postgresql 3.0.0.
+ Sortorder is now hardcoded to sdu.
+ type: str
+ default: sdu
+ choices: [ sdu, sud, dsu, dus, usd, uds ]
+ overwrite:
+ description:
+ - Remove all existing rules before adding rules. (Like I(state=absent) for all pre-existing rules.)
+ type: bool
+ default: false
+ keep_comments_at_rules:
+ description:
+ - If C(true), comments that stand together with a rule in one line are kept behind that line.
+ - If C(false), such comments are moved to the beginning of the file, like all other comments.
+ type: bool
+ default: false
+ version_added: '1.5.0'
+ rules:
+ description:
+ - A list of objects, specifying rules for the pg_hba.conf. Use this to manage multiple rules at once.
+ - "Each object can have the following keys (the 'rule-specific arguments'), which are treated the same as if they were arguments of this module:"
+ - C(address), C(comment), C(contype), C(databases), C(method), C(netmask), C(options), C(state), C(users)
+ - See also C(rules_behavior).
+ type: list
+ elements: dict
+ rules_behavior:
+ description:
+ - "Configure how the I(rules) argument works together with the rule-specific arguments outside the I(rules) argument."
+ - See I(rules) for the complete list of rule-specific arguments.
+ - When set to C(conflict), fail if I(rules) and, for example, I(address) are set.
+ - If C(combine), the normal rule-specific arguments are not defining a rule, but are used as defaults for the arguments in the I(rules) argument.
+ - Is used only when I(rules) is specified, ignored otherwise.
+ type: str
+ choices: [ conflict, combine ]
+ default: conflict
+ state:
+ description:
+ - The lines will be added/modified when C(state=present) and removed when C(state=absent).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ users:
+ description:
+ - Users this line applies to.
+ type: str
+ default: all
+
+notes:
+ - The default authentication assumes that on the host, you are either logging in as or
+ sudo'ing to an account with appropriate permissions to read and modify the file.
+ - This module also returns the pg_hba info. You can use this module to only retrieve it by only specifying I(dest).
+ The info can be found in the returned data under key pg_hba, being a list, containing a dict per rule.
+ - This module will sort resulting C(pg_hba) files if a rule change is required.
+ This could give unexpected results with manual created hba files, if it was improperly sorted.
+ For example a rule was created for a net first and for a ip in that net range next.
+ In that situation, the 'ip specific rule' will never hit, it is in the C(pg_hba) file obsolete.
+ After the C(pg_hba) file is rewritten by the M(community.postgresql.postgresql_pg_hba) module, the ip specific rule will be sorted above the range rule.
+ And then it will hit, which will give unexpected results.
+ - With the 'order' parameter you can control which field is used to sort first, next and last.
+
+seealso:
+- name: PostgreSQL pg_hba.conf file reference
+ description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
+ link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
+
+requirements:
+ - ipaddress
+
+attributes:
+ check_mode:
+ support: full
+ description: Can run in check_mode and return changed status prediction without modifying target
+ diff_mode:
+ support: full
+ description: Will return details on what has changed (or possibly needs changing in check_mode), when in diff mode
+
+author:
+- Sebastiaan Mannem (@sebasmannem)
+- Felix Hamme (@betanummeric)
+'''
+
+EXAMPLES = '''
+- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication
+ community.postgresql.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: joe,simon
+ source: ::1
+ databases: sales,logistics
+ method: peer
+ create: true
+
+- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication
+ community.postgresql.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: replication
+ source: 192.168.0.100/24
+ databases: replication
+ method: cert
+
+- name: Revoke access from local user mary on database mydb
+ community.postgresql.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: local
+ users: mary
+ databases: mydb
+ state: absent
+
+- name: Grant some_user access to some_db, comment that and keep other rule-specific comments attached to their rules
+ community.postgresql.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: some_user
+ databases: some_db
+ method: md5
+ source: ::/0
+ keep_comments_at_rules: true
+ comment: "this rule is an example"
+
+- name: Replace everything with a new set of rules
+ community.postgresql.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ overwrite: true # remove preexisting rules
+
+ # custom defaults
+ rules_behavior: combine
+ contype: hostssl
+ address: 2001:db8::/64
+ comment: added in bulk
+
+ rules:
+ - users: user1
+ databases: db1
+ # contype, address and comment come from custom default
+ - users: user2
+ databases: db2
+ comment: added with love # overwrite custom default for this rule
+ # contype and address come from custom default
+ - users: user3
+ databases: db3
+ # contype, address and comment come from custom default
+'''
+
+RETURN = r'''
+msgs:
+  description: List of textual messages describing what was done.
+ returned: always
+ type: list
+ sample:
+ "msgs": [
+ "Removing",
+ "Changed",
+ "Writing"
+ ]
+backup_file:
+ description: File that the original pg_hba file was backed up to.
+ returned: changed
+ type: str
+ sample: /tmp/pg_hba_jxobj_p
+pg_hba:
+ description: List of the pg_hba rules as they are configured in the specified hba file.
+ returned: always
+ type: list
+ sample:
+ "pg_hba": [
+ {
+ "db": "all",
+ "method": "md5",
+ "src": "samehost",
+ "type": "host",
+ "usr": "all"
+ }
+ ]
+'''
+
+import os
+import re
+import traceback
+
+IPADDRESS_IMP_ERR = None
+try:
+ import ipaddress
+except ImportError:
+ IPADDRESS_IMP_ERR = traceback.format_exc()
+
+import tempfile
+import shutil
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+# from ansible.module_utils.postgres import postgres_common_argument_spec
+
+PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer",
+ "ldap", "radius", "cert", "pam", "scram-sha-256"]
+PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl", "hostgssenc", "hostnogssenc"]
+PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"]
+PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options']
+
+WHITESPACES_RE = re.compile(r'\s+')
+
+
+class PgHbaError(Exception):
+    '''
+    Base exception for all errors raised while parsing or managing a pg_hba file.
+    '''
+
+
+class PgHbaRuleError(PgHbaError):
+    '''
+    This exception is raised for errors concerning a single pg_hba rule.
+    '''
+
+
+class PgHbaRuleChanged(PgHbaRuleError):
+    '''
+    This exception is raised when a new parsed rule is a changed version of an existing rule.
+
+    It is also raised and caught internally by PgHba.add_rule as control flow
+    to signal that a rule must be (re)stored.
+    '''
+
+
+class PgHbaValueError(PgHbaError):
+    '''
+    This exception is raised when a value in a pg_hba line is invalid
+    (for example an unknown connection type or auth method while parsing).
+    '''
+
+
+class PgHbaRuleValueError(PgHbaRuleError):
+    '''
+    This exception is raised when a rule contains an invalid value
+    (for example an unknown auth method or connection type).
+    '''
+
+
+class PgHba(object):
+    """
+    PgHba object to read/write entries to/from.
+    pg_hba_file - the pg_hba file almost always /etc/pg_hba
+    """
+
+    def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False, keep_comments_at_rules=False):
+        if order not in PG_HBA_ORDERS:
+            msg = "invalid order setting {0} (should be one of '{1}')."
+            raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS)))
+        self.pg_hba_file = pg_hba_file
+        self.rules = None
+        self.comment = None
+        self.order = order
+        self.backup = backup
+        self.last_backup = None
+        self.create = create
+        self.keep_comments_at_rules = keep_comments_at_rules
+        self.unchanged()
+        # self.databases will be updated by add_rule and gives some idea of the number of databases
+        # (at least that are handled by this pg_hba)
+        self.databases = set(['postgres', 'template0', 'template1'])
+
+        # self.users will be updated by add_rule and gives some idea of the number of users
+        # (at least that are handled by this pg_hba) since this might also be groups with multiple
+        # users, this might be totally off, but at least it is some info...
+        self.users = set(['postgres'])
+
+        self.preexisting_rules = None
+        self.read()
+
+    def clear_rules(self):
+        # Drop all rules; used by the 'overwrite' option of the module.
+        self.rules = {}
+
+    def unchanged(self):
+        '''
+        This method resets self.diff to a empty default
+        '''
+        self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []},
+                     'after': {'file': self.pg_hba_file, 'pg_hba': []}}
+
+    def read(self):
+        '''
+        Read in the pg_hba from the system
+        '''
+        self.rules = {}
+        self.comment = []
+        # read the pg_hbafile
+        try:
+            with open(self.pg_hba_file, 'r') as file:
+                for line in file:
+                    # split into line and comment
+                    line = line.strip()
+                    comment = None
+                    if '#' in line:
+                        line, comment = line.split('#', 1)
+                        if comment == '':
+                            comment = None
+                    line = line.rstrip()
+                    # if there is just a comment, save it
+                    if line == '':
+                        if comment is not None:
+                            self.comment.append('#' + comment)
+                    else:
+                        if comment is not None and not self.keep_comments_at_rules:
+                            # save the comment independent of the line
+                            self.comment.append('#' + comment)
+                            comment = None
+                        try:
+                            self.add_rule(PgHbaRule(line=line, comment=comment))
+                        except PgHbaRuleError:
+                            # Unparsable rule lines are silently skipped on read.
+                            pass
+            self.unchanged()
+            # Snapshot the parsed rules so changed() can compare against them later.
+            self.preexisting_rules = dict(self.rules)
+        except IOError:
+            # Missing/unreadable file: start empty (write() enforces the 'create' option).
+            pass
+
+    def write(self, backup_file=''):
+        '''
+        This method writes the PgHba rules (back) to a file.
+
+        Returns False without touching the file when nothing changed, True otherwise.
+        Optionally backs up the original file first (to backup_file or a temp file).
+        '''
+        if not self.changed():
+            return False
+
+        contents = self.render()
+        if self.pg_hba_file:
+            if not (os.path.isfile(self.pg_hba_file) or self.create):
+                raise PgHbaError("pg_hba file '{0}' doesn't exist. "
+                                 "Use create option to autocreate.".format(self.pg_hba_file))
+            if self.backup and os.path.isfile(self.pg_hba_file):
+                if backup_file:
+                    self.last_backup = backup_file
+                else:
+                    _backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba')
+                shutil.copy(self.pg_hba_file, self.last_backup)
+            fileh = open(self.pg_hba_file, 'w')
+        else:
+            # No target file configured: write to a throwaway temp file.
+            filed, _path = tempfile.mkstemp(prefix='pg_hba')
+            fileh = os.fdopen(filed, 'w')
+
+        fileh.write(contents)
+        self.unchanged()
+        fileh.close()
+        return True
+
+    def add_rule(self, rule):
+        '''
+        This method can be used to add a rule to the list of rules in this PgHba object
+
+        A rule replaces an existing rule with the same key only when any of their
+        non-'line' fields differ; PgHbaRuleChanged is used internally to signal that.
+        '''
+        key = rule.key()
+        try:
+            try:
+                oldrule = self.rules[key]
+            except KeyError:
+                # No rule under this key yet: treat as changed so it gets stored.
+                raise PgHbaRuleChanged
+            ekeys = set(list(oldrule.keys()) + list(rule.keys()))
+            # NOTE(review): assumes at least one of the two rules carries a 'line'
+            # entry; set.remove raises KeyError otherwise — confirm with callers.
+            ekeys.remove('line')
+            for k in ekeys:
+                if oldrule.get(k) != rule.get(k):
+                    raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule))
+        except PgHbaRuleChanged:
+            self.rules[key] = rule
+            self.diff['after']['pg_hba'].append(rule.line())
+            if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']:
+                databases = set(rule['db'].split(','))
+                self.databases.update(databases)
+            if rule['usr'] != 'all':
+                user = rule['usr']
+                if user[0] == '+':
+                    # '+name' means a group membership check; track the bare name.
+                    user = user[1:]
+                self.users.add(user)
+
+    def remove_rule(self, rule):
+        '''
+        This method can be used to find and remove a rule. It doesn't look for the exact rule, only
+        the rule with the same key.
+        '''
+        keys = rule.key()
+        try:
+            del self.rules[keys]
+            self.diff['before']['pg_hba'].append(rule.line())
+        except KeyError:
+            # Rule not present: removal is a no-op.
+            pass
+
+    def get_rules(self, with_lines=False):
+        '''
+        This method returns all the rules of the PgHba object
+
+        Generator yielding one plain dict per rule, sorted; when with_lines is
+        True each dict also carries the rendered 'line'.
+        '''
+        rules = sorted(self.rules.values())
+        for rule in rules:
+            ret = {}
+            for key, value in rule.items():
+                ret[key] = value
+            if not with_lines:
+                if 'line' in ret:
+                    del ret['line']
+            else:
+                ret['line'] = rule.line()
+
+            yield ret
+
+    def render(self):
+        '''
+        This method renders the content of the PgHba rules and comments.
+        The returning value can be used directly to write to a new file.
+        '''
+        comment = '\n'.join(self.comment)
+        rule_lines = []
+        for rule in self.get_rules(with_lines=True):
+            if 'comment' in rule:
+                rule_lines.append(rule['line'] + '\t#' + rule['comment'])
+            else:
+                rule_lines.append(rule['line'])
+        result = comment + '\n' + '\n'.join(rule_lines)
+        # End it properly with a linefeed (if not already).
+        if result and result[-1] not in ['\n', '\r']:
+            result += '\n'
+        return result
+
+    def changed(self):
+        '''
+        This method can be called to detect if the PgHba file has been changed.
+        '''
+        if not self.preexisting_rules and not self.rules:
+            return False
+        return self.preexisting_rules != self.rules
+
+
+class PgHbaRule(dict):
+ '''
+ This class represents one rule as defined in a line in a PgHbaFile.
+ '''
+
+ def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None,
+ method=None, options=None, line=None, comment=None):
+ '''
+ This function can be called with a comma seperated list of databases and a comma seperated
+ list of users and it will act as a generator that returns a expanded list of rules one by
+ one.
+ '''
+
+ super(PgHbaRule, self).__init__()
+
+ if line:
+ # Read values from line if parsed
+ self.fromline(line)
+
+ if comment:
+ self['comment'] = comment
+
+ # read rule cols from parsed items
+ rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options]))
+ for key, value in rule.items():
+ if value:
+ self[key] = value
+
+ # Some sanity checks
+ for key in ['method', 'type']:
+ if key not in self:
+ raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self))
+
+ if self['method'] not in PG_HBA_METHODS:
+ msg = "invalid method {0} (should be one of '{1}')."
+ raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS)))
+
+ if self['type'] not in PG_HBA_TYPES:
+ msg = "invalid connection type {0} (should be one of '{1}')."
+ raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES)))
+
+ if self['type'] == 'local':
+ self.unset('src')
+ self.unset('mask')
+ elif 'src' not in self:
+ raise PgHbaRuleError('Missing src in rule {1}'.format(self))
+ elif '/' in self['src']:
+ self.unset('mask')
+ else:
+ self['src'] = str(self.source())
+ self.unset('mask')
+
+    def unset(self, key):
+        '''
+        This method is used to unset certain columns if they exist
+
+        Safe no-op when the key is absent.
+        '''
+        if key in self:
+            del self[key]
+
+ def line(self):
+ '''
+ This method can be used to return (or generate) the line
+ '''
+ try:
+ return self['line']
+ except KeyError:
+ self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()])
+ return self['line']
+
+ def fromline(self, line):
+ '''
+ split into 'type', 'db', 'usr', 'src', 'mask', 'method', 'options' cols
+ '''
+ if WHITESPACES_RE.sub('', line) == '':
+ # empty line. skip this one...
+ return
+ cols = WHITESPACES_RE.split(line)
+ if len(cols) < 4:
+ msg = "Rule {0} has too few columns."
+ raise PgHbaValueError(msg.format(line))
+ if cols[0] not in PG_HBA_TYPES:
+ msg = "Rule {0} has unknown type: {1}."
+ raise PgHbaValueError(msg.format(line, cols[0]))
+ if cols[0] == 'local':
+ cols.insert(3, None) # No address
+ cols.insert(3, None) # No IP-mask
+ if len(cols) < 6:
+ cols.insert(4, None) # No IP-mask
+ elif cols[5] not in PG_HBA_METHODS:
+ cols.insert(4, None) # No IP-mask
+ if cols[5] not in PG_HBA_METHODS:
+ raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5]))
+
+ if len(cols) < 7:
+ cols.insert(6, None) # No auth-options
+ else:
+ cols[6] = " ".join(cols[6:]) # combine all auth-options
+ rule = dict(zip(PG_HBA_HDR, cols[:7]))
+ for key, value in rule.items():
+ if value:
+ self[key] = value
+
+ def key(self):
+ '''
+ This method can be used to get the key from a rule.
+ '''
+ if self['type'] == 'local':
+ source = 'local'
+ else:
+ source = str(self.source())
+ return (source, self['db'], self['usr'])
+
+ def source(self):
+ '''
+ This method is used to get the source of a rule as an ipaddress object if possible.
+ '''
+ if 'mask' in self.keys():
+ try:
+ ipaddress.ip_address(u'{0}'.format(self['src']))
+ except ValueError:
+ raise PgHbaValueError('Mask was specified, but source "{0}" '
+ 'is not valid ip'.format(self['src']))
+ # ipaddress module cannot work with ipv6 netmask, so lets convert it to prefixlen
+ # furthermore ipv4 with bad netmask throws 'Rule {} doesn't seem to be an ip, but has a
+ # mask error that doesn't seem to describe what is going on.
+ try:
+ mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask']))
+ except ValueError:
+ raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask']))
+ binvalue = "{0:b}".format(int(mask_as_ip))
+ if '01' in binvalue:
+ raise PgHbaValueError('IP mask {0} seems invalid '
+ '(binary value has 1 after 0)'.format(self['mask']))
+ prefixlen = binvalue.count('1')
+ sourcenw = '{0}/{1}'.format(self['src'], prefixlen)
+ try:
+ return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False)
+ except ValueError:
+ raise PgHbaValueError('{0} is not valid address range'.format(sourcenw))
+
+ try:
+ return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False)
+ except ValueError:
+ return self['src']
+
+ def __lt__(self, other):
+ """This function helps sorted to decide how to sort.
+
+ It just checks itself against the other and decides on some key values
+ if it should be sorted higher or lower in the list.
+ The way it works:
+ For networks, every 1 in 'netmask in binary' makes the subnet more specific.
+ Therefore I chose to use prefix as the weight.
+ So a single IP (/32) should have twice the weight of a /16 network.
+ To keep everything in the same weight scale,
+ - for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip)
+ - for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip)
+ Therefore for ipv4, we use prefixlen (0-32) * 4 for weight,
+ which corresponds to ipv6 (0-128).
+ """
+ myweight = self.source_weight()
+ hisweight = other.source_weight()
+ if myweight != hisweight:
+ return myweight > hisweight
+
+ myweight = self.db_weight()
+ hisweight = other.db_weight()
+ if myweight != hisweight:
+ return myweight < hisweight
+
+ myweight = self.user_weight()
+ hisweight = other.user_weight()
+ if myweight != hisweight:
+ return myweight < hisweight
+ try:
+ return self['src'] < other['src']
+ except TypeError:
+ return self.source_type_weight() < other.source_type_weight()
+ except Exception:
+ # When all else fails, just compare the exact line.
+ return self.line() < other.line()
+
+ def source_weight(self):
+ """Report the weight of this source net.
+
+ Basically this is the netmask, where IPv4 is normalized to IPv6
+ (IPv4/32 has the same weight as IPv6/128).
+ """
+ if self['type'] == 'local':
+ return 130
+
+ sourceobj = self.source()
+ if isinstance(sourceobj, ipaddress.IPv4Network):
+ return sourceobj.prefixlen * 4
+ if isinstance(sourceobj, ipaddress.IPv6Network):
+ return sourceobj.prefixlen
+ if isinstance(sourceobj, str):
+ # You can also write all to match any IP address,
+ # samehost to match any of the server's own IP addresses,
+ # or samenet to match any address in any subnet that the server is connected to.
+ if sourceobj == 'all':
+ # (all is considered the full range of all ips, which has a weight of 0)
+ return 0
+ if sourceobj == 'samehost':
+ # (sort samehost second after local)
+ return 129
+ if sourceobj == 'samenet':
+ # Might write some fancy code to determine all prefix's
+ # from all interfaces and find a sane value for this one.
+ # For now, let's assume IPv4/24 or IPv6/96 (both have weight 96).
+ return 96
+ if sourceobj[0] == '.':
+ # suffix matching (domain name), let's assume a very large scale
+ # and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64).
+ return 64
+ # hostname, let's assume only one host matches, which is
+ # IPv4/32 or IPv6/128 (both have weight 128)
+ return 128
+ raise PgHbaValueError('Cannot deduct the source weight of this source {1}'.format(sourceobj))
+
+ def source_type_weight(self):
+ """Give a weight on the type of this source.
+
+ Basically make sure that IPv6Networks are sorted higher than IPv4Networks.
+ This is a 'when all else fails' solution in __lt__.
+ """
+ if self['type'] == 'local':
+ return 3
+
+ sourceobj = self.source()
+ if isinstance(sourceobj, ipaddress.IPv4Network):
+ return 2
+ if isinstance(sourceobj, ipaddress.IPv6Network):
+ return 1
+ if isinstance(sourceobj, str):
+ return 0
+ raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj))
+
+ def db_weight(self):
+ """Report the weight of the database.
+
+ Normally, just 1, but for replication this is 0, and for 'all', this is more than 2.
+ """
+ if self['db'] == 'all':
+ return 100000
+ if self['db'] == 'replication':
+ return 0
+ if self['db'] in ['samerole', 'samegroup']:
+ return 1
+ return 1 + self['db'].count(',')
+
+ def user_weight(self):
+ """Report weight when comparing users."""
+ if self['usr'] == 'all':
+ return 1000000
+ return 1
+
+
def main():
    '''
    This function is the main function of this module.

    It parses the module arguments, reads (or creates) the pg_hba file,
    applies the requested rule additions/removals and writes the result
    back, honoring check mode and optional backups.
    '''
    # argument_spec = postgres_common_argument_spec()
    argument_spec = dict()
    argument_spec.update(
        address=dict(type='str', default='samehost', aliases=['source', 'src']),
        backup=dict(type='bool', default=False),
        backup_file=dict(type='str'),
        contype=dict(type='str', default=None, choices=PG_HBA_TYPES),
        comment=dict(type='str', default=None),
        create=dict(type='bool', default=False),
        databases=dict(type='str', default='all'),
        dest=dict(type='path', required=True),
        method=dict(type='str', default='md5', choices=PG_HBA_METHODS),
        netmask=dict(type='str'),
        options=dict(type='str'),
        order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS,
                   removed_in_version='3.0.0', removed_from_collection='community.postgresql'),
        keep_comments_at_rules=dict(type='bool', default=False),
        state=dict(type='str', default="present", choices=["absent", "present"]),
        users=dict(type='str', default='all'),
        rules=dict(type='list', elements='dict'),
        rules_behavior=dict(type='str', default='conflict', choices=['combine', 'conflict']),
        overwrite=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        supports_check_mode=True
    )
    if IPADDRESS_IMP_ERR is not None:
        module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)

    # In check mode, pretend the file may be created so a diff can be
    # computed, but never take a backup since nothing is written.
    create = bool(module.params["create"] or module.check_mode)
    if module.check_mode:
        backup = False
    else:
        backup = module.params['backup']
    dest = module.params["dest"]
    order = module.params["order"]
    keep_comments_at_rules = module.params["keep_comments_at_rules"]
    rules = module.params["rules"]
    rules_behavior = module.params["rules_behavior"]
    overwrite = module.params["overwrite"]

    ret = {'msgs': []}
    try:
        pg_hba = PgHba(dest, order, backup=backup, create=create, keep_comments_at_rules=keep_comments_at_rules)
    except PgHbaError as error:
        module.fail_json(msg='Error reading file:\n{0}'.format(error))

    if overwrite:
        # Drop everything read from disk; only the rules supplied to this
        # invocation will remain.
        pg_hba.clear_rules()

    # Module parameters that describe a single rule (used both for the
    # single-rule form and as per-rule defaults for the 'rules' list form).
    rule_keys = [
        'address',
        'comment',
        'contype',
        'databases',
        'method',
        'netmask',
        'options',
        'state',
        'users'
    ]
    if rules is None:
        # Single-rule form: build one rule dict from the top-level params.
        single_rule = dict()
        for key in rule_keys:
            single_rule[key] = module.params[key]
        rules = [single_rule]
    else:
        if rules_behavior == 'conflict':
            # it's ok if the module default is set
            used_rule_keys = [key for key in rule_keys if module.params[key] != argument_spec[key].get('default', None)]
            if len(used_rule_keys) > 0:
                module.fail_json(msg='conflict: either argument "rules_behavior" needs to be changed or "rules" must'
                                     ' not be set or {0} must not be set'.format(used_rule_keys))

        new_rules = []
        for index, rule in enumerate(rules):
            # alias handling
            address_keys = [key for key in rule.keys() if key in ('address', 'source', 'src')]
            if len(address_keys) > 1:
                module.fail_json(msg='rule number {0} of the "rules" argument ({1}) uses ambiguous settings: '
                                     '{2} are aliases, only one is allowed'.format(index, address_keys, rule))
            if len(address_keys) == 1:
                address = rule[address_keys[0]]
                del rule[address_keys[0]]
                rule['address'] = address

            # Fill in missing per-rule keys from the module parameters
            # ('combine') or from the declared defaults ('conflict').
            for key in rule_keys:
                if key not in rule:
                    if rules_behavior == 'combine':
                        # use user-supplied defaults or module defaults
                        rule[key] = module.params[key]
                    else:
                        # use module defaults
                        rule[key] = argument_spec[key].get('default', None)
            new_rules.append(rule)
        rules = new_rules

    for rule in rules:
        # Without a connection type there is nothing to add or remove.
        if rule.get('contype', None) is None:
            continue

        try:
            # Comma separated databases/users expand to one rule per pair.
            for database in rule['databases'].split(','):
                for user in rule['users'].split(','):
                    pg_hba_rule = PgHbaRule(rule['contype'], database, user, rule['address'], rule['netmask'],
                                            rule['method'], rule['options'], comment=rule['comment'])
                    if rule['state'] == "present":
                        ret['msgs'].append('Adding rule {0}'.format(pg_hba_rule))
                        pg_hba.add_rule(pg_hba_rule)
                    else:
                        ret['msgs'].append('Removing rule {0}'.format(pg_hba_rule))
                        pg_hba.remove_rule(pg_hba_rule)
        except PgHbaError as error:
            module.fail_json(msg='Error modifying rules:\n{0}'.format(error))
    file_args = module.load_file_common_arguments(module.params)
    ret['changed'] = changed = pg_hba.changed()
    if changed:
        ret['msgs'].append('Changed')
        ret['diff'] = pg_hba.diff

    if not module.check_mode:
        ret['msgs'].append('Writing')
        try:
            if pg_hba.write(module.params['backup_file']):
                # Apply owner/group/mode etc. from the common file arguments.
                module.set_fs_attributes_if_different(file_args, True, pg_hba.diff,
                                                      expand=False)
        except PgHbaError as error:
            module.fail_json(msg='Error writing file:\n{0}'.format(error))
        if pg_hba.last_backup:
            ret['backup_file'] = pg_hba.last_backup

    ret['pg_hba'] = list(pg_hba.get_rules())
    module.exit_json(**ret)
+
+
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py
new file mode 100644
index 000000000..fd104022b
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018-2020 Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ping
+short_description: Check remote PostgreSQL server availability
+description:
+- Simple module to check remote PostgreSQL server availability.
+options:
+ db:
+ description:
+ - Name of a database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(false), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+seealso:
+- module: community.postgresql.postgresql_info
+attributes:
+ check_mode:
+ support: full
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+# PostgreSQL ping dbsrv server from the shell:
+# ansible dbsrv -m postgresql_ping
+
+# In the example below you need to generate certificates previously.
+# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
+- name: >
+ Ping PostgreSQL server using non-default credentials and SSL
+ registering the return values into the result variable for future use
+ community.postgresql.postgresql_ping:
+ db: protected_db
+ login_host: dbsrv
+ login_user: secret
+ login_password: secret_pass
+ ca_cert: /root/root.crt
+ ssl_mode: verify-full
+ register: result
+ # If you need to fail when the server is not available,
+ # uncomment the following line:
+ #failed_when: not result.is_available
+
+# You can use the registered result with another task
+- name: This task should be executed only if the server is available
+ # ...
+ when: result.is_available == true
+'''
+
+RETURN = r'''
+is_available:
+ description: PostgreSQL server availability.
+ returned: always
+ type: bool
+ sample: true
+server_version:
+ description: PostgreSQL server version.
+ returned: always
+ type: dict
+ sample: { major: 13, minor: 2, full: '13.2', raw: 'PostgreSQL 13.2 on x86_64-pc-linux-gnu' }
+conn_err_msg:
+ description: Connection error message.
+ returned: always
+ type: str
+ sample: ''
+ version_added: 1.7.0
+'''
+
+import re
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
class PgPing(object):
    """Probe a PostgreSQL server by querying its version.

    After do() runs:
    - is_available: True once the server answered the version query.
    - version: dict with 'raw', 'full', 'major', 'minor' and optionally
      'patch' keys; only 'raw'/'full' when the version string is not a
      release-style "X.Y[.Z]" number.
    """

    def __init__(self, module, cursor):
        self.module = module
        self.cursor = cursor
        self.is_available = False
        self.version = {}

    def do(self):
        """Run the probe and return the (is_available, version) tuple."""
        self.get_pg_version()
        return (self.is_available, self.version)

    def get_pg_version(self):
        """Fetch and parse the server version, updating instance state."""
        query = "SELECT version()"
        # Robustness fix: check the result before indexing into it; a
        # failed/empty query previously raised instead of simply leaving
        # is_available as False.
        result = exec_sql(self, query, add_to_executed=False)
        if not result:
            return
        raw = result[0][0]
        if not raw:
            return

        # The server answered, so it is reachable even if the version
        # string below cannot be parsed.
        self.is_available = True

        # e.g. "PostgreSQL 13.2 on x86_64-pc-linux-gnu" -> "13.2"
        full = raw.split()[1]
        m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", full)
        if m is None:
            # Robustness fix: non-release builds (e.g. "17devel") do not
            # match the pattern; report the raw information instead of
            # crashing with AttributeError on m.group().
            self.version = dict(full=full, raw=raw)
            return

        major = int(m.group(1))
        minor = int(m.group(2))
        patch = None
        if m.group(3) is not None:
            patch = int(m.group(3))

        self.version = dict(
            major=major,
            minor=minor,
            full=full,
            raw=raw,
        )

        if patch is not None:
            self.version['patch'] = patch
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Module entry point: connect, collect availability/version, exit."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type='str', aliases=['login_db']),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not module.params['trust_input']:
        # Check input for potentially dangerous elements:
        check_input(module, module.params['session_role'])

    # Set some default values:
    cursor = False
    db_connection = False
    result = dict(
        changed=False,
        is_available=False,
        server_version=dict(),
        conn_err_msg='',
    )

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    # fail_on_conn=False: an unreachable server is a valid (negative) ping
    # result rather than a module failure.
    db_connection, err = connect_to_db(module, conn_params, fail_on_conn=False)
    if err:
        result['conn_err_msg'] = err

    if db_connection is not None:
        cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Do job:
    pg_ping = PgPing(module, cursor)
    if cursor:
        # If connection established:
        result["is_available"], result["server_version"] = pg_ping.do()
        cursor.close()
        db_connection.close()

    module.exit_json(**result)
+
+
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py
new file mode 100644
index 000000000..44aaeba3b
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py
@@ -0,0 +1,1216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_privs
+short_description: Grant or revoke privileges on PostgreSQL database objects
+description:
+- Grant or revoke privileges on PostgreSQL database objects.
+- This module is basically a wrapper around most of the functionality of
+ PostgreSQL's GRANT and REVOKE statements with detection of changes
+ (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)).
+- B(WARNING) The C(usage_on_types) option has been B(deprecated) and will be removed in
+ community.postgresql 3.0.0, please use the C(type) option with value C(type) to
+ GRANT/REVOKE permissions on types explicitly.
+options:
+ database:
+ description:
+ - Name of database to connect to.
+ required: true
+ type: str
+ aliases:
+ - db
+ - login_db
+ state:
+ description:
+ - If C(present), the specified privileges are granted, if C(absent) they are revoked.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ privs:
+ description:
+ - Comma separated list of privileges to grant/revoke.
+ type: str
+ aliases:
+ - priv
+ type:
+ description:
+ - Type of database object to set privileges on.
+ - The C(default_privs) choice is available starting at version 2.7.
+ - The C(foreign_data_wrapper) and C(foreign_server) object types are available since Ansible version 2.8.
+ - The C(type) choice is available since Ansible version 2.10.
+ - The C(procedure) is supported since collection version 1.3.0 and PostgreSQL 11.
+ type: str
+ default: table
+ choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
+ group, language, table, tablespace, schema, sequence, type , procedure]
+ objs:
+ description:
+ - Comma separated list of database objects to set privileges on.
+ - If I(type) is C(table), C(partition table), C(sequence), C(function) or C(procedure),
+ the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
+ database objects of I(type) in the schema specified via I(schema).
+ (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
+ for C(function) and C(partition table) since Ansible 2.8).
+ - C(procedure) is supported since PostgreSQL 11 and community.postgresql collection 1.3.0.
+ - If I(type) is C(database), this parameter can be omitted, in which case
+ privileges are set for the database specified via I(database).
+ - If I(type) is C(function) or C(procedure), colons (":") in object names will be
+ replaced with commas (needed to specify signatures, see examples).
+ type: str
+ aliases:
+ - obj
+ schema:
+ description:
+ - Schema that contains the database objects specified via I(objs).
+ - May only be provided if I(type) is C(table), C(sequence), C(function), C(procedure), C(type),
+ or C(default_privs). Defaults to C(public) in these cases.
+ - Pay attention, for embedded types when I(type=type)
+ I(schema) can be C(pg_catalog) or C(information_schema) respectively.
+ - If not specified, uses C(public). Not to pass any schema, use C(not-specified).
+ type: str
+ roles:
+ description:
+ - Comma separated list of role (user/group) names to set permissions for.
+ - The special value C(PUBLIC) can be provided instead to set permissions
+ for the implicitly defined PUBLIC group.
+ type: str
+ required: true
+ aliases:
+ - role
+ fail_on_role:
+ description:
+ - If C(true), fail when target role (for whom privs need to be granted) does not exist.
+ Otherwise just warn and continue.
+ default: true
+ type: bool
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ target_roles:
+ description:
+ - A list of existing role (user/group) names to set as the
+ default permissions for database objects subsequently created by them.
+ - Parameter I(target_roles) is only available with C(type=default_privs).
+ type: str
+ grant_option:
+ description:
+ - Whether C(role) may grant/revoke the specified privileges/group memberships to others.
+ - Set to C(false) to revoke GRANT OPTION, leave unspecified to make no changes.
+ - I(grant_option) only has an effect if I(state) is C(present).
+ type: bool
+ aliases:
+ - admin_option
+ password:
+ description:
+ - The password to authenticate with.
+ - This option has been B(deprecated) and will be removed in community.postgresql 4.0.0,
+ use the I(login_password) option instead.
+ - Mutually exclusive with I(login_password).
+ type: str
+ default: ''
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(roles), I(target_roles), I(session_role),
+ I(schema) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+ usage_on_types:
+ description:
+ - This option has been B(deprecated) and will be removed in community.postgresql 3.0.0,
+ please use the I(type) option with value C(type) to GRANT/REVOKE permissions on types
+ explicitly.
+ - When adding default privileges, the module always implicitly adds ``USAGE ON TYPES``.
+ - To avoid this behavior, set I(usage_on_types) to C(false).
+ - Added to save backwards compatibility.
+ - Used only when adding default privileges, ignored otherwise.
+ type: bool
+ default: true
+ version_added: '1.2.0'
+
+notes:
+- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
+ have singular alias names (I(priv), I(obj), I(role)).
+- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
+ C(present) and I(grant_option) to C(false) (see examples).
+- Note that when revoking privileges from a role R, this role may still have
+ access via privileges granted to any role R is a member of including C(PUBLIC).
+- Note that when you use C(PUBLIC) role, the module always reports that the state has been changed.
+- Note that when revoking privileges from a role R, you do so as the user
+ specified via I(login_user). If R has been granted the same privileges by
+ another user also, R can still access database objects via these privileges.
+- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
+
+seealso:
+- module: community.postgresql.postgresql_user
+- module: community.postgresql.postgresql_owner
+- module: community.postgresql.postgresql_membership
+- name: PostgreSQL privileges
+ description: General information about PostgreSQL privileges.
+ link: https://www.postgresql.org/docs/current/ddl-priv.html
+- name: PostgreSQL GRANT command reference
+ description: Complete reference of the PostgreSQL GRANT command documentation.
+ link: https://www.postgresql.org/docs/current/sql-grant.html
+- name: PostgreSQL REVOKE command reference
+ description: Complete reference of the PostgreSQL REVOKE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-revoke.html
+
+attributes:
+ check_mode:
+ support: full
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+author:
+- Bernhard Weitzhofer (@b6d)
+- Tobias Birkefeld (@tcraxs)
+'''
+
+EXAMPLES = r'''
+# On database "library":
+# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
+# TO librarian, reader WITH GRANT OPTION
+- name: Grant privs to librarian and reader on database library
+ community.postgresql.postgresql_privs:
+ database: library
+ state: present
+ privs: SELECT,INSERT,UPDATE
+ type: table
+ objs: books,authors
+ schema: public
+ roles: librarian,reader
+ grant_option: true
+
+- name: Same as above leveraging default values
+ community.postgresql.postgresql_privs:
+ db: library
+ privs: SELECT,INSERT,UPDATE
+ objs: books,authors
+ roles: librarian,reader
+ grant_option: true
+
+# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
+# Note that role "reader" will be *granted* INSERT privilege itself if this
+# isn't already the case (since state: present).
+- name: Revoke privs from reader
+ community.postgresql.postgresql_privs:
+ db: library
+ state: present
+ priv: INSERT
+ obj: books
+ role: reader
+ grant_option: false
+
+# "public" is the default schema. This also works for PostgreSQL 8.x.
+- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
+ community.postgresql.postgresql_privs:
+ db: library
+ state: absent
+ privs: INSERT,UPDATE
+ objs: ALL_IN_SCHEMA
+ role: reader
+
+- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
+ community.postgresql.postgresql_privs:
+ db: library
+ privs: ALL
+ type: schema
+ objs: public,math
+ role: librarian
+
+# Note the separation of arguments with colons.
+- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
+ community.postgresql.postgresql_privs:
+ db: library
+ privs: ALL
+ type: function
+ obj: add(int:int)
+ schema: math
+ roles: librarian,reader
+
+# Note that group role memberships apply cluster-wide and therefore are not
+# restricted to database "library" here.
+- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
+ community.postgresql.postgresql_privs:
+ db: library
+ type: group
+ objs: librarian,reader
+ roles: alice,bob
+ admin_option: true
+
+# Note that here "db: postgres" specifies the database to connect to, not the
+# database to grant privileges on (which is specified via the "objs" param)
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.postgresql.postgresql_privs:
+ db: postgres
+ privs: ALL
+ type: database
+ obj: library
+ role: librarian
+
+# If objs is omitted for type "database", it defaults to the database
+# to which the connection is established
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.postgresql.postgresql_privs:
+ db: library
+ privs: ALL
+ type: database
+ role: librarian
+
+# Available since version 2.7
+# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
+ community.postgresql.postgresql_privs:
+ db: library
+ objs: ALL_DEFAULT
+ privs: ALL
+ type: default_privs
+ role: librarian
+ grant_option: true
+
+# Available since version 2.7
+# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
+ community.postgresql.postgresql_privs:
+ db: library
+ objs: TABLES,SEQUENCES
+ privs: SELECT
+ type: default_privs
+ role: reader
+
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
+ community.postgresql.postgresql_privs:
+ db: library
+ objs: TYPES
+ privs: USAGE
+ type: default_privs
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
+ community.postgresql.postgresql_privs:
+ db: test
+ objs: fdw
+ privs: ALL
+ type: foreign_data_wrapper
+ role: reader
+
+# Available since community.postgresql 0.2.0
+- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
+ community.postgresql.postgresql_privs:
+ db: test
+ objs: customtype
+ privs: ALL
+ type: type
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
+ community.postgresql.postgresql_privs:
+ db: test
+ objs: fdw_server
+ privs: ALL
+ type: foreign_server
+ role: reader
+
+# Available since version 2.8
+# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
+- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
+ community.postgresql.postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since collection version 1.3.0
+# Grant 'execute' permissions on all procedures in schema 'common' to role 'caller'
+# Needs PostgreSQL 11 or higher and community.postgresql 1.3.0 or higher
+- name: GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA common TO caller
+ community.postgresql.postgresql_privs:
+ type: procedure
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# GRANT SELECT privileges for new TABLES objects created by librarian as
+# default to the role reader.
+# For specific
+- name: ALTER privs
+ community.postgresql.postgresql_privs:
+ db: library
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
+# REVOKE SELECT privileges for new TABLES objects created by librarian as
+# default from the role reader.
+# For specific
+- name: ALTER privs
+ community.postgresql.postgresql_privs:
+ db: library
+ state: absent
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since community.postgresql 0.2.0
+- name: Grant type privileges for pg_catalog.numeric type to alice
+ community.postgresql.postgresql_privs:
+ type: type
+ roles: alice
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: acme
+
+- name: Alter default privileges grant usage on schemas to datascience
+ community.postgresql.postgresql_privs:
+ database: test
+ type: default_privs
+ privs: usage
+ objs: schemas
+ role: datascience
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+ import psycopg2
+ import psycopg2.extensions
+except ImportError:
+ PSYCOPG2_IMP_ERR = traceback.format_exc()
+ psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ pg_quote_identifier,
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import postgres_common_argument_spec, get_conn_params
+from ansible.module_utils._text import to_native
+
# Privilege keywords accepted by the ``privs`` option (values are upper-cased
# before validation); mirrors the privilege types PostgreSQL's GRANT supports.
VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
                         'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
                         'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
# For type=default_privs: maps each ALTER DEFAULT PRIVILEGES object class to
# the set of privileges PostgreSQL allows for that class.
VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
                      'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
                      'FUNCTIONS': ('ALL', 'EXECUTE'),
                      'TYPES': ('ALL', 'USAGE'),
                      'SCHEMAS': ('CREATE', 'USAGE'), }

# Accumulates every SQL statement executed; returned as the ``queries`` fact.
executed_queries = []
+
+
class Error(Exception):
    """Module-internal error (invalid input, missing schema, unsupported server version)."""
    pass
+
+
def role_exists(module, cursor, rolname):
    """Return True if a role named *rolname* exists, False otherwise.

    The role name is passed as a bound query parameter instead of being
    interpolated into the SQL string, so names containing single quotes can
    neither break the query nor inject SQL.  On a database error the module
    fails via ``module.fail_json``.
    """
    query = "SELECT 1 FROM pg_roles WHERE rolname = %s"
    try:
        cursor.execute(query, (rolname,))
        return cursor.rowcount > 0

    except Exception as e:
        module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))

    # Unreachable in practice (fail_json raises), kept as a safe default.
    return False
+
+
# We don't have functools.partial in Python < 2.5
def partial(f, *args, **kwargs):
    """Partial function application.

    Returns a callable that invokes *f* with *args*/*kwargs* pre-applied;
    keyword arguments supplied at call time override the pre-applied ones.
    """

    def g(*g_args, **g_kwargs):
        new_kwargs = kwargs.copy()
        new_kwargs.update(g_kwargs)
        # Bug fix: pass the merged keyword dict (new_kwargs), not only the
        # call-time g_kwargs, so keywords bound at partial() time are applied.
        return f(*(args + g_args), **new_kwargs)

    # Expose the wrapped function and bound arguments for introspection,
    # mirroring functools.partial's .func/.args/.keywords attributes.
    g.f = f
    g.args = args
    g.kwargs = kwargs
    return g
+
+
class Connection(object):
    """Wrapper around a psycopg2 connection with some convenience methods"""

    def __init__(self, params, module):
        self.database = params.database
        self.module = module

        conn_params = get_conn_params(module, params.__dict__, warn_db_default=False)

        sslrootcert = params.ca_cert
        if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
            raise ValueError('psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')

        self.connection = psycopg2.connect(**conn_params)
        self.cursor = self.connection.cursor()
        self.pg_version = self.connection.server_version

    def commit(self):
        """Commit the current transaction."""
        self.connection.commit()

    def rollback(self):
        """Roll back the current transaction."""
        self.connection.rollback()

    @property
    def encoding(self):
        """Connection encoding in Python-compatible form"""
        return psycopg2.extensions.encodings[self.connection.encoding]

    # Methods for querying database objects

    # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
    # phrases in GRANT or REVOKE statements, therefore alternative methods are
    # provided here.

    def schema_exists(self, schema):
        """Return True if a schema named *schema* exists."""
        query = """SELECT count(*)
        FROM pg_catalog.pg_namespace WHERE nspname = %s"""
        self.cursor.execute(query, (schema,))
        return self.cursor.fetchone()[0] > 0

    def get_all_tables_in_schema(self, schema):
        """Return names of all tables, views, materialized views and
        partitioned tables, restricted to *schema* when given."""
        if schema:
            if not self.schema_exists(schema):
                raise Error('Schema "%s" does not exist.' % schema)

            query = """SELECT relname
                       FROM pg_catalog.pg_class c
                       JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                       WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
            self.cursor.execute(query, (schema,))
        else:
            query = ("SELECT relname FROM pg_catalog.pg_class "
                     "WHERE relkind in ('r', 'v', 'm', 'p')")
            self.cursor.execute(query)
        return [t[0] for t in self.cursor.fetchall()]

    def get_all_sequences_in_schema(self, schema):
        """Return names of all sequences, restricted to *schema* when given."""
        if schema:
            if not self.schema_exists(schema):
                raise Error('Schema "%s" does not exist.' % schema)
            query = """SELECT relname
                       FROM pg_catalog.pg_class c
                       JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                       WHERE nspname = %s AND relkind = 'S'"""
            self.cursor.execute(query, (schema,))
        else:
            self.cursor.execute("SELECT relname FROM pg_catalog.pg_class WHERE relkind = 'S'")
        return [t[0] for t in self.cursor.fetchall()]

    def get_all_functions_in_schema(self, schema):
        """Return 'name(argtypes)' signatures of all functions, restricted to
        *schema* when given (on PostgreSQL >= 11 procedures are excluded)."""
        if schema:
            if not self.schema_exists(schema):
                raise Error('Schema "%s" does not exist.' % schema)

            query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
                     "FROM pg_catalog.pg_proc p "
                     "JOIN pg_namespace n ON n.oid = p.pronamespace "
                     "WHERE nspname = %s")

            if self.pg_version >= 110000:
                query += " and p.prokind = 'f'"

            self.cursor.execute(query, (schema,))
        else:
            self.cursor.execute("SELECT p.proname, oidvectortypes(p.proargtypes) FROM pg_catalog.pg_proc p")
        return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]

    def get_all_procedures_in_schema(self, schema):
        """Return 'name(argtypes)' signatures of all procedures, restricted to
        *schema* when given.  Requires PostgreSQL >= 11."""
        if self.pg_version < 110000:
            raise Error("PostgreSQL version must be >= 11 for type=procedure. Exit")

        if schema:
            if not self.schema_exists(schema):
                raise Error('Schema "%s" does not exist.' % schema)

            query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
                     "FROM pg_catalog.pg_proc p "
                     "JOIN pg_namespace n ON n.oid = p.pronamespace "
                     "WHERE nspname = %s and p.prokind = 'p'")

            self.cursor.execute(query, (schema,))
        else:
            query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
                     "FROM pg_catalog.pg_proc p WHERE p.prokind = 'p'")
            self.cursor.execute(query)
        return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]

    # Methods for getting access control lists and group membership info

    # To determine whether anything has changed after granting/revoking
    # privileges, we compare the access control lists of the specified database
    # objects before and afterwards. Python's list/string comparison should
    # suffice for change detection, we should not actually have to parse ACLs.
    # The same should apply to group membership information.

    def get_table_acls(self, schema, tables):
        """Return ACLs of the given tables/views, ordered by relname."""
        if schema:
            query = """SELECT relacl
                       FROM pg_catalog.pg_class c
                       JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                       WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s)
                       ORDER BY relname"""
            self.cursor.execute(query, (schema, tables))
        else:
            query = ("SELECT relacl FROM pg_catalog.pg_class "
                     "WHERE relkind in ('r','p','v','m') AND relname = ANY (%s) "
                     "ORDER BY relname")
            # Bug fix: the ANY (%s) placeholder was executed without a
            # parameter tuple, which made this branch raise at runtime.
            self.cursor.execute(query, (tables,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_sequence_acls(self, schema, sequences):
        """Return ACLs of the given sequences, ordered by relname."""
        if schema:
            query = """SELECT relacl
                       FROM pg_catalog.pg_class c
                       JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                       WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
                       ORDER BY relname"""
            self.cursor.execute(query, (schema, sequences))
        else:
            query = ("SELECT relacl FROM pg_catalog.pg_class "
                     "WHERE relkind = 'S' AND relname = ANY (%s) ORDER BY relname")
            # Bug fix: missing parameter tuple for the ANY (%s) placeholder.
            self.cursor.execute(query, (sequences,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_function_acls(self, schema, function_signatures):
        """Return ACLs of the given functions/procedures (signatures like
        'name(argtypes)'), ordered by name and argument types."""
        funcnames = [f.split('(', 1)[0] for f in function_signatures]
        if schema:
            query = """SELECT proacl
                       FROM pg_catalog.pg_proc p
                       JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
                       WHERE nspname = %s AND proname = ANY (%s)
                       ORDER BY proname, proargtypes"""
            self.cursor.execute(query, (schema, funcnames))
        else:
            query = ("SELECT proacl FROM pg_catalog.pg_proc WHERE proname = ANY (%s) "
                     "ORDER BY proname, proargtypes")
            # Bug fix: missing parameter tuple for the ANY (%s) placeholder.
            self.cursor.execute(query, (funcnames,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_schema_acls(self, schemas):
        """Return ACLs of the given schemas, ordered by nspname."""
        query = """SELECT nspacl FROM pg_catalog.pg_namespace
                   WHERE nspname = ANY (%s) ORDER BY nspname"""
        self.cursor.execute(query, (schemas,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_language_acls(self, languages):
        """Return ACLs of the given procedural languages, ordered by lanname."""
        query = """SELECT lanacl FROM pg_catalog.pg_language
                   WHERE lanname = ANY (%s) ORDER BY lanname"""
        self.cursor.execute(query, (languages,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_tablespace_acls(self, tablespaces):
        """Return ACLs of the given tablespaces, ordered by spcname."""
        query = """SELECT spcacl FROM pg_catalog.pg_tablespace
                   WHERE spcname = ANY (%s) ORDER BY spcname"""
        self.cursor.execute(query, (tablespaces,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_database_acls(self, databases):
        """Return ACLs of the given databases, ordered by datname."""
        query = """SELECT datacl FROM pg_catalog.pg_database
                   WHERE datname = ANY (%s) ORDER BY datname"""
        self.cursor.execute(query, (databases,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_group_memberships(self, groups):
        """Return membership rows (roleid, grantor, member, admin_option)
        for the given group roles."""
        query = """SELECT roleid, grantor, member, admin_option
                   FROM pg_catalog.pg_auth_members am
                   JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
                   WHERE r.rolname = ANY(%s)
                   ORDER BY roleid, grantor, member"""
        self.cursor.execute(query, (groups,))
        return self.cursor.fetchall()

    def get_default_privs(self, schema, *args):
        """Return default-privilege ACLs, restricted to *schema* when given."""
        if schema:
            query = """SELECT defaclacl
                       FROM pg_default_acl a
                       JOIN pg_namespace b ON a.defaclnamespace=b.oid
                       WHERE b.nspname = %s;"""
            self.cursor.execute(query, (schema,))
        else:
            self.cursor.execute("SELECT defaclacl FROM pg_default_acl;")
        return [t[0] for t in self.cursor.fetchall()]

    def get_foreign_data_wrapper_acls(self, fdws):
        """Return ACLs of the given foreign data wrappers, ordered by fdwname."""
        query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
                   WHERE fdwname = ANY (%s) ORDER BY fdwname"""
        self.cursor.execute(query, (fdws,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_foreign_server_acls(self, fs):
        """Return ACLs of the given foreign servers, ordered by srvname."""
        query = """SELECT srvacl FROM pg_catalog.pg_foreign_server
                   WHERE srvname = ANY (%s) ORDER BY srvname"""
        self.cursor.execute(query, (fs,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_type_acls(self, schema, types):
        """Return ACLs of the given types, ordered by typname."""
        if schema:
            query = """SELECT t.typacl FROM pg_catalog.pg_type t
                       JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
                       WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname"""
            self.cursor.execute(query, (schema, types))
        else:
            query = "SELECT typacl FROM pg_catalog.pg_type WHERE typname = ANY (%s) ORDER BY typname"
            # Bug fix: missing parameter tuple for the ANY (%s) placeholder.
            self.cursor.execute(query, (types,))
        return [t[0] for t in self.cursor.fetchall()]

    # Manipulating privileges

    # WARNING: usage_on_types has been deprecated and will be removed in community.postgresql 3.0.0, please use an obj_type of 'type' instead.
    def manipulate_privs(self, obj_type, privs, objs, orig_objs, roles, target_roles,
                         state, grant_option, schema_qualifier=None, fail_on_role=True, usage_on_types=True):
        """Manipulate database object privileges.

        :param obj_type: Type of database object to grant/revoke
                         privileges for.
        :param privs: Either a list of privileges to grant/revoke
                      or None if type is "group".
        :param objs: List of database objects to grant/revoke
                     privileges for.
        :param orig_objs: ALL_IN_SCHEMA or None
        :param roles: Either a list of role names or "PUBLIC"
                      for the implicitly defined "PUBLIC" group
        :param target_roles: List of role names to grant/revoke
                             default privileges as.
        :param state: "present" to grant privileges, "absent" to revoke.
        :param grant_option: Only for state "present": If True, set
                             grant/admin option. If False, revoke it.
                             If None, don't change grant option.
        :param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
                                 "FUNCTION") must be qualified by schema.
                                 Ignored for other Types.
        :param fail_on_role: If True, fail when a role does not exist;
                             otherwise warn and skip it.
        :param usage_on_types: Deprecated; if True, default-privilege grants
                               also include USAGE ON TYPES.
        :returns: True if the ACLs (or group memberships) changed.
        """
        # get_status: function to get current status
        if obj_type == 'table':
            get_status = partial(self.get_table_acls, schema_qualifier)
        elif obj_type == 'sequence':
            get_status = partial(self.get_sequence_acls, schema_qualifier)
        elif obj_type in ('function', 'procedure'):
            get_status = partial(self.get_function_acls, schema_qualifier)
        elif obj_type == 'schema':
            get_status = self.get_schema_acls
        elif obj_type == 'language':
            get_status = self.get_language_acls
        elif obj_type == 'tablespace':
            get_status = self.get_tablespace_acls
        elif obj_type == 'database':
            get_status = self.get_database_acls
        elif obj_type == 'group':
            get_status = self.get_group_memberships
        elif obj_type == 'default_privs':
            get_status = partial(self.get_default_privs, schema_qualifier)
        elif obj_type == 'foreign_data_wrapper':
            get_status = self.get_foreign_data_wrapper_acls
        elif obj_type == 'foreign_server':
            get_status = self.get_foreign_server_acls
        elif obj_type == 'type':
            get_status = partial(self.get_type_acls, schema_qualifier)
        else:
            raise Error('Unsupported database object type "%s".' % obj_type)

        # Return False (nothing has changed) if there are no objs to work on.
        if not objs:
            return False

        quoted_schema_qualifier = '"%s"' % schema_qualifier.replace('"', '""') if schema_qualifier else None
        # obj_ids: quoted db object identifiers (sometimes schema-qualified)
        if obj_type in ('function', 'procedure'):
            obj_ids = []
            for obj in objs:
                try:
                    f, args = obj.split('(', 1)
                except Exception:
                    raise Error('Illegal function / procedure signature: "%s".' % obj)
                obj_ids.append('%s."%s"(%s' % (quoted_schema_qualifier, f, args))
        elif obj_type in ['table', 'sequence', 'type']:
            obj_ids = ['%s."%s"' % (quoted_schema_qualifier, o) for o in objs]
        else:
            obj_ids = ['"%s"' % o for o in objs]

        # set_what: SQL-fragment specifying what to set for the target roles:
        # Either group membership or privileges on objects of a certain type
        if obj_type == 'group':
            set_what = ','.join(obj_ids)
        elif obj_type == 'default_privs':
            # We don't want privs to be quoted here
            set_what = ','.join(privs)
        else:
            # function types are already quoted above
            if obj_type not in ('function', 'procedure'):
                obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
            # Note: obj_type has been checked against a set of string literals
            # and privs was escaped when it was parsed
            # Note: Underscores are replaced with spaces to support multi-word obj_type
            if orig_objs is not None:
                set_what = '%s ON %s %s' % (','.join(privs), orig_objs, quoted_schema_qualifier)
            else:
                set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '), ','.join(obj_ids))

        # for_whom: SQL-fragment specifying for whom to set the above
        if roles == 'PUBLIC':
            for_whom = 'PUBLIC'
        else:
            for_whom = []
            for r in roles:
                if not role_exists(self.module, self.cursor, r):
                    if fail_on_role:
                        self.module.fail_json(msg="Role '%s' does not exist" % r.strip())

                    else:
                        self.module.warn("Role '%s' does not exist, pass it" % r.strip())
                else:
                    for_whom.append('"%s"' % r)

            if not for_whom:
                return False

            for_whom = ','.join(for_whom)

        # as_who:
        as_who = None
        if target_roles:
            as_who = ','.join('"%s"' % r for r in target_roles)

        status_before = get_status(objs)

        query = QueryBuilder(state) \
            .for_objtype(obj_type) \
            .with_grant_option(grant_option) \
            .for_whom(for_whom) \
            .as_who(as_who) \
            .for_schema(quoted_schema_qualifier) \
            .set_what(set_what) \
            .for_objs(objs) \
            .usage_on_types(usage_on_types) \
            .build()

        executed_queries.append(query)
        self.cursor.execute(query)
        if roles == 'PUBLIC':
            return True

        status_after = get_status(objs)

        def nonesorted(e):
            # For python 3+ that can fail trying
            # to compare NoneType elements by sort method.
            if e is None:
                return ''
            return e

        status_before.sort(key=nonesorted)
        status_after.sort(key=nonesorted)
        return status_before != status_after
+
+
class QueryBuilder(object):
    """Fluent builder assembling the GRANT/REVOKE/ALTER DEFAULT PRIVILEGES
    statements executed by this module; call build() to get the final SQL."""

    def __init__(self, state):
        self._state = state
        self._grant_option = None
        self._for_whom = None
        self._as_who = None
        self._set_what = None
        self._obj_type = None
        self._schema = None
        self._objs = None
        self._usage_on_types = None
        self.query = []

    def for_objs(self, objs):
        """Remember the list of target objects."""
        self._objs = objs
        return self

    def for_schema(self, schema):
        """Remember the (pre-quoted) schema qualifier SQL fragment."""
        if schema is None:
            self._schema = ''
        else:
            self._schema = ' IN SCHEMA %s' % schema
        return self

    def with_grant_option(self, option):
        """Remember the grant/admin option flag (True/False/None)."""
        self._grant_option = option
        return self

    def for_whom(self, who):
        """Remember the comma-separated grantee list."""
        self._for_whom = who
        return self

    def usage_on_types(self, usage_on_types):
        """Remember whether default-privilege grants also cover TYPES."""
        self._usage_on_types = usage_on_types
        return self

    def as_who(self, target_roles):
        """Remember the roles for ALTER DEFAULT PRIVILEGES FOR ROLE."""
        self._as_who = target_roles
        return self

    def set_what(self, what):
        """Remember the privileges/objects SQL fragment."""
        self._set_what = what
        return self

    def for_objtype(self, objtype):
        """Remember the database object type being manipulated."""
        self._obj_type = objtype
        return self

    def build(self):
        """Assemble and return the final SQL as one newline-joined string."""
        if self._state == 'present':
            self.build_present()
        else:
            # 'absent' and any unexpected state both take the revoke path.
            self.build_absent()
        return '\n'.join(self.query)

    def add_default_revoke(self):
        """Append a REVOKE ALL default-privileges statement per object."""
        for obj in self._objs:
            if self._as_who:
                self.query.append(
                    'ALTER DEFAULT PRIVILEGES FOR ROLE %s%s REVOKE ALL ON %s FROM %s;'
                    % (self._as_who, self._schema, obj, self._for_whom))
            else:
                self.query.append(
                    'ALTER DEFAULT PRIVILEGES%s REVOKE ALL ON %s FROM %s;'
                    % (self._schema, obj, self._for_whom))

    def add_grant_option(self):
        """Terminate the last statement, honouring the grant/admin option."""
        if self._grant_option:
            suffix = ' WITH ADMIN OPTION;' if self._obj_type == 'group' else ' WITH GRANT OPTION;'
            self.query[-1] += suffix
        elif self._grant_option is False:
            self.query[-1] += ';'
            if self._obj_type == 'group':
                self.query.append('REVOKE ADMIN OPTION FOR %s FROM %s;' % (self._set_what, self._for_whom))
            elif self._obj_type != 'default_privs':
                self.query.append('REVOKE GRANT OPTION FOR %s FROM %s;' % (self._set_what, self._for_whom))
        else:
            # grant_option is None: leave the option untouched.
            self.query[-1] += ';'

    def add_default_priv(self):
        """Append a GRANT default-privileges statement per object, plus the
        deprecated USAGE ON TYPES grant when requested."""
        for obj in self._objs:
            if self._as_who:
                self.query.append(
                    'ALTER DEFAULT PRIVILEGES FOR ROLE %s%s GRANT %s ON %s TO %s'
                    % (self._as_who, self._schema, self._set_what, obj, self._for_whom))
            else:
                self.query.append(
                    'ALTER DEFAULT PRIVILEGES%s GRANT %s ON %s TO %s'
                    % (self._schema, self._set_what, obj, self._for_whom))
            self.add_grant_option()

            if self._usage_on_types:
                if self._as_who:
                    self.query.append(
                        'ALTER DEFAULT PRIVILEGES FOR ROLE %s%s GRANT USAGE ON TYPES TO %s'
                        % (self._as_who, self._schema, self._for_whom))
                else:
                    self.query.append(
                        'ALTER DEFAULT PRIVILEGES%s GRANT USAGE ON TYPES TO %s'
                        % (self._schema, self._for_whom))
                self.add_grant_option()

    def build_present(self):
        """Build queries for state=present."""
        if self._obj_type == 'default_privs':
            self.add_default_revoke()
            self.add_default_priv()
        else:
            self.query.append('GRANT %s TO %s' % (self._set_what, self._for_whom))
            self.add_grant_option()

    def build_absent(self):
        """Build queries for state=absent."""
        if self._obj_type == 'default_privs':
            self.query = []
            for obj in ('TABLES', 'SEQUENCES', 'TYPES'):
                if self._as_who:
                    self.query.append(
                        'ALTER DEFAULT PRIVILEGES FOR ROLE %s%s REVOKE ALL ON %s FROM %s;'
                        % (self._as_who, self._schema, obj, self._for_whom))
                else:
                    self.query.append(
                        'ALTER DEFAULT PRIVILEGES%s REVOKE ALL ON %s FROM %s;'
                        % (self._schema, obj, self._for_whom))
        else:
            self.query.append('REVOKE %s FROM %s;' % (self._set_what, self._for_whom))
+
+
def main():
    """Module entry point: validate parameters, connect to PostgreSQL and
    grant/revoke the requested privileges, reporting change status."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        database=dict(required=True, aliases=['db', 'login_db']),
        state=dict(default='present', choices=['present', 'absent']),
        privs=dict(required=False, aliases=['priv']),
        type=dict(default='table',
                  choices=['table',
                           'sequence',
                           'function',
                           'procedure',
                           'database',
                           'schema',
                           'language',
                           'tablespace',
                           'group',
                           'default_privs',
                           'foreign_data_wrapper',
                           'foreign_server',
                           'type', ]),
        objs=dict(required=False, aliases=['obj']),
        schema=dict(required=False),
        roles=dict(required=True, aliases=['role']),
        session_role=dict(required=False),
        target_roles=dict(required=False),
        grant_option=dict(required=False, type='bool',
                          aliases=['admin_option']),
        # WARNING: password is deprecated and will be removed in community.postgresql 4.0.0,
        # login_password should be used instead
        password=dict(default='', no_log=True,
                      removed_in_version='4.0.0',
                      # Fixed typo: was 'community.postgreql'
                      removed_from_collection='community.postgresql'),
        fail_on_role=dict(type='bool', default=True),
        trust_input=dict(type='bool', default=True),
        usage_on_types=dict(type='bool', default=True,
                            removed_in_version='3.0.0',
                            removed_from_collection='community.postgresql'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    fail_on_role = module.params['fail_on_role']
    usage_on_types = module.params['usage_on_types']

    # Create type object as namespace for module params
    p = type('Params', (), module.params)

    # WARNING: password is deprecated and will be removed in community.postgresql 4.0.0,
    # login_password should be used instead
    # https://github.com/ansible-collections/community.postgresql/issues/406
    if p.password:
        if p.login_password:
            module.fail_json(msg='Use the "password" or "login_password" option but not both '
                                 'to pass a password to log in with.')
        p.login_password = p.password

    # param "schema": default, allowed depends on param "type"
    if p.type in ['table', 'sequence', 'function', 'procedure', 'type', 'default_privs']:
        if p.objs == 'schemas' or p.schema == 'not-specified':
            p.schema = None
        else:
            p.schema = p.schema or 'public'
    elif p.schema:
        module.fail_json(msg='Argument "schema" is not allowed '
                             'for type "%s".' % p.type)

    # param "objs": ALL_IN_SCHEMA can be used only
    # when param "type" is table, sequence, function or procedure
    if p.objs == 'ALL_IN_SCHEMA' and p.type not in ('table', 'sequence', 'function', 'procedure'):
        module.fail_json(msg='Argument "objs": ALL_IN_SCHEMA can be used only for '
                             'type: table, sequence, function or procedure, '
                             '%s was passed.' % p.type)

    # param "objs": default, required depends on param "type"
    if p.type == 'database':
        p.objs = p.objs or p.database
    elif not p.objs:
        module.fail_json(msg='Argument "objs" is required '
                             'for type "%s".' % p.type)

    # param "privs": allowed, required depends on param "type"
    if p.type == 'group':
        if p.privs:
            module.fail_json(msg='Argument "privs" is not allowed '
                                 'for type "group".')
    elif not p.privs:
        module.fail_json(msg='Argument "privs" is required '
                             'for type "%s".' % p.type)

    # Check input
    if not p.trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, p.roles, p.target_roles, p.session_role, p.schema)

    # Connect to Database
    if not psycopg2:
        module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
    try:
        conn = Connection(p, module)
    except psycopg2.Error as e:
        module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc())
    except TypeError as e:
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
        module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
    except ValueError as e:
        # We raise this when the psycopg library is too old
        module.fail_json(msg=to_native(e))

    if p.session_role:
        try:
            conn.cursor.execute('SET ROLE "%s"' % p.session_role)
        except Exception as e:
            module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc())

    try:
        # privs
        if p.privs:
            privs = frozenset(pr.upper() for pr in p.privs.split(','))
            if not privs.issubset(VALID_PRIVS):
                module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
        else:
            privs = None
        # objs:
        orig_objs = None
        if p.objs == 'ALL_IN_SCHEMA':
            if p.type == 'table':
                objs = conn.get_all_tables_in_schema(p.schema)
            elif p.type == 'sequence':
                objs = conn.get_all_sequences_in_schema(p.schema)
            elif p.type == 'function':
                objs = conn.get_all_functions_in_schema(p.schema)
            elif p.type == 'procedure':
                objs = conn.get_all_procedures_in_schema(p.schema)

            if conn.pg_version >= 90000:
                if p.type == 'table':
                    orig_objs = 'ALL TABLES IN SCHEMA'
                elif p.type == 'sequence':
                    orig_objs = 'ALL SEQUENCES IN SCHEMA'
                elif p.type == 'function':
                    orig_objs = 'ALL FUNCTIONS IN SCHEMA'
                elif p.type == 'procedure':
                    orig_objs = 'ALL PROCEDURES IN SCHEMA'

        elif p.type == 'default_privs':
            if p.objs == 'ALL_DEFAULT':
                # Bug fix: build the object set without mutating the
                # module-level VALID_DEFAULT_OBJS dict (the previous
                # .pop('SCHEMAS') permanently corrupted the mapping for any
                # later use in the same process).
                objs = frozenset(obj for obj in VALID_DEFAULT_OBJS if obj != 'SCHEMAS')
            else:
                objs = frozenset(obj.upper() for obj in p.objs.split(','))
                if not objs.issubset(VALID_DEFAULT_OBJS):
                    module.fail_json(
                        msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys()))
            # Again, do we have valid privs specified for object type:
            valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
            if not valid_objects_for_priv == objs:
                module.fail_json(
                    msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
                        valid_objects_for_priv, objs))
        else:
            objs = p.objs.split(',')

            # function signatures are encoded using ':' to separate args
            if p.type in ('function', 'procedure'):
                objs = [obj.replace(':', ',') for obj in objs]

        # roles
        if p.roles.upper() == 'PUBLIC':
            roles = 'PUBLIC'
        else:
            roles = p.roles.split(',')

            if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
                if fail_on_role:
                    module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
                else:
                    module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
                    module.exit_json(changed=False, queries=executed_queries)

        # check if target_roles is set with type: default_privs
        if p.target_roles and not p.type == 'default_privs':
            module.warn('"target_roles" will be ignored '
                        'Argument "type: default_privs" is required for usage of "target_roles".')

        # target roles
        if p.target_roles:
            target_roles = p.target_roles.split(',')
        else:
            target_roles = None

        changed = conn.manipulate_privs(
            obj_type=p.type,
            privs=privs,
            objs=objs,
            orig_objs=orig_objs,
            roles=roles,
            target_roles=target_roles,
            state=p.state,
            grant_option=p.grant_option,
            schema_qualifier=p.schema,
            fail_on_role=fail_on_role,
            usage_on_types=usage_on_types,
        )

    except Error as e:
        conn.rollback()
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    except psycopg2.Error as e:
        conn.rollback()
        module.fail_json(msg=to_native(e))

    # In check mode (or when nothing changed) discard the transaction;
    # otherwise make the privilege changes permanent.
    if module.check_mode or not changed:
        conn.rollback()
    else:
        conn.commit()

    conn.cursor.close()
    conn.connection.close()

    module.exit_json(changed=changed, queries=executed_queries)
+
+
# Run the module when executed directly by Ansible.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py
new file mode 100644
index 000000000..5edfc2abb
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py
@@ -0,0 +1,691 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_publication
+short_description: Add, update, or remove PostgreSQL publication
+description:
+- Add, update, or remove PostgreSQL publication.
+options:
+ name:
+ description:
+ - Name of the publication to add, update, or remove.
+ required: true
+ type: str
+ db:
+ description:
+ - Name of the database to connect to and where
+ the publication state will be changed.
+ aliases: [ login_db ]
+ type: str
+ tables:
+ description:
+ - List of tables to add to the publication.
+ - If no value is set all tables are targeted.
+ - If the publication already exists for specific tables and I(tables) is not passed,
+ nothing will be changed.
+ - If you need to add all tables to the publication with the same name,
+ drop existent and create new without passing I(tables).
+ type: list
+ elements: str
+ state:
+ description:
+ - The publication state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ parameters:
+ description:
+ - Dictionary with optional publication parameters.
+ - Available parameters depend on PostgreSQL version.
+ type: dict
+ owner:
+ description:
+ - Publication owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ type: str
+ cascade:
+ description:
+ - Drop publication dependencies. Has effect with I(state=absent) only.
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(name), I(tables), I(owner),
+ I(session_role), I(params) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+
+notes:
+- PostgreSQL version must be 10 or greater.
+
+attributes:
+ check_mode:
+ support: full
+
+seealso:
+- name: CREATE PUBLICATION reference
+ description: Complete reference of the CREATE PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createpublication.html
+- name: ALTER PUBLICATION reference
+ description: Complete reference of the ALTER PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterpublication.html
+- name: DROP PUBLICATION reference
+ description: Complete reference of the DROP PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droppublication.html
+author:
+- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Create a new publication with name "acme" targeting all tables in database "test"
+ community.postgresql.postgresql_publication:
+ db: test
+ name: acme
+
+- name: Create publication "acme" publishing only prices and vehicles tables
+ community.postgresql.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+
+- name: >
+ Create publication "acme", set user alice as an owner, targeting all tables
+ Allowable DML operations are INSERT and UPDATE only
+ community.postgresql.postgresql_publication:
+ name: acme
+ owner: alice
+ parameters:
+ publish: 'insert,update'
+
+- name: >
+ Assuming publication "acme" exists and there are targeted
+ tables "prices" and "vehicles", add table "stores" to the publication
+ community.postgresql.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+ - stores
+
+- name: Remove publication "acme" if exists in database "test"
+ community.postgresql.postgresql_publication:
+ db: test
+ name: acme
+ state: absent
+'''
+
+RETURN = r'''
+exists:
+ description:
+ - Flag indicates the publication exists or not at the end of runtime.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'DROP PUBLICATION "acme" CASCADE' ]
+owner:
+ description: Owner of the publication at the end of runtime.
+ returned: if publication exists
+ type: str
+ sample: "alice"
+tables:
+ description:
+ - List of tables in the publication at the end of runtime.
+ - If all tables are published, returns empty list.
+ returned: if publication exists
+ type: list
+ sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""]
+alltables:
+ description:
+ - Flag indicates that all tables are published.
+ returned: if publication exists
+ type: bool
+ sample: false
+parameters:
+ description: Publication parameters at the end of runtime.
+ returned: if publication exists
+ type: dict
+ sample: {'publish': {'insert': false, 'delete': false, 'update': true}}
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+SUPPORTED_PG_VERSION = 10000
+
+
+################################
+# Module functions and classes #
+################################
+
def transform_tables_representation(tbl_list):
    """Normalize table names for use in publication DDL.

    Each element is stripped of surrounding whitespace, prefixed with
    'public.' when it has no explicit schema qualifier, and quoted with
    ``pg_quote_identifier``. The list is modified in place.

    Args:
        tbl_list (list): List of table names.

    Returns:
        tbl_list (list): The same list with every element transformed.
    """
    for idx, raw_name in enumerate(tbl_list):
        name = raw_name.strip()
        # No dot means no schema qualifier -> assume the default schema:
        if '.' not in name:
            name = 'public.%s' % name
        tbl_list[idx] = pg_quote_identifier(name, 'table')

    return tbl_list
+
+
class PgPublication():
    """Class to work with PostgreSQL publication.

    Args:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        name (str): The name of the publication.

    Attributes:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        name (str): Name of the publication.
        executed_queries (list): List of executed queries.
        attrs (dict): Dict with publication attributes.
        exists (bool): Flag indicates the publication exists or not.
    """

    def __init__(self, module, cursor, name):
        self.module = module
        self.cursor = cursor
        self.name = name
        self.executed_queries = []
        self.attrs = {
            'alltables': False,
            'tables': [],
            'parameters': {},
            'owner': '',
        }
        self.exists = self.check_pub()

    def get_info(self):
        """Refresh the publication information.

        Returns:
            ``self.attrs``.
        """
        self.exists = self.check_pub()
        return self.attrs

    def check_pub(self):
        """Check the publication and refresh ``self.attrs`` publication attribute.

        Returns:
            True if the publication with ``self.name`` exists, False otherwise.
        """

        pub_info = self.__get_general_pub_info()

        if not pub_info:
            # Publication does not exist:
            return False

        self.attrs['owner'] = pub_info.get('pubowner')

        # Publication DML operations:
        self.attrs['parameters']['publish'] = {}
        self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False)
        self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False)
        self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False)
        # pubtruncate exists only on PostgreSQL 11+, so add it conditionally:
        if pub_info.get('pubtruncate'):
            self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate')

        # If alltables flag is False, get the list of targeted tables:
        if not pub_info.get('puballtables'):
            table_info = self.__get_tables_pub_info()
            # Join sublists [['schema', 'table'], ...] to ['schema.table', ...]
            # for better representation:
            for i, schema_and_table in enumerate(table_info):
                table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table')

            self.attrs['tables'] = table_info
        else:
            self.attrs['alltables'] = True

        # Publication exists:
        return True

    def create(self, tables, params, owner, check_mode=True):
        """Create the publication.

        Args:
            tables (list): List with names of the tables that need to be added to the publication.
            params (dict): Dict contains optional publication parameters and their values.
            owner (str): Name of the publication owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if publication has been created, otherwise False.
        """
        query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')]

        if tables:
            query_fragments.append("FOR TABLE %s" % ', '.join(tables))
        else:
            query_fragments.append("FOR ALL TABLES")

        if params:
            params_list = []
            # Make list ["param = 'value'", ...] from params dict:
            for (key, val) in iteritems(params):
                params_list.append("%s = '%s'" % (key, val))

            # Add the list to query_fragments:
            query_fragments.append("WITH (%s)" % ', '.join(params_list))

        changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)

        if owner:
            # If check_mode, just add possible SQL to
            # executed_queries and return:
            self.__pub_set_owner(owner, check_mode=check_mode)

        return changed

    def update(self, tables, params, owner, check_mode=True):
        """Update the publication.

        Args:
            tables (list): List with names of the tables that need to be presented in the publication.
            params (dict): Dict contains optional publication parameters and their values.
            owner (str): Name of the publication owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if publication has been updated, otherwise False.
        """
        changed = False

        # Add or drop tables from published tables suite:
        if tables and not self.attrs['alltables']:

            # 1. If needs to add table to the publication:
            for tbl in tables:
                if tbl not in self.attrs['tables']:
                    # If needs to add table to the publication:
                    changed = self.__pub_add_table(tbl, check_mode=check_mode)

            # 2. if there is a table in targeted tables
            # that's not presented in the passed tables:
            for tbl in self.attrs['tables']:
                if tbl not in tables:
                    changed = self.__pub_drop_table(tbl, check_mode=check_mode)

        elif tables and self.attrs['alltables']:
            changed = self.__pub_set_tables(tables, check_mode=check_mode)

        # Update pub parameters:
        if params:
            for key, val in iteritems(params):
                if self.attrs['parameters'].get(key):

                    # In PostgreSQL 10/11 only 'publish' optional parameter is presented.
                    if key == 'publish':
                        # 'publish' value can be only a string with comma-separated items
                        # of allowed DML operations like 'insert,update' or
                        # 'insert,update,delete', etc.
                        # Make dictionary to compare with current attrs later:
                        val_dict = self.attrs['parameters']['publish'].copy()
                        val_list = val.split(',')
                        for v in val_dict:
                            if v in val_list:
                                val_dict[v] = True
                            else:
                                val_dict[v] = False

                        # Compare val_dict and the dict with current 'publish' parameters,
                        # if they're different, set new values:
                        if val_dict != self.attrs['parameters']['publish']:
                            changed = self.__pub_set_param(key, val, check_mode=check_mode)

                    # Default behavior for other cases:
                    elif self.attrs['parameters'][key] != val:
                        changed = self.__pub_set_param(key, val, check_mode=check_mode)

                else:
                    # If the parameter was not set before:
                    changed = self.__pub_set_param(key, val, check_mode=check_mode)

        # Update pub owner:
        if owner:
            if owner != self.attrs['owner']:
                changed = self.__pub_set_owner(owner, check_mode=check_mode)

        return changed

    def drop(self, cascade=False, check_mode=True):
        """Drop the publication.

        Kwargs:
            cascade (bool): Flag indicates that publication needs to be deleted
                with its dependencies.
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if publication has been dropped, otherwise False.
        """
        # Fix: previously this method implicitly returned None when the
        # publication did not exist; always return a bool instead.
        if not self.exists:
            return False

        query_fragments = []
        query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication'))
        if cascade:
            query_fragments.append("CASCADE")

        return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)

    def __get_general_pub_info(self):
        """Get and return general publication information.

        Returns:
            Dict with publication information if successful, False otherwise.
        """
        # Check pg_publication.pubtruncate exists (supported from PostgreSQL 11):
        pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns "
                                      "WHERE table_name = 'pg_publication' "
                                      "AND column_name = 'pubtruncate'"), add_to_executed=False)

        if pgtrunc_sup:
            query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
                     "p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p "
                     "JOIN pg_catalog.pg_roles AS r "
                     "ON p.pubowner = r.oid "
                     "WHERE p.pubname = %(pname)s")
        else:
            query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
                     "p.pubupdate , p.pubdelete FROM pg_publication AS p "
                     "JOIN pg_catalog.pg_roles AS r "
                     "ON p.pubowner = r.oid "
                     "WHERE p.pubname = %(pname)s")

        result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
        if result:
            return result[0]
        else:
            return False

    def __get_tables_pub_info(self):
        """Get and return tables that are published by the publication.

        Returns:
            List of dicts with published tables.
        """
        query = ("SELECT schemaname, tablename "
                 "FROM pg_publication_tables WHERE pubname = %(pname)s")
        return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)

    def __pub_add_table(self, table, check_mode=False):
        """Add a table to the publication.

        Args:
            table (str): Table name.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
                                                        pg_quote_identifier(table, 'table')))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_drop_table(self, table, check_mode=False):
        """Drop a table from the publication.

        Args:
            table (str): Table name.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
                                                         pg_quote_identifier(table, 'table')))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_set_tables(self, tables, check_mode=False):
        """Set a table suite that need to be published by the publication.

        Args:
            tables (list): List of tables.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        quoted_tables = [pg_quote_identifier(t, 'table') for t in tables]
        query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
                                                        ', '.join(quoted_tables)))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_set_param(self, param, value, check_mode=False):
        """Set an optional publication parameter.

        Args:
            param (str): Name of the parameter.
            value (str): Parameter value.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'),
                                                           param, value))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_set_owner(self, role, check_mode=False):
        """Set a publication owner.

        Args:
            role (str): Role (user) name that needs to be set as a publication owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ('ALTER PUBLICATION %s '
                 'OWNER TO "%s"' % (pg_quote_identifier(self.name, 'publication'), role))
        return self.__exec_sql(query, check_mode=check_mode)

    def __exec_sql(self, query, check_mode=False):
        """Execute SQL query.

        Note: If we need just to get information from the database,
            we use ``exec_sql`` function directly.

        Args:
            query (str): Query that needs to be executed.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just add ``query`` to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        if check_mode:
            self.executed_queries.append(query)
            return True
        else:
            return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point: parse parameters, connect, and reconcile the publication state."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        name=dict(required=True),
        db=dict(type='str', aliases=['login_db']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        tables=dict(type='list', elements='str'),
        parameters=dict(type='dict'),
        owner=dict(type='str'),
        cascade=dict(type='bool', default=False),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # Shortcuts to module parameters:
    opts = module.params
    name = opts['name']
    state = opts['state']
    tables = opts['tables']
    params = opts['parameters']
    owner = opts['owner']
    cascade = opts['cascade']
    session_role = opts['session_role']

    if not opts['trust_input']:
        # Check input for potentially dangerous elements:
        params_list = None
        if params:
            params_list = ['%s = %s' % (k, v) for k, v in iteritems(params)]

        check_input(module, name, tables, owner, session_role, params_list)

    # Warn about options that have no effect for the requested state:
    if state == 'absent':
        for ignored in ('tables', 'parameters', 'owner'):
            if opts[ignored]:
                module.warn('parameter "%s" is ignored when "state=absent"' % ignored)
    elif cascade:
        module.warn('parameter "cascade" is ignored when "state=present"')

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    # We check publication state without DML queries execution, so set autocommit:
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Publications require PostgreSQL 10+:
    if cursor.connection.server_version < SUPPORTED_PG_VERSION:
        module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")

    # Nothing was changed by default:
    changed = False

    publication = PgPublication(module, cursor, name)

    if tables:
        tables = transform_tables_representation(tables)

    # If module.check_mode=True, nothing will be changed:
    if state == 'present':
        if publication.exists:
            changed = publication.update(tables, params, owner, check_mode=module.check_mode)
        else:
            changed = publication.create(tables, params, owner, check_mode=module.check_mode)
    elif state == 'absent':
        changed = publication.drop(cascade=cascade, check_mode=module.check_mode)

    # Collect final publication info for the return values:
    pub_fin_info = {}
    if state == 'present' or (state == 'absent' and module.check_mode):
        pub_fin_info = publication.get_info()
    elif state == 'absent' and not module.check_mode:
        publication.exists = False

    # Connection is not needed any more:
    cursor.close()
    db_connection.close()

    # Update publication info and return ret values:
    module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py
new file mode 100644
index 000000000..83f1665ee
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py
@@ -0,0 +1,538 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Felix Archambault
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_query
+short_description: Run PostgreSQL queries
+description:
+- Runs arbitrary PostgreSQL queries.
+- B(WARNING) The C(path_to_script) and C(as_single_query) options as well as
+ the C(query_list) and C(query_all_results) return values have been B(deprecated) and
+ will be removed in community.postgresql 3.0.0, please use the
+ M(community.postgresql.postgresql_script) module to execute statements from scripts.
+- Does not run against backup files. Use M(community.postgresql.postgresql_db) with I(state=restore)
+ to run queries on files made by pg_dump/pg_dumpall utilities.
+options:
+ query:
+ description:
+ - SQL query string or list of queries to run. Variables can be escaped with psycopg2 syntax
+ U(http://initd.org/psycopg/docs/usage.html).
+ type: raw
+ positional_args:
+ description:
+ - List of values to be passed as positional arguments to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(named_args).
+ type: list
+ elements: raw
+ named_args:
+ description:
+ - Dictionary of key-value arguments to pass to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(positional_args).
+ type: dict
+ path_to_script:
+ description:
+ - This option has been B(deprecated) and will be removed in community.postgresql 3.0.0,
+ please use the M(community.postgresql.postgresql_script) module to execute
+ statements from scripts.
+ - Path to a SQL script on the target machine.
+ - If the script contains several queries, they must be semicolon-separated.
+ - To run scripts containing objects with semicolons
+ (for example, function and procedure definitions), use I(as_single_query=true).
+ - To upload dumps or to execute other complex scripts, the preferable way
+ is to use the M(community.postgresql.postgresql_db) module with I(state=restore).
+ - Mutually exclusive with I(query).
+ type: path
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ autocommit:
+ description:
+ - Execute in autocommit mode when the query can't be run inside a transaction block
+ (e.g., VACUUM).
+ - Mutually exclusive with I(check_mode).
+ type: bool
+ default: false
+ encoding:
+ description:
+ - Set the client encoding for the current session (e.g. C(UTF-8)).
+ - The default is the encoding defined by the database.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(false), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+ search_path:
+ description:
+ - List of schema names to look in.
+ type: list
+ elements: str
+ version_added: '1.0.0'
+ as_single_query:
+ description:
+ - This option has been B(deprecated) and will be removed in community.postgresql 3.0.0,
+ please use the M(community.postgresql.postgresql_script) module to execute
+ statements from scripts.
+ - If C(true), when reading from the I(path_to_script) file,
+ executes its whole content in a single query (not splitting it up
+ into separate queries by semicolons). It brings the following changes in
+ the module's behavior.
+ - When C(true), the C(query_all_results) return value
+ contains only the result of the last statement.
+ - Whether the state is reported as changed or not
+ is determined by the last statement of the file.
+ - Used only when I(path_to_script) is specified, otherwise ignored.
+ - If set to C(false), the script can contain only semicolon-separated queries.
+ (see the I(path_to_script) option documentation).
+ type: bool
+ default: true
+ version_added: '1.1.0'
+seealso:
+- module: community.postgresql.postgresql_script
+- module: community.postgresql.postgresql_db
+- name: PostgreSQL Schema reference
+ description: Complete reference of the PostgreSQL schema documentation.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+
+attributes:
+ check_mode:
+ support: full
+
+author:
+- Felix Archambault (@archf)
+- Andrew Klychkov (@Andersson007)
+- Will Rouesnel (@wrouesnel)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Simple select query to acme db
+ community.postgresql.postgresql_query:
+ db: acme
+ query: SELECT version()
+
+# The result of each query will be stored in query_all_results return value
+- name: Run several queries against acme db
+ community.postgresql.postgresql_query:
+ db: acme
+ query:
+ - SELECT version()
+ - SELECT id FROM accounts
+
+- name: Select query to db acme with positional arguments and non-default credentials
+ community.postgresql.postgresql_query:
+ db: acme
+ login_user: django
+ login_password: mysecretpass
+ query: SELECT * FROM acme WHERE id = %s AND story = %s
+ positional_args:
+ - 1
+ - test
+
+- name: Select query to test_db with named_args
+ community.postgresql.postgresql_query:
+ db: test_db
+ query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
+ named_args:
+ id_val: 1
+ story_val: test
+
+- name: Insert query to test_table in db test_db
+ community.postgresql.postgresql_query:
+ db: test_db
+ query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
+
+- name: Use connect_params to add any additional connection parameters that libpg supports
+ community.postgresql.postgresql_query:
+ connect_params:
+ target_session_attrs: read-write
+ connect_timeout: 10
+ login_host: "host1,host2"
+ login_user: "test"
+ login_password: "test1234"
+ db: 'test'
+ query: 'insert into test (test) values (now())'
+
+
+# WARNING: The path_to_script and as_single_query options have been deprecated
+# and will be removed in community.postgresql 3.0.0, please
+# use the community.postgresql.postgresql_script module instead.
+# If your script contains semicolons as parts of separate objects
+# like functions, procedures, and so on, use "as_single_query: true"
+- name: Run queries from SQL script using UTF-8 client encoding for session
+ community.postgresql.postgresql_query:
+ db: test_db
+ path_to_script: /var/lib/pgsql/test.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+
+- name: Example of using autocommit parameter
+ community.postgresql.postgresql_query:
+ db: test_db
+ query: VACUUM
+ autocommit: true
+
+- name: >
+ Insert data to the column of array type using positional_args.
+ Note that we use quotes here, the same as for passing JSON, etc.
+ community.postgresql.postgresql_query:
+ query: INSERT INTO test_table (array_column) VALUES (%s)
+ positional_args:
+ - '{1,2,3}'
+
+# Pass list and string vars as positional_args
+- name: Set vars
+ ansible.builtin.set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+
+- name: Select from test table by passing positional_args as arrays
+ community.postgresql.postgresql_query:
+ query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
+ positional_args:
+ - '{{ my_list }}'
+ - '{{ my_arr|string }}'
+
+# Select from test table looking into app1 schema first, then,
+# if the schema doesn't exist or the table hasn't been found there,
+# try to find it in the schema public
+- name: Select from test using search_path
+ community.postgresql.postgresql_query:
+ query: SELECT * FROM test_array_table
+ search_path:
+ - app1
+ - public
+
+# If you use a variable in positional_args / named_args that can
+# be undefined and you wish to set it as NULL, the constructions like
+# "{{ my_var if (my_var is defined) else none | default(none) }}"
+# will not work as expected substituting an empty string instead of NULL.
+# If possible, we suggest to use Ansible's DEFAULT_JINJA2_NATIVE configuration
+# (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-jinja2-native).
+# Enabling it fixes this problem. If you cannot enable it, the following workaround
+# can be used.
+# You should precheck such a value and define it as NULL when undefined.
+# For example:
+- name: When undefined, set to NULL
+ set_fact:
+ my_var: NULL
+ when: my_var is undefined
+
+# Then:
+- name: Insert a value using positional arguments
+ community.postgresql.postgresql_query:
+ query: INSERT INTO test_table (col1) VALUES (%s)
+ positional_args:
+ - '{{ my_var }}'
+'''
+
+RETURN = r'''
+query:
+ description:
+ - Executed query.
+ - When reading several queries from a file, it contains only the last one.
+ returned: always
+ type: str
+ sample: 'SELECT * FROM bar'
+statusmessage:
+ description:
+ - Attribute containing the message returned by the command.
+ - When reading several queries from a file, it contains a message of the last one.
+ returned: always
+ type: str
+ sample: 'INSERT 0 1'
+query_result:
+ description:
+ - List of dictionaries in column:value form representing returned rows.
+ - When running queries from a file, returns result of the last query.
+ returned: always
+ type: list
+ elements: dict
+ sample: [{"Column": "Value1"},{"Column": "Value2"}]
+query_list:
+ description:
+ - List of executed queries.
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: str
+ sample: ['SELECT * FROM foo', 'SELECT * FROM bar']
+query_all_results:
+ description:
+ - List containing results of all queries executed (one sublist for every query).
+ Useful when running a list of queries.
+ returned: always
+ type: list
+ elements: list
+ sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"Column": "Value1"},{"Column": "Value2"}]]
+rowcount:
+ description:
+ - Number of produced or affected rows.
+ - When using a script with multiple queries,
+ it contains a total number of produced or affected rows.
+ returned: changed
+ type: int
+ sample: 5
+'''
+
+try:
+ from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # it is needed for checking 'no result to fetch' in main(),
+ # psycopg2 availability will be checked by connect_to_db() into
+ # ansible.module_utils.postgres
+ pass
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ convert_elements_to_pg_arrays,
+ convert_to_supported,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+ set_search_path,
+ TYPES_NEED_TO_CONVERT,
+)
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+
+# ===========================================
+# Module execution.
+#
+
+
def insane_query(string):
    """Return True when *string* contains no executable SQL.

    Used to filter out empty fragments produced by splitting a script on
    ';' so that whitespace-only "queries" are not sent to the server.

    Fixes over the previous implementation:
    - the old character tuple contained '' which was dead code (a single
      character can never equal the empty string);
    - '\r' was not treated as whitespace, so a fragment such as "\r\n"
      was executed as a query; str.strip() covers all whitespace.
    """
    return string.strip() == ''
+
+
def main():
    """Ansible entry point: run user-supplied SQL and report the results.

    Reads SQL either from the ``query`` option (string or list) or from a
    file given via the deprecated ``path_to_script`` option, executes each
    statement through a psycopg2 DictCursor and returns statusmessage,
    row counts and fetched rows to the caller.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        query=dict(type='raw'),
        db=dict(type='str', aliases=['login_db']),
        positional_args=dict(type='list', elements='raw'),
        named_args=dict(type='dict'),
        session_role=dict(type='str'),
        path_to_script=dict(type='path'),
        autocommit=dict(type='bool', default=False),
        encoding=dict(type='str'),
        trust_input=dict(type='bool', default=True),
        search_path=dict(type='list', elements='str'),
        as_single_query=dict(type='bool', default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=(('positional_args', 'named_args'),),
        supports_check_mode=True,
    )

    query = module.params["query"]
    positional_args = module.params["positional_args"]
    named_args = module.params["named_args"]
    path_to_script = module.params["path_to_script"]
    autocommit = module.params["autocommit"]
    encoding = module.params["encoding"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]
    search_path = module.params["search_path"]
    as_single_query = module.params["as_single_query"]

    # 'query' is declared as type 'raw', so its type must be checked here.
    if query and not isinstance(query, (str, list)):
        module.fail_json(msg="query argument must be of type string or list")

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, session_role)

    # Check mode is implemented by rolling the transaction back (below);
    # with autocommit every statement takes effect immediately, so the
    # two options cannot be combined.
    if autocommit and module.check_mode:
        module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")

    if path_to_script and query:
        module.fail_json(msg="path_to_script is mutually exclusive with query")

    query_list = []
    if path_to_script:
        depr_msg = ("The 'path_to_script' option is deprecated. Please use the "
                    "'community.postgresql.postgresql_script' module to execute "
                    "statements from scripts")
        module.deprecate(msg=depr_msg, version="3.0.0", collection_name="community.postgresql")

        try:
            # Read as bytes and decode via to_native so non-ASCII
            # script content survives the round trip.
            with open(path_to_script, 'rb') as f:
                query = to_native(f.read())

            if not as_single_query:
                depr_msg = ("The 'as_single_query' option is deprecated. Please use the "
                            "'community.postgresql.postgresql_script' module to execute "
                            "statements from scripts")
                module.deprecate(msg=depr_msg, version="3.0.0", collection_name="community.postgresql")

                # Naive split on ';': this does not account for semicolons
                # inside string literals or dollar-quoted bodies -- one
                # reason this mode is deprecated in favour of
                # postgresql_script.
                if ';' in query:
                    for q in query.split(';'):
                        # Skip whitespace-only fragments between semicolons.
                        if insane_query(q):
                            continue
                        else:
                            query_list.append(q)
                else:
                    query_list.append(query)
            else:
                query_list.append(query)

        except Exception as e:
            module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
    else:
        if isinstance(query, str):
            query_list.append(query)
        else: # if it's a list
            # NOTE(review): if neither 'query' nor 'path_to_script' was
            # supplied, 'query' is None here, and iterating query_list
            # below raises TypeError instead of a clean module failure --
            # confirm whether an explicit required-one-of check is wanted.
            query_list = query

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=autocommit)
    if encoding is not None:
        db_connection.set_client_encoding(encoding)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    if search_path:
        set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path]))

    # Prepare args (positional_args and named_args are mutually exclusive,
    # enforced by AnsibleModule above):
    if positional_args:
        args = positional_args
    elif named_args:
        args = named_args
    else:
        args = None

    # Convert elements of type list to strings
    # representing PG arrays
    if args:
        args = convert_elements_to_pg_arrays(args)

    # Set defaults:
    changed = False

    query_all_results = []
    rowcount = 0
    statusmessage = ''

    # Execute query:
    for query in query_list:
        try:
            cursor.execute(query, args)
            statusmessage = cursor.statusmessage
            if cursor.rowcount > 0:
                rowcount += cursor.rowcount

            query_result = []
            try:
                for row in cursor.fetchall():
                    # Ansible engine does not support decimals.
                    # An explicit conversion is required on the module's side
                    row = dict(row)
                    for (key, val) in iteritems(row):
                        if isinstance(val, TYPES_NEED_TO_CONVERT):
                            row[key] = convert_to_supported(val)

                    query_result.append(row)

            except Psycopg2ProgrammingError as e:
                # Statements without a result set (e.g. INSERT without
                # RETURNING) make fetchall() raise this; treat as empty.
                if to_native(e) == 'no results to fetch':
                    query_result = {}

            except Exception as e:
                module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))

            query_all_results.append(query_result)

            # Heuristic changed-detection from the command tag, e.g.
            # "INSERT 0 1" (3 tokens) or "UPDATE 5" (2 tokens): the last
            # token is the affected-row count.
            if 'SELECT' not in statusmessage:
                if re.search(re.compile(r'(UPDATE|INSERT|DELETE)'), statusmessage):
                    s = statusmessage.split()
                    if len(s) == 3:
                        if s[2] != '0':
                            changed = True

                    elif len(s) == 2:
                        if s[1] != '0':
                            changed = True

                    else:
                        changed = True

                else:
                    # Any other non-SELECT command (DDL etc.) is assumed
                    # to have changed the database state.
                    changed = True

        except Exception as e:
            # Undo everything executed so far before failing
            # (no-op under autocommit).
            if not autocommit:
                db_connection.rollback()

            cursor.close()
            db_connection.close()
            module.fail_json(msg="Cannot execute SQL '%s' %s: %s, query list: %s" % (query, args, to_native(e), query_list))

    # Check mode: discard all changes by rolling the transaction back.
    if module.check_mode:
        db_connection.rollback()
    else:
        if not autocommit:
            db_connection.commit()

    # NOTE(review): 'query_result' holds rows of the last executed
    # statement only; with an empty query_list it would be unbound here.
    kw = dict(
        changed=changed,
        query=cursor.query,
        query_list=query_list,
        statusmessage=statusmessage,
        query_result=query_result,
        query_all_results=query_all_results,
        rowcount=rowcount,
    )

    cursor.close()
    db_connection.close()

    module.exit_json(**kw)
+
+
# Run the module entry point when Ansible executes this file directly.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py
new file mode 100644
index 000000000..f107e1aa0
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_schema
+short_description: Add or remove PostgreSQL schema
+description:
+- Add or remove PostgreSQL schema.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ type: str
+ aliases:
+ - schema
+ database:
+ description:
+ - Name of the database to connect to and add or remove the schema.
+ type: str
+ default: postgres
+ aliases:
+ - db
+ - login_db
+ owner:
+ description:
+ - Name of the role to set as owner of the schema.
+ type: str
+ default: ''
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The schema state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ cascade_drop:
+ description:
+ - Drop schema with CASCADE to remove child objects.
+ type: bool
+ default: false
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(schema), I(owner), I(session_role) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL schemas
+ description: General information about PostgreSQL schemas.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+- name: CREATE SCHEMA reference
+ description: Complete reference of the CREATE SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createschema.html
+- name: ALTER SCHEMA reference
+ description: Complete reference of the ALTER SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterschema.html
+- name: DROP SCHEMA reference
+ description: Complete reference of the DROP SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropschema.html
+
+attributes:
+ check_mode:
+ support: full
+
+author:
+- Flavien Chantelot (@Dorn-) <contact@flavien.io>
+- Thomas O'Donnell (@andytom)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Create a new schema with name acme in test database
+ community.postgresql.postgresql_schema:
+ db: test
+ name: acme
+
+- name: Create a new schema acme with a user bob who will own it
+ community.postgresql.postgresql_schema:
+ name: acme
+ owner: bob
+
+- name: Drop schema "acme" with cascade
+ community.postgresql.postgresql_schema:
+ name: acme
+ state: absent
+ cascade_drop: true
+'''
+
+RETURN = r'''
+schema:
+ description: Name of the schema.
+ returned: success, changed
+ type: str
+ sample: "acme"
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["CREATE SCHEMA \"acme\""]
+'''
+
+import traceback
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+ SQLParseError,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
class NotSupportedError(Exception):
    """Module-specific error; caught in main() alongside SQLParseError."""
    # NOTE(review): nothing in this module currently raises it -- appears
    # to be kept for the shared error-handling pattern; confirm.
    pass
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
def set_owner(cursor, schema, owner):
    """Reassign ownership of *schema* to *owner*.

    Records the statement in the module-level ``executed_queries`` list
    and always returns True to signal that a change was made.
    """
    quoted_schema = pg_quote_identifier(schema, 'schema')
    stmt = 'ALTER SCHEMA %s OWNER TO "%s"' % (quoted_schema, owner)
    cursor.execute(stmt)
    executed_queries.append(stmt)
    return True
+
+
def get_schema_info(cursor, schema):
    """Return the row with the owner of *schema*, or None if not found."""
    sql = (
        "SELECT schema_owner AS owner "
        "FROM information_schema.schemata "
        "WHERE schema_name = %(schema)s"
    )
    params = {'schema': schema}
    cursor.execute(sql, params)
    return cursor.fetchone()
+
+
def schema_exists(cursor, schema):
    """Return True when exactly one schema named *schema* is present."""
    cursor.execute(
        "SELECT schema_name FROM information_schema.schemata "
        "WHERE schema_name = %(schema)s",
        {'schema': schema},
    )
    return cursor.rowcount == 1
+
+
def schema_delete(cursor, schema, cascade):
    """Drop *schema* if it exists; return True when a DROP was issued."""
    if not schema_exists(cursor, schema):
        # Nothing to do -- report no change.
        return False

    query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
    if cascade:
        query = query + " CASCADE"
    cursor.execute(query)
    executed_queries.append(query)
    return True
+
+
def schema_create(cursor, schema, owner):
    """Create *schema* (optionally owned by *owner*); return True on change.

    When the schema already exists, ownership is transferred only if
    *owner* is given and differs from the current owner; otherwise no
    statement is issued and False is returned.
    """
    if schema_exists(cursor, schema):
        current = get_schema_info(cursor, schema)
        if owner and owner != current['owner']:
            return set_owner(cursor, schema, owner)
        return False

    fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
    if owner:
        fragments.append('AUTHORIZATION "%s"' % owner)
    query = ' '.join(fragments)
    cursor.execute(query)
    executed_queries.append(query)
    return True
+
+
def schema_matches(cursor, schema, owner):
    """Return True when *schema* exists and (if *owner* given) owns it."""
    if not schema_exists(cursor, schema):
        return False
    info = get_schema_info(cursor, schema)
    # Matches unless a desired owner was given and differs from reality.
    return not (owner and owner != info['owner'])
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_schema module.

    Creates, drops, or changes the owner of a schema according to the
    ``state``/``owner`` options and exits with ``changed`` plus the list
    of executed queries.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        schema=dict(type="str", required=True, aliases=['name']),
        owner=dict(type="str", default=""),
        database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
        cascade_drop=dict(type="bool", default=False),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        session_role=dict(type="str"),
        trust_input=dict(type="bool", default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    schema = module.params["schema"]
    owner = module.params["owner"]
    state = module.params["state"]
    cascade_drop = module.params["cascade_drop"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, schema, owner, session_role)

    changed = False

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    try:
        if module.check_mode:
            if state == "absent":
                # Bug fix: dropping the schema would change something only
                # when it currently exists, mirroring what schema_delete()
                # returns in the real run. The previous expression
                # 'not schema_exists(...)' reported the inverted result
                # in check mode.
                changed = schema_exists(cursor, schema)
            elif state == "present":
                changed = not schema_matches(cursor, schema, owner)
            module.exit_json(changed=changed, schema=schema)

        if state == "absent":
            try:
                changed = schema_delete(cursor, schema, cascade_drop)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state == "present":
            try:
                changed = schema_create(cursor, schema, owner)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except SystemExit:
        # exit_json()/fail_json() terminate via SystemExit; re-raise so
        # the broad handler below does not swallow a normal exit.
        raise
    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())

    db_connection.close()
    module.exit_json(changed=changed, schema=schema, queries=executed_queries)
+
+
# Run the module entry point when Ansible executes this file directly.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_script.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_script.py
new file mode 100644
index 000000000..acd97f4d2
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_script.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_script
+
+short_description: Run PostgreSQL statements from a file
+
+description:
+- Runs arbitrary PostgreSQL statements from a file.
+- The module always reports that the state has changed.
+- Does not run against backup files.
+ Use M(community.postgresql.postgresql_db) with I(state=restore)
+ to run queries on files made by pg_dump/pg_dumpall utilities.
+
+version_added: '2.1.0'
+
+options:
+ positional_args:
+ description:
+ - List of values to substitute variable placeholders within the file content.
+ - When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(named_args).
+ type: list
+ elements: raw
+ named_args:
+ description:
+ - Dictionary of key-value arguments to substitute
+ variable placeholders within the file content.
+ - When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(positional_args).
+ type: dict
+ path:
+ description:
+ - Path to a SQL script on the target machine.
+ - To upload dumps, the preferable way
+ is to use the M(community.postgresql.postgresql_db) module with I(state=restore).
+ type: path
+ session_role:
+ description:
+ - Switch to C(session_role) after connecting. The specified role must
+ be a role that the current C(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the C(session_role) were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ encoding:
+ description:
+ - Set the client encoding for the current session (e.g. C(UTF-8)).
+ - The default is the encoding defined by the database.
+ type: str
+ trust_input:
+ description:
+ - If C(false), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections
+ via I(session_role) are possible.
+ type: bool
+ default: true
+ search_path:
+ description:
+ - Overrides the list of schemas to search for db objects in.
+ type: list
+ elements: str
+
+seealso:
+- module: community.postgresql.postgresql_db
+- module: community.postgresql.postgresql_query
+- name: PostgreSQL Schema reference
+ description: Complete reference of the PostgreSQL schema documentation.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+
+attributes:
+ check_mode:
+ support: none
+
+author:
+- Douglas J Hunley (@hunleyd)
+- A. Hart (@jtelcontar)
+- Daniel Scharon (@DanScharon)
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+# Assuming that the file contains
+# SELECT * FROM id_table WHERE id = %s,
+# '%s' will be substituted with 1
+- name: Run query from SQL script using UTF-8 client encoding for session and positional args
+ community.postgresql.postgresql_script:
+ db: test_db
+ path: /var/lib/pgsql/test.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+
+# Assuming that the file contains
+# SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s,
+# %-values will be substituted with 1 and 'test'
+- name: Select query to test_db with named_args
+ community.postgresql.postgresql_script:
+ db: test_db
+ path: /var/lib/pgsql/test.sql
+ named_args:
+ id_val: 1
+ story_val: test
+
+- block:
+    # Assuming that the file contains
+ # SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
+ # Pass list and string vars as positional_args
+ - name: Set vars
+ ansible.builtin.set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+ - name: Passing positional_args as arrays
+ community.postgresql.postgresql_script:
+ path: /var/lib/pgsql/test.sql
+ positional_args:
+ - '{{ my_list }}'
+ - '{{ my_arr|string }}'
+
+# Assuming that the file contains
+# SELECT * FROM test_table,
+# look into app1 schema first, then,
+# if the schema doesn't exist or the table hasn't been found there,
+# try to find it in the schema public
+- name: Select from test using search_path
+ community.postgresql.postgresql_script:
+ path: /var/lib/pgsql/test.sql
+ search_path:
+ - app1
+ - public
+
+- block:
+ # If you use a variable in positional_args/named_args that can
+ # be undefined and you wish to set it as NULL, constructions like
+ # "{{ my_var if (my_var is defined) else none | default(none) }}"
+ # will not work as expected substituting an empty string instead of NULL.
+ # If possible, we suggest using Ansible's DEFAULT_JINJA2_NATIVE configuration
+ # (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-jinja2-native).
+ # Enabling it fixes this problem. If you cannot enable it, the following workaround
+ # can be used.
+ # You should precheck such a value and define it as NULL when undefined.
+ # For example:
+ - name: When undefined, set to NULL
+ set_fact:
+ my_var: NULL
+ when: my_var is undefined
+
+ # Then, assuming that the file contains
+ # INSERT INTO test_table (col1) VALUES (%s)
+ - name: Insert a value using positional arguments
+ community.postgresql.postgresql_script:
+ path: /var/lib/pgsql/test.sql
+ positional_args:
+ - '{{ my_var }}'
+'''
+
+RETURN = r'''
+query:
+ description:
+ - Executed query.
+ - When the C(positional_args) or C(named_args) options are used,
+ the query contains all variables that were substituted
+ inside the database connector.
+ returned: always
+ type: str
+ sample: 'SELECT * FROM bar'
+statusmessage:
+ description:
+ - Attribute containing the message returned by the database connector
+ after executing the script content.
+ - When there are several statements in the script, returns a message
+ related to the last statement.
+ returned: always
+ type: str
+ sample: 'INSERT 0 1'
+query_result:
+ description:
+ - List of dictionaries in the column:value form representing returned rows.
+ - When there are several statements in the script,
+ returns result of the last statement.
+ returned: always
+ type: list
+ elements: dict
+ sample: [{"Column": "Value1"},{"Column": "Value2"}]
+rowcount:
+ description:
+ - Number of produced or affected rows.
+ - When there are several statements in the script,
+ returns a number of rows affected by the last statement.
+ returned: changed
+ type: int
+ sample: 5
+'''
+
+try:
+ from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # it is needed for checking 'no result to fetch' in main(),
+ # psycopg2 availability will be checked by connect_to_db() into
+ # ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ convert_elements_to_pg_arrays,
+ convert_to_supported,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+ set_search_path,
+ TYPES_NEED_TO_CONVERT,
+)
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Ansible entry point: execute the content of a SQL script file.

    The whole file is passed to psycopg2 as a single statement string;
    the connection runs with autocommit enabled and the module does not
    support check mode.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        path=dict(type='path'),
        db=dict(type='str', aliases=['login_db']),
        positional_args=dict(type='list', elements='raw'),
        named_args=dict(type='dict'),
        session_role=dict(type='str'),
        encoding=dict(type='str'),
        trust_input=dict(type='bool', default=True),
        search_path=dict(type='list', elements='str'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=(('positional_args', 'named_args'),),
        supports_check_mode=False,
    )

    path = module.params["path"]
    positional_args = module.params["positional_args"]
    named_args = module.params["named_args"]
    encoding = module.params["encoding"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]
    search_path = module.params["search_path"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, session_role)

    # Read as bytes and decode via to_native so non-ASCII script
    # content survives the round trip.
    try:
        with open(path, 'rb') as f:
            script_content = to_native(f.read())

    except Exception as e:
        module.fail_json(msg="Cannot read file '%s' : %s" % (path, to_native(e)))

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    if encoding is not None:
        db_connection.set_client_encoding(encoding)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    if search_path:
        set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path]))

    # Prepare args (mutual exclusivity enforced by AnsibleModule above):
    if positional_args:
        args = positional_args
    elif named_args:
        args = named_args
    else:
        args = None

    # Convert elements of type list to strings
    # representing PG arrays
    if args:
        args = convert_elements_to_pg_arrays(args)

    # Execute script content:
    try:
        cursor.execute(script_content, args)
    except Exception as e:
        cursor.close()
        db_connection.close()
        module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (script_content, args, to_native(e)))

    # Both values describe the last statement when the script holds
    # several (see RETURN docs above).
    statusmessage = cursor.statusmessage

    rowcount = cursor.rowcount

    query_result = []
    try:
        for row in cursor.fetchall():
            # Ansible engine does not support decimals.
            # An explicit conversion is required on the module's side
            row = dict(row)
            for (key, val) in iteritems(row):
                if isinstance(val, TYPES_NEED_TO_CONVERT):
                    row[key] = convert_to_supported(val)

            query_result.append(row)

    except Psycopg2ProgrammingError as e:
        # Statements without a result set make fetchall() raise this;
        # treat it as an empty result.
        if to_native(e) == "no results to fetch":
            query_result = {}

    except Exception as e:
        module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))

    # Per the module DOCUMENTATION, a change is always reported.
    kw = dict(
        changed=True,
        query=cursor.query,
        statusmessage=statusmessage,
        query_result=query_result,
        rowcount=rowcount,
    )

    cursor.close()
    db_connection.close()

    module.exit_json(**kw)
+
+
# Run the module entry point when Ansible executes this file directly.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py
new file mode 100644
index 000000000..c874cb970
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py
@@ -0,0 +1,637 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_sequence
+short_description: Create, drop, or alter a PostgreSQL sequence
+description:
+- Allows to create, drop or change the definition of a sequence generator.
+options:
+ sequence:
+ description:
+ - The name of the sequence.
+ required: true
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - The sequence state.
+    - If I(state=absent), other options will be ignored except I(name) and
+      I(schema).
+ default: present
+ choices: [ absent, present ]
+ type: str
+ data_type:
+ description:
+ - Specifies the data type of the sequence. Valid types are bigint, integer,
+ and smallint. bigint is the default. The data type determines the default
+ minimum and maximum values of the sequence. For more info see the
+ documentation
+ U(https://www.postgresql.org/docs/current/sql-createsequence.html).
+ - Supported from PostgreSQL 10.
+ choices: [ bigint, integer, smallint ]
+ type: str
+ increment:
+ description:
+ - Increment specifies which value is added to the current sequence value
+ to create a new value.
+ - A positive value will make an ascending sequence, a negative one a
+ descending sequence. The default value is 1.
+ type: int
+ minvalue:
+ description:
+ - Minvalue determines the minimum value a sequence can generate. The
+ default for an ascending sequence is 1. The default for a descending
+ sequence is the minimum value of the data type.
+ type: int
+ aliases:
+ - min
+ maxvalue:
+ description:
+ - Maxvalue determines the maximum value for the sequence. The default for
+ an ascending sequence is the maximum
+ value of the data type. The default for a descending sequence is -1.
+ type: int
+ aliases:
+ - max
+ start:
+ description:
+ - Start allows the sequence to begin anywhere. The default starting value
+ is I(minvalue) for ascending sequences and I(maxvalue) for descending
+ ones.
+ type: int
+ cache:
+ description:
+ - Cache specifies how many sequence numbers are to be preallocated and
+ stored in memory for faster access. The minimum value is 1 (only one
+ value can be generated at a time, i.e., no cache), and this is also
+ the default.
+ type: int
+ cycle:
+ description:
+ - The cycle option allows the sequence to wrap around when the I(maxvalue)
+ or I(minvalue) has been reached by an ascending or descending sequence
+ respectively. If the limit is reached, the next number generated will be
+ the minvalue or maxvalue, respectively.
+ - If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
+ has reached its maximum value will return an error. False (NO CYCLE) is
+ the default.
+ type: bool
+ default: false
+ cascade:
+ description:
+ - Automatically drop objects that depend on the sequence, and in turn all
+ objects that depend on those objects.
+ - Ignored if I(state=present).
+ - Only used with I(state=absent).
+ type: bool
+ default: false
+ rename_to:
+ description:
+ - The new name for the I(sequence).
+ - Works only for existing sequences.
+ type: str
+ owner:
+ description:
+ - Set the owner for the I(sequence).
+ type: str
+ schema:
+ description:
+    - The schema of the I(sequence). This is used to create and relocate
+ a I(sequence) in the given schema.
+ default: public
+ type: str
+ newschema:
+ description:
+ - The new schema for the I(sequence). Will be used for moving a
+ I(sequence) to another I(schema).
+ - Works only for existing sequences.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified I(session_role)
+ must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the I(session_role) were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ default: ''
+ aliases:
+ - database
+ - login_db
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(sequence), I(schema), I(rename_to),
+ I(owner), I(newschema), I(session_role) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+
+notes:
+- If you do not pass db parameter, sequence will be created in the database
+ named postgres.
+
+attributes:
+ check_mode:
+ support: full
+
+seealso:
+- module: community.postgresql.postgresql_table
+- module: community.postgresql.postgresql_owner
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_tablespace
+- name: CREATE SEQUENCE reference
+ description: Complete reference of the CREATE SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsequence.html
+- name: ALTER SEQUENCE reference
+ description: Complete reference of the ALTER SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersequence.html
+- name: DROP SEQUENCE reference
+ description: Complete reference of the DROP SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsequence.html
+author:
+- Tobias Birkefeld (@tcraxs)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create an ascending bigint sequence called foobar in the default
+ database
+ community.postgresql.postgresql_sequence:
+ name: foobar
+
+- name: Create an ascending integer sequence called foobar, starting at 101
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ data_type: integer
+ start: 101
+
+- name: Create a descending sequence called foobar, starting at 101 and
+ preallocated 10 sequence numbers in cache
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ increment: -1
+ cache: 10
+ start: 101
+
+- name: Create an ascending sequence called foobar, which cycles between 1 and 10
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ cycle: true
+ min: 1
+ max: 10
+
+- name: Create an ascending bigint sequence called foobar in the default
+ database with owner foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Rename an existing sequence named foo to bar
+ community.postgresql.postgresql_sequence:
+ name: foo
+ rename_to: bar
+
+- name: Change the schema of an existing sequence to foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ newschema: foobar
+
+- name: Change the owner of an existing sequence to foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Drop a sequence called foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ state: absent
+
+- name: Drop a sequence called foobar with cascade
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ cascade: true
+ state: absent
+'''
+
+RETURN = r'''
+state:
+ description: Sequence state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+sequence:
+ description: Sequence name.
+ returned: always
+ type: str
+ sample: 'foobar'
+queries:
+  description: List of queries that the module tried to execute.
+  returned: always
+  type: list
+  sample: [ "CREATE SEQUENCE \"foo\"" ]
+schema:
+ description: Name of the schema of the sequence.
+ returned: always
+ type: str
+ sample: 'foo'
+data_type:
+ description: Shows the current data type of the sequence.
+ returned: always
+ type: str
+ sample: 'bigint'
+increment:
+ description: The value of increment of the sequence. A positive value will
+ make an ascending sequence, a negative one a descending
+ sequence.
+ returned: always
+ type: int
+ sample: -1
+minvalue:
+ description: The value of minvalue of the sequence.
+ returned: always
+ type: int
+ sample: 1
+maxvalue:
+ description: The value of maxvalue of the sequence.
+ returned: always
+ type: int
+ sample: 9223372036854775807
+start:
+ description: The value of start of the sequence.
+ returned: always
+ type: int
+ sample: 12
+cycle:
+  description: Shows whether the sequence cycles or not.
+ returned: always
+ type: bool
+ sample: false
+owner:
+ description: Shows the current owner of the sequence
+ after the successful run of the task.
+ returned: always
+ type: str
+ sample: 'postgres'
+newname:
+ description: Shows the new sequence name after rename.
+ returned: on success
+ type: str
+ sample: 'barfoo'
+newschema:
+ description: Shows the new schema of the sequence after schema change.
+ returned: on success
+ type: str
+ sample: 'foobar'
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
class Sequence(object):
    """Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command.

    Arguments:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library

    Attributes:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        changed (bool) -- something was changed after execution or not
        executed_queries (list) -- executed queries
        name (str) -- name of the sequence
        owner (str) -- name of the owner of the sequence
        schema (str) -- name of the schema (default: public)
        data_type (str) -- data type of the sequence
        start_value (int) -- value of the sequence start
        minvalue (int) -- minimum value of the sequence
        maxvalue (int) -- maximum value of the sequence
        increment (int) -- increment value of the sequence
        cycle (bool) -- sequence can cycle or not
        new_name (str) -- name of the renamed sequence
        new_schema (str) -- name of the new schema
        exists (bool) -- sequence exists or not
    """

    def __init__(self, module, cursor):
        self.module = module
        self.cursor = cursor
        # Filled in by exec_sql() and returned to the user as 'queries':
        self.executed_queries = []
        self.name = self.module.params['sequence']
        self.owner = ''
        self.schema = self.module.params['schema']
        self.data_type = ''
        self.start_value = ''
        self.minvalue = ''
        self.maxvalue = ''
        self.increment = ''
        self.cycle = ''
        self.new_name = ''
        self.new_schema = ''
        self.exists = False
        # Collect info
        self.get_info()

    def get_info(self):
        """Getter to refresh and get sequence info.

        Sets self.exists and, when the sequence is found, refreshes all
        catalog-derived attributes (schema, name, owner, data type,
        boundaries, increment and cycle option). Returns False when the
        sequence does not exist.
        """
        query = ("SELECT "
                 "s.sequence_schema AS schemaname, "
                 "s.sequence_name AS sequencename, "
                 "pg_get_userbyid(c.relowner) AS sequenceowner, "
                 "s.data_type::regtype AS data_type, "
                 "s.start_value AS start_value, "
                 "s.minimum_value AS min_value, "
                 "s.maximum_value AS max_value, "
                 "s.increment AS increment_by, "
                 "s.cycle_option AS cycle "
                 "FROM information_schema.sequences s "
                 "JOIN pg_class c ON c.relname = s.sequence_name "
                 "LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
                 "WHERE NOT pg_is_other_temp_schema(n.oid) "
                 "AND c.relkind = 'S'::\"char\" "
                 "AND sequence_name = %(name)s "
                 "AND sequence_schema = %(schema)s")

        res = exec_sql(self, query,
                       query_params={'name': self.name, 'schema': self.schema},
                       add_to_executed=False)

        if not res:
            self.exists = False
            return False

        # Cleanup: the original redundant 'if res:' guard was removed,
        # res is known to be truthy at this point.
        self.exists = True
        self.schema = res[0]['schemaname']
        self.name = res[0]['sequencename']
        self.owner = res[0]['sequenceowner']
        self.data_type = res[0]['data_type']
        self.start_value = res[0]['start_value']
        self.minvalue = res[0]['min_value']
        self.maxvalue = res[0]['max_value']
        self.increment = res[0]['increment_by']
        self.cycle = res[0]['cycle']

    def create(self):
        """Implements CREATE SEQUENCE command behavior."""
        query = ['CREATE SEQUENCE']
        query.append(self.__add_schema())

        params = self.module.params

        if params.get('data_type'):
            query.append('AS %s' % params['data_type'])

        # Bug fix: use explicit 'is not None' checks for the integer options
        # below. With plain truthiness, a user-supplied value of 0 (valid for
        # start/minvalue/maxvalue) was silently dropped from the statement.
        if params.get('increment') is not None:
            query.append('INCREMENT BY %s' % params['increment'])

        if params.get('minvalue') is not None:
            query.append('MINVALUE %s' % params['minvalue'])

        if params.get('maxvalue') is not None:
            query.append('MAXVALUE %s' % params['maxvalue'])

        if params.get('start') is not None:
            query.append('START WITH %s' % params['start'])

        if params.get('cache') is not None:
            query.append('CACHE %s' % params['cache'])

        if params.get('cycle'):
            query.append('CYCLE')

        return exec_sql(self, ' '.join(query), return_bool=True)

    def drop(self):
        """Implements DROP SEQUENCE command behavior."""
        query = ['DROP SEQUENCE']
        query.append(self.__add_schema())

        if self.module.params.get('cascade'):
            query.append('CASCADE')

        return exec_sql(self, ' '.join(query), return_bool=True)

    def rename(self):
        """Implements ALTER SEQUENCE RENAME TO command behavior."""
        # Identifiers are interpolated verbatim; callers are expected to have
        # validated them via check_input() when trust_input is false.
        query = ['ALTER SEQUENCE']
        query.append(self.__add_schema())
        query.append('RENAME TO "%s"' % self.module.params['rename_to'])

        return exec_sql(self, ' '.join(query), return_bool=True)

    def set_owner(self):
        """Implements ALTER SEQUENCE OWNER TO command behavior."""
        query = ['ALTER SEQUENCE']
        query.append(self.__add_schema())
        query.append('OWNER TO "%s"' % self.module.params['owner'])

        return exec_sql(self, ' '.join(query), return_bool=True)

    def set_schema(self):
        """Implements ALTER SEQUENCE SET SCHEMA command behavior."""
        query = ['ALTER SEQUENCE']
        query.append(self.__add_schema())
        query.append('SET SCHEMA "%s"' % self.module.params['newschema'])

        return exec_sql(self, ' '.join(query), return_bool=True)

    def __add_schema(self):
        # Fully-qualified, double-quoted "schema"."name" identifier.
        return '"%s"."%s"' % (self.schema, self.name)
+
+
+# ===========================================
+# Module execution.
+#
+
def main():
    """Entry point: create, alter, rename, move or drop a sequence."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        sequence=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']),
        increment=dict(type='int'),
        minvalue=dict(type='int', aliases=['min']),
        maxvalue=dict(type='int', aliases=['max']),
        start=dict(type='int'),
        cache=dict(type='int'),
        cycle=dict(type='bool', default=False),
        schema=dict(type='str', default='public'),
        cascade=dict(type='bool', default=False),
        rename_to=dict(type='str'),
        owner=dict(type='str'),
        newschema=dict(type='str'),
        db=dict(type='str', default='', aliases=['login_db', 'database']),
        session_role=dict(type='str'),
        trust_input=dict(type="bool", default=True),
    )

    # rename_to and cascade are standalone operations and cannot be combined
    # with sequence property options (or with each other).
    rename_conflicts = ['data_type', 'increment', 'minvalue', 'maxvalue',
                        'start', 'cache', 'cycle', 'cascade', 'owner', 'newschema']
    cascade_conflicts = ['data_type', 'increment', 'minvalue', 'maxvalue',
                         'start', 'cache', 'cycle', 'owner', 'newschema']
    exclusive_pairs = ([['rename_to', opt] for opt in rename_conflicts]
                       + [['cascade', opt] for opt in cascade_conflicts])

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=exclusive_pairs,
    )

    params = module.params
    state = params['state']

    if not params["trust_input"]:
        # Check input for potentially dangerous elements:
        check_input(
            module,
            params['sequence'],
            params['schema'],
            params['rename_to'],
            params['owner'],
            params['newschema'],
            params['session_role'],
        )

    # In check mode autocommit stays off so everything can be rolled back:
    autocommit = not module.check_mode
    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, params)
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=autocommit)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Fetch the current state of the sequence and do the main job:
    sequence = Sequence(module, cursor)

    changed = False

    if state == 'present' and not sequence.exists:
        # Renaming or relocating a non-existent sequence is an error:
        if params.get('rename_to'):
            module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % params['sequence'])
        if params.get('newschema'):
            module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % params['sequence'])

        changed = sequence.create()

    elif state == 'absent' and sequence.exists:
        changed = sequence.drop()
    # state == 'absent' with a non-existent sequence: nothing to do.

    # Rename an existing sequence:
    if sequence.exists and params.get('rename_to'):
        if sequence.name != params['rename_to']:
            changed = sequence.rename()
            if changed:
                sequence.new_name = params['rename_to']

    # Refresh catalog information:
    if state == 'present':
        sequence.get_info()

    # Change owner and schema of an existing sequence:
    if state == 'present' and sequence.exists:
        if params.get('owner') and sequence.owner != params['owner']:
            changed = sequence.set_owner()

        if params.get('newschema') and sequence.schema != params['newschema']:
            changed = sequence.set_schema()
            if changed:
                sequence.new_schema = params['newschema']

    # Roll back if it's possible and check_mode:
    if module.check_mode:
        db_connection.rollback()
    else:
        db_connection.commit()

    cursor.close()
    db_connection.close()

    # Make return values:
    kw = dict(
        changed=changed,
        state='present',
        sequence=sequence.name,
        queries=sequence.executed_queries,
        schema=sequence.schema,
        data_type=sequence.data_type,
        increment=sequence.increment,
        minvalue=sequence.minvalue,
        maxvalue=sequence.maxvalue,
        start=sequence.start_value,
        cycle=sequence.cycle,
        owner=sequence.owner,
    )

    if state == 'present':
        if sequence.new_name:
            kw['newname'] = sequence.new_name
        if sequence.new_schema:
            kw['newschema'] = sequence.new_schema
    elif state == 'absent':
        kw['state'] = 'absent'

    module.exit_json(**kw)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py
new file mode 100644
index 000000000..966aeb004
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py
@@ -0,0 +1,514 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_set
+short_description: Change a PostgreSQL server configuration parameter
+description:
+ - Allows to change a PostgreSQL server configuration parameter.
+ - The module uses ALTER SYSTEM command and applies changes by reload server configuration.
+ - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
+ - It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file.
+ - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
+ which is read in addition to postgresql.conf.
+ - The module allows to reset parameter to boot_val (cluster initial value) by I(reset=true) or remove parameter
+ string from postgresql.auto.conf and reload I(value=default) (for settings with postmaster context restart is required).
+ - After change you can see in the ansible output the previous and
+ the new parameter value and other information using returned values and M(ansible.builtin.debug) module.
+options:
+ name:
+ description:
+ - Name of PostgreSQL server parameter. Pay attention that parameters are case sensitive (see examples below).
+ type: str
+ required: true
+ value:
+ description:
+ - Parameter value to set.
+ - To remove parameter string from postgresql.auto.conf and
+ reload the server configuration you must pass I(value=default).
+ With I(value=default) the playbook always returns changed is true.
+ type: str
+ reset:
+ description:
+ - Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+notes:
+- Supported version of PostgreSQL is 9.4 and later.
+- Pay attention, change setting with 'postmaster' context can return changed is true
+ when actually nothing changes because the same value may be presented in
+  several different forms, for example, 1024MB, 1GB, etc. However in pg_settings
+ system view it can be defined like 131072 number of 8kB pages.
+ The final check of the parameter value cannot compare it because the server was
+ not restarted and the value in pg_settings is not updated yet.
+- For some parameters restart of PostgreSQL server is required.
+ See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+
+attributes:
+ check_mode:
+ support: full
+
+seealso:
+- module: community.postgresql.postgresql_info
+- name: PostgreSQL server configuration
+ description: General information about PostgreSQL server configuration.
+ link: https://www.postgresql.org/docs/current/runtime-config.html
+- name: PostgreSQL view pg_settings reference
+ description: Complete reference of the pg_settings view documentation.
+ link: https://www.postgresql.org/docs/current/view-pg-settings.html
+- name: PostgreSQL ALTER SYSTEM command reference
+ description: Complete reference of the ALTER SYSTEM command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersystem.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Restore wal_keep_segments parameter to initial state
+ community.postgresql.postgresql_set:
+ name: wal_keep_segments
+ reset: true
+
+# Set work_mem parameter to 32MB and show what's been changed and restart is required or not
+# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
+- name: Set work mem parameter
+ community.postgresql.postgresql_set:
+ name: work_mem
+ value: 32mb
+ register: set
+
+- name: Print the result if the setting changed
+ ansible.builtin.debug:
+ msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
+ when: set.changed
+# Ensure that the restart of PostgreSQL server must be required for some parameters.
+# In this situation you see the same parameter in prev_val_pretty and value_pretty, but 'changed=True'
+# (If you passed the value that was different from the current server setting).
+
+- name: Set log_min_duration_statement parameter to 1 second
+ community.postgresql.postgresql_set:
+ name: log_min_duration_statement
+ value: 1s
+
+- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
+ community.postgresql.postgresql_set:
+ name: wal_log_hints
+ value: default
+
+- name: Set TimeZone parameter (careful, case sensitive)
+ community.postgresql.postgresql_set:
+ name: TimeZone
+ value: 'Europe/Paris'
+
+'''
+
+RETURN = r'''
+name:
+ description: Name of PostgreSQL server parameter.
+ returned: always
+ type: str
+ sample: 'shared_buffers'
+restart_required:
+ description: Information about parameter current state.
+ returned: always
+ type: bool
+ sample: true
+prev_val_pretty:
+ description: Information about previous state of the parameter.
+ returned: always
+ type: str
+ sample: '4MB'
+value_pretty:
+ description: Information about current state of the parameter.
+ returned: always
+ type: str
+ sample: '64MB'
+value:
+ description:
+ - Dictionary that contains the current parameter value (at the time of playbook finish).
+ - Pay attention that for real change some parameters restart of PostgreSQL server is required.
+ - Returns the current value in the check mode.
+ returned: always
+ type: dict
+ sample: { "value": 67108864, "unit": "b" }
+context:
+ description:
+ - PostgreSQL setting context.
+ returned: always
+ type: str
+ sample: user
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except Exception:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+PG_REQ_VER = 90400
+
+# To allow to set value like 1mb instead of 1MB, etc:
+LOWERCASE_SIZE_UNITS = ("mb", "gb", "tb")
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
def param_get(cursor, module, name):
    """Return the current state of server parameter *name* as a dict.

    Keys: 'current_val' (the SHOW value), 'raw_val' (pg_settings.setting,
    normalized to bytes when the unit is kB/MB), 'unit', 'boot_val'
    (cluster initial value, normalized the same way) and 'context'.
    Calls module.fail_json() on query errors or when the parameter
    does not exist.
    """
    settings_query = ("SELECT name, setting, unit, context, boot_val "
                      "FROM pg_settings WHERE name = %(name)s")
    try:
        cursor.execute(settings_query, {'name': name})
        info = cursor.fetchone()
        # SHOW reports the value in its human-readable form (e.g. '4MB'):
        cursor.execute("SHOW %s" % name)
        val = cursor.fetchone()

    except Exception as e:
        module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))

    if not info:
        module.fail_json(msg="No such parameter: %s. "
                             "Please check its spelling or presence in your PostgreSQL version "
                             "(https://www.postgresql.org/docs/current/runtime-config.html)" % name)

    raw_val = info['setting']
    unit = info['unit']
    context = info['context']
    boot_val = info['boot_val']

    # Normalize Python-style boolean spellings to PostgreSQL's on/off:
    normalized = {'True': 'on', 'False': 'off'}.get(val[name])
    if normalized is not None:
        val[name] = normalized

    # Express kB/MB quantities in plain bytes so callers can compare them:
    multiplier = {'kB': 1024, 'MB': 1024 * 1024}.get(unit)
    if multiplier is not None:
        if int(raw_val) > 0:
            raw_val = int(raw_val) * multiplier
        if int(boot_val) > 0:
            boot_val = int(boot_val) * multiplier

        unit = 'b'

    return {
        'current_val': val[name],
        'raw_val': raw_val,
        'unit': unit,
        'boot_val': boot_val,
        'context': context,
    }
+
+
def pretty_to_bytes(pretty_val):
    """Convert a human-readable size string to an integer number of bytes.

    Recognized suffixes are 'B', 'kB', 'MB', 'GB' and 'TB'. A purely
    numeric string is returned as int (or float). Anything that cannot
    be interpreted as a size (empty values, 'on', '100ms', ...) is
    returned unchanged.
    """
    # It's sometimes possible to have an empty values
    if not pretty_val:
        return pretty_val

    # Sizes always begin with a digit; otherwise there is nothing to parse.
    if not pretty_val[0].isdigit():
        return pretty_val

    # No trailing letter means no unit suffix: treat it as a plain number.
    if not pretty_val[-1].isalpha():
        for converter in (int, float):
            try:
                return converter(pretty_val)
            except ValueError:
                pass
        return pretty_val

    # Collect the leading digits, e.g. 1024 from '1024kB':
    pos = 0
    while pos < len(pretty_val) and pretty_val[pos].isdigit():
        pos += 1
    magnitude = int(pretty_val[:pos])

    # Map the two-character suffix, if any, to its byte multiplier:
    converted = None
    if len(pretty_val) >= 2:
        for suffix, factor in (('kB', 1024),
                               ('MB', 1024 ** 2),
                               ('GB', 1024 ** 3),
                               ('TB', 1024 ** 4)):
            if suffix in pretty_val[-2:]:
                converted = magnitude * factor
                break

    # For cases like "1B"
    if not converted and 'B' in pretty_val[-1]:
        converted = magnitude

    if converted is not None:
        return converted
    else:
        return pretty_val
+
+
def param_set(cursor, module, name, value, context):
    """Set a server parameter via ALTER SYSTEM and reload the configuration.

    Args:
        cursor (cursor) -- psycopg2 cursor object.
        module (AnsibleModule) -- object of AnsibleModule class.
        name (str) -- parameter name; interpolated into the SQL verbatim,
            callers are expected to have validated it via check_input()
            when trust_input is false.
        value -- new value; the string 'default' (any case) removes the
            setting from postgresql.auto.conf.
        context (str) -- pg_settings context of the parameter.

    Returns:
        True on success (fail_json() exits the module otherwise).
    """
    try:
        if str(value).lower() == 'default':
            query = "ALTER SYSTEM SET %s = DEFAULT" % name
        else:
            if isinstance(value, str) and ',' in value and not name.endswith(('_command', '_prefix')):
                # Issue https://github.com/ansible-collections/community.postgresql/issues/78
                # Change value from 'one, two, three' -> "'one','two','three'"
                value = ','.join(["'" + elem.strip() + "'" for elem in value.split(',')])
                query = "ALTER SYSTEM SET %s = %s" % (name, value)
            else:
                query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
        cursor.execute(query)

        # 'postmaster' parameters only take effect after a server restart,
        # so reloading the configuration would have no effect:
        if context != 'postmaster':
            cursor.execute("SELECT pg_reload_conf()")

    except Exception as e:
        # Bug fix: the message previously said "Unable to get ..."
        # (copy-pasted from param_get) although this function sets the value.
        module.fail_json(msg="Unable to set %s value due to : %s" % (name, to_native(e)))

    return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Module entry point.

    Normalizes the requested value, connects to the server, compares the
    requested value with the current one and either applies it with
    ALTER SYSTEM (via param_set) or resets the parameter to its boot value.
    In check mode only the comparison is performed.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        name=dict(type='str', required=True),
        db=dict(type='str', aliases=['login_db']),
        value=dict(type='str'),
        reset=dict(type='bool', default=False),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    name = module.params['name']
    value = module.params['value']
    reset = module.params['reset']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, name, value, session_role)

    if value:
        # Convert a value like 1mb (Postgres does not support) to 1MB, etc:
        if len(value) > 2 and value[:-2].isdigit() and value[-2:] in LOWERCASE_SIZE_UNITS:
            value = value.upper()

        # Convert a value like 1b (Postgres does not support) to 1B:
        elif len(value) > 1 and ('b' in value[-1] and value[:-1].isdigit()):
            value = value.upper()

    # Exactly one of value/reset must be supplied:
    if value is not None and reset:
        module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)

    if value is None and not reset:
        module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)

    # Ensure psycopg2 libraries are available before connecting to DB:
    ensure_required_libs(module)
    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    # autocommit is required: ALTER SYSTEM cannot run inside a transaction block.
    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    kw = {}
    # Check server version (needs 9.4 or later):
    ver = db_connection.server_version
    if ver < PG_REQ_VER:
        # Too old for ALTER SYSTEM: warn and exit without changing anything.
        module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
        kw = dict(
            changed=False,
            restart_required=False,
            value_pretty="",
            prev_val_pretty="",
            value={"value": "", "unit": ""},
        )
        kw['name'] = name
        db_connection.close()
        module.exit_json(**kw)

    # Set default returned values:
    restart_required = False
    changed = False
    kw['name'] = name
    kw['restart_required'] = False

    # Get info about param state:
    res = param_get(cursor, module, name)
    current_val = res['current_val']
    raw_val = res['raw_val']
    unit = res['unit']
    boot_val = res['boot_val']
    context = res['context']

    # Normalize Python-style boolean spellings to PostgreSQL's on/off,
    # mirroring what param_get() does for the current value:
    if value == 'True':
        value = 'on'
    elif value == 'False':
        value = 'off'

    kw['prev_val_pretty'] = current_val
    kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
    kw['context'] = context

    # Do job
    if context == "internal":
        module.fail_json(msg="%s: cannot be changed (internal context). See "
                             "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)

    if context == "postmaster":
        # 'postmaster' parameters only take effect after a server restart:
        restart_required = True

    # If check_mode, just compare and exit:
    if module.check_mode:
        # Compare in bytes so '4096kB' and '4MB' count as equal:
        if pretty_to_bytes(value) == pretty_to_bytes(current_val):
            kw['changed'] = False

        else:
            kw['value_pretty'] = value
            kw['changed'] = True

        # Anyway returns current raw value in the check_mode:
        kw['value'] = dict(
            value=raw_val,
            unit=unit,
        )
        kw['restart_required'] = restart_required
        module.exit_json(**kw)

    # Set param (value can be an empty string):
    if value is not None and value != current_val:
        changed = param_set(cursor, module, name, value, context)

        kw['value_pretty'] = value

    # Reset param:
    elif reset:
        if raw_val == boot_val:
            # nothing to change, exit:
            kw['value'] = dict(
                value=raw_val,
                unit=unit,
            )
            module.exit_json(**kw)

        changed = param_set(cursor, module, name, boot_val, context)

    cursor.close()
    db_connection.close()

    # Reconnect and recheck current value:
    # (only for contexts whose changes take effect on reload; the final
    # 'changed' is recomputed by comparing raw values before and after)
    if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
        db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
        cursor = db_connection.cursor(cursor_factory=DictCursor)

        res = param_get(cursor, module, name)
        # f_ means 'final'
        f_value = res['current_val']
        f_raw_val = res['raw_val']

        if raw_val == f_raw_val:
            changed = False

        else:
            changed = True

        kw['value_pretty'] = f_value
        kw['value'] = dict(
            value=f_raw_val,
            unit=unit,
        )

        cursor.close()
        db_connection.close()

    kw['changed'] = changed
    kw['restart_required'] = restart_required

    if restart_required and changed:
        module.warn("Restart of PostgreSQL is required for setting %s" % name)

    module.exit_json(**kw)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py
new file mode 100644
index 000000000..b863784af
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_slot
+short_description: Add or remove replication slots from a PostgreSQL database
+description:
+- Add or remove physical or logical replication slots from a PostgreSQL database.
+
+options:
+ name:
+ description:
+ - Name of the replication slot to add or remove.
+ type: str
+ required: true
+ aliases:
+ - slot_name
+ slot_type:
+ description:
+ - Slot type.
+ type: str
+ default: physical
+ choices: [ logical, physical ]
+ state:
+ description:
+ - The slot state.
+ - I(state=present) implies the slot must be present in the system.
+    - I(state=absent) implies the slot must be removed from the system.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ immediately_reserve:
+ description:
+ - Optional parameter that when C(true) specifies that the LSN for this replication slot be reserved
+ immediately, otherwise the default, C(false), specifies that the LSN is reserved on the first connection
+ from a streaming replication client.
+ - Is available from PostgreSQL version 9.6.
+ - Uses only with I(slot_type=physical).
+ - Mutually exclusive with I(slot_type=logical).
+ type: bool
+ default: false
+ output_plugin:
+ description:
+ - All logical slots must indicate which output plugin decoder they're using.
+ - This parameter does not apply to physical slots.
+ - It will be ignored with I(slot_type=physical).
+ type: str
+ default: "test_decoding"
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(false), check the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+
+notes:
+- Physical replication slots were introduced to PostgreSQL with version 9.4,
+ while logical replication slots were added beginning with version 10.0.
+
+attributes:
+ check_mode:
+ support: full
+
+seealso:
+- name: PostgreSQL pg_replication_slots view reference
+ description: Complete reference of the PostgreSQL pg_replication_slots view.
+ link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html
+- name: PostgreSQL streaming replication protocol reference
+ description: Complete reference of the PostgreSQL streaming replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-replication.html
+- name: PostgreSQL logical replication protocol reference
+ description: Complete reference of the PostgreSQL logical replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-logical-replication.html
+
+author:
+- John Scalia (@jscalia)
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Create physical_one physical slot if doesn't exist
+ become_user: postgres
+ community.postgresql.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+
+- name: Remove physical_one slot if exists
+ become_user: postgres
+ community.postgresql.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+ state: absent
+
+- name: Create logical_one logical slot to the database acme if doesn't exist
+ community.postgresql.postgresql_slot:
+ name: logical_slot_one
+ slot_type: logical
+ state: present
+ output_plugin: custom_decoder_one
+ db: "acme"
+
+- name: Remove logical_one slot if exists from the cluster running on another host and non-standard port
+ community.postgresql.postgresql_slot:
+ name: logical_one
+ login_host: mydatabase.example.org
+ port: 5433
+ login_user: ourSuperuser
+ login_password: thePassword
+ state: absent
+'''
+
+RETURN = r'''
+name:
+ description: Name of the slot.
+ returned: always
+ type: str
+ sample: "physical_one"
+queries:
+ description: List of executed queries.
+  returned: always
+  type: list
+  elements: str
+  sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class PgSlot(object):
+    """Implement CRUD operations for a PostgreSQL replication slot.
+
+    Args:
+        module (AnsibleModule): Object of AnsibleModule class.
+        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+        name (str): Name of the replication slot.
+
+    Attributes:
+        exists (bool): True if a slot named ``name`` already exists.
+        kind (str): Type of the existing slot ('physical' or 'logical'), '' if absent.
+        changed (bool): True if the last create()/drop() call changed the DB state.
+        executed_queries (list): Queries executed via exec_sql() on behalf of this object.
+    """
+
+    def __init__(self, module, cursor, name):
+        self.module = module
+        self.cursor = cursor
+        self.name = name
+        self.exists = False
+        self.kind = ''
+        # Look the slot up right away so self.exists / self.kind are usable:
+        self.__slot_exists()
+        self.changed = False
+        self.executed_queries = []
+
+    def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False):
+        """Create the replication slot, setting self.changed on success.
+
+        Returns False without touching the DB when a slot with this name
+        already exists (warning if its type differs from ``kind``), and
+        None when just_check is True (check mode: nothing is executed).
+        """
+        if self.exists:
+            if self.kind == kind:
+                return False
+            else:
+                self.module.warn("slot with name '%s' already exists "
+                                 "but has another type '%s'" % (self.name, self.kind))
+                return False
+
+        if just_check:
+            return None
+
+        if kind == 'physical':
+            # Check server version (immediately_reserved needs 9.6+):
+            if self.cursor.connection.server_version < 90600:
+                query = "SELECT pg_create_physical_replication_slot(%(name)s)"
+
+            else:
+                query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)"
+
+            self.changed = exec_sql(self, query,
+                                    query_params={'name': self.name, 'i_reserve': immediately_reserve},
+                                    return_bool=True)
+
+        elif kind == 'logical':
+            query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)"
+            self.changed = exec_sql(self, query,
+                                    query_params={'name': self.name, 'o_plugin': output_plugin}, return_bool=True)
+
+    def drop(self):
+        """Drop the replication slot if it exists; set self.changed accordingly."""
+        if not self.exists:
+            return False
+
+        query = "SELECT pg_drop_replication_slot(%(name)s)"
+        self.changed = exec_sql(self, query, query_params={'name': self.name}, return_bool=True)
+
+    def __slot_exists(self):
+        """Refresh self.exists and self.kind from the pg_replication_slots view."""
+        query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s"
+        res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+        if res:
+            self.exists = True
+            self.kind = res[0][0]
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    """Entry point of the postgresql_slot module.
+
+    Parses module parameters, connects to the database, and creates or
+    drops the requested replication slot (honoring check mode).
+    """
+    argument_spec = postgres_common_argument_spec()
+    argument_spec.update(
+        db=dict(type="str", aliases=["login_db"]),
+        name=dict(type="str", required=True, aliases=["slot_name"]),
+        slot_type=dict(type="str", default="physical", choices=["logical", "physical"]),
+        immediately_reserve=dict(type="bool", default=False),
+        session_role=dict(type="str"),
+        output_plugin=dict(type="str", default="test_decoding"),
+        state=dict(type="str", default="present", choices=["absent", "present"]),
+        trust_input=dict(type="bool", default=True),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    name = module.params["name"]
+    slot_type = module.params["slot_type"]
+    immediately_reserve = module.params["immediately_reserve"]
+    state = module.params["state"]
+    output_plugin = module.params["output_plugin"]
+
+    # Guard session_role against SQL injection when requested:
+    if not module.params["trust_input"]:
+        check_input(module, module.params['session_role'])
+
+    # immediately_reserve is an argument of pg_create_physical_replication_slot()
+    # only, so it cannot be combined with a logical slot:
+    if immediately_reserve and slot_type == 'logical':
+        module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive")
+
+    # When slot_type is logical and parameter db is not passed,
+    # the default database will be used to create the slot and
+    # the user should know about this.
+    # When the slot type is physical,
+    # it doesn't matter which database will be used
+    # because physical slots are global objects.
+    if slot_type == 'logical':
+        warn_db_default = True
+    else:
+        warn_db_default = False
+
+    # Ensure psycopg2 libraries are available before connecting to DB:
+    ensure_required_libs(module)
+    conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default)
+    db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
+    cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+    ##################################
+    # Create an object and do main job
+    pg_slot = PgSlot(module, cursor, name)
+
+    changed = False
+
+    if module.check_mode:
+        # Predict the result without executing DDL; create() is called with
+        # just_check=True only to emit warnings (e.g. type mismatch):
+        if state == "present":
+            if not pg_slot.exists:
+                changed = True
+
+            pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True)
+
+        elif state == "absent":
+            if pg_slot.exists:
+                changed = True
+    else:
+        if state == "absent":
+            pg_slot.drop()
+
+        elif state == "present":
+            pg_slot.create(slot_type, immediately_reserve, output_plugin)
+
+        changed = pg_slot.changed
+
+    db_connection.close()
+    module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py
new file mode 100644
index 000000000..ae46a0dea
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py
@@ -0,0 +1,741 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_subscription
+short_description: Add, update, or remove PostgreSQL subscription
+description:
+- Add, update, or remove PostgreSQL subscription.
+version_added: '0.2.0'
+
+options:
+ name:
+ description:
+ - Name of the subscription to add, update, or remove.
+ type: str
+ required: true
+ db:
+ description:
+ - Name of the database to connect to and where
+ the subscription state will be changed.
+ aliases: [ login_db ]
+ type: str
+ required: true
+ state:
+ description:
+ - The subscription state.
+ - C(present) implies that if I(name) subscription doesn't exist, it will be created.
+ - C(absent) implies that if I(name) subscription exists, it will be removed.
+ - C(refresh) implies that if I(name) subscription exists, it will be refreshed.
+      Fetch missing table information from the publisher. Always returns ``changed`` as ``True``.
+ This will start replication of tables that were added to the subscribed-to publications
+ since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION.
+ The existing data in the publications that are being subscribed to
+ should be copied once the replication starts.
+ - For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html).
+ type: str
+ choices: [ absent, present, refresh ]
+ default: present
+ owner:
+ description:
+ - Subscription owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ - Ignored when I(state) is not C(present).
+ type: str
+ publications:
+ description:
+ - The publication names on the publisher to use for the subscription.
+ - Ignored when I(state) is not C(present).
+ type: list
+ elements: str
+ connparams:
+ description:
+ - The connection dict param-value to connect to the publisher.
+ - For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ cascade:
+ description:
+ - Drop subscription dependencies. Has effect with I(state=absent) only.
+ - Ignored when I(state) is not C(absent).
+ type: bool
+ default: false
+ subsparams:
+ description:
+ - Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc.
+ - For update the subscription allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name).
+ - See available parameters to create a new subscription
+ on U(https://www.postgresql.org/docs/current/sql-createsubscription.html).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(name), I(publications), I(owner),
+ I(session_role), I(connparams), I(subsparams) are potentially dangerous.
+    - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+
+notes:
+- PostgreSQL version must be 10 or greater.
+
+attributes:
+ check_mode:
+ support: full
+
+seealso:
+- module: community.postgresql.postgresql_publication
+- module: community.postgresql.postgresql_info
+- name: CREATE SUBSCRIPTION reference
+ description: Complete reference of the CREATE SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsubscription.html
+- name: ALTER SUBSCRIPTION reference
+ description: Complete reference of the ALTER SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersubscription.html
+- name: DROP SUBSCRIPTION reference
+ description: Complete reference of the DROP SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsubscription.html
+
+author:
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: >
+ Create acme subscription in mydb database using acme_publication and
+ the following connection parameters to connect to the publisher.
+ Set the subscription owner as alice.
+ community.postgresql.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ publications: acme_publication
+ owner: alice
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ dbname: mydb
+
+- name: Assuming that acme subscription exists, try to change conn parameters
+ community.postgresql.postgresql_subscription:
+ db: mydb
+ name: acme
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ connect_timeout: 100
+
+- name: Refresh acme publication
+ community.postgresql.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: refresh
+
+- name: Drop acme subscription from mydb with dependencies (cascade=true)
+ community.postgresql.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: absent
+ cascade: true
+
+- name: Assuming that acme subscription exists and enabled, disable the subscription
+ community.postgresql.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ subsparams:
+ enabled: false
+'''
+
+RETURN = r'''
+name:
+ description:
+ - Name of the subscription.
+ returned: always
+ type: str
+ sample: acme
+exists:
+ description:
+ - Flag indicates the subscription exists or not at the end of runtime.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+  returned: always
+  type: list
+  elements: str
+  sample: [ 'DROP SUBSCRIPTION "mysubscription"' ]
+initial_state:
+ description: Subscription configuration at the beginning of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+final_state:
+ description: Subscription configuration at the end of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+'''
+
+from copy import deepcopy
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+SUPPORTED_PG_VERSION = 10000
+
+SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name')
+
+
+################################
+# Module functions and classes #
+################################
+
+def convert_conn_params(conn_dict):
+    """Render a connection-parameter dict as a libpq connection string.
+
+    Args:
+        conn_dict (dict): Mapping of connection parameter names to values.
+
+    Returns:
+        str: Space-separated "param=value" pairs.
+    """
+    return ' '.join('%s=%s' % (key, value) for (key, value) in iteritems(conn_dict))
+
+
+def convert_subscr_params(params_dict):
+    """Render a subscription-parameter dict as a WITH () clause body.
+
+    Python booleans are rendered as the SQL literals ``true``/``false``.
+
+    Args:
+        params_dict (dict): Mapping of subscription parameter names to values.
+
+    Returns:
+        str: Comma-separated "param = value" pairs.
+    """
+    rendered = []
+    for (key, value) in iteritems(params_dict):
+        if value is True:
+            value = 'true'
+        elif value is False:
+            value = 'false'
+
+        rendered.append('%s = %s' % (key, value))
+
+    return ', '.join(rendered)
+
+
+def cast_connparams(connparams_dict):
+    """Cast values of the passed connparams_dict to int where possible.
+
+    Values that cannot be represented as integers are kept unchanged.
+
+    Args:
+        connparams_dict (dict): Connection parameters, modified in place.
+
+    Returns:
+        The same dictionary with numeric-looking values converted to int.
+    """
+    for (param, val) in iteritems(connparams_dict):
+        try:
+            connparams_dict[param] = int(val)
+        # TypeError covers non-castable values such as None that would
+        # otherwise crash the module; ValueError covers non-numeric strings:
+        except (ValueError, TypeError):
+            connparams_dict[param] = val
+
+    return connparams_dict
+
+
+class PgSubscription():
+    """Class to work with PostgreSQL subscription.
+
+    Args:
+        module (AnsibleModule): Object of AnsibleModule class.
+        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+        name (str): The name of the subscription.
+        db (str): The database name the subscription will be associated with.
+
+    Attributes:
+        module (AnsibleModule): Object of AnsibleModule class.
+        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+        name (str): Name of subscription.
+        executed_queries (list): List of executed queries.
+        attrs (dict): Dict with subscription attributes.
+        exists (bool): Flag indicates the subscription exists or not.
+    """
+
+    def __init__(self, module, cursor, name, db):
+        self.module = module
+        self.cursor = cursor
+        self.name = name
+        self.db = db
+        self.executed_queries = []
+        self.attrs = {
+            'owner': None,
+            'enabled': None,
+            'synccommit': None,
+            'conninfo': {},
+            'slotname': None,
+            'publications': [],
+        }
+        self.empty_attrs = deepcopy(self.attrs)
+        self.exists = self.check_subscr()
+
+    def get_info(self):
+        """Refresh the subscription information.
+
+        Returns:
+            ``self.attrs``.
+        """
+        self.exists = self.check_subscr()
+        return self.attrs
+
+    def check_subscr(self):
+        """Check the subscription and refresh ``self.attrs`` subscription attribute.
+
+        Returns:
+            True if the subscription with ``self.name`` exists, False otherwise.
+        """
+
+        subscr_info = self.__get_general_subscr_info()
+
+        if not subscr_info:
+            # The subscription does not exist:
+            self.attrs = deepcopy(self.empty_attrs)
+            return False
+
+        self.attrs['owner'] = subscr_info.get('rolname')
+        self.attrs['enabled'] = subscr_info.get('subenabled')
+        # Fix: read subsynccommit, not subenabled, so that the
+        # synchronous_commit comparison in update() is meaningful:
+        self.attrs['synccommit'] = subscr_info.get('subsynccommit')
+        self.attrs['slotname'] = subscr_info.get('subslotname')
+        self.attrs['publications'] = subscr_info.get('subpublications')
+        if subscr_info.get('subconninfo'):
+            for param in subscr_info['subconninfo'].split(' '):
+                tmp = param.split('=')
+                try:
+                    self.attrs['conninfo'][tmp[0]] = int(tmp[1])
+                except ValueError:
+                    self.attrs['conninfo'][tmp[0]] = tmp[1]
+
+        return True
+
+    def create(self, connparams, publications, subsparams, check_mode=True):
+        """Create the subscription.
+
+        Args:
+            connparams (str): Connection string in libpq style.
+            publications (list): Publications on the primary to use.
+            subsparams (str): Parameters string in WITH () clause style.
+
+        Kwargs:
+            check_mode (bool): If True, don't actually change anything,
+                just make SQL, add it to ``self.executed_queries`` and return True.
+
+        Returns:
+            changed (bool): True if the subscription has been created, otherwise False.
+        """
+        query_fragments = []
+        query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' "
+                               "PUBLICATION %s" % (self.name, connparams, ', '.join(publications)))
+
+        if subsparams:
+            query_fragments.append("WITH (%s)" % subsparams)
+
+        changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+        return changed
+
+    def update(self, connparams, publications, subsparams, check_mode=True):
+        """Update the subscription.
+
+        Args:
+            connparams (dict): Connection dict in libpq style.
+            publications (list): Publications on the primary to use.
+            subsparams (dict): Dictionary of optional parameters.
+
+        Kwargs:
+            check_mode (bool): If True, don't actually change anything,
+                just make SQL, add it to ``self.executed_queries`` and return True.
+
+        Returns:
+            changed (bool): True if subscription has been updated, otherwise False.
+        """
+        changed = False
+
+        if connparams:
+            if connparams != self.attrs['conninfo']:
+                changed = self.__set_conn_params(convert_conn_params(connparams),
+                                                 check_mode=check_mode)
+
+        if publications:
+            if sorted(self.attrs['publications']) != sorted(publications):
+                changed = self.__set_publications(publications, check_mode=check_mode)
+
+        if subsparams:
+            params_to_update = []
+
+            for (param, value) in iteritems(subsparams):
+                if param == 'enabled':
+                    if self.attrs['enabled'] and value is False:
+                        changed = self.enable(enabled=False, check_mode=check_mode)
+                    elif not self.attrs['enabled'] and value is True:
+                        changed = self.enable(enabled=True, check_mode=check_mode)
+
+                elif param == 'synchronous_commit':
+                    if self.attrs['synccommit'] is True and value is False:
+                        params_to_update.append("%s = false" % param)
+                    elif self.attrs['synccommit'] is False and value is True:
+                        params_to_update.append("%s = true" % param)
+
+                elif param == 'slot_name':
+                    if self.attrs['slotname'] and self.attrs['slotname'] != value:
+                        params_to_update.append("%s = %s" % (param, value))
+
+                else:
+                    self.module.warn("Parameter '%s' is not in params supported "
+                                     "for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE))
+
+            if params_to_update:
+                changed = self.__set_params(params_to_update, check_mode=check_mode)
+
+        return changed
+
+    def drop(self, cascade=False, check_mode=True):
+        """Drop the subscription.
+
+        Kwargs:
+            cascade (bool): Flag indicates that the subscription needs to be deleted
+                with its dependencies.
+            check_mode (bool): If True, don't actually change anything,
+                just make SQL, add it to ``self.executed_queries`` and return True.
+
+        Returns:
+            changed (bool): True if the subscription has been removed, otherwise False.
+        """
+        if self.exists:
+            query_fragments = ["DROP SUBSCRIPTION %s" % self.name]
+            if cascade:
+                query_fragments.append("CASCADE")
+
+            return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+    def set_owner(self, role, check_mode=True):
+        """Set a subscription owner.
+
+        Args:
+            role (str): Role (user) name that needs to be set as a subscription owner.
+
+        Kwargs:
+            check_mode (bool): If True, don't actually change anything,
+                just make SQL, add it to ``self.executed_queries`` and return True.
+
+        Returns:
+            True if successful, False otherwise.
+        """
+        query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role)
+        return self.__exec_sql(query, check_mode=check_mode)
+
+    def refresh(self, check_mode=True):
+        """Refresh publication.
+
+        Fetches missing table info from publisher.
+
+        Kwargs:
+            check_mode (bool): If True, don't actually change anything,
+                just make SQL, add it to ``self.executed_queries`` and return True.
+
+        Returns:
+            True if successful, False otherwise.
+        """
+        query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name
+        return self.__exec_sql(query, check_mode=check_mode)
+
+    def __set_params(self, params_to_update, check_mode=True):
+        """Update optional subscription parameters.
+
+        Args:
+            params_to_update (list): Parameters with values to update.
+
+        Kwargs:
+            check_mode (bool): If True, don't actually change anything,
+                just make SQL, add it to ``self.executed_queries`` and return True.
+
+        Returns:
+            True if successful, False otherwise.
+        """
+        query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update))
+        return self.__exec_sql(query, check_mode=check_mode)
+
+    def __set_conn_params(self, connparams, check_mode=True):
+        """Update connection parameters.
+
+        Args:
+            connparams (str): Connection string in libpq style.
+
+        Kwargs:
+            check_mode (bool): If True, don't actually change anything,
+                just make SQL, add it to ``self.executed_queries`` and return True.
+
+        Returns:
+            True if successful, False otherwise.
+        """
+        query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams)
+        return self.__exec_sql(query, check_mode=check_mode)
+
+    def __set_publications(self, publications, check_mode=True):
+        """Update publications.
+
+        Args:
+            publications (list): Publications on the primary to use.
+
+        Kwargs:
+            check_mode (bool): If True, don't actually change anything,
+                just make SQL, add it to ``self.executed_queries`` and return True.
+
+        Returns:
+            True if successful, False otherwise.
+        """
+        query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications))
+        return self.__exec_sql(query, check_mode=check_mode)
+
+    def enable(self, enabled=True, check_mode=True):
+        """Enable or disable the subscription.
+
+        Kwargs:
+            enabled (bool): Flag indicates that the subscription needs
+                to be enabled or disabled.
+            check_mode (bool): If True, don't actually change anything,
+                just make SQL, add it to ``self.executed_queries`` and return True.
+
+        Returns:
+            True if successful, False otherwise.
+        """
+        if enabled:
+            query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name
+        else:
+            query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name
+
+        return self.__exec_sql(query, check_mode=check_mode)
+
+    def __get_general_subscr_info(self):
+        """Get and return general subscription information.
+
+        Returns:
+            Dict with subscription information if successful, False otherwise.
+        """
+        query = ("SELECT d.datname, r.rolname, s.subenabled, "
+                 "s.subconninfo, s.subslotname, s.subsynccommit, "
+                 "s.subpublications FROM pg_catalog.pg_subscription s "
+                 "JOIN pg_catalog.pg_database d "
+                 "ON s.subdbid = d.oid "
+                 "JOIN pg_catalog.pg_roles AS r "
+                 "ON s.subowner = r.oid "
+                 "WHERE s.subname = %(name)s AND d.datname = %(db)s")
+
+        result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)
+        if result:
+            return result[0]
+        else:
+            return False
+
+    def __exec_sql(self, query, check_mode=False):
+        """Execute SQL query.
+
+        Note: If we need just to get information from the database,
+            we use ``exec_sql`` function directly.
+
+        Args:
+            query (str): Query that needs to be executed.
+
+        Kwargs:
+            check_mode (bool): If True, don't actually change anything,
+                just add ``query`` to ``self.executed_queries`` and return True.
+
+        Returns:
+            True if successful, False otherwise.
+        """
+        if check_mode:
+            self.executed_queries.append(query)
+            return True
+        else:
+            return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    """Entry point of the postgresql_subscription module.
+
+    Parses module parameters, connects to the database, and creates,
+    updates, refreshes, or drops the subscription (honoring check mode).
+    """
+    argument_spec = postgres_common_argument_spec()
+    argument_spec.update(
+        name=dict(type='str', required=True),
+        db=dict(type='str', required=True, aliases=['login_db']),
+        state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']),
+        publications=dict(type='list', elements='str'),
+        connparams=dict(type='dict'),
+        cascade=dict(type='bool', default=False),
+        owner=dict(type='str'),
+        subsparams=dict(type='dict'),
+        session_role=dict(type='str'),
+        trust_input=dict(type='bool', default=True),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    # Parameters handling:
+    db = module.params['db']
+    name = module.params['name']
+    state = module.params['state']
+    publications = module.params['publications']
+    cascade = module.params['cascade']
+    owner = module.params['owner']
+    subsparams = module.params['subsparams']
+    connparams = module.params['connparams']
+    session_role = module.params['session_role']
+    trust_input = module.params['trust_input']
+
+    if not trust_input:
+        # Check input for potentially dangerous elements:
+        # dicts are flattened to strings first so check_input()
+        # can scan their contents too.
+        if not subsparams:
+            subsparams_str = None
+        else:
+            subsparams_str = convert_subscr_params(subsparams)
+
+        if not connparams:
+            connparams_str = None
+        else:
+            connparams_str = convert_conn_params(connparams)
+
+        check_input(module, name, publications, owner, session_role,
+                    connparams_str, subsparams_str)
+
+    # Warn about parameters that will be ignored for the requested state:
+    if state == 'present' and cascade:
+        module.warn('parameter "cascade" is ignored when state is not absent')
+
+    if state != 'present':
+        if owner:
+            module.warn("parameter 'owner' is ignored when state is not 'present'")
+        if publications:
+            module.warn("parameter 'publications' is ignored when state is not 'present'")
+        if connparams:
+            module.warn("parameter 'connparams' is ignored when state is not 'present'")
+        if subsparams:
+            module.warn("parameter 'subsparams' is ignored when state is not 'present'")
+
+    # Ensure psycopg2 libraries are available before connecting to DB:
+    ensure_required_libs(module)
+    # Connect to DB and make cursor object:
+    pg_conn_params = get_conn_params(module, module.params)
+    # We check subscription state without DML queries execution, so set autocommit:
+    db_connection, dummy = connect_to_db(module, pg_conn_params, autocommit=True)
+    cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+    # Check version:
+    if cursor.connection.server_version < SUPPORTED_PG_VERSION:
+        module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
+
+    # Set defaults:
+    changed = False
+    initial_state = {}
+    final_state = {}
+
+    ###################################
+    # Create object and do rock'n'roll:
+    subscription = PgSubscription(module, cursor, name, db)
+
+    if subscription.exists:
+        initial_state = deepcopy(subscription.attrs)
+        final_state = deepcopy(initial_state)
+
+    if state == 'present':
+        if not subscription.exists:
+            # CREATE SUBSCRIPTION needs string forms of both dicts:
+            if subsparams:
+                subsparams = convert_subscr_params(subsparams)
+
+            if connparams:
+                connparams = convert_conn_params(connparams)
+
+            changed = subscription.create(connparams,
+                                          publications,
+                                          subsparams,
+                                          check_mode=module.check_mode)
+
+        else:
+            # Cast numeric-looking conn values so they compare equal
+            # to the ints parsed from the existing subconninfo:
+            if connparams:
+                connparams = cast_connparams(connparams)
+
+            changed = subscription.update(connparams,
+                                          publications,
+                                          subsparams,
+                                          check_mode=module.check_mode)
+
+        if owner and subscription.attrs['owner'] != owner:
+            changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed
+
+    elif state == 'absent':
+        changed = subscription.drop(cascade, check_mode=module.check_mode)
+
+    elif state == 'refresh':
+        if not subscription.exists:
+            module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name)
+
+        # Always returns True:
+        changed = subscription.refresh(check_mode=module.check_mode)
+
+    # Get final subscription info:
+    final_state = subscription.get_info()
+
+    # Connection is not needed any more:
+    cursor.close()
+    db_connection.close()
+
+    # Return ret values and exit:
+    module.exit_json(changed=changed,
+                     name=name,
+                     exists=subscription.exists,
+                     queries=subscription.executed_queries,
+                     initial_state=initial_state,
+                     final_state=final_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py
new file mode 100644
index 000000000..33f1c752f
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py
@@ -0,0 +1,619 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_table
+short_description: Create, drop, or modify a PostgreSQL table
+description:
+- Allows to create, drop, rename, truncate a table, or change some table attributes.
+options:
+ table:
+ description:
+ - Table name.
+ required: true
+ aliases:
+ - name
+ type: str
+ state:
+ description:
+ - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(truncate), I(storage_params) and, I(rename).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tablespace:
+ description:
+ - Set a tablespace for the table.
+ type: str
+ owner:
+ description:
+ - Set a table owner.
+ type: str
+ unlogged:
+ description:
+ - Create an unlogged table.
+ type: bool
+ default: false
+ like:
+ description:
+ - Create a table like another table (with similar DDL).
+ Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ including:
+ description:
+ - Keywords that are used with like parameter, may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL.
+ Needs I(like) specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ columns:
+ description:
+ - Columns that are needed.
+ type: list
+ elements: str
+ rename:
+ description:
+ - New table name. Mutually exclusive with I(tablespace), I(owner),
+ I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
+ type: str
+ truncate:
+ description:
+ - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(rename), and I(storage_params).
+ type: bool
+ default: false
+ storage_params:
+ description:
+    - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
+ Mutually exclusive with I(rename) and I(truncate).
+ type: list
+ elements: str
+ db:
+ description:
+ - Name of database to connect and where the table will be created.
+ type: str
+ default: ''
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the table (such as views).
+ Used with I(state=absent) only.
+ type: bool
+ default: false
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+
+notes:
+- If you do not pass db parameter, tables will be created in the database
+ named postgres.
+- PostgreSQL allows to create columnless table, so columns param is optional.
+- Unlogged tables are available from PostgreSQL server version 9.1.
+
+attributes:
+ check_mode:
+ support: full
+
+seealso:
+- module: community.postgresql.postgresql_sequence
+- module: community.postgresql.postgresql_idx
+- module: community.postgresql.postgresql_info
+- module: community.postgresql.postgresql_tablespace
+- module: community.postgresql.postgresql_owner
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_copy
+- name: CREATE TABLE reference
+ description: Complete reference of the CREATE TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtable.html
+- name: ALTER TABLE reference
+ description: Complete reference of the ALTER TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertable.html
+- name: DROP TABLE reference
+ description: Complete reference of the DROP TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptable.html
+- name: PostgreSQL data types
+ description: Complete reference of the PostgreSQL data types documentation.
+ link: https://www.postgresql.org/docs/current/datatype.html
+author:
+- Andrei Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
+ community.postgresql.postgresql_table:
+ db: acme
+ name: tbl2
+ like: tbl1
+ owner: testuser
+
+- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
+ community.postgresql.postgresql_table:
+ db: acme
+ table: tbl2
+ like: tbl1
+ including: comments, indexes
+ tablespace: ssd
+
+- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
+ community.postgresql.postgresql_table:
+ name: test_table
+ columns:
+ - id bigserial primary key
+ - num bigint
+ - stories text
+ tablespace: ssd
+ storage_params:
+ - fillfactor=10
+ - autovacuum_analyze_threshold=1
+
+- name: Create an unlogged table in schema acme
+ community.postgresql.postgresql_table:
+ name: acme.useless_data
+ columns: waste_id int
+ unlogged: true
+
+- name: Rename table foo to bar
+ community.postgresql.postgresql_table:
+ table: foo
+ rename: bar
+
+- name: Rename table foo from schema acme to bar
+ community.postgresql.postgresql_table:
+ name: acme.foo
+ rename: bar
+
+- name: Set owner to someuser
+ community.postgresql.postgresql_table:
+ name: foo
+ owner: someuser
+
+- name: Change tablespace of foo table to new_tablespace and set owner to new_user
+ community.postgresql.postgresql_table:
+ name: foo
+ tablespace: new_tablespace
+ owner: new_user
+
+- name: Truncate table foo
+ community.postgresql.postgresql_table:
+ name: foo
+ truncate: true
+
+- name: Drop table foo from schema acme
+ community.postgresql.postgresql_table:
+ name: acme.foo
+ state: absent
+
+- name: Drop table bar cascade
+ community.postgresql.postgresql_table:
+ name: bar
+ state: absent
+ cascade: true
+'''
+
+RETURN = r'''
+table:
+ description: Name of a table.
+ returned: always
+ type: str
+ sample: 'foo'
+state:
+ description: Table state.
+ returned: always
+ type: str
+ sample: 'present'
+owner:
+ description: Table owner.
+ returned: always
+ type: str
+ sample: 'postgres'
+tablespace:
+ description: Tablespace.
+ returned: always
+ type: str
+ sample: 'ssd_tablespace'
+queries:
+ description: List of executed queries.
+ returned: always
+ type: str
+ sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
+storage_params:
+ description: Storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Table(object):
+ def __init__(self, name, module, cursor):
+ self.name = name
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'owner': '',
+ 'tblspace': '',
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_queries = []
+
+ def get_info(self):
+ """Getter to refresh and get table info"""
+ self.__exists_in_db()
+
+ def __exists_in_db(self):
+ """Check table exists and refresh info"""
+ if "." in self.name:
+ schema = self.name.split('.')[-2]
+ tblname = self.name.split('.')[-1]
+ else:
+ schema = 'public'
+ tblname = self.name
+
+ query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
+ "FROM pg_tables AS t "
+ "INNER JOIN pg_class AS c ON c.relname = t.tablename "
+ "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
+ "WHERE t.tablename = %(tblname)s "
+ "AND n.nspname = %(schema)s")
+ res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
+ add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ owner=res[0][0],
+ tblspace=res[0][1] if res[0][1] else '',
+ storage_params=res[0][2] if res[0][2] else [],
+ )
+
+ return True
+ else:
+ self.exists = False
+ return False
+
+ def create(self, columns='', params='', tblspace='',
+ unlogged=False, owner=''):
+ """
+ Create table.
+ If table exists, check passed args (params, tblspace, owner) and,
+ if they're different from current, change them.
+ Arguments:
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ columns - column string (comma separated).
+ """
+ name = pg_quote_identifier(self.name, 'table')
+
+ changed = False
+
+ if self.exists:
+ if tblspace == 'pg_default' and self.info['tblspace'] is None:
+ pass # Because they have the same meaning
+ elif tblspace and self.info['tblspace'] != tblspace:
+ self.set_tblspace(tblspace)
+ changed = True
+
+ if owner and self.info['owner'] != owner:
+ self.set_owner(owner)
+ changed = True
+
+ if params:
+ param_list = [p.strip(' ') for p in params.split(',')]
+
+ new_param = False
+ for p in param_list:
+ if p not in self.info['storage_params']:
+ new_param = True
+
+ if new_param:
+ self.set_stor_params(params)
+ changed = True
+
+ if changed:
+ return True
+ return False
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ if columns:
+ query += " (%s)" % columns
+ else:
+ query += " ()"
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if exec_sql(self, query, return_bool=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def create_like(self, src_table, including='', tblspace='',
+ unlogged=False, params='', owner=''):
+ """
+ Create table like another table (with similar DDL).
+ Arguments:
+ src_table - source table.
+ including - corresponds to optional INCLUDING expression
+ in CREATE TABLE ... LIKE statement.
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ """
+ changed = False
+
+ name = pg_quote_identifier(self.name, 'table')
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
+
+ if including:
+ including = including.split(',')
+ for i in including:
+ query += " INCLUDING %s" % i
+
+ query += ')'
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if exec_sql(self, query, return_bool=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def truncate(self):
+ query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
+ return exec_sql(self, query, return_bool=True)
+
+ def rename(self, newname):
+ query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(newname, 'table'))
+ return exec_sql(self, query, return_bool=True)
+
+ def set_owner(self, username):
+ query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.name, 'table'), username)
+ return exec_sql(self, query, return_bool=True)
+
+ def drop(self, cascade=False):
+ if not self.exists:
+ return False
+
+ query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
+ if cascade:
+ query += " CASCADE"
+ return exec_sql(self, query, return_bool=True)
+
+ def set_tblspace(self, tblspace):
+ query = 'ALTER TABLE %s SET TABLESPACE "%s"' % (pg_quote_identifier(self.name, 'table'), tblspace)
+ return exec_sql(self, query, return_bool=True)
+
+ def set_stor_params(self, params):
+ query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ table=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ db=dict(type='str', default='', aliases=['login_db']),
+ tablespace=dict(type='str'),
+ owner=dict(type='str'),
+ unlogged=dict(type='bool', default=False),
+ like=dict(type='str'),
+ including=dict(type='str'),
+ rename=dict(type='str'),
+ truncate=dict(type='bool', default=False),
+ columns=dict(type='list', elements='str'),
+ storage_params=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ table = module.params['table']
+ state = module.params['state']
+ tablespace = module.params['tablespace']
+ owner = module.params['owner']
+ unlogged = module.params['unlogged']
+ like = module.params['like']
+ including = module.params['including']
+ newname = module.params['rename']
+ storage_params = module.params['storage_params']
+ truncate = module.params['truncate']
+ columns = module.params['columns']
+ cascade = module.params['cascade']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, table, tablespace, owner, like, including,
+ newname, storage_params, columns, session_role)
+
+ if state == 'present' and cascade:
+ module.warn("cascade=true is ignored when state=present")
+
+ # Check mutual exclusive parameters:
+ if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
+ module.fail_json(msg="%s: state=absent is mutually exclusive with: "
+ "truncate, rename, columns, tablespace, "
+ "including, like, storage_params, unlogged, owner" % table)
+
+ if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: truncate is mutually exclusive with: "
+ "rename, columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: rename is mutually exclusive with: "
+ "columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if like and columns:
+ module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
+ if including and not like:
+ module.fail_json(msg="%s: including param needs like param specified" % table)
+
+ # Ensure psycopg2 libraries are available before connecting to DB:
+ ensure_required_libs(module)
+ conn_params = get_conn_params(module, module.params)
+ db_connection, dummy = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ if columns:
+ columns = ','.join(columns)
+
+ ##############
+ # Do main job:
+ table_obj = Table(table, module, cursor)
+
+ # Set default returned values:
+ changed = False
+ kw = {}
+ kw['table'] = table
+ kw['state'] = ''
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+
+ if state == 'absent':
+ changed = table_obj.drop(cascade=cascade)
+
+ elif truncate:
+ changed = table_obj.truncate()
+
+ elif newname:
+ changed = table_obj.rename(newname)
+ q = table_obj.executed_queries
+ table_obj = Table(newname, module, cursor)
+ table_obj.executed_queries = q
+
+ elif state == 'present' and not like:
+ changed = table_obj.create(columns, storage_params,
+ tablespace, unlogged, owner)
+
+ elif state == 'present' and like:
+ changed = table_obj.create_like(like, including, tablespace,
+ unlogged, storage_params)
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ # Refresh table info for RETURN.
+ # Note, if table has been renamed, it gets info by newname:
+ table_obj.get_info()
+ db_connection.commit()
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+ else:
+ # We just change the table state here
+ # to keep other information about the dropped table:
+ kw['state'] = 'absent'
+
+ kw['queries'] = table_obj.executed_queries
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py
new file mode 100644
index 000000000..243005733
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py
@@ -0,0 +1,545 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
+# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_tablespace
+short_description: Add or remove PostgreSQL tablespaces from remote hosts
+description:
+- Adds or removes PostgreSQL tablespaces from remote hosts.
+options:
+ tablespace:
+ description:
+ - Name of the tablespace to add or remove.
+ required: true
+ type: str
+ aliases:
+ - name
+ location:
+ description:
+ - Path to the tablespace directory in the file system.
+ - Ensure that the location exists and has right privileges.
+ type: path
+ aliases:
+ - path
+ state:
+ description:
+ - Tablespace state.
+ - I(state=present) implies the tablespace must be created if it doesn't exist.
+ - I(state=absent) implies the tablespace must be removed if present.
+      I(state=absent) is mutually exclusive with I(location), I(owner), I(set).
+ - See the Notes section for information about check mode restrictions.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ owner:
+ description:
+ - Name of the role to set as an owner of the tablespace.
+ - If this option is not specified, the tablespace owner is a role that creates the tablespace.
+ type: str
+ set:
+ description:
+ - Dict of tablespace options to set. Supported from PostgreSQL 9.0.
+ - For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
+ - When reset is passed as an option's value, if the option was set previously, it will be removed.
+ type: dict
+ rename_to:
+ description:
+ - New name of the tablespace.
+ - The new name cannot begin with pg_, as such names are reserved for system tablespaces.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(false), check whether values of parameters I(tablespace), I(location), I(owner),
+ I(rename_to), I(session_role), I(settings_list) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via the parameters are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
+ support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
+ can not be run inside the transaction block.
+
+seealso:
+- name: PostgreSQL tablespaces
+ description: General information about PostgreSQL tablespaces.
+ link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html
+- name: CREATE TABLESPACE reference
+ description: Complete reference of the CREATE TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtablespace.html
+- name: ALTER TABLESPACE reference
+ description: Complete reference of the ALTER TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertablespace.html
+- name: DROP TABLESPACE reference
+ description: Complete reference of the DROP TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptablespace.html
+
+author:
+- Flavien Chantelot (@Dorn-)
+- Antoine Levy-Lambert (@antoinell)
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Create a new tablespace called acme and set bob as its owner
+ community.postgresql.postgresql_tablespace:
+ name: acme
+ owner: bob
+ location: /data/foo
+
+- name: Create a new tablespace called bar with tablespace options
+ community.postgresql.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: 1
+ seq_page_cost: 1
+
+- name: Reset random_page_cost option
+ community.postgresql.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: reset
+
+- name: Rename the tablespace from bar to pcie_ssd
+ community.postgresql.postgresql_tablespace:
+ name: bar
+ rename_to: pcie_ssd
+
+- name: Drop tablespace called bloat
+ community.postgresql.postgresql_tablespace:
+ name: bloat
+ state: absent
+'''
+
+RETURN = r'''
+queries:
+  description: List of queries that were tried to be executed.
+ returned: always
+ type: str
+ sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
+tablespace:
+ description: Tablespace name.
+ returned: always
+ type: str
+ sample: 'ssd'
+owner:
+ description: Tablespace owner.
+ returned: always
+ type: str
+ sample: 'Bob'
+options:
+ description: Tablespace options.
+ returned: always
+ type: dict
+ sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
+location:
+ description: Path to the tablespace in the file system.
+ returned: always
+ type: str
+ sample: '/incredible/fast/ssd'
+newname:
+ description: New tablespace name.
+ returned: if existent
+ type: str
+ sample: new_ssd
+state:
+ description: Tablespace state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+'''
+
+try:
+ from psycopg2 import __version__ as PSYCOPG2_VERSION
+ from psycopg2.extras import DictCursor
+ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
+ from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgTablespace(object):
+
+ """Class for working with PostgreSQL tablespaces.
+
+ Args:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ name (str) -- name of the tablespace
+
+ Attrs:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ name (str) -- name of the tablespace
+ exists (bool) -- flag the tablespace exists in the DB or not
+ owner (str) -- tablespace owner
+ location (str) -- path to the tablespace directory in the file system
+ executed_queries (list) -- list of executed queries
+ new_name (str) -- new name for the tablespace
+ opt_not_supported (bool) -- flag indicates a tablespace option is supported or not
+ """
+
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.exists = False
+ self.owner = ''
+ self.settings = {}
+ self.location = ''
+ self.executed_queries = []
+ self.new_name = ''
+ self.opt_not_supported = False
+ # Collect info:
+ self.get_info()
+
+ def get_info(self):
+ """Get tablespace information."""
+ # Check that spcoptions exists:
+ opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spcoptions'", add_to_executed=False)
+
+ # For 9.1 version and earlier:
+ location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spclocation'", add_to_executed=False)
+ if location:
+ location = 'spclocation'
+ else:
+ location = 'pg_tablespace_location(t.oid)'
+
+ if not opt:
+ self.opt_not_supported = True
+ query = ("SELECT r.rolname, (SELECT Null), %s "
+ "FROM pg_catalog.pg_tablespace AS t "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON t.spcowner = r.oid " % location)
+ else:
+ query = ("SELECT r.rolname, t.spcoptions, %s "
+ "FROM pg_catalog.pg_tablespace AS t "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON t.spcowner = r.oid " % location)
+
+ res = exec_sql(self, query + "WHERE t.spcname = %(name)s",
+ query_params={'name': self.name}, add_to_executed=False)
+
+ if not res:
+ self.exists = False
+ return False
+
+ if res[0][0]:
+ self.exists = True
+ self.owner = res[0][0]
+
+ if res[0][1]:
+ # Options exist:
+ for i in res[0][1]:
+ i = i.split('=')
+ self.settings[i[0]] = i[1]
+
+ if res[0][2]:
+ # Location exists:
+ self.location = res[0][2]
+
+ def create(self, location):
+ """Create tablespace.
+
+ Return True if success, otherwise, return False.
+
+ args:
+ location (str) -- tablespace directory path in the FS
+ """
+ query = ('CREATE TABLESPACE "%s" LOCATION \'%s\'' % (self.name, location))
+ return exec_sql(self, query, return_bool=True)
+
+ def drop(self):
+ """Drop tablespace.
+
+ Return True if success, otherwise, return False.
+ """
+ return exec_sql(self, 'DROP TABLESPACE "%s"' % self.name, return_bool=True)
+
+ def set_owner(self, new_owner):
+ """Set tablespace owner.
+
+ Return True if success, otherwise, return False.
+
+ args:
+ new_owner (str) -- name of a new owner for the tablespace"
+ """
+ if new_owner == self.owner:
+ return False
+
+ query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.name, new_owner)
+ return exec_sql(self, query, return_bool=True)
+
+ def rename(self, newname):
+ """Rename tablespace.
+
+ Return True if success, otherwise, return False.
+
+ args:
+ newname (str) -- new name for the tablespace"
+ """
+ query = 'ALTER TABLESPACE "%s" RENAME TO "%s"' % (self.name, newname)
+ self.new_name = newname
+ return exec_sql(self, query, return_bool=True)
+
+ def set_settings(self, new_settings):
+ """Set tablespace settings (options).
+
+ If some setting has been changed, set changed = True.
+ After all settings list is handling, return changed.
+
+ args:
+ new_settings (list) -- list of new settings
+ """
+ # settings must be a dict {'key': 'value'}
+ if self.opt_not_supported:
+ return False
+
+ changed = False
+
+ # Apply new settings:
+ for i in new_settings:
+ if new_settings[i] == 'reset':
+ if i in self.settings:
+ changed = self.__reset_setting(i)
+ self.settings[i] = None
+
+ elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
+ changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
+
+ return changed
+
+ def __reset_setting(self, setting):
+ """Reset tablespace setting.
+
+ Return True if success, otherwise, return False.
+
+ args:
+ setting (str) -- string in format "setting_name = 'setting_value'"
+ """
+ query = 'ALTER TABLESPACE "%s" RESET (%s)' % (self.name, setting)
+ return exec_sql(self, query, return_bool=True)
+
+ def __set_setting(self, setting):
+ """Set tablespace setting.
+
+ Return True if success, otherwise, return False.
+
+ args:
+ setting (str) -- string in format "setting_name = 'setting_value'"
+ """
+ query = 'ALTER TABLESPACE "%s" SET (%s)' % (self.name, setting)
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ tablespace=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ location=dict(type='path', aliases=['path']),
+ owner=dict(type='str'),
+ set=dict(type='dict'),
+ rename_to=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(('positional_args', 'named_args'),),
+ supports_check_mode=True,
+ )
+
+ tablespace = module.params["tablespace"]
+ state = module.params["state"]
+ location = module.params["location"]
+ owner = module.params["owner"]
+ rename_to = module.params["rename_to"]
+ settings = module.params["set"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if state == 'absent' and (location or owner or rename_to or settings):
+ module.fail_json(msg="state=absent is mutually exclusive location, "
+ "owner, rename_to, and set")
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not settings:
+ settings_list = None
+ else:
+ settings_list = ['%s = %s' % (k, v) for k, v in iteritems(settings)]
+
+ check_input(module, tablespace, location, owner,
+ rename_to, session_role, settings_list)
+
+ # Ensure psycopg2 libraries are available before connecting to DB:
+ ensure_required_libs(module)
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection, dummy = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Change autocommit to False if check_mode:
+ if module.check_mode:
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=False)
+ else:
+ db_connection.set_isolation_level(READ_COMMITTED)
+
+ # Set defaults:
+ autocommit = False
+ changed = False
+
+ ##############
+ # Create PgTablespace object and do main job:
+ tblspace = PgTablespace(module, cursor, tablespace)
+
+ # If tablespace exists with different location, exit:
+ if tblspace.exists and location and location != tblspace.location:
+ module.fail_json(msg="Tablespace '%s' exists with "
+ "different location '%s'" % (tblspace.name, tblspace.location))
+
+ # Create new tablespace:
+ if not tblspace.exists and state == 'present':
+ if rename_to:
+ module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
+
+ if not location:
+ module.fail_json(msg="'location' parameter must be passed with "
+ "state=present if the tablespace doesn't exist")
+
+ # Because CREATE TABLESPACE can not be run inside the transaction block:
+ autocommit = True
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(AUTOCOMMIT)
+
+ changed = tblspace.create(location)
+
+ # Drop non-existing tablespace:
+ elif not tblspace.exists and state == 'absent':
+ # Nothing to do:
+ module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
+
+ # Drop existing tablespace:
+ elif tblspace.exists and state == 'absent':
+ # Because DROP TABLESPACE can not be run inside the transaction block:
+ autocommit = True
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(AUTOCOMMIT)
+
+ changed = tblspace.drop()
+
+ # Rename tablespace:
+ elif tblspace.exists and rename_to:
+ if tblspace.name != rename_to:
+ changed = tblspace.rename(rename_to)
+
+ if state == 'present':
+ # Refresh information:
+ tblspace.get_info()
+
+ # Change owner and settings:
+ if state == 'present' and tblspace.exists:
+ if owner:
+ changed = tblspace.set_owner(owner)
+
+ if settings:
+ changed = tblspace.set_settings(settings)
+
+ tblspace.get_info()
+
+ # Rollback if it's possible and check_mode:
+ if not autocommit:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ kw = dict(
+ changed=changed,
+ state='present',
+ tablespace=tblspace.name,
+ owner=tblspace.owner,
+ queries=tblspace.executed_queries,
+ options=tblspace.settings,
+ location=tblspace.location,
+ )
+
+ if state == 'present':
+ kw['state'] = 'present'
+
+ if tblspace.new_name:
+ kw['newname'] = tblspace.new_name
+
+ elif state == 'absent':
+ kw['state'] = 'absent'
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py
new file mode 100644
index 000000000..594e0f1ae
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py
@@ -0,0 +1,1085 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Create, alter, or remove a user (role) from a PostgreSQL server instance
+description:
+- Creates, alters, or removes a user (role) from a PostgreSQL server instance
+ ("cluster" in PostgreSQL terminology) and, optionally,
+ grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- You can also use it to grant or revoke user's privileges in a particular database.
+- You cannot remove a user while it still has any privileges granted to it in any database.
+- Set I(fail_on_user) to C(false) to make the module ignore failures when trying to remove a user.
+ In this case, the module reports if changes happened as usual and separately reports
+ whether the user has been removed or not.
+- B(WARNING) The I(priv) option has been B(deprecated) and will be removed in community.postgresql 3.0.0. Please use the
+ M(community.postgresql.postgresql_privs) module instead.
+- B(WARNING) The I(groups) option has been B(deprecated) and will be removed in community.postgresql 3.0.0.
+ Please use the M(community.postgresql.postgresql_membership) module instead.
+options:
+ name:
+ description:
+ - Name of the user (role) to add or remove.
+ type: str
+ required: true
+ aliases:
+ - user
+ password:
+ description:
+ - Set the user's password, before 1.4 this was required.
+ - Password can be passed unhashed or hashed (MD5-hashed).
+ - An unhashed password is automatically hashed when saved into the
+ database if I(encrypted) is set, otherwise it is saved in
+ plain text format.
+ - When passing an MD5-hashed password, you must generate it with the format
+ C('str["md5"] + md5[ password + username ]'), resulting in a total of
+ 35 characters. An easy way to do this is
+ C(echo "md5`echo -n 'verysecretpasswordJOE' | md5sum | awk '{print $1}'`").
+ - Note that if the provided password string is already in MD5-hashed
+ format, then it is used as-is, regardless of I(encrypted) option.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and where user's permissions are granted.
+ type: str
+ default: ''
+ aliases:
+ - login_db
+ fail_on_user:
+ description:
+ - If C(true), fails when the user (role) cannot be removed. Otherwise just log and continue.
+ default: true
+ type: bool
+ aliases:
+ - fail_on_role
+ priv:
+ description:
+ - This option has been B(deprecated) and will be removed in
+ community.postgresql 3.0.0. Please use the M(community.postgresql.postgresql_privs) module to
+ GRANT/REVOKE permissions instead.
+ - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
+ you can define the user's privileges for the database ( allowed options - 'CREATE',
+ 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
+ for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
+ 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
+ C(table:SELECT) ). Mixed example of this string:
+ C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
+ - When I(priv) contains tables, the module uses the schema C(public) by default.
+ If you need to specify a different schema, use the C(schema_name.table_name) notation,
+ for example, C(pg_catalog.pg_stat_database:SELECT).
+ type: str
+ role_attr_flags:
+ description:
+ - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
+ - Note that '[NO]CREATEUSER' is deprecated.
+ - To create a simple role for using it like a group, use C(NOLOGIN) flag.
+ - See the full list of supported flags in documentation for your PostgreSQL version.
+ type: str
+ default: ''
+ session_role:
+ description:
+ - Switch to session role after connecting.
+ - The specified session role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The user (role) state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Whether the password is stored hashed in the database.
+ - You can specify an unhashed password, and PostgreSQL ensures
+ the stored password is hashed when I(encrypted=true) is set.
+ If you specify a hashed password, the module uses it as-is,
+ regardless of the setting of I(encrypted).
+ - "Note: Postgresql 10 and newer does not support unhashed passwords."
+ - Previous to Ansible 2.6, this was C(false) by default.
+ default: true
+ type: bool
+ expires:
+ description:
+ - The date at which the user's password is to expire.
+ - If set to C('infinity'), user's password never expires.
+ - Note that this value must be a valid SQL date and time type.
+ type: str
+ no_password_changes:
+ description:
+ - If C(true), does not inspect the database for password changes.
+ If the user already exists, skips all password related checks.
+ Useful when C(pg_authid) is not accessible (such as in AWS RDS).
+ Otherwise, makes password changes as necessary.
+ default: false
+ type: bool
+ conn_limit:
+ description:
+ - Specifies the user (role) connection limit.
+ type: int
+ ssl_mode:
+ description:
+ - Determines how an SSL session is negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, verifies that the server's certificate is signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ groups:
+ description:
+ - This option has been B(deprecated) and will be removed in community.postgresql 3.0.0.
+ Please use the I(postgresql_membership) module to GRANT/REVOKE group/role memberships
+ instead.
+ - The list of groups (roles) that you want to grant to the user.
+ type: list
+ elements: str
+ comment:
+ description:
+ - Adds a comment on the user (equivalent to the C(COMMENT ON ROLE) statement).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(false), checks whether values of options I(name), I(password), I(privs), I(expires),
+ I(role_attr_flags), I(groups), I(comment), I(session_role) are potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections through the options are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+notes:
+- The module creates a user (role) with login privilege by default.
+ Use C(NOLOGIN) I(role_attr_flags) to change this behaviour.
+- If you specify C(PUBLIC) as the user (role), then the privilege changes apply to all users (roles).
+ You may not specify password or role_attr_flags when the C(PUBLIC) user is specified.
+- SCRAM-SHA-256-hashed passwords (SASL Authentication) require PostgreSQL version 10 or newer.
+ On the previous versions the whole hashed string is used as a password.
+- 'Working with SCRAM-SHA-256-hashed passwords, be sure you use the I(environment:) variable
+ C(PGOPTIONS: "-c password_encryption=scram-sha-256") (see the provided example).'
+- On some systems (such as AWS RDS), C(pg_authid) is not accessible, thus, the module cannot compare
+ the current and desired C(password). In this case, the module assumes that the passwords are
+ different and changes it reporting that the state has been changed.
+ To skip all password related checks for existing users, use I(no_password_changes=true).
+- On some systems (such as AWS RDS), C(SUPERUSER) is unavailable. This means the C(SUPERUSER) and
+ C(NOSUPERUSER) I(role_attr_flags) should not be specified to preserve idempotency and avoid
+ InsufficientPrivilege errors.
+
+attributes:
+ check_mode:
+ support: full
+
+seealso:
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_membership
+- module: community.postgresql.postgresql_owner
+- name: PostgreSQL database roles
+ description: Complete reference of the PostgreSQL database roles documentation.
+ link: https://www.postgresql.org/docs/current/user-manag.html
+- name: PostgreSQL SASL Authentication
+ description: Complete reference of the PostgreSQL SASL Authentication.
+ link: https://www.postgresql.org/docs/current/sasl-authentication.html
+author:
+- Ansible Core Team
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+# This example uses the 'priv' argument which is deprecated.
+# You should use the 'postgresql_privs' module instead.
+- name: Connect to acme database, create django user, and grant access to database and products table
+ community.postgresql.postgresql_user:
+ db: acme
+ name: django
+ password: ceec4eif7ya
+ priv: "CONNECT/products:ALL"
+ expires: "Jan 31 2020"
+
+- name: Add a comment on django user
+ community.postgresql.postgresql_user:
+ db: acme
+ name: django
+ comment: This is a test user
+
+# Connect to default database, create rails user, set its password (MD5- or SHA256-hashed),
+# and grant privilege to create other databases and demote rails from super user status if user exists.
+# A pre-hashed password must match the hash from the corresponding pg_authid entry.
+- name: Create rails user, set MD5-hashed password, grant privs
+ community.postgresql.postgresql_user:
+ name: rails
+ password: md59543f1d82624df2b31672ec0f7050460
+ # password: SCRAM-SHA-256$4096:zFuajwIVdli9mK=NJkcv1Q++$JC4gWIrEHmF6sqRbEiZw5FFW45HUPrpVzNdoM72o730+;fqA4vLN3mCZGbhcbQyvNYY7anCrUTsem1eCh/4YA94=
+ role_attr_flags: CREATEDB,NOSUPERUSER
+ # When using sha256-hashed password:
+ #environment:
+ # PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+# This example uses the 'priv' argument which is deprecated.
+# You should use the 'postgresql_privs' module instead.
+- name: Connect to acme database and remove test user privileges from there
+ community.postgresql.postgresql_user:
+ db: acme
+ name: test
+ priv: "ALL/products:ALL"
+ state: absent
+ fail_on_user: false
+
+# This example uses the 'priv' argument which is deprecated.
+# You should use the 'postgresql_privs' module instead.
+- name: Connect to test database, remove test user from cluster
+ community.postgresql.postgresql_user:
+ db: test
+ name: test
+ priv: ALL
+ state: absent
+
+# This example uses the 'priv' argument which is deprecated.
+# You should use the 'postgresql_privs' module instead.
+- name: Connect to acme database and set user's password with no expire date
+ community.postgresql.postgresql_user:
+ db: acme
+ name: django
+ password: mysupersecretword
+ priv: "CONNECT/products:ALL"
+ expires: infinity
+
+# Example privileges string format
+# INSERT,UPDATE/table:SELECT/anothertable:ALL
+
+- name: Connect to test database and remove an existing user's password
+ community.postgresql.postgresql_user:
+ db: test
+ user: test
+ password: ""
+
+# This example uses the `group` argument which is deprecated.
+# You should use the `postgresql_membership` module instead.
+- name: Create user test and grant group user_ro and user_rw to it
+ community.postgresql.postgresql_user:
+ name: test
+ groups:
+ - user_ro
+ - user_rw
+
+# Create user with a cleartext password if it does not exist or update its password.
+# The password will be encrypted with SCRAM algorithm (available since PostgreSQL 10)
+- name: Create appclient user with SCRAM-hashed password
+ community.postgresql.postgresql_user:
+ name: appclient
+ password: "secret123"
+ environment:
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+# This example uses the 'priv' argument which is deprecated.
+# You should use the 'postgresql_privs' module instead.
+- name: Create a user, grant SELECT on pg_catalog.pg_stat_database
+ community.postgresql.postgresql_user:
+ name: monitoring
+ priv: 'pg_catalog.pg_stat_database:SELECT'
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
+'''
+
+import itertools
+import re
+import traceback
+from hashlib import md5, sha256
+import hmac
+from base64 import b64decode
+
+try:
+ import psycopg2
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ pg_quote_identifier,
+ SQLParseError,
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ ensure_required_libs,
+ get_conn_params,
+ get_server_version,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.postgresql.plugins.module_utils import saslprep
+
+try:
+ # pbkdf2_hmac is missing on python 2.6, we can safely assume,
+ # that postresql 10 capable instance have at least python 2.7 installed
+ from hashlib import pbkdf2_hmac
+ pbkdf2_found = True
+except ImportError:
+ pbkdf2_found = False
+
+
+FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
+FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
+
+SCRAM_SHA256_REGEX = r'^SCRAM-SHA-256\$(\d+):([A-Za-z0-9+\/=]+)\$([A-Za-z0-9+\/=]+):([A-Za-z0-9+\/=]+)$'
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
+VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
+ database=frozenset(
+ ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
+ )
+
+# map to cope with idiosyncrasies of SUPERUSER and LOGIN
+PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
+ CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
+ REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
+
+executed_queries = []
+
+# This is a special list for debugging.
+# If you need to fetch information (e.g. results of cursor.fetchall(),
+# queries built with cursor.mogrify(), vars values, etc.):
+# 1. Put debug_info.append(<information_you_need>) as many times as you need.
+# 2. Run integration tests or you playbook with -vvv
+# 3. If it's not empty, you'll see the list in the returned json.
+debug_info = []
+
+
class InvalidFlagsError(Exception):
    """Raised by parse_role_attrs() when role_attr_flags contains an unsupported flag."""
    pass
+
+
class InvalidPrivsError(Exception):
    """Raised by parse_privs() when the (deprecated) priv string names an unknown privilege."""
    pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
def user_exists(cursor, user):
    """Return True if the given role exists in pg_roles, False otherwise.

    The implicit PUBLIC pseudo-role always exists, so it is never
    looked up in the catalog.
    """
    if user == 'PUBLIC':
        return True
    cursor.execute(
        "SELECT rolname FROM pg_roles WHERE rolname=%(user)s",
        {'user': user},
    )
    return cursor.rowcount > 0
+
+
def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
    """Create a new database user (role) and return True.

    role_attr_flags is pre-validated/escaped by parse_role_attrs and
    'encrypted' is a literal keyword, so both are interpolated directly;
    password and expires are bound as query parameters.
    """
    parts = ['CREATE USER "%(user)s"' % {"user": user}]
    if password is not None and password != '':
        parts.append("WITH %(crypt)s" % {"crypt": encrypted})
        parts.append("PASSWORD %(password)s")
    if expires is not None:
        parts.append("VALID UNTIL %(expires)s")
    if conn_limit is not None:
        parts.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
    parts.append(role_attr_flags)
    statement = ' '.join(parts)
    executed_queries.append(statement)
    cursor.execute(statement, dict(password=password, expires=expires))
    return True
+
+
def user_should_we_change_password(current_role_attrs, user, password, encrypted):
    """Check if we should change the user's password.

    Compare the proposed password with the existing one, comparing
    hashes if encrypted. If we can't access it assume yes.

    current_role_attrs is the pg_authid row for the user (mapping-like)
    or None when it could not be read; password is the requested
    password (plain, 'md5...'-hashed or SCRAM-SHA-256 verifier) or None;
    encrypted is the literal 'ENCRYPTED'/'UNENCRYPTED' keyword.
    Returns True when an ALTER USER ... PASSWORD is needed.
    """

    if current_role_attrs is None:
        # on some databases, E.g. AWS RDS instances, there is no access to
        # the pg_authid relation to check the pre-existing password, so we
        # just assume password is different
        return True

    # Do we actually need to do anything?
    pwchanging = False
    if password is not None:
        # Empty password means that the role shouldn't have a password, which
        # means we need to check if the current password is None.
        if password == '':
            if current_role_attrs['rolpassword'] is not None:
                pwchanging = True
        # If the provided password is a SCRAM hash, compare it directly to the current password
        elif re.match(SCRAM_SHA256_REGEX, password):
            if password != current_role_attrs['rolpassword']:
                pwchanging = True

        # SCRAM hashes are represented as a special object, containing hash data:
        # `SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey>`
        # for reference, see https://www.postgresql.org/docs/current/catalog-pg-authid.html
        elif current_role_attrs['rolpassword'] is not None \
                and pbkdf2_found \
                and re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']):

            r = re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword'])
            try:
                # extract SCRAM params from rolpassword
                it = int(r.group(1))
                salt = b64decode(r.group(2))
                server_key = b64decode(r.group(4))
                # we'll never need `storedKey` as it is only used for server auth in SCRAM
                # storedKey = b64decode(r.group(3))

                # from RFC5802 https://tools.ietf.org/html/rfc5802#section-3
                # SaltedPassword := Hi(Normalize(password), salt, i)
                # ServerKey := HMAC(SaltedPassword, "Server Key")
                normalized_password = saslprep.saslprep(to_text(password))
                salted_password = pbkdf2_hmac('sha256', to_bytes(normalized_password), salt, it)

                # Recompute the ServerKey from the plaintext candidate and
                # compare it with the stored one: equal keys mean the
                # password is unchanged.
                server_key_verifier = hmac.new(salted_password, digestmod=sha256)
                server_key_verifier.update(b'Server Key')

                if server_key_verifier.digest() != server_key:
                    pwchanging = True
            except Exception:
                # We assume the password is not scram encrypted
                # or we cannot check it properly, e.g. due to missing dependencies
                pwchanging = True

        # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
        # 3: The size of the 'md5' prefix
        # When the provided password looks like a MD5-hash, value of
        # 'encrypted' is ignored.
        elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
            if password != current_role_attrs['rolpassword']:
                pwchanging = True
        elif encrypted == 'ENCRYPTED':
            # Hash the plaintext candidate the same way PostgreSQL stores
            # MD5 passwords (md5(password + username)) before comparing.
            hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
            if hashed_password != current_role_attrs['rolpassword']:
                pwchanging = True

    return pwchanging
+
+
def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
    """Change user password and/or attributes. Return True if changed, False otherwise.

    The PUBLIC pseudo-role only accepts a no-op call; passing a password
    or flags for it fails the module. When no_password_changes is True,
    only role attribute flags are reconciled (read from pg_roles, which
    is world-readable, instead of pg_authid).
    """
    changed = False

    cursor = db_connection.cursor(cursor_factory=DictCursor)
    # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
    # literal
    if user == 'PUBLIC':
        if password is not None:
            module.fail_json(msg="cannot change the password for PUBLIC user")
        elif role_attr_flags != '':
            module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
        else:
            return False

    # Handle passwords.
    if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
        # Select password and all flag-like columns in order to verify changes.
        try:
            select = "SELECT * FROM pg_authid where rolname=%(user)s"
            cursor.execute(select, {"user": user})
            # Grab current role attributes.
            current_role_attrs = cursor.fetchone()
        except psycopg2.ProgrammingError:
            # No access to pg_authid (e.g. AWS RDS); fall through with None
            # so the password is assumed to differ.
            current_role_attrs = None
            db_connection.rollback()

        pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)

        if current_role_attrs is None:
            try:
                # AWS RDS instances does not allow user to access pg_authid
                # so try to get current_role_attrs from pg_roles tables
                select = "SELECT * FROM pg_roles where rolname=%(user)s"
                cursor.execute(select, {"user": user})
                # Grab current role attributes from pg_roles
                current_role_attrs = cursor.fetchone()
            except psycopg2.ProgrammingError as e:
                db_connection.rollback()
                module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))

        # Compare requested flags (e.g. 'CREATEDB NOSUPERUSER') against the
        # corresponding boolean catalog columns.
        role_attr_flags_changing = False
        if role_attr_flags:
            role_attr_flags_dict = {}
            for r in role_attr_flags.split(' '):
                if r.startswith('NO'):
                    role_attr_flags_dict[r.replace('NO', '', 1)] = False
                else:
                    role_attr_flags_dict[r] = True

            for role_attr_name, role_attr_value in role_attr_flags_dict.items():
                if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
                    role_attr_flags_changing = True

        if expires is not None:
            # Let the server parse the requested expiry so the comparison
            # against rolvaliduntil uses the same timezone semantics.
            cursor.execute("SELECT %s::timestamptz;", (expires,))
            expires_with_tz = cursor.fetchone()[0]
            expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
        else:
            expires_changing = False

        conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])

        if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
            return False

        alter = ['ALTER USER "%(user)s"' % {"user": user}]
        if pwchanging:
            if password != '':
                alter.append("WITH %(crypt)s" % {"crypt": encrypted})
                alter.append("PASSWORD %(password)s")
            else:
                # Empty password requested: remove the stored password.
                alter.append("WITH PASSWORD NULL")
            alter.append(role_attr_flags)
        elif role_attr_flags:
            alter.append('WITH %s' % role_attr_flags)
        if expires is not None:
            alter.append("VALID UNTIL %(expires)s")
        if conn_limit is not None:
            alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})

        query_password_data = dict(password=password, expires=expires)
        try:
            statement = ' '.join(alter)
            cursor.execute(statement, query_password_data)
            changed = True
            executed_queries.append(statement)
        except psycopg2.InternalError as e:
            if e.pgcode == '25006':
                # Handle errors due to read-only transactions indicated by pgcode 25006
                # ERROR: cannot execute ALTER ROLE in a read-only transaction
                changed = False
                module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
                return changed
            else:
                raise psycopg2.InternalError(e)
        except psycopg2.NotSupportedError as e:
            module.fail_json(msg=e.pgerror, exception=traceback.format_exc())

    elif no_password_changes and role_attr_flags != '':
        # Grab role information from pg_roles instead of pg_authid
        select = "SELECT * FROM pg_roles where rolname=%(user)s"
        cursor.execute(select, {"user": user})
        # Grab current role attributes.
        current_role_attrs = cursor.fetchone()

        role_attr_flags_changing = False

        if role_attr_flags:
            role_attr_flags_dict = {}
            for r in role_attr_flags.split(' '):
                if r.startswith('NO'):
                    role_attr_flags_dict[r.replace('NO', '', 1)] = False
                else:
                    role_attr_flags_dict[r] = True

            for role_attr_name, role_attr_value in role_attr_flags_dict.items():
                if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
                    role_attr_flags_changing = True

        if not role_attr_flags_changing:
            return False

        alter = ['ALTER USER "%(user)s"' %
                 {"user": user}]
        if role_attr_flags:
            alter.append('WITH %s' % role_attr_flags)

        try:
            statement = ' '.join(alter)
            cursor.execute(statement)
            executed_queries.append(statement)
        except psycopg2.InternalError as e:
            if e.pgcode == '25006':
                # Handle errors due to read-only transactions indicated by pgcode 25006
                # ERROR: cannot execute ALTER ROLE in a read-only transaction
                changed = False
                module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
                return changed
            else:
                raise psycopg2.InternalError(e)

        # Grab new role attributes.
        cursor.execute(select, {"user": user})
        new_role_attrs = cursor.fetchone()

        # Detect any differences between current_ and new_role_attrs.
        changed = current_role_attrs != new_role_attrs

    return changed
+
+
def user_delete(cursor, user):
    """Drop the given user (role).

    The DROP runs inside a savepoint so that a failure (e.g. remaining
    privileges) does not abort the surrounding transaction.
    Returns True if successful otherwise False.
    """
    cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
    statement = 'DROP USER "%s"' % user
    executed_queries.append(statement)
    try:
        cursor.execute(statement)
        dropped = True
    except Exception:
        # Undo the failed DROP but keep the outer transaction usable.
        cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
        dropped = False
    cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
    return dropped
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def has_table_privileges(cursor, user, table, privs):
    """
    Return the difference between the privileges that a user already has and
    the privileges that they desire to have.

    :returns: tuple of:
        * privileges that they have and were requested
        * privileges they currently hold but were not requested
        * privileges requested that they do not hold
    """
    current = get_table_privileges(cursor, user, table)
    return (
        current.intersection(privs),
        current.difference(privs),
        privs.difference(current),
    )
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def get_table_privileges(cursor, user, table):
    """Return the frozenset of privileges a user holds on a table.

    A table name without an explicit schema qualifier is looked up in
    the 'public' schema.
    """
    schema, sep, remainder = table.partition('.')
    if not sep:
        schema, remainder = 'public', table
    cursor.execute(
        "SELECT privilege_type FROM information_schema.role_table_grants "
        "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s",
        {'user': user, 'table': remainder, 'schema': schema},
    )
    return frozenset(row[0] for row in cursor.fetchall())
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def grant_table_privileges(cursor, user, table, privs):
    """GRANT the given privileges on a table to the user."""
    # Note: priv escaped by parse_privs
    statement = 'GRANT %s ON TABLE %s TO "%s"' % (
        ', '.join(privs), pg_quote_identifier(table, 'table'), user)
    executed_queries.append(statement)
    cursor.execute(statement)
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def revoke_table_privileges(cursor, user, table, privs):
    """REVOKE the given privileges on a table from the user."""
    # Note: priv escaped by parse_privs
    statement = 'REVOKE %s ON TABLE %s FROM "%s"' % (
        ', '.join(privs), pg_quote_identifier(table, 'table'), user)
    executed_queries.append(statement)
    cursor.execute(statement)
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def get_database_privileges(cursor, user, db):
    """Return the normalized set of database-level privileges for a user.

    Reads the ACL column (pg_database.datacl) for the database and maps
    the single-letter ACL codes of the user's entry to privilege
    keywords. Returns an empty set when the database has no explicit ACL
    or the user has no entry in it.
    """
    priv_map = {
        'C': 'CREATE',
        'T': 'TEMPORARY',
        'c': 'CONNECT',
    }
    query = 'SELECT datacl FROM pg_database WHERE datname = %s'
    cursor.execute(query, (db,))
    datacl = cursor.fetchone()[0]
    if datacl is None:
        return set()
    # Escape the role name before interpolating it into the pattern:
    # names may contain regex metacharacters (e.g. '.', '+', '(') which
    # would otherwise corrupt the search or raise re.error.
    r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % re.escape(user), datacl)
    if r is None:
        return set()
    o = set()
    for v in r.group(1):
        o.add(priv_map[v])
    return normalize_privileges(o, 'database')
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def has_database_privileges(cursor, user, db, privs):
    """
    Return the difference between the privileges that a user already has and
    the privileges that they desire to have.

    :returns: tuple of:
        * privileges that they have and were requested
        * privileges they currently hold but were not requested
        * privileges requested that they do not hold
    """
    current = get_database_privileges(cursor, user, db)
    return (
        current.intersection(privs),
        current.difference(privs),
        privs.difference(current),
    )
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def grant_database_privileges(cursor, user, db, privs):
    """GRANT the given database privileges to the user (or to PUBLIC)."""
    # Note: priv escaped by parse_privs
    priv_list = ', '.join(privs)
    quoted_db = pg_quote_identifier(db, 'database')
    if user == "PUBLIC":
        statement = 'GRANT %s ON DATABASE %s TO PUBLIC' % (priv_list, quoted_db)
    else:
        statement = 'GRANT %s ON DATABASE %s TO "%s"' % (priv_list, quoted_db, user)

    executed_queries.append(statement)
    cursor.execute(statement)
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def revoke_database_privileges(cursor, user, db, privs):
    """REVOKE the given database privileges from the user (or from PUBLIC)."""
    # Note: priv escaped by parse_privs
    priv_list = ', '.join(privs)
    quoted_db = pg_quote_identifier(db, 'database')
    if user == "PUBLIC":
        statement = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (priv_list, quoted_db)
    else:
        statement = 'REVOKE %s ON DATABASE %s FROM "%s"' % (priv_list, quoted_db, user)

    executed_queries.append(statement)
    cursor.execute(statement)
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def revoke_privileges(cursor, user, privs):
    """REVOKE any of the requested privileges the user currently holds.

    privs is the dict produced by parse_privs() (or None for a no-op).
    Return True if anything was revoked, False otherwise.
    """
    if privs is None:
        return False

    revoke_funcs = dict(table=revoke_table_privileges,
                        database=revoke_database_privileges)
    check_funcs = dict(table=has_table_privileges,
                       database=has_database_privileges)

    changed = False
    for type_ in privs:
        for name, requested in privs[type_].items():
            # Only revoke privileges the user actually holds.
            held = check_funcs[type_](cursor, user, name, requested)[0]
            if held:
                revoke_funcs[type_](cursor, user, name, requested)
                changed = True
    return changed
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def grant_privileges(cursor, user, privs):
    """GRANT any requested privileges the user does not yet hold.

    privs is the dict produced by parse_privs() (or None for a no-op).
    Return True if anything was granted, False otherwise.
    """
    if privs is None:
        return False

    grant_funcs = dict(table=grant_table_privileges,
                       database=grant_database_privileges)
    check_funcs = dict(table=has_table_privileges,
                       database=has_database_privileges)

    changed = False
    for type_ in privs:
        for name, requested in privs[type_].items():
            # Only grant privileges the user is currently missing.
            missing = check_funcs[type_](cursor, user, name, requested)[2]
            if missing:
                grant_funcs[type_](cursor, user, name, requested)
                changed = True
    return changed
+
+
def parse_role_attrs(role_attr_flags, srv_version):
    """Validate and normalize a comma-separated role attribute string.

    Accepts flags such as CREATEDB,CREATEROLE,NOSUPERUSER (each
    optionally prefixed with NO), uppercases them and checks them
    against the flags supported by the given server version.
    Returns a space-separated string ready to embed in CREATE/ALTER USER.

    Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
    Note: "[NO]CREATEUSER" role attribute is deprecated.

    :raises InvalidFlagsError: when an unknown flag is supplied.
    """
    requested = frozenset(flag.upper() for flag in role_attr_flags.split(',') if flag)

    base_flags = frozenset(FLAGS).union(get_valid_flags_by_version(srv_version))
    valid_flags = base_flags.union('NO%s' % flag for flag in base_flags)

    if not requested.issubset(valid_flags):
        raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
                                ' '.join(requested.difference(valid_flags)))

    return ' '.join(requested)
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def normalize_privileges(privs, type_):
    """Return a canonical privilege set: expand ALL and map TEMP to TEMPORARY."""
    result = set(privs)
    if 'ALL' in result:
        # Expand first, then drop the marker: 'ALL' itself is listed in
        # VALID_PRIVS, so removing before expanding would re-add it.
        result.update(VALID_PRIVS[type_])
        result.remove('ALL')
    if 'TEMP' in result:
        result.add('TEMPORARY')
        result.remove('TEMP')

    return result
+
+
+# WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
def parse_privs(privs, db):
    """Parse a slash-separated priv string for database db.

    Tokens without ':' are database privileges applied to db; tokens of
    the form name:privs grant table privileges. Returns a dict
    {'database': {name: privset}, 'table': {name: privset}}, or None
    when privs is None.

    :raises InvalidPrivsError: when an unknown privilege is supplied.
    """
    if privs is None:
        return None

    parsed = {
        'database': {},
        'table': {}
    }
    for token in privs.split('/'):
        if ':' in token:
            type_ = 'table'
            name, raw = token.split(':', 1)
        else:
            type_ = 'database'
            name, raw = db, token

        priv_set = frozenset(p.strip().upper() for p in raw.split(',') if p.strip())

        unknown = priv_set.difference(VALID_PRIVS[type_])
        if unknown:
            raise InvalidPrivsError('Invalid privs specified for %s: %s' %
                                    (type_, ' '.join(unknown)))

        parsed[type_][name] = normalize_privileges(priv_set, type_)

    return parsed
+
+
+def get_valid_flags_by_version(srv_version):
+ """
+ Some role attributes were introduced after certain versions. We want to
+ compile a list of valid flags against the current Postgres version.
+ """
+ return [
+ flag
+ for flag, version_introduced in FLAGS_BY_VERSION.items()
+ if srv_version >= version_introduced
+ ]
+
+
+def get_comment(cursor, user):
+ """Get user's comment."""
+ query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(user)s")
+ cursor.execute(query, {'user': user})
+ return cursor.fetchone()[0]
+
+
+def add_comment(cursor, user, comment):
+ """Add comment on user."""
+ if comment != get_comment(cursor, user):
+ query = 'COMMENT ON ROLE "%s" IS ' % user
+ cursor.execute(query + '%(comment)s', {'comment': comment})
+ executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
+ return True
+ else:
+ return False
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ user=dict(type='str', required=True, aliases=['name']),
+ password=dict(type='str', default=None, no_log=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priv=dict(type='str', default=None, removed_in_version='3.0.0', removed_from_collection='community.postgreql'),
+ db=dict(type='str', default='', aliases=['login_db']),
+ fail_on_user=dict(type='bool', default=True, aliases=['fail_on_role']),
+ role_attr_flags=dict(type='str', default=''),
+ encrypted=dict(type='bool', default=True),
+ no_password_changes=dict(type='bool', default=False, no_log=False),
+ expires=dict(type='str', default=None),
+ conn_limit=dict(type='int', default=None),
+ session_role=dict(type='str'),
+ # WARNING: groups are deprecated and will be removed in community.postgresql 3.0.0
+ groups=dict(type='list', elements='str', removed_in_version='3.0.0', removed_from_collection='community.postgreql'),
+ comment=dict(type='str', default=None),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ user = module.params["user"]
+ password = module.params["password"]
+ state = module.params["state"]
+ fail_on_user = module.params["fail_on_user"]
+ # WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
+ if module.params['db'] == '' and module.params["priv"] is not None:
+ module.fail_json(msg="privileges require a database to be specified")
+ # WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
+ privs = parse_privs(module.params["priv"], module.params["db"])
+ no_password_changes = module.params["no_password_changes"]
+ if module.params["encrypted"]:
+ encrypted = "ENCRYPTED"
+ else:
+ encrypted = "UNENCRYPTED"
+ expires = module.params["expires"]
+ conn_limit = module.params["conn_limit"]
+ role_attr_flags = module.params["role_attr_flags"]
+ # WARNING: groups are deprecated and will be removed in community.postgresql 3.0.0
+ groups = module.params["groups"]
+ if groups:
+ groups = [e.strip() for e in groups]
+ comment = module.params["comment"]
+ session_role = module.params['session_role']
+
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ # WARNING: groups are deprecated and will be removed in community.postgresql 3.0.0
+ check_input(module, user, password, privs, expires,
+ role_attr_flags, groups, comment, session_role)
+
+ # Ensure psycopg2 libraries are available before connecting to DB:
+ ensure_required_libs(module)
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection, dummy = connect_to_db(module, conn_params)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ srv_version = get_server_version(db_connection)
+
+ try:
+ role_attr_flags = parse_role_attrs(role_attr_flags, srv_version)
+ except InvalidFlagsError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ kw = dict(user=user)
+ changed = False
+ user_removed = False
+
+ if state == "present":
+ if user_exists(cursor, user):
+ try:
+ changed = user_alter(db_connection, module, user, password,
+ role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ else:
+ try:
+ changed = user_add(cursor, user, password,
+ role_attr_flags, encrypted, expires, conn_limit)
+ except psycopg2.ProgrammingError as e:
+ module.fail_json(msg="Unable to add user with given requirement "
+ "due to : %s" % to_native(e),
+ exception=traceback.format_exc())
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ # WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
+ try:
+ changed = grant_privileges(cursor, user, privs) or changed
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ # WARNING: groups are deprecated and will be removed in community.postgresql 3.0.0
+ if groups:
+ target_roles = []
+ target_roles.append(user)
+ pg_membership = PgMembership(module, cursor, groups, target_roles)
+ changed = pg_membership.grant() or changed
+ executed_queries.extend(pg_membership.executed_queries)
+
+ if comment is not None:
+ try:
+ changed = add_comment(cursor, user, comment) or changed
+ except Exception as e:
+ module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ else:
+ if user_exists(cursor, user):
+ if module.check_mode:
+ changed = True
+ kw['user_removed'] = True
+ else:
+ # WARNING: privs are deprecated and will be removed in community.postgresql 3.0.0
+ try:
+ changed = revoke_privileges(cursor, user, privs)
+ user_removed = user_delete(cursor, user)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ changed = changed or user_removed
+ if fail_on_user and not user_removed:
+ msg = "Unable to remove user"
+ module.fail_json(msg=msg)
+ kw['user_removed'] = user_removed
+
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ if debug_info:
+ kw['debug_info'] = debug_info
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py b/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py
new file mode 100644
index 000000000..f443d50c3
--- /dev/null
+++ b/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user_obj_stat_info
+short_description: Gather statistics about PostgreSQL user objects
+description:
+- Gathers statistics about PostgreSQL user objects.
+version_added: '0.2.0'
+options:
+ filter:
+ description:
+ - Limit the collected information by comma separated string or YAML list.
+ - Allowable values are C(functions), C(indexes), C(tables).
+ - By default, collects all subsets.
+ - Unsupported values are ignored.
+ type: list
+ elements: str
+ schema:
+ description:
+ - Restrict the output by certain schema.
+ type: str
+ db:
+ description:
+ - Name of database to connect.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(false), check the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(false) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+
+notes:
+- C(size) and C(total_size) returned values are presented in bytes.
+- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled.
+ See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
+
+attributes:
+ check_mode:
+ support: full
+
+seealso:
+- module: community.postgresql.postgresql_info
+- module: community.postgresql.postgresql_ping
+- name: PostgreSQL statistics collector reference
+ description: Complete reference of the PostgreSQL statistics collector documentation.
+ link: https://www.postgresql.org/docs/current/monitoring-stats.html
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Collect information about all supported user objects of the acme database
+ community.postgresql.postgresql_user_obj_stat_info:
+ db: acme
+
+- name: Collect information about all supported user objects in the custom schema of the acme database
+ community.postgresql.postgresql_user_obj_stat_info:
+ db: acme
+ schema: custom
+
+- name: Collect information about user tables and indexes in the acme database
+ community.postgresql.postgresql_user_obj_stat_info:
+ db: acme
+ filter: tables, indexes
+'''
+
+RETURN = r'''
+indexes:
+ description: User index statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
+tables:
+ description: User table statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
+functions:
+ description: User function statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ ensure_required_libs,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+class PgUserObjStatInfo():
+    """Class to collect information about PostgreSQL user objects.
+
+    Args:
+        module (AnsibleModule): Object of AnsibleModule class.
+        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+
+    Attributes:
+        module (AnsibleModule): Object of AnsibleModule class.
+        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+        executed_queries (list): List of executed queries.
+        info (dict): Statistics dictionary.
+        obj_func_mapping (dict): Mapping of object types to corresponding functions.
+        schema (str): Name of a schema to restrict stat collecting.
+    """
+
+    def __init__(self, module, cursor):
+        self.module = module
+        self.cursor = cursor
+        # Result skeleton; the get_*_stat methods fill these subdicts in.
+        self.info = {
+            'functions': {},
+            'indexes': {},
+            'tables': {},
+        }
+        # Maps the user-facing "filter" keywords to collector methods.
+        self.obj_func_mapping = {
+            'functions': self.get_func_stat,
+            'indexes': self.get_idx_stat,
+            'tables': self.get_tbl_stat,
+        }
+        # Set by set_schema() when collection is restricted to one schema.
+        self.schema = None
+
+    def collect(self, filter_=None, schema=None):
+        """Collect statistics information of user objects.
+
+        Kwargs:
+            filter_ (list): List of subsets which need to be collected.
+            schema (str): Restrict stat collecting by certain schema.
+
+        Returns:
+            ``self.info``.
+        """
+        if schema:
+            self.set_schema(schema)
+
+        if filter_:
+            # Run only the requested collectors; unknown keywords produce
+            # a warning instead of a failure.
+            for obj_type in filter_:
+                obj_type = obj_type.strip()
+                obj_func = self.obj_func_mapping.get(obj_type)
+
+                if obj_func is not None:
+                    obj_func()
+                else:
+                    self.module.warn("Unknown filter option '%s'" % obj_type)
+
+        else:
+            # No filter given: collect every supported subset.
+            for obj_func in self.obj_func_mapping.values():
+                obj_func()
+
+        return self.info
+
+    def get_func_stat(self):
+        """Get function statistics and fill out self.info dictionary."""
+        query = "SELECT * FROM pg_stat_user_functions"
+        if self.schema:
+            query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s"
+
+        # NOTE(review): query_params is passed even when the query has no
+        # placeholder (no schema restriction); this relies on
+        # exec_sql/psycopg2 tolerating unused parameters -- confirm.
+        result = exec_sql(self, query, query_params=(self.schema,),
+                          add_to_executed=False)
+
+        if not result:
+            return
+
+        self.__fill_out_info(result,
+                             info_key='functions',
+                             schema_key='schemaname',
+                             name_key='funcname')
+
+    def get_idx_stat(self):
+        """Get index statistics and fill out self.info dictionary."""
+        query = "SELECT * FROM pg_stat_user_indexes"
+        if self.schema:
+            query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s"
+
+        # NOTE(review): see get_func_stat about query_params without placeholder.
+        result = exec_sql(self, query, query_params=(self.schema,),
+                          add_to_executed=False)
+
+        if not result:
+            return
+
+        self.__fill_out_info(result,
+                             info_key='indexes',
+                             schema_key='schemaname',
+                             name_key='indexrelname')
+
+    def get_tbl_stat(self):
+        """Get table statistics and fill out self.info dictionary."""
+        query = "SELECT * FROM pg_stat_user_tables"
+        if self.schema:
+            query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s"
+
+        # NOTE(review): see get_func_stat about query_params without placeholder.
+        result = exec_sql(self, query, query_params=(self.schema,),
+                          add_to_executed=False)
+
+        if not result:
+            return
+
+        self.__fill_out_info(result,
+                             info_key='tables',
+                             schema_key='schemaname',
+                             name_key='relname')
+
+    def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
+        # Convert result to list of dicts to handle it easier:
+        result = [dict(row) for row in result]
+
+        # Note: rebinding ``result`` inside the loop below does not affect
+        # this iteration -- the for loop keeps iterating the list bound here.
+        for elem in result:
+            # Add schema name as a key if not presented:
+            if not self.info[info_key].get(elem[schema_key]):
+                self.info[info_key][elem[schema_key]] = {}
+
+            # Add object name key as a subkey
+            # (names are unique within a schema, so no additional checks are needed):
+            self.info[info_key][elem[schema_key]][elem[name_key]] = {}
+
+            # Add the remaining attributes of the row to that entry:
+            for key, val in iteritems(elem):
+                if key not in (schema_key, name_key):
+                    self.info[info_key][elem[schema_key]][elem[name_key]][key] = val
+
+            # Tables and indexes additionally get their on-disk size (bytes):
+            if info_key in ('tables', 'indexes'):
+                schemaname = elem[schema_key]
+                if self.schema:
+                    schemaname = self.schema
+
+                relname = '%s.%s' % (schemaname, elem[name_key])
+
+                result = exec_sql(self, "SELECT pg_relation_size (%s)",
+                                  query_params=(relname,),
+                                  add_to_executed=False)
+
+                self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0]
+
+                # Tables also get their total size including indexes/toast:
+                if info_key == 'tables':
+                    result = exec_sql(self, "SELECT pg_total_relation_size (%s)",
+                                      query_params=(relname,),
+                                      add_to_executed=False)
+
+                    self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0]
+
+    def set_schema(self, schema):
+        """If schema exists, sets self.schema, otherwise fails."""
+        query = ("SELECT 1 FROM information_schema.schemata "
+                 "WHERE schema_name = %s")
+        result = exec_sql(self, query, query_params=(schema,),
+                          add_to_executed=False)
+
+        if result and result[0][0]:
+            self.schema = schema
+        else:
+            self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ filter=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ schema=dict(type='str'),
+ trust_input=dict(type="bool", default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ filter_ = module.params["filter"]
+ schema = module.params["schema"]
+
+ if not module.params["trust_input"]:
+ check_input(module, module.params['session_role'])
+
+ # Ensure psycopg2 libraries are available before connecting to DB:
+ ensure_required_libs(module)
+ # Connect to DB and make cursor object:
+ pg_conn_params = get_conn_params(module, module.params)
+ # We don't need to commit anything, so, set it to False:
+ db_connection, dummy = connect_to_db(module, pg_conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ############################
+ # Create object and do work:
+ pg_obj_info = PgUserObjStatInfo(module, cursor)
+
+ info_dict = pg_obj_info.collect(filter_, schema)
+
+ # Clean up:
+ cursor.close()
+ db_connection.close()
+
+ # Return information:
+ module.exit_json(**info_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/requirements.txt b/ansible_collections/community/postgresql/requirements.txt
new file mode 100644
index 000000000..37ec460f8
--- /dev/null
+++ b/ansible_collections/community/postgresql/requirements.txt
@@ -0,0 +1 @@
+psycopg2-binary
diff --git a/ansible_collections/community/postgresql/shippable.yml b/ansible_collections/community/postgresql/shippable.yml
new file mode 100644
index 000000000..67136a43c
--- /dev/null
+++ b/ansible_collections/community/postgresql/shippable.yml
@@ -0,0 +1,60 @@
+# Shippable CI matrix for the community.postgresql collection.
+# Each T=... entry selects a branch/test-type/group combination that is
+# executed by tests/utils/shippable/shippable.sh below.
+language: python
+
+env:
+  matrix:
+    - T=none
+
+matrix:
+  exclude:
+    - env: T=none
+  include:
+    # Sanity tests against several ansible-core branches:
+    - env: T=devel/sanity/1
+    - env: T=devel/sanity/extra
+
+    - env: T=2.10/sanity/1
+
+    - env: T=2.9/sanity/1
+
+    # Unit tests:
+    - env: T=devel/units/1
+
+    - env: T=2.10/units/1
+
+    - env: T=2.9/units/1
+
+    # Integration tests on remote instances and containers
+    # (commented-out platforms are temporarily disabled):
+    - env: T=devel/rhel/7.8/1
+    - env: T=devel/rhel/8.2/1
+    - env: T=devel/freebsd/11.1/1
+    - env: T=devel/freebsd/12.1/1
+    - env: T=devel/linux/centos6/1
+    - env: T=devel/linux/centos7/1
+    #- env: T=devel/linux/centos8/1
+    - env: T=devel/linux/fedora33/1
+    #- env: T=devel/linux/fedora34/1
+    #- env: T=devel/linux/opensuse15py2/1
+    #- env: T=devel/linux/opensuse15/1
+    #- env: T=devel/linux/ubuntu1604/1
+    - env: T=devel/linux/ubuntu1804/1
+    - env: T=devel/linux/ubuntu2004/1
+
+    - env: T=2.10/rhel/8.2/1
+
+    - env: T=2.9/rhel/8.2/1
+
+# Skip CI for branches created automatically for PRs and backports:
+branches:
+  except:
+    - "*-patch-*"
+    - "revert-*-*"
+    - "patchback/backports/*"
+
+build:
+  ci:
+    - tests/utils/shippable/timing.sh tests/utils/shippable/shippable.sh $T
+
+integrations:
+  notifications:
+    - integrationName: email
+      type: email
+      on_success: never
+      on_failure: never
+      on_start: never
+      on_pull_request: never
diff --git a/ansible_collections/community/postgresql/simplified_bsd.txt b/ansible_collections/community/postgresql/simplified_bsd.txt
new file mode 100644
index 000000000..e34763968
--- /dev/null
+++ b/ansible_collections/community/postgresql/simplified_bsd.txt
@@ -0,0 +1,7 @@
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/main.yml
new file mode 100644
index 000000000..359c5d3be
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_copy module
+- import_tasks: postgresql_copy_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml
new file mode 100644
index 000000000..5c51c108e
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml
@@ -0,0 +1,278 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# The file for testing postgresql_copy module.
+
+- vars:
+ test_table: acme
+ data_file_txt: /tmp/data.txt
+ data_file_csv: /tmp/data.csv
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
+ block:
+ # Test preparation:
+ - name: postgresql_copy - create test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ columns:
+ - id int
+ - name text
+
+ # Insert the data:
+ - name: postgresql_copy - insert rows into test table
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "INSERT INTO {{ test_table }} (id, name) VALUES (1, 'first')"
+
+ - name: postgresql_copy - ensure that test data files don't exist
+ <<: *task_parameters
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - '{{ data_file_csv }}'
+ - '{{ data_file_txt }}'
+
+ # ##############
+ # Do main tests:
+
+ # check_mode - if it's OK, must always return changed=True:
+ - name: postgresql_copy - check_mode, copy test table content to data_file_txt
+ check_mode: true
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_to: '{{ data_file_txt }}'
+ src: '{{ test_table }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+
+ # check that nothing changed after the previous step:
+ - name: postgresql_copy - check that data_file_txt doesn't exist
+ <<: *task_parameters
+ ignore_errors: true
+ shell: head -n 1 '{{ data_file_txt }}'
+
+ - assert:
+ that:
+ - result.failed == true
+ - result.rc == 1
+
+ # check_mode - if it's OK, must always return changed=True:
+ - name: postgresql_copy - check_mode, copy test table content from data_file_txt
+ check_mode: true
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_from: '{{ data_file_txt }}'
+ dst: '{{ test_table }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+
+ # check that nothing changed after the previous step:
+    - name: postgresql_copy - check that test table continues to have one row
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'SELECT * FROM {{ test_table }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # check_mode - test must fail because test table doesn't exist:
+ - name: postgresql_copy - check_mode, copy non existent table to data_file_txt
+ check_mode: true
+ ignore_errors: true
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_to: '{{ data_file_txt }}'
+ src: non_existent_table
+ trust_input: false
+
+ - assert:
+ that:
+ - result.failed == true
+ - result.queries is not defined
+
+ - name: postgresql_copy - check trust_input
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_to: '{{ data_file_txt }}'
+ src: '{{ test_table }}'
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ trust_input: false
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ - name: postgresql_copy - copy test table data to data_file_txt
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_to: '{{ data_file_txt }}'
+ src: '{{ test_table }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COPY \"{{ test_table }}\" TO '{{ data_file_txt }}'"]
+ - result.src == '{{ test_table }}'
+ - result.dst == '{{ data_file_txt }}'
+
+ # check the prev test
+ - name: postgresql_copy - check data_file_txt exists and not empty
+ <<: *task_parameters
+ shell: 'head -n 1 {{ data_file_txt }}'
+
+ - assert:
+ that:
+ - result.stdout == '1\tfirst'
+
+ # test different options and columns
+ - name: postgresql_copy - copy test table data to data_file_csv with options and columns
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_to: '{{ data_file_csv }}'
+ src: '{{ test_table }}'
+ columns:
+ - id
+ - name
+ options:
+ format: csv
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COPY \"{{ test_table }}\" (id,name) TO '{{ data_file_csv }}' (format csv)"]
+ - result.src == '{{ test_table }}'
+ - result.dst == '{{ data_file_csv }}'
+
+ # check the prev test
+ - name: postgresql_copy - check data_file_csv exists and not empty
+ <<: *task_parameters
+ shell: 'head -n 1 {{ data_file_csv }}'
+
+ - assert:
+ that:
+ - result.stdout == '1,first'
+
+ - name: postgresql_copy - copy from data_file_csv to test table
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_from: '{{ data_file_csv }}'
+ dst: '{{ test_table }}'
+ columns:
+ - id
+ - name
+ options:
+ format: csv
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COPY \"{{ test_table }}\" (id,name) FROM '{{ data_file_csv }}' (format csv)"]
+ - result.dst == '{{ test_table }}'
+ - result.src == '{{ data_file_csv }}'
+
+ - name: postgresql_copy - check that there are two rows in test table after the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM {{ test_table }} WHERE id = '1' AND name = 'first'"
+
+ - assert:
+ that:
+ - result.rowcount == 2
+
+ - name: postgresql_copy - test program option, copy to program
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ src: '{{ test_table }}'
+ copy_to: '/bin/true'
+ program: true
+ columns: id, name
+ options:
+ delimiter: '|'
+ trust_input: false
+ when: ansible_distribution != 'FreeBSD'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COPY \"{{ test_table }}\" (id, name) TO PROGRAM '/bin/true' (delimiter '|')"]
+ - result.src == '{{ test_table }}'
+ - result.dst == '/bin/true'
+ when: ansible_distribution != 'FreeBSD'
+
+ - name: postgresql_copy - test program option, copy from program
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ dst: '{{ test_table }}'
+ copy_from: 'echo 1,first'
+ program: true
+ columns: id, name
+ options:
+ delimiter: ','
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COPY \"{{ test_table }}\" (id, name) FROM PROGRAM 'echo 1,first' (delimiter ',')"]
+ - result.dst == '{{ test_table }}'
+ - result.src == 'echo 1,first'
+ when: ansible_distribution != 'FreeBSD'
+
+ - name: postgresql_copy - check that there are three rows in test table after the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM {{ test_table }} WHERE id = '1' AND name = 'first'"
+
+ - assert:
+ that:
+ - result.rowcount == 3
+
+ # clean up
+ - name: postgresql_copy - remove test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ state: absent
+
+ - name: postgresql_copy - remove test data files
+ <<: *task_parameters
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - '{{ data_file_csv }}'
+ - '{{ data_file_txt }}'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/aliases
new file mode 100644
index 000000000..2f88eca08
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/aliases
@@ -0,0 +1,3 @@
+destructive
+shippable/posix/group1
+postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml
new file mode 100644
index 000000000..766feeecc
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml
@@ -0,0 +1,11 @@
+db_name: 'ansible_db'
+db_user1: 'ansible.db.user1'
+db_user2: 'ansible.db.user2'
+tmp_dir: '/tmp'
+db_session_role1: 'session_role1'
+db_session_role2: 'session_role2'
+
+# To test trust_input parameter and
+# possibility to create a database with dots in its name
+db_name_with_dot: 'db.name'
+suspicious_db_name: '{{ db_name_with_dot }}"; --'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml
new file mode 100644
index 000000000..dd55c3f98
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml
@@ -0,0 +1,47 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- import_tasks: postgresql_db_session_role.yml
+
+# Initial tests of postgresql_db module:
+- import_tasks: postgresql_db_initial.yml
+
+# General tests:
+- import_tasks: postgresql_db_general.yml
+
+# Tests for rename value of state option
+- import_tasks: state_rename.yml
+
+# Dump/restore tests per format:
+- include_tasks: state_dump_restore.yml
+ vars:
+ test_fixture: user
+ file: '{{ loop_item }}'
+ loop:
+ - dbdata.sql
+ - dbdata.sql.gz
+ - dbdata.sql.bz2
+ - dbdata.sql.xz
+ - dbdata.tar
+ - dbdata.tar.gz
+ - dbdata.tar.bz2
+ - dbdata.tar.xz
+ - dbdata.pgc
+ - dbdata.dir
+ - dbdata.dir.gz
+ - dbdata.dir.bz2
+ - dbdata.dir.xz
+ loop_control:
+ loop_var: loop_item
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+# Dump/restore tests per other logins:
+- import_tasks: state_dump_restore.yml
+ vars:
+ file: dbdata.tar
+ test_fixture: admin
+
+# Simple test to create and then drop with force
+- import_tasks: manage_database.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/manage_database.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/manage_database.yml
new file mode 100644
index 000000000..42d0f4ee9
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/manage_database.yml
@@ -0,0 +1,9 @@
+- name: Create a simple database mydb
+ postgresql_db:
+ name: mydb
+
+- name: Drop the database with force
+ postgresql_db:
+ name: mydb
+ state: absent
+ force: true
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml
new file mode 100644
index 000000000..6a178bea1
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml
@@ -0,0 +1,152 @@
+- become_user: '{{ pg_user }}'
+ become: true
+ vars:
+ db_tablespace: bar
+ tblspc_location: /ssd
+ db_name: acme
+ block_parameters:
+ become_user: '{{ pg_user }}'
+ become: true
+ task_parameters:
+ register: result
+ pg_parameters:
+ login_user: '{{ pg_user }}'
+ block:
+ - name: postgresql_db - drop dir for test tablespace
+ become: true
+ become_user: root
+ file:
+ path: '{{ tblspc_location }}'
+ state: absent
+ ignore_errors: true
+ - name: postgresql_db - disable selinux
+ become: true
+ become_user: root
+ shell: setenforce 0
+ ignore_errors: true
+ - name: postgresql_db - create dir for test tablespace
+ become: true
+ become_user: root
+ file:
+ path: '{{ tblspc_location }}'
+ state: directory
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ mode: '0700'
+  - name: postgresql_db - create a new tablespace
+ postgresql_tablespace:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: '{{ db_tablespace }}'
+ location: '{{ tblspc_location }}'
+ - register: result
+ name: postgresql_db_tablespace - Create DB with tablespace option in check mode
+ check_mode: true
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ tablespace: '{{ db_tablespace }}'
+ - assert:
+ that:
+ - result is changed
+ - register: result
+ name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 0 because actually nothing changed
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}''
+
+ '
+ - assert:
+ that:
+ - result.rowcount == 0
+ - register: result
+ name: postgresql_db_tablespace - Create DB with tablespace option
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ tablespace: '{{ db_tablespace }}'
+ - assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['CREATE DATABASE "{{ db_name }}" TABLESPACE "{{ db_tablespace }}"']
+ - register: result
+ name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}''
+
+ '
+ - assert:
+ that:
+ - result.rowcount == 1
+ - register: result
+ name: postgresql_db_tablespace - The same DB with tablespace option again
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ tablespace: '{{ db_tablespace }}'
+ - assert:
+ that:
+ - result is not changed
+ - register: result
+ name: postgresql_db_tablespace - Change tablespace in check_mode
+ check_mode: true
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ tablespace: pg_default
+ - assert:
+ that:
+ - result is changed
+ - register: result
+ name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1 because actually nothing changed
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}''
+
+ '
+ - assert:
+ that:
+ - result.rowcount == 1
+ - register: result
+ name: postgresql_db_tablespace - Change tablespace in actual mode
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ tablespace: pg_default
+ - assert:
+ that:
+ - result is changed
+ - register: result
+ name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''pg_default''
+
+ '
+ - assert:
+ that:
+ - result.rowcount == 1
+ - register: result
+ name: postgresql_db_tablespace - Drop test DB
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ state: absent
+ - register: result
+ name: postgresql_db_tablespace - Remove tablespace
+ postgresql_tablespace:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: '{{ db_tablespace }}'
+ state: absent
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml
new file mode 100644
index 000000000..472524a23
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml
@@ -0,0 +1,366 @@
+#
+# Create and destroy db
+#
+- name: Create DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: assert that module reports the db was created
+ assert:
+ that:
+ - result is changed
+ - result.db == "{{ db_name }}"
+ - result.executed_commands == ['CREATE DATABASE "{{ db_name }}"']
+
+- name: Check that database created
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Run create on an already created db
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: assert that module reports the db was unchanged
+ assert:
+ that:
+ - result is not changed
+
+- name: Destroy DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: assert that module reports the db was changed
+ assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['DROP DATABASE "{{ db_name }}"']
+
+- name: Check that database was destroyed
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Destroy DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: assert that removing an already removed db makes no change
+ assert:
+ that:
+ - result is not changed
+
+
+# This corner case works to add but not to drop. This is sufficiently crazy
+# that I'm not going to attempt to fix it unless someone lets me know that they
+# need the functionality
+#
+# - postgresql_db:
+# state: 'present'
+# name: '"silly.""name"'
+# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql
+# register: result
+#
+# - assert:
+# that: "result.stdout_lines[-1] == '(1 row)'"
+# - postgresql_db:
+# state: absent
+# name: '"silly.""name"'
+# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql
+# register: result
+#
+# - assert:
+# that: "result.stdout_lines[-1] == '(0 rows)'"
+
+#
+# Test conn_limit, encoding, collate, ctype, template options
+#
+- name: Create a DB with conn_limit, encoding, collate, ctype, and template options
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: '{{ db_name }}'
+ state: 'present'
+ conn_limit: '100'
+ encoding: 'LATIN1'
+ lc_collate: 'pt_BR{{ locale_latin_suffix }}'
+ lc_ctype: 'es_ES{{ locale_latin_suffix }}'
+ template: 'template0'
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.executed_commands == ["CREATE DATABASE \"{{ db_name }}\" TEMPLATE \"template0\" ENCODING 'LATIN1' LC_COLLATE 'pt_BR{{ locale_latin_suffix }}' LC_CTYPE 'es_ES{{ locale_latin_suffix }}' CONNECTION LIMIT 100"] or result.executed_commands == ["CREATE DATABASE \"{{ db_name }}\" TEMPLATE \"template0\" ENCODING E'LATIN1' LC_COLLATE E'pt_BR{{ locale_latin_suffix }}' LC_CTYPE E'es_ES{{ locale_latin_suffix }}' CONNECTION LIMIT 100"]
+
+- name: Check that the DB has all of our options
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select datname, datconnlimit, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'LATIN1' in result.stdout_lines[-2]"
+ - "'pt_BR' in result.stdout_lines[-2]"
+ - "'es_ES' in result.stdout_lines[-2]"
+ - "'UTF8' not in result.stdout_lines[-2]"
+ - "'en_US' not in result.stdout_lines[-2]"
+ - "'100' in result.stdout_lines[-2]"
+
+- name: Check that running db creation with options a second time does nothing
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: '{{ db_name }}'
+ state: 'present'
+ conn_limit: '100'
+ encoding: 'LATIN1'
+ lc_collate: 'pt_BR{{ locale_latin_suffix }}'
+ lc_ctype: 'es_ES{{ locale_latin_suffix }}'
+ template: 'template0'
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+
+- name: Check that attempting to change encoding returns an error
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: '{{ db_name }}'
+ state: 'present'
+ encoding: 'UTF8'
+ lc_collate: 'pt_BR{{ locale_utf8_suffix }}'
+ lc_ctype: 'es_ES{{ locale_utf8_suffix }}'
+ template: 'template0'
+ login_user: "{{ pg_user }}"
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+
+- name: Check that changing the conn_limit actually works
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: '{{ db_name }}'
+ state: 'present'
+ conn_limit: '200'
+ encoding: 'LATIN1'
+ lc_collate: 'pt_BR{{ locale_latin_suffix }}'
+ lc_ctype: 'es_ES{{ locale_latin_suffix }}'
+ template: 'template0'
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['ALTER DATABASE "{{ db_name }}" CONNECTION LIMIT 200']
+
+- name: Check that conn_limit has actually been set / updated to 200
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "SELECT datconnlimit AS conn_limit FROM pg_database WHERE datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'200' == '{{ result.stdout_lines[-2] | trim }}'"
+
+- name: Cleanup test DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: '{{ db_name }}'
+ state: 'absent'
+ login_user: "{{ pg_user }}"
+
+- shell: echo "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ become_user: "{{ pg_user }}"
+ become: true
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+#
+# Test db ownership
+#
+- name: Create an unprivileged user to own a DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ item }}"
+ encrypted: 'true'
+ password: "md55c8ccfd9d6711fc69a7eae647fc54f51"
+ login_user: "{{ pg_user }}"
+ db: postgres
+ loop:
+ - "{{ db_user1 }}"
+ - "{{ db_user2 }}"
+
+- name: Create db with user ownership
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['CREATE DATABASE "{{ db_name }}" OWNER "{{ db_user1 }}"']
+
+- name: Check that the user owns the newly created DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: >
+ SELECT 1 FROM pg_catalog.pg_database
+ WHERE datname = '{{ db_name }}'
+ AND pg_catalog.pg_get_userbyid(datdba) = '{{ db_user1 }}'
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: Change the owner on an existing db, username with dots
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ owner: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['ALTER DATABASE "{{ db_name }}" OWNER TO "{{ db_user2 }}"']
+
+- name: Check the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: >
+ SELECT 1 FROM pg_catalog.pg_database
+ WHERE datname = '{{ db_name }}'
+ AND pg_catalog.pg_get_userbyid(datdba) = '{{ db_user2 }}'
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: Change the owner on an existing db
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ owner: "{{ pg_user }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: assert that ansible says it changed the db
+ assert:
+ that:
+ - result is changed
+
+- name: Check that the user owns the newly created DB
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select pg_catalog.pg_get_userbyid(datdba) from pg_catalog.pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'{{ pg_user }}' == '{{ result.stdout_lines[-2] | trim }}'"
+
+- name: Cleanup db
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "absent"
+ login_user: "{{ pg_user }}"
+
+- name: Check that database was destroyed
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Cleanup test user
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: 'absent'
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Check that they were removed
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml
new file mode 100644
index 000000000..74f9e3ff3
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml
@@ -0,0 +1,80 @@
+- name: Check that becoming a non-existing user throws an error
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: must_fail
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+
+- name: Create a high privileged user
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role1 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create a low privileged user using the newly created user
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role2 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "LOGIN"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ db: postgres
+
+- name: Create DB as session_role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ register: result
+
+- name: Check that database created and is owned by correct user
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select rolname from pg_database join pg_roles on datdba = pg_roles.oid where datname = '{{ db_session_role1 }}';" | psql -AtXq postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '{{ db_session_role1 }}'"
+
+- name: Fail when creating database as low privileged user
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_session_role2 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role2 }}"
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+
+- name: Drop test db
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: absent
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml
new file mode 100644
index 000000000..dbb3bf7a1
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml
@@ -0,0 +1,235 @@
+# test code for state dump and restore for postgresql_db module
+# copied from mysql_db/tasks/state_dump_import.yml
+# (c) 2014, Wayne Rosario <wrosario@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+
+- name: Create a test user
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: "present"
+ encrypted: 'true'
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- set_fact: db_file_name="{{tmp_dir}}/{{file}}"
+
+- set_fact:
+ admin_str: "psql -U {{ pg_user }}"
+
+- set_fact:
+ user_str: "env PGPASSWORD=password psql -h localhost -U {{ db_user1 }} {{ db_name }}"
+ when: test_fixture == "user"
+ # "-n public" is required to work around pg_restore issues with plpgsql
+
+- set_fact:
+ user_str: "psql -U {{ pg_user }} {{ db_name }}"
+ when: test_fixture == "admin"
+
+
+
+- set_fact:
+ sql_create: "create table employee(id int, name varchar(100));"
+ sql_insert: "insert into employee values (47,'Joe Smith');"
+ sql_select: "select * from employee;"
+
+- name: state dump/restore - create database
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: state dump/restore - create table employee
+ command: '{{ user_str }} -c "{{ sql_create }}"'
+
+- name: state dump/restore - insert data into table employee
+ command: '{{ user_str }} -c "{{ sql_insert }}"'
+
+- name: state dump/restore - file name should not exist
+ file: name={{ db_file_name }} state=absent
+
+- name: test state=dump to backup the database (expect changed=true)
+ postgresql_db:
+ name: "{{ db_name }}"
+ target: "{{ db_file_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
+ target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
+ login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
+ login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
+ state: dump
+ dump_extra_args: --exclude-table=fake
+ register: result
+ become_user: "{{ pg_user }}"
+ become: true
+
+- name: assert output message backup the database
+ assert:
+ that:
+ - result is changed
+ - result.executed_commands[0] is search("--exclude-table=fake")
+
+- name: assert database was backed up successfully
+ command: file {{ db_file_name }}
+ register: result
+
+- name: state dump/restore - remove database for restore
+ postgresql_db:
+ name: "{{ db_name }}"
+ target: "{{ db_file_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
+ target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
+ login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
+ login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
+ state: absent
+
+- name: state dump/restore - re-create database
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: test state=restore to restore the database (expect changed=true)
+ postgresql_db:
+ name: "{{ db_name }}"
+ target: "{{ db_file_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
+ target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
+ login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
+ login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
+ state: restore
+ register: result
+ become_user: "{{ pg_user }}"
+ become: true
+
+- name: assert output message restore the database
+ assert:
+ that:
+ - result is changed
+
+- name: select data from table employee
+ command: '{{ user_str }} -c "{{ sql_select }}"'
+ register: result
+
+- name: assert data in database is from the restore database
+ assert:
+ that:
+ - "'47' in result.stdout"
+ - "'Joe Smith' in result.stdout"
+
+############################
+# 1. Test trust_input parameter
+# 2. Test db name containing dots
+
+- name: state dump/restore - create database, trust_input no
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_db:
+ state: present
+ name: "{{ suspicious_db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ suspicious_db_name }}\' is potentially dangerous'
+
+- name: state dump/restore - create database, trust_input true explicitly
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_db:
+ state: present
+ name: "{{ db_name_with_dot }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ trust_input: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: test state=restore to restore the database (expect changed=true)
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_db:
+ name: "{{ db_name_with_dot }}"
+ target: "{{ db_file_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
+ target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
+ login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
+ login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
+ state: restore
+ register: result
+
+- name: assert output message restore the database
+ assert:
+ that:
+ - result is changed
+
+- name: state dump/restore - remove databases
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_db:
+ state: absent
+ name: "{{ db_name_with_dot }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ trust_input: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+# Clean up
+- name: state dump/restore - remove database name
+ postgresql_db:
+ name: "{{ db_name }}"
+ target: "{{ db_file_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
+ target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
+ login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
+ login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
+ state: absent
+
+- name: remove file name
+ file: name={{ db_file_name }} state=absent
+
+- name: Remove the test user
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: "absent"
+ login_user: "{{ pg_user }}"
+ db: postgres
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_rename.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_rename.yml
new file mode 100644
index 000000000..dc87b76fb
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_rename.yml
@@ -0,0 +1,261 @@
+# 0. Check necessary options.
+# 1. When both databases do not exists, it must fail.
+# 2. When both databases exist, it must fail.
+# 3. When the source database exists and the target does not, rename it.
+# 4. When the source database doesn't exist and the target does, do nothing.
+# 5. Check mode
+
+- become_user: '{{ pg_user }}'
+ become: true
+ vars:
+ db_source_name: acme
+ db_target_name: acme1
+
+ task_parameters: &task_parameters
+ register: result
+
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+
+ block:
+ # 0. Check necessary options.
+ - name: Miss target option, must fail
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_source_name }}'
+ state: rename
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'The "target" option must be defined when the "rename" option is used.'
+
+ - name: Target and name options are the same, must fail
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_source_name }}'
+ state: rename
+ target: '{{ db_source_name }}'
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'The "name/db" option and the "target" option cannot be the same.'
+
+ - name: Maintenance_db and name options are the same, must fail
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: postgres
+ state: rename
+ target: '{{ db_source_name }}'
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'The "maintenance_db" option and the "name/db" option cannot be the same.'
+
+ # 1. When both databases do not exists, it must fail.
+ - name: Try to rename when both do not exist, must fail
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_source_name }}'
+ state: rename
+ target: '{{ db_target_name}}'
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'The source and the target databases do not exist.'
+
+ - name: Try to rename when both do not exist, must fail, check_mode
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_source_name }}'
+ state: rename
+ target: '{{ db_target_name}}'
+ ignore_errors: true
+ check_mode: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'The source and the target databases do not exist.'
+
+ # 2. When both databases exist, it must fail.
+ - name: Create test DBs
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ state: present
+ loop:
+ - '{{ db_source_name }}'
+ - '{{ db_target_name }}'
+
+ - name: Try to rename when both exist, must fail
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_source_name }}'
+ state: rename
+ target: '{{ db_target_name}}'
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'Both the source and the target databases exist.'
+
+ - name: Try to rename when both exist, must fail
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_source_name }}'
+ state: rename
+ target: '{{ db_target_name}}'
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'Both the source and the target databases exist.'
+
+ # 3. When the source database exists and the target does not, rename it.
+ # 4. When the source database doesn't exist and the target does, do nothing.
+ - name: Drop the target DB
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_target_name }}'
+ state: absent
+
+ - name: Rename DB in check mode
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_source_name }}'
+ state: rename
+ target: '{{ db_target_name }}'
+ check_mode: true
+
+ - assert:
+ that:
+ - result is succeeded
+ - result.executed_commands == []
+
+ - name: Check that nothing really happened
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM pg_database WHERE datname = '{{ db_source_name }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Check that nothing really happened
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM pg_database WHERE datname = '{{ db_target_name }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Rename DB in actual mode
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_source_name }}'
+ state: rename
+ target: '{{ db_target_name}}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['ALTER DATABASE "{{ db_source_name }}" RENAME TO "{{ db_target_name}}"']
+
+ - name: Check the changes have been made
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM pg_database WHERE datname = '{{ db_source_name }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Check the changes have been made
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM pg_database WHERE datname = '{{ db_target_name }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to rename same DBs again in check mode
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_source_name }}'
+ state: rename
+ target: '{{ db_target_name}}'
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.executed_commands == []
+
+ - name: Try to rename same DBs again in actual mode
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_source_name }}'
+ state: rename
+ target: '{{ db_target_name}}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.executed_commands == []
+
+ - name: Check the state is the same
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM pg_database WHERE datname = '{{ db_source_name }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Check the state is the same
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM pg_database WHERE datname = '{{ db_target_name }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Clean up
+ - name: Remove test DB
+ <<: *task_parameters
+ postgresql_db:
+ <<: *pg_parameters
+ name: '{{ db_target_name }}'
+ state: absent
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/aliases
new file mode 100644
index 000000000..142e8aa07
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/aliases
@@ -0,0 +1,3 @@
+destructive
+shippable/posix/group1
+skip/freebsd
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/defaults/main.yml
new file mode 100644
index 000000000..05bac61d4
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/defaults/main.yml
@@ -0,0 +1,2 @@
+db_session_role1: 'session_role1'
+db_session_role2: 'session_role2'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/meta/main.yml
new file mode 100644
index 000000000..0ec7d2fcc
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_pkg_mgr
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/main.yml
new file mode 100644
index 000000000..1fa365be3
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/main.yml
@@ -0,0 +1,26 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- import_tasks: postgresql_ext_session_role.yml
+
+# Initial CI tests of postgresql_ext module.
+# pg_extension system view is available from PG 9.1.
+# The tests are restricted by Fedora because there will be errors related with
+# attempts to change the environment during postgis installation or
+# missing postgis package in repositories.
+# Anyway, these tests completely depend on Postgres version,
+# not specific distributions.
+- import_tasks: postgresql_ext_initial.yml
+ when:
+ - postgres_version_resp.stdout is version('9.1', '>=')
+ - ansible_distribution == 'Fedora'
+
+# CI tests of "version" option.
+# It uses a mock extension, see test/integration/targets/setup_postgresql_db/.
+# TODO: change postgresql_ext_initial.yml to use the mock extension too.
+- import_tasks: postgresql_ext_version_opt.yml
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - postgres_version_resp.stdout is version('9.1', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml
new file mode 100644
index 000000000..3e3eeda83
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml
@@ -0,0 +1,208 @@
+---
+- name: postgresql_ext - install postgis on Linux
+ package: name=postgis state=present
+ when: ansible_os_family != "Windows"
+
+- name: postgresql_ext - create schema schema1
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_schema:
+ database: postgres
+ name: schema1
+ state: present
+
+- name: postgresql_ext - drop extension if exists
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: DROP EXTENSION IF EXISTS postgis
+ ignore_errors: true
+
+- name: postgresql_ext - create extension postgis in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ login_db: postgres
+ login_port: 5432
+ name: postgis
+ check_mode: true
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+
+- name: postgresql_ext - check that extension doesn't exist after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: SELECT extname FROM pg_extension WHERE extname='postgis'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_ext - create extension postgis
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ login_db: postgres
+ login_port: 5432
+ name: postgis
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE EXTENSION "postgis"']
+
+- name: postgresql_ext - check that extension exists after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: SELECT extname FROM pg_extension WHERE extname='postgis'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_ext - drop extension postgis
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ db: postgres
+ name: postgis
+ state: absent
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP EXTENSION "postgis"']
+
+- name: postgresql_ext - check that extension doesn't exist after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: SELECT extname FROM pg_extension WHERE extname='postgis'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_ext - create extension postgis
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ db: postgres
+ name: postgis
+ schema: schema1
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE EXTENSION "postgis" WITH SCHEMA "schema1"']
+
+- name: postgresql_ext - check that extension exists after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: "SELECT extname FROM pg_extension AS e LEFT JOIN pg_catalog.pg_namespace AS n \nON n.oid = e.extnamespace WHERE e.extname='postgis' AND n.nspname='schema1'\n"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_ext - drop extension postgis cascade
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ db: postgres
+ name: postgis
+ state: absent
+ cascade: true
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP EXTENSION "postgis" CASCADE']
+
+- name: postgresql_ext - check that extension doesn't exist after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: SELECT extname FROM pg_extension WHERE extname='postgis'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_ext - create extension postgis cascade
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ db: postgres
+ name: postgis
+ cascade: true
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '<=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE EXTENSION "postgis" CASCADE']
+ when: postgres_version_resp.stdout is version('9.6', '<=')
+
+- name: postgresql_ext - check that extension exists after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: SELECT extname FROM pg_extension WHERE extname='postgis'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '<=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.6', '<=')
+
+- name: postgresql_ext - check that using a dangerous name fails
+ postgresql_ext:
+ db: postgres
+ name: postgis
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ trust_input: false
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml
new file mode 100644
index 000000000..29173fd0b
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml
@@ -0,0 +1,114 @@
+- name: Create a high privileged user
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role1 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create DB as session_role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ register: result
+
+- name: Check that pg_extension exists (PostgreSQL >= 9.1)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select count(*) from pg_class where relname='pg_extension' and relkind='r'" | psql -AtXq postgres
+ register: pg_extension
+
+- name: Remove plpgsql from testdb using postgresql_ext
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_ext:
+ name: plpgsql
+ db: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ state: absent
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- name: Fail when trying to create an extension as a mere mortal user
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_ext:
+ name: plpgsql
+ db: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role2 }}"
+ ignore_errors: true
+ register: result
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- assert:
+ that:
+ - result is failed
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- name: Install extension as session_role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_ext:
+ name: plpgsql
+ db: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ trust_input: false
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- name: Check that extension is created and is owned by session_role
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select rolname from pg_extension join pg_roles on extowner=pg_roles.oid where extname='plpgsql';" | psql -AtXq "{{ db_session_role1 }}"
+ register: result
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '{{ db_session_role1 }}'"
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- name: Remove plpgsql from testdb using postgresql_ext
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_ext:
+ name: plpgsql
+ db: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ state: absent
+ trust_input: false
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- name: Drop test db
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: absent
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: Drop test users
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ item }}"
+ state: absent
+ login_user: "{{ pg_user }}"
+ db: postgres
+ with_items:
+ - "{{ db_session_role1 }}"
+ - "{{ db_session_role2 }}"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml
new file mode 100644
index 000000000..2443fe785
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml
@@ -0,0 +1,554 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Tests for postgresql_ext version option
+
+- vars:
+ test_ext: dummy
+ test_schema: schema1
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
+ block:
+ # Preparation:
+ - name: postgresql_ext_version - create schema schema1
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: "{{ test_schema }}"
+
+ # Do tests:
+ - name: postgresql_ext_version - create extension of specific version, check mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '1.0'
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_ext_version - check that nothing was actually changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: postgresql_ext_version - create extension of specific version
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '1.0'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["CREATE EXTENSION \"{{ test_ext }}\" WITH SCHEMA \"{{ test_schema }}\" VERSION '1.0'"]
+
+ - name: postgresql_ext_version - check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - try to create extension of the same version again in check_mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '1.0'
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_ext_version - check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - try to create extension of the same version again in actual mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '1.0'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_ext_version - check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - update the extension to the next version in check_mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '2.0'
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_ext_version - check, the version must be 1.0
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - update the extension to the next version
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '2.0'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE TO '2.0'"]
+
+ - name: postgresql_ext_version - check, the version must be 2.0
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '2.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - check that version won't be changed if version won't be passed
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ trust_input: false
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_ext_version - check, the version must be 2.0
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '2.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - update the extension to the latest version
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: latest
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE"]
+
+ - name: postgresql_ext_version - check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '4.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - try to update the extension to the latest version again which always runs an update.
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: latest
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_ext_version - check that version number did not change even though update ran
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '4.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - try to downgrade the extension version, must fail
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '1.0'
+ trust_input: false
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result.failed == true
+
+ - name: postgresql_ext_version - drop the extension in check_mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ state: absent
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_ext_version - check that extension exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '4.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - drop the extension in actual mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ state: absent
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_ext_version - check that extension doesn't exist after the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: postgresql_ext_version - try to drop the non-existent extension again
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ state: absent
+ trust_input: false
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_ext_version - create the extension without passing version
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["CREATE EXTENSION \"{{ test_ext }}\""]
+
+ - name: postgresql_ext_version - check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '4.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - try to install non-existent extension
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: non_existent
+ trust_input: false
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result.failed == true
+ - result.msg == "Extension non_existent is not available"
+
+ ######################################################################
+ # https://github.com/ansible-collections/community.general/issues/1095
+ - name: Install postgis
+ package:
+ name: '{{ postgis }}'
+
+ - name: Create postgis extension
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: postgis
+ version: latest
+
+ - assert:
+ that:
+ - result is changed
+
+ # https://github.com/ansible-collections/community.postgresql/issues/137
+ - name: Drop extension
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: '{{ test_ext }}'
+ schema: '{{ test_schema }}'
+ state: absent
+
+ - name: Non standard version
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: '{{ test_ext }}'
+ schema: '{{ test_schema }}'
+ version: 0
+
+ - name: Test
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['major'] == 0
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['minor'] == None
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['raw'] == '0'
+
+ - name: Upgrade extension to a version that have a sub minor version 3.0-1
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: '{{ test_ext }}'
+ schema: '{{ test_schema }}'
+ version: '3.0-1'
+
+ - name: Test
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['major'] == 3
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['minor'] == 0
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['raw'] == '3.0-1'
+
+ - name: Upgrade extension to version 3.0-foo
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: '{{ test_ext }}'
+ schema: '{{ test_schema }}'
+ version: '3.0-foo'
+
+ - name: Test
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['major'] == None
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['minor'] == None
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['raw'] == '3.0-foo'
+
+ - name: Upgrade extension to version 3.beta
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: '{{ test_ext }}'
+ schema: '{{ test_schema }}'
+ version: '3.beta'
+
+ - name: Test
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['major'] == None
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['minor'] == None
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['raw'] == '3.beta'
+
+ - name: Upgrade extension to version 3-1.0
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: '{{ test_ext }}'
+ schema: '{{ test_schema }}'
+ version: '3-1.0'
+
+ - name: Test
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['major'] == 3
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['minor'] == 1
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['raw'] == '3-1.0'
+
+ - name: Upgrade extension to version 3-1.0-1
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: '{{ test_ext }}'
+ schema: '{{ test_schema }}'
+ version: '3-1.0-1'
+
+ - name: Test
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['major'] == 3
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['minor'] == 1
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['raw'] == '3-1.0-1'
+
+ - name: Upgrade extension to version 3-1.foo
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: '{{ test_ext }}'
+ schema: '{{ test_schema }}'
+ version: '3-1.foo'
+
+ - name: Test
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['major'] == None
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['minor'] == None
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['raw'] == '3-1.foo'
+
+ - name: Upgrade extension to version v4
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: '{{ test_ext }}'
+ schema: '{{ test_schema }}'
+ version: 'v4'
+
+ - name: Test
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['major'] == None
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['minor'] == None
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['raw'] == 'v4'
+
+ - name: Upgrade extension to the latest
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: '{{ test_ext }}'
+ schema: '{{ test_schema }}'
+ version: latest
+
+ - name: Test
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['major'] == 4
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['minor'] == 0
+ - result['databases']['postgres']['extensions']['dummy']['extversion']['raw'] == '4.0'
+
+ # Cleanup:
+ - name: postgresql_ext_version - drop the extension
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ state: absent
+ trust_input: false
+
+ - name: postgresql_ext_version - drop the schema
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: "{{ test_schema }}"
+ state: absent
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/main.yml
new file mode 100644
index 000000000..2f5945616
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_idx module
+- import_tasks: postgresql_idx_initial.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml
new file mode 100644
index 000000000..3c2dc3fb6
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml
@@ -0,0 +1,377 @@
+- name: postgresql_idx - create test table called test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE test_table (id int, story text);"
+ ignore_errors: true
+
+- name: postgresql_idx - drop test tablespace called ssd if exists
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "DROP TABLESPACE IF EXISTS ssd;"
+ ignore_errors: true
+
+- name: postgresql_idx - drop dir for test tablespace
+ become: true
+ file:
+ path: /mnt/ssd
+ state: absent
+ ignore_errors: true
+
+- name: postgresql_idx - create dir for test tablespace
+ become: true
+ file:
+ path: /mnt/ssd
+ state: directory
+ owner: '{{ pg_user }}'
+ mode: '0755'
+ ignore_errors: true
+
+- name: postgresql_idx - create test tablespace called ssd
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLESPACE ssd LOCATION '/mnt/ssd';"
+ ignore_errors: true
+ register: tablespace
+
+- name: postgresql_idx - create test schema
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE SCHEMA foo;"
+ ignore_errors: true
+
+- name: postgresql_idx - create table in non-default schema
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE foo.foo_table (id int, story text);"
+ ignore_errors: true
+
+- name: postgresql_idx - create btree index in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: id, story
+ idxname: Test0_idx
+ check_mode: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == ''
+ - result.name == 'Test0_idx'
+ - result.state == 'absent'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == ''
+ - result.query == ''
+
+- name: postgresql_idx - check nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_indexes WHERE indexname = 'Test0_idx'
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_idx - create btree index concurrently
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: id, story
+ idxname: Test0_idx
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == 'test_table'
+ - result.name == 'Test0_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == 'public'
+ - result.query == 'CREATE INDEX CONCURRENTLY "Test0_idx" ON "public"."test_table" USING BTREE (id, story)'
+
+- name: postgresql_idx - check the index exists after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_indexes WHERE indexname = 'Test0_idx'
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_idx - try to create existing index again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: id, story
+ idxname: Test0_idx
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.tblname == 'test_table'
+ - result.name == 'Test0_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == 'public'
+ - result.query == ''
+
+- name: postgresql_idx - create btree index - non-default schema, tablespace, storage parameter
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ schema: foo
+ table: foo_table
+ columns:
+ - id
+ - story
+ idxname: foo_test_idx
+ tablespace: ssd
+ storage_params: fillfactor=90
+ trust_input: false
+ register: result
+ ignore_errors: true
+ when: tablespace.rc == 0
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == 'foo_table'
+ - result.name == 'foo_test_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == 'ssd'
+ - result.storage_params == [ "fillfactor=90" ]
+ - result.schema == 'foo'
+ - result.query == 'CREATE INDEX CONCURRENTLY "foo_test_idx" ON "foo"."foo_table" USING BTREE (id,story) WITH (fillfactor=90) TABLESPACE "ssd"'
+ when: tablespace.rc == 0
+
+- name: postgresql_idx - create brin index not concurrently
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ schema: public
+ table: test_table
+ state: present
+ type: brin
+ columns: id
+ idxname: test_brin_idx
+ concurrent: false
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == 'test_table'
+ - result.name == 'test_brin_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == 'public'
+ - result.query == 'CREATE INDEX "test_brin_idx" ON "public"."test_table" USING brin (id)'
+ when: postgres_version_resp.stdout is version('9.5', '>=')
+
+- name: postgresql_idx - create index with condition
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: id
+ idxname: test1_idx
+ cond: id > 1 AND id != 10
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == 'test_table'
+ - result.name == 'test1_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == 'public'
+ - result.query == 'CREATE INDEX CONCURRENTLY "test1_idx" ON "public"."test_table" USING BTREE (id) WHERE id > 1 AND id != 10'
+
+- name: postgresql_idx - create unique index
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: story
+ idxname: test_unique0_idx
+ unique: true
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == 'test_table'
+ - result.name == 'test_unique0_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == 'public'
+ - result.query == 'CREATE UNIQUE INDEX CONCURRENTLY "test_unique0_idx" ON "public"."test_table" USING BTREE (story)'
+
+- name: postgresql_idx - avoid unique index with a type other than btree
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: story
+ idxname: test_unique0_idx
+ unique: true
+ concurrent: false
+ type: brin
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.msg == 'Only btree currently supports unique indexes'
+
+- name: postgresql_idx - drop index from specific schema cascade in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ schema: foo
+ name: foo_test_idx
+ cascade: true
+ state: absent
+ concurrent: false
+ trust_input: true
+ check_mode: true
+ register: result
+ ignore_errors: true
+ when: tablespace.rc == 0
+
+- assert:
+ that:
+ - result is changed
+ - result.name == 'foo_test_idx'
+ - result.state == 'present'
+ - result.schema == 'foo'
+ - result.query == ''
+ when: tablespace.rc == 0
+
+- name: postgresql_idx - check the index exists after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_indexes WHERE indexname = 'foo_test_idx' AND schemaname = 'foo'
+ register: result
+ when: tablespace.rc == 0
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: tablespace.rc == 0
+
+- name: postgresql_idx - drop index from specific schema cascade
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ schema: foo
+ name: foo_test_idx
+ cascade: true
+ state: absent
+ concurrent: false
+ register: result
+ ignore_errors: true
+ when: tablespace.rc == 0
+
+- assert:
+ that:
+ - result is changed
+ - result.name == 'foo_test_idx'
+ - result.state == 'absent'
+ - result.schema == 'foo'
+ - result.query == 'DROP INDEX "foo"."foo_test_idx" CASCADE'
+ when: tablespace.rc == 0
+
+- name: postgresql_idx - check the index doesn't exist after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_indexes WHERE indexname = 'foo_test_idx' and schemaname = 'foo'
+ register: result
+ when: tablespace.rc == 0
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: tablespace.rc == 0
+
+- name: postgresql_idx - try to drop not existing index
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ schema: foo
+ name: foo_test_idx
+ state: absent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.query == ''
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/aliases
new file mode 100644
index 000000000..786e05315
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/group1
+skip/freebsd
+skip/rhel
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml
new file mode 100644
index 000000000..7a8fe2a37
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+pg_user: postgres
+db_default: postgres
+
+test_table1: acme1
+test_pub: first_publication
+test_pub2: second_publication
+replication_role: logical_replication
+replication_pass: alsdjfKJKDf1#
+test_db: acme_db
+test_subscription: test
+test_subscription2: test2
+conn_timeout: 100
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml
new file mode 100644
index 000000000..d72e4d23c
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_replication
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml
new file mode 100644
index 000000000..04c7788ad
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml
@@ -0,0 +1,12 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# For testing getting publication and subscription info
+- import_tasks: setup_publication.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18'
+
+# Initial CI tests of postgresql_info module
+- import_tasks: postgresql_info_initial.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml
new file mode 100644
index 000000000..6dfe50542
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml
@@ -0,0 +1,243 @@
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ db_default }}'
+ connect_params:
+ connect_timeout: 30
+
+
+ block:
+
+ - name: Create test subscription
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ login_db: '{{ test_db }}'
+ state: present
+ publications: '{{ test_pub }}'
+ connparams:
+ host: 127.0.0.1
+ port: '{{ primary_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+
+ - name: Create test subscription
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription2 }}'
+ login_db: '{{ test_db }}'
+ state: present
+ publications: '{{ test_pub2 }}'
+ connparams:
+ host: 127.0.0.1
+ port: '{{ primary_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+
+ - name: postgresql_info - create role to check session_role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ login_user: "{{ pg_user }}"
+ name: session_superuser
+ role_attr_flags: SUPERUSER
+
+ - name: postgresql_info - create extra DBs for testing
+ <<: *task_parameters
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ login_port: '{{ replica_port }}'
+ name: "{{ item }}"
+ loop:
+ - db1
+ - db2
+
+ - name: postgresql_info - create extra schemas for testing
+ <<: *task_parameters
+ postgresql_schema:
+ login_user: '{{ pg_user }}'
+ login_port: '{{ replica_port }}'
+ db: '{{ item[0] }}'
+ name: "{{ item[1] }}"
+ loop:
+ - [ "db1", "db1_schema1"]
+ - [ "db1", "db1_schema2"]
+ - [ "db2", "db2_schema1"]
+ - [ "db2", "db2_schema2"]
+
+ - name: postgresql_table - create extra tables for testing
+ <<: *task_parameters
+ postgresql_table:
+ login_user: '{{ pg_user }}'
+ login_port: '{{ replica_port }}'
+ db: '{{ item[0] }}'
+ name: "{{ item[1] }}.{{ item[2] }}"
+ columns: waste_id int
+ loop:
+ - [ "db1", "db1_schema1", "db1_schema1_table1"]
+ - [ "db1", "db1_schema1", "db1_schema1_table2"]
+ - [ "db1", "db1_schema2", "db1_schema2_table1"]
+ - [ "db1", "db1_schema2", "db1_schema2_table2"]
+ - [ "db2", "db2_schema1", "db2_schema1_table1"]
+ - [ "db2", "db2_schema1", "db2_schema1_table2"]
+ - [ "db2", "db2_schema2", "db2_schema2_table1"]
+ - [ "db2", "db2_schema2", "db2_schema2_table2"]
+
+ - name: postgresql_info - test return values and session_role param
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ session_role: session_superuser
+
+ - assert:
+ that:
+ - result.version != {}
+ - result.version.raw is search('PostgreSQL')
+ - result.in_recovery == false
+ - result.databases.{{ db_default }}.collate
+ - result.databases.{{ db_default }}.languages
+ - result.databases.{{ db_default }}.namespaces
+ - result.databases.{{ db_default }}.extensions
+ - result.databases.{{ test_db }}.subscriptions.{{ test_subscription }}
+ - result.databases.{{ test_db }}.subscriptions.{{ test_subscription2 }}
+
+ - result.databases.db1.namespaces.db1_schema1
+ - result.databases.db1.namespaces.db1_schema2
+ - result.databases.db2.namespaces.db2_schema1
+ - result.databases.db2.namespaces.db2_schema2
+
+ - result.settings
+ - result.tablespaces
+ - result.roles
+
+ - assert:
+ that:
+ - result.version.patch != {}
+ - result.version.full == '{{ result.version.major }}.{{ result.version.minor }}.{{ result.version.patch }}'
+ when: result.version.major == 9
+
+ - assert:
+ that:
+ - result.version.full == '{{ result.version.major }}.{{ result.version.minor }}'
+ when: result.version.major >= 10
+
+ - name: postgresql_info - check filter param passed by list
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ filter:
+ - ver*
+ - rol*
+ - in_recov*
+
+ - assert:
+ that:
+ - result.version != {}
+ - result.roles
+ - result.in_recovery == false
+ - result.databases == {}
+ - result.repl_slots == {}
+ - result.replications == {}
+ - result.settings == {}
+ - result.tablespaces == {}
+
+ - name: postgresql_info - check filter param passed by string
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ filter: ver*,role*
+
+ - assert:
+ that:
+ - result.version != {}
+ - result.roles
+ - result.databases == {}
+ - result.repl_slots == {}
+ - result.replications == {}
+ - result.settings == {}
+ - result.tablespaces == {}
+
+ - name: postgresql_info - check filter param passed by string
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ filter: ver*
+
+ - assert:
+ that:
+ - result.version
+ - result.roles == {}
+
+ - name: postgresql_info - check excluding filter param passed by list
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ filter:
+ - "!ver*"
+ - "!rol*"
+ - "!in_rec*"
+
+ - assert:
+ that:
+ - result.version == {}
+ - result.in_recovery == None
+ - result.roles == {}
+ - result.databases
+
+ - name: postgresql_info - test return publication info
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_db: '{{ test_db }}'
+ login_port: '{{ primary_port }}'
+ trust_input: true
+
+ - assert:
+ that:
+ - result.version != {}
+ - result.in_recovery == false
+ - result.databases.{{ db_default }}.collate
+ - result.databases.{{ db_default }}.languages
+ - result.databases.{{ db_default }}.namespaces
+ - result.databases.{{ db_default }}.extensions
+ - result.databases.{{ test_db }}.publications.{{ test_pub }}.ownername == '{{ pg_user }}'
+ - result.databases.{{ test_db }}.publications.{{ test_pub2 }}.puballtables == true
+ - result.settings
+ - result.tablespaces
+ - result.roles
+
+ - name: postgresql_info - test trust_input parameter
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_db: '{{ test_db }}'
+ login_port: '{{ primary_port }}'
+ trust_input: false
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/setup_publication.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/setup_publication.yml
new file mode 100644
index 000000000..c68edd456
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/setup_publication.yml
@@ -0,0 +1,64 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Preparation for further tests of postgresql_subscription module.
+
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ test_db }}'
+ connect_params:
+ connect_timeout: 30
+
+ block:
+ - name: Create test db
+ <<: *task_parameters
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ login_port: '{{ item }}'
+ maintenance_db: '{{ db_default }}'
+ name: '{{ test_db }}'
+ loop:
+ - '{{ primary_port }}'
+ - '{{ replica_port }}'
+
+ - name: Create test role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ login_port: '{{ item }}'
+ name: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ role_attr_flags: LOGIN,REPLICATION
+ loop:
+ - '{{ primary_port }}'
+ - '{{ replica_port }}'
+
+ - name: Create test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ login_port: '{{ item }}'
+ name: '{{ test_table1 }}'
+ columns:
+ - id int
+ loop:
+ - '{{ primary_port }}'
+ - '{{ replica_port }}'
+
+ - name: Create publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ login_port: '{{ primary_port }}'
+ name: '{{ test_pub }}'
+
+ - name: Create publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ login_port: '{{ primary_port }}'
+ name: '{{ test_pub2 }}'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/main.yml
new file mode 100644
index 000000000..799501432
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/main.yml
@@ -0,0 +1,25 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - default.yml
+ paths:
+ - vars
+
+# Only run on CentOS 7: on CentOS 8 the module produces a stack trace
+# because it looks for the incorrect version of plpython.
+# https://gist.github.com/samdoran/8fc1b4ae834d3e66d1895d087419b8d8
+- name: Initial CI tests of postgresql_lang module
+ when:
+ - ansible_facts.distribution == 'CentOS'
+ - ansible_facts.distribution_major_version is version ('7', '==')
+ block:
+ - include_tasks: postgresql_lang_initial.yml
+ - include_tasks: postgresql_lang_add_owner_param.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml
new file mode 100644
index 000000000..a08ff82f2
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml
@@ -0,0 +1,199 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- vars:
+ test_user1: alice
+ test_user2: bob
+ test_lang: plperl
+ non_existent_role: fake_role
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
+ block:
+ - name: Create roles for tests
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ loop:
+ - '{{ test_user1 }}'
+ - '{{ test_user2 }}'
+
+ - name: Create lang with owner in check_mode
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ owner: '{{ test_user1 }}'
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == []
+
+ - name: Check that nothing was actually changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+ AND r.rolname = '{{ test_user1 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Create lang with owner
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ owner: '{{ test_user1 }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE LANGUAGE "{{ test_lang }}"', 'ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user1 }}"']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+ AND r.rolname = '{{ test_user1 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Change lang owner in check_mode
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ owner: '{{ test_user2 }}'
+ trust_input: true
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user2 }}"']
+
+ - name: Check that nothing was actually changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+ AND r.rolname = '{{ test_user2 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Change lang owner
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ owner: '{{ test_user2 }}'
+
+ - assert:
+ that:
+ - result is changed
+      # TODO: the first element of the returned list below
+      # looks like a bug; it is not related to the owner option and needs to be checked
+ - result.queries == ["UPDATE pg_language SET lanpltrusted = false WHERE lanname = '{{ test_lang }}'", 'ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user2 }}"']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+ AND r.rolname = '{{ test_user2 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to change lang owner again to the same role
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ owner: '{{ test_user2 }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+ AND r.rolname = '{{ test_user2 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop test lang with owner, must ignore
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ state: absent
+ owner: '{{ non_existent_role }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["DROP LANGUAGE \"{{ test_lang }}\""]
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ # Clean up
+ - name: Drop test roles
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ test_user1 }}'
+ - '{{ test_user2 }}'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml
new file mode 100644
index 000000000..1d24778b4
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml
@@ -0,0 +1,231 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Preparation for tests:
+- name: Install PostgreSQL support packages
+ become: true
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ postgresql_lang_packages }}"
+ state: present
+
+###############
+# Do main tests
+#
+
+# Create language in check_mode:
+- name: postgresql_lang - create plperl in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: plperl
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+
+- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Create language:
+- name: postgresql_lang - create plperl
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: plperl
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE LANGUAGE "plperl"']
+
+- name: postgresql_lang - check that lang exists after previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Drop language in check_mode:
+- name: postgresql_lang - drop plperl in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: plperl
+ state: absent
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+
+- name: postgresql_lang - check that lang exists after previous step, rowcount must be 1
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Drop language:
+- name: postgresql_lang - drop plperl
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: plperl
+ state: absent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP LANGUAGE "plperl"']
+
+- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Check fail_on_drop true
+- name: postgresql_lang - drop c language to check fail_on_drop true
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: c
+ state: absent
+ fail_on_drop: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.failed == true
+
+# Check fail_on_drop no
+- name: postgresql_lang - drop c language to check fail_on_drop no
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: c
+ state: absent
+ fail_on_drop: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.failed == false
+
+# Create trusted language:
+- name: postgresql_lang - create plpythonu
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: plpythonu
+ trust: true
+ force_trust: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE TRUSTED LANGUAGE "plpythonu"', "UPDATE pg_language SET lanpltrusted = true WHERE lanname = 'plpythonu'"]
+
+- name: postgresql_lang - check that lang exists and it's trusted after previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plpythonu' AND lanpltrusted = 't'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Drop language cascade, tests of aliases:
+- name: postgresql_lang - drop plpythonu cascade
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_lang:
+ login_db: postgres
+ login_user: "{{ pg_user }}"
+ login_port: 5432
+ lang: plpythonu
+ state: absent
+ cascade: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP LANGUAGE "plpythonu" CASCADE']
+
+- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plpythonu'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml
new file mode 100644
index 000000000..8d4bcc7e2
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml
@@ -0,0 +1,3 @@
+postgresql_lang_packages:
+ - postgresql-plperl
+ - postgresql-plpython
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml
new file mode 100644
index 000000000..5da004c8f
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml
@@ -0,0 +1,3 @@
+postgresql_lang_packages:
+ - postgresql-plperl
+ - postgresql-plpython3
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/default.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/default.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/default.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/defaults/main.yml
new file mode 100644
index 000000000..7b1d49e44
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/defaults/main.yml
@@ -0,0 +1,6 @@
+test_group1: group1
+test_group2: group2
+test_group3: group.with.dots
+test_user1: user1
+test_user2: user.with.dots
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/main.yml
new file mode 100644
index 000000000..ea058d084
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_membership module
+- import_tasks: postgresql_membership_initial.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml
new file mode 100644
index 000000000..3c8ef17a8
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml
@@ -0,0 +1,736 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+####################
+# Prepare for tests:
+
+# Create test roles:
+- name: postgresql_membership - create test roles
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: "{{ item }}"
+ ignore_errors: true
+ with_items:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ - "{{ test_group3 }}"
+ - "{{ test_user1 }}"
+ - "{{ test_user2 }}"
+
+################
+# Do main tests:
+
+### Test check_mode
+# Grant test_group1 to test_user1 in check_mode:
+- name: postgresql_membership - grant test_group1 to test_user1 in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: present
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""]
+ - result.granted.{{ test_group1 }} == ["{{ test_user1 }}"]
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Try to revoke test_group1 from test_user1 to check that
+# nothing actually changed in check_mode at the previous step:
+- name: postgresql_membership - try to revoke test_group1 from test_user1 for checking check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: absent
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == []
+ - result.revoked.{{ test_group1 }} == []
+ - result.state == "absent"
+ - result.target_roles == ["{{ test_user1 }}"]
+### End of test check_mode
+
+# Grant test_group1 to test_user1:
+- name: postgresql_membership - grant test_group1 to test_user1
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: present
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""]
+ - result.granted.{{ test_group1 }} == ["{{ test_user1 }}"]
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Grant test_group1 to test_user1 again to check that nothing changes:
+- name: postgresql_membership - grant test_group1 to test_user1 again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: present
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == []
+ - result.granted.{{ test_group1 }} == []
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Revoke test_group1 from test_user1:
+- name: postgresql_membership - revoke test_group1 from test_user1
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: absent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\""]
+ - result.revoked.{{ test_group1 }} == ["{{ test_user1 }}"]
+ - result.state == "absent"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Revoke test_group1 from test_user1 again to check that nothing changes:
+- name: postgresql_membership - revoke test_group1 from test_user1 again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: absent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == []
+ - result.revoked.{{ test_group1 }} == []
+ - result.state == "absent"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Grant test_group1 and test_group2 to test_user1 and test_user2:
+- name: postgresql_membership - grant two groups to two users
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ user:
+ - "{{ test_user1 }}"
+ - "{{ test_user2 }}"
+ state: present
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"]
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group1 }}\" TO \"{{ test_user2 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user2 }}\""]
+ - result.granted.{{ test_group1 }} == ["{{ test_user1 }}", "{{ test_user2 }}"]
+ - result.granted.{{ test_group2 }} == ["{{ test_user1 }}", "{{ test_user2 }}"]
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"]
+
+# Grant test_group1 and test_group2 to test_user1 and test_user2 again to check that nothing changes:
+- name: postgresql_membership - grant two groups to two users again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ user:
+ - "{{ test_user1 }}"
+ - "{{ test_user2 }}"
+ state: present
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"]
+ - result.queries == []
+ - result.granted.{{ test_group1 }} == []
+ - result.granted.{{ test_group2 }} == []
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"]
+
+# Revoke only test_group1 from test_user1:
+- name: postgresql_membership - revoke one group from one user
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: absent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\""]
+ - result.revoked.{{ test_group1 }} == ["{{ test_user1 }}"]
+ - result.state == "absent"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Try to grant test_group1 and test_group2 to test_user1 and test_user2 again
+# to check that nothing changes with test_user2:
+- name: postgresql_membership - grant two groups to two users again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ user:
+ - "{{ test_user1 }}"
+ - "{{ test_user2 }}"
+ state: present
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"]
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""]
+ - result.granted.{{ test_group1 }} == ["{{ test_user1 }}"]
+ - result.granted.{{ test_group2 }} == []
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"]
+
+#####################
+# Check fail_on_role:
+
+# Try to grant non existent group to non existent role with fail_on_role=true:
+- name: postgresql_membership - grant non-existent group to non-existent role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: fake_group
+ user: fake_user
+ state: present
+ fail_on_role: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+
+# Try to grant non existent group to non existent role with fail_on_role=no:
+- name: postgresql_membership - grant non-existent group to non-existent role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: fake_group
+ user: fake_user
+ state: present
+ fail_on_role: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.granted == {}
+ - result.groups == []
+ - result.target_roles == []
+ - result.state == 'present'
+
+# Try to revoke non existent group from non existent role with fail_on_role=no:
+- name: postgresql_membership - revoke non-existent group from non-existent role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: fake_group
+ user: fake_user
+ state: absent
+ fail_on_role: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.revoked == {}
+ - result.groups == []
+ - result.target_roles == []
+ - result.state == 'absent'
+
+# Grant test_group3 with a name containing dots to test_user1.
+- name: postgresql_membership - grant test_group3 with dots to test_user1
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group3 }}"
+ user: "{{ test_user1 }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"{{ test_group3 }}\" TO \"{{ test_user1 }}\""]
+
+#############################
+# Check trust_input parameter
+
+- name: postgresql_membership - try to use dangerous input, don't trust
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group:
+ - "{{ test_group3 }}"
+ - "{{ dangerous_name }}"
+ user: "{{ test_user1 }}"
+ state: present
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
+
+- name: postgresql_membership - try to use dangerous input, trust explicitly
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group:
+ - "{{ test_group3 }}"
+ - "{{ dangerous_name }}"
+ user: "{{ test_user1 }}"
+ state: present
+ trust_input: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Role {{ dangerous_name }} does not exist'
+
+########################
+# Tests for match method
+- name: Revoke all groups from a role check mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ groups: []
+ user: "{{ test_user1 }}"
+ state: exact
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\"", "REVOKE \"{{ test_group2 }}\" FROM \"{{ test_user1 }}\"", "REVOKE \"{{ test_group3 }}\" FROM \"{{ test_user1 }}\""]
+ - result.revoked["group.with.dots"] == ["{{ test_user1 }}"]
+ - result.revoked["group1"] == ["{{ test_user1 }}"]
+ - result.revoked["group2"] == ["{{ test_user1 }}"]
+ - result.granted == {}
+
+
+- name: Check result of prev task
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user1 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == ['{{ test_group1 }}', '{{ test_group2 }}', '{{ test_group3 }}']
+
+- name: Revoke all groups from a role actual mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ groups: []
+ user: "{{ test_user1 }}"
+ state: exact
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\"", "REVOKE \"{{ test_group2 }}\" FROM \"{{ test_user1 }}\"", "REVOKE \"{{ test_group3 }}\" FROM \"{{ test_user1 }}\""]
+ - result.revoked["group.with.dots"] == ["{{ test_user1 }}"]
+ - result.revoked["group1"] == ["{{ test_user1 }}"]
+ - result.revoked["group2"] == ["{{ test_user1 }}"]
+ - result.granted == {}
+
+
+- name: Check result of prev task
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user1 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == []
+
+- name: Grant all groups to a role check mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ groups:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ - "{{ test_group3 }}"
+ user: "{{ test_user1 }}"
+ state: exact
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group3 }}\" TO \"{{ test_user1 }}\""]
+ - result.granted["group.with.dots"] == ["{{ test_user1 }}"]
+ - result.granted["group1"] == ["{{ test_user1 }}"]
+ - result.granted["group2"] == ["{{ test_user1 }}"]
+ - result.revoked == {}
+
+
+- name: Check result of prev task
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user1 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == []
+
+- name: Grant all groups to a role real mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ groups:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ - "{{ test_group3 }}"
+ user: "{{ test_user1 }}"
+ state: exact
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group3 }}\" TO \"{{ test_user1 }}\""]
+ - result.granted["group.with.dots"] == ["{{ test_user1 }}"]
+ - result.granted["group1"] == ["{{ test_user1 }}"]
+ - result.granted["group2"] == ["{{ test_user1 }}"]
+ - result.revoked == {}
+
+
+- name: Check result of prev task
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user1 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == ['{{ test_group1 }}', '{{ test_group2 }}', '{{ test_group3 }}']
+
+- name: Change groups 1
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ groups:
+ - "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: exact
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["REVOKE \"{{ test_group2 }}\" FROM \"{{ test_user1 }}\"", "REVOKE \"{{ test_group3 }}\" FROM \"{{ test_user1 }}\""]
+ - result.revoked["group.with.dots"] == ["{{ test_user1 }}"]
+ - result.revoked["group2"] == ["{{ test_user1 }}"]
+ - result.granted == {}
+
+
+- name: Check result of prev task
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user1 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == ['{{ test_group1 }}']
+
+- name: Change groups 2
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ groups:
+ - "{{ test_group2 }}"
+ - "{{ test_group3 }}"
+ user: "{{ test_user1 }}"
+ state: exact
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group3 }}\" TO \"{{ test_user1 }}\""]
+ - result.granted["group.with.dots"] == ["{{ test_user1 }}"]
+ - result.granted["group2"] == ["{{ test_user1 }}"]
+ - result.revoked["group1"] == ["{{ test_user1 }}"]
+
+- name: Check result of prev task
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user1 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == ['{{ test_group2 }}', '{{ test_group3 }}']
+
+- name: Change groups 2 again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ groups:
+ - "{{ test_group2 }}"
+ - "{{ test_group3 }}"
+ user: "{{ test_user1 }}"
+ state: exact
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ - result.granted == {}
+ - result.revoked == {}
+
+
+- name: Check result of prev task
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user1 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == ['{{ test_group2 }}', '{{ test_group3 }}']
+
+- name: Change groups for two users
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ groups:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ - "{{ test_group3 }}"
+ users:
+ - "{{ test_user1 }}"
+ - "{{ test_user2 }}"
+ state: exact
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group3 }}\" TO \"{{ test_user2 }}\""]
+ - result.granted["group1"] == ["{{ test_user1 }}"]
+ - result.granted["group.with.dots"] == ["{{ test_user2 }}"]
+ - result.revoked == {}
+
+- name: Check result of prev task for {{ test_user1 }}
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user1 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == ['{{ test_group1 }}', '{{ test_group2 }}', '{{ test_group3 }}']
+
+- name: Check result of prev task for {{ test_user2 }}
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user2 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == ['{{ test_group1 }}', '{{ test_group2 }}', '{{ test_group3 }}']
+
+- name: Change groups for two users 2
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ groups:
+ - "{{ test_group2 }}"
+ - "{{ test_group3 }}"
+ users:
+ - "{{ test_user1 }}"
+ - "{{ test_user2 }}"
+ state: exact
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\"", "REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user2 }}\""]
+ - result.revoked["group1"] == ["{{ test_user1 }}", "{{ test_user2 }}"]
+ - result.granted == {}
+
+- name: Check result of prev task for {{ test_user1 }}
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user1 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == ['{{ test_group2 }}', '{{ test_group3 }}']
+
+- name: Check result of prev task for {{ test_user2 }}
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user2 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.query_result.0.array == ['{{ test_group2 }}', '{{ test_group3 }}']
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/defaults/main.yml
new file mode 100644
index 000000000..e43723c47
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/defaults/main.yml
@@ -0,0 +1,3 @@
+test_tablespace_path: "/ssd"
+
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/main.yml
new file mode 100644
index 000000000..4b2f57510
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_owner module
+- import_tasks: postgresql_owner_initial.yml
+ when:
+ - postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml
new file mode 100644
index 000000000..a21160282
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml
@@ -0,0 +1,1073 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+####################
+# Prepare for tests:
+
+# Create test roles:
+- name: postgresql_owner - create test roles
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_user:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ name: '{{ item }}'
+ ignore_errors: true
+ with_items:
+ - alice
+ - bob
+
+- name: postgresql_owner - create test database
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ db: acme
+
+- name: postgresql_owner - create test table
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: CREATE TABLE my_table (id int)
+
+- name: postgresql_owner - set owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: my_table
+ obj_type: table
+
+- name: postgresql_owner - create test sequence
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: CREATE SEQUENCE test_seq
+
+- name: postgresql_owner - create test function
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: >
+ CREATE FUNCTION increment(integer) RETURNS integer AS 'select $1 + 1;'
+ LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT;
+
+- name: postgresql_owner - create test schema
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: CREATE SCHEMA test_schema
+
+- name: postgresql_owner - create test view
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: CREATE VIEW test_view AS SELECT * FROM my_table
+
+- name: postgresql_owner - create test materialized view
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: CREATE MATERIALIZED VIEW test_mat_view AS SELECT * FROM my_table
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - drop dir for test tablespace
+ become: true
+ file:
+ path: '{{ test_tablespace_path }}'
+ state: absent
+ ignore_errors: true
+
+- name: postgresql_owner - disable selinux
+ become: true
+ shell: setenforce 0
+ ignore_errors: true
+
+- name: postgresql_owner - create dir for test tablespace
+ become: true
+ file:
+ path: '{{ test_tablespace_path }}'
+ state: directory
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ mode: '0700'
+ ignore_errors: true
+
+- name: >
+ postgresql_owner - create a new tablespace called acme and
+  set alice as its owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: acme
+ login_user: '{{ pg_user }}'
+ name: acme
+ owner: alice
+ location: '{{ test_tablespace_path }}'
+
+################
+# Do main tests:
+
+#
+# check reassign_owned_by param
+#
+# try to reassign ownership to non existent user:
+- name: postgresql_owner - reassign_owned_by to non existent user
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: non_existent
+ reassign_owned_by: bob
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.failed == true
+
+- name: postgresql_owner - reassign_owned_by, check fail_on_role
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: alice
+ reassign_owned_by: non_existent
+ fail_on_role: false
+ register: result
+
+- assert:
+ that:
+ - result.failed == false
+
+- name: postgresql_owner - reassign_owned_by in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: alice
+ reassign_owned_by: bob
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['REASSIGN OWNED BY "bob" TO "alice"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tables WHERE tablename = 'my_table'
+ AND tableowner = 'alice'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - reassign_owned_by
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: alice
+ reassign_owned_by: bob
+ trust_input: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['REASSIGN OWNED BY "bob" TO "alice"']
+
+- name: postgresql_owner - check that ownership has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_tables WHERE tablename = 'my_table' AND tableowner = 'alice'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+###########################
+# Test trust_input parameter
+
+- name: postgresql_owner - reassign_owned_by, trust_input false
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: '{{ dangerous_name }}'
+ reassign_owned_by: alice
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
+
+- name: postgresql_owner - reassign_owned_by, trust_input true by default
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: '{{ dangerous_name }}'
+ reassign_owned_by: alice
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.msg is search('does not exist')
+# End of testing trust_input
+
+#
+# Check obj_type for each type
+#
+
+# #############################
+# check_mode obj_type: database
+- name: postgresql_owner - set db owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: database
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER DATABASE "acme" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_database AS d JOIN pg_roles AS r
+ ON d.datdba = r.oid WHERE d.datname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set db owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: database
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER DATABASE "acme" OWNER TO "bob"']
+
+- name: postgresql_owner - check that db owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_database AS d JOIN pg_roles AS r
+ ON d.datdba = r.oid WHERE d.datname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set db owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: database
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that db owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_database AS d JOIN pg_roles AS r
+ ON d.datdba = r.oid WHERE d.datname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set table owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: my_table
+ obj_type: table
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLE "my_table" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tables WHERE tablename = 'my_table'
+ AND tableowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set table owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: my_table
+ obj_type: table
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLE "my_table" OWNER TO "bob"']
+
+- name: postgresql_owner - check that table owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tables WHERE tablename = 'my_table'
+ AND tableowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set table owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: my_table
+ obj_type: table
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that table owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tables WHERE tablename = 'my_table'
+ AND tableowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set sequence owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_seq
+ obj_type: sequence
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER SEQUENCE "test_seq" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_class AS c JOIN pg_roles AS r
+ ON c.relowner = r.oid WHERE c.relkind = 'S'
+ AND c.relname = 'test_seq' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set sequence owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_seq
+ obj_type: sequence
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER SEQUENCE "test_seq" OWNER TO "bob"']
+
+- name: postgresql_owner - check that table owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_class AS c JOIN pg_roles AS r
+ ON c.relowner = r.oid WHERE c.relkind = 'S'
+ AND c.relname = 'test_seq' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set sequence owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_seq
+ obj_type: sequence
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that sequence owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_class AS c JOIN pg_roles AS r
+ ON c.relowner = r.oid WHERE c.relkind = 'S'
+ AND c.relname = 'test_seq' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set function owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: increment
+ obj_type: function
+ check_mode: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER FUNCTION increment OWNER TO "bob"']
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r
+ ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - set func owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: increment
+ obj_type: function
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER FUNCTION increment OWNER TO "bob"']
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - check that func owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r
+ ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - set func owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: increment
+ obj_type: function
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - check that function owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r
+ ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - set schema owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_schema
+ obj_type: schema
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER SCHEMA "test_schema" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM information_schema.schemata
+ WHERE schema_name = 'test_schema' AND schema_owner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set schema owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_schema
+ obj_type: schema
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER SCHEMA "test_schema" OWNER TO "bob"']
+
+- name: postgresql_owner - check that schema owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM information_schema.schemata
+ WHERE schema_name = 'test_schema' AND schema_owner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set schema owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+    obj_name: test_schema
+    obj_type: schema
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that schema owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM information_schema.schemata
+ WHERE schema_name = 'test_schema' AND schema_owner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set view owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_view
+ obj_type: view
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER VIEW "test_view" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_views WHERE viewname = 'test_view' AND viewowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set view owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_view
+ obj_type: view
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER VIEW "test_view" OWNER TO "bob"']
+
+- name: postgresql_owner - check that view owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_views WHERE viewname = 'test_view' AND viewowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set view owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_view
+ obj_type: view
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that view owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_views WHERE viewname = 'test_view' AND viewowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set matview owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_mat_view
+ obj_type: matview
+ check_mode: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER MATERIALIZED VIEW "test_mat_view" OWNER TO "bob"']
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+    query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_mat_view' AND matviewowner = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - set matview owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_mat_view
+ obj_type: matview
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER MATERIALIZED VIEW "test_mat_view" OWNER TO "bob"']
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - check that matview owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_mat_view' AND matviewowner = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - set matview owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_mat_view
+ obj_type: matview
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - check that matview owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_mat_view' AND matviewowner = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - set tablespace owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: tablespace
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLESPACE "acme" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tablespace AS t JOIN pg_roles AS r
+ ON t.spcowner = r.oid WHERE t.spcname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set tablespace owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: tablespace
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLESPACE "acme" OWNER TO "bob"']
+
+- name: postgresql_owner - check that tablespace owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tablespace AS t JOIN pg_roles AS r
+ ON t.spcowner = r.oid WHERE t.spcname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set tablespace owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: tablespace
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that tablespace owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tablespace AS t JOIN pg_roles AS r
+ ON t.spcowner = r.oid WHERE t.spcname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# Clean up
+#
+- name: postgresql_owner - drop test database
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ db: acme
+ state: absent
+
+- name: postgresql_owner - drop test tablespace
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ state: absent
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/defaults/main.yml
new file mode 100644
index 000000000..94df93f9e
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/defaults/main.yml
@@ -0,0 +1,29 @@
+---
+pg_hba_test_ips:
+- contype: local
+ users: 'all,postgres,test'
+- source: '0000:ffff::'
+ netmask: 'ffff:fff0::'
+- source: '192.168.0.0/24'
+ netmask: ''
+ databases: 'all,replication'
+- source: '192.168.1.0/24'
+ netmask: ''
+ databases: 'all'
+ method: reject
+- source: '127.0.0.1/32'
+ netmask: ''
+- source: '::1/128'
+ netmask: ''
+- source: '0000:ff00::'
+ netmask: 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff00'
+ method: scram-sha-256
+- source: '172.16.0.0'
+ netmask: '255.255.0.0'
+ method: trust
+- contype: hostgssenc
+ users: postgres
+ source: '2001:db8::1/128'
+- contype: hostnogssenc
+ users: all
+ source: '2001:db8::1/128'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/main.yml
new file mode 100644
index 000000000..af51cbca9
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_pg_hba module
+- import_tasks: postgresql_pg_hba_initial.yml
+- import_tasks: postgresql_pg_hba_bulk_rules.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_bulk_rules.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_bulk_rules.yml
new file mode 100644
index 000000000..4363f6dc9
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_bulk_rules.yml
@@ -0,0 +1,136 @@
+- name: set test variables
+ set_fact:
+ pghba_defaults: &pghba_defaults
+ create: true
+ dest: "/tmp/pg_hba_bulk_test.conf"
+ test_rule0: &test_rule0
+ contype: host
+ databases: "db0"
+ users: "user0"
+ address: "2001:db8::0/128"
+ method: pam
+ test_rule1: &test_rule1
+ contype: host
+ databases: "db1"
+ users: "user1"
+ address: "2001:db8::1/128"
+ method: pam
+ test_rule2: &test_rule2
+ contype: host
+ databases: "db2"
+ users: "user2"
+ address: "2001:db8::2/128"
+ method: pam
+
+- name: create one rule to clear
+ community.postgresql.postgresql_pg_hba:
+ <<: *pghba_defaults
+ state: "present"
+ <<: *test_rule0
+- name: overwrite with one normal rule
+ community.postgresql.postgresql_pg_hba:
+ <<: *pghba_defaults
+ overwrite: true
+ <<: *test_rule1
+ register: result
+- assert:
+ that:
+ - "result.pg_hba|length == 1"
+ - "result.pg_hba[0].db == test_rule1.databases"
+ - "result.pg_hba[0].src == test_rule1.address"
+ - "result.pg_hba[0].usr == test_rule1.users"
+ - "result.pg_hba[0].type == test_rule1.contype"
+- name: 'test the same again (overwrite: true, one normal rule) to ensure nothing changed'
+ community.postgresql.postgresql_pg_hba:
+ <<: *pghba_defaults
+ overwrite: true
+ <<: *test_rule1
+ register: result
+- assert:
+ that:
+ - "result.changed == false"
+
+- name: overwrite with one bulk rule
+ community.postgresql.postgresql_pg_hba:
+ <<: *pghba_defaults
+ overwrite: true
+ rules:
+ - "{{ test_rule2 }}"
+ register: result
+- assert:
+ that:
+ - "result.pg_hba|length == 1"
+ - "result.pg_hba[0].db == test_rule2.databases"
+ - "result.pg_hba[0].src == test_rule2.address"
+ - "result.pg_hba[0].usr == test_rule2.users"
+ - "result.pg_hba[0].type == test_rule2.contype"
+- name: 'test the same again (overwrite: true, one bulk rule) to ensure nothing changes'
+ community.postgresql.postgresql_pg_hba:
+ <<: *pghba_defaults
+ overwrite: true
+ rules:
+ - "{{ test_rule2 }}"
+ register: result
+- assert:
+ that:
+ - "result.changed == false"
+
+- name: test rules_behavior conflict
+ community.postgresql.postgresql_pg_hba: "{{ pghba_defaults|combine(item)|combine({'rules': [test_rule2]}) }}"
+ loop:
+ - address: 2001:db8::a/128
+ - comment: 'testcomment'
+ - contype: hostssl
+ - databases: db_a
+ - method: cert
+ - netmask: 255.255.255.0
+ # address: 192.0.2.0
+ - options: "clientcert=verify-full"
+ - state: absent
+ - users: testuser
+ register: result
+ ignore_errors: true
+- name: get jinja2 version
+ shell: '/usr/bin/pip --disable-pip-version-check --no-cache-dir show Jinja2 2>/dev/null | grep -oPm 1 "(?<=^Version: )\d+\.\d+"'
+ register: jinja2_version
+ ignore_errors: true
+- assert:
+ that:
+ - result.failed
+ - not result.changed
+ - "result.results|selectattr('changed')|length == 0"
+ - "result.results|rejectattr('failed')|length == 0"
+ # the 'in' test was added in jinja 2.10
+ - "jinja2_version.rc == 0 and jinja2_version.stdout|trim is version('2.10', '<') or result.results|selectattr('msg', 'in', 'conflict')|length == 0"
+
+- name: test rules with module defaults
+ community.postgresql.postgresql_pg_hba:
+ <<: *pghba_defaults
+ rules:
+ - contype: hostssl
+ register: result
+- assert:
+ that:
+ - result.changed
+ # assert that module defaults are used
+ - "{'db': 'all', 'method': 'md5', 'src': 'samehost', 'type': 'hostssl', 'usr': 'all'} in result.pg_hba"
+
+- name: test rules with custom defaults
+ community.postgresql.postgresql_pg_hba:
+ <<: *pghba_defaults
+ rules_behavior: combine
+ <<: *test_rule1
+ rules:
+ - {} # complete fallback to custom defaults
+ - databases: other_db # partial fallback to custom defaults
+ # no fallback
+ - <<: *test_rule2
+ state: absent
+ register: result
+- assert:
+ that:
+ - result.changed
+ - "{'db': 'all', 'method': 'md5', 'src': 'samehost', 'type': 'hostssl', 'usr': 'all'} in result.pg_hba" # unchanged preexisting from previous task
+ - "{'db': test_rule1.databases, 'method': test_rule1.method, 'src': test_rule1.address, 'type': test_rule1.contype, 'usr': test_rule1.users} in result.pg_hba"
+ - "{'db': test_rule2.databases, 'method': test_rule2.method, 'src': test_rule2.address, 'type': test_rule2.contype, 'usr': test_rule2.users} not in result.pg_hba"
+ - "{'db': 'other_db', 'method': test_rule1.method, 'src': test_rule1.address, 'type': test_rule1.contype, 'usr': test_rule1.users} in result.pg_hba"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml
new file mode 100644
index 000000000..2a9505a5b
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml
@@ -0,0 +1,264 @@
+- name: Make sure file does not exist
+ file:
+ dest: /tmp/pg_hba.conf
+ state: absent
+
+- name: check_mode run
+ postgresql_pg_hba:
+ dest: /tmp/pg_hba.conf
+ contype: host
+ source: '0000:ffff::'
+ netmask: 'ffff:fff0::'
+ method: md5
+ backup: 'True'
+ order: sud
+ state: "{{item}}"
+ check_mode: true
+ with_items:
+ - present
+ - absent
+
+- name: check_mode check
+ stat:
+ path: /tmp/pg_hba.conf
+ register: pg_hba_checkmode_check
+
+- name: Remove several ip addresses for idempotency check
+ postgresql_pg_hba:
+ contype: "{{item.contype|default('host')}}"
+ databases: "{{item.databases|default('all')}}"
+ dest: /tmp/pg_hba.conf
+ method: "{{item.method|default('md5')}}"
+ netmask: "{{item.netmask|default('')}}"
+ order: sud
+ source: "{{item.source|default('')}}"
+ state: absent
+ users: "{{item.users|default('all')}}"
+ with_items: "{{pg_hba_test_ips}}"
+ register: pg_hba_idempotency_check1
+
+- name: idempotency not creating file check
+ stat:
+ path: /tmp/pg_hba.conf
+ register: pg_hba_idempotency_file_check
+
+- name: Add several ip addresses
+ postgresql_pg_hba:
+ backup: 'True'
+ contype: "{{item.contype|default('host')}}"
+ create: 'True'
+ databases: "{{item.databases|default('all')}}"
+ dest: /tmp/pg_hba.conf
+ method: "{{item.method|default('md5')}}"
+ netmask: "{{item.netmask|default('')}}"
+ order: sud
+ source: "{{item.source|default('')}}"
+ state: present
+ users: "{{item.users|default('all')}}"
+ register: pg_hba_change
+ with_items: "{{pg_hba_test_ips}}"
+
+- name: Able to add a rule both without and with options
+ postgresql_pg_hba:
+ dest: "/tmp/pg_hba.conf"
+ users: "+some"
+ order: "sud"
+ state: "present"
+ contype: "local"
+ method: "cert"
+ options: "{{ item }}"
+ address: ""
+ with_items:
+ - ""
+ - "clientcert=1"
+
+- name: Retain options even if they contain spaces
+ postgresql_pg_hba:
+ dest: "/tmp/pg_hba.conf"
+ users: "+some"
+ order: "sud"
+ state: "present"
+ contype: "{{ item.contype }}"
+ method: "{{ item.method }}"
+ options: "{{ item.options }}"
+ address: "{{ item.address }}"
+ with_items:
+ - { address: "", contype: "local", method: "ldap", options: "ldapserver=example.com ldapport=389 ldapprefix=\"cn=\"" }
+ - { address: "red", contype: "hostssl", method: "cert", options: "clientcert=1 map=mymap" }
+ - { address: "blue", contype: "hostssl", method: "cert", options: "clientcert=1 map=mymap" }
+ register: pg_hba_options
+
+- name: read pg_hba rules
+ postgresql_pg_hba:
+ dest: /tmp/pg_hba.conf
+ register: pg_hba
+
+- name: Add several ip addresses again for idempotency check
+ postgresql_pg_hba:
+ contype: "{{item.contype|default('host')}}"
+ databases: "{{item.databases|default('all')}}"
+ dest: /tmp/pg_hba.conf
+ method: "{{item.method|default('md5')}}"
+ netmask: "{{item.netmask|default('')}}"
+ order: sud
+ source: "{{item.source|default('')}}"
+ state: present
+ users: "{{item.users|default('all')}}"
+ with_items: "{{pg_hba_test_ips}}"
+ register: pg_hba_idempotency_check2
+
+- name: pre-backup stat
+ stat:
+ path: /tmp/pg_hba.conf
+ register: prebackupstat
+
+- name: Add new ip address for backup check and netmask_sameas_prefix check
+ postgresql_pg_hba:
+ backup: 'True'
+ contype: host
+ dest: /tmp/pg_hba.conf
+ method: md5
+ netmask: 255.255.255.0
+ order: sud
+ source: '172.21.0.0'
+ state: present
+ register: pg_hba_backup_check2
+
+- name: Add new ip address for netmask_sameas_prefix check
+ postgresql_pg_hba:
+ backup: 'True'
+ contype: host
+ dest: /tmp/pg_hba.conf
+ method: md5
+ order: sud
+ source: '172.21.0.0/24'
+ state: present
+ register: netmask_sameas_prefix_check
+
+- name: post-backup stat
+ stat:
+ path: "{{pg_hba_backup_check2.backup_file}}"
+ register: postbackupstat
+
+- name: Don't allow netmask for src in [all, samehost, samenet]
+ postgresql_pg_hba:
+ contype: host
+ dest: /tmp/pg_hba.conf
+ method: md5
+ netmask: '255.255.255.255'
+ order: sud
+ source: all
+ state: present
+ register: pg_hba_fail_src_all_with_netmask
+ ignore_errors: true
+
+- debug:
+ var: pg_hba.pg_hba
+- assert:
+ that:
+ - 'pg_hba.pg_hba == [
+ { "db": "all", "method": "ldap", "type": "local", "usr": "+some", "options": "ldapserver=example.com ldapport=389 ldapprefix=\"cn=\"" },
+ { "db": "all", "method": "md5", "type": "local", "usr": "postgres" },
+ { "db": "all", "method": "md5", "type": "local", "usr": "test" },
+ { "db": "all", "method": "md5", "type": "local", "usr": "all" },
+ { "db": "all", "method": "md5", "src": "2001:db8::1/128", "type": "hostgssenc", "usr": "postgres" },
+ { "db": "all", "method": "cert", "src": "blue", "type": "hostssl", "usr": "+some", "options": "clientcert=1 map=mymap" },
+ { "db": "all", "method": "cert", "src": "red", "type": "hostssl", "usr": "+some", "options": "clientcert=1 map=mymap" },
+ { "db": "all", "method": "md5", "src": "127.0.0.1/32", "type": "host", "usr": "all" },
+ { "db": "all", "method": "md5", "src": "2001:db8::1/128", "type": "hostnogssenc", "usr": "all" },
+ { "db": "all", "method": "md5", "src": "::1/128", "type": "host", "usr": "all" },
+ { "db": "all", "method": "scram-sha-256", "src": "0:ff00::/120", "type": "host", "usr": "all" },
+ { "db": "replication", "method": "md5", "src": "192.168.0.0/24", "type": "host", "usr": "all" },
+ { "db": "all", "method": "md5", "src": "192.168.0.0/24", "type": "host", "usr": "all" },
+ { "db": "all", "method": "reject", "src": "192.168.1.0/24", "type": "host", "usr": "all" },
+ { "db": "all", "method": "trust", "src": "172.16.0.0/16", "type": "host", "usr": "all" },
+ { "db": "all", "method": "md5", "src": "0:fff0::/28", "type": "host", "usr": "all" }
+ ]'
+ - 'pg_hba_change is changed'
+ - 'pg_hba_checkmode_check.stat.exists == false'
+ - 'not pg_hba_idempotency_check1 is changed'
+ - 'not pg_hba_idempotency_check2 is changed'
+ - 'pg_hba_idempotency_file_check.stat.exists == false'
+ - 'prebackupstat.stat.checksum == postbackupstat.stat.checksum'
+ - 'pg_hba_fail_src_all_with_netmask is failed'
+ - 'not netmask_sameas_prefix_check is changed'
+ - 'pg_hba_options is changed'
+
+- name: ensure test file is empty
+ copy:
+ content: ''
+ dest: /tmp/pg_hba2.conf
+
+- name: Create a rule with the comment 'comment1'
+ postgresql_pg_hba:
+ contype: host
+ dest: /tmp/pg_hba2.conf
+ create: true
+ method: md5
+ address: "2001:db8::1/128"
+ order: sud
+ state: present
+ comment: "comment1"
+
+- name: Fetch the file
+ fetch:
+ src: /tmp/pg_hba2.conf
+ dest: /tmp/pg_hba2.conf
+ flat: true
+- name: Read pg_hba2.conf
+ set_fact:
+ content: "{{ lookup('file', '/tmp/pg_hba2.conf') }}"
+- debug:
+ var: content
+- assert:
+ that:
+ - '"\nhost\tall\tall\t2001:db8::1/128\tmd5\t#comment1" == content'
+
+- name: Create a rule with the comment 'comment2'
+ postgresql_pg_hba:
+ contype: host
+ dest: /tmp/pg_hba2.conf
+ method: md5
+ address: "2001:db8::2/128"
+ order: sud
+ state: present
+ comment: "comment2"
+
+- name: Fetch the file
+ fetch:
+ src: /tmp/pg_hba2.conf
+ dest: /tmp/pg_hba2.conf
+ flat: true
+- name: Read pg_hba2.conf
+ set_fact:
+ content: "{{ lookup('file', '/tmp/pg_hba2.conf') }}"
+- debug:
+ var: content
+- assert:
+ that:
+ - '"#comment1\nhost\tall\tall\t2001:db8::1/128\tmd5\nhost\tall\tall\t2001:db8::2/128\tmd5\t#comment2" == content'
+
+- name: Create a rule with the comment 'comment3' and keep_comments_at_rules
+ postgresql_pg_hba:
+ contype: host
+ dest: /tmp/pg_hba2.conf
+ method: md5
+ address: "2001:db8::3/128"
+ order: sud
+ state: present
+ comment: "comment3"
+ keep_comments_at_rules: true
+
+- name: Fetch the file
+ fetch:
+ src: /tmp/pg_hba2.conf
+ dest: /tmp/pg_hba2.conf
+ flat: true
+- name: Read pg_hba2.conf
+ set_fact:
+ content: "{{ lookup('file', '/tmp/pg_hba2.conf') }}"
+- debug:
+ var: content
+- assert:
+ that:
+ - '"#comment1\nhost\tall\tall\t2001:db8::1/128\tmd5\nhost\tall\tall\t2001:db8::2/128\tmd5\t#comment2\nhost\tall\tall\t2001:db8::3/128\tmd5\t#comment3" == content'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/defaults/main.yml
new file mode 100644
index 000000000..73eb55ae2
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+db_default: postgres
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml
new file mode 100644
index 000000000..bcb18d2fe
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_ping module
+- import_tasks: postgresql_ping_initial.yml
+ vars:
+ db_name_nonexist: fake_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml
new file mode 100644
index 000000000..218ae9fd7
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml
@@ -0,0 +1,187 @@
+# Test code for the postgresql_ping module
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: postgresql_ping - test return values
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_ping:
+ db: "{{ db_default }}"
+ login_user: "{{ pg_user }}"
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.is_available == true
+ - result.server_version != {}
+ - result.server_version.raw is search('PostgreSQL')
+ - result.server_version.major != ''
+ - result.server_version.minor != ''
+ - result is not changed
+
+- assert:
+ that:
+ - result.server_version.patch != {}
+ - result.server_version.full == '{{ result.server_version.major }}.{{ result.server_version.minor }}.{{ result.server_version.patch }}'
+ when: result.server_version.major == 9
+
+- assert:
+ that:
+ - result.server_version.full == '{{ result.server_version.major }}.{{ result.server_version.minor }}'
+ when: result.server_version.major >= 10
+
+- name: postgresql_ping - check ping of non-existing database doesn't return anything
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_ping:
+ db: "{{ db_name_nonexist }}"
+ login_user: "{{ pg_user }}"
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.is_available == false
+ - result.server_version == {}
+ - result is not changed
+
+- name: postgresql_ping - check ping of the database on non-existent port does not return anything
+ become_user: "{{ pg_user }}"
+ become: true
+ environment:
+ PGPORT: 5435
+ ignore_errors: true
+ postgresql_ping:
+ db: "{{ db_default }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result.is_available == false
+ - result.server_version == {}
+ - result is not changed
+
+- name: postgresql_ping - check ping of the database by a non-existent user does not return anything
+ become_user: "{{ pg_user }}"
+ become: true
+ environment:
+ PGUSER: 'test_user'
+ ignore_errors: true
+ postgresql_ping:
+ db: "{{ db_default }}"
+ register: result
+
+- assert:
+ that:
+ - result.is_available == false
+ - result.server_version == {}
+ - result is not changed
+
+- name: Creating a "test_user" in postresql
+ shell:
+ cmd: psql -U "{{ pg_user }}" -c "CREATE ROLE test_user WITH LOGIN PASSWORD 'TEST_PASSWORD';"
+
+- name: postgresql_ping - check ping of the database by an existing user
+ become_user: "{{ pg_user }}"
+ become: true
+ environment:
+ PGUSER: 'test_user'
+ ignore_errors: true
+ postgresql_ping:
+ db: "{{ db_default }}"
+ login_password: "TEST_PASSWORD"
+ register: result
+
+- assert:
+ that:
+ - result.is_available == true
+ - result.server_version != {}
+ - result.server_version.raw is search('PostgreSQL')
+ - result.server_version.major != ''
+ - result.server_version.minor != ''
+ - result is not changed
+
+- name: postgresql_ping - ping DB with SSL 1
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_ping:
+ db: "{{ ssl_db }}"
+ login_user: "{{ ssl_user }}"
+ login_password: "{{ ssl_pass }}"
+ login_host: 127.0.0.1
+ login_port: 5432
+ ssl_mode: require
+ ca_cert: '{{ ssl_rootcert }}'
+ trust_input: true
+ register: result
+ when:
+ - ansible_os_family == 'Debian'
+ - postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result.is_available == true
+ - result.conn_err_msg == ''
+ when:
+ - ansible_os_family == 'Debian'
+ - postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_ping - ping DB with SSL 2
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_ping:
+ db: "{{ ssl_db }}"
+ login_user: "{{ ssl_user }}"
+ login_password: "{{ ssl_pass }}"
+ login_host: 127.0.0.1
+ login_port: 5432
+ ssl_mode: verify-full
+ ca_cert: '{{ ssl_rootcert }}'
+ ssl_cert: '{{ ssl_cert }}'
+ ssl_key: '{{ ssl_key }}'
+ trust_input: true
+ register: result
+ when:
+ - ansible_os_family == 'Debian'
+ - postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result.is_available == true
+ - result.conn_err_msg == ''
+ when:
+ - ansible_os_family == 'Debian'
+ - postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_ping - check trust_input
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_ping:
+ db: "{{ db_default }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+# Check conn_err_msg return value
+- name: Try to connect to non-existent DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_ping:
+ db: blahblah
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: Check conn_err_msg return value
+ assert:
+ that:
+ - result is succeeded
+ - result.conn_err_msg is search("database \"blahblah\" does not exist")
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/defaults/main.yml
new file mode 100644
index 000000000..0a2766702
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/defaults/main.yml
@@ -0,0 +1,14 @@
+db_name: ansible_db
+db_user1: ansible_db_user1
+db_user2: ansible_db_user2
+db_user3: ansible_db_user3
+db_user_with_dots1: role.with.dots1
+db_user_with_dots2: role.with.dots2
+db_name_with_hyphens: ansible-db
+db_user_with_hyphens: ansible-db-user
+db_schema_with_hyphens: ansible-db-schema
+db_schema_with_dot: test.schema
+db_schema_with_quote: 'TEST_schema"'
+db_session_role1: session_role1
+db_session_role2: session_role2
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/main.yml
new file mode 100644
index 000000000..cf7b63524
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/main.yml
@@ -0,0 +1,19 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: postgresql_privs_session_role.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+# Initial CI tests of postgresql_privs module:
+- include_tasks: postgresql_privs_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+# General tests:
+- include_tasks: postgresql_privs_general.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+# Tests default_privs with target_role:
+- include_tasks: test_target_role.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml
new file mode 100644
index 000000000..3f810d473
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml
@@ -0,0 +1,50 @@
+- name: "Admin user is allowed to access pg_authid relation: password comparison will succeed, password won't be updated"
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ encrypted: 'true'
+ password: "md5{{ (db_password ~ db_user1) | hash('md5')}}"
+ db: "{{ db_name }}"
+ priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP'
+ login_user: "{{ pg_user }}"
+ register: redo_as_admin
+
+- name: "Check that task succeeded without any change"
+ assert:
+ that:
+ - 'redo_as_admin is not failed'
+ - 'redo_as_admin is not changed'
+ - 'redo_as_admin is successful'
+
+- name: "Check that normal user isn't allowed to access pg_authid"
+ shell: 'psql -c "select * from pg_authid;" {{ db_name }} {{ db_user1 }}'
+ environment:
+ PGPASSWORD: '{{ db_password }}'
+ ignore_errors: true
+ register: pg_authid
+
+- assert:
+ that:
+ - 'pg_authid is failed'
+ - pg_authid.stderr is search('permission denied for (relation|table) pg_authid')
+
+- name: "Normal user isn't allowed to access pg_authid relation: password comparison will fail, password will be updated"
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ encrypted: 'true'
+ password: "md5{{ (db_password ~ db_user1) | hash('md5')}}"
+ db: "{{ db_name }}"
+ priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP'
+ login_user: "{{ db_user1 }}"
+ login_password: "{{ db_password }}"
+ register: redo_as_normal_user
+
+- name: "Check that task succeeded and that result is changed"
+ assert:
+ that:
+ - 'redo_as_normal_user is not failed'
+ - 'redo_as_normal_user is changed'
+ - 'redo_as_normal_user is successful'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml
new file mode 100644
index 000000000..4b4621010
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml
@@ -0,0 +1,1767 @@
+# Setup
+- name: Create DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Create a user to be owner of objects
+ postgresql_user:
+ name: "{{ db_user3 }}"
+ state: present
+ encrypted: true
+ password: password
+ role_attr_flags: CREATEDB,LOGIN
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Create a user to be given permissions and other tests
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: present
+ encrypted: true
+ password: password
+ role_attr_flags: LOGIN
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+#############################
+# Test of solving bug 656 #
+#############################
+- name: Create DB with hyphen in the name
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_name_with_hyphens }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Create a user with hyphen in the name
+ postgresql_user:
+ name: "{{ db_user_with_hyphens }}"
+ state: present
+ encrypted: true
+ password: password
+ role_attr_flags: CREATEDB,LOGIN
+ db: "{{ db_name_with_hyphens }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Create schema with hyphen in the name
+ postgresql_schema:
+ login_user: "{{ pg_user }}"
+ login_password: password
+ db: "{{ db_name_with_hyphens }}"
+ name: "{{ db_schema_with_hyphens }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+# Grant rights to the public schema, since in PostgreSQL 15
+# the rights to this schema are taken away from all users except the owner
+- name: GRANT ALL PRIVILEGES ON SCHEMA public TO ansible_db_user1,2,3
+ community.postgresql.postgresql_privs:
+ db: "{{ db_name }}"
+ privs: ALL
+ type: schema
+ objs: public
+ role: "{{ item }}"
+ loop:
+ - "{{ db_user2 }}"
+ - "{{ db_user3 }}"
+
+# Also covers https://github.com/ansible-collections/community.general/issues/884
+- name: Set table default privs on the schema with hyphen in the name
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_password: password
+ db: "{{ db_name_with_hyphens }}"
+ schema: "{{ db_schema_with_hyphens }}"
+ role: "{{ db_user_with_hyphens }}"
+ type: default_privs
+ obj: TABLES
+ privs: all
+ state: present
+ usage_on_types: true
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries is search('ON TYPES')
+
+# Also covers https://github.com/ansible-collections/community.general/issues/884
+- name: Set table default privs on the schema with hyphen in the name
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_password: password
+ db: "{{ db_name_with_hyphens }}"
+ schema: "{{ db_schema_with_hyphens }}"
+ role: "{{ db_user_with_hyphens }}"
+ type: default_privs
+ obj: TABLES
+ privs: all
+ state: present
+ usage_on_types: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries is not search('ON TYPES')
+
+- name: Delete table default privs on the schema with hyphen in the name
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_password: password
+ db: "{{ db_name_with_hyphens }}"
+ schema: "{{ db_schema_with_hyphens }}"
+ role: "{{ db_user_with_hyphens }}"
+ type: default_privs
+ obj: TABLES
+ privs: all
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Delete schema with hyphen in the name
+ postgresql_schema:
+ login_user: "{{ pg_user }}"
+ login_password: password
+ db: "{{ db_name_with_hyphens }}"
+ name: "{{ db_schema_with_hyphens }}"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Delete a user with hyphen in the name
+ postgresql_user:
+ name: "{{ db_user_with_hyphens }}"
+ state: absent
+ encrypted: true
+ login_password: password
+ role_attr_flags: CREATEDB,LOGIN
+ db: "{{ db_name_with_hyphens }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Delete DB with hyphen in the name
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: absent
+ name: "{{ db_name_with_hyphens }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+#############################
+# Test of solving bug 27327 #
+#############################
+
+# Create the test table and view:
+- name: Create table
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: test_table1
+ columns:
+ - id int
+
+- name: Create view
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "CREATE VIEW test_view AS SELECT id FROM test_table1"
+
+# Test check_mode:
+- name: Grant SELECT on test_view, check_mode
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ state: present
+ privs: SELECT
+ type: table
+ objs: test_view
+ roles: "{{ db_user2 }}"
+ trust_input: false
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+# Check:
+- name: Check that nothing was changed after the prev step
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT grantee FROM information_schema.role_table_grants WHERE table_name='test_view' AND grantee = '{{ db_user2 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Test real (non-check) mode:
+- name: Grant SELECT on test_view
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ state: present
+ privs: SELECT
+ type: table
+ objs: test_view
+ roles: "{{ db_user2 }}"
+ trust_input: false
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+# Check:
+- name: Check that the grant was applied after the prev step
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT grantee FROM information_schema.role_table_grants WHERE table_name='test_view' AND grantee = '{{ db_user2 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Test idempotency:
+- name: Try to grant SELECT again
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ state: present
+ privs: SELECT
+ type: table
+ objs: test_view
+ roles: "{{ db_user2 }}"
+ trust_input: false
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# Cleanup:
+- name: Drop test view
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "DROP VIEW test_view"
+
+- name: Drop test table
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: test_table1
+ state: absent
+
+######################################################
+# Test foreign data wrapper and foreign server privs #
+######################################################
+
+# Foreign data wrapper setup
+- name: Create foreign data wrapper extension
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "CREATE EXTENSION postgres_fdw" | psql -d "{{ db_name }}"
+
+- name: Create dummy foreign data wrapper
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "CREATE FOREIGN DATA WRAPPER dummy" | psql -d "{{ db_name }}"
+
+- name: Create foreign server
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "CREATE SERVER dummy_server FOREIGN DATA WRAPPER dummy" | psql -d "{{ db_name }}"
+
+# Test
+- name: Grant foreign data wrapper privileges
+ postgresql_privs:
+ state: present
+ type: foreign_data_wrapper
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that:
+ - result is changed
+
+- name: Get foreign data wrapper privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}"
+ vars:
+ fdw_query: >
+ SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
+ WHERE fdwname = ANY (ARRAY['dummy']) ORDER BY fdwname
+ register: fdw_result
+
+- assert:
+ that:
+ - "fdw_result.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user2 }}' in fdw_result.stdout_lines[-2]"
+
+# Test
+- name: Grant foreign data wrapper privileges second time
+ postgresql_privs:
+ state: present
+ type: foreign_data_wrapper
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+
+# Test
+- name: Revoke foreign data wrapper privileges
+ postgresql_privs:
+ state: absent
+ type: foreign_data_wrapper
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that:
+ - result is changed
+
+- name: Get foreign data wrapper privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}"
+ vars:
+ fdw_query: >
+ SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
+ WHERE fdwname = ANY (ARRAY['dummy']) ORDER BY fdwname
+ register: fdw_result
+
+- assert:
+ that:
+ - "fdw_result.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user2 }}' not in fdw_result.stdout_lines[-2]"
+
+# Test
+- name: Revoke foreign data wrapper privileges for second time
+ postgresql_privs:
+ state: absent
+ type: foreign_data_wrapper
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+
+# Test
+- name: Grant foreign server privileges
+ postgresql_privs:
+ state: present
+ type: foreign_server
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy_server
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that:
+ - result is changed
+
+- name: Get foreign server privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}"
+ vars:
+ fdw_query: >
+ SELECT srvacl FROM pg_catalog.pg_foreign_server
+ WHERE srvname = ANY (ARRAY['dummy_server']) ORDER BY srvname
+ register: fs_result
+
+- assert:
+ that:
+ - "fs_result.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user2 }}' in fs_result.stdout_lines[-2]"
+
+# Test
+- name: Grant foreign server privileges for second time
+ postgresql_privs:
+ state: present
+ type: foreign_server
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy_server
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+
+# Test
+- name: Revoke foreign server privileges
+ postgresql_privs:
+ state: absent
+ type: foreign_server
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy_server
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that:
+ - result is changed
+
+- name: Get foreign server privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}"
+ vars:
+ fdw_query: >
+ SELECT srvacl FROM pg_catalog.pg_foreign_server
+ WHERE srvname = ANY (ARRAY['dummy_server']) ORDER BY srvname
+ register: fs_result
+
+- assert:
+ that:
+ - "fs_result.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user2 }}' not in fs_result.stdout_lines[-2]"
+
+# Test
+- name: Revoke foreign server privileges for second time
+ postgresql_privs:
+ state: absent
+ type: foreign_server
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy_server
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+
+# Foreign data wrapper cleanup
+- name: Drop foreign server
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "DROP SERVER dummy_server" | psql -d "{{ db_name }}"
+
+- name: Drop dummy foreign data wrapper
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "DROP FOREIGN DATA WRAPPER dummy" | psql -d "{{ db_name }}"
+
+- name: Drop foreign data wrapper extension
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "DROP EXTENSION postgres_fdw" | psql -d "{{ db_name }}"
+
+##########################################
+# Test ALL_IN_SCHEMA for 'function' type #
+##########################################
+
+# Function ALL_IN_SCHEMA Setup
+- name: Create function for test
+ postgresql_query:
+ query: CREATE FUNCTION public.a() RETURNS integer LANGUAGE SQL AS 'SELECT 2';
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+
+# Test
+- name: Grant execute to all functions
+ postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that: result is changed
+
+- name: Check that all functions have execute privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: psql {{ db_name }} -c "SELECT proacl FROM pg_proc WHERE proname = 'a'" -t
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}=X/{{ db_user3 }}' in '{{ result.stdout_lines[0] }}'"
+
+# Test
+- name: Grant execute to all functions again
+ postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that: result is not changed
+
+# Test
+- name: Revoke execute to all functions
+ postgresql_privs:
+ type: function
+ state: absent
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that: result is changed
+
+# Test
+- name: Revoke execute to all functions again
+ postgresql_privs:
+ type: function
+ state: absent
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that: result is not changed
+
+# Function ALL_IN_SCHEMA cleanup
+- name: Remove function for test
+ postgresql_query:
+ query: DROP FUNCTION public.a();
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+
+# Issue https://github.com/ansible-collections/community.general/issues/994
+- name: Create a procedure for tests
+ postgresql_query:
+ query: "CREATE PROCEDURE mock_procedure() LANGUAGE SQL AS $$ SELECT 1; $$;"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+# Issue https://github.com/ansible-collections/community.general/issues/994
+- name: Try to run module against a procedure, not function
+ postgresql_privs:
+ type: function
+ state: present
+ privs: ALL
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+###########################
+# Test for procedure type #
+###########################
+- name: Create another procedure for tests
+ postgresql_query:
+ query: "CREATE PROCEDURE mock_procedure1(int, int) LANGUAGE SQL AS $$ SELECT 1; $$;"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Grant privs on procedure
+ postgresql_privs:
+ type: procedure
+ state: present
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: 'mock_procedure1(int:int)'
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Grant privs on procedure again
+ postgresql_privs:
+ type: procedure
+ state: present
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: 'mock_procedure1(int:int)'
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Revoke procedure privs
+ postgresql_privs:
+ type: procedure
+ state: absent
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: 'mock_procedure1(int:int)'
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Revoke procedure privs again
+ postgresql_privs:
+ type: procedure
+ state: absent
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: 'mock_procedure1(int:int)'
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Grant procedure privs for all object in schema
+ postgresql_privs:
+ type: procedure
+ state: present
+ privs: ALL
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Grant procedure privs for all object in schema again
+ postgresql_privs:
+ type: procedure
+ state: present
+ privs: ALL
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Revoke procedure privs for all object in schema
+ postgresql_privs:
+ type: procedure
+ state: absent
+ privs: ALL
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+####################################################
+# Test ALL_IN_SCHEMA for 'partitioned tables' type #
+####################################################
+
+# Partitioning tables is a feature introduced in PostgreSQL 10.
+# (see https://www.postgresql.org/docs/10/ddl-partitioning.html )
+# The tests below check for this version
+
+# Table ALL_IN_SCHEMA Setup
+- name: Create partitioned table for test purpose
+ postgresql_query:
+ query: CREATE TABLE public.testpt (id int not null, logdate date not null) PARTITION BY RANGE (logdate);
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Grant SELECT to all tables in check mode
+ postgresql_privs:
+ type: table
+ state: present
+ privs: SELECT
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: false
+ register: result
+ ignore_errors: true
+ when: postgres_version_resp.stdout is version('10', '>=')
+ check_mode: true
+
+# Checks
+- name: Check that all partitioned tables don't have select privileges after the check mode task
+ postgresql_query:
+ query: SELECT grantee, privilege_type FROM information_schema.role_table_grants WHERE table_name='testpt' and privilege_type='SELECT' and grantee = %(grantuser)s
+ db: "{{ db_name }}"
+ login_user: '{{ db_user2 }}'
+ login_password: password
+ named_args:
+ grantuser: '{{ db_user2 }}'
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Grant execute with grant option on pg_create_restore_point function
+ postgresql_privs:
+ privs: EXECUTE
+ type: function
+ schema: pg_catalog
+ obj: pg_create_restore_point(text)
+ db: "{{ db_name }}"
+ roles: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ grant_option: true
+ state: present
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is changed
+
+- name: Check that user has GRANT privilege on the function
+ postgresql_query:
+ query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point'
+ db: "{{ db_name }}"
+ login_user: "{{ db_user2 }}"
+ login_password: password
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}=X*/{{ pg_user }}' in result.query_result[0].proacl"
+
+# Test
+- name: Grant execute without specifying grant_option to check idempotence
+ postgresql_privs:
+ privs: EXECUTE
+ type: function
+ schema: pg_catalog
+ obj: pg_create_restore_point(text)
+ db: "{{ db_name }}"
+ roles: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ state: present
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is not changed
+
+- name: Check that user has GRANT privilege on the function
+ postgresql_query:
+ query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point'
+ db: "{{ db_name }}"
+ login_user: "{{ db_user2 }}"
+ login_password: password
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}=X*/{{ pg_user }}' in result.query_result[0].proacl"
+
+# Test
+- name: Revoke grant option on pg_create_restore_point function
+ postgresql_privs:
+ privs: EXECUTE
+ type: function
+ schema: pg_catalog
+ obj: pg_create_restore_point(text)
+ db: "{{ db_name }}"
+ roles: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ grant_option: false
+ state: present
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is changed
+
+- name: Check that user does not have GRANT privilege on the function
+ postgresql_query:
+ query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point'
+ db: "{{ db_name }}"
+ login_user: "{{ db_user2 }}"
+ login_password: password
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}=X/{{ pg_user }}' in result.query_result[0].proacl"
+
+# Test
+- name: Revoke execute on pg_create_restore_point function
+ postgresql_privs:
+ privs: EXECUTE
+ type: function
+ schema: pg_catalog
+ obj: pg_create_restore_point(text)
+ db: "{{ db_name }}"
+ roles: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ state: absent
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is changed
+
+- name: Check that user does not have EXECUTE privilege on the function
+ postgresql_query:
+ query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point'
+ db: "{{ db_name }}"
+ login_user: "{{ db_user2 }}"
+ login_password: password
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}' not in result.query_result[0].proacl"
+
+# Test
+- name: Grant SELECT to all tables
+ postgresql_privs:
+ type: table
+ state: present
+ privs: SELECT
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: false
+ register: result
+ ignore_errors: true
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that: result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Check that all partitioned tables have select privileges
+ postgresql_query:
+ query: SELECT grantee, privilege_type FROM information_schema.role_table_grants WHERE table_name='testpt' and privilege_type='SELECT' and grantee = %(grantuser)s
+ db: "{{ db_name }}"
+ login_user: '{{ db_user2 }}'
+ login_password: password
+ named_args:
+ grantuser: '{{ db_user2 }}'
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Grant SELECT to all tables again to see no changes are reported
+ postgresql_privs:
+ type: table
+ state: present
+ privs: SELECT
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: false
+ register: result
+ ignore_errors: true
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that: result is not changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Revoke SELECT to all tables
+ postgresql_privs:
+ type: table
+ state: absent
+ privs: SELECT
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: false
+ register: result
+ ignore_errors: true
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that: result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Check that all partitioned tables don't have select privileges
+ postgresql_query:
+ query: SELECT grantee, privilege_type FROM information_schema.role_table_grants WHERE table_name='testpt' and privilege_type='SELECT' and grantee = %(grantuser)s
+ db: "{{ db_name }}"
+ login_user: '{{ db_user2 }}'
+ login_password: password
+ named_args:
+ grantuser: '{{ db_user2 }}'
+ become: true
+ become_user: "{{ pg_user }}"
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Revoke SELECT to all tables and no changes are reported
+ postgresql_privs:
+ type: table
+ state: absent
+ privs: SELECT
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: false
+ register: result
+ ignore_errors: true
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that: result is not changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Table ALL_IN_SCHEMA cleanup
+- name: Remove table for test
+ postgresql_query:
+ query: DROP TABLE public.testpt;
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: false
+ ignore_errors: true
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+###########################################
+# Test for 'type' value of type parameter #
+###########################################
+
+# Test
+- name: Grant type privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: present
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Grant type privileges again using check_mode
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: present
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ check_mode: true
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Grant type privileges again
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: present
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Revoke type privileges in check_mode
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: absent
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ check_mode: true
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Revoke type privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: absent
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' not in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# type with default schema (public):
+- name: Create custom type in schema public
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: "CREATE TYPE compfoo AS (f1 int, f2 text)"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Grant type privileges with default schema
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: present
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: compfoo
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: >
+ SELECT t.typacl FROM pg_catalog.pg_type t JOIN pg_catalog.pg_namespace n
+ ON n.oid = t.typnamespace WHERE t.typname = 'compfoo' AND n.nspname = 'public';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+######################################################################
+# https://github.com/ansible-collections/community.general/issues/1058
+- name: Create user for test
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ name: "test-role"
+ role_attr_flags: "NOLOGIN,NOSUPERUSER,INHERIT,NOCREATEDB,NOCREATEROLE,NOREPLICATION"
+
+- name: Test community.general/issue/1058 GRANT with hyphen
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "test-role"
+ objs: "{{ pg_user }}"
+ type: "group"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"{{ pg_user }}\" TO \"test-role\";"]
+
+- name: Test community.general/issue/1058 REVOKE
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "test-role"
+ objs: "{{ pg_user }}"
+ type: "group"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["REVOKE \"{{ pg_user }}\" FROM \"test-role\";"]
+
+- name: Test community.general/issue/1058 GRANT without hyphen
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: "{{ pg_user }}"
+ type: "group"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"{{ pg_user }}\" TO \"{{ db_user3 }}\";"]
+
+- name: Test community.general/issue/1058 GRANT with hyphen as an object
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: "test-role,{{ db_user2 }}"
+ type: "group"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"test-role\",\"{{ db_user2 }}\" TO \"{{ db_user3 }}\";"]
+
+- name: Test community.general/issue/1058 GRANT with hyphen as an object
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: "test-role"
+ type: "group"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+##############
+# Issue https://github.com/ansible-collections/community.postgresql/issues/381
+- name: create schemas with special names
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_schema:
+ login_user: "{{ pg_user }}"
+ login_password: password
+ db: "{{ db_name }}"
+ name: '"{{ item }}"'
+ state: present
+ loop:
+ - "{{ db_schema_with_dot|replace('\"', '\"\"') }}"
+ - "{{ db_schema_with_quote|replace('\"', '\"\"') }}"
+ register: result
+- assert:
+ that:
+ - result is changed
+- name: create tables in schemas with special names
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ login_user: "{{ pg_user }}"
+ login_password: password
+ db: "{{ db_name }}"
+ name: '"{{ item }}"."test.table.name"'
+ columns: []
+ loop:
+ - "{{ db_schema_with_dot|replace('\"', '\"\"') }}"
+ - "{{ db_schema_with_quote|replace('\"', '\"\"') }}"
+ register: result
+- assert:
+ that:
+ - result is changed
+- name: grant privileges on all tables in schemas with special names
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: PUBLIC
+ objs: ALL_IN_SCHEMA
+ type: table
+ privs: SELECT
+ schema: "{{ item }}"
+ loop:
+ - "{{ db_schema_with_dot }}"
+ - "{{ db_schema_with_quote }}"
+ register: result
+- assert:
+ that:
+ - result is changed
+- name: grant privileges on some table in schemas with special names
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: PUBLIC
+ objs: 'test.table.name'
+ type: table
+ privs: SELECT
+ schema: "{{ item }}"
+ loop:
+ - "{{ db_schema_with_dot }}"
+ - "{{ db_schema_with_quote }}"
+ register: result
+- assert:
+ that:
+ - result is changed
+- name: check permissions on tables in schemas with special names
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: "{{ db_name }}"
+ query: |
+ select true as granted from information_schema.role_table_grants
+ where table_schema=%s and table_name='test.table.name' and privilege_type='SELECT' and grantee='PUBLIC'
+ positional_args:
+ - "{{ item }}"
+ loop:
+ - "{{ db_schema_with_dot }}"
+ - "{{ db_schema_with_quote }}"
+ register: result
+- assert:
+ that:
+ - 'result.results|length == 2'
+ - 'result.results[0].rowcount == 1'
+ - 'not result.results[0].failed'
+ - 'result.results[1].rowcount == 1'
+ - 'not result.results[1].failed'
+- name: cleanup test schemas with special names
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_schema:
+ login_user: "{{ pg_user }}"
+ login_password: password
+ db: "{{ db_name }}"
+ name: '"{{ item }}"'
+ state: absent
+ cascade_drop: true
+ loop:
+ - "{{ db_schema_with_dot|replace('\"', '\"\"') }}"
+ - "{{ db_schema_with_quote|replace('\"', '\"\"') }}"
+ register: result
+
+
+##############
+# Issue https://github.com/ansible-collections/community.postgresql/issues/332
+- name: Test community.postgresql issue 332 grant usage
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: schemas
+ type: default_privs
+ privs: usage
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Test community.postgresql issue 332 grant usage, run again
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: schemas
+ type: default_privs
+ privs: usage
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Test community.postgresql issue 333 grant usage
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: tables
+ type: default_privs
+ schema: not-specified
+ privs: select
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Test community.postgresql issue 333 grant usage, run again
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: tables
+ type: default_privs
+ schema: not-specified
+ privs: select
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Test community.postgresql issue 373
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: ALL_DEFAULT
+ type: default_privs
+ privs: ALL
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Test community.postgresql issue 379
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: ALL_IN_SCHEMA
+ type: default_privs
+ privs: SELECT,INSERT,UPDATE,DELETE,EXECUTE
+ schema: public
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+ - result.msg is search('ALL_IN_SCHEMA can be used only for type')
+
+# Cleanup
+- name: Remove privs
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: absent
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: compfoo
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Reassign ownership
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_owner:
+ login_user: "{{ pg_user }}"
+ db: "{{ db_name }}"
+ new_owner: "{{ pg_user }}"
+ reassign_owned_by: "{{ item }}"
+ loop:
+ - "{{ db_user2 }}"
+ - "{{ db_user3 }}"
+
+- name: Drop a role for which the default privileges have been altered
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_db: "{{ db_name }}"
+ query: "DROP OWNED BY {{ item }};"
+ loop:
+ - "{{ db_user2 }}"
+ - "{{ db_user3 }}"
+
+- name: Remove user given permissions
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: absent
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Remove user owner of objects
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ item }}"
+ state: absent
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ loop:
+ - '{{ db_user3 }}'
+ - 'test-role'
+
+- name: Destroy DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml
new file mode 100644
index 000000000..814bc348d
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml
@@ -0,0 +1,407 @@
+# The tests below were added initially and moved here
+# from the shared target called ``postgresql`` by @Andersson007 <aaklychkov@mail.ru>.
+# You can see modern examples of CI tests in postgresql_publication directory, for example.
+
+#
+# Test settings privileges
+#
+- name: Create db
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ login_user: "{{ pg_user }}"
+
+- name: Create some tables on the db
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "create table test_table1 (field text);" | psql {{ db_name }}
+
+- become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "create table test_table2 (field text);" | psql {{ db_name }}
+
+- vars:
+ db_password: 'secretù' # use UTF-8
+ block:
+ - name: Create a user with some permissions on the db
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ encrypted: 'true'
+ password: "md5{{ (db_password ~ db_user1) | hash('md5')}}"
+ db: "{{ db_name }}"
+ priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP'
+ login_user: "{{ pg_user }}"
+
+ - include_tasks: pg_authid_not_readable.yml
+
+- name: Check that the user has the requested permissions (table1)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }}
+ register: result_table1
+
+- name: Check that the user has the requested permissions (table2)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }}
+ register: result_table2
+
+- name: Check that the user has the requested permissions (database)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+ register: result_database
+
+- assert:
+ that:
+ - "result_table1.stdout_lines[-1] == '(7 rows)'"
+ - "'INSERT' in result_table1.stdout"
+ - "'SELECT' in result_table1.stdout"
+ - "'UPDATE' in result_table1.stdout"
+ - "'DELETE' in result_table1.stdout"
+ - "'TRUNCATE' in result_table1.stdout"
+ - "'REFERENCES' in result_table1.stdout"
+ - "'TRIGGER' in result_table1.stdout"
+ - "result_table2.stdout_lines[-1] == '(1 row)'"
+ - "'INSERT' == '{{ result_table2.stdout_lines[-2] | trim }}'"
+ - "result_database.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user1 }}=CTc/{{ pg_user }}' in result_database.stdout_lines[-2]"
+
+- name: Add another permission for the user
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ encrypted: 'true'
+ password: "md55c8ccfd9d6711fc69a7eae647fc54f51"
+ db: "{{ db_name }}"
+ priv: 'test_table2:select'
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: Check that ansible reports it changed the user
+ assert:
+ that:
+ - result is changed
+
+- name: Check that the user has the requested permissions (table2)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }}
+ register: result_table2
+
+- assert:
+ that:
+ - "result_table2.stdout_lines[-1] == '(2 rows)'"
+ - "'INSERT' in result_table2.stdout"
+ - "'SELECT' in result_table2.stdout"
+
+#
+# Test priv setting via postgresql_privs module
+# (Depends on state from previous _user privs tests)
+#
+
+- name: Revoke a privilege
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ type: "table"
+ state: "absent"
+ roles: "{{ db_user1 }}"
+ privs: "INSERT"
+ objs: "test_table2"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+
+- name: Check that ansible reports it changed the user
+ assert:
+ that:
+ - result is changed
+
+- name: Check that the user has the requested permissions (table2)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }}
+ register: result_table2
+
+- assert:
+ that:
+ - "result_table2.stdout_lines[-1] == '(1 row)'"
+ - "'SELECT' == '{{ result_table2.stdout_lines[-2] | trim }}'"
+
+- name: Revoke many privileges on multiple tables
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ state: "absent"
+ roles: "{{ db_user1 }}"
+ privs: "INSERT,select,UPDATE,TRUNCATE,REFERENCES,TRIGGER,delete"
+ objs: "test_table2,test_table1"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+
+- name: Check that ansible reports it changed the user
+ assert:
+ that:
+ - result is changed
+
+- name: Check that permissions were revoked (table1)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }}
+ register: result_table1
+
+- name: Check that permissions were revoked (table2)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }}
+ register: result_table2
+
+- assert:
+ that:
+ - "result_table1.stdout_lines[-1] == '(0 rows)'"
+ - "result_table2.stdout_lines[-1] == '(0 rows)'"
+
+- name: Revoke database privileges
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ type: "database"
+ state: "absent"
+ roles: "{{ db_user1 }}"
+ privs: "Create,connect,TEMP"
+ objs: "{{ db_name }}"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+
+- name: Check that the database privileges were revoked (database)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+ register: result_database
+
+- assert:
+ that:
+ - "result_database.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user1 }}' not in result_database.stdout"
+
+- name: Grant database privileges
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ type: "database"
+ state: "present"
+ roles: "{{ db_user1 }}"
+ privs: "CREATE,connect"
+ objs: "{{ db_name }}"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ register: result
+
+- name: Check that ansible reports it changed the user
+ assert:
+ that:
+ - result is changed
+
+- name: Check that the user has the requested permissions (database)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+ register: result_database
+
+- assert:
+ that:
+ - "result_database.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user1 }}=Cc' in result_database.stdout"
+
+- name: Grant a single privilege on a table
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ state: "present"
+ roles: "{{ db_user1 }}"
+ privs: "INSERT"
+ objs: "test_table1"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+
+- name: Check that permissions were added (table1)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }}
+ register: result_table1
+
+- assert:
+ that:
+ - "result_table1.stdout_lines[-1] == '(1 row)'"
+ - "'{{ result_table1.stdout_lines[-2] | trim }}' == 'INSERT'"
+
+- name: Grant many privileges on multiple tables
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ state: "present"
+ roles: "{{ db_user1 }}"
+ privs: 'INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,trigger'
+ objs: "test_table2,test_table1"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+
+- name: Check that permissions were added (table1)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }}
+ register: result_table1
+
+- name: Check that permissions were added (table2)
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }}
+ register: result_table2
+
+- assert:
+ that:
+ - "result_table1.stdout_lines[-1] == '(7 rows)'"
+ - "'INSERT' in result_table1.stdout"
+ - "'SELECT' in result_table1.stdout"
+ - "'UPDATE' in result_table1.stdout"
+ - "'DELETE' in result_table1.stdout"
+ - "'TRUNCATE' in result_table1.stdout"
+ - "'REFERENCES' in result_table1.stdout"
+ - "'TRIGGER' in result_table1.stdout"
+ - "result_table2.stdout_lines[-1] == '(7 rows)'"
+ - "'INSERT' in result_table2.stdout"
+ - "'SELECT' in result_table2.stdout"
+ - "'UPDATE' in result_table2.stdout"
+ - "'DELETE' in result_table2.stdout"
+ - "'TRUNCATE' in result_table2.stdout"
+ - "'REFERENCES' in result_table2.stdout"
+ - "'TRIGGER' in result_table2.stdout"
+
+# Check passing roles with dots
+# https://github.com/ansible/ansible/issues/63204
+- name: Create roles for further tests
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: "{{ item }}"
+ loop:
+ - "{{ db_user_with_dots1 }}"
+ - "{{ db_user_with_dots2 }}"
+
+- name: Pass role with dots in its name to roles parameter
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ state: "present"
+ roles: "{{ db_user_with_dots1 }}"
+ privs: "INSERT"
+ objs: "test_table1"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+
+- name: Check that permissions were added (table1)
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user_with_dots1 }}' and table_name='test_table1'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# We don't need to check anything here, only that nothing failed
+- name: Pass role with dots in its name to target_roles parameter
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ state: "present"
+ roles: "{{ db_user_with_dots1 }}"
+ privs: "INSERT"
+ objs: TABLES
+ type: default_privs
+ target_roles: "{{ db_user_with_dots2 }}"
+ trust_input: false
+
+# Bugfix for https://github.com/ansible-collections/community.general/issues/857
+- name: Test passing lowercase PUBLIC role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ type: 'database'
+ privs: 'connect'
+ role: 'public'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT CONNECT ON database \"{{ db_name }}\" TO PUBLIC;"]
+
+#
+# Cleanup
+#
+- name: Cleanup db
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "absent"
+ login_user: "{{ pg_user }}"
+
+- name: Check that database was destroyed
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Cleanup test user
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ item }}"
+ state: 'absent'
+ login_user: "{{ pg_user }}"
+ db: postgres
+ loop:
+ - "{{ db_user1 }}"
+ - "{{ db_user2 }}"
+ - "{{ db_user3 }}"
+ - "{{ db_user_with_dots1 }}"
+ - "{{ db_user_with_dots2 }}"
+
+- name: Check that they were removed
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml
new file mode 100644
index 000000000..e0c083e90
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml
@@ -0,0 +1,102 @@
+- name: Create a high privileged user
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role1 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create a low privileged user using the newly created user
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role2 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "LOGIN"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ db: postgres
+
+- name: Create DB as session_role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ register: result
+
+- name: Create table to be able to grant privileges
+ become_user: "{{ pg_user }}"
+ become: true
+ shell: echo "CREATE TABLE test(i int); CREATE TABLE test2(i int);" | psql -AtXq "{{ db_session_role1 }}"
+
+- name: Grant SELECT with admin option on the test table to the low privileged user
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ db: "{{ db_session_role1 }}"
+ type: table
+ objs: test
+ roles: "{{ db_session_role2 }}"
+ login_user: "{{ pg_user }}"
+ privs: select
+ admin_option: true
+
+- name: Verify admin option was successful for grants
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ db: "{{ db_session_role1 }}"
+ type: table
+ objs: test
+ roles: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ privs: select
+ session_role: "{{ db_session_role2 }}"
+
+- name: Verify no grants can be granted for test2 table
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ db: "{{ db_session_role1 }}"
+ type: table
+ objs: test2
+ roles: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ privs: update
+ session_role: "{{ db_session_role2 }}"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is failed
+
+########################
+# Test trust_input param
+
+- name: Verify trust_input parameter
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ db: "{{ db_session_role1 }}"
+ type: table
+ objs: test2
+ roles: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ privs: update
+ session_role: "{{ dangerous_name }}"
+ trust_input: false
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml
new file mode 100644
index 000000000..42ece0bad
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml
@@ -0,0 +1,120 @@
+# Setup
+- name: Create a test user
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: Create a user to be given permissions and other tests
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: present
+ encrypted: true
+ password: password
+ role_attr_flags: LOGIN
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+#######################################
+# Test default_privs with target_role #
+#######################################
+
+# Test
+- name: Grant default privileges for new table objects
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ db: "{{ db_name }}"
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: "{{ db_user2 }}"
+ target_roles: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is changed
+
+- name: Check that default privileges are set
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: psql {{ db_name }} -c "SELECT defaclrole, defaclobjtype, defaclacl FROM pg_default_acl a JOIN pg_roles b ON a.defaclrole=b.oid;" -t
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}=r/{{ db_user1 }}' in '{{ result.stdout_lines[0] }}'"
+
+# Test
+- name: Revoke default privileges for new table objects
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_privs:
+ db: "{{ db_name }}"
+ state: absent
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: "{{ db_user2 }}"
+ target_roles: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is changed
+
+# Cleanup
+- name: Remove user given permissions
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: absent
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Remove user owner of objects
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ db_user3 }}"
+ state: absent
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Destroy DBs
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: absent
+ name: "{{ item }}"
+ login_user: "{{ pg_user }}"
+ loop:
+ - "{{ db_name }}"
+ - "{{ db_session_role1 }}"
+
+- name: Remove test users
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ item }}"
+ state: absent
+ db: postgres
+ login_user: "{{ pg_user }}"
+ loop:
+ - "{{ db_user1 }}"
+ - "{{ db_session_role1 }}"
+ - "{{ db_session_role2 }}"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/aliases
new file mode 100644
index 000000000..142e8aa07
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/aliases
@@ -0,0 +1,3 @@
+destructive
+shippable/posix/group1
+skip/freebsd
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/main.yml
new file mode 100644
index 000000000..507c1e234
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_publication module
+- import_tasks: postgresql_publication_initial.yml
+ when: postgres_version_resp.stdout is version('10', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml
new file mode 100644
index 000000000..584a4848b
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml
@@ -0,0 +1,436 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# The file for testing postgresql_publication module.
+
+- vars:
+ test_table1: acme1
+ test_table2: acme2
+ test_table3: acme3
+ test_pub: acme_publ
+ test_role: alice
+ dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ test_schema: acme_schema
+ test_db: acme_db
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ test_db }}'
+
+ block:
+ #################################################
+ # Test preparation, create database test objects:
+ - name: postgresql_publication - create test db
+ <<: *task_parameters
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ test_db }}'
+
+ - name: postgresql_publication - create test schema
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: '{{ test_schema }}'
+
+ - name: postgresql_publication - create test role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_role }}'
+ role_attr_flags: SUPERUSER
+
+ - name: postgresql_publication - create test tables
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ columns:
+ - id int
+ loop:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ - '{{ test_table3 }}'
+
+
+ ################
+ # Do main tests:
+
+ # Test
+ - name: postgresql_publication - create publication, check_mode
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.exists == false
+ - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES"]
+
+ # Check
+ - name: postgresql_publication - check that nothing has been changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ # Test
+ - name: postgresql_publication - create publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.exists == true
+ - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES"]
+ - result.owner == '{{ pg_user }}'
+ - result.alltables == true
+ - result.tables == []
+ - result.parameters.publish != {}
+
+ # Check
+ - name: postgresql_publication - check that the publication has been created
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+ AND pubowner = '10' AND puballtables = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Test
+ - name: postgresql_publication - drop publication, check_mode
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ state: absent
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.exists == true
+ - result.queries == ["DROP PUBLICATION \"{{ test_pub }}\""]
+ - result.owner == '{{ pg_user }}'
+ - result.alltables == true
+ - result.tables == []
+ - result.parameters.publish != {}
+
+ # Check
+ - name: postgresql_publication - check that nothing has been changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Test
+ - name: postgresql_publication - drop publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ state: absent
+ cascade: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.exists == false
+ - result.queries == ["DROP PUBLICATION \"{{ test_pub }}\" CASCADE"]
+
+ # Check
+ - name: postgresql_publication - check that publication does not exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ # Test
+ - name: postgresql_publication - create publication with tables, owner, params
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ owner: '{{ test_role }}'
+ tables:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ parameters:
+ publish: 'insert'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR TABLE \"public\".\"{{ test_table1 }}\", \"{{ test_schema }}\".\"{{ test_table2 }}\" WITH (publish = 'insert')", "ALTER PUBLICATION \"{{ test_pub }}\" OWNER TO \"{{ test_role }}\""]
+ - result.owner == '{{ test_role }}'
+ - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\""]
+ - result.parameters.publish.insert == true
+ - result.parameters.publish.delete == false
+
+ # Check 1
+ - name: postgresql_publication - check that test publication exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+ AND pubowner != '10' AND puballtables = 'f' AND pubinsert = 't' AND pubdelete = 'f'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Check 2
+ - name: postgresql_publication - check that test_table1 from schema public is in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}' AND schemaname = 'public'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Check 3
+ - name: postgresql_publication - check that test_table2 from test schema is in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}' AND schemaname = '{{ test_schema }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Test
+ - name: postgresql_publication - test trust_input parameter
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ session_role: '{{ dangerous_name }}'
+ owner: '{{ dangerous_name }}'
+ trust_input: false
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ # Test
+ - name: postgresql_publication - add table to publication, change owner, check_mode
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ owner: '{{ pg_user }}'
+ tables:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ - '{{ test_table3 }}'
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" ADD TABLE \"public\".\"{{ test_table3 }}\"", "ALTER PUBLICATION \"{{ test_pub }}\" OWNER TO \"{{ pg_user }}\""]
+ - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\""]
+
+ # Check
+ - name: postgresql_publication - check that nothing changes after the previous step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+ AND pubowner != '10' AND puballtables = 'f' AND pubinsert = 't' AND pubupdate = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ # Check
+ - name: postgresql_publication - check that 2 tables are in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 2
+
+ # Test
+ - name: postgresql_publication - add table to publication, change owner
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ owner: '{{ pg_user }}'
+ tables:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ - '{{ test_table3 }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" ADD TABLE \"public\".\"{{ test_table3 }}\"", "ALTER PUBLICATION \"{{ test_pub }}\" OWNER TO \"{{ pg_user }}\""]
+ - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\"", "\"public\".\"{{ test_table3 }}\""]
+
+ # Check 1
+ - name: postgresql_publication - check owner has been changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' AND pubowner = '10'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Check 2
+ - name: postgresql_publication - check that 3 tables are in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 3
+
+ # Test
+ - name: postgresql_publication - remove table from publication, check_mode
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ tables:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ parameters:
+ publish: 'insert'
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" DROP TABLE \"public\".\"{{ test_table3 }}\""]
+ - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\"", "\"public\".\"{{ test_table3 }}\""]
+
+ # Check 1
+ - name: postgresql_publication - check that 3 tables are in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 3
+
+ # Check 2
+ - name: postgresql_publication - check no parameters have been changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' AND pubinsert = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Test
+ - name: postgresql_publication - remove table from publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ tables:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ parameters:
+ publish: 'delete'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" DROP TABLE \"public\".\"{{ test_table3 }}\"", "ALTER PUBLICATION \"{{ test_pub }}\" SET (publish = 'delete')"]
+ - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\""]
+
+ # Check 1
+ - name: postgresql_publication - check that 2 tables are in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 2
+
+ # Check 2
+ - name: postgresql_publication - check parameter has been changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' AND pubinsert = 'f'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ always:
+ ###########
+ # Clean up:
+
+ - name: postgresql_publication - remove test db
+ <<: *task_parameters
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ test_db }}'
+ state: absent
+
+ - name: postgresql_publication - remove test role
+ <<: *task_parameters
+ postgresql_user:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: '{{ test_role }}'
+ state: absent
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test0.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test0.sql
new file mode 100644
index 000000000..e8a5ca03d
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test0.sql
@@ -0,0 +1,6 @@
+SELECT version();
+
+SELECT story FROM test_table
+ WHERE id = %s OR story = 'Данные';
+
+
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test1.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test1.sql
new file mode 100644
index 000000000..028c192d7
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test1.sql
@@ -0,0 +1,10 @@
+CREATE FUNCTION add(integer, integer) RETURNS integer
+ AS 'select $1 + $2;'
+ LANGUAGE SQL
+ IMMUTABLE
+ RETURNS NULL ON NULL INPUT;
+
+SELECT story FROM test_table
+ WHERE id = %s OR story = 'Данные';
+
+SELECT version();
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/main.yml
new file mode 100644
index 000000000..7b24dbf92
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_query module
+- import_tasks: postgresql_query_initial.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml
new file mode 100644
index 000000000..5d447d608
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml
@@ -0,0 +1,604 @@
+- vars:
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ connect_params:
+ connect_timeout: 30
+
+ block:
+
+ - name: postgresql_query - drop test table if exists
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "DROP TABLE IF EXISTS test_table;"
+ ignore_errors: true
+
+ - name: postgresql_query - create test table called test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE test_table (id int, story text);"
+ ignore_errors: true
+
+ - name: postgresql_query - insert some data into test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "INSERT INTO test_table (id, story) VALUES (1, 'first'), (2, 'second'), (3, 'third');"
+ ignore_errors: true
+
+ - name: Copy script files
+ become: true
+ copy:
+ src: '{{ item }}'
+ dest: '~{{ pg_user }}/{{ item }}'
+ owner: '{{ pg_user }}'
+ force: true
+ loop:
+ - test0.sql
+ - test1.sql
+ register: sql_file_created
+ ignore_errors: true
+
+ - name: postgresql_query - analyze test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: ANALYZE test_table
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == 'ANALYZE test_table'
+ - result.query_list == ['ANALYZE test_table']
+ - result.rowcount == 0
+ - result.statusmessage == 'ANALYZE'
+ - result.query_result == {}
+ - result.query_all_results == [{}]
+
+ - name: postgresql_query - run queries from SQL script
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ path_to_script: ~{{ pg_user }}/test0.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+ as_single_query: false
+ register: result
+ ignore_errors: true
+ when: sql_file_created
+
+ - assert:
+ that:
+ - result is not changed
+ - result.query == "\n\nSELECT story FROM test_table\n WHERE id = 1 OR story = 'Данные'"
+ - result.query_result[0].story == 'first'
+ - result.rowcount == 2
+ - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT'
+ when: sql_file_created
+
+ - name: postgresql_query - simple select query to test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM test_table
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.query == 'SELECT * FROM test_table'
+ - result.rowcount == 3
+ - result.statusmessage == 'SELECT 3' or result.statusmessage == 'SELECT'
+ - result.query_result[0].id == 1
+ - result.query_result[1].id == 2
+ - result.query_result[2].id == 3
+ - result.query_result[0].story == 'first'
+ - result.query_result[1].story == 'second'
+ - result.query_result[2].story == 'third'
+
+ - name: postgresql_query - select query with named args
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT id FROM test_table WHERE id = %(id_val)s AND story = %(story_val)s
+ named_args:
+ id_val: 1
+ story_val: first
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.query == "SELECT id FROM test_table WHERE id = 1 AND story = 'first'" or result.query == "SELECT id FROM test_table WHERE id = 1 AND story = E'first'"
+ - result.rowcount == 1
+ - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT'
+ - result.query_result[0].id == 1
+
+ - name: postgresql_query - select query with positional arguments
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT story FROM test_table WHERE id = %s AND story = %s
+ positional_args:
+ - 2
+ - second
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.query == "SELECT story FROM test_table WHERE id = 2 AND story = 'second'" or result.query == "SELECT story FROM test_table WHERE id = 2 AND story = E'second'"
+ - result.rowcount == 1
+ - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT'
+ - result.query_result[0].story == 'second'
+
+ - name: postgresql_query - simple update query
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: UPDATE test_table SET story = 'new' WHERE id = 3
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "UPDATE test_table SET story = 'new' WHERE id = 3"
+ - result.rowcount == 1
+ - result.statusmessage == 'UPDATE 1'
+ - result.query_result == {}
+
+ - name: check the previous update
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM test_table WHERE story = 'new' AND id = 3
+ register: result
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_query - simple update query in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: UPDATE test_table SET story = 'CHECK_MODE' WHERE id = 3
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "UPDATE test_table SET story = 'CHECK_MODE' WHERE id = 3"
+ - result.rowcount == 1
+ - result.statusmessage == 'UPDATE 1'
+ - result.query_result == {}
+
+ - name: check the previous update that nothing has been changed
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM test_table WHERE story = 'CHECK_MODE' AND id = 3
+ register: result
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: postgresql_query - try to update not existing row
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: UPDATE test_table SET story = 'new' WHERE id = 100
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.query == "UPDATE test_table SET story = 'new' WHERE id = 100"
+ - result.rowcount == 0
+ - result.statusmessage == 'UPDATE 0'
+ - result.query_result == {}
+
+ - name: postgresql_query - insert query
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: INSERT INTO test_table (id, story) VALUES (%s, %s)
+ positional_args:
+ - 4
+ - fourth
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "INSERT INTO test_table (id, story) VALUES (4, 'fourth')" or result.query == "INSERT INTO test_table (id, story) VALUES (4, E'fourth')"
+ - result.rowcount == 1
+ - result.statusmessage == 'INSERT 0 1'
+ - result.query_result == {}
+
+ - name: postgresql_query - truncate test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: TRUNCATE test_table
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "TRUNCATE test_table"
+ - result.rowcount == 0
+ - result.statusmessage == 'TRUNCATE TABLE'
+ - result.query_result == {}
+
+ - name: postgresql_query - alter test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: ALTER TABLE test_table ADD COLUMN foo int
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "ALTER TABLE test_table ADD COLUMN foo int"
+ - result.rowcount == 0
+ - result.statusmessage == 'ALTER TABLE'
+
+ - name: postgresql_query - vacuum without autocommit must fail
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: VACUUM
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result.failed == true
+
+ - name: postgresql_query - autocommit in check_mode must fail
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: VACUUM
+ autocommit: true
+ check_mode: true
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result.failed == true
+ - result.msg == "Using autocommit is mutually exclusive with check_mode"
+
+ - name: postgresql_query - vacuum with autocommit
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: VACUUM
+ autocommit: true
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "VACUUM"
+ - result.rowcount == 0
+ - result.statusmessage == 'VACUUM'
+ - result.query_result == {}
+
+ - name: postgresql_query - create test table for issue 59955
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_table:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: test_array_table
+ columns:
+ - arr_col int[]
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - name: postgresql_query - insert array into test table by positional args
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: INSERT INTO test_array_table (arr_col) VALUES (%s)
+ positional_args:
+ - '{{ my_list }}'
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "INSERT INTO test_array_table (arr_col) VALUES ('{1, 2, 3}')"
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - name: postgresql_query - select array from test table by passing positional_args
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM test_array_table WHERE arr_col = %s
+ positional_args:
+ - '{{ my_list }}'
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - assert:
+ that:
+ - result is not changed
+ - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'"
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - name: postgresql_query - select array from test table by passing named_args
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM test_array_table WHERE arr_col = %(arr_val)s
+ named_args:
+ arr_val:
+ - '{{ my_list }}'
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - assert:
+ that:
+ - result is not changed
+ - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'"
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - name: postgresql_query - select array from test table by passing positional_args as a string
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM test_array_table WHERE arr_col = %s
+ positional_args:
+ - '{{ my_arr|string }}'
+ trust_input: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - assert:
+ that:
+ - result is not changed
+ - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'"
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - name: postgresql_query - test trust_input parameter
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ query: SELECT version()
+ trust_input: false
+ ignore_errors: true
+ register: result
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ - name: postgresql_query - clean up
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_table:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: test_array_table
+ state: absent
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ #############################
+ # Check search_path parameter
+
+    - name: postgresql_schema - create test schemas
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_schema:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: '{{ item }}'
+ loop:
+ - query_test1
+ - query_test2
+
+    - name: postgresql_table - create test tables
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_table:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: '{{ item }}'
+ columns:
+ - id int
+ loop:
+ - 'query_test1.test1'
+ - 'query_test2.test2'
+
+ - name: postgresql_query - insert data
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'INSERT INTO {{ item }} (id) VALUES (1)'
+ search_path:
+ - query_test1
+ - query_test2
+ loop:
+ - test1
+ - test2
+
+ - name: postgresql_query - get data
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'SELECT id FROM test1'
+ search_path:
+ - query_test1
+ - query_test2
+ register: result
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_query - get data, must fail
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'SELECT id FROM test1'
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+
+ # Tests for the as_single_query option
+ - name: Run queries from SQL script as a single query
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ path_to_script: ~{{ pg_user }}/test1.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+ as_single_query: true
+ register: result
+
+ - name: >
+ Must pass. Not changed because we can only
+ check statusmessage of the last query
+ assert:
+ that:
+ - result is not changed
+ - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT'
+ - result.query_list[0] == "CREATE FUNCTION add(integer, integer) RETURNS integer\n AS 'select $1 + $2;'\n LANGUAGE SQL\n IMMUTABLE\n RETURNS NULL ON NULL INPUT;\n\nSELECT story FROM test_table\n WHERE id = %s OR story = 'Данные';\n\nSELECT version();\n"
+
+ #############################################################################
+ # Issue https://github.com/ansible-collections/community.postgresql/issues/45
+ - name: Create table containing a decimal value
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: CREATE TABLE blabla (id int, num decimal)
+
+ - name: Insert data
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: INSERT INTO blabla (id, num) VALUES (1, 1::decimal)
+
+ - name: Get data
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM blabla
+ register: result
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ #############################################################################
+ # Issue https://github.com/ansible-collections/community.postgresql/issues/47
+ - name: Get datetime.timedelta value
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT EXTRACT(epoch from make_interval(secs => 3)) AS extract"
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0]["extract"] == 3 or result.query_result[0]["extract"] == 3.0
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+ - name: Get interval value
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT make_interval(secs => 3)"
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0]["make_interval"] == "0:00:03"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+ ##############################################################################
+ # Issue https://github.com/ansible-collections/community.postgresql/issues/312
+ - name: Run several queries
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ <<: *pg_parameters
+ query:
+ - SELECT 1
+ - SELECT 1
+ - SELECT 1
+ register: result
+
+ - assert:
+ that:
+ - result.rowcount == 3
+ - result.query_result == [{"?column?": 1}]
+ - 'result.query_all_results == [[{"?column?": 1}], [{"?column?": 1}], [{"?column?": 1}]]'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/defaults/main.yml
new file mode 100644
index 000000000..ff6dd5cb9
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+db_name: 'ansible_db'
+db_user1: 'ansible_db_user1'
+db_user2: 'ansible_db_user2'
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+db_session_role1: 'session_role1'
+db_session_role2: 'session_role2'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/main.yml
new file mode 100644
index 000000000..d894dd040
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- import_tasks: postgresql_schema_session_role.yml
+
+# Initial CI tests of postgresql_schema module
+- import_tasks: postgresql_schema_initial.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml
new file mode 100644
index 000000000..58832f049
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml
@@ -0,0 +1,331 @@
+---
+
+# Setup
+- name: Create test roles
+ postgresql_user:
+ name: "{{ item }}"
+ state: present
+ encrypted: true
+ password: password
+ role_attr_flags: LOGIN
+ db: postgres
+ login_user: "{{ pg_user }}"
+ loop:
+ - "{{ db_user1 }}"
+ - "{{ db_user2 }}"
+
+- name: Create DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+# Test: CREATE SCHEMA in checkmode
+- name: Create a new schema with name "acme" in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ check_mode: true
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.schema == 'acme'
+
+- name: Check that the new schema "acme" not exists
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Test: CREATE SCHEMA
+- name: Create a new schema with name "acme"
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ login_user: "{{ pg_user }}"
+ trust_input: true
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.schema == 'acme'
+ - result.queries == [ 'CREATE SCHEMA "acme"' ]
+
+- name: Check that the new schema "acme" exists
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Test: DROP SCHEMA in checkmode
+- name: Drop schema "acme" in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ check_mode: true
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ state: absent
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+
+- name: Check that the new schema "acme" still exists
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Test: DROP SCHEMA
+- name: Drop schema "acme"
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ state: absent
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.queries == [ 'DROP SCHEMA "acme"' ]
+
+- name: Check that no schema "acme" exists
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Test: trust_input parameter
+- name: Create a new schema with potentially dangerous owner name
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ login_user: "{{ pg_user }}"
+ owner: "{{ dangerous_name }}"
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+# Checks
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
+
+# Test: CREATE SCHEMA; WITH TABLE for DROP CASCADE test
+- name: Create a new schema "acme"
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: Create table in schema for DROP CASCADE check
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "CREATE TABLE acme.table1()"
+ register: result2
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.schema == 'acme'
+ - result.queries == [ 'CREATE SCHEMA "acme"' ]
+ - result2.changed == true
+ - result2.statusmessage == 'CREATE TABLE'
+
+- name: Check that the new schema "acme" exists
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name,schema_owner FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: Check that the new table "table1" in schema 'acme' exists
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'acme' AND tablename = 'table1')"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Test: DROP SCHEMA ... CASCADE;
+- name: Drop schema "acme" with cascade
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ state: absent
+ cascade_drop: true
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.queries == [ 'DROP SCHEMA "acme" CASCADE' ]
+
+- name: Check that no schema "acme" exists
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Test: CREATE SCHEMA WITH OWNER ...;
+- name: Create a new schema "acme" with a user "{{ db_user2 }}" who will own it
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ owner: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.schema == 'acme'
+ - result.queries == [ 'CREATE SCHEMA "acme" AUTHORIZATION "{{ db_user2 }}"' ]
+
+- name: Check that the new schema "acme" exists and "{{ db_user2 }}" own it
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name,schema_owner FROM information_schema.schemata WHERE schema_name = 'acme' AND schema_owner = '{{ db_user2 }}'"
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Test: DROP SCHEMA
+- name: Drop schema "acme"
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ state: absent
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.queries == [ 'DROP SCHEMA "acme"' ]
+
+- name: Check that no schema "acme" exists
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+
+# Cleanup
+- name: Remove user
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: absent
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Destroy DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml
new file mode 100644
index 000000000..4b8af75ff
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml
@@ -0,0 +1,78 @@
+- name: Create a high privileged user
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role1 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create DB as session_role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ register: result
+
+- name: Create schema in own database
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ name: "{{ db_session_role1 }}"
+ session_role: "{{ db_session_role1 }}"
+
+- name: Create schema in own database, should be owned by session_role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ name: "{{ db_session_role1 }}"
+ owner: "{{ db_session_role1 }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Fail when creating schema in postgres database as a regular user
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: postgres
+ login_user: "{{ pg_user }}"
+ name: "{{ db_session_role1 }}"
+ session_role: "{{ db_session_role1 }}"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is failed
+
+- name: Drop test db
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: absent
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: Drop test users
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ item }}"
+ state: absent
+ login_user: "{{ pg_user }}"
+ db: postgres
+ with_items:
+ - "{{ db_session_role1 }}"
+ - "{{ db_session_role2 }}"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/defaults/main.yml
new file mode 100644
index 000000000..35e711215
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/defaults/main.yml
@@ -0,0 +1 @@
+db_default: postgres
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test0.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test0.sql
new file mode 100644
index 000000000..fb9ce5162
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test0.sql
@@ -0,0 +1,4 @@
+SELECT version();
+
+SELECT story FROM test_table
+ WHERE id = %s OR story = 'Данные';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test1.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test1.sql
new file mode 100644
index 000000000..028c192d7
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test1.sql
@@ -0,0 +1,10 @@
+CREATE FUNCTION add(integer, integer) RETURNS integer
+ AS 'select $1 + $2;'
+ LANGUAGE SQL
+ IMMUTABLE
+ RETURNS NULL ON NULL INPUT;
+
+SELECT story FROM test_table
+ WHERE id = %s OR story = 'Данные';
+
+SELECT version();
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test10.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test10.sql
new file mode 100644
index 000000000..cbfdc457a
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test10.sql
@@ -0,0 +1 @@
+SELECT EXTRACT(epoch from make_interval(secs => 3)) AS extract
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test11.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test11.sql
new file mode 100644
index 000000000..6c1596f96
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test11.sql
@@ -0,0 +1 @@
+SELECT make_interval(secs => 3)
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test12.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test12.sql
new file mode 100644
index 000000000..33bd94eba
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test12.sql
@@ -0,0 +1 @@
+INSERT INTO test2 (id) VALUES (1)
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test2.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test2.sql
new file mode 100644
index 000000000..cba55b34a
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test2.sql
@@ -0,0 +1 @@
+ANALYZE test_table
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test3.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test3.sql
new file mode 100644
index 000000000..a2ac43509
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test3.sql
@@ -0,0 +1,4 @@
+SELECT version();
+
+SELECT story FROM test_table
+ WHERE id = %(item)s OR story = 'Данные';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test4.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test4.sql
new file mode 100644
index 000000000..5ea74a292
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test4.sql
@@ -0,0 +1 @@
+INSERT INTO test_array_table (arr_col) VALUES (%s)
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test5.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test5.sql
new file mode 100644
index 000000000..e631dfb5a
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test5.sql
@@ -0,0 +1 @@
+SELECT * FROM test_array_table WHERE arr_col = %s
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test6.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test6.sql
new file mode 100644
index 000000000..48d210d4e
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test6.sql
@@ -0,0 +1 @@
+SELECT * FROM test_array_table WHERE arr_col = %(arr_val)s;
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test7.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test7.sql
new file mode 100644
index 000000000..96dfaebdc
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test7.sql
@@ -0,0 +1 @@
+INSERT INTO test1 (id) VALUES (1)
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test8.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test8.sql
new file mode 100644
index 000000000..8ca7c86dc
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test8.sql
@@ -0,0 +1 @@
+SELECT id FROM test1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test9.sql b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test9.sql
new file mode 100644
index 000000000..e52a59a3d
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/files/test9.sql
@@ -0,0 +1 @@
+SELECT * FROM blabla
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/tasks/main.yml
new file mode 100644
index 000000000..ae5922853
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_script module
+- import_tasks: postgresql_script_initial.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/tasks/postgresql_script_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/tasks/postgresql_script_initial.yml
new file mode 100644
index 000000000..729f1670a
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_script/tasks/postgresql_script_initial.yml
@@ -0,0 +1,311 @@
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ db_default }}'
+ block:
+ - name: Drop test table if exists
+ <<: *task_parameters
+ shell: psql postgres -U "{{ pg_user }}" -t -c "DROP TABLE IF EXISTS test_table;"
+ ignore_errors: true
+
+ - name: Create test table called test_table
+ <<: *task_parameters
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE test_table (id int, story text);"
+ ignore_errors: true
+
+ - name: Insert some data into test_table
+ <<: *task_parameters
+ shell: psql postgres -U "{{ pg_user }}" -t -c "INSERT INTO test_table (id, story) VALUES (1, 'first'), (2, 'second'), (3, 'third');"
+ ignore_errors: true
+
+ - name: Copy script files
+ become: true
+ copy:
+ src: '{{ item }}'
+ dest: '~{{ pg_user }}/{{ item }}'
+ owner: '{{ pg_user }}'
+ force: true
+ loop:
+ - test0.sql
+ - test1.sql
+ - test2.sql
+ - test3.sql
+ - test4.sql
+ - test5.sql
+ - test6.sql
+ - test7.sql
+ - test8.sql
+ - test9.sql
+ - test10.sql
+ - test11.sql
+ - test12.sql
+
+ - name: Analyze test_table
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: '~{{ pg_user }}/test2.sql'
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == 'ANALYZE test_table\n'
+ - result.rowcount != 0
+ - result.statusmessage == 'ANALYZE'
+ - result.query_result == {}
+
+ - name: Run queries from SQL script using positional_args
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test0.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "SELECT version();\n\nSELECT story FROM test_table\n WHERE id = 1 OR story = 'Данные';\n"
+ - result.query_result[0].story == 'first'
+ - result.rowcount == 1
+ - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT'
+
+ - name: Run queries from SQL script using named_args
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test3.sql
+ named_args:
+ item: 1
+ encoding: UTF-8
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "SELECT version();\n\nSELECT story FROM test_table\n WHERE id = 1 OR story = 'Данные';\n"
+ - result.query_result[0].story == 'first'
+ - result.rowcount == 1
+ - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT'
+
+ - name: Create test table for issue 59955
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: test_array_table
+ columns:
+ - arr_col int[]
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - name: Insert array into test table by positional args
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test4.sql
+ positional_args:
+ - '{{ my_list }}'
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "INSERT INTO test_array_table (arr_col) VALUES ('{1, 2, 3}')\n"
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - name: Select array from test table by passing positional_args
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test5.sql
+ positional_args:
+ - '{{ my_list }}'
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'\n"
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - name: Select array from test table by passing named_args
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test6.sql
+ named_args:
+ arr_val:
+ - '{{ my_list }}'
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}';\n"
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - name: Select array from test table by passing positional_args as a string
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test5.sql
+ positional_args:
+ - '{{ my_arr|string }}'
+ trust_input: true
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - assert:
+ that:
+ - result is changed
+ - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'\n"
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ - name: Test trust_input parameter
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ path: ~{{ pg_user }}/test5.sql
+ trust_input: false
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ - name: Clean up
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: test_array_table
+ state: absent
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+ #############################
+ # Check search_path parameter
+
+ - name: Create test schemas
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ loop:
+ - query_test1
+ - query_test2
+
+ - name: Create test tables
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ columns:
+ - id int
+ loop:
+ - 'query_test1.test1'
+ - 'query_test2.test2'
+
+ - name: Insert data
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/{{ item }}.sql
+ search_path:
+ - query_test1
+ - query_test2
+ loop:
+ - test7
+ - test12
+
+ - name: Get data
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test8.sql
+ search_path:
+ - query_test1
+ - query_test2
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Get data, must fail as we don't specify search_path
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test8.sql
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+
+ #############################################################################
+ # Issue https://github.com/ansible-collections/community.postgresql/issues/45
+ - name: Create table containing a decimal value
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: blabla
+ columns:
+ - id int
+ - num decimal
+
+ - name: Insert data
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: INSERT INTO blabla (id, num) VALUES (1, 1::decimal)
+
+ - name: Get data
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test9.sql
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ #############################################################################
+ # Issue https://github.com/ansible-collections/community.postgresql/issues/47
+ - name: Get datetime.timedelta value
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test10.sql
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0]["extract"] == 3 or result.query_result[0]["extract"] == 3.0
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+ - name: Get interval value
+ <<: *task_parameters
+ postgresql_script:
+ <<: *pg_parameters
+ path: ~{{ pg_user }}/test11.sql
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0]["make_interval"] == "0:00:03"
+ when: postgres_version_resp.stdout is version('10', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/defaults/main.yml
new file mode 100644
index 000000000..049b5531f
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+db_name: 'ansible_db'
+db_user1: 'ansible_db_user1'
+db_user2: 'ansible_db_user2'
+db_default: 'postgres'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/main.yml
new file mode 100644
index 000000000..b53069005
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_sequence module
+- import_tasks: postgresql_sequence_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml
new file mode 100644
index 000000000..c498033bd
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml
@@ -0,0 +1,730 @@
+---
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Preparation for tests.
+- name: postgresql_sequence - create a user to be owner of a database
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: present
+ encrypted: true
+ password: password
+ role_attr_flags: LOGIN
+ db: "{{ db_default }}"
+ login_user: "{{ pg_user }}"
+
+- name: postgresql_sequence - create DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: Create a user to be owner of a sequence
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: present
+ encrypted: true
+ password: password
+ role_attr_flags: LOGIN
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: postgresql_sequence - create a schema
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_schema
+
+####################
+# Test: create sequence in checkmode
+- name: postgresql_sequence - create a new sequence with name "foobar" in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ check_mode: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar'
+ - result.queries == ["CREATE SEQUENCE \"public\".\"foobar\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the new sequence "foobar" does not exist
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 0
+ - result.statusmessage == 'SELECT 0'
+
+####################
+# Test: create sequence
+- name: postgresql_sequence - create a new sequence with name "foobar"
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar'
+ - result.queries == ["CREATE SEQUENCE \"public\".\"foobar\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the new sequence "foobar" exists
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: drop sequence in checkmode
+- name: postgresql_sequence - drop a sequence called foobar
+ become_user: "{{ pg_user }}"
+ become: true
+ check_mode: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar
+ state: absent
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar'
+ - result.queries == ["DROP SEQUENCE \"public\".\"foobar\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar" still exists
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: drop sequence
+- name: postgresql_sequence - drop a sequence called foobar
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar
+ state: absent
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar'
+ - result.queries == ["DROP SEQUENCE \"public\".\"foobar\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar" does not exist
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 0
+
+####################
+# Test: drop nonexistent sequence
+- name: postgresql_sequence - drop a sequence called foobar which does not exist
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar
+ state: absent
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is not changed
+ - result.sequence == 'foobar'
+ - result.queries == []
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar" does not exist
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 0
+
+####################
+# Test: create sequence with options
+- name: postgresql_sequence - create a descending sequence called foobar_desc, starting at 101 and which cycles between 1 to 1000
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_desc
+ increment: -1
+ start: 101
+ minvalue: 1
+ maxvalue: 1000
+ cycle: true
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_desc'
+ - result.increment == '-1'
+ - result.minvalue == '1'
+ - result.maxvalue == '1000'
+ - result.cycle == 'YES'
+ - result.queries == ["CREATE SEQUENCE \"public\".\"foobar_desc\" INCREMENT BY -1 MINVALUE 1 MAXVALUE 1000 START WITH 101 CYCLE"]
+
+# Real SQL check
+- name: postgresql_sequence - check that the new sequence "foobar_desc" exists
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar_desc'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: rename a sequence in checkmode
+- name: postgresql_sequence - rename an existing sequence named foobar_desc to foobar_with_options
+ become_user: "{{ pg_user }}"
+ become: true
+ check_mode: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_desc
+ rename_to: foobar_with_options
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_desc'
+ - result.newname == 'foobar_with_options'
+ - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_desc\" RENAME TO \"foobar_with_options\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar_desc" still exists and is not renamed
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar_desc'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: rename a sequence
+- name: postgresql_sequence - rename an existing sequence named foobar_desc to foobar_with_options
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_desc
+ rename_to: foobar_with_options
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_desc'
+ - result.newname == 'foobar_with_options'
+ - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_desc\" RENAME TO \"foobar_with_options\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the renamed sequence "foobar_with_options" exists
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar_with_options'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: change schema of a sequence in checkmode
+- name: postgresql_sequence - change schema of an existing sequence from public to foobar_schema
+ become_user: "{{ pg_user }}"
+ become: true
+ check_mode: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_with_options
+ newschema: foobar_schema
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_with_options'
+ - result.schema == 'public'
+ - result.newschema == 'foobar_schema'
+ - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_with_options\" SET SCHEMA \"foobar_schema\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar_with_options" still exists in the old schema
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name,sequence_schema FROM information_schema.sequences WHERE sequence_name = 'foobar_with_options' AND sequence_schema = 'public'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: change schema of a sequence
+- name: postgresql_sequence - change schema of an existing sequence from public to foobar_schema
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_with_options
+ newschema: foobar_schema
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_with_options'
+ - result.schema == 'public'
+ - result.newschema == 'foobar_schema'
+ - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_with_options\" SET SCHEMA \"foobar_schema\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar_with_options" exists in new schema
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name,sequence_schema FROM information_schema.sequences WHERE sequence_name = 'foobar_with_options' AND sequence_schema = 'foobar_schema'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: change owner of a sequence in checkmode
+- name: postgresql_sequence - change owner of an existing sequence from "{{ pg_user }}" to "{{ db_user1 }}"
+ become_user: "{{ pg_user }}"
+ become: true
+ check_mode: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_with_options
+ schema: foobar_schema
+ owner: "{{ db_user1 }}"
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_with_options'
+ - result.owner == "{{ pg_user }}"
+ - result.queries == ["ALTER SEQUENCE \"foobar_schema\".\"foobar_with_options\" OWNER TO \"{{ db_user1 }}\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar_with_options" has still the old owner
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT c.relname,a.rolname,n.nspname
+ FROM pg_class as c
+ JOIN pg_authid as a on (c.relowner = a.oid)
+ JOIN pg_namespace as n on (c.relnamespace = n.oid)
+ WHERE c.relkind = 'S' and
+ c.relname = 'foobar_with_options' and
+ n.nspname = 'foobar_schema' and
+ a.rolname = '{{ pg_user }}'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: change owner of a sequence
+- name: postgresql_sequence - change owner of an existing sequence from "{{ pg_user }}" to "{{ db_user1 }}"
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_with_options
+ schema: foobar_schema
+ owner: "{{ db_user1 }}"
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_with_options'
+ - result.owner == "{{ pg_user }}"
+ - result.queries == ["ALTER SEQUENCE \"foobar_schema\".\"foobar_with_options\" OWNER TO \"{{ db_user1 }}\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar_with_options" has a new owner
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT c.relname,a.rolname,n.nspname
+ FROM pg_class as c
+ JOIN pg_authid as a on (c.relowner = a.oid)
+ JOIN pg_namespace as n on (c.relnamespace = n.oid)
+ WHERE c.relkind = 'S' and
+ c.relname = 'foobar_with_options' and
+ n.nspname = 'foobar_schema' and
+ a.rolname = '{{ db_user1 }}'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: drop sequence with cascade
+
+# CREATE SEQUENCE seq1;
+# CREATE TABLE t1 (f1 INT NOT NULL DEFAULT nextval('seq1'));
+# DROP SEQUENCE seq1 CASCADE;
+- name: postgresql_sequence - create sequence for drop cascade test
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: seq1
+
+- name: postgresql_sequence - create table which use sequence for drop cascade test
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: t1
+ columns:
+ - f1 INT NOT NULL DEFAULT nextval('seq1')
+
+####################
+# Test: drop sequence with cascade in checkmode
+- name: postgresql_sequence - drop with cascade a sequence called seq1
+ become_user: "{{ pg_user }}"
+ become: true
+ check_mode: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: seq1
+ state: absent
+ cascade: true
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'seq1'
+ - result.queries == ["DROP SEQUENCE \"public\".\"seq1\" CASCADE"]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "seq1" still exists
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'seq1'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: drop sequence with cascade
+- name: postgresql_sequence - drop with cascade a sequence called seq1
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: seq1
+ state: absent
+ cascade: true
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'seq1'
+ - result.queries == ["DROP SEQUENCE \"public\".\"seq1\" CASCADE"]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "seq1" does not exist
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'seq1'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 0
+
+####################
+# Test: create sequence with owner in checkmode
+- name: postgresql_sequence - create a new sequence with name "foobar2" with owner "{{ db_user2 }}"
+ become_user: "{{ pg_user }}"
+ become: true
+ check_mode: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar2
+ owner: "{{ db_user2 }}"
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar2'
+ - result.queries == ["CREATE SEQUENCE \"public\".\"foobar2\"", "ALTER SEQUENCE \"public\".\"foobar2\" OWNER TO \"ansible_db_user2\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the new sequence "foobar2" does not exist
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar2'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 0
+
+####################
+# Test: create sequence with owner
+- name: postgresql_sequence - create a new sequence with name "foobar2" with owner "{{ db_user2 }}"
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar2
+ owner: "{{ db_user2 }}"
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar2'
+ - result.queries == ["CREATE SEQUENCE \"public\".\"foobar2\"", "ALTER SEQUENCE \"public\".\"foobar2\" OWNER TO \"ansible_db_user2\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the new sequence "foobar2" exists
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar2'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_sequence - check that the sequence "foobar2" has owner "{{ db_user2 }}"
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT c.relname,a.rolname,n.nspname
+ FROM pg_class as c
+ JOIN pg_authid as a on (c.relowner = a.oid)
+ JOIN pg_namespace as n on (c.relnamespace = n.oid)
+ WHERE c.relkind = 'S' and
+ c.relname = 'foobar2' and
+ n.nspname = 'public' and
+ a.rolname = '{{ db_user2 }}'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: create sequence with trust_input
+- name: postgresql_sequence - check that trust_input works as expected
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: 'just_a_name"; SELECT * FROM information_schema.tables; --'
+ trust_input: false
+ owner: "{{ db_user2 }}"
+ ignore_errors: true
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+# Cleanup
+- name: postgresql_sequence - destroy DB
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: remove test roles
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ state: absent
+ login_db: "{{ db_default }}"
+ login_user: "{{ pg_user }}"
+ name: "{{ item }}"
+ loop:
+ - "{{ db_user1 }}"
+ - "{{ db_user2 }}"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/main.yml
new file mode 100644
index 000000000..3f16eb0d6
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/main.yml
@@ -0,0 +1,11 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of the postgresql_set module
+- include_tasks: postgresql_set_initial.yml
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- include_tasks: options_coverage.yml
+ when: postgres_version_resp.stdout is version('9.6', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml
new file mode 100644
index 000000000..c4598d2a9
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml
@@ -0,0 +1,71 @@
+# Test code for the postgresql_set module
+# Copyright: (c) 2021, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
+ block:
+ - name: Define a test setting map
+ set_fact:
+ setting_map:
+ allow_system_table_mods: on
+ archive_command: /bin/true
+ archive_timeout: 10min
+ autovacuum_work_mem: '-1'
+ backend_flush_after: 0
+ autovacuum_vacuum_scale_factor: 0.5
+ client_encoding: UTF-8
+ bgwriter_delay: 400
+ maintenance_work_mem: 32mb
+ effective_cache_size: 1024kB
+ shared_buffers: 1GB
+ wal_level: replica
+ log_statement: mod
+ track_functions: none
+ shared_preload_libraries: 'pg_stat_statements, pgaudit'
+ log_line_prefix: 'db=%d,user=%u,app=%a,client=%h '
+
+ # Check mode:
+ - name: Set settings in check mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: '{{ item.key }}'
+ value: '{{ item.value }}'
+ check_mode: true
+ with_dict: '{{ setting_map }}'
+
+ # Actual mode:
+ - name: Set settings in actual mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: '{{ item.key }}'
+ value: '{{ item.value }}'
+ with_dict: '{{ setting_map }}'
+
+ # https://github.com/ansible-collections/community.postgresql/issues/78
+ - name: Test param with comma containing values
+ <<: *task_parameters
+ shell: "grep shared_preload_libraries {{ pg_auto_conf }}"
+ register: result
+
+ - assert:
+ that:
+ - result.stdout == "shared_preload_libraries = 'pg_stat_statements, pgaudit'"
+
+ # Test for single-value params with commas and spaces in value
+ - name: Test single-value param with commas and spaces in value
+ <<: *task_parameters
+ shell: "grep log_line_prefix {{ pg_auto_conf }}"
+ register: result
+
+ - assert:
+ that:
+ - result.stdout == "log_line_prefix = 'db=%d,user=%u,app=%a,client=%h '"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml
new file mode 100644
index 000000000..ddff916aa
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml
@@ -0,0 +1,442 @@
+# Test code for the postgresql_set module
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Notice: assertions are different for Ubuntu 16.04 and FreeBSD because they don't work
+# correctly for these tests. There are some strange behaviors specific to Shippable CI.
+# However, I checked all the points manually (including Ubuntu 16.04 and FreeBSD)
+# and they worked as expected.
+
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
+ block:
+ - name: postgresql_set - preparation to the next step
+ <<: *task_parameters
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ reset: true
+
+ #####################
+ # Testing check_mode:
+ - name: postgresql_set - get work_mem initial value
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SHOW work_mem
+ register: before
+
+ - name: postgresql_set - set work_mem (restart is not required), check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ value: 12MB
+ register: set_wm
+ check_mode: true
+
+ - assert:
+ that:
+ - set_wm.name == 'work_mem'
+ - set_wm.changed == true
+ - set_wm.prev_val_pretty == before.query_result[0].work_mem
+ - set_wm.value_pretty == '12MB'
+ - set_wm.restart_required == false
+
+ - name: postgresql_set - get work_mem value to check, must be the same as initial
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SHOW work_mem
+ register: after
+
+ - assert:
+ that:
+ - before.query_result[0].work_mem == after.query_result[0].work_mem
+ ######
+ #
+
+ - name: postgresql_set - set work_mem (restart is not required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ value: 12mb
+ register: set_wm
+
+ - assert:
+ that:
+ - set_wm.name == 'work_mem'
+ - set_wm.changed == true
+ - set_wm.value_pretty == '12MB'
+ - set_wm.value_pretty != set_wm.prev_val_pretty
+ - set_wm.restart_required == false
+ - set_wm.value.value == 12582912
+ - set_wm.value.unit == 'b'
+ when:
+ - ansible_distribution != "Ubuntu"
+ - ansible_distribution_major_version != '16'
+ - ansible_distribution != "FreeBSD"
+
+ - assert:
+ that:
+ - set_wm.name == 'work_mem'
+ - set_wm.changed == true
+ - set_wm.restart_required == false
+ when:
+ - ansible_distribution == "Ubuntu"
+ - ansible_distribution_major_version == '16'
+
+ - name: postgresql_set - reset work_mem (restart is not required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ reset: true
+ register: reset_wm
+
+ - assert:
+ that:
+ - reset_wm.name == 'work_mem'
+ - reset_wm.changed == true
+ - reset_wm.value_pretty != reset_wm.prev_val_pretty
+ - reset_wm.restart_required == false
+ - reset_wm.value.value != '12582912'
+ when:
+ - ansible_distribution != "Ubuntu"
+ - ansible_distribution_major_version != '16'
+ - ansible_distribution != "FreeBSD"
+
+ - assert:
+ that:
+ - reset_wm.name == 'work_mem'
+ - reset_wm.changed == true
+ - reset_wm.restart_required == false
+ when:
+ - ansible_distribution == "Ubuntu"
+ - ansible_distribution_major_version == '16'
+
+ - name: postgresql_set - reset work_mem again to check that nothing changed (restart is not required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ reset: true
+ register: reset_wm2
+
+ - assert:
+ that:
+ - reset_wm2.name == 'work_mem'
+ - reset_wm2.changed == false
+ - reset_wm2.value_pretty == reset_wm2.prev_val_pretty
+ - reset_wm2.restart_required == false
+ when:
+ - ansible_distribution != "Ubuntu"
+ - ansible_distribution_major_version != '16'
+
+ - assert:
+ that:
+ - reset_wm2.name == 'work_mem'
+ - reset_wm2.changed == false
+ - reset_wm2.restart_required == false
+ when:
+ - ansible_distribution == "Ubuntu"
+ - ansible_distribution_major_version == '16'
+
+ - name: postgresql_set - preparation to the next step
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ value: 14MB
+
+ - name: postgresql_set - set work_mem to initial state (restart is not required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ value: default
+ register: def_wm
+
+ - assert:
+ that:
+ - def_wm.name == 'work_mem'
+ - def_wm.changed == true
+ - def_wm.value_pretty != def_wm.prev_val_pretty
+ - def_wm.restart_required == false
+ - def_wm.value.value != '14680064'
+ when:
+ - ansible_distribution != "Ubuntu"
+ - ansible_distribution_major_version != '16'
+ - ansible_distribution != 'FreeBSD'
+
+ - assert:
+ that:
+ - def_wm.name == 'work_mem'
+ - def_wm.changed == true
+ - def_wm.restart_required == false
+ when:
+ - ansible_distribution == "Ubuntu"
+ - ansible_distribution_major_version == '16'
+ - ansible_distribution != 'FreeBSD'
+
+ - name: postgresql_set - set shared_buffers (restart is required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: shared_buffers
+ value: 111MB
+ register: set_shb
+
+ - assert:
+ that:
+ - set_shb.name == 'shared_buffers'
+ - set_shb.changed == true
+ - set_shb.restart_required == true
+
+ # We don't check value.unit because it is none
+ - name: postgresql_set - set autovacuum (enabled by default, restart is not required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: autovacuum
+ value: off
+ register: set_aut
+
+ - assert:
+ that:
+ - set_aut.name == 'autovacuum'
+ - set_aut.changed == true
+ - set_aut.restart_required == false
+ - set_aut.value.value == 'off'
+
+ # Test check_mode, step 1. At the previous test we set autovacuum = 'off'
+ - name: postgresql - try to change autovacuum again in check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: autovacuum
+ value: on
+ register: set_aut
+ check_mode: true
+
+ - assert:
+ that:
+ - set_aut.name == 'autovacuum'
+ - set_aut.changed == true
+ - set_aut.restart_required == false
+ - set_aut.value.value == 'off'
+
+ # Test check_mode, step 2
+ - name: postgresql - check that autovacuum wasn't actually changed after change in check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: autovacuum
+ value: off
+ register: set_aut
+ check_mode: true
+
+ - assert:
+ that:
+ - set_aut.name == 'autovacuum'
+ - set_aut.changed == false
+ - set_aut.restart_required == false
+ - set_aut.value.value == 'off'
+
+ # Additional check by SQL query:
+ - name: postgresql_set - get autovacuum value to check, must be off
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SHOW autovacuum
+ register: result
+
+ - assert:
+ that:
+ - result.query_result[0].autovacuum == 'off'
+
+ # Test check_mode, step 3. It is different from
+ # the prev test - it runs without check_mode: true.
+ # Before the check_mode tests autovacuum was off
+ - name: postgresql - check that autovacuum wasn't actually changed after change in check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: autovacuum
+ value: off
+ register: set_aut
+
+ - assert:
+ that:
+ - set_aut.name == 'autovacuum'
+ - set_aut.changed == false
+ - set_aut.restart_required == false
+ - set_aut.value.value == 'off'
+
+ #################
+ # Bugfix of 67377
+ - name: archive command with mb
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ trust_input: true
+ name: archive_command
+ value: 'test ! -f /mnt/postgres/mb/%f && cp %p /mnt/postgres/mb/%f'
+
+ # Check:
+ - name: check value
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: select reset_val from pg_settings where name = 'archive_command'
+ register: result
+
+ - assert:
+ that:
+ - result.query_result.0.reset_val == "test ! -f /mnt/postgres/mb/%f && cp %p /mnt/postgres/mb/%f"
+
+ #############################
+ # Check trust_input parameter
+ - name: postgresql_set - check trust_input
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: shared_buffers
+ value: 111MB
+ trust_input: false
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ ###############################################################################
+ # Bugfix of https://github.com/ansible-collections/community.general/issues/775
+ - name: postgresql_set - turn on archive mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: archive_mode
+ value: 'on'
+
+ - name: Restart PostgreSQL
+ become: true
+ service:
+ name: "{{ postgresql_service }}"
+ state: restarted
+
+ - name: postgresql_set - set empty string as value
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: archive_command
+ value: ''
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_set - set empty string as value again
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: archive_command
+ value: ''
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_set - set empty string as value again in check mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: archive_command
+ value: ''
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Pass non-existent parameter
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: Timezone
+ value: utc
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('No such parameter')
+
+ #######################################################################
+ # https://github.com/ansible-collections/community.postgresql/issues/48
+ - name: Pass a parameter containing b in check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: archive_command
+ value: '/usr/bin/touch %f'
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Pass a parameter containing b
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: archive_command
+ value: '/usr/bin/touch %f'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Pass another parameter containing B in check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: track_activity_query_size
+ value: '4096B'
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Pass another parameter containing b in check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: track_activity_query_size
+ value: '2048b'
+ register: result
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/main.yml
new file mode 100644
index 000000000..d44aab9de
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_slot module
+# Physical replication slots are available since PostgreSQL 9.4
+- import_tasks: postgresql_slot_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml
new file mode 100644
index 000000000..23a1cfb0e
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml
@@ -0,0 +1,735 @@
+---
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: postgresql_slot - set max_replication_slots
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_set:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: max_replication_slots
+ value: '10'
+
+- name: postgresql_slot - set wal_level to logical
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_set:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: wal_level
+ value: logical
+
+# To avoid CI timeouts
+- name: Kill all postgres processes
+ shell: 'pkill -u {{ pg_user }}'
+ become: true
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+ ignore_errors: true
+
+- name: postgresql_slot - stop PostgreSQL
+ become: true
+ service:
+ name: "{{ postgresql_service }}"
+ state: stopped
+ when: (ansible_facts.distribution_major_version != '8' and ansible_facts.distribution == 'CentOS') or ansible_facts.distribution != 'CentOS'
+
+- name: postgresql_slot - pause between stop and start PostgreSQL
+ ansible.builtin.pause:
+ seconds: 5
+
+- name: postgresql_slot - start PostgreSQL
+ become: true
+ service:
+ name: "{{ postgresql_service }}"
+ state: started
+
+#
+# state=present
+#
+
+# check_mode
+- name: postgresql_slot - create slot in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot0
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# true mode
+- name: postgresql_slot - create physical slot
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot0
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_create_physical_replication_slot('slot0', false)"]
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_create_physical_replication_slot('slot0')"]
+ when: postgres_version_resp.stdout is version('9.6', '<')
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that the slot exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0' and slot_type = 'physical'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# check mode
+- name: postgresql_slot - try create physical slot again in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot0
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0' and slot_type = 'physical'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# true mode
+- name: postgresql_slot - try create physical slot again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot0
+ slot_type: physical
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0' and slot_type = 'physical'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# immediately_reserve
+#
+
+- name: postgresql_slot - create physical slot with immediately_reserve
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot1
+ immediately_reserve: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_create_physical_replication_slot('slot1', true)"]
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that the slot exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical' and restart_lsn is not NULL"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+#
+# slot_type: logical
+#
+# available from postgresql 10
+#
+# on RedHat family tests failed:
+# ERROR: could not access file "test_decoding": No such file or directory
+# "Your distrib did not compile the test decoder."
+# So the tests are restricted by Ubuntu because of the module functionality
+# depends on PostgreSQL server version only.
+
+# check_mode
+- name: postgresql_slot - create slot in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ slot_type: logical
+ check_mode: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# true mode
+- name: postgresql_slot - create logical slot
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ slot_type: logical
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_create_logical_replication_slot('slot2', 'test_decoding')"]
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that the slot exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# check mode
+- name: postgresql_slot - try create logical slot again in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ slot_type: logical
+ check_mode: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# true mode
+- name: postgresql_slot - try create logical slot again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ slot_type: logical
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+#
+# output_plugin: test_decoding
+#
+
+- name: postgresql_slot - create logical slot with output_plugin
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot3
+ slot_type: logical
+ output_plugin: test_decoding
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_create_logical_replication_slot('slot3', 'test_decoding')"]
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that the slot exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot3' and slot_type = 'logical' and plugin = 'test_decoding'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+#
+# state: absent for logical slots
+#
+
+# check_mode
+- name: postgresql_slot - drop logical slot in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ state: absent
+ check_mode: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# true mode
+- name: postgresql_slot - drop logical slot
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ state: absent
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_drop_replication_slot('slot2')"]
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that the slot does not exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# check mode
+- name: postgresql_slot - try drop logical slot again in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ state: absent
+ check_mode: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# true mode
+- name: postgresql_slot - try drop logical slot again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ state: absent
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+#
+# state: absent for physical slots
+#
+
+# check_mode
+- name: postgresql_slot - drop physical slot in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot1
+ state: absent
+ check_mode: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# true mode
+- name: postgresql_slot - drop physical slot
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot1
+ state: absent
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_drop_replication_slot('slot1')"]
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that the slot does not exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# check mode
+- name: postgresql_slot - try drop physical slot again in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot1
+ state: absent
+ check_mode: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# true mode
+- name: postgresql_slot - try drop physical slot again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot1
+ state: absent
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check trust input
+- name: postgresql_slot - try using a bad name
+ postgresql_slot:
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ db: postgres
+ name: slot1
+ trust_input: false
+ register: result
+ ignore_errors: true
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- name: postgresql_slot - check that using a dangerous name fails
+ assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+#
+# clean up
+#
+- name: postgresql_slot - clean up
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: "{{ item }}"
+ state: absent
+ ignore_errors: true
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+ with_items:
+ - slot0
+ - slot3
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/aliases
new file mode 100644
index 000000000..786e05315
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/group1
+skip/freebsd
+skip/rhel
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml
new file mode 100644
index 000000000..8709694ba
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml
@@ -0,0 +1,13 @@
+pg_user: postgres
+db_default: postgres
+
+test_table1: acme1
+test_pub: first_publication
+test_pub2: second_publication
+replication_role: logical_replication
+replication_pass: alsdjfKJKDf1#
+test_db: acme_db
+test_subscription: test
+test_role1: alice
+test_role2: bob
+conn_timeout: 100
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml
new file mode 100644
index 000000000..d72e4d23c
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_replication
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml
new file mode 100644
index 000000000..e440e8c80
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml
@@ -0,0 +1,12 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial tests of postgresql_subscription module:
+
+- import_tasks: setup_publication.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18'
+
+- import_tasks: postgresql_subscription_initial.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml
new file mode 100644
index 000000000..b464c3dbe
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml
@@ -0,0 +1,672 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- vars:
+ dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ test_db }}'
+
+ block:
+
+ - name: Create roles to test owner parameter
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ item }}'
+ role_attr_flags: SUPERUSER,LOGIN
+ loop:
+ - '{{ test_role1 }}'
+ - '{{ test_role2 }}'
+
+ ####################
+ # Test mode: present
+ ####################
+ - name: Create subscription
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ connparams:
+ host: 127.0.0.1
+ port: '{{ primary_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["CREATE SUBSCRIPTION test CONNECTION 'host=127.0.0.1 port={{ primary_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }}' PUBLICATION {{ test_pub }}"]
+ - result.exists == true
+ - result.initial_state == {}
+ - result.final_state.owner == '{{ pg_user }}'
+ - result.final_state.enabled == true
+ - result.final_state.publications == ["{{ test_pub }}"]
+ - result.final_state.synccommit == true
+ - result.final_state.slotname == '{{ test_subscription }}'
+ - result.final_state.conninfo.dbname == '{{ test_db }}'
+ - result.final_state.conninfo.host == '127.0.0.1'
+ - result.final_state.conninfo.port == {{ primary_port }}
+ - result.final_state.conninfo.user == '{{ replication_role }}'
+ - result.final_state.conninfo.password == '{{ replication_pass }}'
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ ###################
+ # Test mode: absent
+ ###################
+
+ - name: Drop subscription in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: absent
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }}"]
+ - result.final_state == result.initial_state
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop subscription
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }}"]
+ - result.final_state != result.initial_state
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ ##################
+ # Test owner param
+ ##################
+
+ - name: Create with owner
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ owner: '{{ test_role1 }}'
+ connparams:
+ host: 127.0.0.1
+ port: '{{ primary_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result.final_state.owner == '{{ test_role1 }}'
+ - result.queries[1] == 'ALTER SUBSCRIPTION {{ test_subscription }} OWNER TO "{{ test_role1 }}"'
+
+ - name: Try to set this owner again
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ owner: '{{ test_role1 }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is not changed
+ - result.initial_state == result.final_state
+ - result.final_state.owner == '{{ test_role1 }}'
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription AS s
+ JOIN pg_catalog.pg_roles AS r ON s.subowner = r.oid
+ WHERE subname = '{{ test_subscription }}' and r.rolname = '{{ test_role1 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set another owner in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ owner: '{{ test_role2 }}'
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.initial_state == result.final_state
+ - result.final_state.owner == '{{ test_role1 }}'
+ - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} OWNER TO "{{ test_role2 }}"']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription AS s
+ JOIN pg_catalog.pg_roles AS r ON s.subowner = r.oid
+ WHERE subname = '{{ test_subscription }}' and r.rolname = '{{ test_role1 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set another owner
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ owner: '{{ test_role2 }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.initial_state != result.final_state
+ - result.final_state.owner == '{{ test_role2 }}'
+ - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} OWNER TO "{{ test_role2 }}"']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription AS s
+ JOIN pg_catalog.pg_roles AS r ON s.subowner = r.oid
+ WHERE subname = '{{ test_subscription }}' and r.rolname = '{{ test_role2 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ ##########################
+ # Test trust_input param #
+ ##########################
+
+ - name: Test trust_input parameter
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ session_role: '{{ dangerous_name }}'
+ owner: '{{ test_role1 }}'
+ trust_input: false
+ connparams:
+ host: 127.0.0.1
+ port: '{{ primary_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ ##############
+ # Test cascade
+ ##############
+
+ - name: Drop subscription cascade in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: absent
+ cascade: true
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }} CASCADE"]
+ - result.final_state == result.initial_state
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop subscription cascade
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: absent
+ cascade: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }} CASCADE"]
+ - result.final_state != result.initial_state
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ ###########################
+ # Test subsparams parameter
+ ###########################
+
+ - name: Create subscription with subsparams
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ connparams:
+ host: 127.0.0.1
+ port: '{{ primary_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ subsparams:
+ enabled: false
+ synchronous_commit: false
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["CREATE SUBSCRIPTION test CONNECTION 'host=127.0.0.1 port={{ primary_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }}' PUBLICATION {{ test_pub }} WITH (enabled = false, synchronous_commit = false)"]
+ - result.exists == true
+ - result.final_state.enabled == false
+ - result.final_state.synccommit == false
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'
+ AND subenabled = 'f' AND subsynccommit = 'false'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Enable changed params
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ publications: '{{ test_pub }}'
+ subsparams:
+ enabled: true
+ synchronous_commit: true
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} ENABLE", "ALTER SUBSCRIPTION {{ test_subscription }} SET (synchronous_commit = true)"]
+ - result.exists == true
+ - result.final_state.enabled == true
+ - result.final_state.synccommit == true
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'
+ AND subenabled = 't' AND subsynccommit = 'true'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Enable the same params again
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ publications: '{{ test_pub }}'
+ subsparams:
+ enabled: true
+ synchronous_commit: true
+ trust_input: false
+
+ - assert:
+ that:
+ - result is not changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == []
+ - result.exists == true
+ - result.final_state == result.initial_state
+ - result.final_state.enabled == true
+ - result.final_state.synccommit == true
+
+ ##########################
+ # Test change publications
+ ##########################
+
+ - name: Change publications in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications:
+ - '{{ test_pub }}'
+ - '{{ test_pub2 }}'
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.final_state.publications == result.initial_state.publications
+ - result.final_state.publications == ['{{ test_pub }}']
+ - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} SET PUBLICATION {{ test_pub }}, {{ test_pub2 }}']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'
+ AND subpublications = '{"{{ test_pub }}"}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Change publications
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications:
+ - '{{ test_pub }}'
+ - '{{ test_pub2 }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.final_state.publications != result.initial_state.publications
+ - result.final_state.publications == ['{{ test_pub }}', '{{ test_pub2 }}']
+ - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} SET PUBLICATION {{ test_pub }}, {{ test_pub2 }}']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'
+ AND subpublications = '{"{{ test_pub }}", "{{ test_pub2 }}"}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Change publications with the same values again
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications:
+ - '{{ test_pub }}'
+ - '{{ test_pub2 }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is not changed
+ - result.name == '{{ test_subscription }}'
+ - result.final_state.publications == result.initial_state.publications
+ - result.final_state.publications == ['{{ test_pub }}', '{{ test_pub2 }}']
+ - result.queries == []
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'
+ AND subpublications = '{"{{ test_pub }}", "{{ test_pub2 }}"}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ ######################
+ # Test update conninfo
+ ######################
+
+ - name: Change conninfo in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ connparams:
+ host: 127.0.0.1
+ port: '{{ primary_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ connect_timeout: '{{ conn_timeout }}'
+ trust_input: false
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} CONNECTION 'host=127.0.0.1 port={{ primary_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }} connect_timeout={{ conn_timeout }}'"]
+ - result.initial_state.conninfo == result.final_state.conninfo
+
+ - name: Change conninfo
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ connparams:
+ host: 127.0.0.1
+ port: '{{ primary_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ connect_timeout: '{{ conn_timeout }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} CONNECTION 'host=127.0.0.1 port={{ primary_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }} connect_timeout={{ conn_timeout }}'"]
+ - result.initial_state.conninfo != result.final_state.conninfo
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT * FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.query_result[0].subconninfo == "host=127.0.0.1 port={{ primary_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }} connect_timeout={{ conn_timeout }}"
+
+ - name: Try to change conninfo again with the same values
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ connparams:
+ host: 127.0.0.1
+ port: '{{ primary_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ connect_timeout: '{{ conn_timeout }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is not changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == []
+ - result.initial_state.conninfo == result.final_state.conninfo
+ - result.final_state.conninfo.connect_timeout == {{ conn_timeout }}
+
+ ####################
+ # Test state refresh
+ ####################
+
+ - name: Refresh in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: refresh
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} REFRESH PUBLICATION"]
+
+ - name: Refresh
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: refresh
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} REFRESH PUBLICATION"]
+
+ ##########
+ # Clean up
+ ##########
+ - name: Drop subscription
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: absent
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml
new file mode 100644
index 000000000..712b3701d
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml
@@ -0,0 +1,85 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Preparation for further tests of postgresql_subscription module.
+
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ test_db }}'
+
+ block:
+ - name: postgresql_publication - create test db
+ <<: *task_parameters
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ login_port: '{{ item }}'
+ maintenance_db: '{{ db_default }}'
+ name: '{{ test_db }}'
+ loop:
+ - '{{ primary_port }}'
+ - '{{ replica_port }}'
+
+ - name: postgresql_publication - create test role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ login_port: '{{ item }}'
+ name: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ role_attr_flags: LOGIN,REPLICATION
+ loop:
+ - '{{ primary_port }}'
+ - '{{ replica_port }}'
+
+ - name: postgresql_publication - create test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ login_port: '{{ item }}'
+ name: '{{ test_table1 }}'
+ columns:
+ - id int
+ loop:
+ - '{{ primary_port }}'
+ - '{{ replica_port }}'
+
+ - name: postgresql_publication - create publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ login_port: '{{ primary_port }}'
+ name: '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.exists == true
+ - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES"]
+ - result.owner == '{{ pg_user }}'
+ - result.alltables == true
+ - result.tables == []
+ - result.parameters.publish != {}
+
+ - name: postgresql_publication - create one more publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ login_port: '{{ primary_port }}'
+ name: '{{ test_pub2 }}'
+
+ - name: postgresql_publication - check the publication was created
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ primary_port }}'
+ query: >
+ SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+ AND pubowner = '10' AND puballtables = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/aliases
new file mode 100644
index 000000000..0d91b7de0
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group5
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/main.yml
new file mode 100644
index 000000000..3534c73b0
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_table module
+- import_tasks: postgresql_table_initial.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml
new file mode 100644
index 000000000..db0f2732e
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml
@@ -0,0 +1,899 @@
+# Test code for the postgresql_table module
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create a role for tests:
+- name: postgresql_table - create a role for tests
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: alice
+
+- name: postgresql_table - create test schema
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: postgres
+ login_user: "{{ pg_user }}"
+ name: acme
+
+#
+# Check table creation
+#
+
+# Create a simple table in check_mode:
+- name: postgresql_table - create table in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ login_db: postgres
+ login_port: 5432
+ login_user: "{{ pg_user }}"
+ name: test1
+ owner: alice
+ columns: id int
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.table == 'test1'
+ - result.queries == ['CREATE TABLE "test1" (id int)', 'ALTER TABLE "test1" OWNER TO "alice"']
+ - result.state == 'absent'
+
+# Check that the table doesn't exist after the previous step, rowcount must be 0
+- name: postgresql_table - check that table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test1'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Create a simple table:
+- name: postgresql_table - create table
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ login_db: postgres
+ login_port: 5432
+ login_user: "{{ pg_user }}"
+ name: test1
+ owner: alice
+ columns: id int
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.table == 'test1'
+ - result.queries == ['CREATE TABLE "test1" (id int)', 'ALTER TABLE "test1" OWNER TO "alice"']
+ - result.state == 'present'
+ - result.storage_params == []
+ - result.tablespace == ""
+ - result.owner == "alice"
+
+# Check that the table exists after the previous step, rowcount must be 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test1'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Check that the tableowner is alice
+- name: postgresql_table - check that table owner is alice
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_tables WHERE tablename = 'test1' AND tableowner = 'alice'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# Check create table like another table
+#
+
+# Create a table LIKE another table without any additional parameters in check_mode:
+- name: postgresql_table - create table like in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ like: test1
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.table == 'test2'
+ - result.queries == ['CREATE TABLE "test2" (LIKE "test1")']
+ - result.state == 'absent'
+
+# Check that the table doesn't exist after the previous step, rowcount must be 0
+- name: postgresql_table - check that table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Create a table LIKE another table without any additional parameters:
+- name: postgresql_table - create table like
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ like: test1
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.table == 'test2'
+ - result.queries == ['CREATE TABLE "test2" (LIKE "test1")']
+ - result.state == 'present'
+ - result.storage_params == []
+ - result.tablespace == ""
+ - result.owner == "{{ pg_user }}"
+
+# Check that the table exists after the previous step, rowcount must be 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# Check drop table
+#
+
+# Drop a table in check_mode:
+- name: postgresql_table - drop table in check_mode
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ state: absent
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP TABLE "test2"']
+ - result.state == 'present'
+ - result.storage_params == []
+ - result.tablespace == ""
+ - result.owner == "{{ pg_user }}"
+
+# Check that the table exists after the previous step, rowcount must be 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Drop a table:
+- name: postgresql_table - drop table
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ state: absent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP TABLE "test2"']
+ - result.state == 'absent'
+
+# Check that the table doesn't exist after the previous step, rowcount must be 0
+- name: postgresql_table - check that table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Create a table like another table including:
+- name: postgresql_table - create table like with including indexes
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ like: test1
+ including: indexes
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE TABLE "test2" (LIKE "test1" INCLUDING indexes)']
+ - result.state == 'present'
+ - result.storage_params == []
+ - result.tablespace == ""
+ - result.owner == "{{ pg_user }}"
+
+# Check to create table if it exists:
+- name: postgresql_table - try to create existing table again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ like: test1
+ including: indexes
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+
+# Drop the table to prepare for the next step:
+- name: postgresql_table - drop table
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ state: absent
+ register: result
+ ignore_errors: true
+
+# Try to drop non existing table:
+- name: postgresql_table - try drop dropped table again
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ state: absent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+
+#
+# Change ownership
+#
+
+# Create user to prepare for the next step:
+- name: postgresql_table - create the new user test_user
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: test_user
+ state: present
+ ignore_errors: true
+
+# Try to change owner to test_user in check_mode
+- name: postgresql_table - change table ownership to test_user in check_mode
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test1
+ owner: test_user
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result.owner == 'alice'
+ - result.queries == ['ALTER TABLE "test1" OWNER TO "test_user"']
+ - result.state == 'present'
+ - result is changed
+
+# Check that the tableowner was not changed to test_user
+- name: postgresql_table - check that table owner was not changed
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_tables WHERE tablename = 'test1' AND tableowner = 'test_user'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# Try to change owner to test_user
+- name: postgresql_table - change table ownership to test_user
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test1
+ owner: test_user
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.owner == 'test_user'
+ - result.queries == ['ALTER TABLE "test1" OWNER TO "test_user"']
+ - result.state == 'present'
+ - result is changed
+
+# Check that the tableowner was changed to test_user
+- name: postgresql_table - check that table owner was changed
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_tables WHERE tablename = 'test1' AND tableowner = 'test_user'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# Additional storage parameters
+#
+
+# Create a table with additional storage parameters:
+- name: postgresql_table - create table with storage_params
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test3
+ columns:
+ - id int
+ - name text
+ storage_params:
+ - fillfactor=10
+ - autovacuum_analyze_threshold=1
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.state == 'present'
+ - result.queries == ['CREATE TABLE "test3" (id int,name text) WITH (fillfactor=10,autovacuum_analyze_threshold=1)']
+ - result.storage_params == [ "fillfactor=10", "autovacuum_analyze_threshold=1" ]
+
+# Check storage parameters
+- name: postgresql_table - check storage parameters
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT reloptions FROM pg_class WHERE relname = 'test3'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.query_result[0].reloptions == ["fillfactor=10", "autovacuum_analyze_threshold=1"]
+#
+# Check truncate table
+#
+
+# Insert a row to test table:
+- name: postgresql_table - insert a row
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "INSERT INTO test3 (id, name) VALUES (1, 'first')"
+
+# Truncate a table in check_mode:
+- name: postgresql_table - truncate table
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test3
+ truncate: true
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['TRUNCATE TABLE "test3"']
+ - result.state == "present"
+
+# Check the row exists:
+- name: postgresql_table - check that row exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT * FROM test3 WHERE id = '1'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Truncate a table. It always returns changed == true
+# because it always creates a new table with the same schema and drops the old table:
+- name: postgresql_table - truncate table
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test3
+ truncate: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['TRUNCATE TABLE "test3"']
+ - result.state == "present"
+
+# Check the row exists:
+- name: postgresql_table - check that row doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT * FROM test3 WHERE id = '1'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+#
+# Check rename table
+#
+
+# Rename a table in check_mode.
+# In check_mode test4 won't exist after the following task,
+# so result.state == 'absent' for the table with this name
+- name: postgresql_table - rename table in check_mode
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test3
+ rename: test4
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLE "test3" RENAME TO "test4"']
+ - result.state == "absent"
+
+# Check that the table exists after the previous step, rowcount must be 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test3'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Rename a table:
+- name: postgresql_table - rename table
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test3
+ rename: test4
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLE "test3" RENAME TO "test4"']
+ - result.state == "present"
+
+# Check that the table test 3 doesn't exist after the previous step, rowcount must be - 0
+- name: postgresql_table - check that table test3 doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test3'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Check that the table test 4 exists after the previous step, rowcount must be - 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test4'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# Check create unlogged table
+#
+
+# Create unlogged table in check_mode:
+- name: postgresql_table - create unlogged table in check_mode
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test5
+ unlogged: true
+ register: result
+ ignore_errors: true
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE UNLOGGED TABLE "test5" ()']
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+# Check that the table doesn't exist after the previous step, rowcount must be - 0
+- name: postgresql_table - check that table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test5'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Create unlogged table:
+- name: postgresql_table - create unlogged table
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test5
+ unlogged: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE UNLOGGED TABLE "test5" ()']
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+# Check that the table exists after the previous step, rowcount must be - 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test5'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+# Drop table CASCADE:
+- name: postgresql_table - drop table cascade
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test5
+ state: absent
+ cascade: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP TABLE "test5" CASCADE']
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+# Check that the table doesn't exist after the previous step, rowcount must be - 0
+- name: postgresql_table - check that table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test5'"
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+#
+# Create, drop, and rename table in a specific schema:
+#
+- name: postgresql_table - create table in a specific schema
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: acme.test_schema_table
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE TABLE "acme"."test_schema_table" ()']
+
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test_schema_table' and schemaname = 'acme'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_table - try to create a table with the same name and schema again
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: acme.test_schema_table
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: postgresql_table - create a table in the default schema for the next test
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test_schema_table
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: postgresql_table - drop the table from schema acme
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: postgres.acme.test_schema_table
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP TABLE "postgres"."acme"."test_schema_table"']
+
+- name: postgresql_table - check that the table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test_schema_table' and schemaname = 'acme'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_table - try to drop the table from schema acme again
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: acme.test_schema_table
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: postgresql_table - check that the table with the same name in schema public exists
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test_schema_table' and schemaname = 'public'"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_table - rename the table that contains a schema name
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: public.test_schema_table
+ rename: new_test_schema_table
+ trust_input: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLE "public"."test_schema_table" RENAME TO "new_test_schema_table"']
+
+############################
+# Test trust_input parameter
+- name: postgresql_table - check trust_input
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: postgres.acme.test_schema_table
+ state: absent
+ trust_input: false
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+#
+# Clean up
+#
+- name: postgresql_table - drop test schema
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_schema:
+ database: postgres
+ login_user: "{{ pg_user }}"
+ name: acme
+ state: absent
+ cascade_drop: true
+
+- name: postgresql_table - drop test role
+ become_user: "{{ pg_user }}"
+ become: true
+ postgresql_user:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: "{{ item }}"
+ state: absent
+ loop:
+ - test_user
+ - alice
+ ignore_errors: true
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/defaults/main.yml
new file mode 100644
index 000000000..1eb5b843f
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+test_tablespace_path: "/ssd"
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/main.yml
new file mode 100644
index 000000000..21a47ee3b
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_tablespace module
+- import_tasks: postgresql_tablespace_initial.yml
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml
new file mode 100644
index 000000000..b8e6d0b54
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml
@@ -0,0 +1,245 @@
+- name: postgresql_tablespace - drop dir for test tablespace
+ become: true
+ file:
+ path: '{{ test_tablespace_path }}'
+ state: absent
+ ignore_errors: true
+
+- name: postgresql_tablespace - disable selinux
+ become: true
+ shell: setenforce 0
+ ignore_errors: true
+
+- name: postgresql_tablespace - create dir for test tablespace
+ become: true
+ file:
+ path: '{{ test_tablespace_path }}'
+ state: directory
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ mode: '0700'
+ ignore_errors: true
+
+- name: postgresql_tablespace - create test role to test change ownership
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_user:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: bob
+ state: present
+ ignore_errors: true
+
+- name: postgresql_tablespace - create second test role (alice) to test change ownership
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_user:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: alice
+ state: present
+ ignore_errors: true
+
+- name: postgresql_tablespace - create a new tablespace called acme and set bob as its owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ owner: bob
+ location: /ssd
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.owner == 'bob'
+ - result.queries == ["CREATE TABLESPACE \"acme\" LOCATION '/ssd'", "ALTER TABLESPACE \"acme\" OWNER TO \"bob\""]
+ - result.state == 'present'
+ - result.tablespace == 'acme'
+ - result.options == {}
+ - result.location == '/ssd'
+
+- name: postgresql_tablespace - try to create the same tablespace with different location
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ location: /another-ssd
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.msg == "Tablespace 'acme' exists with different location '/ssd'"
+
+- name: postgresql_tablespace - change tablespace owner to alice
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ owner: alice
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.owner == 'alice'
+ - result.queries == ["ALTER TABLESPACE \"acme\" OWNER TO \"alice\""]
+ - result.state == 'present'
+ - result.tablespace == 'acme'
+ - result.options == {}
+
+- name: postgresql_tablespace - try to change tablespace owner to alice again to be sure that nothing changes
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ owner: alice
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.owner == 'alice'
+ - result.queries == []
+ - result.state == 'present'
+ - result.tablespace == 'acme'
+ - result.options == {}
+
+- name: postgresql_tablespace - change tablespace options
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ set:
+ seq_page_cost: 4
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.owner == 'alice'
+ - result.queries == ["ALTER TABLESPACE \"acme\" SET (seq_page_cost = '4')"]
+ - result.state == 'present'
+ - result.tablespace == 'acme'
+ - result.options.seq_page_cost == '4'
+ when: postgres_version_resp.stdout is version('9.0', '>=')
+
+- name: postgresql_tablespace - reset seq_page_cost option
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ login_db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ set:
+ seq_page_cost: reset
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER TABLESPACE \"acme\" RESET (seq_page_cost)"]
+ when: postgres_version_resp.stdout is version('9.0', '>=')
+
+- name: postgresql_tablespace - reset seq_page_cost option again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ set:
+ seq_page_cost: reset
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('9.0', '>=')
+
+- name: postgresql_tablespace - rename tablespace
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ rename_to: foo
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.newname == 'foo'
+ - result.queries == ["ALTER TABLESPACE \"acme\" RENAME TO \"foo\""]
+
+- name: postgresql_tablespace - rename tablespace to potentially dangerous name
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: foo
+ rename_to: '{{ dangerous_name }}'
+ trust_input: false
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
+
+- name: postgresql_tablespace - drop tablespace
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: foo
+ state: absent
+ trust_input: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.state == 'absent'
+ - result.queries == ["DROP TABLESPACE \"foo\""]
+
+- name: postgresql_tablespace - try to drop nonexistent tablespace
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: foo
+ state: absent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.msg == "Tries to drop nonexistent tablespace 'foo'"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/aliases
new file mode 100644
index 000000000..a4c92ef85
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml
new file mode 100644
index 000000000..dbcbea120
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml
@@ -0,0 +1,4 @@
+db_name: 'ansible_db'
+db_user1: 'ansible_db_user1'
+db_user2: 'ansible_db_user2'
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml
new file mode 100644
index 000000000..150d26efd
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml
@@ -0,0 +1,12 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_user module:
+- import_tasks: postgresql_user_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+# General tests:
+- import_tasks: postgresql_user_general.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml
new file mode 100644
index 000000000..cde95b0c6
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml
@@ -0,0 +1,802 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Integration tests for postgresql_user module.
+
+- vars:
+ test_user: hello.user.with.dots
+ test_user2: hello
+ test_group1: group1
+ test_group2: group2
+ test_table: test
+ test_comment1: 'comment1'
+ test_comment2: 'comment2'
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
+ block:
+ #
+ # Common tests
+ #
+ - name: Create role in check_mode
+ <<: *task_parameters
+ check_mode: true
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Create role in actual mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Add a comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment1 }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment1 }}'"]
+
+ - name: check the comment
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment
+ FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0].comment == '{{ test_comment1 }}'
+
+ - name: Try to add the same comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment1 }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Try to add another comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment2 }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment2 }}'"]
+
+ - name: check the comment
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment
+ FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0].comment == '{{ test_comment2 }}'
+
+ - name: Try to create role again in check_mode
+ <<: *task_parameters
+ check_mode: true
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to create role again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop role in check_mode
+ <<: *task_parameters
+ check_mode: true
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user actually exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop role in actual mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Try to drop role in check mode again
+ <<: *task_parameters
+ check_mode: true
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Try to drop role in actual mode again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ #
+ # password, no_password_changes, encrypted, expires parameters
+ #
+
+ - name: Create role with password, passed as hashed md5
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: md59543f1d82624df2b31672ec0f7050460
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check that the user exist with a proper password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Test no_password_changes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: u123
+ no_password_changes: true
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+
+ - name: Check that nothing changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Storing unencrypted passwords is not available from PostgreSQL 10
+ - name: Change password, passed as unencrypted
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: myunencryptedpass
+ encrypted: false
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - name: Check that the user exist with the unencrypted password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'myunencryptedpass'"
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - name: Change password, explicit encrypted=true
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: myunencryptedpass
+ encrypted: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check that the user exist with encrypted password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword != 'myunencryptedpass'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Change rolvaliduntil attribute
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ expires: 'Jan 31 2020'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolvaliduntil::text like '2020-01-31%'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to set the same rolvaliduntil value again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ expires: 'Jan 31 2020'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check that nothing changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolvaliduntil::text like '2020-01-31%'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ #
+ # role_attr_flags
+ #
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: CREATEROLE,CREATEDB
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 't' and rolcreatedb = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set the same role attributes again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: CREATEROLE,CREATEDB
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 't' and rolcreatedb = 't'
+
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: NOCREATEROLE,NOCREATEDB
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 'f' and rolcreatedb = 'f'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: NOCREATEROLE,NOCREATEDB
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 'f' and rolcreatedb = 'f'
+
+ #
+ # priv
+ #
+ - name: Create test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ columns:
+ - id int
+
+ - name: Insert data to test table
+ <<: *task_parameters
+ postgresql_query:
+ query: "INSERT INTO {{ test_table }} (id) VALUES ('1')"
+ <<: *pg_parameters
+
+ - name: Check that test_user is not allowed to read the data
+ <<: *task_parameters
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ session_role: '{{ test_user }}'
+ query: 'SELECT * FROM {{ test_table }}'
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - "'permission denied' in result.msg"
+
+ - name: Grant privileges
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ priv: '{{ test_table }}:SELECT'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Check that test_user is allowed to read the data
+ <<: *task_parameters
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ session_role: '{{ test_user }}'
+ query: 'SELECT * FROM {{ test_table }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant the same privileges again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ priv: '{{ test_table }}:SELECT'
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Remove test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ state: absent
+
+ #
+ # fail_on_user
+ #
+ - name: Create role for test
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user2 }}'
+
+ - name: Create test table, set owner as test_user
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ owner: '{{ test_user2 }}'
+
+ - name: Test fail_on_user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user2 }}'
+ state: absent
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'Unable to remove user'
+
+ - name: Test fail_on_user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ fail_on_user: false
+
+ - assert:
+ that:
+ - result is not changed
+
+ #
+ # Test groups parameter
+ #
+ - name: Create test group
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+
+ - name: Create role test_group1 and grant test_group2 to test_group1 in check_mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_group1 }}'
+ - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"']
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Create role test_group1 and grant test_group2 to test_group1
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_group1 }}'
+ - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"']
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant test_group2 to test_group1 again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_group1 }}'
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant groups to existent role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ groups:
+ - '{{ test_group1 }}'
+ - '{{ test_group2 }}'
+ trust_input: false
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+ - result.queries == ['GRANT "{{ test_group1 }}" TO "{{ test_user }}"', 'GRANT "{{ test_group2 }}" TO "{{ test_user }}"']
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM pg_group WHERE groname in ('{{ test_group1 }}', '{{ test_group2 }}') AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 2
+
+ ########################
+ # Test trust_input param
+
+ - name: Create role with potentially dangerous name, don't trust
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ dangerous_name }}'
+ trust_input: false
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
+
+ - name: Create role with potentially dangerous name, trust
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ dangerous_name }}'
+
+ - assert:
+ that:
+ - result is changed
+
+ ########################################################################
+ # https://github.com/ansible-collections/community.postgresql/issues/122
+
+ - name: Create role with SELECT
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: 'issue122'
+ priv: 'pg_catalog.pg_stat_database:SELECT'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Create role with SELECT again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: 'issue122'
+ priv: 'pg_catalog.pg_stat_database:SELECT'
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ always:
+ #
+ # Clean up
+ #
+ - name: Drop test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ state: absent
+
+ - name: Drop test user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ test_user }}'
+ - '{{ test_user2 }}'
+ - '{{ test_group1 }}'
+ - '{{ test_group2 }}'
+ - '{{ dangerous_name }}'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml
new file mode 100644
index 000000000..5d057d2dd
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml
@@ -0,0 +1,156 @@
+#
+# Create and destroy user, test 'password' and 'encrypted' parameters
+#
+# unencrypted values are not supported on newer versions
+# do not run the "encrypted: false" tests on PostgreSQL 10+
+- ansible.builtin.set_fact:
+ encryption_values:
+ - 'true'
+
+- ansible.builtin.set_fact:
+ encryption_values: '{{ encryption_values + ["false"]}}'
+ when: postgres_version_resp.stdout is version('10', '<=')
+
+- include_tasks: test_password.yml
+ vars:
+ encrypted: '{{ loop_item }}'
+ db_password1: 'secretù' # use UTF-8
+ loop: '{{ encryption_values }}'
+ loop_control:
+ loop_var: loop_item
+
+# BYPASSRLS role attribute was introduced in PostgreSQL 9.5, so
+# we want to test attribute management differently depending
+# on the version.
+- ansible.builtin.set_fact:
+ bypassrls_supported: "{{ postgres_version_resp.stdout is version('9.5.0', '>=') }}"
+
+# test 'no_password_change' and 'role_attr_flags' parameters
+- include_tasks: test_no_password_change.yml
+ vars:
+ no_password_changes: '{{ loop_item }}'
+ loop:
+ - 'true'
+ - 'false'
+ loop_control:
+ loop_var: loop_item
+
+### TODO: fail_on_user
+
+#
+# Test login_user functionality
+#
+- name: Create a user to test login module parameters
+ become: true
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: "present"
+ encrypted: 'true'
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ trust_input: false
+ db: postgres
+
+- name: Create db
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that database created
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- ansible.builtin.assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Create a user
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: "present"
+ encrypted: 'true'
+ password: "md55c8ccfd9d6711fc69a7eae647fc54f51"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+ trust_input: false
+
+- name: Check that it was created
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres
+ register: result
+
+- ansible.builtin.assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Grant database privileges
+ postgresql_privs:
+ type: "database"
+ state: "present"
+ roles: "{{ db_user2 }}"
+ privs: "CREATE,connect"
+ objs: "{{ db_name }}"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that the user has the requested permissions (database)
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+ register: result_database
+
+- ansible.builtin.assert:
+ that:
+ - "result_database.stdout_lines[-1] == '(1 row)'"
+ - "db_user2 ~ '=Cc' in result_database.stdout"
+
+- name: Remove user
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: 'absent'
+ priv: "ALL"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+ trust_input: false
+
+- name: Check that they were removed
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres
+ register: result
+
+- ansible.builtin.assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Destroy DB
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that database was destroyed
+ become: true
+ become_user: "{{ pg_user }}"
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- ansible.builtin.assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml
new file mode 100644
index 000000000..41ecbe61f
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml
@@ -0,0 +1,167 @@
+- vars:
+ task_parameters: &task_parameters
+ become_user: "{{ pg_user }}"
+ become: true
+ register: result
+ postgresql_parameters: &parameters
+ db: postgres
+ name: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+ block:
+
+ - name: Create a user with all role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "SUPERUSER,CREATEROLE,CREATEDB,INHERIT,LOGIN{{ bypassrls_supported | ternary(',BYPASSRLS', '') }}"
+ no_password_changes: '{{ no_password_changes }}' # no_password_changes is ignored when user doesn't already exist
+
+ - name: Check that the user has the requested role attributes
+ <<: *task_parameters
+ shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin {{ bypassrls_supported | ternary(\", 'bypassrls:'||rolbypassrls\", '') }} from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:t' in result.stdout_lines[-2]"
+ - "'createrole:t' in result.stdout_lines[-2]"
+ - "'create:t' in result.stdout_lines[-2]"
+ - "'inherit:t' in result.stdout_lines[-2]"
+ - "'login:t' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check that the user has the requested role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "not bypassrls_supported or 'bypassrls:t' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Modify a user to have no role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }}"
+ no_password_changes: '{{ no_password_changes }}'
+
+ - name: Check that ansible reports it modified the role
+ assert:
+ that:
+ - result is changed
+
+ - name: "Check that the user doesn't have any attribute"
+ <<: *task_parameters
+ shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:f' in result.stdout_lines[-2]"
+ - "'createrole:f' in result.stdout_lines[-2]"
+ - "'create:f' in result.stdout_lines[-2]"
+ - "'inherit:f' in result.stdout_lines[-2]"
+ - "'login:f' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check that the user has the requested role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "not bypassrls_supported or 'bypassrls:f' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Try to add an invalid attribute
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }},INVALID"
+ no_password_changes: '{{ no_password_changes }}'
+ ignore_errors: true
+
+ - name: Check that ansible reports failure
+ assert:
+ that:
+ - result is not changed
+ - result is failed
+ - "result.msg == 'Invalid role_attr_flags specified: INVALID'"
+
+ - name: Modify a single role attribute on a user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "LOGIN"
+ no_password_changes: '{{ no_password_changes }}'
+
+ - name: Check that ansible reports it modified the role
+ assert:
+ that:
+ - result is changed
+
+ - name: Check the role attributes
+ <<: *task_parameters
+ shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:f' in result.stdout_lines[-2]"
+ - "'createrole:f' in result.stdout_lines[-2]"
+ - "'create:f' in result.stdout_lines[-2]"
+ - "'inherit:f' in result.stdout_lines[-2]"
+ - "'login:t' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check the role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: echo "select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "( postgres_version_resp.stdout is version('9.5.0', '<')) or 'bypassrls:f' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Check that using same attribute a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "LOGIN"
+ no_password_changes: '{{ no_password_changes }}'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - name: Check there isn't any update reported
+ assert:
+ that:
+ - result is not changed
+
+ - name: Cleanup the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: 'absent'
+ no_password_changes: '{{ no_password_changes }}' # user deletion: no_password_changes is ignored
+
+ - name: Check that user was removed
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+ always:
+ - name: Cleanup the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: 'absent'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_password.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_password.yml
new file mode 100644
index 000000000..aece258fd
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_password.yml
@@ -0,0 +1,429 @@
+- vars:
+ task_parameters: &task_parameters
+ become_user: "{{ pg_user }}"
+ become: true
+ register: result
+ postgresql_query_parameters: &query_parameters
+ db: postgres
+ login_user: "{{ pg_user }}"
+ postgresql_parameters: &parameters
+ <<: *query_parameters
+ name: "{{ db_user1 }}"
+
+ block:
+ - name: 'Check that PGOPTIONS environment variable is effective (1/2)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ ignore_errors: true
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - name: 'Check that PGOPTIONS environment variable is effective (2/2)'
+ assert:
+ that:
+ - "{{ result is failed }}"
+
+ - name: 'Create a user (password encrypted: {{ encrypted }})'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: '{{ encrypted }}'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - block: &changed # block is only used here in order to be able to define YAML anchor
+ - name: Check that ansible reports it was created
+ assert:
+ that:
+ - "{{ result is changed }}"
+
+ - name: Check that it was created
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+ - name: Check that creating user a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: '{{ encrypted }}'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - block: &not_changed # block is only used here in order to be able to define YAML anchor
+ - name: Check that ansible reports no change
+ assert:
+ that:
+ - "{{ result is not changed }}"
+
+ - name: 'Define an expiration time'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ expires: '2025-01-01'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Redefine the same expiration time'
+ <<: *task_parameters
+ postgresql_user:
+ expires: '2025-01-01'
+ <<: *parameters
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - block:
+
+ - name: 'Using MD5-hashed password: check that password not changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: 'true'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'ENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'true'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'UNENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'false'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Redefine the same expiration time and password (encrypted)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ encrypted: 'true'
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ expires: '2025-01-01'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password changed when using another cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: 'prefix{{ db_password1 }}'
+ encrypted: 'true'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: "Using MD5-hashed password: check that password changed when using another md5 hash with 'ENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ ('prefix1' ~ db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'true'
+
+ - <<: *changed
+
+ - name: "Using MD5-hashed password: check that password changed when using md5 hash with 'UNENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ ('prefix2' ~ db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'false'
+ register: change_pass_unencrypted
+ failed_when:
+ - change_pass_unencrypted is failed
+        # newer versions of psycopg2 no longer support unencrypted passwords, so we ignore that error
+ - '"UNENCRYPTED PASSWORD is no longer supported" not in change_pass_unencrypted.msg'
+
+ - <<: *changed
+
+ - name: 'Using MD5-hashed password: check that password changed when clearing the password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'true'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using MD5-hashed password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'true'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'false'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password changed when using a cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: 'true'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ when: encrypted == 'true' and postgres_version_resp.stdout is version('14', '<')
+
+ - block:
+
+ - name: 'Using cleartext password: check that password not changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'false'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Redefine the same expiration time and password (not encrypted)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'false'
+ expires: '2025-01-01'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password changed when using another cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "changed{{ db_password1 }}"
+ encrypted: 'false'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using cleartext password: check that password changed when clearing the password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'false'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using cleartext password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'false'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'true'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'false'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ when: encrypted == 'false'
+
+ # start of block scram-sha-256
+ # scram-sha-256 password encryption type is supported since PostgreSQL 10
+ - when: postgres_version_resp.stdout is version('10', '>=')
+ block:
+
+ - name: 'Using cleartext password with scram-sha-256: resetting password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ""
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - name: 'Using cleartext password with scram-sha-256: check that password is changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ # ansible postgresql_user module interface does not (yet) support forcing password_encryption
+ # type value, we'll have to hack it in env variable to force correct encryption
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+ - <<: *changed
+
+ - name: 'Using cleartext password with scram-sha-256: ensure password is properly encrypted'
+ <<: *task_parameters
+ postgresql_query:
+ <<: *query_parameters
+ query: select * from pg_authid where rolname=%s and rolpassword like %s
+ positional_args:
+ - '{{ db_user1 }}'
+ - 'SCRAM-SHA-256$%'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: 'Using cleartext password with scram-sha-256: check that password is not changed when using the same password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password with scram-sha-256: check that password is changed when using another cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "changed{{ db_password1 }}"
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+ - <<: *changed
+
+ - name: 'Using cleartext password with scram-sha-256: check that password is changed when clearing the password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+ - <<: *changed
+
+ - name: 'Using cleartext password with scram-sha-256: check that password is not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+ - <<: *not_changed
+
+ # end of block scram-sha-256
+
+ - name: Remove user
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
+
+ - <<: *changed
+
+ - name: Check that they were removed
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+ - name: Check that removing user a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ always:
+ - name: Remove user
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/aliases b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/aliases
new file mode 100644
index 000000000..786e05315
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/group1
+skip/freebsd
+skip/rhel
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml
new file mode 100644
index 000000000..f697cefd3
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml
@@ -0,0 +1,12 @@
+pg_user: postgres
+db_default: postgres
+
+test_table1: acme1
+test_table2: acme2
+test_table3: acme3
+test_idx1: idx1
+test_idx2: idx2
+test_func1: func1
+test_func2: func2
+test_func3: func3
+test_schema1: schema1
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml
new file mode 100644
index 000000000..4ce5a5837
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml
new file mode 100644
index 000000000..fa47fdc58
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial tests of postgresql_user_obj_stat_info module:
+- import_tasks: postgresql_user_obj_stat_info.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml
new file mode 100644
index 000000000..62f72d9ec
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml
@@ -0,0 +1,222 @@
+---
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: true
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ db_default }}'
+
+ block:
+ # Preparation:
+ # 0. create test schema
+ # 1. create test tables
+ # 2. create test indexes
+ # 3. create test functions
+ # 4. enable track_functions and restart
+
+ - name: Create schema
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: '{{ test_schema1 }}'
+
+ - name: Create test tables
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ columns:
+ - id int
+ loop:
+ - '{{ test_table1 }}'
+ - '{{ test_table2 }}'
+
+ - name: Create test table in another schema
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_schema1 }}.{{ test_table3 }}'
+
+ - name: Create test indexes
+ <<: *task_parameters
+ postgresql_idx:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ table: '{{ test_table1 }}'
+ columns:
+ - id
+ loop:
+ - '{{ test_idx1 }}'
+ - '{{ test_idx2 }}'
+
+  - name: Set track_functions (restart is required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: track_functions
+ value: all
+
+ # To avoid CI timeouts
+ - name: Kill all postgres processes
+ shell: 'pkill -u {{ pg_user }}'
+ become: true
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+ ignore_errors: true
+
+ - name: Stop PostgreSQL
+ become: true
+ service:
+ name: "{{ postgresql_service }}"
+ state: stopped
+ when: (ansible_facts.distribution_major_version != '8' and ansible_facts.distribution == 'CentOS') or ansible_facts.distribution != 'CentOS'
+
+  - name: Pause between stopping and starting PostgreSQL
+ pause:
+ seconds: 5
+
+ - name: Start PostgreSQL
+ become: true
+ service:
+ name: "{{ postgresql_service }}"
+ state: started
+
+ - name: Create test functions
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'CREATE FUNCTION {{ item }}() RETURNS boolean AS $$ BEGIN RETURN 1; END; $$ LANGUAGE PLPGSQL'
+ loop:
+ - '{{ test_func1 }}'
+ - '{{ test_func2 }}'
+ - '{{ test_schema1 }}.{{ test_func3 }}'
+
+ - name: Touch test functions
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'SELECT {{ item }}()'
+ loop:
+ - '{{ test_func1 }}'
+ - '{{ test_func2 }}'
+ - '{{ test_schema1 }}.{{ test_func3 }}'
+
+ #######
+ # Tests
+ #######
+ # 0. Without filter
+ - name: Collect all stats
+ <<: *task_parameters
+ postgresql_user_obj_stat_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result is not changed
+ - result.tables.public.{{ test_table1 }}.size == 0
+        - result.tables.public.{{ test_table2 }}.size == 0
+ - result.tables.{{ test_schema1 }}.{{ test_table3 }}.size == 0
+ - result.functions.public.{{ test_func1 }}.calls == 1
+ - result.functions.public.{{ test_func2 }}.calls == 1
+ - result.functions.{{ test_schema1 }}.{{ test_func3 }}.calls == 1
+ - result.indexes.public.{{ test_idx1 }}.idx_scan == 0
+ - result.indexes.public.{{ test_idx2 }}.idx_scan == 0
+
+ # 1. With filter
+ - name: Collect stats with filter
+ <<: *task_parameters
+ postgresql_user_obj_stat_info:
+ <<: *pg_parameters
+ filter: tables, indexes
+
+ - assert:
+ that:
+ - result is not changed
+ - result.tables.public.{{ test_table1 }}.size == 0
+        - result.tables.public.{{ test_table2 }}.size == 0
+ - result.tables.{{ test_schema1 }}.{{ test_table3 }}.size == 0
+ - result.functions == {}
+ - result.indexes.public.{{ test_idx1 }}.idx_scan == 0
+ - result.indexes.public.{{ test_idx2 }}.idx_scan == 0
+
+ # 2. With schema
+ - name: Collect stats for objects in certain schema
+ <<: *task_parameters
+ postgresql_user_obj_stat_info:
+ <<: *pg_parameters
+ schema: public
+
+ - assert:
+ that:
+ - result is not changed
+ - result.tables.public.{{ test_table1 }}.size == 0
+        - result.tables.public.{{ test_table2 }}.size == 0
+ - result.indexes.public.{{ test_idx1 }}.idx_scan == 0
+ - result.indexes.public.{{ test_idx2 }}.idx_scan == 0
+ - result.functions.public.{{ test_func1 }}.calls == 1
+ - result.functions.public.{{ test_func2 }}.calls == 1
+ - result.tables.{{ test_schema1 }} is not defined
+
+
+ # 3. With wrong schema
+ - name: Try to collect data in nonexistent schema
+ <<: *task_parameters
+ postgresql_user_obj_stat_info:
+ <<: *pg_parameters
+ schema: nonexistent
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == "Schema 'nonexistent' does not exist"
+
+ # 4. Test Trust Input
+ - name: Try running with SQL injection
+ <<: *task_parameters
+ postgresql_user_obj_stat_info:
+ <<: *pg_parameters
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ trust_input: false
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ ##########
+ # Clean up
+ ##########
+ - name: Drop schema
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: '{{ test_schema1 }}'
+ state: absent
+ cascade_drop: true
+
+ - name: Drop test tables
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ test_table1 }}'
+ - '{{ test_table2 }}'
+
+ - name: Drop test functions
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'DROP FUNCTION {{ item }}()'
+ loop:
+ - '{{ test_func1 }}'
+ - '{{ test_func2 }}'
+ - '{{ test_schema1 }}.{{ test_func3 }}'
+ ignore_errors: true
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_pkg_mgr/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_pkg_mgr/tasks/main.yml
new file mode 100644
index 000000000..cc4e3b0dd
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_pkg_mgr/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- set_fact:
+ pkg_mgr: community.general.pkgng
+ ansible_pkg_mgr: community.general.pkgng
+ cacheable: true
+ when: ansible_os_family == "FreeBSD"
+
+- set_fact:
+ pkg_mgr: community.general.zypper
+ ansible_pkg_mgr: community.general.zypper
+ cacheable: true
+ when: ansible_os_family == "Suse"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml
new file mode 100644
index 000000000..973d41591
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml
@@ -0,0 +1,21 @@
+postgresql_service: postgresql
+
+postgresql_packages:
+ - postgresql-server
+ - python-psycopg2
+
+pg_user: postgres
+pg_group: root
+
+locale_latin_suffix:
+locale_utf8_suffix:
+
+postgis: postgis
+
+# defaults for test SSL
+ssl_db: 'ssl_db'
+ssl_user: 'ssl_user'
+ssl_pass: 'ssl_pass'
+ssl_rootcert: '/etc/server-ca.crt'
+ssl_cert: '/etc/client.crt'
+ssl_key: '/etc/client.key' \ No newline at end of file
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--0.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--0.sql
new file mode 100644
index 000000000..626b0fb48
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''0'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql
new file mode 100644
index 000000000..53c79666b
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''1.0'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql
new file mode 100644
index 000000000..227ba1b4c
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''2.0'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.0-1.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.0-1.sql
new file mode 100644
index 000000000..3c23cb5d1
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.0-1.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3-1.0-1'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.0.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.0.sql
new file mode 100644
index 000000000..252680ad6
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3-1.0'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.foo.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.foo.sql
new file mode 100644
index 000000000..6d706ede9
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3-1.foo.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3-1.foo'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0-1.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0-1.sql
new file mode 100644
index 000000000..b366bd17d
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0-1.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3.0-1'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0-foo.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0-foo.sql
new file mode 100644
index 000000000..659e2113b
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0-foo.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3.0-foo'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql
new file mode 100644
index 000000000..7d6a60e54
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3.0'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.beta.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.beta.sql
new file mode 100644
index 000000000..0e945a1d0
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.beta.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3.beta'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--4.0.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--4.0.sql
new file mode 100644
index 000000000..33aaef568
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--4.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''4.0'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--v4.sql b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--v4.sql
new file mode 100644
index 000000000..bb966db98
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--v4.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''v4'')::text';
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy.control b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy.control
new file mode 100644
index 000000000..fda97bc58
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy.control
@@ -0,0 +1,3 @@
+comment = 'dummy extension used to test postgresql_ext Ansible module'
+default_version = '4.0'
+relocatable = true
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf
new file mode 100644
index 000000000..58de3607f
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf
@@ -0,0 +1,10 @@
+# !!! This file managed by Ansible. Any local changes may be overwritten. !!!
+
+# Database administrative login by UNIX sockets
+# note: you may wish to restrict this further later
+local all {{ pg_user }} trust
+
+# TYPE DATABASE USER CIDR-ADDRESS METHOD
+local all all md5
+host all all 127.0.0.1/32 md5
+host all all ::1/128 md5
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/meta/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/meta/main.yml
new file mode 100644
index 000000000..5438ced5c
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml
new file mode 100644
index 000000000..80bb3c4d4
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml
@@ -0,0 +1,279 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: python 2
+ set_fact:
+ python_suffix: ''
+ when: ansible_python_version is version('3', '<')
+
+- name: python 3
+ set_fact:
+ python_suffix: -py3
+ when: ansible_python_version is version('3', '>=')
+
+- name: Include distribution and Python version specific variables
+ include_vars: '{{ lookup(''first_found'', params) }}'
+ vars:
+ params:
+ files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}{{ python_suffix }}.yml'
+ - default{{ python_suffix }}.yml
+ paths:
+ - '{{ role_path }}/vars'
+
+- name: Make sure the dbus service is enabled under systemd
+ shell: systemctl enable dbus || systemctl enable dbus-broker
+ ignore_errors: true
+ when: ansible_service_mgr == 'systemd' and ansible_distribution == 'Fedora'
+
+- name: Make sure the dbus service is started under systemd
+ systemd:
+ name: dbus
+ state: started
+ when: ansible_service_mgr == 'systemd' and ansible_distribution == 'Fedora'
+
+- name: Kill all postgres processes
+ shell: 'pkill -u {{ pg_user }}'
+ become: true
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+ ignore_errors: true
+
+- name: stop postgresql service
+ service: name={{ postgresql_service }} state=stopped
+ ignore_errors: true
+
+- name: remove old db (RedHat)
+ file:
+ path: '{{ pg_dir }}'
+ state: absent
+ ignore_errors: true
+ when: ansible_os_family == "RedHat"
+
+- name: remove old db config and files (debian)
+ file:
+ path: '{{ loop_item }}'
+ state: absent
+ ignore_errors: true
+ when: ansible_os_family == "Debian"
+ loop:
+ - /etc/postgresql
+ - /var/lib/postgresql
+ loop_control:
+ loop_var: loop_item
+
+#
+# Install PostgreSQL 15 on Ubuntu 20.04
+- name: Install PostgreSQL 15 on Ubuntu 20.04
+ when:
+ - ansible_facts.distribution == 'Ubuntu'
+ - ansible_facts.distribution_major_version == '20'
+ block:
+ - name: Run autoremove
+ become: true
+ apt:
+ autoremove: true
+
+ - name: Install wget
+ package:
+ name: wget
+
+ - name: Create the file repository configuration
+ lineinfile:
+ create: true
+ line: "deb http://apt.postgresql.org/pub/repos/apt {{ ansible_facts['distribution_release'] }}-pgdg main"
+ path: '/etc/apt/sources.list.d/pgdg.list'
+ state: 'present'
+
+ - name: Import the repository signing key
+ ansible.builtin.apt_key:
+ state: present
+ url: https://www.postgresql.org/media/keys/ACCC4CF8.asc
+
+ - name: Update the package lists
+ apt:
+ update_cache: true
+
+ - name: Install locale needed
+ shell: 'locale-gen {{ item }}'
+ loop:
+ - es_ES
+ - pt_BR
+
+ - name: Update locale
+ shell: 'update-locale'
+##
+#
+
+- name: install dependencies for postgresql test
+ package:
+ name: '{{ postgresql_package_item }}'
+ state: present
+ with_items: '{{ postgresql_packages }}'
+ loop_control:
+ loop_var: postgresql_package_item
+
+- name: Initialize postgres (RedHat systemd)
+ command: postgresql-setup initdb
+ when: ansible_os_family == "RedHat" and ansible_service_mgr == "systemd"
+
+- name: Initialize postgres (RedHat sysv)
+ command: /sbin/service postgresql initdb
+ when: ansible_os_family == "RedHat" and ansible_service_mgr != "systemd"
+
+- name: Initialize postgres (Debian)
+ shell: . /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main
+ args:
+ creates: /etc/postgresql/{{ pg_ver }}/
+ when: ansible_os_family == 'Debian'
+
+- name: Copy pg_hba into place
+ template:
+ src: files/pg_hba.conf
+ dest: '{{ pg_hba_location }}'
+ owner: '{{ pg_user }}'
+ group: '{{ pg_group }}'
+ mode: '0644'
+
+- name: Generate locales (Debian)
+ locale_gen:
+ name: '{{ item }}'
+ state: present
+ with_items:
+ - pt_BR
+ - es_ES
+ when: ansible_os_family == 'Debian'
+
+- block:
+ - name: Install langpacks (RHEL8)
+ yum:
+ name:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ - glibc-all-langpacks
+ state: present
+ when: ansible_distribution_major_version is version('8', '>=')
+
+ - name: Check if locales need to be generated (RedHat)
+ shell: localedef --list-archive | grep -a -q '^{{ locale }}$'
+ register: locale_present
+ ignore_errors: true
+ with_items:
+ - es_ES
+ - pt_BR
+ loop_control:
+ loop_var: locale
+
+ - block:
+ - name: Reinstall internationalization files
+ command: yum -y reinstall glibc-common
+ rescue:
+ - name: Install internationalization files
+ yum:
+ name: glibc-common
+ state: present
+ when: locale_present is failed
+
+ - name: Generate locale (RedHat)
+ command: localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }}
+ when: item is failed
+ with_items: '{{ locale_present.results }}'
+ when: ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora'
+
+- name: Install glibc langpacks (Fedora >= 24)
+ package:
+ name: '{{ item }}'
+ state: latest
+ with_items:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ when: ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('24', '>=')
+
+- name: start postgresql service
+ service: name={{ postgresql_service }} state=started
+
+- name: Pause between start and stop
+ pause:
+ seconds: 5
+
+- name: Kill all postgres processes
+ shell: 'pkill -u {{ pg_user }}'
+ become: true
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+ ignore_errors: true
+ register: terminate
+
+- name: Stop postgresql service
+ service: name={{ postgresql_service }} state=stopped
+ when: terminate is not succeeded
+
+- name: Pause between stop and start
+ pause:
+ seconds: 5
+
+- name: Start postgresql service
+ service: name={{ postgresql_service }} state=started
+
+- name: copy control file for dummy ext
+ copy:
+ src: dummy.control
+ dest: /usr/share/postgresql/{{ pg_ver }}/extension/dummy.control
+ mode: '0444'
+ when: ansible_os_family == 'Debian'
+
+- name: copy version files for dummy ext
+ copy:
+ src: '{{ item }}'
+ dest: /usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}
+ mode: '0444'
+ with_items:
+ - dummy--0.sql
+ - dummy--1.0.sql
+ - dummy--2.0.sql
+ - dummy--3.0.sql
+ - dummy--3.0-1.sql
+ - dummy--3.0-foo.sql
+ - dummy--3.beta.sql
+ - dummy--3-1.0.sql
+ - dummy--3-1.0-1.sql
+ - dummy--3-1.foo.sql
+ - dummy--v4.sql
+ - dummy--4.0.sql
+ when: ansible_os_family == 'Debian'
+
+- name: add update paths
+ file:
+ path: /usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}
+ mode: '0444'
+ state: touch
+ with_items:
+ - dummy--0--1.0.sql
+ - dummy--1.0--2.0.sql
+ - dummy--2.0--3.0.sql
+ - dummy--3.0--3.0-1.sql
+ - dummy--3.0-1--3.0-foo.sql
+ - dummy--3.0-foo--3.beta.sql
+ - dummy--3.beta--3-1.0.sql
+ - dummy--3-1.0--3-1.0-1.sql
+ - dummy--3-1.0-1--3-1.foo.sql
+ - dummy--3-1.foo--v4.sql
+ - dummy--v4--4.0.sql
+ when: ansible_os_family == 'Debian'
+
+- name: Get PostgreSQL version
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: echo 'SHOW SERVER_VERSION' | psql --tuples-only --no-align --dbname postgres
+ register: postgres_version_resp
+
+- name: Print PostgreSQL server version
+ debug:
+ msg: '{{ postgres_version_resp.stdout }}'
+
+- import_tasks: ssl.yml
+ when:
+ - ansible_os_family == 'Debian'
+ - postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml
new file mode 100644
index 000000000..9f53e80ea
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml
@@ -0,0 +1,108 @@
+- name: postgresql SSL - create database
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_db:
+ name: '{{ ssl_db }}'
+
+- name: postgresql SSL - create role
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_user:
+ name: '{{ ssl_user }}'
+ role_attr_flags: SUPERUSER
+ password: '{{ ssl_pass }}'
+
+- name: postgresql SSL - install openssl
+ become: true
+ package: name=openssl state=present
+
+- name: postgresql SSL - create certs 1 (Make a self-signed server CA)
+ become_user: root
+ become: true
+ shell: openssl req -sha256 -new -x509 -days 365 -nodes -out /etc/server-ca.crt -keyout /etc/server-ca.key -subj "/CN="
+
+- name: postgresql SSL - create certs 2 (Generate server CSR)
+ become_user: root
+ become: true
+ shell: openssl req -sha256 -new -nodes -subj "/CN=127.0.0.1" -out /etc/server.csr -keyout /etc/server.key
+
+- name: postgresql SSL - create certs 3 (Sign a server certificate)
+ become_user: root
+ become: true
+ shell: openssl x509 -req -sha256 -days 365 -in /etc/server.csr -CA /etc/server-ca.crt -CAkey /etc/server-ca.key -CAcreateserial -out /etc/server.crt
+
+- name: postgresql SSL - create certs 4 (Make a self-signed client CA)
+ become_user: root
+ become: true
+ shell: openssl req -sha256 -new -x509 -days 365 -nodes -out /etc/client-ca.crt -keyout /etc/client-ca.key -subj "/CN="
+
+- name: postgresql SSL - create certs 5 (Generate client CSR)
+ become_user: root
+ become: true
+ shell: openssl req -sha256 -new -nodes -subj "/CN={{ ssl_user }}" -out /etc/client.csr -keyout /etc/client.key
+
+- name: postgresql SSL - create certs 6 (Sign a client certificate)
+ become_user: root
+ become: true
+ shell: openssl x509 -req -sha256 -days 365 -in /etc/client.csr -CA /etc/client-ca.crt -CAkey /etc/client-ca.key -CAcreateserial -out /etc/client.crt
+
+- name: postgresql SSL - set right permissions to files
+ become_user: root
+ become: true
+ file:
+ path: '{{ item }}'
+ mode: '0600'
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ with_items:
+ - /etc/server.key
+ - /etc/server.crt
+ - /etc/server.csr
+ - /etc/client.csr
+ - /etc/client.key
+ - /etc/client-ca.crt
+ - /etc/client-ca.key
+ - /etc/server-ca.key
+ - /etc/server-ca.crt
+
+- name: postgresql SSL - enable SSL
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_set:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ name: ssl
+ value: true
+
+- name: postgresql SSL - add ssl_cert_file
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_set:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ name: ssl_cert_file
+ value: /etc/server.crt
+
+- name: postgresql SSL - add ssl_key_file
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_set:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ name: ssl_key_file
+ value: /etc/server.key
+
+- name: postgresql SSL - add ssl_ca_file
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_set:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ name: ssl_ca_file
+ value: /etc/client-ca.crt
+
+- name: postgresql SSL - reload PostgreSQL to enable ssl on
+ become: true
+ service:
+ name: '{{ postgresql_service }}'
+ state: reloaded
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml
new file mode 100644
index 000000000..932738d39
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml
@@ -0,0 +1,9 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.4/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.4/main"
+pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf"
+pg_ver: 9.4
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml
new file mode 100644
index 000000000..72041a3d7
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml
@@ -0,0 +1,9 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python3-psycopg2"
+ - "bzip2"
+ - "xz"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
+pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml
new file mode 100644
index 000000000..30720f8fe
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python-psycopg2"
+ - "bzip2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
+pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml
new file mode 100644
index 000000000..ff543a385
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml
@@ -0,0 +1,13 @@
+postgresql_packages:
+ - "apt-utils"
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+ - "postgresql-client"
+
+pg_hba_location: "/etc/postgresql/15/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/15/main"
+pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf"
+pg_ver: 15
+
+postgis: postgresql-15-postgis-3
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml
new file mode 100644
index 000000000..3ff3e0de5
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml
@@ -0,0 +1,7 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python3-psycopg2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
+pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml
new file mode 100644
index 000000000..71f1cd46e
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml
@@ -0,0 +1,7 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python-psycopg2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
+pg_auto_conf: "{{ pg_dir }}/postgresql.auto.conf"
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/defaults/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/defaults/main.yml
new file mode 100644
index 000000000..5ac314c4b
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/defaults/main.yml
@@ -0,0 +1,26 @@
+# General:
+pg_user: postgres
+db_default: postgres
+
+pg_package_list:
+- postgresql
+- postgresql-client
+- python3-psycopg2
+
+packages_to_remove:
+- postgresql
+- postgresql-client
+
+# Primary specific defaults:
+primary_root_dir: '/var/lib/pgsql/primary'
+primary_data_dir: '{{ primary_root_dir }}/data'
+primary_postgresql_conf: '{{ primary_data_dir }}/postgresql.conf'
+primary_pg_hba_conf: '{{ primary_data_dir }}/pg_hba.conf'
+primary_port: 5431
+
+# Replica specific defaults:
+replica_root_dir: '/var/lib/pgsql/replica'
+replica_data_dir: '{{ replica_root_dir }}/data'
+replica_postgresql_conf: '{{ replica_data_dir }}/postgresql.conf'
+replica_pg_hba_conf: '{{ replica_data_dir }}/pg_hba.conf'
+replica_port: 5434
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/handlers/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/handlers/main.yml
new file mode 100644
index 000000000..ea230c778
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/handlers/main.yml
@@ -0,0 +1,24 @@
+- name: Stop services
+ become: true
+ become_user: '{{ pg_user }}'
+ shell: '{{ pg_ctl }} -D {{ item.datadir }} -o "-p {{ item.port }}" -m immediate stop'
+ loop:
+ - { datadir: '{{ primary_data_dir }}', port: '{{ primary_port }}' }
+ - { datadir: '{{ replica_data_dir }}', port: '{{ replica_port }}' }
+ listen: stop postgresql
+
+- name: Remove packages
+ apt:
+ name: '{{ packages_to_remove }}'
+ state: absent
+ listen: cleanup postgresql
+
+- name: Remove FS objects
+ file:
+ state: absent
+ path: "{{ item }}"
+ force: true
+ loop:
+ - "{{ primary_root_dir }}"
+ - "{{ replica_root_dir }}"
+ listen: cleanup postgresql
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/main.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/main.yml
new file mode 100644
index 000000000..4c6421a18
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/main.yml
@@ -0,0 +1,13 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Setup PostgreSQL primary-standby replication into one container:
+- import_tasks: setup_postgresql_cluster.yml
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - ansible_distribution_major_version >= '18'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml
new file mode 100644
index 000000000..2bff42e78
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml
@@ -0,0 +1,149 @@
+- name: Remove preinstalled packages
+ apt:
+ name: '{{ packages_to_remove }}'
+ state: absent
+ become: true
+
+- name: Run autoremove
+ become: true
+ apt:
+ autoremove: true
+
+- name: Configure Ubuntu 20 for PostgreSQL
+ when:
+ - ansible_facts['distribution'] == 'Ubuntu'
+ - ansible_facts['distribution_major_version'] is version('20', 'ge')
+ block:
+ - name: Install wget
+ package:
+ name: wget
+
+ - name: Add PGDG repository
+ lineinfile:
+ create: true
+ line: "deb http://apt.postgresql.org/pub/repos/apt {{ ansible_facts['distribution_release'] }}-pgdg main"
+ path: '/etc/apt/sources.list.d/pgdg.list'
+ state: 'present'
+
+ - name: Add PGDG GPG key
+ ansible.builtin.apt_key:
+ state: present
+ url: https://www.postgresql.org/media/keys/ACCC4CF8.asc
+
+ - name: Update apt cache
+ apt:
+ update_cache: true
+
+- name: Install apt-utils
+ apt:
+ name: apt-utils
+
+- name: Install packages
+ apt:
+ name: '{{ pg_package_list }}'
+ policy_rc_d: 101 # prevent the service from starting
+ notify: cleanup postgresql
+
+- name: Delete postgresql related files
+ file:
+ state: absent
+ path: '{{ item }}'
+ force: true
+ loop:
+ - '{{ primary_root_dir }}'
+ - '{{ replica_root_dir }}'
+ - /etc/postgresql
+ - /var/lib/postgresql
+
+- name: Create dirs needed
+ file:
+ state: directory
+ recurse: true
+ path: '{{ item }}'
+ owner: postgres
+ group: postgres
+ mode: '0700'
+ loop:
+ - '{{ primary_data_dir }}'
+ - '{{ replica_data_dir }}'
+ - /var/lib/postgresql
+ notify: cleanup postgresql
+
+- name: Find initdb
+ shell: find /usr/lib -type f -name "initdb"
+ register: result
+
+- name: Set path to initdb
+ set_fact:
+ initdb: '{{ result.stdout }}'
+
+- name: Initialize databases
+ become: true
+ become_user: '{{ pg_user }}'
+ shell: '{{ initdb }} --pgdata {{ item }}'
+ loop:
+ - '{{ primary_data_dir }}'
+ - '{{ replica_data_dir }}'
+
+- name: Copy config templates
+ template:
+ src: '{{ item.conf_templ }}'
+ dest: '{{ item.conf_dest }}'
+ owner: postgres
+ group: postgres
+ force: true
+ loop:
+ - conf_templ: primary_postgresql.conf.j2
+ conf_dest: '{{ primary_postgresql_conf }}'
+ - conf_templ: replica_postgresql.conf.j2
+ conf_dest: '{{ replica_postgresql_conf }}'
+ - conf_templ: pg_hba.conf.j2
+ conf_dest: '{{ primary_pg_hba_conf }}'
+ - conf_templ: pg_hba.conf.j2
+ conf_dest: '{{ replica_pg_hba_conf }}'
+
+- name: Find pg_ctl
+ shell: find /usr/lib -type f -name "pg_ctl"
+ register: result
+
+- name: Set path to pg_ctl
+ set_fact:
+ pg_ctl: '{{ result.stdout }}'
+
+- name: Start primary
+ become: true
+ become_user: '{{ pg_user }}'
+ shell: '{{ pg_ctl }} -D {{ primary_data_dir }} -o "-p {{ primary_port }}" -l {{ primary_data_dir }}/primary.log start'
+ notify:
+ - stop postgresql
+
+- name: Start replica
+ become: true
+ become_user: '{{ pg_user }}'
+ shell: '{{ pg_ctl }} -D {{ replica_data_dir }} -o "-p {{ replica_port }}" -l {{ replica_data_dir }}/replica.log start'
+
+- name: Check connectivity to the primary and get PostgreSQL version
+ become: true
+ become_user: '{{ pg_user }}'
+ postgresql_ping:
+ db: '{{ db_default }}'
+ login_user: '{{ pg_user }}'
+ login_port: '{{ primary_port }}'
+ register: result
+
+- name: Check connectivity to the replica and get PostgreSQL version
+ become: true
+ become_user: '{{ pg_user }}'
+ postgresql_ping:
+ db: '{{ db_default }}'
+ login_user: '{{ pg_user }}'
+ login_port: '{{ replica_port }}'
+
+- name: Define server version
+ set_fact:
+ pg_major_version: '{{ result.server_version.major }}'
+ pg_minor_version: '{{ result.server_version.minor }}'
+
+- name: Print PostgreSQL version
+ debug:
+ msg: PostgreSQL version is {{ pg_major_version }}.{{ pg_minor_version }}
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2 b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2
new file mode 100644
index 000000000..62e05ffc8
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2
@@ -0,0 +1,7 @@
+local all all trust
+local replication logical_replication trust
+host replication logical_replication 127.0.0.1/32 trust
+host replication logical_replication 0.0.0.0/0 trust
+local all logical_replication trust
+host all logical_replication 127.0.0.1/32 trust
+host all logical_replication 0.0.0.0/0 trust
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/primary_postgresql.conf.j2 b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/primary_postgresql.conf.j2
new file mode 100644
index 000000000..545769f35
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/primary_postgresql.conf.j2
@@ -0,0 +1,28 @@
+# Important parameters:
+listen_addresses='*'
+port = {{ primary_port }}
+wal_level = logical
+max_wal_senders = 8
+track_commit_timestamp = on
+max_replication_slots = 10
+
+# Unimportant parameters:
+max_connections=10
+shared_buffers=8MB
+dynamic_shared_memory_type=posix
+log_destination='stderr'
+logging_collector=on
+log_directory='log'
+log_filename='postgresql-%a.log'
+log_truncate_on_rotation=on
+log_rotation_age=1d
+log_rotation_size=0
+log_line_prefix='%m[%p]'
+log_timezone='W-SU'
+datestyle='iso,mdy'
+timezone='W-SU'
+lc_messages='en_US.UTF-8'
+lc_monetary='en_US.UTF-8'
+lc_numeric='en_US.UTF-8'
+lc_time='en_US.UTF-8'
+default_text_search_config='pg_catalog.english'
diff --git a/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2 b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2
new file mode 100644
index 000000000..206ab2eb3
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2
@@ -0,0 +1,28 @@
+# Important parameters:
+listen_addresses='*'
+port = {{ replica_port }}
+wal_level = logical
+max_wal_senders = 8
+track_commit_timestamp = on
+max_replication_slots = 10
+
+# Unimportant parameters:
+max_connections=10
+shared_buffers=8MB
+dynamic_shared_memory_type=posix
+log_destination='stderr'
+logging_collector=on
+log_directory='log'
+log_filename='postgresql-%a.log'
+log_truncate_on_rotation=on
+log_rotation_age=1d
+log_rotation_size=0
+log_line_prefix='%m[%p]'
+log_timezone='W-SU'
+datestyle='iso,mdy'
+timezone='W-SU'
+lc_messages='en_US.UTF-8'
+lc_monetary='en_US.UTF-8'
+lc_numeric='en_US.UTF-8'
+lc_time='en_US.UTF-8'
+default_text_search_config='pg_catalog.english'
diff --git a/ansible_collections/community/postgresql/tests/requirements.yml b/ansible_collections/community/postgresql/tests/requirements.yml
new file mode 100644
index 000000000..5a2c9c805
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/requirements.yml
@@ -0,0 +1,3 @@
+integration_tests_dependencies:
+- community.general
+unit_tests_dependencies: []
diff --git a/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.json b/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.json
new file mode 100644
index 000000000..c789a7fd3
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.json
@@ -0,0 +1,7 @@
+{
+ "include_symlinks": true,
+ "prefixes": [
+ "plugins/"
+ ],
+ "output": "path-message"
+}
diff --git a/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py b/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py
new file mode 100755
index 000000000..49806f2e2
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Prevent unwanted files from being added to the source tree."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ allowed_extensions = (
+ '.cs',
+ '.ps1',
+ '.psm1',
+ '.py',
+ )
+
+ skip_paths = set([
+ ])
+
+ skip_directories = (
+ )
+
+ for path in paths:
+ if path in skip_paths:
+ continue
+
+ if any(path.startswith(skip_directory) for skip_directory in skip_directories):
+ continue
+
+ ext = os.path.splitext(path)[1]
+
+ if ext not in allowed_extensions:
+ print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/tests/sanity/ignore-2.12.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.12.txt
new file mode 100644
index 000000000..b9cd1303f
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.12.txt
@@ -0,0 +1,5 @@
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
+plugins/modules/postgresql_db.py use-argspec-type-path
+plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen
+plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown
diff --git a/ansible_collections/community/postgresql/tests/sanity/ignore-2.13.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.13.txt
new file mode 100644
index 000000000..b9cd1303f
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.13.txt
@@ -0,0 +1,5 @@
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
+plugins/modules/postgresql_db.py use-argspec-type-path
+plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen
+plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown
diff --git a/ansible_collections/community/postgresql/tests/sanity/ignore-2.14.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.14.txt
new file mode 100644
index 000000000..b9cd1303f
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.14.txt
@@ -0,0 +1,5 @@
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
+plugins/modules/postgresql_db.py use-argspec-type-path
+plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen
+plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown
diff --git a/ansible_collections/community/postgresql/tests/sanity/ignore-2.15.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.15.txt
new file mode 100644
index 000000000..58b57c247
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.15.txt
@@ -0,0 +1,6 @@
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
+plugins/modules/postgresql_db.py use-argspec-type-path
+plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen
+plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown
+plugins/module_utils/version.py pylint:unused-import
diff --git a/ansible_collections/community/postgresql/tests/sanity/ignore-2.16.txt b/ansible_collections/community/postgresql/tests/sanity/ignore-2.16.txt
new file mode 100644
index 000000000..58b57c247
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/sanity/ignore-2.16.txt
@@ -0,0 +1,6 @@
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
+plugins/modules/postgresql_db.py use-argspec-type-path
+plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen
+plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown
+plugins/module_utils/version.py pylint:unused-import
diff --git a/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/__init__.py b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/__init__.py
diff --git a/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py
new file mode 100644
index 000000000..975542446
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py
@@ -0,0 +1,338 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+from os import environ
+
+import pytest
+
+import ansible_collections.community.postgresql.plugins.module_utils.postgres as pg
+
+
+INPUT_DICT = dict(
+ session_role=dict(default=''),
+ login_user=dict(default='postgres'),
+ login_password=dict(default='test', no_log=True),
+ login_host=dict(default='test'),
+ login_unix_socket=dict(default=''),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ ssl_mode=dict(
+ default='prefer',
+ choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']
+ ),
+ ca_cert=dict(aliases=['ssl_rootcert']),
+ ssl_cert=dict(type='path'),
+ ssl_key=dict(type='path'),
+)
+
+EXPECTED_DICT = dict(
+ user=dict(default='postgres'),
+ password=dict(default='test', no_log=True),
+ host=dict(default='test'),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ sslmode=dict(
+ default='prefer',
+ choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']
+ ),
+ sslrootcert=dict(aliases=['ssl_rootcert']),
+ sslcert=dict(type='path'),
+ sslkey=dict(type='path'),
+)
+
+
+class TestPostgresCommonArgSpec():
+
+ """
+    Namespace for testing the postgres_common_argument_spec() function.
+ """
+
+ def test_postgres_common_argument_spec(self):
+ """
+        Test for the postgres_common_argument_spec() function.
+
+ The tested function just returns a dictionary with the default
+ parameters and their values for PostgreSQL modules.
+ The return and expected dictionaries must be compared.
+ """
+ expected_dict = dict(
+ login_user=dict(default='postgres', aliases=['login']),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(default='', aliases=['host']),
+ login_unix_socket=dict(default='', aliases=['unix_socket']),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ ssl_mode=dict(
+ default='prefer',
+ choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']
+ ),
+ ca_cert=dict(aliases=['ssl_rootcert']),
+ ssl_cert=dict(type='path'),
+ ssl_key=dict(type='path'),
+ connect_params=dict(default={}, type='dict'),
+ )
+ assert pg.postgres_common_argument_spec() == expected_dict
+
+ # Setting new values for checking environment variables
+ expected_dict['port']['default'] = 5435
+ expected_dict['login_user']['default'] = 'test_user'
+
+ # Setting environment variables
+ environ['PGUSER'] = 'test_user'
+ environ['PGPORT'] = '5435'
+ assert pg.postgres_common_argument_spec() == expected_dict
+
+
+@pytest.fixture
+def m_psycopg2():
+ """Return mock object for psycopg2 emulation."""
+ global Cursor
+ Cursor = None
+
+ class Cursor():
+ def __init__(self):
+ self.passed_query = None
+
+ def execute(self, query):
+ self.passed_query = query
+
+ def close(self):
+ pass
+
+ global DbConnection
+ DbConnection = None
+
+ class DbConnection():
+ def __init__(self):
+ pass
+
+ def cursor(self, cursor_factory=None):
+ return Cursor()
+
+ def set_session(self, autocommit=None):
+ pass
+
+ def set_isolation_level(self, isolevel):
+ pass
+
+ class Extras():
+ def __init__(self):
+ self.DictCursor = True
+
+ class Extensions():
+ def __init__(self):
+ self.ISOLATION_LEVEL_AUTOCOMMIT = True
+
+ class DummyPsycopg2():
+ def __init__(self):
+ self.__version__ = '2.4.3'
+ self.extras = Extras()
+ self.extensions = Extensions()
+
+ def connect(self, host=None, port=None, user=None,
+ password=None, sslmode=None, sslrootcert=None, connect_params=None):
+ if user == 'Exception':
+ raise Exception()
+
+ return DbConnection()
+
+ return DummyPsycopg2()
+
+
+class TestEnsureReqLibs():
+
+ """
+ Namespace for testing ensure_required_libs() function.
+
+ If there is something wrong with libs, the function invokes fail_json()
+ method of AnsibleModule object passed as an argument called 'module'.
+ Therefore we must check:
+ 1. value of err_msg attribute of m_ansible_module mock object.
+ """
+
+ @pytest.fixture(scope='class')
+ def m_ansible_module(self):
+ """Return an object of dummy AnsibleModule class."""
+ class Dummym_ansible_module():
+ def __init__(self):
+ self.params = {'ca_cert': False}
+ self.err_msg = ''
+
+ def fail_json(self, msg):
+ self.err_msg = msg
+
+ return Dummym_ansible_module()
+
+ def test_ensure_req_libs_has_not_psycopg2(self, m_ansible_module):
+ """Test ensure_required_libs() with psycopg2 is None."""
+ # HAS_PSYCOPG2 is False by default
+ pg.ensure_required_libs(m_ansible_module)
+ assert 'Failed to import the required Python library (psycopg2)' in m_ansible_module.err_msg
+
+ def test_ensure_req_libs_has_psycopg2(self, m_ansible_module, monkeypatch):
+ """Test ensure_required_libs() with psycopg2 is not None."""
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+
+ pg.ensure_required_libs(m_ansible_module)
+ assert m_ansible_module.err_msg == ''
+
+ def test_ensure_req_libs_ca_cert(self, m_ansible_module, m_psycopg2, monkeypatch):
+ """
+ Test with module.params['ca_cert'], psycopg2 version is suitable.
+ """
+ m_ansible_module.params['ca_cert'] = True
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ pg.ensure_required_libs(m_ansible_module)
+ assert m_ansible_module.err_msg == ''
+
+ def test_ensure_req_libs_ca_cert_low_psycopg2_ver(self, m_ansible_module, m_psycopg2, monkeypatch):
+ """
+ Test with module.params['ca_cert'], psycopg2 version is wrong.
+ """
+ m_ansible_module.params['ca_cert'] = True
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ # Set wrong psycopg2 version number:
+ psycopg2 = m_psycopg2
+ psycopg2.__version__ = '2.4.2'
+ monkeypatch.setattr(pg, 'psycopg2', psycopg2)
+
+ pg.ensure_required_libs(m_ansible_module)
+ assert 'psycopg2 must be at least 2.4.3' in m_ansible_module.err_msg
+
+
+@pytest.fixture(scope='class')
+def m_ansible_module():
+ """Return an object of dummy AnsibleModule class."""
+ class DummyAnsibleModule():
+ def __init__(self):
+
+ # take default params from argument spec
+ spec = pg.postgres_common_argument_spec()
+ params = dict()
+ for k in spec.keys():
+ params[k] = spec[k].get('default')
+
+ self.params = params
+ self.err_msg = ''
+ self.warn_msg = ''
+
+ def fail_json(self, msg):
+ self.err_msg = msg
+
+ def warn(self, msg):
+ self.warn_msg = msg
+
+ return DummyAnsibleModule()
+
+
+class TestConnectToDb():
+
+ """
+ Namespace for testing connect_to_db() function.
+
+    When some connection errors occur, connect_to_db() catches any of them
+    and invokes the fail_json() or warn() methods of the AnsibleModule object
+    depending on the passed parameters.
+    connect_to_db may return a db_connection object or None if errors occurred.
+ Therefore we must check:
+ 1. Values of err_msg and warn_msg attributes of m_ansible_module mock object.
+ 2. Types of return objects (db_connection and cursor).
+ """
+
+ def test_connect_to_db(self, m_ansible_module, monkeypatch, m_psycopg2):
+ """Test connect_to_db(), common test."""
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection, dummy = pg.connect_to_db(m_ansible_module, conn_params)
+ cursor = db_connection.cursor()
+ # if errors, db_connection returned as None:
+ assert type(db_connection) == DbConnection
+ assert type(cursor) == Cursor
+ assert m_ansible_module.err_msg == ''
+ # The default behaviour, normal in this case:
+ assert 'Database name has not been passed' in m_ansible_module.warn_msg
+
+ def test_session_role(self, m_ansible_module, monkeypatch, m_psycopg2):
+ """Test connect_to_db(), switch on session_role."""
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ m_ansible_module.params['session_role'] = 'test_role'
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection, dummy = pg.connect_to_db(m_ansible_module, conn_params)
+ cursor = db_connection.cursor()
+ # if errors, db_connection returned as None:
+ assert type(db_connection) == DbConnection
+ assert type(cursor) == Cursor
+ assert m_ansible_module.err_msg == ''
+ # The default behaviour, normal in this case:
+ assert 'Database name has not been passed' in m_ansible_module.warn_msg
+
+ def test_fail_on_conn_true(self, m_ansible_module, monkeypatch, m_psycopg2):
+ """
+ Test connect_to_db(), fail_on_conn arg passed as True (the default behavior).
+ """
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ m_ansible_module.params['login_user'] = 'Exception' # causes Exception
+
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection, dummy = pg.connect_to_db(m_ansible_module, conn_params, fail_on_conn=True)
+
+ assert 'unable to connect to database' in m_ansible_module.err_msg
+ assert db_connection is None
+
+ def test_fail_on_conn_false(self, m_ansible_module, monkeypatch, m_psycopg2):
+ """
+ Test connect_to_db(), fail_on_conn arg passed as False.
+ """
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ m_ansible_module.params['login_user'] = 'Exception' # causes Exception
+
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection, dummy = pg.connect_to_db(m_ansible_module, conn_params, fail_on_conn=False)
+
+ assert m_ansible_module.err_msg == ''
+ assert 'PostgreSQL server is unavailable' in m_ansible_module.warn_msg
+ assert db_connection is None
+
+ def test_autocommit_true(self, m_ansible_module, monkeypatch, m_psycopg2):
+ """
+ Test connect_to_db(), autocommit arg passed as True (the default is False).
+ """
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+
+        # case 1: psycopg2.__version__ >= 2.4.3 (the default in m_psycopg2)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection, dummy = pg.connect_to_db(m_ansible_module, conn_params, autocommit=True)
+ cursor = db_connection.cursor()
+
+ # if errors, db_connection returned as None:
+ assert type(db_connection) == DbConnection
+ assert type(cursor) == Cursor
+ assert m_ansible_module.err_msg == ''
+
+
+class TestGetConnParams():
+
+ """Namespace for testing get_conn_params() function."""
+
+ def test_get_conn_params_def(self, m_ansible_module, m_psycopg2, monkeypatch):
+ """Test get_conn_params(), warn_db_default kwarg is default."""
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+ assert pg.get_conn_params(m_ansible_module, INPUT_DICT) == EXPECTED_DICT
+ assert m_ansible_module.warn_msg == 'Database name has not been passed, used default database to connect to.'
+
+ def test_get_conn_params_warn_db_def_false(self, m_ansible_module, m_psycopg2, monkeypatch):
+ """Test get_conn_params(), warn_db_default kwarg is False."""
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+ assert pg.get_conn_params(m_ansible_module, INPUT_DICT, warn_db_default=False) == EXPECTED_DICT
+ assert m_ansible_module.warn_msg == ''
diff --git a/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py
new file mode 100644
index 000000000..62a1704ad
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Andrey Tuzhilin <andrei.tuzhilin@gmail.com>
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.postgresql.plugins.module_utils.saslprep import saslprep
+
+
+VALID = [
+ (u'', u''),
+ (u'\u00A0', u' '),
+ (u'a', u'a'),
+ (u'й', u'й'),
+ (u'\u30DE\u30C8\u30EA\u30C3\u30AF\u30B9', u'\u30DE\u30C8\u30EA\u30C3\u30AF\u30B9'),
+ (u'The\u00ADM\u00AAtr\u2168', u'TheMatrIX'),
+ (u'I\u00ADX', u'IX'),
+ (u'user', u'user'),
+ (u'USER', u'USER'),
+ (u'\u00AA', u'a'),
+ (u'\u2168', u'IX'),
+ (u'\u05BE\u00A0\u05BE', u'\u05BE\u0020\u05BE'),
+]
+
+INVALID = [
+ (None, TypeError),
+ (b'', TypeError),
+ (u'\u0221', ValueError),
+ (u'\u0007', ValueError),
+ (u'\u0627\u0031', ValueError),
+ (u'\uE0001', ValueError),
+ (u'\uE0020', ValueError),
+ (u'\uFFF9', ValueError),
+ (u'\uFDD0', ValueError),
+ (u'\u0000', ValueError),
+ (u'\u06DD', ValueError),
+ (u'\uFFFFD', ValueError),
+ (u'\uD800', ValueError),
+ (u'\u200E', ValueError),
+ (u'\u05BE\u00AA\u05BE', ValueError),
+]
+
+
+@pytest.mark.parametrize('source,target', VALID)
+def test_saslprep_conversions(source, target):
+ assert saslprep(source) == target
+
+
+@pytest.mark.parametrize('source,exception', INVALID)
+def test_saslprep_exceptions(source, exception):
+ with pytest.raises(exception) as ex:
+ saslprep(source)
diff --git a/ansible_collections/community/postgresql/tests/unit/plugins/modules/__init__.py b/ansible_collections/community/postgresql/tests/unit/plugins/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/unit/plugins/modules/__init__.py
diff --git a/ansible_collections/community/postgresql/tests/unit/plugins/modules/test_postgresql_set.py b/ansible_collections/community/postgresql/tests/unit/plugins/modules/test_postgresql_set.py
new file mode 100644
index 000000000..a10678202
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/unit/plugins/modules/test_postgresql_set.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2021, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.postgresql.plugins.modules.postgresql_set import pretty_to_bytes
+
+
+@pytest.mark.parametrize('input_,expected', [
+ ('', ''),
+ ('test', 'test'),
+ ('0.1', 0.1),
+ ('1024', 1024),
+ ('1024B', 1024),
+ ('1kB', 1024),
+ ('100kB', 102400),
+ ('1MB', 1048576),
+ ('100MB', 104857600),
+ ('1GB', 1073741824),
+ ('10GB', 10737418240),
+ ('127.0.0.1', '127.0.0.1')
+]
+)
+def test_pretty_to_bytes(input_, expected):
+ assert pretty_to_bytes(input_) == expected
diff --git a/ansible_collections/community/postgresql/tests/utils/constraints.txt b/ansible_collections/community/postgresql/tests/utils/constraints.txt
new file mode 100644
index 000000000..ae6000ae1
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/constraints.txt
@@ -0,0 +1,52 @@
+coverage >= 4.2, < 5.0.0, != 4.3.2 ; python_version <= '3.7' # features in 4.2+ required, avoid known bug in 4.3.2 on python 2.6, coverage 5.0+ incompatible
+coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible
+cryptography < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6
+deepdiff < 4.0.0 ; python_version < '3' # deepdiff 4.0.0 and later require python 3
+jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later
+urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later
+pywinrm >= 0.3.0 # message encryption support
+sphinx < 1.6 ; python_version < '2.7' # sphinx 1.6 and later require python 2.7 or later
+sphinx < 1.8 ; python_version >= '2.7' # sphinx 1.8 and later are currently incompatible with rstcheck 3.3
+pygments >= 2.4.0 # Pygments 2.4.0 includes bugfixes for YAML and YAML+Jinja lexers
+wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later
+yamllint != 1.8.0, < 1.14.0 ; python_version < '2.7' # yamllint 1.8.0 and 1.14.0+ require python 2.7+
+pycrypto >= 2.6 # Need features found in 2.6 and greater
+ncclient >= 0.5.2 # Need features added in 0.5.2 and greater
+idna < 2.6, >= 2.5 # linode requires idna < 2.9, >= 2.5, requests requires idna < 2.6, but cryptography will cause the latest version to be installed instead
+paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6
+pytest < 3.3.0 ; python_version < '2.7' # pytest 3.3.0 drops support for python 2.6
+pytest < 5.0.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7
+pytest-forked < 1.0.2 ; python_version < '2.7' # pytest-forked 1.0.2 and later require python 2.7 or later
+pytest-forked >= 1.0.2 ; python_version >= '2.7' # pytest-forked before 1.0.2 does not work with pytest 4.2.0+ (which requires python 2.7+)
+ntlm-auth >= 1.3.0 # message encryption support using cryptography
+requests < 2.20.0 ; python_version < '2.7' # requests 2.20.0 drops support for python 2.6
+requests-ntlm >= 1.1.0 # message encryption support
+requests-credssp >= 0.1.0 # message encryption support
+voluptuous >= 0.11.0 # Schema recursion via Self
+openshift >= 0.6.2, < 0.9.0 # merge_type support
+virtualenv < 16.0.0 ; python_version < '2.7' # virtualenv 16.0.0 and later require python 2.7 or later
+pathspec < 0.6.0 ; python_version < '2.7' # pathspec 0.6.0 and later require python 2.7 or later
+pyopenssl < 18.0.0 ; python_version < '2.7' # pyOpenSSL 18.0.0 and later require python 2.7 or later
+pyfmg == 0.6.1 # newer versions do not pass current unit tests
+pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later
+pycparser < 2.19 ; python_version < '2.7' # pycparser 2.19 and later require python 2.7 or later
+mock >= 2.0.0 # needed for features backported from Python 3.6 unittest.mock (assert_called, assert_called_once...)
+pytest-mock >= 1.4.0 # needed for mock_use_standalone_module pytest option
+xmltodict < 0.12.0 ; python_version < '2.7' # xmltodict 0.12.0 and later require python 2.7 or later
+lxml < 4.3.0 ; python_version < '2.7' # lxml 4.3.0 and later require python 2.7 or later
+pyvmomi < 6.0.0 ; python_version < '2.7' # pyvmomi 6.0.0 and later require python 2.7 or later
+pyone == 1.1.9 # newer versions do not pass current integration tests
+boto3 < 1.11 ; python_version < '2.7' # boto3 1.11 drops Python 2.6 support
+botocore >= 1.10.0, < 1.14 ; python_version < '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca; botocore 1.14 drops Python 2.6 support
+botocore >= 1.10.0 ; python_version >= '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca
+setuptools < 45 ; python_version <= '2.7' # setuptools 45 and later require python 3.5 or later
+cffi >= 1.14.2, != 1.14.3 # Yanked version which older versions of pip will still install:
+
+# freeze pylint and its requirements for consistent test results
+astroid == 2.2.5
+isort == 4.3.15
+lazy-object-proxy == 1.3.1
+mccabe == 0.6.1
+pylint == 2.3.1
+typed-ast == 1.4.0 # 1.4.0 is required to compile on Python 3.8
+wrapt == 1.11.1
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/aix.sh b/ansible_collections/community/postgresql/tests/utils/shippable/aix.sh
new file mode 100755
index 000000000..cd3014cca
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/aix.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py b/ansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py
new file mode 100755
index 000000000..608db6923
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+"""Verify the currently executing Shippable test matrix matches the one defined in the "shippable.yml" file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import json
+import os
+import re
+import sys
+import time
+
+try:
+ from typing import NoReturn
+except ImportError:
+ NoReturn = None
+
+try:
+ # noinspection PyCompatibility
+ from urllib2 import urlopen # pylint: disable=ansible-bad-import-from
+except ImportError:
+ # noinspection PyCompatibility
+ from urllib.request import urlopen
+
+
+def main(): # type: () -> None
+ """Main entry point."""
+ repo_full_name = os.environ['REPO_FULL_NAME']
+ required_repo_full_name = 'ansible-collections/community.postgresql'
+
+ if repo_full_name != required_repo_full_name:
+ sys.stderr.write('Skipping matrix check on repo "%s" which is not "%s".\n' % (repo_full_name, required_repo_full_name))
+ return
+
+ with open('shippable.yml', 'rb') as yaml_file:
+ yaml = yaml_file.read().decode('utf-8').splitlines()
+
+ defined_matrix = [match.group(1) for match in [re.search(r'^ *- env: T=(.*)$', line) for line in yaml] if match and match.group(1) != 'none']
+
+ if not defined_matrix:
+ fail('No matrix entries found in the "shippable.yml" file.',
+ 'Did you modify the "shippable.yml" file?')
+
+ run_id = os.environ['SHIPPABLE_BUILD_ID']
+ sleep = 1
+ jobs = []
+
+ for attempts_remaining in range(4, -1, -1):
+ try:
+ jobs = json.loads(urlopen('https://api.shippable.com/jobs?runIds=%s' % run_id).read())
+
+ if not isinstance(jobs, list):
+ raise Exception('Shippable run %s data is not a list.' % run_id)
+
+ break
+ except Exception as ex:
+ if not attempts_remaining:
+ fail('Unable to retrieve Shippable run %s matrix.' % run_id,
+ str(ex))
+
+ sys.stderr.write('Unable to retrieve Shippable run %s matrix: %s\n' % (run_id, ex))
+ sys.stderr.write('Trying again in %d seconds...\n' % sleep)
+ time.sleep(sleep)
+ sleep *= 2
+
+ if len(jobs) != len(defined_matrix):
+ if len(jobs) == 1:
+ hint = '\n\nMake sure you do not use the "Rebuild with SSH" option.'
+ else:
+ hint = ''
+
+ fail('Shippable run %s has %d jobs instead of the expected %d jobs.' % (run_id, len(jobs), len(defined_matrix)),
+ 'Try re-running the entire matrix.%s' % hint)
+
+ actual_matrix = dict((job.get('jobNumber'), dict(tuple(line.split('=', 1)) for line in job.get('env', [])).get('T', '')) for job in jobs)
+ errors = [(job_number, test, actual_matrix.get(job_number)) for job_number, test in enumerate(defined_matrix, 1) if actual_matrix.get(job_number) != test]
+
+ if len(errors):
+ error_summary = '\n'.join('Job %s expected "%s" but found "%s" instead.' % (job_number, expected, actual) for job_number, expected, actual in errors)
+
+ fail('Shippable run %s has a job matrix mismatch.' % run_id,
+ 'Try re-running the entire matrix.\n\n%s' % error_summary)
+
+
+def fail(message, output): # type: (str, str) -> NoReturn
+ # Include a leading newline to improve readability on Shippable "Tests" tab.
+ # Without this, the first line becomes indented.
+ output = '\n' + output.strip()
+
+ timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
+
+ # hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers
+ xml = '''
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
+\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None">
+\t\t<testcase classname="timeout" name="timeout">
+\t\t\t<error message="%s" type="error">%s</error>
+\t\t</testcase>
+\t</testsuite>
+</testsuites>
+''' % (timestamp, message, output)
+
+ path = 'shippable/testresults/check-matrix.xml'
+ dir_path = os.path.dirname(path)
+
+ if not os.path.exists(dir_path):
+ os.makedirs(dir_path)
+
+ with open(path, 'w') as junit_fd:
+ junit_fd.write(xml.lstrip())
+
+ sys.stderr.write(message + '\n')
+ sys.stderr.write(output + '\n')
+
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/freebsd.sh b/ansible_collections/community/postgresql/tests/utils/shippable/freebsd.sh
new file mode 100755
index 000000000..cd3014cca
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/freebsd.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/linux.sh b/ansible_collections/community/postgresql/tests/utils/shippable/linux.sh
new file mode 100755
index 000000000..9cc2f966c
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/linux.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+image="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --docker "${image}"
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/remote.sh b/ansible_collections/community/postgresql/tests/utils/shippable/remote.sh
new file mode 100755
index 000000000..cd3014cca
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/remote.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/rhel.sh b/ansible_collections/community/postgresql/tests/utils/shippable/rhel.sh
new file mode 100755
index 000000000..cd3014cca
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/rhel.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+# CI entry point for RHEL remotes. NOTE: byte-identical copy of remote.sh (same blob cd3014cca).
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"   # split "<platform>/<version>[/<group>]" on '/' and ':'
+
+platform="${args[0]}"   # remote platform name (e.g. "rhel") for ansible-test --remote
+version="${args[1]}"    # remote platform version
+
+if [ "${#args[@]}" -gt 2 ]; then
+    target="shippable/posix/group${args[2]}/"   # a group was requested: limit to that target group
+else
+    target="shippable/posix/"                   # no group given: run all posix targets
+fi
+
+stage="${S:-prod}"        # remote stage, overridable via the S environment variable
+provider="${P:-default}"  # remote provider, overridable via the P environment variable
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+    --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/sanity.sh b/ansible_collections/community/postgresql/tests/utils/shippable/sanity.sh
new file mode 100755
index 000000000..c216220e8
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/sanity.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# CI entry point: run ansible-test sanity checks (or the "extra" community checks).
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"   # split "sanity/<group>" on '/' and ':'
+
+group="${args[1]}"   # sanity group number, or the literal "extra"
+
+if [ "${BASE_BRANCH:-}" ]; then
+    base_branch="origin/${BASE_BRANCH}"   # compare against the PR's base branch when CI provides one
+else
+    base_branch=""   # NOTE(review): the empty value is still passed to --base-branch below — confirm ansible-test accepts that
+fi
+
+if [ "${group}" == "extra" ]; then
+    # ansible-galaxy -vvv collection install community.internal_test_tools
+    git clone --single-branch --depth 1 https://github.com/ansible-collections/community.internal_test_tools.git ../internal_test_tools
+
+    ../internal_test_tools/tools/run.py --color   # run the extra checks, then skip ansible-test below
+    exit
+fi
+
+# shellcheck disable=SC2086
+ansible-test sanity --color -v --junit ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+    --docker --base-branch "${base_branch}" \
+    --allow-disabled
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/shippable.sh b/ansible_collections/community/postgresql/tests/utils/shippable/shippable.sh
new file mode 100755
index 000000000..b181297f9
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/shippable.sh
@@ -0,0 +1,208 @@
+#!/usr/bin/env bash
+# Top-level CI driver: prepares the environment, then dispatches to a per-script runner.
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"   # split "<ansible_version>/<script>[/...]" on '/' and ':'
+
+ansible_version="${args[0]}"   # "devel" or a stable branch version such as "2.14"
+script="${args[1]}"            # name of the tests/utils/shippable/<script>.sh to dispatch to
+
+function join {     # join the remaining arguments using the separator given as $1
+    local IFS="$1"; # set the word separator for this function only
+    shift;
+    echo "$*";      # "$*" expands the positional args with IFS between them
+}
+
+test="$(join / "${args[@]:1}")"   # everything after the ansible version, re-joined with '/'
+
+docker images ansible/ansible
+docker images quay.io/ansible/*
+docker ps
+
+for container in $(docker ps --format '{{.Image}} {{.ID}}' | grep -v -e '^drydock/' -e '^quay.io/ansible/azure-pipelines-test-container:' | sed 's/^.* //'); do   # remove stale test containers, keeping CI infrastructure ones
+    docker rm -f "${container}" || true # ignore errors
+done
+
+docker ps
+
+if [ -d /home/shippable/cache/ ]; then
+    ls -la /home/shippable/cache/   # log cache contents for debugging
+fi
+
+command -v python
+python -V
+
+function retry     # run "$@" up to 3 times, returning on first success; exit 255 after 3 failures
+{
+    # shellcheck disable=SC2034
+    for repetition in 1 2 3; do
+        set +e       # tolerate failure so the exit code can be captured
+        "$@"
+        result=$?
+        set -e
+        if [ ${result} == 0 ]; then
+            return ${result}
+        fi
+        echo "$* -> ${result}"   # fixed: was literal "@*"; log the failing command and its exit code
+    done
+    echo "Command '$*' failed 3 times!"   # fixed: was literal '@*'
+    exit 255
+}
+
+command -v pip
+pip --version
+pip list --disable-pip-version-check
+if [ "${ansible_version}" == "devel" ]; then
+    retry pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
+else
+    retry pip install "https://github.com/ansible/ansible/archive/stable-${ansible_version}.tar.gz" --disable-pip-version-check
+fi
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then   # running under Shippable CI: copy the checkout into a collections tree
+    export ANSIBLE_COLLECTIONS_PATHS="${HOME}/.ansible"
+    SHIPPABLE_RESULT_DIR="$(pwd)/shippable"
+    TEST_DIR="${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/postgresql"
+    mkdir -p "${TEST_DIR}"
+    cp -aT "${SHIPPABLE_BUILD_DIR}" "${TEST_DIR}"
+    cd "${TEST_DIR}"
+else
+    export ANSIBLE_COLLECTIONS_PATHS="${PWD}/../../../"   # local run: assume we are already inside a collections tree
+fi
+
+# START: HACK install dependencies for integration tests
+if [ "${script}" != "units" ] && [ "${script}" != "sanity" ] && [ "${ansible_version}" != "2.9" ]; then
+    git clone --depth=1 --single-branch https://github.com/ansible-collections/community.general \
+        "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/general"
+fi
+# END: HACK
+
+export PYTHONIOENCODING='utf-8'
+
+if [ "${JOB_TRIGGERED_BY_NAME:-}" == "nightly-trigger" ]; then   # nightly builds always run full coverage
+    COVERAGE=yes
+    COMPLETE=yes
+fi
+
+if [ -n "${COVERAGE:-}" ]; then
+    # on-demand coverage reporting triggered by setting the COVERAGE environment variable to a non-empty value
+    export COVERAGE="--coverage"
+elif [[ "${COMMIT_MESSAGE}" =~ ci_coverage ]]; then   # NOTE(review): assumes COMMIT_MESSAGE is set (script uses set -u) — provided by CI
+    # on-demand coverage reporting triggered by having 'ci_coverage' in the latest commit message
+    export COVERAGE="--coverage"
+else
+    # on-demand coverage reporting disabled (default behavior, always-on coverage reporting remains enabled)
+    export COVERAGE="--coverage-check"
+fi
+
+if [ -n "${COMPLETE:-}" ]; then
+    # disable change detection triggered by setting the COMPLETE environment variable to a non-empty value
+    export CHANGED=""
+elif [[ "${COMMIT_MESSAGE}" =~ ci_complete ]]; then
+    # disable change detection triggered by having 'ci_complete' in the latest commit message
+    export CHANGED=""
+else
+    # enable change detection (default behavior)
+    export CHANGED="--changed"
+fi
+
+if [ "${IS_PULL_REQUEST:-}" == "true" ]; then
+    # run unstable tests which are targeted by focused changes on PRs
+    export UNSTABLE="--allow-unstable-changed"
+else
+    # do not run unstable tests outside PRs
+    export UNSTABLE=""
+fi
+
+# remove empty core/extras module directories from PRs created prior to the repo-merge
+find plugins -type d -empty -print -delete
+
+function cleanup   # EXIT trap: process coverage data and copy test results into $SHIPPABLE_RESULT_DIR
+{
+    # for complete on-demand coverage generate a report for all files with no coverage on the "sanity/5" job so we only have one copy
+    if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ] && [ "${test}" == "sanity/5" ]; then
+        stub="--stub"
+        # trigger coverage reporting for stubs even if no other coverage data exists
+        mkdir -p tests/output/coverage/
+    else
+        stub=""
+    fi
+
+    if [ -d tests/output/coverage/ ]; then
+        if find tests/output/coverage/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then   # directory has non-hidden entries
+            process_coverage='yes' # process existing coverage files
+        elif [ "${stub}" ]; then
+            process_coverage='yes' # process coverage when stubs are enabled
+        else
+            process_coverage=''
+        fi
+
+        if [ "${process_coverage}" ]; then
+            # use python 3.7 for coverage to avoid running out of memory during coverage xml processing
+            # only use it for coverage to avoid the additional overhead of setting up a virtual environment for a potential no-op job
+            virtualenv --python /usr/bin/python3.7 ~/ansible-venv
+            set +ux   # relax strict mode: activate scripts reference unset variables
+            . ~/ansible-venv/bin/activate
+            set -ux
+
+            # shellcheck disable=SC2086
+            ansible-test coverage xml --color -v --requirements --group-by command --group-by version ${stub:+"$stub"}
+            cp -a tests/output/reports/coverage=*.xml "$SHIPPABLE_RESULT_DIR/codecoverage/"
+
+            if [ "${ansible_version}" != "2.9" ]; then
+                # analyze and capture code coverage aggregated by integration test target
+                ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json"
+            fi
+
+            # upload coverage report to codecov.io only when using complete on-demand coverage
+            if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ]; then
+                for file in tests/output/reports/coverage=*.xml; do
+                    flags="${file##*/coverage=}"         # derive codecov flags from the report filename
+                    flags="${flags%-powershell.xml}"
+                    flags="${flags%.xml}"
+                    # remove numbered component from stub files when converting to tags
+                    flags="${flags//stub-[0-9]*/stub}"
+                    flags="${flags//=/,}"
+                    flags="${flags//[^a-zA-Z0-9_,]/_}"   # codecov flags allow only word characters and commas
+
+                    bash <(curl -s https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh) \
+                        -f "${file}" \
+                        -F "${flags}" \
+                        -n "${test}" \
+                        -t 00c0f9fa-ac1b-43d3-addf-99de803232c1 \
+                        -X coveragepy \
+                        -X gcov \
+                        -X fix \
+                        -X search \
+                        -X xcode \
+                        || echo "Failed to upload code coverage report to codecov.io: ${file}"
+                done
+            fi
+        fi
+    fi
+
+    if [ -d tests/output/junit/ ]; then
+        cp -aT tests/output/junit/ "$SHIPPABLE_RESULT_DIR/testresults/"
+    fi
+
+    if [ -d tests/output/data/ ]; then
+        cp -a tests/output/data/ "$SHIPPABLE_RESULT_DIR/testresults/"
+    fi
+
+    if [ -d tests/output/bot/ ]; then
+        cp -aT tests/output/bot/ "$SHIPPABLE_RESULT_DIR/testresults/"
+    fi
+}
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then trap cleanup EXIT; fi   # in CI, always collect results/coverage on exit
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+    timeout=60   # coverage runs are slower; allow more time
+else
+    timeout=50
+fi
+
+ansible-test env --dump --show --timeout "${timeout}" --color -v
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then "tests/utils/shippable/check_matrix.py"; fi
+"tests/utils/shippable/${script}.sh" "${test}"   # dispatch to the per-script runner with the remaining path
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/timing.py b/ansible_collections/community/postgresql/tests/utils/shippable/timing.py
new file mode 100755
index 000000000..fb538271b
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/timing.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3.7
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+start = time.time()  # wall-clock reference: elapsed time is measured from process start
+
+sys.stdin.reconfigure(errors='surrogateescape')   # pass undecodable bytes through instead of raising
+sys.stdout.reconfigure(errors='surrogateescape')
+
+for line in sys.stdin:
+    seconds = time.time() - start
+    sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))  # prefix each line with elapsed "MM:SS "
+    sys.stdout.flush()  # flush per line so CI log output streams live
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/timing.sh b/ansible_collections/community/postgresql/tests/utils/shippable/timing.sh
new file mode 100755
index 000000000..77e257830
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/timing.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+# Run the given command, merging stderr into stdout and timestamping every line via timing.py.
+set -o pipefail -eu
+
+"$@" 2>&1 | "$(dirname "$0")/timing.py"
diff --git a/ansible_collections/community/postgresql/tests/utils/shippable/units.sh b/ansible_collections/community/postgresql/tests/utils/shippable/units.sh
new file mode 100755
index 000000000..f204dc87e
--- /dev/null
+++ b/ansible_collections/community/postgresql/tests/utils/shippable/units.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+group="${args[1]}"
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+ timeout=90
+else
+ timeout=30
+fi
+
+group1=()
+
+case "${group}" in
+ 1) options=("${group1[@]:+${group1[@]}}") ;;
+esac
+
+ansible-test env --timeout "${timeout}" --color -v
+
+# shellcheck disable=SC2086
+ansible-test units --color -v --docker default ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+ "${options[@]:+${options[@]}}" \