diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-14 20:03:01 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-14 20:03:01 +0000 |
commit | a453ac31f3428614cceb99027f8efbdb9258a40b (patch) | |
tree | f61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/community/postgresql | |
parent | Initial commit. (diff) | |
download | ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.tar.xz ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.zip |
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.upstream/2.10.7+merged+base+2.10.8+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/postgresql')
226 files changed, 33232 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/README.md b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/README.md new file mode 100644 index 00000000..385e70ba --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/README.md @@ -0,0 +1,3 @@ +## Azure Pipelines Configuration + +Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information. diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml new file mode 100644 index 00000000..5981da78 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/azure-pipelines.yml @@ -0,0 +1,192 @@ +trigger: + batch: true + branches: + include: + - main + - stable-* + +pr: + autoCancel: true + branches: + include: + - main + - stable-* + +schedules: + - cron: 0 9 * * * + displayName: Nightly + always: true + branches: + include: + - main + - stable-* + +variables: + - name: checkoutPath + value: ansible_collections/community/postgresql + - name: coverageBranches + value: main + - name: pipelinesCoverage + value: coverage + - name: entryPoint + value: tests/utils/shippable/shippable.sh + - name: fetchDepth + value: 0 + +resources: + containers: + - container: default + image: quay.io/ansible/azure-pipelines-test-container:1.7.1 + +pool: Standard + +stages: +### Sanity & units + - stage: Ansible_devel + displayName: Sanity & Units devel + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + targets: + - name: Sanity + test: 'devel/sanity/1' + - name: Sanity Extra # Only on devel + test: 'devel/sanity/extra' + - name: Units + test: 'devel/units/1' + - stage: Ansible_2_10 + displayName: Sanity & Units 2.10 + 
dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + targets: + - name: Sanity + test: '2.10/sanity/1' + - name: Units + test: '2.10/units/1' + - stage: Ansible_2_9 + displayName: Sanity & Units 2.9 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + targets: + - name: Sanity + test: '2.9/sanity/1' + - name: Units + test: '2.9/units/1' +### Docker + - stage: Docker_devel + displayName: Docker devel + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: devel/linux/{0}/1 + targets: + - name: CentOS 6 + test: centos6 + - name: CentOS 7 + test: centos7 + - name: Fedora 32 + test: fedora32 + - name: Fedora 33 + test: fedora33 + - name: Ubuntu 16.04 + test: ubuntu1604 + - name: Ubuntu 18.04 + test: ubuntu1804 + - stage: Docker_2_10 + displayName: Docker 2.10 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.10/linux/{0}/1 + targets: + - name: CentOS 6 + test: centos6 + - name: CentOS 7 + test: centos7 + - name: Fedora 31 + test: fedora31 + - name: Fedora 32 + test: fedora32 + - name: Ubuntu 16.04 + test: ubuntu1604 + - name: Ubuntu 18.04 + test: ubuntu1804 + - stage: Docker_2_9 + displayName: Docker 2.9 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.9/linux/{0}/1 + targets: + - name: CentOS 6 + test: centos6 + - name: CentOS 7 + test: centos7 + - name: Fedora 31 + test: fedora31 + - name: Ubuntu 16.04 + test: ubuntu1604 + - name: Ubuntu 18.04 + test: ubuntu1804 + +### Remote + - stage: Remote_devel + displayName: Remote devel + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: devel/{0}/1 + targets: + - name: RHEL 7.8 + test: rhel/7.8 + - name: RHEL 8.2 + test: rhel/8.2 + - name: FreeBSD 11.1 + test: freebsd/11.1 + - name: FreeBSD 12.1 + test: freebsd/12.1 + - stage: Remote_2_10 + displayName: Remote 2.10 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + 
nameFormat: RHEL 8.2 {0} + testFormat: 2.10/{0}/1 + targets: + - name: RHEL 8.2 + test: rhel/8.2 + - stage: Remote_2_9 + displayName: Remote 2.9 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: RHEL 8.2 {0} + testFormat: 2.9/{0}/1 + targets: + - name: RHEL 8.2 + test: rhel/8.2 + - stage: Summary + condition: succeededOrFailed() + dependsOn: + - Ansible_devel + - Ansible_2_10 + - Ansible_2_9 + - Docker_devel + - Docker_2_10 + - Docker_2_9 + - Remote_devel + - Remote_2_10 + - Remote_2_9 + jobs: + - template: templates/coverage.yml diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/aggregate-coverage.sh b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/aggregate-coverage.sh new file mode 100755 index 00000000..f3113dd0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/aggregate-coverage.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# Aggregate code coverage results for later processing. + +set -o pipefail -eu + +agent_temp_directory="$1" + +PATH="${PWD}/bin:${PATH}" + +mkdir "${agent_temp_directory}/coverage/" + +options=(--venv --venv-system-site-packages --color -v) + +ansible-test coverage combine --export "${agent_temp_directory}/coverage/" "${options[@]}" + +if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then + # Only analyze coverage if the installed version of ansible-test supports it. + # Doing so allows this script to work unmodified for multiple Ansible versions. 
+ ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}" +fi diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py new file mode 100755 index 00000000..506ade64 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/combine-coverage.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +""" +Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job. +Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}" +The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName) +Keep in mind that Azure Pipelines does not enforce unique job display names (only names). +It is up to pipeline authors to avoid name collisions when deviating from the recommended format. 
+""" + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import re +import shutil +import sys + + +def main(): + """Main program entry point.""" + source_directory = sys.argv[1] + + if '/ansible_collections/' in os.getcwd(): + output_path = "tests/output" + else: + output_path = "test/results" + + destination_directory = os.path.join(output_path, 'coverage') + + if not os.path.exists(destination_directory): + os.makedirs(destination_directory) + + jobs = {} + count = 0 + + for name in os.listdir(source_directory): + match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name) + label = match.group('label') + attempt = int(match.group('attempt')) + jobs[label] = max(attempt, jobs.get(label, 0)) + + for label, attempt in jobs.items(): + name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt) + source = os.path.join(source_directory, name) + source_files = os.listdir(source) + + for source_file in source_files: + source_path = os.path.join(source, source_file) + destination_path = os.path.join(destination_directory, source_file + '.' 
+ label) + print('"%s" -> "%s"' % (source_path, destination_path)) + shutil.copyfile(source_path, destination_path) + count += 1 + + print('Coverage file count: %d' % count) + print('##vso[task.setVariable variable=coverageFileCount]%d' % count) + print('##vso[task.setVariable variable=outputPath]%s' % output_path) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/process-results.sh b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/process-results.sh new file mode 100755 index 00000000..f3f1d1ba --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/process-results.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Check the test results and set variables for use in later steps. + +set -o pipefail -eu + +if [[ "$PWD" =~ /ansible_collections/ ]]; then + output_path="tests/output" +else + output_path="test/results" +fi + +echo "##vso[task.setVariable variable=outputPath]${output_path}" + +if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then + echo "##vso[task.setVariable variable=haveTestResults]true" +fi + +if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then + echo "##vso[task.setVariable variable=haveBotResults]true" +fi + +if compgen -G "${output_path}"'/coverage/*' > /dev/null; then + echo "##vso[task.setVariable variable=haveCoverageData]true" +fi diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/publish-codecov.sh b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/publish-codecov.sh new file mode 100755 index 00000000..7aeabda0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/publish-codecov.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Upload code coverage reports to codecov.io. 
+# Multiple coverage files from multiple languages are accepted and aggregated after upload. +# Python coverage, as well as PowerShell and Python stubs can all be uploaded. + +set -o pipefail -eu + +output_path="$1" + +curl --silent --show-error https://codecov.io/bash > codecov.sh + +for file in "${output_path}"/reports/coverage*.xml; do + name="${file}" + name="${name##*/}" # remove path + name="${name##coverage=}" # remove 'coverage=' prefix if present + name="${name%.xml}" # remove '.xml' suffix + + bash codecov.sh \ + -f "${file}" \ + -n "${name}" \ + -X coveragepy \ + -X gcov \ + -X fix \ + -X search \ + -X xcode \ + || echo "Failed to upload code coverage report to codecov.io: ${file}" +done diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/report-coverage.sh b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/report-coverage.sh new file mode 100755 index 00000000..1bd91bdc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/report-coverage.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Generate code coverage reports for uploading to Azure Pipelines and codecov.io. + +set -o pipefail -eu + +PATH="${PWD}/bin:${PATH}" + +if ! ansible-test --help >/dev/null 2>&1; then + # Install the devel version of ansible-test for generating code coverage reports. + # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs). + # Since a version of ansible-test is required that can work the output from multiple older releases, the devel version is used. 
+ pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check +fi + +ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/run-tests.sh b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/run-tests.sh new file mode 100755 index 00000000..a947fdf0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/run-tests.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# Configure the test environment and run the tests. + +set -o pipefail -eu + +entry_point="$1" +test="$2" +read -r -a coverage_branches <<< "$3" # space separated list of branches to run code coverage on for scheduled builds + +export COMMIT_MESSAGE +export COMPLETE +export COVERAGE +export IS_PULL_REQUEST + +if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then + IS_PULL_REQUEST=true + COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2) +else + IS_PULL_REQUEST= + COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD) +fi + +COMPLETE= +COVERAGE= + +if [ "${BUILD_REASON}" = "Schedule" ]; then + COMPLETE=yes + + if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then + COVERAGE=yes + fi +fi + +"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py new file mode 100755 index 00000000..5e8eb8d4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/scripts/time-command.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +"""Prepends a relative timestamp to each input line from stdin and writes it to stdout.""" + +from __future__ import (absolute_import, 
division, print_function) +__metaclass__ = type + +import sys +import time + + +def main(): + """Main program entry point.""" + start = time.time() + + sys.stdin.reconfigure(errors='surrogateescape') + sys.stdout.reconfigure(errors='surrogateescape') + + for line in sys.stdin: + seconds = time.time() - start + sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line)) + sys.stdout.flush() + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/templates/coverage.yml b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/templates/coverage.yml new file mode 100644 index 00000000..1864e444 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/templates/coverage.yml @@ -0,0 +1,39 @@ +# This template adds a job for processing code coverage data. +# It will upload results to Azure Pipelines and codecov.io. +# Use it from a job stage that completes after all other jobs have completed. +# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed. + +jobs: + - job: Coverage + displayName: Code Coverage + container: default + workspace: + clean: all + steps: + - checkout: self + fetchDepth: $(fetchDepth) + path: $(checkoutPath) + - task: DownloadPipelineArtifact@2 + displayName: Download Coverage Data + inputs: + path: coverage/ + patterns: "Coverage */*=coverage.combined" + - bash: .azure-pipelines/scripts/combine-coverage.py coverage/ + displayName: Combine Coverage Data + - bash: .azure-pipelines/scripts/report-coverage.sh + displayName: Generate Coverage Report + condition: gt(variables.coverageFileCount, 0) + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + # Azure Pipelines only accepts a single coverage data file. + # That means only Python or PowerShell coverage can be uploaded, but not both. 
+ # Set the "pipelinesCoverage" variable to determine which type is uploaded. + # Use "coverage" for Python and "coverage-powershell" for PowerShell. + summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml" + displayName: Publish to Azure Pipelines + condition: gt(variables.coverageFileCount, 0) + - bash: .azure-pipelines/scripts/publish-codecov.sh "$(outputPath)" + displayName: Publish to codecov.io + condition: gt(variables.coverageFileCount, 0) + continueOnError: true diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/templates/matrix.yml b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/templates/matrix.yml new file mode 100644 index 00000000..4e9555dd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/templates/matrix.yml @@ -0,0 +1,55 @@ +# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template. +# If this matrix template does not provide the required functionality, consider using the test template directly instead. + +parameters: + # A required list of dictionaries, one per test target. + # Each item in the list must contain a "test" or "name" key. + # Both may be provided. If one is omitted, the other will be used. + - name: targets + type: object + + # An optional list of values which will be used to multiply the targets list into a matrix. + # Values can be strings or numbers. + - name: groups + type: object + default: [] + + # An optional format string used to generate the job name. + # - {0} is the name of an item in the targets list. + - name: nameFormat + type: string + default: "{0}" + + # An optional format string used to generate the test name. + # - {0} is the name of an item in the targets list. + - name: testFormat + type: string + default: "{0}" + + # An optional format string used to add the group to the job name. 
+ # {0} is the formatted name of an item in the targets list. + # {{1}} is the group -- be sure to include the double "{{" and "}}". + - name: nameGroupFormat + type: string + default: "{0} - {{1}}" + + # An optional format string used to add the group to the test name. + # {0} is the formatted test of an item in the targets list. + # {{1}} is the group -- be sure to include the double "{{" and "}}". + - name: testGroupFormat + type: string + default: "{0}/{{1}}" + +jobs: + - template: test.yml + parameters: + jobs: + - ${{ if eq(length(parameters.groups), 0) }}: + - ${{ each target in parameters.targets }}: + - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }} + test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }} + - ${{ if not(eq(length(parameters.groups), 0)) }}: + - ${{ each group in parameters.groups }}: + - ${{ each target in parameters.targets }}: + - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }} + test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }} diff --git a/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/templates/test.yml b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/templates/test.yml new file mode 100644 index 00000000..5250ed80 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/.azure-pipelines/templates/test.yml @@ -0,0 +1,45 @@ +# This template uses the provided list of jobs to create test one or more test jobs. +# It can be used directly if needed, or through the matrix template. + +parameters: + # A required list of dictionaries, one per test job. + # Each item in the list must contain a "job" and "name" key. 
+ - name: jobs + type: object + +jobs: + - ${{ each job in parameters.jobs }}: + - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }} + displayName: ${{ job.name }} + container: default + workspace: + clean: all + steps: + - checkout: self + fetchDepth: $(fetchDepth) + path: $(checkoutPath) + - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)" + displayName: Run Tests + - bash: .azure-pipelines/scripts/process-results.sh + condition: succeededOrFailed() + displayName: Process Results + - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)" + condition: eq(variables.haveCoverageData, 'true') + displayName: Aggregate Coverage Data + - task: PublishTestResults@2 + condition: eq(variables.haveTestResults, 'true') + inputs: + testResultsFiles: "$(outputPath)/junit/*.xml" + displayName: Publish Test Results + - task: PublishPipelineArtifact@1 + condition: eq(variables.haveBotResults, 'true') + displayName: Publish Bot Results + inputs: + targetPath: "$(outputPath)/bot/" + artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" + - task: PublishPipelineArtifact@1 + condition: eq(variables.haveCoverageData, 'true') + displayName: Publish Coverage Data + inputs: + targetPath: "$(Agent.TempDirectory)/coverage/" + artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/CHANGELOG.rst b/collections-debian-merged/ansible_collections/community/postgresql/CHANGELOG.rst new file mode 100644 index 00000000..ed164b4d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/CHANGELOG.rst @@ -0,0 +1,81 @@ +============================================= +Community PostgreSQL Collection Release Notes +============================================= + +.. 
contents:: Topics + + +v1.1.1 +====== + +Release Summary +--------------- + +This is the patch release of the ``community.postgresql`` collection. +This changelog contains all changes to the modules in this collection that +have been added after the release of ``community.postgresql`` 1.1.0. + +Bugfixes +-------- + +- postgresql_query - add a warning to set ``as_single_query`` option explicitly (https://github.com/ansible-collections/community.postgresql/pull/54). +- postgresql_query - fix datetime.timedelta type handling (https://github.com/ansible-collections/community.postgresql/issues/47). +- postgresql_query - fix decimal handling (https://github.com/ansible-collections/community.postgresql/issues/45). +- postgresql_set - fails in check_mode on non-numeric values containing `B` (https://github.com/ansible-collections/community.postgresql/issues/48). + +v1.1.0 +====== + +Release Summary +--------------- + +This is the minor release of the ``community.postgresql`` collection. +This changelog contains all changes to the modules in this collection that +have been added after the release of ``community.postgresql`` 1.0.0. + +Minor Changes +------------- + +- postgresql_query - add ``as_single_query`` option to execute a script content as a single query to avoid semicolon related errors (https://github.com/ansible-collections/community.postgresql/pull/37). + +Bugfixes +-------- + +- postgresql_info - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40). +- postgresql_ping - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40). +- postgresql_set - return a message instead of traceback when a passed parameter has not been found (https://github.com/ansible-collections/community.postgresql/issues/41). 
+ +v1.0.0 +====== + +Release Summary +--------------- + +This is the first proper release of the ``community.postgresql`` collection which is needed to include the collection in Ansible. +This changelog does not contain any changes because there are no changes made since release 0.1.0. + + +v0.1.0 +====== + +Release Summary +--------------- + +The ``community.postgresql`` continues the work on the Ansible PostgreSQL +modules from their state in ``community.general`` 1.2.0. +The changes listed here are thus relative to the modules ``community.general.postgresql_*``. + + +Minor Changes +------------- + +- postgresql_info - add ``in_recovery`` return value to show if a service in recovery mode or not (https://github.com/ansible-collections/community.general/issues/1068). +- postgresql_privs - add ``procedure`` type support (https://github.com/ansible-collections/community.general/issues/1002). +- postgresql_query - add ``query_list`` and ``query_all_results`` return values (https://github.com/ansible-collections/community.general/issues/838). + +Bugfixes +-------- + +- postgresql_ext - fix the module crashes when available ext versions cannot be compared with current version (https://github.com/ansible-collections/community.general/issues/1095). +- postgresql_ext - fix version selection when ``version=latest`` (https://github.com/ansible-collections/community.general/pull/1078). +- postgresql_privs - fix module fails when ``type`` group and passing ``objs`` value containing hyphens (https://github.com/ansible-collections/community.general/issues/1058). 
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/COPYING b/collections-debian-merged/ansible_collections/community/postgresql/COPYING new file mode 100644 index 00000000..f288702d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. 
If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/collections-debian-merged/ansible_collections/community/postgresql/FILES.json b/collections-debian-merged/ansible_collections/community/postgresql/FILES.json new file mode 100644 index 00000000..ea80e471 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/FILES.json @@ -0,0 +1,2385 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/postgresql_membership.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44410cab59069279cb1cffeb80af60201f9a321d1bbf207f2b3e13e28bcfb781", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_user.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d73884a85c6b512d048eb6f21efbe22f68dd752f364793413a9d5cfe0feb328c", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_lang.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2058e9c8ab39b0253ff97f6a25c23df4abd1cef25f7bd7718233872c85e0fd42", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_pg_hba.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a2b9c7684cd9154a0aea6afb4b190b2ef58b42a6aa5a7a308941acdee765f6b", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_ping.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc6151be4d00704177b56761a509eb0c87cf47f6e48fc9624992b27b4e5267da", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_info.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "382549c4fa156ea7334da79a7c30ab917384fc8282b3dce6cb667ac0ddd33f05", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_user_obj_stat_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d32782e75fdf5971297eeb70feb35a97cab8fb981f2563f14a0cdf0fc7fa358c", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_sequence.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9190f3ba2f54944ca829d1a56b5742b15caf9798665c9472ed3d9217bc961874", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_set.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4fe0eb6dfc6445eff50073408332afdfb06a0a8eb94c75dcbe681c220cd29ef5", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_copy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33fcce0d082f18c5ca99bd06c26bccc953a34f902573fea7ded5427b98a73fcb", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_ext.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dedd8728c19dfefe70667bbd692386a79db8bfd6383d229dc3e43dcef2f6ea56", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_privs.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b1a8af6bcc8c03a0b72134d73353fdbb890de23bcc5d73efecc67e5f5a902f91", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_table.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4193948365321688d2307a258ebe90c9ee4c808aaed24639fb42ba43d74ce9f8", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_tablespace.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "19a2682bd8cf3a0df60d4e7bfafe2dd0dc80b64c8b8bf9540785c935e18781b7", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_schema.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0941619650d5e9ca5d23b50696a954789d73d21252b5f553594146ffc2fc8f21", + "format": 1 + }, + { + "name": 
"plugins/modules/postgresql_owner.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31b460d944df29be5f40a636ffa660bffb2b36108fd0aa7c42fcdfccf0cff4df", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_db.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff85ac757a5c2f8a017449f9f65bf3b2dd3c99dfb8d424b00f0ff51ad6dc1ce4", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_subscription.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6edb826cd79d6aca511ac44d0c2571f219493a619ca90d5ed76ebbebf092b482", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_slot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f623b52acec17d3c41b251ef83ec301be67ef463fc433608d4f456779af5225", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_idx.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e8c059194e66fb1ee9f3be03af5733cb72d255c0b323694a5ec8c92bd560298", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_publication.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6218d527eab37ba608f756e103d02dd441ab6fee2f32e656bcb2197ce6154f66", + "format": 1 + }, + { + "name": "plugins/modules/postgresql_query.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f01589f9c3ca2ed814a01bee389472fa3b4dd64ecd432955a57408fdfa65203", + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/postgres.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5829a5a9e73df091b04cc3e24c8ac3b467721b0c70a84bed979339fe850e5a53", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/postgres.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"65e3101d323d453f7292912da477050d0fb90f7b0e4fd155cad0698f1c1c3815", + "format": 1 + }, + { + "name": "plugins/module_utils/saslprep.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc03619992801129d5aacabd6caca116a11077a3270d1a8934213a0f870725af", + "format": 1 + }, + { + "name": "plugins/module_utils/database.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf286e7fb228799c46b66d7e34bacb613838f65d8faa42efd2a1f29ee6759c1b", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02454f2d75ab7663e302f81b1ee3422429bdc52163ee16375b5eda0c4973cfe7", + "format": 1 + }, + { + "name": ".azure-pipelines", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".azure-pipelines/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".azure-pipelines/templates/coverage.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "daf1930264760d47b54588f05c6339fd69ca2d239c77c44bc4cee3c4e9f76447", + "format": 1 + }, + { + "name": ".azure-pipelines/templates/matrix.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4fb0d3ffb2125d5806c7597e4f9d4b2af69cf8c337e9d57803081eddd4a6b081", + "format": 1 + }, + { + "name": ".azure-pipelines/templates/test.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2cfa1271f94c71f05ffa0b1f763d8946394b5636e14579cda8ee14bb38bbcf1c", + "format": 1 + }, + { + "name": ".azure-pipelines/azure-pipelines.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d08d7cf1cb05ff35b985a5fd11d54a44e1435d08b237d060ccf418cabaea46d", + "format": 1 + }, + { + "name": ".azure-pipelines/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61f20decd3c8fb34ac2cc6ff79f598fc5136e642130a7ba065ccc5aa37960cd2", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts", + "ftype": 
"dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/combine-coverage.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e34d4e863a65b9f53c4ca8ae37655858969898a949e050e9cb3cb0d5f02342d0", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/aggregate-coverage.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "820353ffde6fd3ad655118772547549d84ccf0a7ba951e8fb1325f912ef640a0", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/report-coverage.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6a373322759ccc2736fb25d25d8c402dfe16b5d9a57cfccb1ca8cb136e09663", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/publish-codecov.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2662ee5039b851fd66c069ff6704024951ced29fa04bf1e2df5b75f18fc2a32b", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/time-command.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0232f415efeb583ddff907c058986963b775441eaf129d7162aee0acb0d36834", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/run-tests.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb08a3ec5715b00d476ae6d63ca22e11a9ad8887239439937d2a7ea342e5a623", + "format": 1 + }, + { + "name": ".azure-pipelines/scripts/process-results.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c03d7273fe58882a439b6723e92ab89f1e127772b5ce35aa67c546dd62659741", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df18179bb2f5447a56ac92261a911649b96821c0b2c08eea62d5cc6b0195203f", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit", + "ftype": 
"dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_postgres.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86ec52877f1e27e9ca6596b87ad2ee87586018f49b6c10d29b94d081d51989e7", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/test_saslprep.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5e044a935e091aaf52115d3f3238bcd32b3627f9cebac26bd1d6d52aa339953", + "format": 1 + }, + { + "name": "tests/unit/plugins/module_utils/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49102a67465c1a0afcbbee7add94310d2a1f673eb6e588d37ccf598c92159724", + "format": 1 + }, + { + "name": "tests/sanity/extra", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/extra/no-unwanted-files.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3d3b17f699b042958c7cd845a9d685bc935d83062e0bcf077f2c7200e2c0bac", + "format": 1 + }, + { + "name": "tests/sanity/extra/no-unwanted-files.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1468e7b22ba353d18fcf2f5b18607873f792de629f887798f081eb6e2cd54fc", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"4fa0f70f77a42e1969eead40104af12e8a476fc8d68eb99f9437a2db17a3b01a", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49102a67465c1a0afcbbee7add94310d2a1f673eb6e588d37ccf598c92159724", + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ext", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ext/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6109411f740064ab34f8658eedb93865183bdc5b561ac184e3d6ea7c8af4ab6c", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d95163444521b730b60aa1b1113c51a350afb9ba594cc6a9e319a8d298606f2a", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ext/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69406a13751afa1a236adfa8a4f6d029018ee42986ab25676885bb54a0d5adc5", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c598bdac114d3caf6d677960202338b5d28fd7b4597d346d72fc543c8e125fbc", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ext/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/postgresql_ext/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "62bf42b117b74dcc39491f0501575a090971dd200281a3d7b1323601f8429365", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ext/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d1d066196bc922c2a80e67260f93a69bd805233b4f4b60baed742ef0e68e8e2", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ext/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ext/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0825fa293c525220ac6e3f52f1fbd7464a1ea91281eda9fb3dc3d614ba77f815", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "977d4f865cde7124f68c3c97f31776d15d86897a843c7fe9e815134e2852babb", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "061558b412eb0734d680cc4498c45dacb89222065c292631fe4f9351d9f74eca", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61aeb2ebcd4439d4c8f2f150ffabf461df17e0da5760297e07790b59f6911506", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"7ae81d2773a21629eda0387590cfc79f41514fe513f8f16c80019a66f72da26c", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41224534c3fee82c517472b1ff3af82528eb5f67122e2a4de23a1e2a1ec2d73a", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3dbb7181be6f88eb1887cc28be153f197c76ccdb0512af52f311816658b1e9e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_db/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11d903913550d93aaffcda9458d70349ce6e703cf071e922c8124dc24b7f9cdb", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a70a2f2e5beaa52cefcc93461ca067ef2b665b859a468cbe74d27464253bc6e", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/templates/master_postgresql.conf.j2", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4fedb50a382f0153f37bb273808a4df282bffd4b35410dac27a4b1a6c961624", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b3e39d80a8a59947b5fba38e8db942a1933ffefcef368cd13a5594fc2f65668", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d8abd7fe65aa2234a28d02124d7b660f17796e64fe4e6833b2031487c9e46a7", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e6986d91d5fb93bae8843f7df3d4b39db79181fa17c0e38118e8052835dc2c2", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a04df3a152ce62fb42770e18089f0ed3802d618ae9cad8b8b57274e8875b4cd", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_replication/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3285130635bbc661c3a3e195cce90ca914de220a86e94b6b423dde6a9ae909f3", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_slot", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": 
null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_slot/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_slot/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5419b276f7c47acaf995267ce3acbb56a0922a4f715db0df27cf0e535cd0220d", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b8d53742b5e50fc42d5ae1b7de9d5090168ac4ecc5c6ce06b142a3cc6938eb5", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_slot/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_slot/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_slot/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_table", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_table/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_table/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "472e36bde52e14d2f401739cdbcaf09c945bafaec718ff016890616b6bc3c118", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d4c3198701c97bd1ce2daad5592e08b15ad9f0324f9c856ce16433c73b0e0406", + 
"format": 1 + }, + { + "name": "tests/integration/targets/postgresql_table/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_table/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_table/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "01f5b747e4b37d8d14ee65bf72bc036da73766464ae02af174f5b6d41394aa87", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_schema", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_schema/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da5a970704edf3a78de91acb2549a101cfadda8793c90c65723938c18036e3cb", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4e71d17cbb99fbec0b80c86ec63447f1ffafe17baf914a44ab751b673145626e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_schema/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "01c1f7f0d634cabfe23bdbaac0559d84c25265bafeebf3ffba6175a6e90f837a", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_schema/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_schema/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_schema/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_schema/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_schema/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db462dadd7106222bf4e2f1beab06e522172bd5ba2502cb702aaead6a06ac716", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_publication", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_publication/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_publication/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "663e2ade4e3c5f2b5e2c75da96b8f596196a21fe8bb4ce5fcecb63e2f664d0b7", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30586aed9fc7d67db41330928b859804e787699cd953223ef14c245b170d1a07", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_publication/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_publication/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_publication/aliases", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "9d1d066196bc922c2a80e67260f93a69bd805233b4f4b60baed742ef0e68e8e2", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_pkg_mgr", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_pkg_mgr/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_pkg_mgr/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a3ac16c80747874defcb3b8bcd0cf796fb847b440ee5ae240d7b5b71cf32c0e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_idx", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_idx/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_idx/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be09f7acd3c03ff517ade5601bc6a55aafe3c0d8e60fc37133f1e24d69935f8c", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5310371ca64df3006a6a5d2193d495509cdf281282c8c17d238185659b5f2f5e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_idx/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_idx/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_idx/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": 
"tests/integration/targets/setup_postgresql_db", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a16cb164d32705033b9e7a7c4e9b8050de79c561deddbcc8603e8d0d59cb563e", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/files/dummy.control", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8000b3819e84f8f1af575e137e4f478bc16cef5b0b11867f4d348840ea34bff", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/files/pg_hba.conf", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a1d8fd0e6645d939cf0fc5a67738039e036f06c540efeb8a18bf9fed779ddb40", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4e7839720cd873fbfbe855a61c55f4d69bf5154c420a5a776daccba0db0326e", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be49da51a69d0f8af9ad8bfd120189b95aa9feb2ea00be9e2f6e06af3a5c754b", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a64e1cd8f108335b01429db1e69cea7470b3399c08f186092377e89c88c55ac2", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/tasks/ssl.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"1e33b09073258235aaf62001cdbe34257c2a25412856d1fb2dbefac18fa4c17f", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3aed68dc0d315a161453b95ef5d5fc2e386fe3569386bc1620128bd59e955afb", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/default-py3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd40f6ab072c5b9d5c83d83595fc6a6489dfc8ddeb4c470b01d8b6b3d539b361", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cec5e777df87e1ef7dfac426286cc5a26d3ed9bc8d7e4e3a8c307f6d670b5edd", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83efd43f61060d2f160e5a2fa8fcd185029672112068fc103f0e35ab384bb8b2", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c03e2e8a0629d06cb1bd91a6e0dc3b72a3079d5c045d07f09684d63e2f1759e9", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"6d202e54ca2923b961c8d293fcb733c3a7acfa5aceeb0a524f230314e760c62b", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8b67d7d2dc8c0caa1b89de5338dfabcc75e6480ecc6cd92bc26da43affd9568", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40a6304f5e4cf6e2baaa10718ae657c1ca67bb1cf127bd971b2a438d6c64f215", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44b26cda219358db0bdac8d4df06f14be1405c0ec75c9cd066b79a84fd97990e", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/default.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64f6e20d24c3def6bae446791f056131c8d272d6dda5f354ae63bfdc415e0819", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3dff2ca237b634d4731dad7c812af330acd802a9aafa126c1ce623d80a2330b4", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "510078001c894619b1437c45e647391781d9fb1a17bcff5cb26d7939a4970a16", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92b075e3829177e0a0163e38e7a65f108084d520ac1d4f55031c4b574654a7af", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d202e54ca2923b961c8d293fcb733c3a7acfa5aceeb0a524f230314e760c62b", + "format": 1 + }, + { + "name": 
"tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be5506a46fa7e9d06827fb16fcbcb51093d07c4e50e476b928bd285e4d3d6d60", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "641d72a4f0cd5bb640de8ed043befc0cadcf9c70cc399f0a1485483e32c35fe7", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_postgresql_db/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0349988df512a65575f9594c62d6d8a0aa0cea38ef60f75236e6c9c1bb075d58", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_query", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_query/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_query/files/test0.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3c59720ac1314a814f51dced181f9d624d11b0087f095cf75dc97a90b6cd777", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_query/files/test1.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8de29e382b1af32c9352400d107e16d232405cc50c126ae7f99a5a0879f34320", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_query/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_query/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a77f5f53953beb0037561ef70130651715fe677f0428a3f56e9facc00dcc5eb5", + "format": 1 + }, + { + "name": 
"tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a38fe33791abba7d5ed391032d579ecf3e29cb722802ab9134869dd5d2968cb", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_query/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_query/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_query/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41aee9caccefdd5eec4b8fd1eeaea84532cd4402095e8742d0e866cc8139b5b4", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_info/tasks/setup_publication.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c1b2d7e52bc4193e32bc18eada60703e9baab6fed72b72d46ec059d671a1471", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c57a82aac7bbdc95103399dd7cdf22c8f4ddd32c6757f31a7bba7bc12ee2dcc", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/postgresql_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ab541e45bbbeb211496e76434bd09715e9a541449c267f449d4625b044465286", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fcd58a1a5188a145948f0d2cd67f7d46900d949fcb7f3b28e7731b0a062df66e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18ab0e6a6ced70d5cfc47e95b4d965681f21e946f0b624a53d591461f6068117", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_membership", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_membership/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3bf999eaca031d48e1f02c951c913b339c20992f5fed64196b8c95f7f3d8f221", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_membership/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d5aca162654e8110b7883ff6ce7517273d7b49688a05d8331b0d39f5321b4d8a", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_membership/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_membership/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + 
"name": "tests/integration/targets/postgresql_membership/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_membership/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_membership/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b96d9358354b72ca27b4079b748fc140086aa57b553209aa83887e15ee219420", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "725d64739e31b82e7102e348baffdd50693713723c434fffc17c5d4a9d174868", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b75ee9a6bf02c5ff2f9e629a35e588e5297d1bca6463f5fc69a06aa27735d96f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e19f7046dc136bb74a25b4fc5bd45ae7c7dd48be648dd28fd12ffccd54ae1e83", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 
+ }, + { + "name": "tests/integration/targets/postgresql_lang/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang/vars/default.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang/vars/CentOS-8.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5b45ee4b79b491c7c057d1c4c940df1ef7fa8e7fa6e1d006cbb1f839eeca40d", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_lang/vars/CentOS-7.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2395b707eb60229acffb97605104820a7f00a23d83d63d90e353929e97fb4e9", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_set", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_set/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_set/tasks/options_coverage.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c21ee7d0e841618f1dac234c54c2a8902fde8608bbc80cde8394f8f6a6bef31", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_set/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "955f64aa9a8675f80d702ab39436a2b236d1adb7b9c89a4dfe313fe1e0c51594", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a04bb3707fc2f553413dd3946312dd913da22d66108bee535168c8dc0df9d658", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_set/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_set/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_set/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user_obj_stat_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user_obj_stat_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72441f6b0cc63e71b547b18314bc8b45bb8a792ba466d75c8a4b2c63c7bdecc2", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb9e09507ad5d6513450009d5745cf6af17ebae39a58b53bad34607387170752", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user_obj_stat_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user_obj_stat_info/aliases", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "fcd58a1a5188a145948f0d2cd67f7d46900d949fcb7f3b28e7731b0a062df66e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user_obj_stat_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd425d7bbca57edbcc6883e032b1d00ecdc9b2cc1d75b0be4fda3e488fed7053", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_copy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_copy/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_copy/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb8afe7f5d60c2d26691849facab97a0cabdf2113dff7963c9dcf8ea399d0354", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9208884beaa8b6ff04285e748e50ef0402f1af9f9875929dd2eacea9c389112e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_copy/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_copy/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_copy/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_tablespace", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_tablespace/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_tablespace/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "188470559381756e939df9d964d444ff47e7df613c77b6cc2653627bab65df69", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c42d4a477852bb0307062e6aa65d150e9fd5e84c643e74c458b576b2b1b72844", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_tablespace/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_tablespace/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_tablespace/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_tablespace/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_tablespace/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "589d9dd6e690018d918be7f002c310d40aa4632f100d32008dbd7db685d47ecb", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + 
"format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec0bfa34cdff051289a8824b5efaed38d2a3e5e1c3810a2f3f155b6f72f3a551", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b9c4a77bc5ca43db99fdce7e5ae3edf5cb72fdce4f328e11d3cef0399be907f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3581e99cafd5936362840e483d966ac78f69c4969dbd55609d18e114574fd4d0", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52d9ea758f3abea032d42028fe55c7373ac250cc5285a23e22f49229a14831f9", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b3669f59f5e07f827479495a55c7cfd401b682e5067aeec46e0b9f380776ed5", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/tasks/test_target_role.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b6544ff9e5356dbde038ea100d1d6e25eaa25ae588222dbb8d94e649256b013", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_privs/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68036cf7cd11887e6421c5863abd5ab5b4592fe4d70f5199dd284d82c581c798", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ping", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ping/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ping/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "115c090f1029bac19af8970417d5e50b212a935c55a2d23059175fdb308f3b92", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31a47d8be93cb3ebd67498a9016ced3d259b1d1bffcbfec8ab84bf03c677bcee", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ping/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ping/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ping/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_ping/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/postgresql_ping/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee4c4c179b3e7fdde23d8cf3a4855188e229f2a4ba2828bb3dd8b0a6a1365aea", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc4c15de06b2d6a7367156243b5f7a40bea5863eeb098e44cf7ddc3b304e0125", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2d881033346cf5e84f4a84dbdcbdd6539fca4a20663af9aa958f7befe2a07a7", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/tasks/test_password.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c49f0cdaf8a111ae6d575cffaf696a863eb1288bc7df778d48871da06a4bba19", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4342b6e307d91be7a98212fe29adbaa34d6ac12905a1e1f4347eea6c74173d8", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ab6c81a10d4fad3dc540e804f657478b4f6b8b940549742bf944216ed51bd07", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_user/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b14eed97d15235e2056971743de23709e18c00b91c358fe1e79002c98ece9d60", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_sequence", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_sequence/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_sequence/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e2b8a8ad0b65cf738ea7fdcb6b18f528a6bcfffa1981f91838fc284ac4ddb6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0763f1ec93dac58e3dbe26963e7ff9f4df97e5692302d31e19ee7edffb58384e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_sequence/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_sequence/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_sequence/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_sequence/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_sequence/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d437a0ee3b271c8934e68d14684931a56a398d633c6b549e74ee3a9f17f9bc46", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_pg_hba", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_pg_hba/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_pg_hba/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447bebf5eaa66b04d77ddc3640990e7816dbaf207422402037a5869a4694399c", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd5c23f758ddbf09bb7959fb3aefcf2fae8bfb1e3fdf3ee21b74a96776fa8608", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_pg_hba/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_pg_hba/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_pg_hba/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_pg_hba/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 
1 + }, + { + "name": "tests/integration/targets/postgresql_pg_hba/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92cf983b9149e1ac59ed2b39a814b5c1c9204f949884922cbd265ce0eeec4747", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_subscription", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_subscription/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3870dfe0e07b4439e25117ae0b424887347c1cd677f608522d6d7a99384642a", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_subscription/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eca673fddaf151a108873b2567fc9e40fb19ec24f6559355a602e983fdcf5495", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b08433d5f3e4e564e74dcf0a3fe4221379ab317705845907d68d5203ab1704c2", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_subscription/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_subscription/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ab541e45bbbeb211496e76434bd09715e9a541449c267f449d4625b044465286", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_subscription/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fcd58a1a5188a145948f0d2cd67f7d46900d949fcb7f3b28e7731b0a062df66e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_subscription/defaults", + "ftype": 
"dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_subscription/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a1c340db087979bebe0424643ee450456e3306e9bbc5e82d42cc34857023e03e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_owner", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_owner/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a46fea48ad48b4722d9a103098fdb57ba23dab047a74d6db00a70b53141847e3", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_owner/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be8c3305d4986b588578932b0b29cf10d6c85909adf2a31afeaa8fa4c9d9d946", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_owner/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_owner/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_owner/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f", + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_owner/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/postgresql_owner/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"5c1aa84798ed23a23135fbf53a881b42e2c1edc79f8edc7509829125fefa1a05", + "format": 1 + }, + { + "name": "tests/utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/utils/shippable", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/utils/shippable/freebsd.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92", + "format": 1 + }, + { + "name": "tests/utils/shippable/linux.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "07aa5e07a0b732a671bf9fdadfe073dd310b81857b897328ce2fa829e2c76315", + "format": 1 + }, + { + "name": "tests/utils/shippable/rhel.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92", + "format": 1 + }, + { + "name": "tests/utils/shippable/timing.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3f3cc03a997cdba719b0542fe668fc612451841cbe840ab36865f30aa54a1bd", + "format": 1 + }, + { + "name": "tests/utils/shippable/check_matrix.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "163dc2e4b0fb28faa6a03c02c7a5b2e470ca156e119943bf1d8bbf5efff02c18", + "format": 1 + }, + { + "name": "tests/utils/shippable/shippable.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4bfb96822521d9f13120593d56ec9c21c49ff5cb13d3a435b1fe65eae28c31db", + "format": 1 + }, + { + "name": "tests/utils/shippable/aix.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92", + "format": 1 + }, + { + "name": "tests/utils/shippable/sanity.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a01d92ca36ea457c0e7032ece03a0b485377eef8c8598d8f7c04a185fba279ed", + "format": 1 + }, + { + "name": "tests/utils/shippable/remote.sh", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92", + "format": 1 + }, + { + "name": "tests/utils/shippable/units.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a712977b416e5b93325b40d0bf855e4817e597076552f79777aeca8d2fa192bd", + "format": 1 + }, + { + "name": "tests/utils/shippable/timing.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ebb7d3553349747ad41d80899ed353e13cf32fcbecbb6566cf36e9d2bc33703e", + "format": 1 + }, + { + "name": "tests/utils/constraints.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5beb3383ef5546038c2c00ea8e5f438e607d91828ce2259594fd8fbaea003ec9", + "format": 1 + }, + { + "name": "tests/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "309eab525a905a19f04aa713a60a344b64f115cbdece10d237ddd35f5fc76311", + "format": 1 + }, + { + "name": "CHANGELOG.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7ca4fac8a50379c0bdc4ab9dd0fdf9820367fb3c137a2b66b1dc267a3c9ad98", + "format": 1 + }, + { + "name": "changelogs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/config.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc4c2808161bd591e443ffd315931ec82d014ee09698caf078ca953e8689a8ce", + "format": 1 + }, + { + "name": "changelogs/fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "changelogs/fragments/.keep", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "changelogs/changelog.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c5a84cccccd12a06f481f24666f0ebd188fd5c95f69854d031c6f59ce64c32a", + "format": 1 + }, + { + "name": "shippable.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "b25a0d6005dd9ca6a8b332606992ad6ce1b8570dce061bb3eafe80ec60d49e5f", + "format": 1 + }, + { + "name": "COPYING", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986", + "format": 1 + } + ], + "format": 1 +}
\ No newline at end of file diff --git a/collections-debian-merged/ansible_collections/community/postgresql/MANIFEST.json b/collections-debian-merged/ansible_collections/community/postgresql/MANIFEST.json new file mode 100644 index 00000000..1d67863a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/MANIFEST.json @@ -0,0 +1,32 @@ +{ + "collection_info": { + "namespace": "community", + "name": "postgresql", + "version": "1.1.1", + "authors": [ + "Ansible PostgreSQL community" + ], + "readme": "README.md", + "tags": [ + "database", + "postgres", + "postgresql" + ], + "description": null, + "license": [], + "license_file": "COPYING", + "dependencies": {}, + "repository": "https://github.com/ansible-collections/community.postgresql", + "documentation": "https://docs.ansible.com/ansible/latest/collections/community/postgresql", + "homepage": "https://github.com/ansible-collections/community.postgresql", + "issues": "https://github.com/ansible-collections/community.postgresql/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc617e18d5a269c5179a150958e223af4d512f398e6d1af793140c2c7f3a9f05", + "format": 1 + }, + "format": 1 +}
\ No newline at end of file diff --git a/collections-debian-merged/ansible_collections/community/postgresql/README.md b/collections-debian-merged/ansible_collections/community/postgresql/README.md new file mode 100644 index 00000000..fe54424d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/README.md @@ -0,0 +1,105 @@ +# PostgreSQL collection for Ansible +[![Build Status]( +https://dev.azure.com/ansible/community.postgres/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.postgres/_build?definitionId=28) +[![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.postgresql)](https://codecov.io/gh/ansible-collections/community.postgresql) + +## External requirements + +The PostgreSQL modules rely on the [Psycopg2](https://www.psycopg.org/docs/) PostgreSQL database adapter. + +## Tested with Ansible + +- 2.9 +- 2.10 +- devel + +## Included content + +- **Info modules**: + - [postgresql_info](https://docs.ansible.com/ansible/latest/modules/postgresql_info_module.html) + - [postgresql_ping](https://docs.ansible.com/ansible/latest/modules/postgresql_ping_module.html) + - [postgresql_user_obj_stat_info](https://docs.ansible.com/ansible/latest/modules/postgresql_user_obj_stat_info_module.html) + +- **Basic modules**: + - [postgresql_db](https://docs.ansible.com/ansible/latest/modules/postgresql_db_module.html) + - [postgresql_ext](https://docs.ansible.com/ansible/latest/modules/postgresql_ext_module.html) + - [postgresql_lang](https://docs.ansible.com/ansible/latest/modules/postgresql_lang_module.html) + - [postgresql_pg_hba](https://docs.ansible.com/ansible/latest/modules/postgresql_hba_module.html) + - [postgresql_privs](https://docs.ansible.com/ansible/latest/modules/postgresql_privs_module.html) + - [postgresql_set](https://docs.ansible.com/ansible/latest/modules/postgresql_set_module.html) + - 
[postgresql_schema](https://docs.ansible.com/ansible/latest/modules/postgresql_schema_module.html) + - [postgresql_tablespace](https://docs.ansible.com/ansible/latest/modules/postgresql_tablespace_module.html) + - [postgresql_query](https://docs.ansible.com/ansible/latest/modules/postgresql_query_module.html) + - [postgresql_user](https://docs.ansible.com/ansible/latest/modules/postgresql_user_module.html) + +- **Other modules**: + - [postgresql_copy](https://docs.ansible.com/ansible/latest/modules/postgresql_copy_module.html) + - [postgresql_idx](https://docs.ansible.com/ansible/latest/modules/postgresql_idx_module.html) + - [postgresql_membership](https://docs.ansible.com/ansible/latest/modules/postgresql_membership_module.html) + - [postgresql_owner](https://docs.ansible.com/ansible/latest/modules/postgresql_owner_module.html) + - [postgresql_publication](https://docs.ansible.com/ansible/latest/modules/postgresql_publication_module.html) + - [postgresql_sequence](https://docs.ansible.com/ansible/latest/modules/postgresql_sequence_module.html) + - [postgresql_slot](https://docs.ansible.com/ansible/latest/modules/postgresql_slot_module.html) + - [postgresql_subscription](https://docs.ansible.com/ansible/latest/modules/postgresql_subscription_module.html) + - [postgresql_table](https://docs.ansible.com/ansible/latest/modules/postgresql_table_module.html) + +## Using this collection + +### Installing the Collection from Ansible Galaxy + +Before using the PostgreSQL collection, you need to install it with the Ansible Galaxy CLI: + +```bash +ansible-galaxy collection install community.postgresql +``` + +You can include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml`, using the format: + +```yaml +--- +collections: + - name: community.postgresql +``` + +You can also download the tarball from Ansible Galaxy and install the collection manually wherever you need. 
+ +See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. + +## Contributing to this collection + +<!--Describe how the community can contribute to your collection. At a minimum, include how and where users can create issues to report problems or request features for this collection. List contribution requirements, including preferred workflows and necessary testing, so you can benefit from community PRs. If you are following general Ansible contributor guidelines, you can link to - [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html). --> + +We're following the general Ansible contributor guidelines; see [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html). + +If you want to clone this repository (or a fork of it) to improve it, you can proceed as follows: +1. Create a directory `ansible_collections/community`; +2. In there, checkout this repository (or a fork) as `postgresql`; +3. Add the directory containing `ansible_collections` to your [ANSIBLE_COLLECTIONS_PATH](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths). + +See [Ansible's dev guide](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections) for more information. + +There is also [the community pinboard](https://github.com/ansible/community/issues/435) used by [the PostgreSQL working group](https://github.com/ansible/community/wiki/PostgreSQL) for announcements and discussing general questions. + +## Release notes + +See the [changelog](https://github.com/ansible-collections/community.postgresql/blob/main/CHANGELOG.rst). + +## Roadmap + +See the [release plan](https://github.com/ansible-collections/community.postgresql/issues/13). 
+ +## More information + +- [Ansible Collection overview](https://github.com/ansible-collections/overview) +- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html) +- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html) +- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) + +## Licensing + +<!-- Include the appropriate license information here and a pointer to the full licensing details. If the collection contains modules migrated from the ansible/ansible repo, you must use the same license that existed in the ansible/ansible repo. See the GNU license example below. --> + +GNU General Public License v3.0 or later. + +See [LICENSE](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text. diff --git a/collections-debian-merged/ansible_collections/community/postgresql/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/community/postgresql/changelogs/changelog.yaml new file mode 100644 index 00000000..026b4daa --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/changelogs/changelog.yaml @@ -0,0 +1,89 @@ +ancestor: null +releases: + 0.1.0: + changes: + bugfixes: + - postgresql_ext - fix the module crashes when available ext versions cannot + be compared with current version (https://github.com/ansible-collections/community.general/issues/1095). + - postgresql_ext - fix version selection when ``version=latest`` (https://github.com/ansible-collections/community.general/pull/1078). + - postgresql_privs - fix module fails when ``type`` group and passing ``objs`` + value containing hyphens (https://github.com/ansible-collections/community.general/issues/1058). + minor_changes: + - postgresql_info - add ``in_recovery`` return value to show if a service in + recovery mode or not (https://github.com/ansible-collections/community.general/issues/1068). 
+ - postgresql_privs - add ``procedure`` type support (https://github.com/ansible-collections/community.general/issues/1002). + - postgresql_query - add ``query_list`` and ``query_all_results`` return values + (https://github.com/ansible-collections/community.general/issues/838). + release_summary: 'The ``community.postgresql`` continues the work on the Ansible + PostgreSQL + + modules from their state in ``community.general`` 1.2.0. + + The changes listed here are thus relative to the modules ``community.general.postgresql_*``. + +' + fragments: + - 0.1.0.yml + - 1048-postgresql_privs_add_procedure_type.yml + - 1059-postgresql_privs_fix_failings_when_using_roles_with_hyphen.yml + - 1078-postgresql_ext_fix_version_selection_when_version_is_latest.yml + - 1091-postgresql_info_add_in_recovery_ret_val.yml + - 1099-postgresql_ext_fix_failing_when_version_cannot_be_compared.yml + - 886-postgresql_query_add_ret_vals.yml + release_date: '2020-10-29' + 1.0.0: + changes: + release_summary: 'This is the first proper release of the ``community.postgresql`` + collection which is needed to include the collection in Ansible. + + This changelog does not contain any changes because there are no changes made + since release 0.1.0. + +' + fragments: + - 1.0.0.yml + release_date: '2020-11-17' + 1.1.0: + changes: + bugfixes: + - postgresql_info - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40). + - postgresql_ping - fix crash caused by wrong PgSQL version parsing (https://github.com/ansible-collections/community.postgresql/issues/40). + - postgresql_set - return a message instead of traceback when a passed parameter + has not been found (https://github.com/ansible-collections/community.postgresql/issues/41). 
+ minor_changes: + - postgresql_query - add ``as_single_query`` option to execute a script content + as a single query to avoid semicolon related errors (https://github.com/ansible-collections/community.postgresql/pull/37). + release_summary: 'This is the minor release of the ``community.postgresql`` + collection. + + This changelog contains all changes to the modules in this collection that + + have been added after the release of ``community.postgresql`` 1.0.0.' + fragments: + - 1.1.0.yml + - 37-postgresql_query_add_single_query_opt.yml + - 42-postgresql_set_add_message_when_parameter_not_found.yml + - 43-modules_fix_version_parsing.yml + release_date: '2021-01-18' + 1.1.1: + changes: + bugfixes: + - postgresql_query - add a warning to set ``as_single_query`` option explicitly + (https://github.com/ansible-collections/community.postgresql/pull/54). + - postgresql_query - fix datetime.timedelta type handling (https://github.com/ansible-collections/community.postgresql/issues/47). + - postgresql_query - fix decimal handling (https://github.com/ansible-collections/community.postgresql/issues/45). + - postgresql_set - fails in check_mode on non-numeric values containing `B` + (https://github.com/ansible-collections/community.postgresql/issues/48). + release_summary: 'This is the patch release of the ``community.postgresql`` + collection. + + This changelog contains all changes to the modules in this collection that + + have been added after the release of ``community.postgresql`` 1.1.0.' 
+ fragments: + - 1.1.1.yml + - 46-postgresql_query_fix_decimal_handling.yml + - 51-postgresql_query_fix_datetime_timedelta_type_handling.yml + - 52-postgresql_set_fix_b_values_handling.yml + - 54-postgresql_query_add_warning_as_single_query.yml + release_date: '2021-02-09' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/changelogs/config.yaml b/collections-debian-merged/ansible_collections/community/postgresql/changelogs/config.yaml new file mode 100644 index 00000000..70f0481a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/changelogs/config.yaml @@ -0,0 +1,29 @@ +changelog_filename_template: ../CHANGELOG.rst +changelog_filename_version_depth: 0 +changes_file: changelog.yaml +changes_format: combined +keep_fragments: false +mention_ancestor: true +new_plugins_after_name: removed_features +notesdir: fragments +prelude_section_name: release_summary +prelude_section_title: Release Summary +sections: +- - major_changes + - Major Changes +- - minor_changes + - Minor Changes +- - breaking_changes + - Breaking Changes / Porting Guide +- - deprecated_features + - Deprecated Features +- - removed_features + - Removed Features (previously deprecated) +- - security_fixes + - Security Fixes +- - bugfixes + - Bugfixes +- - known_issues + - Known Issues +title: Community PostgreSQL Collection +trivial_section_name: trivial diff --git a/collections-debian-merged/ansible_collections/community/postgresql/changelogs/fragments/.keep b/collections-debian-merged/ansible_collections/community/postgresql/changelogs/fragments/.keep new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/changelogs/fragments/.keep diff --git a/collections-debian-merged/ansible_collections/community/postgresql/meta/runtime.yml b/collections-debian-merged/ansible_collections/community/postgresql/meta/runtime.yml new file mode 100644 index 00000000..2ee3c9fa --- 
/dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/meta/runtime.yml @@ -0,0 +1,2 @@ +--- +requires_ansible: '>=2.9.10' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py new file mode 100644 index 00000000..a207bc35 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/doc_fragments/postgres.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + # Postgres documentation fragment + DOCUMENTATION = r''' +options: + login_user: + description: + - The username used to authenticate with. + type: str + default: postgres + login_password: + description: + - The password used to authenticate with. + type: str + login_host: + description: + - Host running the database. + type: str + login_unix_socket: + description: + - Path to a Unix domain socket for local connections. + type: str + port: + description: + - Database port to connect to. + type: int + default: 5432 + aliases: [ login_port ] + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. 
+ type: str + aliases: [ ssl_rootcert ] +notes: +- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host. +- To avoid "Peer authentication failed for user postgres" error, + use postgres user as a I(become_user). +- This module uses psycopg2, a Python PostgreSQL database adapter. You must + ensure that psycopg2 is installed on the host before using this module. +- If the remote host is the PostgreSQL server (which is the default case), then + PostgreSQL must also be installed on the remote host. +- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages + on the remote host before using this module. +- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3. +requirements: [ psycopg2 ] +''' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/module_utils/database.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/module_utils/database.py new file mode 100644 index 00000000..67850308 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/module_utils/database.py @@ -0,0 +1,189 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com> +# +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + + +# Input patterns for is_input_dangerous function: +# +# 1. 
# Pre-compiled patterns used by is_input_dangerous() to spot common
# SQL-injection shapes in strings destined for dynamically built queries.

# 1. A quote character (single or double) followed later by a comment marker.
PATTERN_1 = re.compile(r'(\'|\").*--')

# 2. UNION / INTERSECT / EXCEPT combined with a SELECT.
PATTERN_2 = re.compile(r'(UNION|INTERSECT|EXCEPT).*SELECT', re.IGNORECASE)

# 3. A statement separator ';' followed by any data-changing keyword.
PATTERN_3 = re.compile(r';.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)', re.IGNORECASE)


class SQLParseError(Exception):
    """Raised when an SQL identifier cannot be parsed or quoted."""
    pass


class UnclosedQuoteError(SQLParseError):
    """Raised when a quoted identifier lacks its closing quote character."""
    pass


# Maps an identifier type to the maximum number of dot-separated levels that
# may be used to specify it. For example, a database column can be specified
# by up to 4 levels: database.schema.table.column
_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
    database=1,
    schema=2,
    table=3,
    column=4,
    role=1,
    tablespace=1,
    sequence=3,
    publication=1,
)
_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)


def _find_end_quote(identifier, quote_char):
    """Return the index of the quote that closes a quoted identifier.

    ``identifier`` is the text *after* the opening quote. A doubled
    quote_char counts as an escaped quote, not a terminator. Raises
    UnclosedQuoteError if no closing quote is found.
    """
    offset = 0
    while True:
        try:
            pos = identifier.index(quote_char)
        except ValueError:
            raise UnclosedQuoteError
        offset = offset + pos
        try:
            following = identifier[pos + 1]
        except IndexError:
            # The quote is the final character, so it closes the identifier.
            return offset
        if following != quote_char:
            return offset
        # Doubled quote char: it is an escape, skip both and keep scanning.
        try:
            identifier = identifier[pos + 2:]
            offset = offset + 2
        except IndexError:
            raise UnclosedQuoteError


def _identifier_parse(identifier, quote_char):
    """Split a dotted, possibly quoted identifier into quoted fragments.

    Returns a list of fragments, each individually wrapped in quote_char
    with embedded quote chars doubled. Raises SQLParseError on an empty
    name, a trailing dot, or badly escaped quotes.
    """
    if not identifier:
        raise SQLParseError('Identifier name unspecified or unquoted trailing dot')

    was_quoted = False
    if identifier.startswith(quote_char):
        was_quoted = True
        try:
            close_idx = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
        except UnclosedQuoteError:
            # No closing quote found: fall back to treating it as unquoted.
            was_quoted = False
        else:
            if close_idx < len(identifier) - 1:
                # Text follows the closing quote: it must be a dot separator.
                if identifier[close_idx + 1] == '.':
                    dot_pos = close_idx + 1
                    head = identifier[:dot_pos]
                    rest = identifier[dot_pos + 1:]
                    parts = _identifier_parse(rest, quote_char)
                    parts.insert(0, head)
                else:
                    raise SQLParseError('User escaped identifiers must escape extra quotes')
            else:
                # The whole identifier is one already-quoted fragment.
                parts = [identifier]

    if not was_quoted:
        try:
            dot_pos = identifier.index('.')
        except ValueError:
            # No dot at all: quote the whole name, doubling embedded quotes.
            escaped = identifier.replace(quote_char, quote_char * 2)
            parts = [quote_char + escaped + quote_char]
        else:
            if dot_pos == 0 or dot_pos >= len(identifier) - 1:
                # A leading or trailing dot belongs to the name itself,
                # so quote the identifier verbatim rather than splitting.
                escaped = identifier.replace(quote_char, quote_char * 2)
                parts = [quote_char + escaped + quote_char]
            else:
                head = identifier[:dot_pos]
                rest = identifier[dot_pos + 1:]
                parts = _identifier_parse(rest, quote_char)
                escaped = head.replace(quote_char, quote_char * 2)
                parts.insert(0, quote_char + escaped + quote_char)

    return parts


def pg_quote_identifier(identifier, id_type):
    """Safely quote a PostgreSQL identifier of the given type.

    id_type is one of the keys of _PG_IDENTIFIER_TO_DOT_LEVEL; raises
    SQLParseError when the identifier has more dotted levels than
    PostgreSQL allows for that type.
    """
    fragments = _identifier_parse(identifier, quote_char='"')
    if len(fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
        raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
    return '.'.join(fragments)


def mysql_quote_identifier(identifier, id_type):
    """Safely quote a MySQL identifier; a bare '*' fragment stays unquoted."""
    fragments = _identifier_parse(identifier, quote_char='`')
    if (len(fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
        raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))

    # MySQL's wildcard column must remain a literal '*', not a quoted name.
    return '.'.join('*' if fragment == '`*`' else fragment for fragment in fragments)


def is_input_dangerous(string):
    """Check if the passed string is potentially dangerous.

    Can be used to prevent SQL injections.

    Note: use this function only when you can't use
    psycopg2's parametrized cursor.execute method
    (typically with DDL queries).
    """
    if not string:
        return False

    return any(pattern.search(string) for pattern in (PATTERN_1, PATTERN_2, PATTERN_3))


def check_input(module, *args):
    """Fail the module when any passed value looks like an injection attempt.

    Accepts strings, lists of strings, and arbitrary values (converted
    via str()); None and booleans are always considered safe. Wraps
    is_input_dangerous and calls module.fail_json listing offenders.
    """
    suspicious = []

    for candidate in args:
        if candidate is None or isinstance(candidate, bool):
            # Nothing to inspect in nulls/flags.
            continue

        if isinstance(candidate, str):
            if is_input_dangerous(candidate):
                suspicious.append(candidate)

        elif isinstance(candidate, list):
            suspicious.extend(item for item in candidate if is_input_dangerous(item))

        else:
            as_text = str(candidate)
            if is_input_dangerous(as_text):
                suspicious.append(as_text)

    if suspicious:
        module.fail_json(msg="Passed input '%s' is "
                             "potentially dangerous" % ', '.join(suspicious))
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
# Most of this was originally added by other creators in the postgresql_user module.
#
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

# psycopg2 is imported optionally so the module can still load on hosts
# without the adapter and report a friendly error via ensure_required_libs().
psycopg2 = None  # This line is needed for unit tests (they patch/inspect it)
try:
    import psycopg2
    HAS_PSYCOPG2 = True
except ImportError:
    HAS_PSYCOPG2 = False

from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
from distutils.version import LooseVersion


def postgres_common_argument_spec():
    """
    Return a dictionary with connection options.

    The options are commonly used by most of PostgreSQL modules.
    """
    return dict(
        login_user=dict(default='postgres'),
        login_password=dict(default='', no_log=True),
        login_host=dict(default=''),
        login_unix_socket=dict(default=''),
        port=dict(type='int', default=5432, aliases=['login_port']),
        ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
        ca_cert=dict(aliases=['ssl_rootcert']),
    )


def ensure_required_libs(module):
    """Check required libraries; fail the module if they are missing.

    Fails when psycopg2 is absent, or when ca_cert is requested but the
    installed psycopg2 predates sslrootcert support (< 2.4.3).
    """
    if not HAS_PSYCOPG2:
        module.fail_json(msg=missing_required_lib('psycopg2'))

    if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')


def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
    """Connect to a PostgreSQL database.

    Return psycopg2 connection object, or None when the connection failed
    and fail_on_conn is False.

    Args:
        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
        conn_params (dict) -- dictionary with connection parameters

    Kwargs:
        autocommit (bool) -- commit automatically (default False)
        fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
    """
    ensure_required_libs(module)

    db_connection = None
    try:
        db_connection = psycopg2.connect(**conn_params)
        if autocommit:
            if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
                db_connection.set_session(autocommit=True)
            else:
                # set_session() is unavailable in psycopg2 < 2.4.2.
                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

        # Switch role, if specified:
        if module.params.get('session_role'):
            cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

            try:
                # NOTE(review): session_role is interpolated into SET ROLE
                # without escaping; presumably vetted by check_input upstream
                # in the calling modules -- confirm.
                cursor.execute('SET ROLE "%s"' % module.params['session_role'])
            except Exception as e:
                module.fail_json(msg="Could not switch role: %s" % to_native(e))
            finally:
                cursor.close()

    except TypeError as e:
        # Old psycopg2 raises TypeError for the unknown sslrootcert kwarg.
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least '
                             'version 8.4 to support sslrootcert')

        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
        else:
            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            db_connection = None

    except Exception as e:
        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
        else:
            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            db_connection = None

    return db_connection


def exec_sql(obj, query, query_params=None, return_bool=False, add_to_executed=True, dont_exec=False):
    """Execute SQL.

    Auxiliary function for PostgreSQL user classes.

    Returns a query result if possible or a boolean value.

    Args:
        obj (obj) -- must be an object of a user class.
            The object must have module (AnsibleModule class object) and
            cursor (psycopg cursor object) attributes
        query (str) -- SQL query to execute

    Kwargs:
        query_params (dict or tuple) -- Query parameters to prevent SQL injections,
            could be a dict or tuple
        return_bool (bool) -- return True instead of rows if a query was successfully executed.
            It's necessary for statements that don't return any result like DDL queries (default False).
        add_to_executed (bool) -- append the query to obj.executed_queries attribute
        dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
            to obj.executed_queries list and return True (default False)
    """

    if dont_exec:
        # This is usually needed to return queries in check_mode
        # without execution
        query = obj.cursor.mogrify(query, query_params)
        if add_to_executed:
            obj.executed_queries.append(query)

        return True

    try:
        if query_params is not None:
            obj.cursor.execute(query, query_params)
        else:
            obj.cursor.execute(query)

        if add_to_executed:
            # Record the fully interpolated statement for module output.
            if query_params is not None:
                obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
            else:
                obj.executed_queries.append(query)

        if not return_bool:
            res = obj.cursor.fetchall()
            return res
        return True
    except Exception as e:
        obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
        return False


def get_conn_params(module, params_dict, warn_db_default=True):
    """Get connection parameters from the passed dictionary.

    Return a dictionary with parameters to connect to PostgreSQL server.

    Args:
        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
        params_dict (dict) -- dictionary with variables

    Kwargs:
        warn_db_default (bool) -- warn that the default DB is used (default True)
    """
    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the return dictionary
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }

    # Might be different in the modules:
    if params_dict.get('db'):
        params_map['db'] = 'database'
    elif params_dict.get('database'):
        params_map['database'] = 'database'
    elif params_dict.get('login_db'):
        params_map['login_db'] = 'database'
    else:
        if warn_db_default:
            module.warn('Database name has not been passed, '
                        'used default database to connect to.')

    kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
              if k in params_map and v != '' and v is not None)

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
    if is_localhost and params_dict["login_unix_socket"] != "":
        kw["host"] = params_dict["login_unix_socket"]

    return kw


class PgMembership(object):
    """Manage PostgreSQL role memberships.

    Grants or revokes membership of self.target_roles in self.groups.
    Non-existent roles are either fatal (fail_on_role=True) or skipped
    with a warning; executed statements accumulate in executed_queries.
    """

    def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
        self.module = module
        self.cursor = cursor
        self.target_roles = [r.strip() for r in target_roles]
        self.groups = [r.strip() for r in groups]
        self.executed_queries = []
        self.granted = {}
        self.revoked = {}
        self.fail_on_role = fail_on_role
        self.non_existent_roles = []
        self.changed = False
        self.__check_roles_exist()

    def grant(self):
        # Grant each group to every target role not already a member of it.
        for group in self.groups:
            self.granted[group] = []

            for role in self.target_roles:
                # If role is in a group now, pass:
                if self.__check_membership(group, role):
                    continue

                query = 'GRANT "%s" TO "%s"' % (group, role)
                self.changed = exec_sql(self, query, return_bool=True)

                if self.changed:
                    self.granted[group].append(role)

        return self.changed

    def revoke(self):
        # Revoke each group from every target role that currently has it.
        for group in self.groups:
            self.revoked[group] = []

            for role in self.target_roles:
                # If role is not in a group now, pass:
                if not self.__check_membership(group, role):
                    continue

                query = 'REVOKE "%s" FROM "%s"' % (group, role)
                self.changed = exec_sql(self, query, return_bool=True)

                if self.changed:
                    self.revoked[group].append(role)

        return self.changed

    def __check_membership(self, src_role, dst_role):
        # Returns True when dst_role is a member of src_role, reading
        # pg_auth_members via a parametrized query.
        query = ("SELECT ARRAY(SELECT b.rolname FROM "
                 "pg_catalog.pg_auth_members m "
                 "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
                 "WHERE m.member = r.oid) "
                 "FROM pg_catalog.pg_roles r "
                 "WHERE r.rolname = %(dst_role)s")

        res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False)
        membership = []
        if res:
            membership = res[0][0]

        if not membership:
            return False

        if src_role in membership:
            return True

        return False

    def __check_roles_exist(self):
        # Validates that all groups and target roles exist; depending on
        # fail_on_role, either fails the module or records offenders in
        # non_existent_roles and filters them out of both lists.
        existent_groups = self.__roles_exist(self.groups)
        existent_roles = self.__roles_exist(self.target_roles)

        for group in self.groups:
            if group not in existent_groups:
                if self.fail_on_role:
                    self.module.fail_json(msg="Role %s does not exist" % group)
                else:
                    self.module.warn("Role %s does not exist, pass" % group)
                    self.non_existent_roles.append(group)

        for role in self.target_roles:
            if role not in existent_roles:
                if self.fail_on_role:
                    self.module.fail_json(msg="Role %s does not exist" % role)
                else:
                    self.module.warn("Role %s does not exist, pass" % role)

                if role not in self.groups:
                    self.non_existent_roles.append(role)

            else:
                # NOTE(review): both placeholders are filled with the same
                # 'role' value; the message looks like it was meant to name
                # the group as well -- confirm against upstream intent.
                if self.fail_on_role:
                    self.module.exit_json(msg="Role role '%s' is a member of role '%s'" % (role, role))
                else:
                    self.module.warn("Role role '%s' is a member of role '%s', pass" % (role, role))

        # Update role lists, excluding non existent roles:
        self.groups = [g for g in self.groups if g not in self.non_existent_roles]

        self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]

    def __roles_exist(self, roles):
        # NOTE(review): role names are interpolated into the IN (...) list
        # without parametrization; presumably validated by check_input in
        # the calling modules -- confirm.
        tmp = ["'" + x + "'" for x in roles]
        query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
        return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
+# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. + +# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from stringprep import ( + in_table_a1, + in_table_b1, + in_table_c3, + in_table_c4, + in_table_c5, + in_table_c6, + in_table_c7, + in_table_c8, + in_table_c9, + in_table_c12, + in_table_c21_c22, + in_table_d1, + in_table_d2, +) +from unicodedata import normalize + +from ansible.module_utils.six import text_type + + +def is_unicode_str(string): + return True if isinstance(string, text_type) else False + + +def mapping_profile(string): + """RFC4013 Mapping profile implementation.""" + # Regarding RFC4013, + # This profile specifies: + # - non-ASCII space characters [StringPrep, C.1.2] that can be + # mapped to SPACE (U+0020), and + # - the "commonly mapped to nothing" characters [StringPrep, B.1] + # that can be mapped to nothing. + + tmp = [] + for c in string: + # If not the "commonly mapped to nothing" + if not in_table_b1(c): + if in_table_c12(c): + # map non-ASCII space characters + # (that can be mapped) to Unicode space + tmp.append(u' ') + else: + tmp.append(c) + + return u"".join(tmp) + + +def is_ral_string(string): + """RFC3454 Check bidirectional category of the string""" + # Regarding RFC3454, + # Table D.1 lists the characters that belong + # to Unicode bidirectional categories "R" and "AL". + # If a string contains any RandALCat character, a RandALCat + # character MUST be the first character of the string, and a + # RandALCat character MUST be the last character of the string. 
+ if in_table_d1(string[0]): + if not in_table_d1(string[-1]): + raise ValueError('RFC3454: incorrect bidirectional RandALCat string.') + return True + return False + + +def prohibited_output_profile(string): + """RFC4013 Prohibited output profile implementation.""" + # Implements: + # RFC4013, 2.3. Prohibited Output. + # This profile specifies the following characters as prohibited input: + # - Non-ASCII space characters [StringPrep, C.1.2] + # - ASCII control characters [StringPrep, C.2.1] + # - Non-ASCII control characters [StringPrep, C.2.2] + # - Private Use characters [StringPrep, C.3] + # - Non-character code points [StringPrep, C.4] + # - Surrogate code points [StringPrep, C.5] + # - Inappropriate for plain text characters [StringPrep, C.6] + # - Inappropriate for canonical representation characters [StringPrep, C.7] + # - Change display properties or deprecated characters [StringPrep, C.8] + # - Tagging characters [StringPrep, C.9] + # RFC4013, 2.4. Bidirectional Characters. + # RFC4013, 2.5. Unassigned Code Points. + + # Determine how to handle bidirectional characters (RFC3454): + if is_ral_string(string): + # If a string contains any RandALCat characters, + # The string MUST NOT contain any LCat character: + is_prohibited_bidi_ch = in_table_d2 + bidi_table = 'D.2' + else: + # Forbid RandALCat characters in LCat string: + is_prohibited_bidi_ch = in_table_d1 + bidi_table = 'D.1' + + RFC = 'RFC4013' + for c in string: + # RFC4013 2.3. Prohibited Output: + if in_table_c12(c): + raise ValueError('%s: prohibited non-ASCII space characters ' + 'that cannot be replaced (C.1.2).' % RFC) + if in_table_c21_c22(c): + raise ValueError('%s: prohibited control characters (C.2.1).' % RFC) + if in_table_c3(c): + raise ValueError('%s: prohibited private Use characters (C.3).' % RFC) + if in_table_c4(c): + raise ValueError('%s: prohibited non-character code points (C.4).' % RFC) + if in_table_c5(c): + raise ValueError('%s: prohibited surrogate code points (C.5).' 
% RFC) + if in_table_c6(c): + raise ValueError('%s: prohibited inappropriate for plain text ' + 'characters (C.6).' % RFC) + if in_table_c7(c): + raise ValueError('%s: prohibited inappropriate for canonical ' + 'representation characters (C.7).' % RFC) + if in_table_c8(c): + raise ValueError('%s: prohibited change display properties / ' + 'deprecated characters (C.8).' % RFC) + if in_table_c9(c): + raise ValueError('%s: prohibited tagging characters (C.9).' % RFC) + + # RFC4013, 2.4. Bidirectional Characters: + if is_prohibited_bidi_ch(c): + raise ValueError('%s: prohibited bidi characters (%s).' % (RFC, bidi_table)) + + # RFC4013, 2.5. Unassigned Code Points: + if in_table_a1(c): + raise ValueError('%s: prohibited unassigned code points (A.1).' % RFC) + + +def saslprep(string): + """RFC4013 implementation. + Implements "SASLprep" profile (RFC4013) of the "stringprep" algorithm (RFC3454) + to prepare Unicode strings representing user names and passwords for comparison. + Regarding the RFC4013, the "SASLprep" profile is intended to be used by + Simple Authentication and Security Layer (SASL) mechanisms + (such as PLAIN, CRAM-MD5, and DIGEST-MD5), as well as other protocols + exchanging simple user names and/or passwords. + + Args: + string (unicode string): Unicode string to validate and prepare. + + Returns: + Prepared unicode string. + """ + # RFC4013: "The algorithm assumes all strings are + # comprised of characters from the Unicode [Unicode] character set." + # Validate the string is a Unicode string + # (text_type is the string type if PY3 and unicode otherwise): + if not is_unicode_str(string): + raise TypeError('input must be of type %s, not %s' % (text_type, type(string))) + + # RFC4013: 2.1. Mapping. + string = mapping_profile(string) + + # RFC4013: 2.2. Normalization. + # "This profile specifies using Unicode normalization form KC." + string = normalize('NFKC', string) + if not string: + return u'' + + # RFC4013: 2.3. Prohibited Output. + # RFC4013: 2.4. 
Bidirectional Characters. + # RFC4013: 2.5. Unassigned Code Points. + prohibited_output_profile(string) + + return string diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py new file mode 100644 index 00000000..6f083f4a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py @@ -0,0 +1,419 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_copy +short_description: Copy data between a file/program and a PostgreSQL table +description: +- Copy data between a file/program and a PostgreSQL table. + +options: + copy_to: + description: + - Copy the contents of a table to a file. + - Can also copy the results of a SELECT query. + - Mutually exclusive with I(copy_from) and I(dst). + type: path + aliases: [ to ] + copy_from: + description: + - Copy data from a file to a table (appending the data to whatever is in the table already). + - Mutually exclusive with I(copy_to) and I(src). + type: path + aliases: [ from ] + src: + description: + - Copy data from I(copy_from) to I(src=tablename). + - Used with I(copy_to) only. + type: str + aliases: [ source ] + dst: + description: + - Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file). + - Used with I(copy_from) only. + type: str + aliases: [ destination ] + columns: + description: + - List of column names for the src/dst table to COPY FROM/TO. + type: list + elements: str + aliases: [ column ] + program: + description: + - Mark I(src)/I(dst) as a program. 
Data will be copied to/from a program. + - See block Examples and PROGRAM arg description U(https://www.postgresql.org/docs/current/sql-copy.html). + type: bool + default: no + options: + description: + - Options of COPY command. + - See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html). + type: dict + db: + description: + - Name of database to connect to. + type: str + aliases: [ login_db ] + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + trust_input: + description: + - If C(no), check whether values of parameters are potentially dangerous. + - It makes sense to use C(no) only when SQL injections are possible. + type: bool + default: yes + version_added: '0.2.0' +notes: +- Supports PostgreSQL version 9.4+. +- COPY command is only allowed to database superusers. +- If I(check_mode=yes), we just check the src/dst table availability + and return the COPY query that actually has not been executed. +- If i(check_mode=yes) and the source has been passed as SQL, the module + will execute it and rolled the transaction back but pay attention + it can affect database performance (e.g., if SQL collects a lot of data). + +seealso: +- name: COPY command reference + description: Complete reference of the COPY command documentation. 
+ link: https://www.postgresql.org/docs/current/sql-copy.html + +author: +- Andrew Klychkov (@Andersson007) + +extends_documentation_fragment: +- community.postgresql.postgres +''' + +EXAMPLES = r''' +- name: Copy text TAB-separated data from file /tmp/data.txt to acme table + community.postgresql.postgresql_copy: + copy_from: /tmp/data.txt + dst: acme + +- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme + community.postgresql.postgresql_copy: + copy_from: /tmp/data.csv + dst: acme + columns: id,name + options: + format: csv + +- name: > + Copy text vertical-bar-separated data from file /tmp/data.txt to bar table. + The NULL values are specified as N + community.postgresql.postgresql_copy: + copy_from: /tmp/data.csv + dst: bar + options: + delimiter: '|' + null: 'N' + +- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated + community.postgresql.postgresql_copy: + src: acme + copy_to: /tmp/data.txt + +- name: Copy data from SELECT query to/tmp/data.csv in CSV format + community.postgresql.postgresql_copy: + src: 'SELECT * FROM acme' + copy_to: /tmp/data.csv + options: + format: csv + +- name: Copy CSV data from my_table to gzip + community.postgresql.postgresql_copy: + src: my_table + copy_to: 'gzip > /tmp/data.csv.gz' + program: yes + options: + format: csv + +- name: > + Copy data from columns id, name of table bar to /tmp/data.txt. + Output format is text, vertical-bar-separated, NULL as N + community.postgresql.postgresql_copy: + src: bar + columns: + - id + - name + copy_to: /tmp/data.csv + options: + delimiter: '|' + null: 'N' +''' + +RETURN = r''' +queries: + description: List of executed queries. + returned: always + type: str + sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ] +src: + description: Data source. + returned: always + type: str + sample: "mytable" +dst: + description: Data destination. 
+ returned: always + type: str + sample: "/tmp/data.csv" +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, + pg_quote_identifier, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils.six import iteritems + + +class PgCopyData(object): + + """Implements behavior of COPY FROM, COPY TO PostgreSQL command. + + Arguments: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + + Attributes: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + changed (bool) -- something was changed after execution or not + executed_queries (list) -- executed queries + dst (str) -- data destination table (when copy_from) + src (str) -- data source table (when copy_to) + opt_need_quotes (tuple) -- values of these options must be passed + to SQL in quotes + """ + + def __init__(self, module, cursor): + self.module = module + self.cursor = cursor + self.executed_queries = [] + self.changed = False + self.dst = '' + self.src = '' + self.opt_need_quotes = ( + 'DELIMITER', + 'NULL', + 'QUOTE', + 'ESCAPE', + 'ENCODING', + ) + + def copy_from(self): + """Implements COPY FROM command behavior.""" + self.src = self.module.params['copy_from'] + self.dst = self.module.params['dst'] + + query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')] + + if self.module.params.get('columns'): + query_fragments.append('(%s)' % ','.join(self.module.params['columns'])) + + query_fragments.append('FROM') + + if self.module.params.get('program'): + 
query_fragments.append('PROGRAM') + + query_fragments.append("'%s'" % self.src) + + if self.module.params.get('options'): + query_fragments.append(self.__transform_options()) + + # Note: check mode is implemented here: + if self.module.check_mode: + self.changed = self.__check_table(self.dst) + + if self.changed: + self.executed_queries.append(' '.join(query_fragments)) + else: + if exec_sql(self, ' '.join(query_fragments), return_bool=True): + self.changed = True + + def copy_to(self): + """Implements COPY TO command behavior.""" + self.src = self.module.params['src'] + self.dst = self.module.params['copy_to'] + + if 'SELECT ' in self.src.upper(): + # If src is SQL SELECT statement: + query_fragments = ['COPY (%s)' % self.src] + else: + # If src is a table: + query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')] + + if self.module.params.get('columns'): + query_fragments.append('(%s)' % ','.join(self.module.params['columns'])) + + query_fragments.append('TO') + + if self.module.params.get('program'): + query_fragments.append('PROGRAM') + + query_fragments.append("'%s'" % self.dst) + + if self.module.params.get('options'): + query_fragments.append(self.__transform_options()) + + # Note: check mode is implemented here: + if self.module.check_mode: + self.changed = self.__check_table(self.src) + + if self.changed: + self.executed_queries.append(' '.join(query_fragments)) + else: + if exec_sql(self, ' '.join(query_fragments), return_bool=True): + self.changed = True + + def __transform_options(self): + """Transform options dict into a suitable string.""" + for (key, val) in iteritems(self.module.params['options']): + if key.upper() in self.opt_need_quotes: + self.module.params['options'][key] = "'%s'" % val + + opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])] + return '(%s)' % ', '.join(opt) + + def __check_table(self, table): + """Check table or SQL in transaction mode for check_mode. 
+ + Return True if it is OK. + + Arguments: + table (str) - Table name that needs to be checked. + It can be SQL SELECT statement that was passed + instead of the table name. + """ + if 'SELECT ' in table.upper(): + # In this case table is actually SQL SELECT statement. + # If SQL fails, it's handled by exec_sql(): + exec_sql(self, table, add_to_executed=False) + # If exec_sql was passed, it means all is OK: + return True + + exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'), + add_to_executed=False) + # If SQL was executed successfully: + return True + + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + copy_to=dict(type='path', aliases=['to']), + copy_from=dict(type='path', aliases=['from']), + src=dict(type='str', aliases=['source']), + dst=dict(type='str', aliases=['destination']), + columns=dict(type='list', elements='str', aliases=['column']), + options=dict(type='dict'), + program=dict(type='bool', default=False), + db=dict(type='str', aliases=['login_db']), + session_role=dict(type='str'), + trust_input=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['copy_from', 'copy_to'], + ['copy_from', 'src'], + ['copy_to', 'dst'], + ] + ) + + if not module.params['trust_input']: + # Check input for potentially dangerous elements: + opt_list = None + if module.params['options']: + opt_list = ['%s %s' % (key, val) for (key, val) in iteritems(module.params['options'])] + + check_input(module, + module.params['copy_to'], + module.params['copy_from'], + module.params['src'], + module.params['dst'], + opt_list, + module.params['columns'], + module.params['session_role']) + + # Note: we don't need to check mutually exclusive params here, because they are + # checked automatically by AnsibleModule (mutually_exclusive=[] list above). 
+ if module.params.get('copy_from') and not module.params.get('dst'): + module.fail_json(msg='dst param is necessary with copy_from') + + elif module.params.get('copy_to') and not module.params.get('src'): + module.fail_json(msg='src param is necessary with copy_to') + + # Connect to DB and make cursor object: + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ############## + # Create the object and do main job: + data = PgCopyData(module, cursor) + + # Note: parameters like dst, src, etc. are got + # from module object into data object of PgCopyData class. + # Therefore not need to pass args to the methods below. + # Note: check mode is implemented inside the methods below + # by checking passed module.check_mode arg. + if module.params.get('copy_to'): + data.copy_to() + + elif module.params.get('copy_from'): + data.copy_from() + + # Finish: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + cursor.close() + db_connection.close() + + # Return some values: + module.exit_json( + changed=data.changed, + queries=data.executed_queries, + src=data.src, + dst=data.dst, + ) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py new file mode 100644 index 00000000..4a50176a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py @@ -0,0 +1,673 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: 
postgresql_db +short_description: Add or remove PostgreSQL databases from a remote host +description: + - Add or remove PostgreSQL databases from a remote host. +options: + name: + description: + - Name of the database to add or remove. + type: str + required: true + aliases: [ db ] + port: + description: + - Database port to connect (if needed). + type: int + default: 5432 + aliases: + - login_port + owner: + description: + - Name of the role to set as owner of the database. + type: str + template: + description: + - Template used to create the database. + type: str + encoding: + description: + - Encoding of the database. + type: str + lc_collate: + description: + - Collation order (LC_COLLATE) to use in the database + must match collation order of template database unless C(template0) is used as template. + type: str + lc_ctype: + description: + - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...). + - Must match LC_CTYPE of template database unless C(template0) is used as template. + type: str + session_role: + description: + - Switch to session_role after connecting. + - The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though the session_role + were the one that had logged in originally. + type: str + state: + description: + - The database state. + - C(present) implies that the database should be created if necessary. + - C(absent) implies that the database should be removed if present. + - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4) + Note that in some PostgreSQL versions of pg_dump, which is an embedded PostgreSQL utility and is used by the module, + returns rc 0 even when errors occurred (e.g. the connection is forbidden by pg_hba.conf, etc.), + so the module returns changed=True but the dump has not actually been done. 
Please, be sure that your version of + pg_dump returns rc 1 in this case. + - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4). + - The format of the backup will be detected based on the target name. + - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz). + - Supported formats for dump and restore include C(.sql) and C(.tar). + - "Restore program is selected by target file format: C(.tar) and C(.pgc) are handled by pg_restore, other with pgsql." + type: str + choices: [ absent, dump, present, restore ] + default: present + target: + description: + - File to back up or restore from. + - Used when I(state) is C(dump) or C(restore). + type: path + target_opts: + description: + - Additional arguments for pg_dump or restore program (pg_restore or psql, depending on target's format). + - Used when I(state) is C(dump) or C(restore). + type: str + maintenance_db: + description: + - The value specifies the initial database (which is also called as maintenance DB) that Ansible connects to. + type: str + default: postgres + conn_limit: + description: + - Specifies the database connection limit. + type: str + tablespace: + description: + - The tablespace to set for the database + U(https://www.postgresql.org/docs/current/sql-alterdatabase.html). + - If you want to move the database back to the default tablespace, + explicitly set this to pg_default. + type: path + dump_extra_args: + description: + - Provides additional arguments when I(state) is C(dump). + - Cannot be used with dump-file-format-related arguments like ``--format=d``. + type: str + version_added: '0.2.0' + trust_input: + description: + - If C(no), check whether values of parameters I(owner), I(conn_limit), I(encoding), + I(db), I(template), I(tablespace), I(session_role) are potentially dangerous. + - It makes sense to use C(no) only when SQL injections via the parameters are possible. 
+ type: bool + default: yes + version_added: '0.2.0' +seealso: +- name: CREATE DATABASE reference + description: Complete reference of the CREATE DATABASE command documentation. + link: https://www.postgresql.org/docs/current/sql-createdatabase.html +- name: DROP DATABASE reference + description: Complete reference of the DROP DATABASE command documentation. + link: https://www.postgresql.org/docs/current/sql-dropdatabase.html +- name: pg_dump reference + description: Complete reference of pg_dump documentation. + link: https://www.postgresql.org/docs/current/app-pgdump.html +- name: pg_restore reference + description: Complete reference of pg_restore documentation. + link: https://www.postgresql.org/docs/current/app-pgrestore.html +- module: community.postgresql.postgresql_tablespace +- module: community.postgresql.postgresql_info +- module: community.postgresql.postgresql_ping +notes: +- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8. +- Supports C(check_mode). +author: "Ansible Core Team" +extends_documentation_fragment: +- community.postgresql.postgres + +''' + +EXAMPLES = r''' +- name: Create a new database with name "acme" + community.postgresql.postgresql_db: + name: acme + +# Note: If a template different from "template0" is specified, +# encoding and locale settings must match those of the template. 
+- name: Create a new database with name "acme" and specific encoding and locale # settings + community.postgresql.postgresql_db: + name: acme + encoding: UTF-8 + lc_collate: de_DE.UTF-8 + lc_ctype: de_DE.UTF-8 + template: template0 + +# Note: Default limit for the number of concurrent connections to +# a specific database is "-1", which means "unlimited" +- name: Create a new database with name "acme" which has a limit of 100 concurrent connections + community.postgresql.postgresql_db: + name: acme + conn_limit: "100" + +- name: Dump an existing database to a file + community.postgresql.postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql + +- name: Dump an existing database to a file excluding the test table + community.postgresql.postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql + dump_extra_args: --exclude-table=test + +- name: Dump an existing database to a file (with compression) + community.postgresql.postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql.gz + +- name: Dump a single schema for an existing database + community.postgresql.postgresql_db: + name: acme + state: dump + target: /tmp/acme.sql + target_opts: "-n public" + +- name: Dump only table1 and table2 from the acme database + community.postgresql.postgresql_db: + name: acme + state: dump + target: /tmp/table1_table2.sql + target_opts: "-t table1 -t table2" + +# Note: In the example below, if database foo exists and has another tablespace +# the tablespace will be changed to foo. Access to the database will be locked +# until the copying of database files is finished. +- name: Create a new database called foo in tablespace bar + community.postgresql.postgresql_db: + name: foo + tablespace: bar +''' + +RETURN = r''' +executed_commands: + description: List of commands which tried to run. 
+ returned: always + type: list + sample: ["CREATE DATABASE acme"] + version_added: '0.2.0' +''' + + +import os +import subprocess +import traceback + +try: + import psycopg2 + import psycopg2.extras +except ImportError: + HAS_PSYCOPG2 = False +else: + HAS_PSYCOPG2 = True + +import ansible_collections.community.postgresql.plugins.module_utils.postgres as pgutils +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, + SQLParseError, +) +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import shlex_quote +from ansible.module_utils._text import to_native + +executed_commands = [] + + +class NotSupportedError(Exception): + pass + +# =========================================== +# PostgreSQL module specific support methods. +# + + +def set_owner(cursor, db, owner): + query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (db, owner) + executed_commands.append(query) + cursor.execute(query) + return True + + +def set_conn_limit(cursor, db, conn_limit): + query = 'ALTER DATABASE "%s" CONNECTION LIMIT %s' % (db, conn_limit) + executed_commands.append(query) + cursor.execute(query) + return True + + +def get_encoding_id(cursor, encoding): + query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;" + cursor.execute(query, {'encoding': encoding}) + return cursor.fetchone()['encoding_id'] + + +def get_db_info(cursor, db): + query = """ + SELECT rolname AS owner, + pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id, + datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit, + spcname AS tablespace + FROM pg_database + JOIN pg_roles ON pg_roles.oid = pg_database.datdba + JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace + WHERE datname = %(db)s + """ + cursor.execute(query, {'db': db}) + return cursor.fetchone() + + +def db_exists(cursor, db): + query = "SELECT * FROM pg_database 
WHERE datname=%(db)s" + cursor.execute(query, {'db': db}) + return cursor.rowcount == 1 + + +def db_delete(cursor, db): + if db_exists(cursor, db): + query = 'DROP DATABASE "%s"' % db + executed_commands.append(query) + cursor.execute(query) + return True + else: + return False + + +def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace): + params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace) + if not db_exists(cursor, db): + query_fragments = ['CREATE DATABASE "%s"' % db] + if owner: + query_fragments.append('OWNER "%s"' % owner) + if template: + query_fragments.append('TEMPLATE "%s"' % template) + if encoding: + query_fragments.append('ENCODING %(enc)s') + if lc_collate: + query_fragments.append('LC_COLLATE %(collate)s') + if lc_ctype: + query_fragments.append('LC_CTYPE %(ctype)s') + if tablespace: + query_fragments.append('TABLESPACE "%s"' % tablespace) + if conn_limit: + query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + query = ' '.join(query_fragments) + executed_commands.append(cursor.mogrify(query, params)) + cursor.execute(query, params) + return True + else: + db_info = get_db_info(cursor, db) + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): + raise NotSupportedError( + 'Changing database encoding is not supported. ' + 'Current encoding: %s' % db_info['encoding'] + ) + elif lc_collate and lc_collate != db_info['lc_collate']: + raise NotSupportedError( + 'Changing LC_COLLATE is not supported. ' + 'Current LC_COLLATE: %s' % db_info['lc_collate'] + ) + elif lc_ctype and lc_ctype != db_info['lc_ctype']: + raise NotSupportedError( + 'Changing LC_CTYPE is not supported.' 
+ 'Current LC_CTYPE: %s' % db_info['lc_ctype'] + ) + else: + changed = False + + if owner and owner != db_info['owner']: + changed = set_owner(cursor, db, owner) + + if conn_limit and conn_limit != str(db_info['conn_limit']): + changed = set_conn_limit(cursor, db, conn_limit) + + if tablespace and tablespace != db_info['tablespace']: + changed = set_tablespace(cursor, db, tablespace) + + return changed + + +def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace): + if not db_exists(cursor, db): + return False + else: + db_info = get_db_info(cursor, db) + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): + return False + elif lc_collate and lc_collate != db_info['lc_collate']: + return False + elif lc_ctype and lc_ctype != db_info['lc_ctype']: + return False + elif owner and owner != db_info['owner']: + return False + elif conn_limit and conn_limit != str(db_info['conn_limit']): + return False + elif tablespace and tablespace != db_info['tablespace']: + return False + else: + return True + + +def db_dump(module, target, target_opts="", + db=None, + dump_extra_args=None, + user=None, + password=None, + host=None, + port=None, + **kw): + + flags = login_flags(db, host, port, user, db_prefix=False) + cmd = module.get_bin_path('pg_dump', True) + comp_prog_path = None + + if os.path.splitext(target)[-1] == '.tar': + flags.append(' --format=t') + elif os.path.splitext(target)[-1] == '.pgc': + flags.append(' --format=c') + if os.path.splitext(target)[-1] == '.gz': + if module.get_bin_path('pigz'): + comp_prog_path = module.get_bin_path('pigz', True) + else: + comp_prog_path = module.get_bin_path('gzip', True) + elif os.path.splitext(target)[-1] == '.bz2': + comp_prog_path = module.get_bin_path('bzip2', True) + elif os.path.splitext(target)[-1] == '.xz': + comp_prog_path = module.get_bin_path('xz', True) + + cmd += "".join(flags) + + if dump_extra_args: + cmd += " {0} ".format(dump_extra_args) + + if 
target_opts: + cmd += " {0} ".format(target_opts) + + if comp_prog_path: + # Use a fifo to be notified of an error in pg_dump + # Using shell pipe has no way to return the code of the first command + # in a portable way. + fifo = os.path.join(module.tmpdir, 'pg_fifo') + os.mkfifo(fifo) + cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo) + else: + cmd = '{0} > {1}'.format(cmd, shlex_quote(target)) + + return do_with_password(module, cmd, password) + + +def db_restore(module, target, target_opts="", + db=None, + user=None, + password=None, + host=None, + port=None, + **kw): + + flags = login_flags(db, host, port, user) + comp_prog_path = None + cmd = module.get_bin_path('psql', True) + + if os.path.splitext(target)[-1] == '.sql': + flags.append(' --file={0}'.format(target)) + + elif os.path.splitext(target)[-1] == '.tar': + flags.append(' --format=Tar') + cmd = module.get_bin_path('pg_restore', True) + + elif os.path.splitext(target)[-1] == '.pgc': + flags.append(' --format=Custom') + cmd = module.get_bin_path('pg_restore', True) + + elif os.path.splitext(target)[-1] == '.gz': + comp_prog_path = module.get_bin_path('zcat', True) + + elif os.path.splitext(target)[-1] == '.bz2': + comp_prog_path = module.get_bin_path('bzcat', True) + + elif os.path.splitext(target)[-1] == '.xz': + comp_prog_path = module.get_bin_path('xzcat', True) + + cmd += "".join(flags) + if target_opts: + cmd += " {0} ".format(target_opts) + + if comp_prog_path: + env = os.environ.copy() + if password: + env = {"PGPASSWORD": password} + p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env) + (stdout2, stderr2) = p2.communicate() + p1.stdout.close() + p1.wait() + if p1.returncode != 0: + stderr1 = p1.stderr.read() + return p1.returncode, '', stderr1, 'cmd: ****' + else: + return p2.returncode, '', stderr2, 
'cmd: ****' + else: + cmd = '{0} < {1}'.format(cmd, shlex_quote(target)) + + return do_with_password(module, cmd, password) + + +def login_flags(db, host, port, user, db_prefix=True): + """ + returns a list of connection argument strings each prefixed + with a space and quoted where necessary to later be combined + in a single shell string with `"".join(rv)` + + db_prefix determines if "--dbname" is prefixed to the db argument, + since the argument was introduced in 9.3. + """ + flags = [] + if db: + if db_prefix: + flags.append(' --dbname={0}'.format(shlex_quote(db))) + else: + flags.append(' {0}'.format(shlex_quote(db))) + if host: + flags.append(' --host={0}'.format(host)) + if port: + flags.append(' --port={0}'.format(port)) + if user: + flags.append(' --username={0}'.format(user)) + return flags + + +def do_with_password(module, cmd, password): + env = {} + if password: + env = {"PGPASSWORD": password} + executed_commands.append(cmd) + rc, stderr, stdout = module.run_command(cmd, use_unsafe_shell=True, environ_update=env) + return rc, stderr, stdout, cmd + + +def set_tablespace(cursor, db, tablespace): + query = 'ALTER DATABASE "%s" SET TABLESPACE "%s"' % (db, tablespace) + executed_commands.append(query) + cursor.execute(query) + return True + +# =========================================== +# Module execution. 
+# + + +def main(): + argument_spec = pgutils.postgres_common_argument_spec() + argument_spec.update( + db=dict(type='str', required=True, aliases=['name']), + owner=dict(type='str', default=''), + template=dict(type='str', default=''), + encoding=dict(type='str', default=''), + lc_collate=dict(type='str', default=''), + lc_ctype=dict(type='str', default=''), + state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']), + target=dict(type='path', default=''), + target_opts=dict(type='str', default=''), + maintenance_db=dict(type='str', default="postgres"), + session_role=dict(type='str'), + conn_limit=dict(type='str', default=''), + tablespace=dict(type='path', default=''), + dump_extra_args=dict(type='str', default=None), + trust_input=dict(type='bool', default=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + db = module.params["db"] + owner = module.params["owner"] + template = module.params["template"] + encoding = module.params["encoding"] + lc_collate = module.params["lc_collate"] + lc_ctype = module.params["lc_ctype"] + target = module.params["target"] + target_opts = module.params["target_opts"] + state = module.params["state"] + changed = False + maintenance_db = module.params['maintenance_db'] + session_role = module.params["session_role"] + conn_limit = module.params['conn_limit'] + tablespace = module.params['tablespace'] + dump_extra_args = module.params['dump_extra_args'] + trust_input = module.params['trust_input'] + + # Check input + if not trust_input: + # Check input for potentially dangerous elements: + check_input(module, owner, conn_limit, encoding, db, template, tablespace, session_role) + + raw_connection = state in ("dump", "restore") + + if not raw_connection: + pgutils.ensure_required_libs(module) + + # To use defaults values, keyword arguments must be absent, so + # check which values are empty and don't include in the **kw + # dictionary + params_map 
= { + "login_host": "host", + "login_user": "user", + "login_password": "password", + "port": "port", + "ssl_mode": "sslmode", + "ca_cert": "sslrootcert" + } + kw = dict((params_map[k], v) for (k, v) in iteritems(module.params) + if k in params_map and v != '' and v is not None) + + # If a login_unix_socket is specified, incorporate it here. + is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + + if is_localhost and module.params["login_unix_socket"] != "": + kw["host"] = module.params["login_unix_socket"] + + if target == "": + target = "{0}/{1}.sql".format(os.getcwd(), db) + target = os.path.expanduser(target) + + if not raw_connection: + try: + db_connection = psycopg2.connect(database=maintenance_db, **kw) + + # Enable autocommit so we can create databases + if psycopg2.__version__ >= '2.4.2': + db_connection.autocommit = True + else: + db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) + + except TypeError as e: + if 'sslrootcert' in e.args[0]: + module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. 
Exception: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + except Exception as e: + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + + if session_role: + try: + cursor.execute('SET ROLE "%s"' % session_role) + except Exception as e: + module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc()) + + try: + if module.check_mode: + if state == "absent": + changed = db_exists(cursor, db) + elif state == "present": + changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace) + module.exit_json(changed=changed, db=db, executed_commands=executed_commands) + + if state == "absent": + try: + changed = db_delete(cursor, db) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + elif state == "present": + try: + changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + elif state in ("dump", "restore"): + method = state == "dump" and db_dump or db_restore + try: + if state == 'dump': + rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw) + else: + rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw) + + if rc != 0: + module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd) + else: + module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd, + executed_commands=executed_commands) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + except NotSupportedError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except SystemExit: + # Avoid catching this on Python 
2.4 + raise + except Exception as e: + module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, db=db, executed_commands=executed_commands) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py new file mode 100644 index 00000000..7514a0c0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py @@ -0,0 +1,444 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_ext +short_description: Add or remove PostgreSQL extensions from a database +description: +- Add or remove PostgreSQL extensions from a database. +options: + name: + description: + - Name of the extension to add or remove. + required: true + type: str + aliases: + - ext + db: + description: + - Name of the database to add or remove the extension to/from. + required: true + type: str + aliases: + - login_db + schema: + description: + - Name of the schema to add the extension to. + type: str + session_role: + description: + - Switch to session_role after connecting. + - The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally. + type: str + state: + description: + - The database extension state. 
+ default: present + choices: [ absent, present ] + type: str + cascade: + description: + - Automatically install/remove any extensions that this extension depends on + that are not already installed/removed (supported since PostgreSQL 9.6). + type: bool + default: no + login_unix_socket: + description: + - Path to a Unix domain socket for local connections. + type: str + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + type: str + aliases: [ ssl_rootcert ] + version: + description: + - Extension version to add or update to. Has effect with I(state=present) only. + - If not specified, the latest extension version will be created. + - It can't downgrade an extension version. + When version downgrade is needed, remove the extension and create new one with appropriate version. + - Set I(version=latest) to update the extension to the latest available version. + type: str + trust_input: + description: + - If C(no), check whether values of parameters I(ext), I(schema), + I(version), I(session_role) are potentially dangerous. + - It makes sense to use C(no) only when SQL injections via the parameters are possible. + type: bool + default: yes + version_added: '0.2.0' +seealso: +- name: PostgreSQL extensions + description: General information about PostgreSQL extensions. 
+ link: https://www.postgresql.org/docs/current/external-extensions.html +- name: CREATE EXTENSION reference + description: Complete reference of the CREATE EXTENSION command documentation. + link: https://www.postgresql.org/docs/current/sql-createextension.html +- name: ALTER EXTENSION reference + description: Complete reference of the ALTER EXTENSION command documentation. + link: https://www.postgresql.org/docs/current/sql-alterextension.html +- name: DROP EXTENSION reference + description: Complete reference of the DROP EXTENSION command documentation. + link: https://www.postgresql.org/docs/current/sql-dropextension.html +notes: +- Supports C(check_mode). +- The default authentication assumes that you are either logging in as + or sudo'ing to the C(postgres) account on the host. +- This module uses I(psycopg2), a Python PostgreSQL database adapter. +- You must ensure that C(psycopg2) is installed on the host before using this module. +- If the remote host is the PostgreSQL server (which is the default case), + then PostgreSQL must also be installed on the remote host. +- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), + and C(python-psycopg2) packages on the remote host before using this module. +- Incomparable versions, for example PostGIS ``unpackaged``, cannot be installed.
+requirements: [ psycopg2 ] +author: +- Daniel Schep (@dschep) +- Thomas O'Donnell (@andytom) +- Sandro Santilli (@strk) +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.postgresql.postgres + +''' + +EXAMPLES = r''' +- name: Adds postgis extension to the database acme in the schema foo + community.postgresql.postgresql_ext: + name: postgis + db: acme + schema: foo + +- name: Removes postgis extension from the database acme + community.postgresql.postgresql_ext: + name: postgis + db: acme + state: absent + +- name: Adds earthdistance extension to the database template1 cascade + community.postgresql.postgresql_ext: + name: earthdistance + db: template1 + cascade: true + +# In the example below, if earthdistance extension is installed, +# it will be removed too because it depends on cube: +- name: Removes cube extension from the database acme cascade + community.postgresql.postgresql_ext: + name: cube + db: acme + cascade: yes + state: absent + +- name: Create extension foo of version 1.2 or update it if it's already created + community.postgresql.postgresql_ext: + db: acme + name: foo + version: 1.2 + +- name: Assuming extension foo is created, update it to the latest version + community.postgresql.postgresql_ext: + db: acme + name: foo + version: latest +''' + +RETURN = r''' +query: + description: List of executed queries.
+ returned: always + type: list + sample: ["DROP EXTENSION \"acme\""] + +''' + +import traceback + +from distutils.version import LooseVersion + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_native + +executed_queries = [] + + +# =========================================== +# PostgreSQL module specific support methods. +# + +def ext_exists(cursor, ext): + query = "SELECT * FROM pg_extension WHERE extname=%(ext)s" + cursor.execute(query, {'ext': ext}) + return cursor.rowcount == 1 + + +def ext_delete(cursor, ext, cascade): + if ext_exists(cursor, ext): + query = "DROP EXTENSION \"%s\"" % ext + if cascade: + query += " CASCADE" + cursor.execute(query) + executed_queries.append(query) + return True + else: + return False + + +def ext_update_version(cursor, ext, version): + """Update extension version. + + Return True if success. 
+ + Args: + cursor (cursor) -- cursor object of psycopg2 library + ext (str) -- extension name + version (str) -- extension version + """ + query = "ALTER EXTENSION \"%s\" UPDATE" % ext + params = {} + + if version != 'latest': + query += " TO %(ver)s" + params['ver'] = version + + cursor.execute(query, params) + executed_queries.append(cursor.mogrify(query, params)) + + return True + + +def ext_create(cursor, ext, schema, cascade, version): + query = "CREATE EXTENSION \"%s\"" % ext + params = {} + + if schema: + query += " WITH SCHEMA \"%s\"" % schema + if version: + query += " VERSION %(ver)s" + params['ver'] = version + if cascade: + query += " CASCADE" + + cursor.execute(query, params) + executed_queries.append(cursor.mogrify(query, params)) + return True + + +def ext_get_versions(cursor, ext): + """ + Get the current created extension version and available versions. + + Return tuple (current_version, [list of available versions]). + + Note: the list of available versions contains only versions + that higher than the current created version. + If the extension is not created, this list will contain all + available versions. + + Args: + cursor (cursor) -- cursor object of psycopg2 library + ext (str) -- extension name + """ + + # 1. Get the current extension version: + query = ("SELECT extversion FROM pg_catalog.pg_extension " + "WHERE extname = %(ext)s") + + current_version = '0' + cursor.execute(query, {'ext': ext}) + res = cursor.fetchone() + if res: + current_version = res[0] + + # 2. Get available versions: + query = ("SELECT version FROM pg_available_extension_versions " + "WHERE name = %(ext)s") + cursor.execute(query, {'ext': ext}) + res = cursor.fetchall() + + available_versions = parse_ext_versions(current_version, res) + + if current_version == '0': + current_version = False + + return (current_version, available_versions) + + +def parse_ext_versions(current_version, ext_ver_list): + """Parse ext versions. 
+ + Args: + current_version (str) -- version to compare elements of ext_ver_list with + ext_ver_list (list) -- list containing dicts with versions + + Return a sorted list with versions that are higher than current_version. + + Note: Incomparable versions (e.g., postgis version "unpackaged") are skipped. + """ + available_versions = [] + + for line in ext_ver_list: + if line['version'] == 'unpackaged': + continue + + try: + if LooseVersion(line['version']) > LooseVersion(current_version): + available_versions.append(line['version']) + except Exception: + # When a version cannot be compared, skip it + # (there's a note in the documentation) + continue + + return sorted(available_versions, key=LooseVersion) + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + db=dict(type="str", required=True, aliases=["login_db"]), + ext=dict(type="str", required=True, aliases=["name"]), + schema=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present"]), + cascade=dict(type="bool", default=False), + session_role=dict(type="str"), + version=dict(type="str"), + trust_input=dict(type="bool", default=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + ext = module.params["ext"] + schema = module.params["schema"] + state = module.params["state"] + cascade = module.params["cascade"] + version = module.params["version"] + session_role = module.params["session_role"] + trust_input = module.params["trust_input"] + changed = False + + if not trust_input: + check_input(module, ext, schema, version, session_role) + + if version and state == 'absent': + module.warn("Parameter version is ignored when state=absent") + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) 
+ + try: + # Get extension info and available versions: + curr_version, available_versions = ext_get_versions(cursor, ext) + + if state == "present": + if version == 'latest': + if available_versions: + version = available_versions[-1] + else: + version = '' + + if version: + # If the specific version is passed and it is not available for update: + if version not in available_versions: + if not curr_version: + module.fail_json(msg="Passed version '%s' is not available" % version) + + elif LooseVersion(curr_version) == LooseVersion(version): + changed = False + + else: + module.fail_json(msg="Passed version '%s' is lower than " + "the current created version '%s' or " + "the passed version is not available" % (version, curr_version)) + + # If the specific version is passed and it is higher that the current version: + if curr_version: + if LooseVersion(curr_version) < LooseVersion(version): + if module.check_mode: + changed = True + else: + changed = ext_update_version(cursor, ext, version) + + # If the specific version is passed and it is created now: + if curr_version == version: + changed = False + + # If the ext doesn't exist and installed: + elif not curr_version and available_versions: + if module.check_mode: + changed = True + else: + changed = ext_create(cursor, ext, schema, cascade, version) + + # If version is not passed: + else: + if not curr_version: + # If the ext doesn't exist and it's installed: + if available_versions: + if module.check_mode: + changed = True + else: + changed = ext_create(cursor, ext, schema, cascade, version) + + # If the ext doesn't exist and not installed: + else: + module.fail_json(msg="Extension %s is not installed" % ext) + + elif state == "absent": + if curr_version: + if module.check_mode: + changed = True + else: + changed = ext_delete(cursor, ext, cascade) + else: + changed = False + + except Exception as e: + db_connection.close() + module.fail_json(msg="Database query failed: %s" % to_native(e), 
exception=traceback.format_exc()) + + db_connection.close() + module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py new file mode 100644 index 00000000..d798b74c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py @@ -0,0 +1,589 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_idx +short_description: Create or drop indexes from a PostgreSQL database +description: +- Create or drop indexes from a PostgreSQL database. + +options: + idxname: + description: + - Name of the index to create or drop. + type: str + required: true + aliases: + - name + db: + description: + - Name of database to connect to and where the index will be created/dropped. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + schema: + description: + - Name of a database schema where the index will be created. + type: str + state: + description: + - Index state. + - C(present) implies the index will be created if it does not exist. + - C(absent) implies the index will be dropped if it exists. 
+ type: str + default: present + choices: [ absent, present ] + table: + description: + - Table to create the index on. + - Mutually exclusive with I(state=absent). + type: str + columns: + description: + - List of index columns that need to be covered by index. + - Mutually exclusive with I(state=absent). + type: list + elements: str + aliases: + - column + cond: + description: + - Index conditions. + - Mutually exclusive with I(state=absent). + type: str + idxtype: + description: + - Index type (like btree, gist, gin, etc.). + - Mutually exclusive with I(state=absent). + type: str + aliases: + - type + concurrent: + description: + - Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY). + - Pay attention, if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the building process. + For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html). + - If the building process was interrupted for any reason when I(concurrent=yes), the index becomes invalid. + In this case it should be dropped and created again. + - Mutually exclusive with I(cascade=yes). + type: bool + default: yes + unique: + description: + - Enable unique index. + - Only btree currently supports unique indexes. + type: bool + default: no + version_added: '0.2.0' + tablespace: + description: + - Set a tablespace for the index. + - Mutually exclusive with I(state=absent). + type: str + storage_params: + description: + - Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc. + - Mutually exclusive with I(state=absent). + type: list + elements: str + cascade: + description: + - Automatically drop objects that depend on the index, + and in turn all objects that depend on those objects. + - It is used only with I(state=absent). + - Mutually exclusive with I(concurrent=yes).
+ type: bool + default: no + trust_input: + description: + - If C(no), check whether values of parameters I(idxname), I(session_role), + I(schema), I(table), I(columns), I(tablespace), I(storage_params), + I(cond) are potentially dangerous. + - It makes sense to use C(no) only when SQL injections via the parameters are possible. + type: bool + default: yes + version_added: '0.2.0' + +seealso: +- module: community.postgresql.postgresql_table +- module: community.postgresql.postgresql_tablespace +- name: PostgreSQL indexes reference + description: General information about PostgreSQL indexes. + link: https://www.postgresql.org/docs/current/indexes.html +- name: CREATE INDEX reference + description: Complete reference of the CREATE INDEX command documentation. + link: https://www.postgresql.org/docs/current/sql-createindex.html +- name: ALTER INDEX reference + description: Complete reference of the ALTER INDEX command documentation. + link: https://www.postgresql.org/docs/current/sql-alterindex.html +- name: DROP INDEX reference + description: Complete reference of the DROP INDEX command documentation. + link: https://www.postgresql.org/docs/current/sql-dropindex.html + +notes: +- Supports C(check_mode). +- The index building process can affect database performance. +- To avoid table locks on production databases, use I(concurrent=yes) (default behavior). 
+ +author: +- Andrew Klychkov (@Andersson007) +- Thomas O'Donnell (@andytom) + +extends_documentation_fragment: +- community.postgresql.postgres + +''' + +EXAMPLES = r''' +- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products + community.postgresql.postgresql_idx: + db: acme + table: products + columns: id,name + name: test_idx + +- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter + community.postgresql.postgresql_idx: + db: acme + table: products + columns: + - id + - name + idxname: test_idx + tablespace: ssd + storage_params: + - fillfactor=90 + +- name: Create gist index test_gist_idx concurrently on column geo_data of table map + community.postgresql.postgresql_idx: + db: somedb + table: map + idxtype: gist + columns: geo_data + idxname: test_gist_idx + +# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops +- name: Create gin index gin0_idx not concurrently on column comment of table test + community.postgresql.postgresql_idx: + idxname: gin0_idx + table: test + columns: comment gin_trgm_ops + concurrent: no + idxtype: gin + +- name: Drop btree test_idx concurrently + community.postgresql.postgresql_idx: + db: mydb + idxname: test_idx + state: absent + +- name: Drop test_idx cascade + community.postgresql.postgresql_idx: + db: mydb + idxname: test_idx + state: absent + cascade: yes + concurrent: no + +- name: Create btree index test_idx concurrently on columns id,comment where column id > 1 + community.postgresql.postgresql_idx: + db: mydb + table: test + columns: id,comment + idxname: test_idx + cond: id > 1 + +- name: Create unique btree index if not exists test_unique_idx on column name of table products + community.postgresql.postgresql_idx: + db: acme + table: products + columns: name + name: test_unique_idx + unique: yes + concurrent: no +''' + +RETURN = r''' +name: + description: Index name. 
+ returned: always + type: str + sample: 'foo_idx' +state: + description: Index state. + returned: always + type: str + sample: 'present' +schema: + description: Schema where index exists. + returned: always + type: str + sample: 'public' +tablespace: + description: Tablespace where index exists. + returned: always + type: str + sample: 'ssd' +query: + description: Query that was tried to be executed. + returned: always + type: str + sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)' +storage_params: + description: Index storage parameters. + returned: always + type: list + sample: [ "fillfactor=90" ] +valid: + description: Index validity. + returned: always + type: bool + sample: true +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import check_input +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN') + + +# =========================================== +# PostgreSQL module specific support methods. +# + +class Index(object): + + """Class for working with PostgreSQL indexes. + + TODO: + 1. Add possibility to change ownership + 2. Add possibility to change tablespace + 3. Add list called executed_queries (executed_query should be left too) + 4. 
Use self.module instead of passing arguments to the methods whenever possible + + Args: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + schema (str) -- name of the index schema + name (str) -- name of the index + + Attrs: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + schema (str) -- name of the index schema + name (str) -- name of the index + exists (bool) -- flag the index exists in the DB or not + info (dict) -- dict that contents information about the index + executed_query (str) -- executed query + """ + + def __init__(self, module, cursor, schema, name): + self.name = name + if schema: + self.schema = schema + else: + self.schema = 'public' + self.module = module + self.cursor = cursor + self.info = { + 'name': self.name, + 'state': 'absent', + 'schema': '', + 'tblname': '', + 'tblspace': '', + 'valid': True, + 'storage_params': [], + } + self.exists = False + self.__exists_in_db() + self.executed_query = '' + + def get_info(self): + """Refresh index info. + + Return self.info dict. + """ + self.__exists_in_db() + return self.info + + def __exists_in_db(self): + """Check index existence, collect info, add it to self.info dict. + + Return True if the index exists, otherwise, return False. 
+ """ + query = ("SELECT i.schemaname, i.tablename, i.tablespace, " + "pi.indisvalid, c.reloptions " + "FROM pg_catalog.pg_indexes AS i " + "JOIN pg_catalog.pg_class AS c " + "ON i.indexname = c.relname " + "JOIN pg_catalog.pg_index AS pi " + "ON c.oid = pi.indexrelid " + "WHERE i.indexname = %(name)s") + + res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False) + if res: + self.exists = True + self.info = dict( + name=self.name, + state='present', + schema=res[0][0], + tblname=res[0][1], + tblspace=res[0][2] if res[0][2] else '', + valid=res[0][3], + storage_params=res[0][4] if res[0][4] else [], + ) + return True + + else: + self.exists = False + return False + + def create(self, tblname, idxtype, columns, cond, tblspace, + storage_params, concurrent=True, unique=False): + """Create PostgreSQL index. + + Return True if success, otherwise, return False. + + Args: + tblname (str) -- name of a table for the index + idxtype (str) -- type of the index like BTREE, BRIN, etc + columns (str) -- string of comma-separated columns that need to be covered by index + tblspace (str) -- tablespace for storing the index + storage_params (str) -- string of comma-separated storage parameters + + Kwargs: + concurrent (bool) -- build index in concurrent mode, default True + """ + if self.exists: + return False + + if idxtype is None: + idxtype = "BTREE" + + query = 'CREATE' + + if unique: + query += ' UNIQUE' + + query += ' INDEX' + + if concurrent: + query += ' CONCURRENTLY' + + query += ' "%s"' % self.name + + query += ' ON "%s"."%s" ' % (self.schema, tblname) + + query += 'USING %s (%s)' % (idxtype, columns) + + if storage_params: + query += ' WITH (%s)' % storage_params + + if tblspace: + query += ' TABLESPACE "%s"' % tblspace + + if cond: + query += ' WHERE %s' % cond + + self.executed_query = query + + return exec_sql(self, query, return_bool=True, add_to_executed=False) + + def drop(self, cascade=False, concurrent=True): + """Drop PostgreSQL 
index. + + Return True if success, otherwise, return False. + + Args: + schema (str) -- name of the index schema + + Kwargs: + cascade (bool) -- automatically drop objects that depend on the index, + default False + concurrent (bool) -- build index in concurrent mode, default True + """ + if not self.exists: + return False + + query = 'DROP INDEX' + + if concurrent: + query += ' CONCURRENTLY' + + query += ' "%s"."%s"' % (self.schema, self.name) + + if cascade: + query += ' CASCADE' + + self.executed_query = query + + return exec_sql(self, query, return_bool=True, add_to_executed=False) + + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + idxname=dict(type='str', required=True, aliases=['name']), + db=dict(type='str', aliases=['login_db']), + state=dict(type='str', default='present', choices=['absent', 'present']), + concurrent=dict(type='bool', default=True), + unique=dict(type='bool', default=False), + table=dict(type='str'), + idxtype=dict(type='str', aliases=['type']), + columns=dict(type='list', elements='str', aliases=['column']), + cond=dict(type='str'), + session_role=dict(type='str'), + tablespace=dict(type='str'), + storage_params=dict(type='list', elements='str'), + cascade=dict(type='bool', default=False), + schema=dict(type='str'), + trust_input=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + idxname = module.params["idxname"] + state = module.params["state"] + concurrent = module.params["concurrent"] + unique = module.params["unique"] + table = module.params["table"] + idxtype = module.params["idxtype"] + columns = module.params["columns"] + cond = module.params["cond"] + tablespace = module.params["tablespace"] + storage_params = module.params["storage_params"] + cascade = module.params["cascade"] + schema = module.params["schema"] + session_role = 
module.params["session_role"] + trust_input = module.params["trust_input"] + + if not trust_input: + # Check input for potentially dangerous elements: + check_input(module, idxname, session_role, schema, table, columns, + tablespace, storage_params, cond) + + if concurrent and cascade: + module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive") + + if unique and (idxtype and idxtype != 'btree'): + module.fail_json(msg="Only btree currently supports unique indexes") + + if state == 'present': + if not table: + module.fail_json(msg="Table must be specified") + if not columns: + module.fail_json(msg="At least one column must be specified") + else: + if table or columns or cond or idxtype or tablespace: + module.fail_json(msg="Index %s is going to be removed, so it does not " + "make sense to pass a table name, columns, conditions, " + "index type, or tablespace" % idxname) + + if cascade and state != 'absent': + module.fail_json(msg="cascade parameter used only with state=absent") + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Set defaults: + changed = False + + # Do job: + index = Index(module, cursor, schema, idxname) + kw = index.get_info() + kw['query'] = '' + + # + # check_mode start + if module.check_mode: + if state == 'present' and index.exists: + kw['changed'] = False + module.exit_json(**kw) + + elif state == 'present' and not index.exists: + kw['changed'] = True + module.exit_json(**kw) + + elif state == 'absent' and not index.exists: + kw['changed'] = False + module.exit_json(**kw) + + elif state == 'absent' and index.exists: + kw['changed'] = True + module.exit_json(**kw) + # check_mode end + # + + if state == "present": + if idxtype and idxtype.upper() not in VALID_IDX_TYPES: + module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname)) + + columns = 
','.join(columns) + + if storage_params: + storage_params = ','.join(storage_params) + + changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique) + + if changed: + kw = index.get_info() + kw['state'] = 'present' + kw['query'] = index.executed_query + + else: + changed = index.drop(cascade, concurrent) + + if changed: + kw['state'] = 'absent' + kw['query'] = index.executed_query + + if not kw['valid']: + db_connection.rollback() + module.warn("Index %s is invalid! ROLLBACK" % idxname) + + if not concurrent: + db_connection.commit() + + kw['changed'] = changed + db_connection.close() + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py new file mode 100644 index 00000000..ac412c49 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py @@ -0,0 +1,1032 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_info +short_description: Gather information about PostgreSQL servers +description: +- Gathers information about PostgreSQL servers. +options: + filter: + description: + - Limit the collected information by comma separated string or YAML list. + - Allowable values are C(version), + C(databases), C(in_recovery), C(settings), C(tablespaces), C(roles), + C(replications), C(repl_slots). + - By default, collects all subsets. + - You can use shell-style (fnmatch) wildcard to pass groups of values (see Examples). + - You can use '!' 
before value (for example, C(!settings)) to exclude it from the information. + - If you pass including and excluding values to the filter, for example, I(filter=!settings,ver), + the excluding values will be ignored. + type: list + elements: str + db: + description: + - Name of database to connect. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + trust_input: + description: + - If C(no), check whether a value of I(session_role) is potentially dangerous. + - It makes sense to use C(no) only when SQL injections via I(session_role) are possible. + type: bool + default: yes + version_added: '0.2.0' +seealso: +- module: community.postgresql.postgresql_ping +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.postgresql.postgres + +notes: +- Supports C(check_mode). +''' + +EXAMPLES = r''' +# Display info from postgres hosts. 
+# ansible postgres -m postgresql_info + +# Display only databases and roles info from all hosts using shell-style wildcards: +# ansible all -m postgresql_info -a 'filter=dat*,rol*' + +# Display only replications and repl_slots info from standby hosts using shell-style wildcards: +# ansible standby -m postgresql_info -a 'filter=repl*' + +# Display all info from databases hosts except settings: +# ansible databases -m postgresql_info -a 'filter=!settings' + +- name: Collect PostgreSQL version and extensions + become: yes + become_user: postgres + community.postgresql.postgresql_info: + filter: ver*,ext* + +- name: Collect all info except settings and roles + become: yes + become_user: postgres + community.postgresql.postgresql_info: + filter: "!settings,!roles" + +# On FreeBSD with PostgreSQL 9.5 version and lower use pgsql user to become +# and pass "postgres" as a database to connect to +- name: Collect tablespaces and repl_slots info + become: yes + become_user: pgsql + community.postgresql.postgresql_info: + db: postgres + filter: + - tablesp* + - repl_sl* + +- name: Collect all info except databases + become: yes + become_user: postgres + community.postgresql.postgresql_info: + filter: + - "!databases" +''' + +RETURN = r''' +version: + description: Database server version U(https://www.postgresql.org/support/versioning/). + returned: always + type: dict + sample: { "version": { "major": 10, "minor": 6 } } + contains: + major: + description: Major server version. + returned: always + type: int + sample: 11 + minor: + description: Minor server version. + returned: always + type: int + sample: 1 +in_recovery: + description: Indicates if the service is in recovery mode or not. + returned: always + type: bool + sample: false +databases: + description: Information about databases. 
+ returned: always + type: dict + sample: + - { "postgres": { "access_priv": "", "collate": "en_US.UTF-8", + "ctype": "en_US.UTF-8", "encoding": "UTF8", "owner": "postgres", "size": "7997 kB" } } + contains: + database_name: + description: Database name. + returned: always + type: dict + sample: template1 + contains: + access_priv: + description: Database access privileges. + returned: always + type: str + sample: "=c/postgres_npostgres=CTc/postgres" + collate: + description: + - Database collation U(https://www.postgresql.org/docs/current/collation.html). + returned: always + type: str + sample: en_US.UTF-8 + ctype: + description: + - Database LC_CTYPE U(https://www.postgresql.org/docs/current/multibyte.html). + returned: always + type: str + sample: en_US.UTF-8 + encoding: + description: + - Database encoding U(https://www.postgresql.org/docs/current/multibyte.html). + returned: always + type: str + sample: UTF8 + owner: + description: + - Database owner U(https://www.postgresql.org/docs/current/sql-createdatabase.html). + returned: always + type: str + sample: postgres + size: + description: Database size in bytes. + returned: always + type: str + sample: 8189415 + extensions: + description: + - Extensions U(https://www.postgresql.org/docs/current/sql-createextension.html). + returned: always + type: dict + sample: + - { "plpgsql": { "description": "PL/pgSQL procedural language", + "extversion": { "major": 1, "minor": 0 } } } + contains: + extdescription: + description: Extension description. + returned: if existent + type: str + sample: PL/pgSQL procedural language + extversion: + description: Extension description. + returned: always + type: dict + contains: + major: + description: Extension major version. + returned: always + type: int + sample: 1 + minor: + description: Extension minor version. + returned: always + type: int + sample: 0 + nspname: + description: Namespace where the extension is. 
+ returned: always + type: str + sample: pg_catalog + languages: + description: Procedural languages U(https://www.postgresql.org/docs/current/xplang.html). + returned: always + type: dict + sample: { "sql": { "lanacl": "", "lanowner": "postgres" } } + contains: + lanacl: + description: + - Language access privileges + U(https://www.postgresql.org/docs/current/catalog-pg-language.html). + returned: always + type: str + sample: "{postgres=UC/postgres,=U/postgres}" + lanowner: + description: + - Language owner U(https://www.postgresql.org/docs/current/catalog-pg-language.html). + returned: always + type: str + sample: postgres + namespaces: + description: + - Namespaces (schema) U(https://www.postgresql.org/docs/current/sql-createschema.html). + returned: always + type: dict + sample: { "pg_catalog": { "nspacl": "{postgres=UC/postgres,=U/postgres}", "nspowner": "postgres" } } + contains: + nspacl: + description: + - Access privileges U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html). + returned: always + type: str + sample: "{postgres=UC/postgres,=U/postgres}" + nspowner: + description: + - Schema owner U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html). + returned: always + type: str + sample: postgres + publications: + description: + - Information about logical replication publications (available for PostgreSQL 10 and higher) + U(https://www.postgresql.org/docs/current/logical-replication-publication.html). + - Content depends on PostgreSQL server version. + returned: if configured + type: dict + sample: { "pub1": { "ownername": "postgres", "puballtables": true, "pubinsert": true, "pubupdate": true } } + version_added: '0.2.0' + subscriptions: + description: + - Information about replication subscriptions (available for PostgreSQL 10 and higher) + U(https://www.postgresql.org/docs/current/logical-replication-subscription.html). + - Content depends on PostgreSQL server version. 
+ returned: if configured + type: dict + sample: + - { "my_subscription": {"ownername": "postgres", "subenabled": true, "subpublications": ["first_publication"] } } + version_added: '0.2.0' +repl_slots: + description: + - Replication slots (available in 9.4 and later) + U(https://www.postgresql.org/docs/current/view-pg-replication-slots.html). + returned: if existent + type: dict + sample: { "slot0": { "active": false, "database": null, "plugin": null, "slot_type": "physical" } } + contains: + active: + description: + - True means that a receiver has connected to it, and it is currently reserving archives. + returned: always + type: bool + sample: true + database: + description: Database name this slot is associated with, or null. + returned: always + type: str + sample: acme + plugin: + description: + - Base name of the shared object containing the output plugin + this logical slot is using, or null for physical slots. + returned: always + type: str + sample: pgoutput + slot_type: + description: The slot type - physical or logical. + returned: always + type: str + sample: logical +replications: + description: + - Information about the current replications by process PIDs + U(https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-STATS-VIEWS-TABLE). + returned: if pg_stat_replication view existent + type: dict + sample: + - { "76580": { "app_name": "standby1", "backend_start": "2019-02-03 00:14:33.908593+03", + "client_addr": "10.10.10.2", "client_hostname": "", "state": "streaming", "usename": "postgres" } } + contains: + usename: + description: + - Name of the user logged into this WAL sender process ('usename' is a column name in pg_stat_replication view). + returned: always + type: str + sample: replication_user + app_name: + description: Name of the application that is connected to this WAL sender. + returned: if existent + type: str + sample: acme_srv + client_addr: + description: + - IP address of the client connected to this WAL sender. 
+ - If this field is null, it indicates that the client is connected + via a Unix socket on the server machine. + returned: always + type: str + sample: 10.0.0.101 + client_hostname: + description: + - Host name of the connected client, as reported by a reverse DNS lookup of client_addr. + - This field will only be non-null for IP connections, and only when log_hostname is enabled. + returned: always + type: str + sample: dbsrv1 + backend_start: + description: Time when this process was started, i.e., when the client connected to this WAL sender. + returned: always + type: str + sample: "2019-02-03 00:14:33.908593+03" + state: + description: Current WAL sender state. + returned: always + type: str + sample: streaming +tablespaces: + description: + - Information about tablespaces U(https://www.postgresql.org/docs/current/catalog-pg-tablespace.html). + returned: always + type: dict + sample: + - { "test": { "spcacl": "{postgres=C/postgres,andreyk=C/postgres}", "spcoptions": [ "seq_page_cost=1" ], + "spcowner": "postgres" } } + contains: + spcacl: + description: Tablespace access privileges. + returned: always + type: str + sample: "{postgres=C/postgres,andreyk=C/postgres}" + spcoptions: + description: Tablespace-level options. + returned: always + type: list + sample: [ "seq_page_cost=1" ] + spcowner: + description: Owner of the tablespace. + returned: always + type: str + sample: test_user +roles: + description: + - Information about roles U(https://www.postgresql.org/docs/current/user-manag.html). + returned: always + type: dict + sample: + - { "test_role": { "canlogin": true, "member_of": [ "user_ro" ], "superuser": false, + "valid_until": "9999-12-31T23:59:59.999999+00:00" } } + contains: + canlogin: + description: Login privilege U(https://www.postgresql.org/docs/current/role-attributes.html). + returned: always + type: bool + sample: true + member_of: + description: + - Role membership U(https://www.postgresql.org/docs/current/role-membership.html). 
+ returned: always + type: list + sample: [ "read_only_users" ] + superuser: + description: User is a superuser or not. + returned: always + type: bool + sample: false + valid_until: + description: + - Password expiration date U(https://www.postgresql.org/docs/current/sql-alterrole.html). + returned: always + type: str + sample: "9999-12-31T23:59:59.999999+00:00" +pending_restart_settings: + description: + - List of settings that are pending restart to be set. + returned: always + type: list + sample: [ "shared_buffers" ] +settings: + description: + - Information about run-time server parameters + U(https://www.postgresql.org/docs/current/view-pg-settings.html). + returned: always + type: dict + sample: + - { "work_mem": { "boot_val": "4096", "context": "user", "max_val": "2147483647", + "min_val": "64", "setting": "8192", "sourcefile": "/var/lib/pgsql/10/data/postgresql.auto.conf", + "unit": "kB", "vartype": "integer", "val_in_bytes": 4194304 } } + contains: + setting: + description: Current value of the parameter. + returned: always + type: str + sample: 49152 + unit: + description: Implicit unit of the parameter. + returned: always + type: str + sample: kB + boot_val: + description: + - Parameter value assumed at server startup if the parameter is not otherwise set. + returned: always + type: str + sample: 4096 + min_val: + description: + - Minimum allowed value of the parameter (null for non-numeric values). + returned: always + type: str + sample: 64 + max_val: + description: + - Maximum allowed value of the parameter (null for non-numeric values). + returned: always + type: str + sample: 2147483647 + sourcefile: + description: + - Configuration file the current value was set in. + - Null for values set from sources other than configuration files, + or when examined by a user who is neither a superuser or a member of pg_read_all_settings. + - Helpful when using include directives in configuration files. 
+ returned: always + type: str + sample: /var/lib/pgsql/10/data/postgresql.auto.conf + context: + description: + - Context required to set the parameter's value. + - For more information see U(https://www.postgresql.org/docs/current/view-pg-settings.html). + returned: always + type: str + sample: user + vartype: + description: + - Parameter type (bool, enum, integer, real, or string). + returned: always + type: str + sample: integer + val_in_bytes: + description: + - Current value of the parameter in bytes. + returned: if supported + type: int + sample: 2147483647 + pretty_val: + description: + - Value presented in the pretty form. + returned: always + type: str + sample: 2MB + pending_restart: + description: + - True if the value has been changed in the configuration file but needs a restart; or false otherwise. + - Returns only if C(settings) is passed. + returned: always + type: bool + sample: false +''' + +from fnmatch import fnmatch + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils.six import iteritems +from ansible.module_utils._text import to_native + + +# =========================================== +# PostgreSQL module specific support methods. +# + +class PgDbConn(object): + """Auxiliary class for working with PostgreSQL connection objects. + + Arguments: + module (AnsibleModule): Object of AnsibleModule class that + contains connection parameters. 
+ """ + + def __init__(self, module): + self.module = module + self.db_conn = None + self.cursor = None + + def connect(self): + """Connect to a PostgreSQL database and return a cursor object. + + Note: connection parameters are passed by self.module object. + """ + conn_params = get_conn_params(self.module, self.module.params, warn_db_default=False) + self.db_conn = connect_to_db(self.module, conn_params) + return self.db_conn.cursor(cursor_factory=DictCursor) + + def reconnect(self, dbname): + """Reconnect to another database and return a PostgreSQL cursor object. + + Arguments: + dbname (string): Database name to connect to. + """ + self.db_conn.close() + + self.module.params['database'] = dbname + return self.connect() + + +class PgClusterInfo(object): + """Class for collection information about a PostgreSQL instance. + + Arguments: + module (AnsibleModule): Object of AnsibleModule class. + db_conn_obj (psycopg2.connect): PostgreSQL connection object. + """ + + def __init__(self, module, db_conn_obj): + self.module = module + self.db_obj = db_conn_obj + self.cursor = db_conn_obj.connect() + self.pg_info = { + "version": {}, + "in_recovery": None, + "tablespaces": {}, + "databases": {}, + "replications": {}, + "repl_slots": {}, + "settings": {}, + "roles": {}, + "pending_restart_settings": [], + } + + def collect(self, val_list=False): + """Collect information based on 'filter' option.""" + subset_map = { + "version": self.get_pg_version, + "in_recovery": self.get_recovery_state, + "tablespaces": self.get_tablespaces, + "databases": self.get_db_info, + "replications": self.get_repl_info, + "repl_slots": self.get_rslot_info, + "settings": self.get_settings, + "roles": self.get_role_info, + } + + incl_list = [] + excl_list = [] + # Notice: incl_list and excl_list + # don't make sense together, therefore, + # if incl_list is not empty, we collect + # only values from it: + if val_list: + for i in val_list: + if i[0] != '!': + incl_list.append(i) + else: + 
excl_list.append(i.lstrip('!')) + + if incl_list: + for s in subset_map: + for i in incl_list: + if fnmatch(s, i): + subset_map[s]() + break + elif excl_list: + found = False + # Collect info: + for s in subset_map: + for e in excl_list: + if fnmatch(s, e): + found = True + + if not found: + subset_map[s]() + else: + found = False + + # Default behaviour, if include or exclude is not passed: + else: + # Just collect info for each item: + for s in subset_map: + subset_map[s]() + + return self.pg_info + + def get_pub_info(self): + """Get publication statistics.""" + query = ("SELECT p.*, r.rolname AS ownername " + "FROM pg_catalog.pg_publication AS p " + "JOIN pg_catalog.pg_roles AS r " + "ON p.pubowner = r.oid") + + result = self.__exec_sql(query) + + if result: + result = [dict(row) for row in result] + else: + return {} + + publications = {} + + for elem in result: + if not publications.get(elem['pubname']): + publications[elem['pubname']] = {} + + for key, val in iteritems(elem): + if key != 'pubname': + publications[elem['pubname']][key] = val + + return publications + + def get_subscr_info(self): + """Get subscription statistics.""" + query = ("SELECT s.*, r.rolname AS ownername, d.datname AS dbname " + "FROM pg_catalog.pg_subscription s " + "JOIN pg_catalog.pg_database d " + "ON s.subdbid = d.oid " + "JOIN pg_catalog.pg_roles AS r " + "ON s.subowner = r.oid") + + result = self.__exec_sql(query) + + if result: + result = [dict(row) for row in result] + else: + return {} + + subscr_info = {} + + for elem in result: + if not subscr_info.get(elem['dbname']): + subscr_info[elem['dbname']] = {} + + if not subscr_info[elem['dbname']].get(elem['subname']): + subscr_info[elem['dbname']][elem['subname']] = {} + + for key, val in iteritems(elem): + if key not in ('subname', 'dbname'): + subscr_info[elem['dbname']][elem['subname']][key] = val + + return subscr_info + + def get_tablespaces(self): + """Get information about tablespaces.""" + # Check spcoption exists: + opt 
= self.__exec_sql("SELECT column_name " + "FROM information_schema.columns " + "WHERE table_name = 'pg_tablespace' " + "AND column_name = 'spcoptions'") + + if not opt: + query = ("SELECT s.spcname, a.rolname, s.spcacl " + "FROM pg_tablespace AS s " + "JOIN pg_authid AS a ON s.spcowner = a.oid") + else: + query = ("SELECT s.spcname, a.rolname, s.spcacl, s.spcoptions " + "FROM pg_tablespace AS s " + "JOIN pg_authid AS a ON s.spcowner = a.oid") + + res = self.__exec_sql(query) + ts_dict = {} + for i in res: + ts_name = i[0] + ts_info = dict( + spcowner=i[1], + spcacl=i[2] if i[2] else '', + ) + if opt: + ts_info['spcoptions'] = i[3] if i[3] else [] + + ts_dict[ts_name] = ts_info + + self.pg_info["tablespaces"] = ts_dict + + def get_ext_info(self): + """Get information about existing extensions.""" + # Check that pg_extension exists: + res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM " + "information_schema.tables " + "WHERE table_name = 'pg_extension')") + if not res[0][0]: + return True + + query = ("SELECT e.extname, e.extversion, n.nspname, c.description " + "FROM pg_catalog.pg_extension AS e " + "LEFT JOIN pg_catalog.pg_namespace AS n " + "ON n.oid = e.extnamespace " + "LEFT JOIN pg_catalog.pg_description AS c " + "ON c.objoid = e.oid " + "AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass") + res = self.__exec_sql(query) + ext_dict = {} + for i in res: + ext_ver = i[1].split('.') + + ext_dict[i[0]] = dict( + extversion=dict( + major=int(ext_ver[0]), + minor=int(ext_ver[1]), + ), + nspname=i[2], + description=i[3], + ) + + return ext_dict + + def get_role_info(self): + """Get information about roles (in PgSQL groups and users are roles).""" + query = ("SELECT r.rolname, r.rolsuper, r.rolcanlogin, " + "r.rolvaliduntil, " + "ARRAY(SELECT b.rolname " + "FROM pg_catalog.pg_auth_members AS m " + "JOIN pg_catalog.pg_roles AS b ON (m.roleid = b.oid) " + "WHERE m.member = r.oid) AS memberof " + "FROM pg_catalog.pg_roles AS r " + "WHERE r.rolname !~ 
'^pg_'") + + res = self.__exec_sql(query) + rol_dict = {} + for i in res: + rol_dict[i[0]] = dict( + superuser=i[1], + canlogin=i[2], + valid_until=i[3] if i[3] else '', + member_of=i[4] if i[4] else [], + ) + + self.pg_info["roles"] = rol_dict + + def get_rslot_info(self): + """Get information about replication slots if exist.""" + # Check that pg_replication_slots exists: + res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM " + "information_schema.tables " + "WHERE table_name = 'pg_replication_slots')") + if not res[0][0]: + return True + + query = ("SELECT slot_name, plugin, slot_type, database, " + "active FROM pg_replication_slots") + res = self.__exec_sql(query) + + # If there is no replication: + if not res: + return True + + rslot_dict = {} + for i in res: + rslot_dict[i[0]] = dict( + plugin=i[1], + slot_type=i[2], + database=i[3], + active=i[4], + ) + + self.pg_info["repl_slots"] = rslot_dict + + def get_settings(self): + """Get server settings.""" + # Check pending restart column exists: + pend_rest_col_exists = self.__exec_sql("SELECT 1 FROM information_schema.columns " + "WHERE table_name = 'pg_settings' " + "AND column_name = 'pending_restart'") + if not pend_rest_col_exists: + query = ("SELECT name, setting, unit, context, vartype, " + "boot_val, min_val, max_val, sourcefile " + "FROM pg_settings") + else: + query = ("SELECT name, setting, unit, context, vartype, " + "boot_val, min_val, max_val, sourcefile, pending_restart " + "FROM pg_settings") + + res = self.__exec_sql(query) + + set_dict = {} + for i in res: + val_in_bytes = None + setting = i[1] + if i[2]: + unit = i[2] + else: + unit = '' + + if unit == 'kB': + val_in_bytes = int(setting) * 1024 + + elif unit == '8kB': + val_in_bytes = int(setting) * 1024 * 8 + + elif unit == 'MB': + val_in_bytes = int(setting) * 1024 * 1024 + + if val_in_bytes is not None and val_in_bytes < 0: + val_in_bytes = 0 + + setting_name = i[0] + pretty_val = self.__get_pretty_val(setting_name) + + pending_restart = 
None + if pend_rest_col_exists: + pending_restart = i[9] + + set_dict[setting_name] = dict( + setting=setting, + unit=unit, + context=i[3], + vartype=i[4], + boot_val=i[5] if i[5] else '', + min_val=i[6] if i[6] else '', + max_val=i[7] if i[7] else '', + sourcefile=i[8] if i[8] else '', + pretty_val=pretty_val, + ) + if val_in_bytes is not None: + set_dict[setting_name]['val_in_bytes'] = val_in_bytes + + if pending_restart is not None: + set_dict[setting_name]['pending_restart'] = pending_restart + if pending_restart: + self.pg_info["pending_restart_settings"].append(setting_name) + + self.pg_info["settings"] = set_dict + + def get_repl_info(self): + """Get information about replication if the server is a master.""" + # Check that pg_replication_slots exists: + res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM " + "information_schema.tables " + "WHERE table_name = 'pg_stat_replication')") + if not res[0][0]: + return True + + query = ("SELECT r.pid, a.rolname, r.application_name, r.client_addr, " + "r.client_hostname, r.backend_start::text, r.state " + "FROM pg_stat_replication AS r " + "JOIN pg_authid AS a ON r.usesysid = a.oid") + res = self.__exec_sql(query) + + # If there is no replication: + if not res: + return True + + repl_dict = {} + for i in res: + repl_dict[i[0]] = dict( + usename=i[1], + app_name=i[2] if i[2] else '', + client_addr=i[3], + client_hostname=i[4] if i[4] else '', + backend_start=i[5], + state=i[6], + ) + + self.pg_info["replications"] = repl_dict + + def get_lang_info(self): + """Get information about current supported languages.""" + query = ("SELECT l.lanname, a.rolname, l.lanacl " + "FROM pg_language AS l " + "JOIN pg_authid AS a ON l.lanowner = a.oid") + res = self.__exec_sql(query) + lang_dict = {} + for i in res: + lang_dict[i[0]] = dict( + lanowner=i[1], + lanacl=i[2] if i[2] else '', + ) + + return lang_dict + + def get_namespaces(self): + """Get information about namespaces.""" + query = ("SELECT n.nspname, a.rolname, n.nspacl " 
+ "FROM pg_catalog.pg_namespace AS n " + "JOIN pg_authid AS a ON a.oid = n.nspowner") + res = self.__exec_sql(query) + + nsp_dict = {} + for i in res: + nsp_dict[i[0]] = dict( + nspowner=i[1], + nspacl=i[2] if i[2] else '', + ) + + return nsp_dict + + def get_pg_version(self): + """Get major and minor PostgreSQL server version.""" + query = "SELECT version()" + raw = self.__exec_sql(query)[0][0] + raw = raw.split()[1].split('.') + self.pg_info["version"] = dict( + major=int(raw[0]), + minor=int(raw[1].rstrip(',')), + ) + + def get_recovery_state(self): + """Get if the service is in recovery mode.""" + self.pg_info["in_recovery"] = self.__exec_sql("SELECT pg_is_in_recovery()")[0][0] + + def get_db_info(self): + """Get information about the current database.""" + # Following query returns: + # Name, Owner, Encoding, Collate, Ctype, Access Priv, Size + query = ("SELECT d.datname, " + "pg_catalog.pg_get_userbyid(d.datdba), " + "pg_catalog.pg_encoding_to_char(d.encoding), " + "d.datcollate, " + "d.datctype, " + "pg_catalog.array_to_string(d.datacl, E'\n'), " + "CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') " + "THEN pg_catalog.pg_database_size(d.datname)::text " + "ELSE 'No Access' END, " + "t.spcname " + "FROM pg_catalog.pg_database AS d " + "JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid " + "WHERE d.datname != 'template0'") + + res = self.__exec_sql(query) + + db_dict = {} + for i in res: + db_dict[i[0]] = dict( + owner=i[1], + encoding=i[2], + collate=i[3], + ctype=i[4], + access_priv=i[5] if i[5] else '', + size=i[6], + ) + + if self.cursor.connection.server_version >= 100000: + subscr_info = self.get_subscr_info() + + for datname in db_dict: + self.cursor = self.db_obj.reconnect(datname) + db_dict[datname]['namespaces'] = self.get_namespaces() + db_dict[datname]['extensions'] = self.get_ext_info() + db_dict[datname]['languages'] = self.get_lang_info() + if self.cursor.connection.server_version >= 100000: + 
db_dict[datname]['publications'] = self.get_pub_info() + db_dict[datname]['subscriptions'] = subscr_info.get(datname, {}) + + self.pg_info["databases"] = db_dict + + def __get_pretty_val(self, setting): + """Get setting's value represented by SHOW command.""" + return self.__exec_sql("SHOW %s" % setting)[0][0] + + def __exec_sql(self, query): + """Execute SQL and return the result.""" + try: + self.cursor.execute(query) + res = self.cursor.fetchall() + if res: + return res + except Exception as e: + self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e))) + self.cursor.close() + return False + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + db=dict(type='str', aliases=['login_db']), + filter=dict(type='list', elements='str'), + session_role=dict(type='str'), + trust_input=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + filter_ = module.params['filter'] + + if not module.params['trust_input']: + # Check input for potentially dangerous elements: + check_input(module, module.params['session_role']) + + db_conn_obj = PgDbConn(module) + + # Do job: + pg_info = PgClusterInfo(module, db_conn_obj) + + module.exit_json(**pg_info.collect(filter_)) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py new file mode 100644 index 00000000..338522d5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py @@ -0,0 +1,365 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2014, Jens Depuydt <http://www.jensd.be> +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: postgresql_lang +short_description: Adds, removes or changes procedural languages with a PostgreSQL database +description: +- Adds, removes or changes procedural languages with a PostgreSQL database. +- This module allows you to add a language, remote a language or change the trust + relationship with a PostgreSQL database. +- The module can be used on the machine where executed or on a remote host. +- When removing a language from a database, it is possible that dependencies prevent + the database from being removed. In that case, you can specify I(cascade=yes) to + automatically drop objects that depend on the language (such as functions in the + language). +- In case the language can't be deleted because it is required by the + database system, you can specify I(fail_on_drop=no) to ignore the error. +- Be careful when marking a language as trusted since this could be a potential + security breach. Untrusted languages allow only users with the PostgreSQL superuser + privilege to use this language to create new functions. +options: + lang: + description: + - Name of the procedural language to add, remove or change. + required: true + type: str + aliases: + - name + trust: + description: + - Make this language trusted for the selected db. + type: bool + default: 'no' + db: + description: + - Name of database to connect to and where the language will be added, removed or changed. + type: str + aliases: + - login_db + required: true + force_trust: + description: + - Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate. + - Use with care! + type: bool + default: 'no' + fail_on_drop: + description: + - If C(yes), fail when removing a language. Otherwise just log and continue. + - In some cases, it is not possible to remove a language (used by the db-system). 
+ - When dependencies block the removal, consider using I(cascade). + type: bool + default: 'yes' + cascade: + description: + - When dropping a language, also delete object that depend on this language. + - Only used when I(state=absent). + type: bool + default: 'no' + session_role: + description: + - Switch to session_role after connecting. + - The specified I(session_role) must be a role that the current I(login_user) is a member of. + - Permissions checking for SQL commands is carried out as though the + I(session_role) were the one that had logged in originally. + type: str + state: + description: + - The state of the language for the selected database. + type: str + default: present + choices: [ absent, present ] + login_unix_socket: + description: + - Path to a Unix domain socket for local connections. + type: str + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + type: str + aliases: [ ssl_rootcert ] + owner: + description: + - Set an owner for the language. + - Ignored when I(state=absent). + type: str + version_added: '0.2.0' + trust_input: + description: + - If C(no), check whether values of parameters I(lang), I(session_role), + I(owner) are potentially dangerous. + - It makes sense to use C(no) only when SQL injections via the parameters are possible. 
+ type: bool + default: yes + version_added: '0.2.0' +seealso: +- name: PostgreSQL languages + description: General information about PostgreSQL languages. + link: https://www.postgresql.org/docs/current/xplang.html +- name: CREATE LANGUAGE reference + description: Complete reference of the CREATE LANGUAGE command documentation. + link: https://www.postgresql.org/docs/current/sql-createlanguage.html +- name: ALTER LANGUAGE reference + description: Complete reference of the ALTER LANGUAGE command documentation. + link: https://www.postgresql.org/docs/current/sql-alterlanguage.html +- name: DROP LANGUAGE reference + description: Complete reference of the DROP LANGUAGE command documentation. + link: https://www.postgresql.org/docs/current/sql-droplanguage.html +author: +- Jens Depuydt (@jensdepuydt) +- Thomas O'Donnell (@andytom) +extends_documentation_fragment: +- community.postgresql.postgres + +notes: +- Supports C(check_mode). +''' + +EXAMPLES = r''' +- name: Add language pltclu to database testdb if it doesn't exist + community.postgresql.postgresql_lang: db=testdb lang=pltclu state=present + +# Add language pltclu to database testdb if it doesn't exist and mark it as trusted. +# Marks the language as trusted if it exists but isn't trusted yet. 
+# force_trust makes sure that the language will be marked as trusted +- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted + community.postgresql.postgresql_lang: + db: testdb + lang: pltclu + state: present + trust: yes + force_trust: yes + +- name: Remove language pltclu from database testdb + community.postgresql.postgresql_lang: + db: testdb + lang: pltclu + state: absent + +- name: Remove language pltclu from database testdb and remove all dependencies + community.postgresql.postgresql_lang: + db: testdb + lang: pltclu + state: absent + cascade: yes + +- name: Remove language c from database testdb but ignore errors if something prevents the removal + community.postgresql.postgresql_lang: + db: testdb + lang: pltclu + state: absent + fail_on_drop: no + +- name: In testdb change owner of mylang to alice + community.postgresql.postgresql_lang: + db: testdb + lang: mylang + owner: alice +''' + +RETURN = r''' +queries: + description: List of executed queries. 
+ returned: always + type: list + sample: ['CREATE LANGUAGE "acme"'] +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import check_input +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) + +executed_queries = [] + + +def lang_exists(cursor, lang): + """Checks if language exists for db""" + query = "SELECT lanname FROM pg_language WHERE lanname = %(lang)s" + cursor.execute(query, {'lang': lang}) + return cursor.rowcount > 0 + + +def lang_istrusted(cursor, lang): + """Checks if language is trusted for db""" + query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s" + cursor.execute(query, {'lang': lang}) + return cursor.fetchone()[0] + + +def lang_altertrust(cursor, lang, trust): + """Changes if language is trusted for db""" + query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s" + cursor.execute(query, {'trust': trust, 'lang': lang}) + executed_queries.append(cursor.mogrify(query, {'trust': trust, 'lang': lang})) + return True + + +def lang_add(cursor, lang, trust): + """Adds language for db""" + if trust: + query = 'CREATE TRUSTED LANGUAGE "%s"' % lang + else: + query = 'CREATE LANGUAGE "%s"' % lang + executed_queries.append(query) + cursor.execute(query) + return True + + +def lang_drop(cursor, lang, cascade): + """Drops language for db""" + cursor.execute("SAVEPOINT ansible_pgsql_lang_drop") + try: + if cascade: + query = "DROP LANGUAGE \"%s\" CASCADE" % lang + else: + query = "DROP LANGUAGE \"%s\"" % lang + executed_queries.append(query) + cursor.execute(query) + except Exception: + cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop") + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop") + return False + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop") + return True + + +def 
get_lang_owner(cursor, lang): + """Get language owner. + + Args: + cursor (cursor): psycopg2 cursor object. + lang (str): language name. + """ + query = ("SELECT r.rolname FROM pg_language l " + "JOIN pg_roles r ON l.lanowner = r.oid " + "WHERE l.lanname = %(lang)s") + cursor.execute(query, {'lang': lang}) + return cursor.fetchone()[0] + + +def set_lang_owner(cursor, lang, owner): + """Set language owner. + + Args: + cursor (cursor): psycopg2 cursor object. + lang (str): language name. + owner (str): name of new owner. + """ + query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner) + executed_queries.append(query) + cursor.execute(query) + return True + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + db=dict(type="str", required=True, aliases=["login_db"]), + lang=dict(type="str", required=True, aliases=["name"]), + state=dict(type="str", default="present", choices=["absent", "present"]), + trust=dict(type="bool", default="no"), + force_trust=dict(type="bool", default="no"), + cascade=dict(type="bool", default="no"), + fail_on_drop=dict(type="bool", default="yes"), + session_role=dict(type="str"), + owner=dict(type="str"), + trust_input=dict(type="bool", default="yes") + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + db = module.params["db"] + lang = module.params["lang"] + state = module.params["state"] + trust = module.params["trust"] + force_trust = module.params["force_trust"] + cascade = module.params["cascade"] + fail_on_drop = module.params["fail_on_drop"] + owner = module.params["owner"] + session_role = module.params["session_role"] + trust_input = module.params["trust_input"] + + if not trust_input: + # Check input for potentially dangerous elements: + check_input(module, lang, session_role, owner) + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = 
db_connection.cursor() + + changed = False + kw = {'db': db, 'lang': lang, 'trust': trust} + + if state == "present": + if lang_exists(cursor, lang): + lang_trusted = lang_istrusted(cursor, lang) + if (lang_trusted and not trust) or (not lang_trusted and trust): + if module.check_mode: + changed = True + else: + changed = lang_altertrust(cursor, lang, trust) + else: + if module.check_mode: + changed = True + else: + changed = lang_add(cursor, lang, trust) + if force_trust: + changed = lang_altertrust(cursor, lang, trust) + + else: + if lang_exists(cursor, lang): + if module.check_mode: + changed = True + kw['lang_dropped'] = True + else: + changed = lang_drop(cursor, lang, cascade) + if fail_on_drop and not changed: + msg = ("unable to drop language, use cascade " + "to delete dependencies or fail_on_drop=no to ignore") + module.fail_json(msg=msg) + kw['lang_dropped'] = changed + + if owner and state == 'present': + if lang_exists(cursor, lang): + if owner != get_lang_owner(cursor, lang): + changed = set_lang_owner(cursor, lang, owner) + + if changed: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + kw['changed'] = changed + kw['queries'] = executed_queries + db_connection.close() + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py new file mode 100644 index 00000000..f9423220 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, 
print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_membership +short_description: Add or remove PostgreSQL roles from groups +description: +- Adds or removes PostgreSQL roles from groups (other roles). +- Users are roles with login privilege. +- Groups are PostgreSQL roles usually without LOGIN privilege. +- "Common use case:" +- 1) add a new group (groups) by M(community.postgresql.postgresql_user) module with I(role_attr_flags=NOLOGIN) +- 2) grant them desired privileges by M(community.postgresql.postgresql_privs) module +- 3) add desired PostgreSQL users to the new group (groups) by this module +options: + groups: + description: + - The list of groups (roles) that need to be granted to or revoked from I(target_roles). + required: yes + type: list + elements: str + aliases: + - group + - source_role + - source_roles + target_roles: + description: + - The list of target roles (groups will be granted to them). + required: yes + type: list + elements: str + aliases: + - target_role + - users + - user + fail_on_role: + description: + - If C(yes), fail when group or target_role doesn't exist. If C(no), just warn and continue. + default: yes + type: bool + state: + description: + - Membership state. + - I(state=present) implies the I(groups)must be granted to I(target_roles). + - I(state=absent) implies the I(groups) must be revoked from I(target_roles). + type: str + default: present + choices: [ absent, present ] + db: + description: + - Name of database to connect to. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. 
+ type: str + trust_input: + description: + - If C(no), check whether values of parameters I(groups), + I(target_roles), I(session_role) are potentially dangerous. + - It makes sense to use C(no) only when SQL injections via the parameters are possible. + type: bool + default: yes + version_added: '0.2.0' +seealso: +- module: community.postgresql.postgresql_user +- module: community.postgresql.postgresql_privs +- module: community.postgresql.postgresql_owner +- name: PostgreSQL role membership reference + description: Complete reference of the PostgreSQL role membership documentation. + link: https://www.postgresql.org/docs/current/role-membership.html +- name: PostgreSQL role attributes reference + description: Complete reference of the PostgreSQL role attributes documentation. + link: https://www.postgresql.org/docs/current/role-attributes.html +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.postgresql.postgres +notes: +- Supports C(check_mode). +''' + +EXAMPLES = r''' +- name: Grant role read_only to alice and bob + community.postgresql.postgresql_membership: + group: read_only + target_roles: + - alice + - bob + state: present + +# you can also use target_roles: alice,bob,etc to pass the role list + +- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist + community.postgresql.postgresql_membership: + groups: + - read_only + - exec_func + target_role: bob + fail_on_role: no + state: absent +''' + +RETURN = r''' +queries: + description: List of executed queries. + returned: always + type: str + sample: [ "GRANT \"user_ro\" TO \"alice\"" ] +granted: + description: Dict of granted groups and roles. + returned: if I(state=present) + type: dict + sample: { "ro_group": [ "alice", "bob" ] } +revoked: + description: Dict of revoked groups and roles. + returned: if I(state=absent) + type: dict + sample: { "ro_group": [ "alice", "bob" ] } +state: + description: Membership state that tried to be set. 
+ returned: always + type: str + sample: "present" +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import check_input +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + PgMembership, + postgres_common_argument_spec, +) + + +# =========================================== +# Module execution. +# + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']), + target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']), + fail_on_role=dict(type='bool', default=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + db=dict(type='str', aliases=['login_db']), + session_role=dict(type='str'), + trust_input=dict(type='bool', default=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + groups = module.params['groups'] + target_roles = module.params['target_roles'] + fail_on_role = module.params['fail_on_role'] + state = module.params['state'] + session_role = module.params['session_role'] + trust_input = module.params['trust_input'] + if not trust_input: + # Check input for potentially dangerous elements: + check_input(module, groups, target_roles, session_role) + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ############## + # Create the object and do main job: + + pg_membership = PgMembership(module, cursor, groups, target_roles, 
fail_on_role) + + if state == 'present': + pg_membership.grant() + + elif state == 'absent': + pg_membership.revoke() + + # Rollback if it's possible and check_mode: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + cursor.close() + db_connection.close() + + # Make return values: + return_dict = dict( + changed=pg_membership.changed, + state=state, + groups=pg_membership.groups, + target_roles=pg_membership.target_roles, + queries=pg_membership.executed_queries, + ) + + if state == 'present': + return_dict['granted'] = pg_membership.granted + elif state == 'absent': + return_dict['revoked'] = pg_membership.revoked + + module.exit_json(**return_dict) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py new file mode 100644 index 00000000..62b968cb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py @@ -0,0 +1,454 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_owner +short_description: Change an owner of PostgreSQL database object +description: +- Change an owner of PostgreSQL database object. +- Also allows to reassign the ownership of database objects owned by a database role to another role. + +options: + new_owner: + description: + - Role (user/group) to set as an I(obj_name) owner. + type: str + required: yes + obj_name: + description: + - Name of a database object to change ownership. + - Mutually exclusive with I(reassign_owned_by). 
+ type: str + obj_type: + description: + - Type of a database object. + - Mutually exclusive with I(reassign_owned_by). + type: str + choices: [ database, function, matview, sequence, schema, table, tablespace, view ] + aliases: + - type + reassign_owned_by: + description: + - The list of role names. The ownership of all the objects within the current database, + and of all shared objects (databases, tablespaces), owned by this role(s) will be reassigned to I(owner). + - Pay attention - it reassigns all objects owned by this role(s) in the I(db)! + - If role(s) exists, always returns changed True. + - Cannot reassign ownership of objects that are required by the database system. + - Mutually exclusive with C(obj_type). + type: list + elements: str + fail_on_role: + description: + - If C(yes), fail when I(reassign_owned_by) role does not exist. + Otherwise just warn and continue. + - Mutually exclusive with I(obj_name) and I(obj_type). + default: yes + type: bool + db: + description: + - Name of database to connect to. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + trust_input: + description: + - If C(no), check whether values of parameters I(new_owner), I(obj_name), + I(reassign_owned_by), I(session_role) are potentially dangerous. + - It makes sense to use C(no) only when SQL injections via the parameters are possible. + type: bool + default: yes + version_added: '0.2.0' +seealso: +- module: community.postgresql.postgresql_user +- module: community.postgresql.postgresql_privs +- module: community.postgresql.postgresql_membership +- name: PostgreSQL REASSIGN OWNED command reference + description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation. 
+ link: https://www.postgresql.org/docs/current/sql-reassign-owned.html +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.postgresql.postgres +notes: +- Supports C(check_mode). +''' + +EXAMPLES = r''' +# Set owner as alice for function myfunc in database bar by ansible ad-hoc command: +# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function" + +- name: The same as above by playbook + community.postgresql.postgresql_owner: + db: bar + new_owner: alice + obj_name: myfunc + obj_type: function + +- name: Set owner as bob for table acme in database bar + community.postgresql.postgresql_owner: + db: bar + new_owner: bob + obj_name: acme + obj_type: table + +- name: Set owner as alice for view test_view in database bar + community.postgresql.postgresql_owner: + db: bar + new_owner: alice + obj_name: test_view + obj_type: view + +- name: Set owner as bob for tablespace ssd in database foo + community.postgresql.postgresql_owner: + db: foo + new_owner: bob + obj_name: ssd + obj_type: tablespace + +- name: Reassign all object in database bar owned by bob to alice + community.postgresql.postgresql_owner: + db: bar + new_owner: alice + reassign_owned_by: bob + +- name: Reassign all object in database bar owned by bob and bill to alice + community.postgresql.postgresql_owner: + db: bar + new_owner: alice + reassign_owned_by: + - bob + - bill +''' + +RETURN = r''' +queries: + description: List of executed queries. 
+ returned: always + type: str + sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ] +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, + pg_quote_identifier, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +class PgOwnership(object): + + """Class for changing ownership of PostgreSQL objects. + + Arguments: + module (AnsibleModule): Object of Ansible module class. + cursor (psycopg2.connect.cursor): Cursor object for interaction with the database. + role (str): Role name to set as a new owner of objects. + + Important: + If you want to add handling of a new type of database objects: + 1. Add a specific method for this like self.__set_db_owner(), etc. + 2. Add a condition with a check of ownership for new type objects to self.__is_owner() + 3. Add a condition with invocation of the specific method to self.set_owner() + 4. Add the information to the module documentation + That's all. + """ + + def __init__(self, module, cursor, role): + self.module = module + self.cursor = cursor + self.check_role_exists(role) + self.role = role + self.changed = False + self.executed_queries = [] + self.obj_name = '' + self.obj_type = '' + + def check_role_exists(self, role, fail_on_role=True): + """Check the role exists or not. + + Arguments: + role (str): Role name. + fail_on_role (bool): If True, fail when the role does not exist. + Otherwise just warn and continue. 
+ """ + if not self.__role_exists(role): + if fail_on_role: + self.module.fail_json(msg="Role '%s' does not exist" % role) + else: + self.module.warn("Role '%s' does not exist, pass" % role) + + return False + + else: + return True + + def reassign(self, old_owners, fail_on_role): + """Implements REASSIGN OWNED BY command. + + If success, set self.changed as True. + + Arguments: + old_owners (list): The ownership of all the objects within + the current database, and of all shared objects (databases, tablespaces), + owned by these roles will be reassigned to self.role. + fail_on_role (bool): If True, fail when a role from old_owners does not exist. + Otherwise just warn and continue. + """ + roles = [] + for r in old_owners: + if self.check_role_exists(r, fail_on_role): + roles.append('"%s"' % r) + + # Roles do not exist, nothing to do, exit: + if not roles: + return False + + old_owners = ','.join(roles) + + query = ['REASSIGN OWNED BY'] + query.append(old_owners) + query.append('TO "%s"' % self.role) + query = ' '.join(query) + + self.changed = exec_sql(self, query, return_bool=True) + + def set_owner(self, obj_type, obj_name): + """Change owner of a database object. + + Arguments: + obj_type (str): Type of object (like database, table, view, etc.). + obj_name (str): Object name. 
+ """ + self.obj_name = obj_name + self.obj_type = obj_type + + # if a new_owner is the object owner now, + # nothing to do: + if self.__is_owner(): + return False + + if obj_type == 'database': + self.__set_db_owner() + + elif obj_type == 'function': + self.__set_func_owner() + + elif obj_type == 'sequence': + self.__set_seq_owner() + + elif obj_type == 'schema': + self.__set_schema_owner() + + elif obj_type == 'table': + self.__set_table_owner() + + elif obj_type == 'tablespace': + self.__set_tablespace_owner() + + elif obj_type == 'view': + self.__set_view_owner() + + elif obj_type == 'matview': + self.__set_mat_view_owner() + + def __is_owner(self): + """Return True if self.role is the current object owner.""" + if self.obj_type == 'table': + query = ("SELECT 1 FROM pg_tables " + "WHERE tablename = %(obj_name)s " + "AND tableowner = %(role)s") + + elif self.obj_type == 'database': + query = ("SELECT 1 FROM pg_database AS d " + "JOIN pg_roles AS r ON d.datdba = r.oid " + "WHERE d.datname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'function': + query = ("SELECT 1 FROM pg_proc AS f " + "JOIN pg_roles AS r ON f.proowner = r.oid " + "WHERE f.proname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'sequence': + query = ("SELECT 1 FROM pg_class AS c " + "JOIN pg_roles AS r ON c.relowner = r.oid " + "WHERE c.relkind = 'S' AND c.relname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'schema': + query = ("SELECT 1 FROM information_schema.schemata " + "WHERE schema_name = %(obj_name)s " + "AND schema_owner = %(role)s") + + elif self.obj_type == 'tablespace': + query = ("SELECT 1 FROM pg_tablespace AS t " + "JOIN pg_roles AS r ON t.spcowner = r.oid " + "WHERE t.spcname = %(obj_name)s " + "AND r.rolname = %(role)s") + + elif self.obj_type == 'view': + query = ("SELECT 1 FROM pg_views " + "WHERE viewname = %(obj_name)s " + "AND viewowner = %(role)s") + + elif self.obj_type == 'matview': + query 
= ("SELECT 1 FROM pg_matviews " + "WHERE matviewname = %(obj_name)s " + "AND matviewowner = %(role)s") + + query_params = {'obj_name': self.obj_name, 'role': self.role} + return exec_sql(self, query, query_params, add_to_executed=False) + + def __set_db_owner(self): + """Set the database owner.""" + query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (self.obj_name, self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_func_owner(self): + """Set the function owner.""" + query = 'ALTER FUNCTION %s OWNER TO "%s"' % (self.obj_name, self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_seq_owner(self): + """Set the sequence owner.""" + query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_schema_owner(self): + """Set the schema owner.""" + query = 'ALTER SCHEMA %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'schema'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_table_owner(self): + """Set the table owner.""" + query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_tablespace_owner(self): + """Set the tablespace owner.""" + query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.obj_name, self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_view_owner(self): + """Set the view owner.""" + query = 'ALTER VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __set_mat_view_owner(self): + """Set the materialized view owner.""" + query = 'ALTER MATERIALIZED VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'), + self.role) + self.changed = exec_sql(self, query, return_bool=True) + + def __role_exists(self, 
role): + """Return True if role exists, otherwise return False.""" + query_params = {'role': role} + query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s" + return exec_sql(self, query, query_params, add_to_executed=False) + + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + new_owner=dict(type='str', required=True), + obj_name=dict(type='str'), + obj_type=dict(type='str', aliases=['type'], choices=[ + 'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']), + reassign_owned_by=dict(type='list', elements='str'), + fail_on_role=dict(type='bool', default=True), + db=dict(type='str', aliases=['login_db']), + session_role=dict(type='str'), + trust_input=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['obj_name', 'reassign_owned_by'], + ['obj_type', 'reassign_owned_by'], + ['obj_name', 'fail_on_role'], + ['obj_type', 'fail_on_role'], + ], + supports_check_mode=True, + ) + + new_owner = module.params['new_owner'] + obj_name = module.params['obj_name'] + obj_type = module.params['obj_type'] + reassign_owned_by = module.params['reassign_owned_by'] + fail_on_role = module.params['fail_on_role'] + session_role = module.params['session_role'] + trust_input = module.params['trust_input'] + if not trust_input: + # Check input for potentially dangerous elements: + check_input(module, new_owner, obj_name, reassign_owned_by, session_role) + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ############## + # Create the object and do main job: + pg_ownership = PgOwnership(module, cursor, new_owner) + + # if we want to change ownership: + if obj_name: + pg_ownership.set_owner(obj_type, obj_name) + + # if we want to reassign 
objects owned by roles: + elif reassign_owned_by: + pg_ownership.reassign(reassign_owned_by, fail_on_role) + + # Rollback if it's possible and check_mode: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + cursor.close() + db_connection.close() + + module.exit_json( + changed=pg_ownership.changed, + queries=pg_ownership.executed_queries, + ) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py new file mode 100644 index 00000000..de52e9f7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py @@ -0,0 +1,746 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) <sebastiaan.mannem@enterprisedb.com> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +''' +This module is used to manage postgres pg_hba files with Ansible. +''' + +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_pg_hba +short_description: Add, remove or modify a rule in a pg_hba file +description: + - The fundamental function of the module is to create, or delete lines in pg_hba files. + - The lines in the file should be in a typical pg_hba form and lines should be unique per key (type, databases, users, source). + If they are not unique and the SID is 'the one to change', only one for I(state=present) or + none for I(state=absent) of the SID's will remain. +extends_documentation_fragment: files +options: + address: + description: + - The source address/net where the connections could come from. + - Will not be used for entries of I(type)=C(local). 
+    - You can also use keywords C(all), C(samehost), and C(samenet).
+    default: samehost
+    type: str
+    aliases: [ source, src ]
+  backup:
+    description:
+      - If set, create a backup of the C(pg_hba) file before it is modified.
+        The location of the backup is returned in the (backup) variable by this module.
+    default: false
+    type: bool
+  backup_file:
+    description:
+      - Write backup to a specific backup file rather than a temp file.
+    type: str
+  create:
+    description:
+      - Create a C(pg_hba) file if none exists.
+      - When set to false, an error is raised when the C(pg_hba) file doesn't exist.
+    default: false
+    type: bool
+  contype:
+    description:
+      - Type of the rule. If not set, C(postgresql_pg_hba) will only return contents.
+    type: str
+    choices: [ local, host, hostnossl, hostssl ]
+  databases:
+    description:
+      - Databases this line applies to.
+    default: all
+    type: str
+  dest:
+    description:
+      - Path to C(pg_hba) file to modify.
+    type: path
+    required: true
+  method:
+    description:
+      - Authentication method to be used.
+    type: str
+    choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256, sspi, trust ]
+    default: md5
+  netmask:
+    description:
+      - The netmask of the source address.
+    type: str
+  options:
+    description:
+      - Additional options for the authentication I(method).
+    type: str
+  order:
+    description:
+      - The entries will be written out in a specific order.
+        With this option you can control by which field they are ordered first, second and last.
+        s=source, d=databases, u=users.
+        This option is deprecated since 2.9 and will be removed in community.postgresql 3.0.0.
+        Sort order is now hardcoded to sdu.
+    type: str
+    default: sdu
+    choices: [ sdu, sud, dsu, dus, usd, uds ]
+  state:
+    description:
+      - The lines will be added/modified when C(state=present) and removed when C(state=absent).
+    type: str
+    default: present
+    choices: [ absent, present ]
+  users:
+    description:
+      - Users this line applies to.
+    type: str
+    default: all
+
+notes:
+  - The default authentication assumes that on the host, you are either logging in as or
+    sudo'ing to an account with appropriate permissions to read and modify the file.
+  - This module also returns the pg_hba info. You can use this module to only retrieve it by only specifying I(dest).
+    The info can be found in the returned data under key pg_hba, being a list, containing a dict per rule.
+  - This module will sort resulting C(pg_hba) files if a rule change is required.
+    This could give unexpected results with manually created hba files, if they were improperly sorted.
+    For example, a rule was created for a net first and for an IP in that net range next.
+    In that situation, the 'ip specific rule' will never hit; it is obsolete in the C(pg_hba) file.
+    After the C(pg_hba) file is rewritten by the M(community.postgresql.postgresql_pg_hba) module, the ip specific rule will be sorted above the range rule.
+    And then it will hit, which will give unexpected results.
+  - With the 'order' parameter you can control which field is used to sort first, next and last.
+  - The module supports a check mode and a diff mode.
+
+seealso:
+- name: PostgreSQL pg_hba.conf file reference
+  description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
+ link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html + +requirements: + - ipaddress + +author: Sebastiaan Mannem (@sebasmannem) +''' + +EXAMPLES = ''' +- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication + community.postgresql.postgresql_pg_hba: + dest: /var/lib/postgres/data/pg_hba.conf + contype: host + users: joe,simon + source: ::1 + databases: sales,logistics + method: peer + create: true + +- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication + community.postgresql.postgresql_pg_hba: + dest: /var/lib/postgres/data/pg_hba.conf + contype: host + users: replication + source: 192.168.0.100/24 + databases: replication + method: cert + +- name: Revoke access from local user mary on database mydb + community.postgresql.postgresql_pg_hba: + dest: /var/lib/postgres/data/pg_hba.conf + contype: local + users: mary + databases: mydb + state: absent +''' + +RETURN = r''' +msgs: + description: List of textual messages what was done. + returned: always + type: list + sample: + "msgs": [ + "Removing", + "Changed", + "Writing" + ] +backup_file: + description: File that the original pg_hba file was backed up to. + returned: changed + type: str + sample: /tmp/pg_hba_jxobj_p +pg_hba: + description: List of the pg_hba rules as they are configured in the specified hba file. 
+ returned: always + type: list + sample: + "pg_hba": [ + { + "db": "all", + "method": "md5", + "src": "samehost", + "type": "host", + "usr": "all" + } + ] +''' + +import os +import re +import traceback + +IPADDRESS_IMP_ERR = None +try: + import ipaddress +except ImportError: + IPADDRESS_IMP_ERR = traceback.format_exc() + +import tempfile +import shutil +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +# from ansible.module_utils.postgres import postgres_common_argument_spec + +PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer", + "ldap", "radius", "cert", "pam", "scram-sha-256"] +PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl"] +PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"] +PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options'] + +WHITESPACES_RE = re.compile(r'\s+') + + +class PgHbaError(Exception): + ''' + This exception is raised when parsing the pg_hba file ends in an error. + ''' + + +class PgHbaRuleError(PgHbaError): + ''' + This exception is raised when parsing the pg_hba file ends in an error. + ''' + + +class PgHbaRuleChanged(PgHbaRuleError): + ''' + This exception is raised when a new parsed rule is a changed version of an existing rule. + ''' + + +class PgHbaValueError(PgHbaError): + ''' + This exception is raised when a new parsed rule is a changed version of an existing rule. + ''' + + +class PgHbaRuleValueError(PgHbaRuleError): + ''' + This exception is raised when a new parsed rule is a changed version of an existing rule. + ''' + + +class PgHba(object): + """ + PgHba object to read/write entries to/from. + pg_hba_file - the pg_hba file almost always /etc/pg_hba + """ + def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False): + if order not in PG_HBA_ORDERS: + msg = "invalid order setting {0} (should be one of '{1}')." 
+ raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS))) + self.pg_hba_file = pg_hba_file + self.rules = None + self.comment = None + self.order = order + self.backup = backup + self.last_backup = None + self.create = create + self.unchanged() + # self.databases will be update by add_rule and gives some idea of the number of databases + # (at least that are handled by this pg_hba) + self.databases = set(['postgres', 'template0', 'template1']) + + # self.databases will be update by add_rule and gives some idea of the number of users + # (at least that are handled by this pg_hba) since this might also be groups with multiple + # users, this might be totally off, but at least it is some info... + self.users = set(['postgres']) + + self.read() + + def unchanged(self): + ''' + This method resets self.diff to a empty default + ''' + self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []}, + 'after': {'file': self.pg_hba_file, 'pg_hba': []}} + + def read(self): + ''' + Read in the pg_hba from the system + ''' + self.rules = {} + self.comment = [] + # read the pg_hbafile + try: + with open(self.pg_hba_file, 'r') as file: + for line in file: + line = line.strip() + # uncomment + if '#' in line: + line, comment = line.split('#', 1) + self.comment.append('#' + comment) + try: + self.add_rule(PgHbaRule(line=line)) + except PgHbaRuleError: + pass + self.unchanged() + except IOError: + pass + + def write(self, backup_file=''): + ''' + This method writes the PgHba rules (back) to a file. + ''' + if not self.changed(): + return False + + contents = self.render() + if self.pg_hba_file: + if not (os.path.isfile(self.pg_hba_file) or self.create): + raise PgHbaError("pg_hba file '{0}' doesn't exist. 
" + "Use create option to autocreate.".format(self.pg_hba_file)) + if self.backup and os.path.isfile(self.pg_hba_file): + if backup_file: + self.last_backup = backup_file + else: + __backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba') + shutil.copy(self.pg_hba_file, self.last_backup) + fileh = open(self.pg_hba_file, 'w') + else: + filed, __path = tempfile.mkstemp(prefix='pg_hba') + fileh = os.fdopen(filed, 'w') + + fileh.write(contents) + self.unchanged() + fileh.close() + return True + + def add_rule(self, rule): + ''' + This method can be used to add a rule to the list of rules in this PgHba object + ''' + key = rule.key() + try: + try: + oldrule = self.rules[key] + except KeyError: + raise PgHbaRuleChanged + ekeys = set(list(oldrule.keys()) + list(rule.keys())) + ekeys.remove('line') + for k in ekeys: + if oldrule.get(k) != rule.get(k): + raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule)) + except PgHbaRuleChanged: + self.rules[key] = rule + self.diff['after']['pg_hba'].append(rule.line()) + if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']: + databases = set(rule['db'].split(',')) + self.databases.update(databases) + if rule['usr'] != 'all': + user = rule['usr'] + if user[0] == '+': + user = user[1:] + self.users.add(user) + + def remove_rule(self, rule): + ''' + This method can be used to find and remove a rule. It doesn't look for the exact rule, only + the rule with the same key. 
+ ''' + keys = rule.key() + try: + del self.rules[keys] + self.diff['before']['pg_hba'].append(rule.line()) + except KeyError: + pass + + def get_rules(self, with_lines=False): + ''' + This method returns all the rules of the PgHba object + ''' + rules = sorted(self.rules.values()) + for rule in rules: + ret = {} + for key, value in rule.items(): + ret[key] = value + if not with_lines: + if 'line' in ret: + del ret['line'] + else: + ret['line'] = rule.line() + + yield ret + + def render(self): + ''' + This method renders the content of the PgHba rules and comments. + The returning value can be used directly to write to a new file. + ''' + comment = '\n'.join(self.comment) + rule_lines = '\n'.join([rule['line'] for rule in self.get_rules(with_lines=True)]) + result = comment + '\n' + rule_lines + # End it properly with a linefeed (if not already). + if result and result[-1] not in ['\n', '\r']: + result += '\n' + return result + + def changed(self): + ''' + This method can be called to detect if the PgHba file has been changed. + ''' + return bool(self.diff['before']['pg_hba'] or self.diff['after']['pg_hba']) + + +class PgHbaRule(dict): + ''' + This class represents one rule as defined in a line in a PgHbaFile. + ''' + + def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None, + method=None, options=None, line=None): + ''' + This function can be called with a comma seperated list of databases and a comma seperated + list of users and it will act as a generator that returns a expanded list of rules one by + one. 
+ ''' + + super(PgHbaRule, self).__init__() + + if line: + # Read values from line if parsed + self.fromline(line) + + # read rule cols from parsed items + rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options])) + for key, value in rule.items(): + if value: + self[key] = value + + # Some sanity checks + for key in ['method', 'type']: + if key not in self: + raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self)) + + if self['method'] not in PG_HBA_METHODS: + msg = "invalid method {0} (should be one of '{1}')." + raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS))) + + if self['type'] not in PG_HBA_TYPES: + msg = "invalid connection type {0} (should be one of '{1}')." + raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES))) + + if self['type'] == 'local': + self.unset('src') + self.unset('mask') + elif 'src' not in self: + raise PgHbaRuleError('Missing src in rule {1}'.format(self)) + elif '/' in self['src']: + self.unset('mask') + else: + self['src'] = str(self.source()) + self.unset('mask') + + def unset(self, key): + ''' + This method is used to unset certain columns if they exist + ''' + if key in self: + del self[key] + + def line(self): + ''' + This method can be used to return (or generate) the line + ''' + try: + return self['line'] + except KeyError: + self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()]) + return self['line'] + + def fromline(self, line): + ''' + split into 'type', 'db', 'usr', 'src', 'mask', 'method', 'options' cols + ''' + if WHITESPACES_RE.sub('', line) == '': + # empty line. skip this one... + return + cols = WHITESPACES_RE.split(line) + if len(cols) < 4: + msg = "Rule {0} has too few columns." + raise PgHbaValueError(msg.format(line)) + if cols[0] not in PG_HBA_TYPES: + msg = "Rule {0} has unknown type: {1}." 
+ raise PgHbaValueError(msg.format(line, cols[0])) + if cols[0] == 'local': + cols.insert(3, None) # No address + cols.insert(3, None) # No IP-mask + if len(cols) < 6: + cols.insert(4, None) # No IP-mask + elif cols[5] not in PG_HBA_METHODS: + cols.insert(4, None) # No IP-mask + if cols[5] not in PG_HBA_METHODS: + raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5])) + + if len(cols) < 7: + cols.insert(6, None) # No auth-options + else: + cols[6] = " ".join(cols[6:]) # combine all auth-options + rule = dict(zip(PG_HBA_HDR, cols[:7])) + for key, value in rule.items(): + if value: + self[key] = value + + def key(self): + ''' + This method can be used to get the key from a rule. + ''' + if self['type'] == 'local': + source = 'local' + else: + source = str(self.source()) + return (source, self['db'], self['usr']) + + def source(self): + ''' + This method is used to get the source of a rule as an ipaddress object if possible. + ''' + if 'mask' in self.keys(): + try: + ipaddress.ip_address(u'{0}'.format(self['src'])) + except ValueError: + raise PgHbaValueError('Mask was specified, but source "{0}" ' + 'is no valid ip'.format(self['src'])) + # ipaddress module cannot work with ipv6 netmask, so lets convert it to prefixlen + # furthermore ipv4 with bad netmask throws 'Rule {} doesn't seem to be an ip, but has a + # mask error that doesn't seem to describe what is going on. 
+ try: + mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask'])) + except ValueError: + raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask'])) + binvalue = "{0:b}".format(int(mask_as_ip)) + if '01' in binvalue: + raise PgHbaValueError('IP mask {0} seems invalid ' + '(binary value has 1 after 0)'.format(self['mask'])) + prefixlen = binvalue.count('1') + sourcenw = '{0}/{1}'.format(self['src'], prefixlen) + try: + return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False) + except ValueError: + raise PgHbaValueError('{0} is no valid address range'.format(sourcenw)) + + try: + return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False) + except ValueError: + return self['src'] + + def __lt__(self, other): + """This function helps sorted to decide how to sort. + + It just checks itself against the other and decides on some key values + if it should be sorted higher or lower in the list. + The way it works: + For networks, every 1 in 'netmask in binary' makes the subnet more specific. + Therefore I chose to use prefix as the weight. + So a single IP (/32) should have twice the weight of a /16 network. + To keep everything in the same weight scale, + - for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip) + - for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip) + Therefore for ipv4, we use prefixlen (0-32) * 4 for weight, + which corresponds to ipv6 (0-128). 
+ """ + myweight = self.source_weight() + hisweight = other.source_weight() + if myweight != hisweight: + return myweight > hisweight + + myweight = self.db_weight() + hisweight = other.db_weight() + if myweight != hisweight: + return myweight < hisweight + + myweight = self.user_weight() + hisweight = other.user_weight() + if myweight != hisweight: + return myweight < hisweight + try: + return self['src'] < other['src'] + except TypeError: + return self.source_type_weight() < other.source_type_weight() + except Exception: + # When all else fails, just compare the exact line. + return self.line() < other.line() + + def source_weight(self): + """Report the weight of this source net. + + Basically this is the netmask, where IPv4 is normalized to IPv6 + (IPv4/32 has the same weight as IPv6/128). + """ + if self['type'] == 'local': + return 130 + + sourceobj = self.source() + if isinstance(sourceobj, ipaddress.IPv4Network): + return sourceobj.prefixlen * 4 + if isinstance(sourceobj, ipaddress.IPv6Network): + return sourceobj.prefixlen + if isinstance(sourceobj, str): + # You can also write all to match any IP address, + # samehost to match any of the server's own IP addresses, + # or samenet to match any address in any subnet that the server is connected to. + if sourceobj == 'all': + # (all is considered the full range of all ips, which has a weight of 0) + return 0 + if sourceobj == 'samehost': + # (sort samehost second after local) + return 129 + if sourceobj == 'samenet': + # Might write some fancy code to determine all prefix's + # from all interfaces and find a sane value for this one. + # For now, let's assume IPv4/24 or IPv6/96 (both have weight 96). + return 96 + if sourceobj[0] == '.': + # suffix matching (domain name), let's assume a very large scale + # and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64). 
+ return 64 + # hostname, let's assume only one host matches, which is + # IPv4/32 or IPv6/128 (both have weight 128) + return 128 + raise PgHbaValueError('Cannot deduct the source weight of this source {1}'.format(sourceobj)) + + def source_type_weight(self): + """Give a weight on the type of this source. + + Basically make sure that IPv6Networks are sorted higher than IPv4Networks. + This is a 'when all else fails' solution in __lt__. + """ + if self['type'] == 'local': + return 3 + + sourceobj = self.source() + if isinstance(sourceobj, ipaddress.IPv4Network): + return 2 + if isinstance(sourceobj, ipaddress.IPv6Network): + return 1 + if isinstance(sourceobj, str): + return 0 + raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj)) + + def db_weight(self): + """Report the weight of the database. + + Normally, just 1, but for replication this is 0, and for 'all', this is more than 2. + """ + if self['db'] == 'all': + return 100000 + if self['db'] == 'replication': + return 0 + if self['db'] in ['samerole', 'samegroup']: + return 1 + return 1 + self['db'].count(',') + + def user_weight(self): + """Report weight when comparing users.""" + if self['usr'] == 'all': + return 1000000 + return 1 + + +def main(): + ''' + This function is the main function of this module + ''' + # argument_spec = postgres_common_argument_spec() + argument_spec = dict() + argument_spec.update( + address=dict(type='str', default='samehost', aliases=['source', 'src']), + backup=dict(type='bool', default=False), + backup_file=dict(type='str'), + contype=dict(type='str', default=None, choices=PG_HBA_TYPES), + create=dict(type='bool', default=False), + databases=dict(type='str', default='all'), + dest=dict(type='path', required=True), + method=dict(type='str', default='md5', choices=PG_HBA_METHODS), + netmask=dict(type='str'), + options=dict(type='str'), + order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS, + removed_in_version='3.0.0', 
removed_from_collection='community.postgresql'), + state=dict(type='str', default="present", choices=["absent", "present"]), + users=dict(type='str', default='all') + ) + module = AnsibleModule( + argument_spec=argument_spec, + add_file_common_args=True, + supports_check_mode=True + ) + if IPADDRESS_IMP_ERR is not None: + module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR) + + contype = module.params["contype"] + create = bool(module.params["create"] or module.check_mode) + if module.check_mode: + backup = False + else: + backup = module.params['backup'] + backup_file = module.params['backup_file'] + databases = module.params["databases"] + dest = module.params["dest"] + + method = module.params["method"] + netmask = module.params["netmask"] + options = module.params["options"] + order = module.params["order"] + source = module.params["address"] + state = module.params["state"] + users = module.params["users"] + + ret = {'msgs': []} + try: + pg_hba = PgHba(dest, order, backup=backup, create=create) + except PgHbaError as error: + module.fail_json(msg='Error reading file:\n{0}'.format(error)) + + if contype: + try: + for database in databases.split(','): + for user in users.split(','): + rule = PgHbaRule(contype, database, user, source, netmask, method, options) + if state == "present": + ret['msgs'].append('Adding') + pg_hba.add_rule(rule) + else: + ret['msgs'].append('Removing') + pg_hba.remove_rule(rule) + except PgHbaError as error: + module.fail_json(msg='Error modifying rules:\n{0}'.format(error)) + file_args = module.load_file_common_arguments(module.params) + ret['changed'] = changed = pg_hba.changed() + if changed: + ret['msgs'].append('Changed') + ret['diff'] = pg_hba.diff + + if not module.check_mode: + ret['msgs'].append('Writing') + try: + if pg_hba.write(backup_file): + module.set_fs_attributes_if_different(file_args, True, pg_hba.diff, + expand=False) + except PgHbaError as error: + module.fail_json(msg='Error writing 
file:\n{0}'.format(error)) + if pg_hba.last_backup: + ret['backup_file'] = pg_hba.last_backup + + ret['pg_hba'] = list(pg_hba.get_rules()) + module.exit_json(**ret) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py new file mode 100644 index 00000000..2a7aa0cd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_ping +short_description: Check remote PostgreSQL server availability +description: +- Simple module to check remote PostgreSQL server availability. +options: + db: + description: + - Name of a database to connect to. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + version_added: '0.2.0' + trust_input: + description: + - If C(no), check whether a value of I(session_role) is potentially dangerous. + - It makes sense to use C(no) only when SQL injections via I(session_role) are possible. 
+ type: bool + default: yes + version_added: '0.2.0' +seealso: +- module: community.postgresql.postgresql_info +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.postgresql.postgres +notes: +- Supports C(check_mode). +''' + +EXAMPLES = r''' +# PostgreSQL ping dbsrv server from the shell: +# ansible dbsrv -m postgresql_ping + +# In the example below you need to generate certificates previously. +# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information. +- name: PostgreSQL ping dbsrv server using not default credentials and ssl + community.postgresql.postgresql_ping: + db: protected_db + login_host: dbsrv + login_user: secret + login_password: secret_pass + ca_cert: /root/root.crt + ssl_mode: verify-full +''' + +RETURN = r''' +is_available: + description: PostgreSQL server availability. + returned: always + type: bool + sample: true +server_version: + description: PostgreSQL server version. + returned: always + type: dict + sample: { major: 10, minor: 1 } +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +# =========================================== +# PostgreSQL module specific support methods. 
+# + + +class PgPing(object): + def __init__(self, module, cursor): + self.module = module + self.cursor = cursor + self.is_available = False + self.version = {} + + def do(self): + self.get_pg_version() + return (self.is_available, self.version) + + def get_pg_version(self): + query = "SELECT version()" + raw = exec_sql(self, query, add_to_executed=False)[0][0] + if raw: + self.is_available = True + raw = raw.split()[1].split('.') + self.version = dict( + major=int(raw[0]), + minor=int(raw[1].rstrip(',')), + ) + + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + db=dict(type='str', aliases=['login_db']), + session_role=dict(type='str'), + trust_input=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + if not module.params['trust_input']: + # Check input for potentially dangerous elements: + check_input(module, module.params['session_role']) + + # Set some default values: + cursor = False + db_connection = False + result = dict( + changed=False, + is_available=False, + server_version=dict(), + ) + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params, fail_on_conn=False) + + if db_connection is not None: + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Do job: + pg_ping = PgPing(module, cursor) + if cursor: + # If connection established: + result["is_available"], result["server_version"] = pg_ping.do() + db_connection.rollback() + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py new file mode 100644 index 00000000..9cd1b155 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py @@ -0,0 +1,1172 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_privs +short_description: Grant or revoke privileges on PostgreSQL database objects +description: +- Grant or revoke privileges on PostgreSQL database objects. +- This module is basically a wrapper around most of the functionality of + PostgreSQL's GRANT and REVOKE statements with detection of changes + (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)). +options: + database: + description: + - Name of database to connect to. + required: yes + type: str + aliases: + - db + - login_db + state: + description: + - If C(present), the specified privileges are granted, if C(absent) they are revoked. + type: str + default: present + choices: [ absent, present ] + privs: + description: + - Comma separated list of privileges to grant/revoke. + type: str + aliases: + - priv + type: + description: + - Type of database object to set privileges on. + - The C(default_privs) choice is available starting at version 2.7. + - The C(foreign_data_wrapper) and C(foreign_server) object types are available since Ansible version 2.8. + - The C(type) choice is available since Ansible version 2.10. + - The C(procedure) is supported since collection version 1.3.0 and PostgreSQL 11. + type: str + default: table + choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function, + group, language, table, tablespace, schema, sequence, type , procedure] + objs: + description: + - Comma separated list of database objects to set privileges on. 
+ - If I(type) is C(table), C(partition table), C(sequence), C(function) or C(procedure), + the special valueC(ALL_IN_SCHEMA) can be provided instead to specify all + database objects of type I(type) in the schema specified via I(schema). + (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available + for C(function) and C(partition table) since Ansible 2.8). + - C(procedure) is supported since PostgreSQL 11 and M(community.postgresql) collection 1.3.0. + - If I(type) is C(database), this parameter can be omitted, in which case + privileges are set for the database specified via I(database). + - If I(type) is I(function) or I(procedure), colons (":") in object names will be + replaced with commas (needed to specify signatures, see examples). + type: str + aliases: + - obj + schema: + description: + - Schema that contains the database objects specified via I(objs). + - May only be provided if I(type) is C(table), C(sequence), C(function), C(procedure), C(type), + or C(default_privs). Defaults to C(public) in these cases. + - Pay attention, for embedded types when I(type=type) + I(schema) can be C(pg_catalog) or C(information_schema) respectively. + type: str + roles: + description: + - Comma separated list of role (user/group) names to set permissions for. + - The special value C(PUBLIC) can be provided instead to set permissions + for the implicitly defined PUBLIC group. + type: str + required: yes + aliases: + - role + fail_on_role: + description: + - If C(yes), fail when target role (for whom privs need to be granted) does not exist. + Otherwise just warn and continue. + default: yes + type: bool + session_role: + description: + - Switch to session_role after connecting. + - The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally. 
+ type: str + target_roles: + description: + - A list of existing role (user/group) names to set as the + default permissions for database objects subsequently created by them. + - Parameter I(target_roles) is only available with C(type=default_privs). + type: str + grant_option: + description: + - Whether C(role) may grant/revoke the specified privileges/group memberships to others. + - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes. + - I(grant_option) only has an effect if I(state) is C(present). + type: bool + aliases: + - admin_option + host: + description: + - Database host address. If unspecified, connect via Unix socket. + type: str + aliases: + - login_host + port: + description: + - Database port to connect to. + type: int + default: 5432 + aliases: + - login_port + unix_socket: + description: + - Path to a Unix domain socket for local connections. + type: str + aliases: + - login_unix_socket + login: + description: + - The username to authenticate with. + type: str + default: postgres + aliases: + - login_user + password: + description: + - The password to authenticate with. + type: str + aliases: + - login_password + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. + type: str + aliases: + - ssl_rootcert + trust_input: + description: + - If C(no), check whether values of parameters I(roles), I(target_roles), I(session_role), + I(schema) are potentially dangerous. 
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible. + type: bool + default: yes + version_added: '0.2.0' + usage_on_types: + description: + - When adding default privileges, the module always implicitly adds ``USAGE ON TYPES``. + - To avoid this behavior, set I(usage_on_types) to C(no). + - Added to save backwards compatibility. + - Used only when adding default privileges, ignored otherwise. + type: bool + default: yes + version_added: '1.2.0' + +notes: +- Supports C(check_mode). +- Parameters that accept comma separated lists (I(privs), I(objs), I(roles)) + have singular alias names (I(priv), I(obj), I(role)). +- To revoke only C(GRANT OPTION) for a specific object, set I(state) to + C(present) and I(grant_option) to C(no) (see examples). +- Note that when revoking privileges from a role R, this role may still have + access via privileges granted to any role R is a member of including C(PUBLIC). +- Note that when you use C(PUBLIC) role, the module always reports that the state has been changed. +- Note that when revoking privileges from a role R, you do so as the user + specified via I(login). If R has been granted the same privileges by + another user also, R can still access database objects via these privileges. +- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). + +seealso: +- module: community.postgresql.postgresql_user +- module: community.postgresql.postgresql_owner +- module: community.postgresql.postgresql_membership +- name: PostgreSQL privileges + description: General information about PostgreSQL privileges. + link: https://www.postgresql.org/docs/current/ddl-priv.html +- name: PostgreSQL GRANT command reference + description: Complete reference of the PostgreSQL GRANT command documentation. + link: https://www.postgresql.org/docs/current/sql-grant.html +- name: PostgreSQL REVOKE command reference + description: Complete reference of the PostgreSQL REVOKE command documentation. 
+ link: https://www.postgresql.org/docs/current/sql-revoke.html + +extends_documentation_fragment: +- community.postgresql.postgres + + +author: +- Bernhard Weitzhofer (@b6d) +- Tobias Birkefeld (@tcraxs) +''' + +EXAMPLES = r''' +# On database "library": +# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors +# TO librarian, reader WITH GRANT OPTION +- name: Grant privs to librarian and reader on database library + community.postgresql.postgresql_privs: + database: library + state: present + privs: SELECT,INSERT,UPDATE + type: table + objs: books,authors + schema: public + roles: librarian,reader + grant_option: yes + +- name: Same as above leveraging default values + community.postgresql.postgresql_privs: + db: library + privs: SELECT,INSERT,UPDATE + objs: books,authors + roles: librarian,reader + grant_option: yes + +# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader +# Note that role "reader" will be *granted* INSERT privilege itself if this +# isn't already the case (since state: present). +- name: Revoke privs from reader + community.postgresql.postgresql_privs: + db: library + state: present + priv: INSERT + obj: books + role: reader + grant_option: no + +# "public" is the default schema. This also works for PostgreSQL 8.x. +- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader + community.postgresql.postgresql_privs: + db: library + state: absent + privs: INSERT,UPDATE + objs: ALL_IN_SCHEMA + role: reader + +- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian + community.postgresql.postgresql_privs: + db: library + privs: ALL + type: schema + objs: public,math + role: librarian + +# Note the separation of arguments with colons. 
+- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader + community.postgresql.postgresql_privs: + db: library + privs: ALL + type: function + obj: add(int:int) + schema: math + roles: librarian,reader + +# Note that group role memberships apply cluster-wide and therefore are not +# restricted to database "library" here. +- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION + community.postgresql.postgresql_privs: + db: library + type: group + objs: librarian,reader + roles: alice,bob + admin_option: yes + +# Note that here "db: postgres" specifies the database to connect to, not the +# database to grant privileges on (which is specified via the "objs" param) +- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian + community.postgresql.postgresql_privs: + db: postgres + privs: ALL + type: database + obj: library + role: librarian + +# If objs is omitted for type "database", it defaults to the database +# to which the connection is established +- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian + community.postgresql.postgresql_privs: + db: library + privs: ALL + type: database + role: librarian + +# Available since version 2.7 +# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS +# ALL_DEFAULT works only with privs=ALL +# For specific +- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian + community.postgresql.postgresql_privs: + db: library + objs: ALL_DEFAULT + privs: ALL + type: default_privs + role: librarian + grant_option: yes + +# Available since version 2.7 +# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS +# ALL_DEFAULT works only with privs=ALL +# For specific +- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1 + community.postgresql.postgresql_privs: + db: library + objs: TABLES,SEQUENCES + privs: SELECT + type: default_privs + role: reader + +- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2 + 
 community.postgresql.postgresql_privs:
+ db: library
+ objs: TYPES
+ privs: USAGE
+ type: default_privs
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
+ community.postgresql.postgresql_privs:
+ db: test
+ objs: fdw
+ privs: ALL
+ type: foreign_data_wrapper
+ role: reader
+
+# Available since community.postgresql 0.2.0
+- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
+ community.postgresql.postgresql_privs:
+ db: test
+ objs: customtype
+ privs: ALL
+ type: type
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
+ community.postgresql.postgresql_privs:
+ db: test
+ objs: fdw_server
+ privs: ALL
+ type: foreign_server
+ role: reader
+
+# Available since version 2.8
+# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
+- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
+ community.postgresql.postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since collection version 1.3.0
+# Grant 'execute' permissions on all procedures in schema 'common' to role 'caller'
+# Needs PostgreSQL 11 or higher and community.postgresql 1.3.0 or higher
+- name: GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA common TO caller
+ community.postgresql.postgresql_privs:
+ type: procedure
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# GRANT SELECT privileges for new TABLES objects created by librarian as
+# default to the role reader.
+# For specific +- name: ALTER privs + community.postgresql.postgresql_privs: + db: library + schema: library + objs: TABLES + privs: SELECT + type: default_privs + role: reader + target_roles: librarian + +# Available since version 2.8 +# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader +# REVOKE SELECT privileges for new TABLES objects created by librarian as +# default from the role reader. +# For specific +- name: ALTER privs + community.postgresql.postgresql_privs: + db: library + state: absent + schema: library + objs: TABLES + privs: SELECT + type: default_privs + role: reader + target_roles: librarian + +# Available since community.postgresql 0.2.0 +- name: Grant type privileges for pg_catalog.numeric type to alice + community.postgresql.postgresql_privs: + type: type + roles: alice + privs: ALL + objs: numeric + schema: pg_catalog + db: acme +''' + +RETURN = r''' +queries: + description: List of executed queries. + returned: always + type: list + sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";'] +''' + +import traceback + +PSYCOPG2_IMP_ERR = None +try: + import psycopg2 + import psycopg2.extensions +except ImportError: + PSYCOPG2_IMP_ERR = traceback.format_exc() + psycopg2 = None + +# import module snippets +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + pg_quote_identifier, + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import postgres_common_argument_spec +from ansible.module_utils._text import to_native + +VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', + 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT', + 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL', 'USAGE')) +VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'), + 'SEQUENCES': 
('ALL', 'SELECT', 'UPDATE', 'USAGE'), + 'FUNCTIONS': ('ALL', 'EXECUTE'), + 'TYPES': ('ALL', 'USAGE')} + +executed_queries = [] + + +class Error(Exception): + pass + + +def role_exists(module, cursor, rolname): + """Check user exists or not""" + query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname + try: + cursor.execute(query) + return cursor.rowcount > 0 + + except Exception as e: + module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e))) + + return False + + +# We don't have functools.partial in Python < 2.5 +def partial(f, *args, **kwargs): + """Partial function application""" + + def g(*g_args, **g_kwargs): + new_kwargs = kwargs.copy() + new_kwargs.update(g_kwargs) + return f(*(args + g_args), **g_kwargs) + + g.f = f + g.args = args + g.kwargs = kwargs + return g + + +class Connection(object): + """Wrapper around a psycopg2 connection with some convenience methods""" + + def __init__(self, params, module): + self.database = params.database + self.module = module + # To use defaults values, keyword arguments must be absent, so + # check which values are empty and don't include in the **kw + # dictionary + params_map = { + "host": "host", + "login": "user", + "password": "password", + "port": "port", + "database": "database", + "ssl_mode": "sslmode", + "ca_cert": "sslrootcert" + } + + kw = dict((params_map[k], getattr(params, k)) for k in params_map + if getattr(params, k) != '' and getattr(params, k) is not None) + + # If a unix_socket is specified, incorporate it here. 
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + if is_localhost and params.unix_socket != "": + kw["host"] = params.unix_socket + + sslrootcert = params.ca_cert + if psycopg2.__version__ < '2.4.3' and sslrootcert is not None: + raise ValueError('psycopg2 must be at least 2.4.3 in order to user the ca_cert parameter') + + self.connection = psycopg2.connect(**kw) + self.cursor = self.connection.cursor() + self.pg_version = self.connection.server_version + + def commit(self): + self.connection.commit() + + def rollback(self): + self.connection.rollback() + + @property + def encoding(self): + """Connection encoding in Python-compatible form""" + return psycopg2.extensions.encodings[self.connection.encoding] + + # Methods for querying database objects + + # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like + # phrases in GRANT or REVOKE statements, therefore alternative methods are + # provided here. + + def schema_exists(self, schema): + query = """SELECT count(*) + FROM pg_catalog.pg_namespace WHERE nspname = %s""" + self.cursor.execute(query, (schema,)) + return self.cursor.fetchone()[0] > 0 + + def get_all_tables_in_schema(self, schema): + if not self.schema_exists(schema): + raise Error('Schema "%s" does not exist.' % schema) + query = """SELECT relname + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')""" + self.cursor.execute(query, (schema,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_all_sequences_in_schema(self, schema): + if not self.schema_exists(schema): + raise Error('Schema "%s" does not exist.' 
% schema) + query = """SELECT relname + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind = 'S'""" + self.cursor.execute(query, (schema,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_all_functions_in_schema(self, schema): + if not self.schema_exists(schema): + raise Error('Schema "%s" does not exist.' % schema) + + query = ("SELECT p.proname, oidvectortypes(p.proargtypes) " + "FROM pg_catalog.pg_proc p " + "JOIN pg_namespace n ON n.oid = p.pronamespace " + "WHERE nspname = %s") + + if self.pg_version >= 110000: + query += " and p.prokind = 'f'" + + self.cursor.execute(query, (schema,)) + return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()] + + def get_all_procedures_in_schema(self, schema): + if self.pg_version < 110000: + raise Error("PostgreSQL verion must be >= 11 for type=procedure. Exit") + + if not self.schema_exists(schema): + raise Error('Schema "%s" does not exist.' % schema) + + query = ("SELECT p.proname, oidvectortypes(p.proargtypes) " + "FROM pg_catalog.pg_proc p " + "JOIN pg_namespace n ON n.oid = p.pronamespace " + "WHERE nspname = %s and p.prokind = 'p'") + + self.cursor.execute(query, (schema,)) + return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()] + + # Methods for getting access control lists and group membership info + + # To determine whether anything has changed after granting/revoking + # privileges, we compare the access control lists of the specified database + # objects before and afterwards. Python's list/string comparison should + # suffice for change detection, we should not actually have to parse ACLs. + # The same should apply to group membership information. 
+ + def get_table_acls(self, schema, tables): + query = """SELECT relacl + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s) + ORDER BY relname""" + self.cursor.execute(query, (schema, tables)) + return [t[0] for t in self.cursor.fetchall()] + + def get_sequence_acls(self, schema, sequences): + query = """SELECT relacl + FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s) + ORDER BY relname""" + self.cursor.execute(query, (schema, sequences)) + return [t[0] for t in self.cursor.fetchall()] + + def get_function_acls(self, schema, function_signatures): + funcnames = [f.split('(', 1)[0] for f in function_signatures] + query = """SELECT proacl + FROM pg_catalog.pg_proc p + JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace + WHERE nspname = %s AND proname = ANY (%s) + ORDER BY proname, proargtypes""" + self.cursor.execute(query, (schema, funcnames)) + return [t[0] for t in self.cursor.fetchall()] + + def get_schema_acls(self, schemas): + query = """SELECT nspacl FROM pg_catalog.pg_namespace + WHERE nspname = ANY (%s) ORDER BY nspname""" + self.cursor.execute(query, (schemas,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_language_acls(self, languages): + query = """SELECT lanacl FROM pg_catalog.pg_language + WHERE lanname = ANY (%s) ORDER BY lanname""" + self.cursor.execute(query, (languages,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_tablespace_acls(self, tablespaces): + query = """SELECT spcacl FROM pg_catalog.pg_tablespace + WHERE spcname = ANY (%s) ORDER BY spcname""" + self.cursor.execute(query, (tablespaces,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_database_acls(self, databases): + query = """SELECT datacl FROM pg_catalog.pg_database + WHERE datname = ANY (%s) ORDER BY datname""" + 
self.cursor.execute(query, (databases,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_group_memberships(self, groups): + query = """SELECT roleid, grantor, member, admin_option + FROM pg_catalog.pg_auth_members am + JOIN pg_catalog.pg_roles r ON r.oid = am.roleid + WHERE r.rolname = ANY(%s) + ORDER BY roleid, grantor, member""" + self.cursor.execute(query, (groups,)) + return self.cursor.fetchall() + + def get_default_privs(self, schema, *args): + query = """SELECT defaclacl + FROM pg_default_acl a + JOIN pg_namespace b ON a.defaclnamespace=b.oid + WHERE b.nspname = %s;""" + self.cursor.execute(query, (schema,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_foreign_data_wrapper_acls(self, fdws): + query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper + WHERE fdwname = ANY (%s) ORDER BY fdwname""" + self.cursor.execute(query, (fdws,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_foreign_server_acls(self, fs): + query = """SELECT srvacl FROM pg_catalog.pg_foreign_server + WHERE srvname = ANY (%s) ORDER BY srvname""" + self.cursor.execute(query, (fs,)) + return [t[0] for t in self.cursor.fetchall()] + + def get_type_acls(self, schema, types): + query = """SELECT t.typacl FROM pg_catalog.pg_type t + JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname""" + self.cursor.execute(query, (schema, types)) + return [t[0] for t in self.cursor.fetchall()] + + # Manipulating privileges + + def manipulate_privs(self, obj_type, privs, objs, roles, target_roles, + state, grant_option, schema_qualifier=None, fail_on_role=True, usage_on_types=True): + """Manipulate database object privileges. + + :param obj_type: Type of database object to grant/revoke + privileges for. + :param privs: Either a list of privileges to grant/revoke + or None if type is "group". + :param objs: List of database objects to grant/revoke + privileges for. 
+ :param roles: Either a list of role names or "PUBLIC" + for the implicitly defined "PUBLIC" group + :param target_roles: List of role names to grant/revoke + default privileges as. + :param state: "present" to grant privileges, "absent" to revoke. + :param grant_option: Only for state "present": If True, set + grant/admin option. If False, revoke it. + If None, don't change grant option. + :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", + "FUNCTION") must be qualified by schema. + Ignored for other Types. + """ + # get_status: function to get current status + if obj_type == 'table': + get_status = partial(self.get_table_acls, schema_qualifier) + elif obj_type == 'sequence': + get_status = partial(self.get_sequence_acls, schema_qualifier) + elif obj_type in ('function', 'procedure'): + get_status = partial(self.get_function_acls, schema_qualifier) + elif obj_type == 'schema': + get_status = self.get_schema_acls + elif obj_type == 'language': + get_status = self.get_language_acls + elif obj_type == 'tablespace': + get_status = self.get_tablespace_acls + elif obj_type == 'database': + get_status = self.get_database_acls + elif obj_type == 'group': + get_status = self.get_group_memberships + elif obj_type == 'default_privs': + get_status = partial(self.get_default_privs, schema_qualifier) + elif obj_type == 'foreign_data_wrapper': + get_status = self.get_foreign_data_wrapper_acls + elif obj_type == 'foreign_server': + get_status = self.get_foreign_server_acls + elif obj_type == 'type': + get_status = partial(self.get_type_acls, schema_qualifier) + else: + raise Error('Unsupported database object type "%s".' % obj_type) + + # Return False (nothing has changed) if there are no objs to work on. 
+ if not objs: + return False + + # obj_ids: quoted db object identifiers (sometimes schema-qualified) + if obj_type in ('function', 'procedure'): + obj_ids = [] + for obj in objs: + try: + f, args = obj.split('(', 1) + except Exception: + raise Error('Illegal function / procedure signature: "%s".' % obj) + obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args)) + elif obj_type in ['table', 'sequence', 'type']: + obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs] + else: + obj_ids = ['"%s"' % o for o in objs] + + # set_what: SQL-fragment specifying what to set for the target roles: + # Either group membership or privileges on objects of a certain type + if obj_type == 'group': + set_what = ','.join(obj_ids) + elif obj_type == 'default_privs': + # We don't want privs to be quoted here + set_what = ','.join(privs) + else: + # function types are already quoted above + if obj_type not in ('function', 'procedure'): + obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids] + # Note: obj_type has been checked against a set of string literals + # and privs was escaped when it was parsed + # Note: Underscores are replaced with spaces to support multi-word obj_type + set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '), + ','.join(obj_ids)) + + # for_whom: SQL-fragment specifying for whom to set the above + if roles == 'PUBLIC': + for_whom = 'PUBLIC' + else: + for_whom = [] + for r in roles: + if not role_exists(self.module, self.cursor, r): + if fail_on_role: + self.module.fail_json(msg="Role '%s' does not exist" % r.strip()) + + else: + self.module.warn("Role '%s' does not exist, pass it" % r.strip()) + else: + for_whom.append('"%s"' % r) + + if not for_whom: + return False + + for_whom = ','.join(for_whom) + + # as_who: + as_who = None + if target_roles: + as_who = ','.join('"%s"' % r for r in target_roles) + + if schema_qualifier: + schema_qualifier = '"%s"' % schema_qualifier + + status_before = get_status(objs) + + query = 
QueryBuilder(state) \ + .for_objtype(obj_type) \ + .with_grant_option(grant_option) \ + .for_whom(for_whom) \ + .as_who(as_who) \ + .for_schema(schema_qualifier) \ + .set_what(set_what) \ + .for_objs(objs) \ + .usage_on_types(usage_on_types) \ + .build() + + executed_queries.append(query) + self.cursor.execute(query) + if roles == 'PUBLIC': + return True + + status_after = get_status(objs) + + def nonesorted(e): + # For python 3+ that can fail trying + # to compare NoneType elements by sort method. + if e is None: + return '' + return e + + status_before.sort(key=nonesorted) + status_after.sort(key=nonesorted) + return status_before != status_after + + +class QueryBuilder(object): + def __init__(self, state): + self._grant_option = None + self._for_whom = None + self._as_who = None + self._set_what = None + self._obj_type = None + self._state = state + self._schema = None + self._objs = None + self._usage_on_types = None + self.query = [] + + def for_objs(self, objs): + self._objs = objs + return self + + def for_schema(self, schema): + self._schema = schema + return self + + def with_grant_option(self, option): + self._grant_option = option + return self + + def for_whom(self, who): + self._for_whom = who + return self + + def usage_on_types(self, usage_on_types): + self._usage_on_types = usage_on_types + return self + + def as_who(self, target_roles): + self._as_who = target_roles + return self + + def set_what(self, what): + self._set_what = what + return self + + def for_objtype(self, objtype): + self._obj_type = objtype + return self + + def build(self): + if self._state == 'present': + self.build_present() + elif self._state == 'absent': + self.build_absent() + else: + self.build_absent() + return '\n'.join(self.query) + + def add_default_revoke(self): + for obj in self._objs: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who, + self._schema, obj, + self._for_whom)) 
+ else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj, + self._for_whom)) + + def add_grant_option(self): + if self._grant_option: + if self._obj_type == 'group': + self.query[-1] += ' WITH ADMIN OPTION;' + else: + self.query[-1] += ' WITH GRANT OPTION;' + elif self._grant_option is False: + self.query[-1] += ';' + if self._obj_type == 'group': + self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom)) + elif not self._obj_type == 'default_privs': + self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom)) + else: + self.query[-1] += ';' + + def add_default_priv(self): + for obj in self._objs: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who, + self._schema, + self._set_what, + obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema, + self._set_what, + obj, + self._for_whom)) + self.add_grant_option() + + if self._usage_on_types: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who, + self._schema, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom)) + self.add_grant_option() + + def build_present(self): + if self._obj_type == 'default_privs': + self.add_default_revoke() + self.add_default_priv() + else: + self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom)) + self.add_grant_option() + + def build_absent(self): + if self._obj_type == 'default_privs': + self.query = [] + for obj in ['TABLES', 'SEQUENCES', 'TYPES']: + if self._as_who: + self.query.append( + 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} 
FROM {3};'.format(self._as_who, + self._schema, obj, + self._for_whom)) + else: + self.query.append( + 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj, + self._for_whom)) + else: + self.query.append('REVOKE {0} FROM {1};'.format(self._set_what, self._for_whom)) + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + database=dict(required=True, aliases=['db', 'login_db']), + state=dict(default='present', choices=['present', 'absent']), + privs=dict(required=False, aliases=['priv']), + type=dict(default='table', + choices=['table', + 'sequence', + 'function', + 'procedure', + 'database', + 'schema', + 'language', + 'tablespace', + 'group', + 'default_privs', + 'foreign_data_wrapper', + 'foreign_server', + 'type', ]), + objs=dict(required=False, aliases=['obj']), + schema=dict(required=False), + roles=dict(required=True, aliases=['role']), + session_role=dict(required=False), + target_roles=dict(required=False), + grant_option=dict(required=False, type='bool', + aliases=['admin_option']), + host=dict(default='', aliases=['login_host']), + unix_socket=dict(default='', aliases=['login_unix_socket']), + login=dict(default='postgres', aliases=['login_user']), + password=dict(default='', aliases=['login_password'], no_log=True), + fail_on_role=dict(type='bool', default=True), + trust_input=dict(type='bool', default=True), + usage_on_types=dict(type='bool', default=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + fail_on_role = module.params['fail_on_role'] + usage_on_types = module.params['usage_on_types'] + + # Create type object as namespace for module params + p = type('Params', (), module.params) + # param "schema": default, allowed depends on param "type" + if p.type in ['table', 'sequence', 'function', 'procedure', 'type', 'default_privs']: + p.schema = p.schema or 'public' + elif p.schema: + module.fail_json(msg='Argument 
"schema" is not allowed ' + 'for type "%s".' % p.type) + + # param "objs": default, required depends on param "type" + if p.type == 'database': + p.objs = p.objs or p.database + elif not p.objs: + module.fail_json(msg='Argument "objs" is required ' + 'for type "%s".' % p.type) + + # param "privs": allowed, required depends on param "type" + if p.type == 'group': + if p.privs: + module.fail_json(msg='Argument "privs" is not allowed ' + 'for type "group".') + elif not p.privs: + module.fail_json(msg='Argument "privs" is required ' + 'for type "%s".' % p.type) + + # Check input + if not p.trust_input: + # Check input for potentially dangerous elements: + check_input(module, p.roles, p.target_roles, p.session_role, p.schema) + + # Connect to Database + if not psycopg2: + module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR) + try: + conn = Connection(p, module) + except psycopg2.Error as e: + module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc()) + except TypeError as e: + if 'sslrootcert' in e.args[0]: + module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert') + module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc()) + except ValueError as e: + # We raise this when the psycopg library is too old + module.fail_json(msg=to_native(e)) + + if p.session_role: + try: + conn.cursor.execute('SET ROLE "%s"' % p.session_role) + except Exception as e: + module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc()) + + try: + # privs + if p.privs: + privs = frozenset(pr.upper() for pr in p.privs.split(',')) + if not privs.issubset(VALID_PRIVS): + module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS)) + else: + privs = None + # objs: + if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA': + objs = 
conn.get_all_tables_in_schema(p.schema) + elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_sequences_in_schema(p.schema) + elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_functions_in_schema(p.schema) + elif p.type == 'procedure' and p.objs == 'ALL_IN_SCHEMA': + objs = conn.get_all_procedures_in_schema(p.schema) + elif p.type == 'default_privs': + if p.objs == 'ALL_DEFAULT': + objs = frozenset(VALID_DEFAULT_OBJS.keys()) + else: + objs = frozenset(obj.upper() for obj in p.objs.split(',')) + if not objs.issubset(VALID_DEFAULT_OBJS): + module.fail_json( + msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys())) + # Again, do we have valid privs specified for object type: + valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj])) + if not valid_objects_for_priv == objs: + module.fail_json( + msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format( + valid_objects_for_priv, objs)) + else: + objs = p.objs.split(',') + + # function signatures are encoded using ':' to separate args + if p.type in ('function', 'procedure'): + objs = [obj.replace(':', ',') for obj in objs] + + # roles + if p.roles.upper() == 'PUBLIC': + roles = 'PUBLIC' + else: + roles = p.roles.split(',') + + if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]): + module.exit_json(changed=False) + + if fail_on_role: + module.fail_json(msg="Role '%s' does not exist" % roles[0].strip()) + + else: + module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip()) + + # check if target_roles is set with type: default_privs + if p.target_roles and not p.type == 'default_privs': + module.warn('"target_roles" will be ignored ' + 'Argument "type: default_privs" is required for usage of "target_roles".') + + # target roles + if p.target_roles: + target_roles = p.target_roles.split(',') + else: + target_roles = None + + changed = 
conn.manipulate_privs( + obj_type=p.type, + privs=privs, + objs=objs, + roles=roles, + target_roles=target_roles, + state=p.state, + grant_option=p.grant_option, + schema_qualifier=p.schema, + fail_on_role=fail_on_role, + usage_on_types=usage_on_types, + ) + + except Error as e: + conn.rollback() + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + except psycopg2.Error as e: + conn.rollback() + module.fail_json(msg=to_native(e)) + + if module.check_mode or not changed: + conn.rollback() + else: + conn.commit() + module.exit_json(changed=changed, queries=executed_queries) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py new file mode 100644 index 00000000..06692c09 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py @@ -0,0 +1,683 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr> +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: postgresql_publication +short_description: Add, update, or remove PostgreSQL publication +description: +- Add, update, or remove PostgreSQL publication. +options: + name: + description: + - Name of the publication to add, update, or remove. + required: true + type: str + db: + description: + - Name of the database to connect to and where + the publication state will be changed. + aliases: [ login_db ] + type: str + tables: + description: + - List of tables to add to the publication. 
+ - If no value is set all tables are targeted. + - If the publication already exists for specific tables and I(tables) is not passed, + nothing will be changed. + - If you need to add all tables to the publication with the same name, + drop existent and create new without passing I(tables). + type: list + elements: str + state: + description: + - The publication state. + default: present + choices: [ absent, present ] + type: str + parameters: + description: + - Dictionary with optional publication parameters. + - Available parameters depend on PostgreSQL version. + type: dict + owner: + description: + - Publication owner. + - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role). + type: str + cascade: + description: + - Drop publication dependencies. Has effect with I(state=absent) only. + type: bool + default: false + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + version_added: '0.2.0' + trust_input: + description: + - If C(no), check whether values of parameters I(name), I(tables), I(owner), + I(session_role), I(params) are potentially dangerous. + - It makes sense to use C(no) only when SQL injections via the parameters are possible. + type: bool + default: yes + version_added: '0.2.0' +notes: +- PostgreSQL version must be 10 or greater. +- Supports C(check_mode). +seealso: +- name: CREATE PUBLICATION reference + description: Complete reference of the CREATE PUBLICATION command documentation. + link: https://www.postgresql.org/docs/current/sql-createpublication.html +- name: ALTER PUBLICATION reference + description: Complete reference of the ALTER PUBLICATION command documentation. 
+ link: https://www.postgresql.org/docs/current/sql-alterpublication.html +- name: DROP PUBLICATION reference + description: Complete reference of the DROP PUBLICATION command documentation. + link: https://www.postgresql.org/docs/current/sql-droppublication.html +author: +- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr> +- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +extends_documentation_fragment: +- community.postgresql.postgres +''' + +EXAMPLES = r''' +- name: Create a new publication with name "acme" targeting all tables in database "test" + community.postgresql.postgresql_publication: + db: test + name: acme + +- name: Create publication "acme" publishing only prices and vehicles tables + community.postgresql.postgresql_publication: + name: acme + tables: + - prices + - vehicles + +- name: > + Create publication "acme", set user alice as an owner, targeting all tables + Allowable DML operations are INSERT and UPDATE only + community.postgresql.postgresql_publication: + name: acme + owner: alice + parameters: + publish: 'insert,update' + +- name: > + Assuming publication "acme" exists and there are targeted + tables "prices" and "vehicles", add table "stores" to the publication + community.postgresql.postgresql_publication: + name: acme + tables: + - prices + - vehicles + - stores + +- name: Remove publication "acme" if exists in database "test" + community.postgresql.postgresql_publication: + db: test + name: acme + state: absent +''' + +RETURN = r''' +exists: + description: + - Flag indicates the publication exists or not at the end of runtime. + returned: always + type: bool + sample: true +queries: + description: List of executed queries. + returned: always + type: str + sample: [ 'DROP PUBLICATION "acme" CASCADE' ] +owner: + description: Owner of the publication at the end of runtime. + returned: if publication exists + type: str + sample: "alice" +tables: + description: + - List of tables in the publication at the end of runtime. 
+ - If all tables are published, returns empty list. + returned: if publication exists + type: list + sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""] +alltables: + description: + - Flag indicates that all tables are published. + returned: if publication exists + type: bool + sample: false +parameters: + description: Publication parameters at the end of runtime. + returned: if publication exists + type: dict + sample: {'publish': {'insert': false, 'delete': false, 'update': true}} +''' + + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, + pg_quote_identifier, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils.six import iteritems + +SUPPORTED_PG_VERSION = 10000 + + +################################ +# Module functions and classes # +################################ + +def transform_tables_representation(tbl_list): + """Add 'public.' to names of tables where a schema identifier is absent + and add quotes to each element. + + Args: + tbl_list (list): List of table names. + + Returns: + tbl_list (list): Changed list. + """ + for i, table in enumerate(tbl_list): + if '.' not in table: + tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table') + else: + tbl_list[i] = pg_quote_identifier(table.strip(), 'table') + + return tbl_list + + +class PgPublication(): + """Class to work with PostgreSQL publication. + + Args: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + name (str): The name of the publication. 
+ + Attributes: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + name (str): Name of the publication. + executed_queries (list): List of executed queries. + attrs (dict): Dict with publication attributes. + exists (bool): Flag indicates the publication exists or not. + """ + + def __init__(self, module, cursor, name): + self.module = module + self.cursor = cursor + self.name = name + self.executed_queries = [] + self.attrs = { + 'alltables': False, + 'tables': [], + 'parameters': {}, + 'owner': '', + } + self.exists = self.check_pub() + + def get_info(self): + """Refresh the publication information. + + Returns: + ``self.attrs``. + """ + self.exists = self.check_pub() + return self.attrs + + def check_pub(self): + """Check the publication and refresh ``self.attrs`` publication attribute. + + Returns: + True if the publication with ``self.name`` exists, False otherwise. + """ + + pub_info = self.__get_general_pub_info() + + if not pub_info: + # Publication does not exist: + return False + + self.attrs['owner'] = pub_info.get('pubowner') + + # Publication DML operations: + self.attrs['parameters']['publish'] = {} + self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False) + self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False) + self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False) + if pub_info.get('pubtruncate'): + self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate') + + # If alltables flag is False, get the list of targeted tables: + if not pub_info.get('puballtables'): + table_info = self.__get_tables_pub_info() + # Join sublists [['schema', 'table'], ...] to ['schema.table', ...] 
+ # for better representation: + for i, schema_and_table in enumerate(table_info): + table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table') + + self.attrs['tables'] = table_info + else: + self.attrs['alltables'] = True + + # Publication exists: + return True + + def create(self, tables, params, owner, check_mode=True): + """Create the publication. + + Args: + tables (list): List with names of the tables that need to be added to the publication. + params (dict): Dict contains optional publication parameters and their values. + owner (str): Name of the publication owner. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if publication has been created, otherwise False. + """ + changed = True + + query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')] + + if tables: + query_fragments.append("FOR TABLE %s" % ', '.join(tables)) + else: + query_fragments.append("FOR ALL TABLES") + + if params: + params_list = [] + # Make list ["param = 'value'", ...] from params dict: + for (key, val) in iteritems(params): + params_list.append("%s = '%s'" % (key, val)) + + # Add the list to query_fragments: + query_fragments.append("WITH (%s)" % ', '.join(params_list)) + + changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode) + + if owner: + # If check_mode, just add possible SQL to + # executed_queries and return: + self.__pub_set_owner(owner, check_mode=check_mode) + + return changed + + def update(self, tables, params, owner, check_mode=True): + """Update the publication. + + Args: + tables (list): List with names of the tables that need to be presented in the publication. + params (dict): Dict contains optional publication parameters and their values. + owner (str): Name of the publication owner. 
+ + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if publication has been updated, otherwise False. + """ + changed = False + + # Add or drop tables from published tables suit: + if tables and not self.attrs['alltables']: + + # 1. If needs to add table to the publication: + for tbl in tables: + if tbl not in self.attrs['tables']: + # If needs to add table to the publication: + changed = self.__pub_add_table(tbl, check_mode=check_mode) + + # 2. if there is a table in targeted tables + # that's not presented in the passed tables: + for tbl in self.attrs['tables']: + if tbl not in tables: + changed = self.__pub_drop_table(tbl, check_mode=check_mode) + + elif tables and self.attrs['alltables']: + changed = self.__pub_set_tables(tables, check_mode=check_mode) + + # Update pub parameters: + if params: + for key, val in iteritems(params): + if self.attrs['parameters'].get(key): + + # In PostgreSQL 10/11 only 'publish' optional parameter is presented. + if key == 'publish': + # 'publish' value can be only a string with comma-separated items + # of allowed DML operations like 'insert,update' or + # 'insert,update,delete', etc. 
+ # Make dictionary to compare with current attrs later: + val_dict = self.attrs['parameters']['publish'].copy() + val_list = val.split(',') + for v in val_dict: + if v in val_list: + val_dict[v] = True + else: + val_dict[v] = False + + # Compare val_dict and the dict with current 'publish' parameters, + # if they're different, set new values: + if val_dict != self.attrs['parameters']['publish']: + changed = self.__pub_set_param(key, val, check_mode=check_mode) + + # Default behavior for other cases: + elif self.attrs['parameters'][key] != val: + changed = self.__pub_set_param(key, val, check_mode=check_mode) + + else: + # If the parameter was not set before: + changed = self.__pub_set_param(key, val, check_mode=check_mode) + + # Update pub owner: + if owner: + if owner != self.attrs['owner']: + changed = self.__pub_set_owner(owner, check_mode=check_mode) + + return changed + + def drop(self, cascade=False, check_mode=True): + """Drop the publication. + + Kwargs: + cascade (bool): Flag indicates that publication needs to be deleted + with its dependencies. + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if publication has been updated, otherwise False. + """ + if self.exists: + query_fragments = [] + query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')) + if cascade: + query_fragments.append("CASCADE") + + return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode) + + def __get_general_pub_info(self): + """Get and return general publication information. + + Returns: + Dict with publication information if successful, False otherwise. 
+ """ + # Check pg_publication.pubtruncate exists (supported from PostgreSQL 11): + pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns " + "WHERE table_name = 'pg_publication' " + "AND column_name = 'pubtruncate'"), add_to_executed=False) + + if pgtrunc_sup: + query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, " + "p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p " + "JOIN pg_catalog.pg_roles AS r " + "ON p.pubowner = r.oid " + "WHERE p.pubname = %(pname)s") + else: + query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, " + "p.pubupdate , p.pubdelete FROM pg_publication AS p " + "JOIN pg_catalog.pg_roles AS r " + "ON p.pubowner = r.oid " + "WHERE p.pubname = %(pname)s") + + result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False) + if result: + return result[0] + else: + return False + + def __get_tables_pub_info(self): + """Get and return tables that are published by the publication. + + Returns: + List of dicts with published tables. + """ + query = ("SELECT schemaname, tablename " + "FROM pg_publication_tables WHERE pubname = %(pname)s") + return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False) + + def __pub_add_table(self, table, check_mode=False): + """Add a table to the publication. + + Args: + table (str): Table name. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'), + pg_quote_identifier(table, 'table'))) + return self.__exec_sql(query, check_mode=check_mode) + + def __pub_drop_table(self, table, check_mode=False): + """Drop a table from the publication. + + Args: + table (str): Table name. 
+ + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'), + pg_quote_identifier(table, 'table'))) + return self.__exec_sql(query, check_mode=check_mode) + + def __pub_set_tables(self, tables, check_mode=False): + """Set a table suit that need to be published by the publication. + + Args: + tables (list): List of tables. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + quoted_tables = [pg_quote_identifier(t, 'table') for t in tables] + query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'), + ', '.join(quoted_tables))) + return self.__exec_sql(query, check_mode=check_mode) + + def __pub_set_param(self, param, value, check_mode=False): + """Set an optional publication parameter. + + Args: + param (str): Name of the parameter. + value (str): Parameter value. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'), + param, value)) + return self.__exec_sql(query, check_mode=check_mode) + + def __pub_set_owner(self, role, check_mode=False): + """Set a publication owner. + + Args: + role (str): Role (user) name that needs to be set as a publication owner. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. 
+ """ + query = ('ALTER PUBLICATION %s ' + 'OWNER TO "%s"' % (pg_quote_identifier(self.name, 'publication'), role)) + return self.__exec_sql(query, check_mode=check_mode) + + def __exec_sql(self, query, check_mode=False): + """Execute SQL query. + + Note: If we need just to get information from the database, + we use ``exec_sql`` function directly. + + Args: + query (str): Query that needs to be executed. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just add ``query`` to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + if check_mode: + self.executed_queries.append(query) + return True + else: + return exec_sql(self, query, return_bool=True) + + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + name=dict(required=True), + db=dict(type='str', aliases=['login_db']), + state=dict(type='str', default='present', choices=['absent', 'present']), + tables=dict(type='list', elements='str'), + parameters=dict(type='dict'), + owner=dict(type='str'), + cascade=dict(type='bool', default=False), + session_role=dict(type='str'), + trust_input=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + # Parameters handling: + name = module.params['name'] + state = module.params['state'] + tables = module.params['tables'] + params = module.params['parameters'] + owner = module.params['owner'] + cascade = module.params['cascade'] + session_role = module.params['session_role'] + trust_input = module.params['trust_input'] + + if not trust_input: + # Check input for potentially dangerous elements: + if not params: + params_list = None + else: + params_list = ['%s = %s' % (k, v) for k, v in iteritems(params)] + + check_input(module, name, tables, owner, session_role, params_list) + + if state == 'absent': + if tables: + 
module.warn('parameter "tables" is ignored when "state=absent"') + if params: + module.warn('parameter "parameters" is ignored when "state=absent"') + if owner: + module.warn('parameter "owner" is ignored when "state=absent"') + + if state == 'present' and cascade: + module.warn('parameter "cascade" is ignored when "state=present"') + + # Connect to DB and make cursor object: + conn_params = get_conn_params(module, module.params) + # We check publication state without DML queries execution, so set autocommit: + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Check version: + if cursor.connection.server_version < SUPPORTED_PG_VERSION: + module.fail_json(msg="PostgreSQL server version should be 10.0 or greater") + + # Nothing was changed by default: + changed = False + + ################################### + # Create object and do rock'n'roll: + publication = PgPublication(module, cursor, name) + + if tables: + tables = transform_tables_representation(tables) + + # If module.check_mode=True, nothing will be changed: + if state == 'present': + if not publication.exists: + changed = publication.create(tables, params, owner, check_mode=module.check_mode) + + else: + changed = publication.update(tables, params, owner, check_mode=module.check_mode) + + elif state == 'absent': + changed = publication.drop(cascade=cascade, check_mode=module.check_mode) + + # Get final publication info: + pub_fin_info = {} + if state == 'present' or (state == 'absent' and module.check_mode): + pub_fin_info = publication.get_info() + elif state == 'absent' and not module.check_mode: + publication.exists = False + + # Connection is not needed any more: + cursor.close() + db_connection.close() + + # Update publication info and return ret values: + module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info) + + +if __name__ == '__main__': + main() diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py new file mode 100644 index 00000000..259a8d48 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py @@ -0,0 +1,524 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Felix Archambault +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_query +short_description: Run PostgreSQL queries +description: +- Runs arbitrary PostgreSQL queries. +- Can run queries from SQL script files. +- Does not run against backup files. Use M(community.postgresql.postgresql_db) with I(state=restore) + to run queries on files made by pg_dump/pg_dumpall utilities. +options: + query: + description: + - SQL query to run. Variables can be escaped with psycopg2 syntax + U(http://initd.org/psycopg/docs/usage.html). + type: str + positional_args: + description: + - List of values to be passed as positional arguments to the query. + When the value is a list, it will be converted to PostgreSQL array. + - Mutually exclusive with I(named_args). + type: list + elements: raw + named_args: + description: + - Dictionary of key-value arguments to pass to the query. + When the value is a list, it will be converted to PostgreSQL array. + - Mutually exclusive with I(positional_args). + type: dict + path_to_script: + description: + - Path to a SQL script on the target machine. + - If the script contains several queries, they must be semicolon-separated. 
+ - To run scripts containing objects with semicolons + (for example, function and procedure definitions), use I(as_single_query=yes). + - To upload dumps or to execute other complex scripts, the preferable way + is to use the M(community.postgresql.postgresql_db) module with I(state=restore). + - Mutually exclusive with I(query). + type: path + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + db: + description: + - Name of database to connect to and run queries against. + type: str + aliases: + - login_db + autocommit: + description: + - Execute in autocommit mode when the query can't be run inside a transaction block + (e.g., VACUUM). + - Mutually exclusive with I(check_mode). + type: bool + default: no + encoding: + description: + - Set the client encoding for the current session (e.g. C(UTF-8)). + - The default is the encoding defined by the database. + type: str + version_added: '0.2.0' + trust_input: + description: + - If C(no), check whether a value of I(session_role) is potentially dangerous. + - It makes sense to use C(no) only when SQL injections via I(session_role) are possible. + type: bool + default: yes + version_added: '0.2.0' + search_path: + description: + - List of schema names to look in. + type: list + elements: str + version_added: '1.0.0' + as_single_query: + description: + - If C(yes), when reading from the I(path_to_script) file, + executes its whole content in a single query. + - When C(yes), the C(query_all_results) return value + contains only the result of the last statement. + - Whether the state is reported as changed or not + is determined by the last statement of the file. + - Used only when I(path_to_script) is specified, otherwise ignored. 
+ - If set to C(no), the script can contain only semicolon-separated queries. + (see the I(path_to_script) option documentation). + - The default value is C(no). + type: bool + version_added: '1.1.0' +seealso: +- module: community.postgresql.postgresql_db +- name: PostgreSQL Schema reference + description: Complete reference of the PostgreSQL schema documentation. + link: https://www.postgresql.org/docs/current/ddl-schemas.html +author: +- Felix Archambault (@archf) +- Andrew Klychkov (@Andersson007) +- Will Rouesnel (@wrouesnel) +extends_documentation_fragment: +- community.postgresql.postgres +notes: +- Supports C(check_mode). +''' + +EXAMPLES = r''' +- name: Simple select query to acme db + community.postgresql.postgresql_query: + db: acme + query: SELECT version() + +- name: Select query to db acme with positional arguments and non-default credentials + community.postgresql.postgresql_query: + db: acme + login_user: django + login_password: mysecretpass + query: SELECT * FROM acme WHERE id = %s AND story = %s + positional_args: + - 1 + - test + +- name: Select query to test_db with named_args + community.postgresql.postgresql_query: + db: test_db + query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s + named_args: + id_val: 1 + story_val: test + +- name: Insert query to test_table in db test_db + community.postgresql.postgresql_query: + db: test_db + query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story') + +# If your script contains semicolons as parts of separate objects +# like functions, procedures, and so on, use "as_single_query: yes" +- name: Run queries from SQL script using UTF-8 client encoding for session + community.postgresql.postgresql_query: + db: test_db + path_to_script: /var/lib/pgsql/test.sql + positional_args: + - 1 + encoding: UTF-8 + +- name: Example of using autocommit parameter + community.postgresql.postgresql_query: + db: test_db + query: VACUUM + autocommit: yes + +- name: > + Insert data to the 
column of array type using positional_args. + Note that we use quotes here, the same as for passing JSON, etc. + community.postgresql.postgresql_query: + query: INSERT INTO test_table (array_column) VALUES (%s) + positional_args: + - '{1,2,3}' + +# Pass list and string vars as positional_args +- name: Set vars + ansible.builtin.set_fact: + my_list: + - 1 + - 2 + - 3 + my_arr: '{1, 2, 3}' + +- name: Select from test table by passing positional_args as arrays + community.postgresql.postgresql_query: + query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s + positional_args: + - '{{ my_list }}' + - '{{ my_arr|string }}' + +# Select from test table looking into app1 schema first, then, +# if the schema doesn't exist or the table hasn't been found there, +# try to find it in the schema public +- name: Select from test using search_path + community.postgresql.postgresql_query: + query: SELECT * FROM test_array_table + search_path: + - app1 + - public + +# If you use a variable in positional_args / named_args that can +# be undefined and you wish to set it as NULL, the constructions like +# "{{ my_var if (my_var is defined) else none | default(none) }}" +# will not work as expected substituting an empty string instead of NULL. +# If possible, we suggest to use Ansible's DEFAULT_JINJA2_NATIVE configuration +# (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-jinja2-native). +# Enabling it fixes this problem. If you cannot enable it, the following workaround +# can be used. +# You should precheck such a value and define it as NULL when undefined. +# For example: +- name: When undefined, set to NULL + set_fact: + my_var: NULL + when: my_var is undefined + +# Then: +- name: Insert a value using positional arguments + community.postgresql.postgresql_query: + query: INSERT INTO test_table (col1) VALUES (%s) + positional_args: + - '{{ my_var }}' +''' + +RETURN = r''' +query: + description: + - Executed query. 
+ - When reading several queries from a file, it contains only the last one. + returned: always + type: str + sample: 'SELECT * FROM bar' +statusmessage: + description: + - Attribute containing the message returned by the command. + - When reading several queries from a file, it contains a message of the last one. + returned: always + type: str + sample: 'INSERT 0 1' +query_result: + description: + - List of dictionaries in column:value form representing returned rows. + - When running queries from a file, returns result of the last query. + returned: always + type: list + elements: dict + sample: [{"Column": "Value1"},{"Column": "Value2"}] +query_list: + description: + - List of executed queries. + Useful when reading several queries from a file. + returned: always + type: list + elements: str + sample: ['SELECT * FROM foo', 'SELECT * FROM bar'] +query_all_results: + description: + - List containing results of all queries executed (one sublist for every query). + Useful when reading several queries from a file. + returned: always + type: list + elements: list + sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"Column": "Value1"},{"Column": "Value2"}]] +rowcount: + description: + - Number of produced or affected rows. + - When using a script with multiple queries, + it contains a total number of produced or affected rows. 
+ returned: changed + type: int + sample: 5 +''' + +try: + from psycopg2 import ProgrammingError as Psycopg2ProgrammingError + from psycopg2.extras import DictCursor +except ImportError: + # it is needed for checking 'no result to fetch' in main(), + # psycopg2 availability will be checked by connect_to_db() into + # ansible.module_utils.postgres + pass + +import datetime +import decimal +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_native +from ansible.module_utils.six import iteritems + + +# =========================================== +# Module execution. +# + +def list_to_pg_array(elem): + """Convert the passed list to PostgreSQL array + represented as a string. + + Args: + elem (list): List that needs to be converted. + + Returns: + elem (str): String representation of PostgreSQL array. + """ + elem = str(elem).strip('[]') + elem = '{' + elem + '}' + return elem + + +def convert_elements_to_pg_arrays(obj): + """Convert list elements of the passed object + to PostgreSQL arrays represented as strings. + + Args: + obj (dict or list): Object whose elements need to be converted. + + Returns: + obj (dict or list): Object with converted elements. + """ + if isinstance(obj, dict): + for (key, elem) in iteritems(obj): + if isinstance(elem, list): + obj[key] = list_to_pg_array(elem) + + elif isinstance(obj, list): + for i, elem in enumerate(obj): + if isinstance(elem, list): + obj[i] = list_to_pg_array(elem) + + return obj + + +def set_search_path(cursor, search_path): + """Set session's search_path. + + Args: + cursor (Psycopg2 cursor): Database cursor object. + search_path (str): String containing comma-separated schema names. 
+ """ + cursor.execute('SET search_path TO %s' % search_path) + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + query=dict(type='str'), + db=dict(type='str', aliases=['login_db']), + positional_args=dict(type='list', elements='raw'), + named_args=dict(type='dict'), + session_role=dict(type='str'), + path_to_script=dict(type='path'), + autocommit=dict(type='bool', default=False), + encoding=dict(type='str'), + trust_input=dict(type='bool', default=True), + search_path=dict(type='list', elements='str'), + as_single_query=dict(type='bool'), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=(('positional_args', 'named_args'),), + supports_check_mode=True, + ) + + query = module.params["query"] + positional_args = module.params["positional_args"] + named_args = module.params["named_args"] + path_to_script = module.params["path_to_script"] + autocommit = module.params["autocommit"] + encoding = module.params["encoding"] + session_role = module.params["session_role"] + trust_input = module.params["trust_input"] + search_path = module.params["search_path"] + as_single_query = module.params["as_single_query"] + + if path_to_script and as_single_query is None: + module.warn('You use the "path_to_script" option with the "as_single_query" ' + 'option unset. The default is false. 
' + 'To avoid crashes, please read the documentation ' + 'and define the "as_single_query" option explicitly.') + + if not trust_input: + # Check input for potentially dangerous elements: + check_input(module, session_role) + + if autocommit and module.check_mode: + module.fail_json(msg="Using autocommit is mutually exclusive with check_mode") + + if path_to_script and query: + module.fail_json(msg="path_to_script is mutually exclusive with query") + + if positional_args: + positional_args = convert_elements_to_pg_arrays(positional_args) + + elif named_args: + named_args = convert_elements_to_pg_arrays(named_args) + + query_list = [] + if path_to_script: + try: + with open(path_to_script, 'rb') as f: + query = to_native(f.read()) + + if not as_single_query: + if ';' in query: + query_list = [q for q in query.split(';') if q != '\n'] + else: + query_list.append(query) + else: + query_list.append(query) + + except Exception as e: + module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e))) + else: + query_list.append(query) + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=autocommit) + if encoding is not None: + db_connection.set_client_encoding(encoding) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + if search_path: + set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path])) + + # Prepare args: + if module.params.get("positional_args"): + arguments = module.params["positional_args"] + elif module.params.get("named_args"): + arguments = module.params["named_args"] + else: + arguments = None + + # Set defaults: + changed = False + + query_all_results = [] + rowcount = 0 + statusmessage = '' + + # Execute query: + for query in query_list: + try: + cursor.execute(query, arguments) + statusmessage = cursor.statusmessage + if cursor.rowcount > 0: + rowcount += cursor.rowcount + + query_result = [] + try: + for row in cursor.fetchall(): + # 
Ansible engine does not support decimals. + # An explicit conversion is required on the module's side + row = dict(row) + for (key, val) in iteritems(row): + if isinstance(val, decimal.Decimal): + row[key] = float(val) + + elif isinstance(val, datetime.timedelta): + row[key] = str(val) + + query_result.append(row) + + except Psycopg2ProgrammingError as e: + if to_native(e) == 'no results to fetch': + query_result = {} + + except Exception as e: + module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e)) + + query_all_results.append(query_result) + + if 'SELECT' not in statusmessage: + if re.search(re.compile(r'(UPDATE|INSERT|DELETE)'), statusmessage): + s = statusmessage.split() + if len(s) == 3: + if s[2] != '0': + changed = True + + elif len(s) == 2: + if s[1] != '0': + changed = True + + else: + changed = True + + else: + changed = True + + except Exception as e: + if not autocommit: + db_connection.rollback() + + cursor.close() + db_connection.close() + module.fail_json(msg="Cannot execute SQL '%s' %s: %s, query list: %s" % (query, arguments, to_native(e), query_list)) + + if module.check_mode: + db_connection.rollback() + else: + if not autocommit: + db_connection.commit() + + kw = dict( + changed=changed, + query=cursor.query, + query_list=query_list, + statusmessage=statusmessage, + query_result=query_result, + query_all_results=query_all_results, + rowcount=rowcount, + ) + + cursor.close() + db_connection.close() + + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py new file mode 100644 index 00000000..6e511376 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py @@ -0,0 +1,294 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: 
(c) 2016, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_schema +short_description: Add or remove PostgreSQL schema +description: +- Add or remove PostgreSQL schema. +options: + name: + description: + - Name of the schema to add or remove. + required: true + type: str + aliases: + - schema + database: + description: + - Name of the database to connect to and add or remove the schema. + type: str + default: postgres + aliases: + - db + - login_db + owner: + description: + - Name of the role to set as owner of the schema. + type: str + session_role: + description: + - Switch to session_role after connecting. + - The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though the session_role + were the one that had logged in originally. + type: str + state: + description: + - The schema state. + type: str + default: present + choices: [ absent, present ] + cascade_drop: + description: + - Drop schema with CASCADE to remove child objects. + type: bool + default: false + ssl_mode: + description: + - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. + - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes. + - Default of C(prefer) matches libpq default. + type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, the server's certificate will be verified to be signed by one of these authorities. 
+ type: str + aliases: [ ssl_rootcert ] + trust_input: + description: + - If C(no), check whether values of parameters I(schema), I(owner), I(session_role) are potentially dangerous. + - It makes sense to use C(no) only when SQL injections via the parameters are possible. + type: bool + default: yes + version_added: '0.2.0' +seealso: +- name: PostgreSQL schemas + description: General information about PostgreSQL schemas. + link: https://www.postgresql.org/docs/current/ddl-schemas.html +- name: CREATE SCHEMA reference + description: Complete reference of the CREATE SCHEMA command documentation. + link: https://www.postgresql.org/docs/current/sql-createschema.html +- name: ALTER SCHEMA reference + description: Complete reference of the ALTER SCHEMA command documentation. + link: https://www.postgresql.org/docs/current/sql-alterschema.html +- name: DROP SCHEMA reference + description: Complete reference of the DROP SCHEMA command documentation. + link: https://www.postgresql.org/docs/current/sql-dropschema.html +author: +- Flavien Chantelot (@Dorn-) <contact@flavien.io> +- Thomas O'Donnell (@andytom) +extends_documentation_fragment: +- community.postgresql.postgres +notes: +- Supports C(check_mode). +''' + +EXAMPLES = r''' +- name: Create a new schema with name acme in test database + community.postgresql.postgresql_schema: + db: test + name: acme + +- name: Create a new schema acme with a user bob who will own it + community.postgresql.postgresql_schema: + name: acme + owner: bob + +- name: Drop schema "acme" with cascade + community.postgresql.postgresql_schema: + name: acme + state: absent + cascade_drop: yes +''' + +RETURN = r''' +schema: + description: Name of the schema. + returned: success, changed + type: str + sample: "acme" +queries: + description: List of executed queries. 
+ returned: always + type: list + sample: ["CREATE SCHEMA \"acme\""] +''' + +import traceback + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, + pg_quote_identifier, + SQLParseError, +) +from ansible.module_utils._text import to_native + +executed_queries = [] + + +class NotSupportedError(Exception): + pass + + +# =========================================== +# PostgreSQL module specific support methods. +# + +def set_owner(cursor, schema, owner): + query = 'ALTER SCHEMA %s OWNER TO "%s"' % ( + pg_quote_identifier(schema, 'schema'), owner) + cursor.execute(query) + executed_queries.append(query) + return True + + +def get_schema_info(cursor, schema): + query = ("SELECT schema_owner AS owner " + "FROM information_schema.schemata " + "WHERE schema_name = %(schema)s") + cursor.execute(query, {'schema': schema}) + return cursor.fetchone() + + +def schema_exists(cursor, schema): + query = ("SELECT schema_name FROM information_schema.schemata " + "WHERE schema_name = %(schema)s") + cursor.execute(query, {'schema': schema}) + return cursor.rowcount == 1 + + +def schema_delete(cursor, schema, cascade): + if schema_exists(cursor, schema): + query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema') + if cascade: + query += " CASCADE" + cursor.execute(query) + executed_queries.append(query) + return True + else: + return False + + +def schema_create(cursor, schema, owner): + if not schema_exists(cursor, schema): + query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')] + if owner: + query_fragments.append('AUTHORIZATION "%s"' % 
owner) + query = ' '.join(query_fragments) + cursor.execute(query) + executed_queries.append(query) + return True + else: + schema_info = get_schema_info(cursor, schema) + if owner and owner != schema_info['owner']: + return set_owner(cursor, schema, owner) + else: + return False + + +def schema_matches(cursor, schema, owner): + if not schema_exists(cursor, schema): + return False + else: + schema_info = get_schema_info(cursor, schema) + if owner and owner != schema_info['owner']: + return False + else: + return True + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + schema=dict(type="str", required=True, aliases=['name']), + owner=dict(type="str", default=""), + database=dict(type="str", default="postgres", aliases=["db", "login_db"]), + cascade_drop=dict(type="bool", default=False), + state=dict(type="str", default="present", choices=["absent", "present"]), + session_role=dict(type="str"), + trust_input=dict(type="bool", default=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + schema = module.params["schema"] + owner = module.params["owner"] + state = module.params["state"] + cascade_drop = module.params["cascade_drop"] + session_role = module.params["session_role"] + trust_input = module.params["trust_input"] + + if not trust_input: + # Check input for potentially dangerous elements: + check_input(module, schema, owner, session_role) + + changed = False + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + try: + if module.check_mode: + if state == "absent": + changed = not schema_exists(cursor, schema) + elif state == "present": + changed = not schema_matches(cursor, schema, owner) + module.exit_json(changed=changed, schema=schema) + + if state == "absent": + 
try: + changed = schema_delete(cursor, schema, cascade_drop) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + elif state == "present": + try: + changed = schema_create(cursor, schema, owner) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except NotSupportedError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + except SystemExit: + # Avoid catching this on Python 2.4 + raise + except Exception as e: + module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc()) + + db_connection.close() + module.exit_json(changed=changed, schema=schema, queries=executed_queries) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py new file mode 100644 index 00000000..229068ee --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py @@ -0,0 +1,628 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_sequence +short_description: Create, drop, or alter a PostgreSQL sequence +description: +- Allows to create, drop or change the definition of a sequence generator. +options: + sequence: + description: + - The name of the sequence. + required: true + type: str + aliases: + - name + state: + description: + - The sequence state. + - If I(state=absent) other options will be ignored except of I(name) and + I(schema). 
+ default: present + choices: [ absent, present ] + type: str + data_type: + description: + - Specifies the data type of the sequence. Valid types are bigint, integer, + and smallint. bigint is the default. The data type determines the default + minimum and maximum values of the sequence. For more info see the + documentation + U(https://www.postgresql.org/docs/current/sql-createsequence.html). + - Supported from PostgreSQL 10. + choices: [ bigint, integer, smallint ] + type: str + increment: + description: + - Increment specifies which value is added to the current sequence value + to create a new value. + - A positive value will make an ascending sequence, a negative one a + descending sequence. The default value is 1. + type: int + minvalue: + description: + - Minvalue determines the minimum value a sequence can generate. The + default for an ascending sequence is 1. The default for a descending + sequence is the minimum value of the data type. + type: int + aliases: + - min + maxvalue: + description: + - Maxvalue determines the maximum value for the sequence. The default for + an ascending sequence is the maximum + value of the data type. The default for a descending sequence is -1. + type: int + aliases: + - max + start: + description: + - Start allows the sequence to begin anywhere. The default starting value + is I(minvalue) for ascending sequences and I(maxvalue) for descending + ones. + type: int + cache: + description: + - Cache specifies how many sequence numbers are to be preallocated and + stored in memory for faster access. The minimum value is 1 (only one + value can be generated at a time, i.e., no cache), and this is also + the default. + type: int + cycle: + description: + - The cycle option allows the sequence to wrap around when the I(maxvalue) + or I(minvalue) has been reached by an ascending or descending sequence + respectively. If the limit is reached, the next number generated will be + the minvalue or maxvalue, respectively. 
+ - If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
+ has reached its maximum value will return an error. False (NO CYCLE) is
+ the default.
+ type: bool
+ default: no
+ cascade:
+ description:
+ - Automatically drop objects that depend on the sequence, and in turn all
+ objects that depend on those objects.
+ - Ignored if I(state=present).
+ - Only used with I(state=absent).
+ type: bool
+ default: no
+ rename_to:
+ description:
+ - The new name for the I(sequence).
+ - Works only for existing sequences.
+ type: str
+ owner:
+ description:
+ - Set the owner for the I(sequence).
+ type: str
+ schema:
+ description:
+ - The schema of the I(sequence). This is used to create and relocate
+ a I(sequence) in the given schema.
+ default: public
+ type: str
+ newschema:
+ description:
+ - The new schema for the I(sequence). Will be used for moving a
+ I(sequence) to another I(schema).
+ - Works only for existing sequences.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified I(session_role)
+ must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the I(session_role) were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - database
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(sequence), I(schema), I(rename_to),
+ I(owner), I(newschema), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supports C(check_mode).
+- If you do not pass db parameter, sequence will be created in the database
+ named postgres.
+seealso:
+- module: community.postgresql.postgresql_table
+- module: community.postgresql.postgresql_owner
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_tablespace
+- name: CREATE SEQUENCE reference
+ description: Complete reference of the CREATE SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsequence.html
+- name: ALTER SEQUENCE reference
+ description: Complete reference of the ALTER SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersequence.html
+- name: DROP SEQUENCE reference
+ description: Complete reference of the DROP SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsequence.html
+author:
+- Tobias Birkefeld (@tcraxs)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create an ascending bigint sequence called foobar in the default
+ database
+ community.postgresql.postgresql_sequence:
+ name: foobar
+
+- name: Create an ascending integer sequence called foobar, starting at 101
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ data_type: integer
+ start: 101
+
+- name: Create a descending sequence called foobar, starting at 101 and
+ preallocated 10 sequence numbers in cache
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ increment: -1
+ cache: 10
+ start: 101
+
+- name: Create an ascending sequence called foobar, which cycles between 1 and 10
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ cycle: yes
+ min: 1
+ max: 10
+
+- name: Create an ascending bigint sequence called foobar in the default
+ database with owner foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Rename an existing sequence named foo to bar
+ community.postgresql.postgresql_sequence:
+ name: foo
+ rename_to: bar
+
+- name: Change the schema of an existing sequence to 
foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ newschema: foobar
+
+- name: Change the owner of an existing sequence to foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Drop a sequence called foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ state: absent
+
+- name: Drop a sequence called foobar with cascade
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ cascade: yes
+ state: absent
+'''
+
+RETURN = r'''
+state:
+ description: Sequence state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+sequence:
+ description: Sequence name.
+ returned: always
+ type: str
+ sample: 'foobar'
+queries:
+ description: List of queries that the module tried to execute.
+ returned: always
+ type: list
+ sample: [ "CREATE SEQUENCE \"foo\"" ]
+schema:
+ description: Name of the schema of the sequence.
+ returned: always
+ type: str
+ sample: 'foo'
+data_type:
+ description: Shows the current data type of the sequence.
+ returned: always
+ type: str
+ sample: 'bigint'
+increment:
+ description: The value of increment of the sequence. A positive value will
+ make an ascending sequence, a negative one a descending
+ sequence.
+ returned: always
+ type: int
+ sample: '-1'
+minvalue:
+ description: The value of minvalue of the sequence.
+ returned: always
+ type: int
+ sample: '1'
+maxvalue:
+ description: The value of maxvalue of the sequence.
+ returned: always
+ type: int
+ sample: '9223372036854775807'
+start:
+ description: The value of start of the sequence.
+ returned: always
+ type: int
+ sample: '12'
+cycle:
+ description: Shows whether the sequence cycles or not.
+ returned: always
+ type: str
+ sample: 'NO'
+owner:
+ description: Shows the current owner of the sequence
+ after the successful run of the task.
+ returned: always
+ type: str
+ sample: 'postgres'
+newname:
+ description: Shows the new sequence name after rename.
+ returned: on success + type: str + sample: 'barfoo' +newschema: + description: Shows the new schema of the sequence after schema change. + returned: on success + type: str + sample: 'foobar' +''' + + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +class Sequence(object): + """Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command. + + Arguments: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + + Attributes: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + changed (bool) -- something was changed after execution or not + executed_queries (list) -- executed queries + name (str) -- name of the sequence + owner (str) -- name of the owner of the sequence + schema (str) -- name of the schema (default: public) + data_type (str) -- data type of the sequence + start_value (int) -- value of the sequence start + minvalue (int) -- minimum value of the sequence + maxvalue (int) -- maximum value of the sequence + increment (int) -- increment value of the sequence + cycle (bool) -- sequence can cycle or not + new_name (str) -- name of the renamed sequence + new_schema (str) -- name of the new schema + exists (bool) -- sequence exists or not + """ + + def __init__(self, module, cursor): + self.module = module + self.cursor = cursor + self.executed_queries = [] + self.name = self.module.params['sequence'] + self.owner = '' + self.schema = self.module.params['schema'] + self.data_type = '' + 
self.start_value = '' + self.minvalue = '' + self.maxvalue = '' + self.increment = '' + self.cycle = '' + self.new_name = '' + self.new_schema = '' + self.exists = False + # Collect info + self.get_info() + + def get_info(self): + """Getter to refresh and get sequence info""" + query = ("SELECT " + "s.sequence_schema AS schemaname, " + "s.sequence_name AS sequencename, " + "pg_get_userbyid(c.relowner) AS sequenceowner, " + "s.data_type::regtype AS data_type, " + "s.start_value AS start_value, " + "s.minimum_value AS min_value, " + "s.maximum_value AS max_value, " + "s.increment AS increment_by, " + "s.cycle_option AS cycle " + "FROM information_schema.sequences s " + "JOIN pg_class c ON c.relname = s.sequence_name " + "LEFT JOIN pg_namespace n ON n.oid = c.relnamespace " + "WHERE NOT pg_is_other_temp_schema(n.oid) " + "AND c.relkind = 'S'::\"char\" " + "AND sequence_name = %(name)s " + "AND sequence_schema = %(schema)s") + + res = exec_sql(self, query, + query_params={'name': self.name, 'schema': self.schema}, + add_to_executed=False) + + if not res: + self.exists = False + return False + + if res: + self.exists = True + self.schema = res[0]['schemaname'] + self.name = res[0]['sequencename'] + self.owner = res[0]['sequenceowner'] + self.data_type = res[0]['data_type'] + self.start_value = res[0]['start_value'] + self.minvalue = res[0]['min_value'] + self.maxvalue = res[0]['max_value'] + self.increment = res[0]['increment_by'] + self.cycle = res[0]['cycle'] + + def create(self): + """Implements CREATE SEQUENCE command behavior.""" + query = ['CREATE SEQUENCE'] + query.append(self.__add_schema()) + + if self.module.params.get('data_type'): + query.append('AS %s' % self.module.params['data_type']) + + if self.module.params.get('increment'): + query.append('INCREMENT BY %s' % self.module.params['increment']) + + if self.module.params.get('minvalue'): + query.append('MINVALUE %s' % self.module.params['minvalue']) + + if self.module.params.get('maxvalue'): + 
query.append('MAXVALUE %s' % self.module.params['maxvalue']) + + if self.module.params.get('start'): + query.append('START WITH %s' % self.module.params['start']) + + if self.module.params.get('cache'): + query.append('CACHE %s' % self.module.params['cache']) + + if self.module.params.get('cycle'): + query.append('CYCLE') + + return exec_sql(self, ' '.join(query), return_bool=True) + + def drop(self): + """Implements DROP SEQUENCE command behavior.""" + query = ['DROP SEQUENCE'] + query.append(self.__add_schema()) + + if self.module.params.get('cascade'): + query.append('CASCADE') + + return exec_sql(self, ' '.join(query), return_bool=True) + + def rename(self): + """Implements ALTER SEQUENCE RENAME TO command behavior.""" + query = ['ALTER SEQUENCE'] + query.append(self.__add_schema()) + query.append('RENAME TO "%s"' % self.module.params['rename_to']) + + return exec_sql(self, ' '.join(query), return_bool=True) + + def set_owner(self): + """Implements ALTER SEQUENCE OWNER TO command behavior.""" + query = ['ALTER SEQUENCE'] + query.append(self.__add_schema()) + query.append('OWNER TO "%s"' % self.module.params['owner']) + + return exec_sql(self, ' '.join(query), return_bool=True) + + def set_schema(self): + """Implements ALTER SEQUENCE SET SCHEMA command behavior.""" + query = ['ALTER SEQUENCE'] + query.append(self.__add_schema()) + query.append('SET SCHEMA "%s"' % self.module.params['newschema']) + + return exec_sql(self, ' '.join(query), return_bool=True) + + def __add_schema(self): + return '"%s"."%s"' % (self.schema, self.name) + + +# =========================================== +# Module execution. 
+# + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + sequence=dict(type='str', required=True, aliases=['name']), + state=dict(type='str', default='present', choices=['absent', 'present']), + data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']), + increment=dict(type='int'), + minvalue=dict(type='int', aliases=['min']), + maxvalue=dict(type='int', aliases=['max']), + start=dict(type='int'), + cache=dict(type='int'), + cycle=dict(type='bool', default=False), + schema=dict(type='str', default='public'), + cascade=dict(type='bool', default=False), + rename_to=dict(type='str'), + owner=dict(type='str'), + newschema=dict(type='str'), + db=dict(type='str', default='', aliases=['login_db', 'database']), + session_role=dict(type='str'), + trust_input=dict(type="bool", default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['rename_to', 'data_type'], + ['rename_to', 'increment'], + ['rename_to', 'minvalue'], + ['rename_to', 'maxvalue'], + ['rename_to', 'start'], + ['rename_to', 'cache'], + ['rename_to', 'cycle'], + ['rename_to', 'cascade'], + ['rename_to', 'owner'], + ['rename_to', 'newschema'], + ['cascade', 'data_type'], + ['cascade', 'increment'], + ['cascade', 'minvalue'], + ['cascade', 'maxvalue'], + ['cascade', 'start'], + ['cascade', 'cache'], + ['cascade', 'cycle'], + ['cascade', 'owner'], + ['cascade', 'newschema'], + ] + ) + + if not module.params["trust_input"]: + check_input( + module, + module.params['sequence'], + module.params['schema'], + module.params['rename_to'], + module.params['owner'], + module.params['newschema'], + module.params['session_role'], + ) + + # Note: we don't need to check mutually exclusive params here, because they are + # checked automatically by AnsibleModule (mutually_exclusive=[] list above). 
+ + # Change autocommit to False if check_mode: + autocommit = not module.check_mode + # Connect to DB and make cursor object: + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=autocommit) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ############## + # Create the object and do main job: + data = Sequence(module, cursor) + + # Set defaults: + changed = False + + # Create new sequence + if not data.exists and module.params['state'] == 'present': + if module.params.get('rename_to'): + module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence']) + if module.params.get('newschema'): + module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence']) + + changed = data.create() + + # Drop non-existing sequence + elif not data.exists and module.params['state'] == 'absent': + # Nothing to do + changed = False + + # Drop existing sequence + elif data.exists and module.params['state'] == 'absent': + changed = data.drop() + + # Rename sequence + if data.exists and module.params.get('rename_to'): + if data.name != module.params['rename_to']: + changed = data.rename() + if changed: + data.new_name = module.params['rename_to'] + + # Refresh information + if module.params['state'] == 'present': + data.get_info() + + # Change owner, schema and settings + if module.params['state'] == 'present' and data.exists: + # change owner + if module.params.get('owner'): + if data.owner != module.params['owner']: + changed = data.set_owner() + + # Set schema + if module.params.get('newschema'): + if data.schema != module.params['newschema']: + changed = data.set_schema() + if changed: + data.new_schema = module.params['newschema'] + + # Rollback if it's possible and check_mode: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + cursor.close() + db_connection.close() + + # Make return 
values: + kw = dict( + changed=changed, + state='present', + sequence=data.name, + queries=data.executed_queries, + schema=data.schema, + data_type=data.data_type, + increment=data.increment, + minvalue=data.minvalue, + maxvalue=data.maxvalue, + start=data.start_value, + cycle=data.cycle, + owner=data.owner, + ) + + if module.params['state'] == 'present': + if data.new_name: + kw['newname'] = data.new_name + if data.new_schema: + kw['newschema'] = data.new_schema + + elif module.params['state'] == 'absent': + kw['state'] = 'absent' + + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py new file mode 100644 index 00000000..4e909a3b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py @@ -0,0 +1,480 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_set +short_description: Change a PostgreSQL server configuration parameter +description: + - Allows to change a PostgreSQL server configuration parameter. + - The module uses ALTER SYSTEM command and applies changes by reload server configuration. + - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster. + - It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file. + - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file, + which is read in addition to postgresql.conf. 
+ - The module allows to reset parameter to boot_val (cluster initial value) by I(reset=yes) or remove parameter + string from postgresql.auto.conf and reload I(value=default) (for settings with postmaster context restart is required). + - After change you can see in the ansible output the previous and + the new parameter value and other information using returned values and M(ansible.builtin.debug) module. +options: + name: + description: + - Name of PostgreSQL server parameter. + type: str + required: true + value: + description: + - Parameter value to set. + - To remove parameter string from postgresql.auto.conf and + reload the server configuration you must pass I(value=default). + With I(value=default) the playbook always returns changed is true. + type: str + reset: + description: + - Restore parameter to initial state (boot_val). Mutually exclusive with I(value). + type: bool + default: false + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + db: + description: + - Name of database to connect. + type: str + aliases: + - login_db + trust_input: + description: + - If C(no), check whether values of parameters are potentially dangerous. + - It makes sense to use C(no) only when SQL injections are possible. + type: bool + default: yes + version_added: '0.2.0' +notes: +- Supported version of PostgreSQL is 9.4 and later. +- Supports C(check_mode). +- Pay attention, change setting with 'postmaster' context can return changed is true + when actually nothing changes because the same value may be presented in + several different form, for example, 1024MB, 1GB, etc. However in pg_settings + system view it can be defined like 131072 number of 8kB pages. 
+ The final check of the parameter value cannot compare it because the server was + not restarted and the value in pg_settings is not updated yet. +- For some parameters restart of PostgreSQL server is required. + See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html). +seealso: +- module: community.postgresql.postgresql_info +- name: PostgreSQL server configuration + description: General information about PostgreSQL server configuration. + link: https://www.postgresql.org/docs/current/runtime-config.html +- name: PostgreSQL view pg_settings reference + description: Complete reference of the pg_settings view documentation. + link: https://www.postgresql.org/docs/current/view-pg-settings.html +- name: PostgreSQL ALTER SYSTEM command reference + description: Complete reference of the ALTER SYSTEM command documentation. + link: https://www.postgresql.org/docs/current/sql-altersystem.html +author: +- Andrew Klychkov (@Andersson007) +extends_documentation_fragment: +- community.postgresql.postgres + +''' + +EXAMPLES = r''' +- name: Restore wal_keep_segments parameter to initial state + community.postgresql.postgresql_set: + name: wal_keep_segments + reset: yes + +# Set work_mem parameter to 32MB and show what's been changed and restart is required or not +# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False") +- name: Set work mem parameter + community.postgresql.postgresql_set: + name: work_mem + value: 32mb + register: set + +- name: Print the result if the setting changed + ansible.builtin.debug: + msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}" + when: set.changed +# Ensure that the restart of PostgreSQL server must be required for some parameters. +# In this situation you see the same parameter in prev_val_pretty and value_pretty, but 'changed=True' +# (If you passed the value that was different from the current server setting). 
+ +- name: Set log_min_duration_statement parameter to 1 second + community.postgresql.postgresql_set: + name: log_min_duration_statement + value: 1s + +- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf) + community.postgresql.postgresql_set: + name: wal_log_hints + value: default +''' + +RETURN = r''' +name: + description: Name of PostgreSQL server parameter. + returned: always + type: str + sample: 'shared_buffers' +restart_required: + description: Information about parameter current state. + returned: always + type: bool + sample: true +prev_val_pretty: + description: Information about previous state of the parameter. + returned: always + type: str + sample: '4MB' +value_pretty: + description: Information about current state of the parameter. + returned: always + type: str + sample: '64MB' +value: + description: + - Dictionary that contains the current parameter value (at the time of playbook finish). + - Pay attention that for real change some parameters restart of PostgreSQL server is required. + - Returns the current value in the check mode. + returned: always + type: dict + sample: { "value": 67108864, "unit": "b" } +context: + description: + - PostgreSQL setting context. 
+ returned: always + type: str + sample: user +''' + +try: + from psycopg2.extras import DictCursor +except Exception: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from copy import deepcopy + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_native + +PG_REQ_VER = 90400 + +# To allow to set value like 1mb instead of 1MB, etc: +LOWERCASE_SIZE_UNITS = ("mb", "gb", "tb") + +# =========================================== +# PostgreSQL module specific support methods. +# + + +def param_get(cursor, module, name): + query = ("SELECT name, setting, unit, context, boot_val " + "FROM pg_settings WHERE name = %(name)s") + try: + cursor.execute(query, {'name': name}) + info = cursor.fetchall() + cursor.execute("SHOW %s" % name) + val = cursor.fetchone() + + except Exception as e: + module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e))) + + if not info: + module.fail_json(msg="No such parameter: %s. 
" + "Please check its spelling or presence in your PostgreSQL version " + "(https://www.postgresql.org/docs/current/runtime-config.html)" % name) + + raw_val = info[0][1] + unit = info[0][2] + context = info[0][3] + boot_val = info[0][4] + + if val[0] == 'True': + val[0] = 'on' + elif val[0] == 'False': + val[0] = 'off' + + if unit == 'kB': + if int(raw_val) > 0: + raw_val = int(raw_val) * 1024 + if int(boot_val) > 0: + boot_val = int(boot_val) * 1024 + + unit = 'b' + + elif unit == 'MB': + if int(raw_val) > 0: + raw_val = int(raw_val) * 1024 * 1024 + if int(boot_val) > 0: + boot_val = int(boot_val) * 1024 * 1024 + + unit = 'b' + + return (val[0], raw_val, unit, boot_val, context) + + +def pretty_to_bytes(pretty_val): + # The function returns a value in bytes + # if the value contains 'B', 'kB', 'MB', 'GB', 'TB'. + # Otherwise it returns the passed argument. + + # It's sometimes possible to have an empty values + if not pretty_val: + return pretty_val + + # If the first char is not a digit, it does not make sense + # to parse further, so just return a passed value + if not pretty_val[0].isdigit(): + return pretty_val + + # If the last char is not an alphabetical symbol, it means that + # it does not contain any suffixes, so no sense to parse further + if not pretty_val[-1].isalpha(): + return pretty_val + + # Extract digits + num_part = [] + for c in pretty_val: + # When we reach the first non-digit element, + # e.g. 
in 1024kB, stop iterating + if not c.isdigit(): + break + else: + num_part.append(c) + + num_part = ''.join(num_part) + + val_in_bytes = None + + if len(pretty_val) >= 2: + if 'kB' in pretty_val[-2:]: + val_in_bytes = num_part * 1024 + + elif 'MB' in pretty_val[-2:]: + val_in_bytes = num_part * 1024 * 1024 + + elif 'GB' in pretty_val[-2:]: + val_in_bytes = num_part * 1024 * 1024 * 1024 + + elif 'TB' in pretty_val[-2:]: + val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024 + + # For cases like "1B" + if not val_in_bytes and 'B' in pretty_val[-1]: + val_in_bytes = num_part + + if val_in_bytes is not None: + return val_in_bytes + else: + return pretty_val + + +def param_set(cursor, module, name, value, context): + try: + if str(value).lower() == 'default': + query = "ALTER SYSTEM SET %s = DEFAULT" % name + else: + query = "ALTER SYSTEM SET %s = '%s'" % (name, value) + cursor.execute(query) + + if context != 'postmaster': + cursor.execute("SELECT pg_reload_conf()") + + except Exception as e: + module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e))) + + return True + + +# =========================================== +# Module execution. 
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + name=dict(type='str', required=True), + db=dict(type='str', aliases=['login_db']), + value=dict(type='str'), + reset=dict(type='bool', default=False), + session_role=dict(type='str'), + trust_input=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + name = module.params['name'] + value = module.params['value'] + reset = module.params['reset'] + session_role = module.params['session_role'] + trust_input = module.params['trust_input'] + + if not trust_input: + # Check input for potentially dangerous elements: + check_input(module, name, value, session_role) + + if value: + # Convert a value like 1mb (Postgres does not support) to 1MB, etc: + if len(value) > 2 and value[:-2].isdigit() and value[-2:] in LOWERCASE_SIZE_UNITS: + value = value.upper() + + # Convert a value like 1b (Postgres does not support) to 1B: + elif len(value) > 1 and ('b' in value[-1] and value[:-1].isdigit()): + value = value.upper() + + if value is not None and reset: + module.fail_json(msg="%s: value and reset params are mutually exclusive" % name) + + if value is None and not reset: + module.fail_json(msg="%s: at least one of value or reset param must be specified" % name) + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + kw = {} + # Check server version (needs 9.4 or later): + ver = db_connection.server_version + if ver < PG_REQ_VER: + module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER)) + kw = dict( + changed=False, + restart_required=False, + value_pretty="", + prev_val_pretty="", + value={"value": "", "unit": ""}, + ) + kw['name'] = name + db_connection.close() + module.exit_json(**kw) + + # Set default returned values: + 
restart_required = False + changed = False + kw['name'] = name + kw['restart_required'] = False + + # Get info about param state: + res = param_get(cursor, module, name) + current_value = res[0] + raw_val = res[1] + unit = res[2] + boot_val = res[3] + context = res[4] + + if value == 'True': + value = 'on' + elif value == 'False': + value = 'off' + + kw['prev_val_pretty'] = current_value + kw['value_pretty'] = deepcopy(kw['prev_val_pretty']) + kw['context'] = context + + # Do job + if context == "internal": + module.fail_json(msg="%s: cannot be changed (internal context). See " + "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name) + + if context == "postmaster": + restart_required = True + + # If check_mode, just compare and exit: + if module.check_mode: + if pretty_to_bytes(value) == pretty_to_bytes(current_value): + kw['changed'] = False + + else: + kw['value_pretty'] = value + kw['changed'] = True + + # Anyway returns current raw value in the check_mode: + kw['value'] = dict( + value=raw_val, + unit=unit, + ) + kw['restart_required'] = restart_required + module.exit_json(**kw) + + # Set param (value can be an empty string): + if value is not None and value != current_value: + changed = param_set(cursor, module, name, value, context) + + kw['value_pretty'] = value + + # Reset param: + elif reset: + if raw_val == boot_val: + # nothing to change, exit: + kw['value'] = dict( + value=raw_val, + unit=unit, + ) + module.exit_json(**kw) + + changed = param_set(cursor, module, name, boot_val, context) + + cursor.close() + db_connection.close() + + # Reconnect and recheck current value: + if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'): + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + res = param_get(cursor, module, name) + # f_ means 'final' + f_value = res[0] + f_raw_val = res[1] + + if raw_val == f_raw_val: + changed = False + + 
else:
+            changed = True
+
+        kw['value_pretty'] = f_value
+        kw['value'] = dict(
+            value=f_raw_val,
+            unit=unit,
+        )
+
+        cursor.close()
+        db_connection.close()
+
+    kw['changed'] = changed
+    kw['restart_required'] = restart_required
+
+    if restart_required and changed:
+        module.warn("Restart of PostgreSQL is required for setting %s" % name)
+
+    module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py
new file mode 100644
index 00000000..594a0ee8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: postgresql_slot
+short_description: Add or remove replication slots from a PostgreSQL database
+description:
+- Add or remove physical or logical replication slots from a PostgreSQL database.
+
+options:
+  name:
+    description:
+    - Name of the replication slot to add or remove.
+    type: str
+    required: yes
+    aliases:
+    - slot_name
+  slot_type:
+    description:
+    - Slot type.
+    type: str
+    default: physical
+    choices: [ logical, physical ]
+  state:
+    description:
+    - The slot state.
+    - I(state=present) implies the slot must be present in the system.
+    - I(state=absent) implies the slot must be removed (dropped) from the system.
+ type: str + default: present + choices: [ absent, present ] + immediately_reserve: + description: + - Optional parameter that when C(yes) specifies that the LSN for this replication slot be reserved + immediately, otherwise the default, C(no), specifies that the LSN is reserved on the first connection + from a streaming replication client. + - Is available from PostgreSQL version 9.6. + - Uses only with I(slot_type=physical). + - Mutually exclusive with I(slot_type=logical). + type: bool + default: no + output_plugin: + description: + - All logical slots must indicate which output plugin decoder they're using. + - This parameter does not apply to physical slots. + - It will be ignored with I(slot_type=physical). + type: str + default: "test_decoding" + db: + description: + - Name of database to connect to. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + trust_input: + description: + - If C(no), check the value of I(session_role) is potentially dangerous. + - It makes sense to use C(no) only when SQL injections via I(session_role) are possible. + type: bool + default: yes + version_added: '0.2.0' + +notes: +- Physical replication slots were introduced to PostgreSQL with version 9.4, + while logical replication slots were added beginning with version 10.0. +- Supports C(check_mode). + +seealso: +- name: PostgreSQL pg_replication_slots view reference + description: Complete reference of the PostgreSQL pg_replication_slots view. + link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html +- name: PostgreSQL streaming replication protocol reference + description: Complete reference of the PostgreSQL streaming replication protocol documentation. 
+ link: https://www.postgresql.org/docs/current/protocol-replication.html +- name: PostgreSQL logical replication protocol reference + description: Complete reference of the PostgreSQL logical replication protocol documentation. + link: https://www.postgresql.org/docs/current/protocol-logical-replication.html + +author: +- John Scalia (@jscalia) +- Andrew Klychkov (@Andersson007) +- Thomas O'Donnell (@andytom) +extends_documentation_fragment: +- community.postgresql.postgres + +''' + +EXAMPLES = r''' +- name: Create physical_one physical slot if doesn't exist + become_user: postgres + community.postgresql.postgresql_slot: + slot_name: physical_one + db: ansible + +- name: Remove physical_one slot if exists + become_user: postgres + community.postgresql.postgresql_slot: + slot_name: physical_one + db: ansible + state: absent + +- name: Create logical_one logical slot to the database acme if doesn't exist + community.postgresql.postgresql_slot: + name: logical_slot_one + slot_type: logical + state: present + output_plugin: custom_decoder_one + db: "acme" + +- name: Remove logical_one slot if exists from the cluster running on another host and non-standard port + community.postgresql.postgresql_slot: + name: logical_one + login_host: mydatabase.example.org + port: 5433 + login_user: ourSuperuser + login_password: thePassword + state: absent +''' + +RETURN = r''' +name: + description: Name of the slot. + returned: always + type: str + sample: "physical_one" +queries: + description: List of executed queries. 
+ returned: always + type: str + sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ] +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +# =========================================== +# PostgreSQL module specific support methods. +# + +class PgSlot(object): + def __init__(self, module, cursor, name): + self.module = module + self.cursor = cursor + self.name = name + self.exists = False + self.kind = '' + self.__slot_exists() + self.changed = False + self.executed_queries = [] + + def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False): + if self.exists: + if self.kind == kind: + return False + else: + self.module.warn("slot with name '%s' already exists " + "but has another type '%s'" % (self.name, self.kind)) + return False + + if just_check: + return None + + if kind == 'physical': + # Check server version (needs for immedately_reserverd needs 9.6+): + if self.cursor.connection.server_version < 96000: + query = "SELECT pg_create_physical_replication_slot(%(name)s)" + + else: + query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)" + + self.changed = exec_sql(self, query, + query_params={'name': self.name, 'i_reserve': immediately_reserve}, + return_bool=True) + + elif kind == 'logical': + query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)" + self.changed = exec_sql(self, query, + query_params={'name': self.name, 'o_plugin': output_plugin}, return_bool=True) + + def drop(self): + if not self.exists: + 
return False + + query = "SELECT pg_drop_replication_slot(%(name)s)" + self.changed = exec_sql(self, query, query_params={'name': self.name}, return_bool=True) + + def __slot_exists(self): + query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s" + res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False) + if res: + self.exists = True + self.kind = res[0][0] + + +# =========================================== +# Module execution. +# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + db=dict(type="str", aliases=["login_db"]), + name=dict(type="str", required=True, aliases=["slot_name"]), + slot_type=dict(type="str", default="physical", choices=["logical", "physical"]), + immediately_reserve=dict(type="bool", default=False), + session_role=dict(type="str"), + output_plugin=dict(type="str", default="test_decoding"), + state=dict(type="str", default="present", choices=["absent", "present"]), + trust_input=dict(type="bool", default=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + name = module.params["name"] + slot_type = module.params["slot_type"] + immediately_reserve = module.params["immediately_reserve"] + state = module.params["state"] + output_plugin = module.params["output_plugin"] + + if not module.params["trust_input"]: + check_input(module, module.params['session_role']) + + if immediately_reserve and slot_type == 'logical': + module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive") + + # When slot_type is logical and parameter db is not passed, + # the default database will be used to create the slot and + # the user should know about this. + # When the slot type is physical, + # it doesn't matter which database will be used + # because physical slots are global objects. 
+ if slot_type == 'logical': + warn_db_default = True + else: + warn_db_default = False + + conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ################################## + # Create an object and do main job + pg_slot = PgSlot(module, cursor, name) + + changed = False + + if module.check_mode: + if state == "present": + if not pg_slot.exists: + changed = True + + pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True) + + elif state == "absent": + if pg_slot.exists: + changed = True + else: + if state == "absent": + pg_slot.drop() + + elif state == "present": + pg_slot.create(slot_type, immediately_reserve, output_plugin) + + changed = pg_slot.changed + + db_connection.close() + module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py new file mode 100644 index 00000000..037f94af --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py @@ -0,0 +1,718 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: postgresql_subscription +short_description: Add, update, or remove PostgreSQL subscription +description: +- Add, update, or remove PostgreSQL subscription. 
+version_added: '0.2.0' + +options: + name: + description: + - Name of the subscription to add, update, or remove. + type: str + required: yes + db: + description: + - Name of the database to connect to and where + the subscription state will be changed. + aliases: [ login_db ] + type: str + required: yes + state: + description: + - The subscription state. + - C(present) implies that if I(name) subscription doesn't exist, it will be created. + - C(absent) implies that if I(name) subscription exists, it will be removed. + - C(refresh) implies that if I(name) subscription exists, it will be refreshed. + Fetch missing table information from publisher. Always returns ``changed`` is ``True``. + This will start replication of tables that were added to the subscribed-to publications + since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION. + The existing data in the publications that are being subscribed to + should be copied once the replication starts. + - For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html). + type: str + choices: [ absent, present, refresh ] + default: present + owner: + description: + - Subscription owner. + - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role). + - Ignored when I(state) is not C(present). + type: str + publications: + description: + - The publication names on the publisher to use for the subscription. + - Ignored when I(state) is not C(present). + type: list + elements: str + connparams: + description: + - The connection dict param-value to connect to the publisher. + - For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). + - Ignored when I(state) is not C(present). + type: dict + cascade: + description: + - Drop subscription dependencies. Has effect with I(state=absent) only. + - Ignored when I(state) is not C(absent). 
+ type: bool + default: false + subsparams: + description: + - Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc. + - For update the subscription allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name). + - See available parameters to create a new subscription + on U(https://www.postgresql.org/docs/current/sql-createsubscription.html). + - Ignored when I(state) is not C(present). + type: dict + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + version_added: '0.2.0' + trust_input: + description: + - If C(no), check whether values of parameters I(name), I(publications), I(owner), + I(session_role), I(connparams), I(subsparams) are potentially dangerous. + - It makes sense to use C(yes) only when SQL injections via the parameters are possible. + type: bool + default: yes + version_added: '0.2.0' + +notes: +- PostgreSQL version must be 10 or greater. +- Supports C(check_mode). + +seealso: +- module: community.postgresql.postgresql_publication +- module: community.postgresql.postgresql_info +- name: CREATE SUBSCRIPTION reference + description: Complete reference of the CREATE SUBSCRIPTION command documentation. + link: https://www.postgresql.org/docs/current/sql-createsubscription.html +- name: ALTER SUBSCRIPTION reference + description: Complete reference of the ALTER SUBSCRIPTION command documentation. + link: https://www.postgresql.org/docs/current/sql-altersubscription.html +- name: DROP SUBSCRIPTION reference + description: Complete reference of the DROP SUBSCRIPTION command documentation. 
+ link: https://www.postgresql.org/docs/current/sql-dropsubscription.html + +author: +- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> + +extends_documentation_fragment: +- community.postgresql.postgres + +''' + +EXAMPLES = r''' +- name: > + Create acme subscription in mydb database using acme_publication and + the following connection parameters to connect to the publisher. + Set the subscription owner as alice. + community.postgresql.postgresql_subscription: + db: mydb + name: acme + state: present + publications: acme_publication + owner: alice + connparams: + host: 127.0.0.1 + port: 5432 + user: repl + password: replpass + dbname: mydb + +- name: Assuming that acme subscription exists, try to change conn parameters + community.postgresql.postgresql_subscription: + db: mydb + name: acme + connparams: + host: 127.0.0.1 + port: 5432 + user: repl + password: replpass + connect_timeout: 100 + +- name: Refresh acme publication + community.postgresql.postgresql_subscription: + db: mydb + name: acme + state: refresh + +- name: Drop acme subscription from mydb with dependencies (cascade=yes) + community.postgresql.postgresql_subscription: + db: mydb + name: acme + state: absent + cascade: yes + +- name: Assuming that acme subscription exists and enabled, disable the subscription + community.postgresql.postgresql_subscription: + db: mydb + name: acme + state: present + subsparams: + enabled: no +''' + +RETURN = r''' +name: + description: + - Name of the subscription. + returned: always + type: str + sample: acme +exists: + description: + - Flag indicates the subscription exists or not at the end of runtime. + returned: always + type: bool + sample: true +queries: + description: List of executed queries. + returned: always + type: str + sample: [ 'DROP SUBSCRIPTION "mysubscription"' ] +initial_state: + description: Subscription configuration at the beginning of runtime. 
+ returned: always + type: dict + sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true} +final_state: + description: Subscription configuration at the end of runtime. + returned: always + type: dict + sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true} +''' + +from copy import deepcopy + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import check_input +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils.six import iteritems + +SUPPORTED_PG_VERSION = 10000 + +SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name') + + +################################ +# Module functions and classes # +################################ + +def convert_conn_params(conn_dict): + """Converts the passed connection dictionary to string. + + Args: + conn_dict (list): Dictionary which needs to be converted. + + Returns: + Connection string. + """ + conn_list = [] + for (param, val) in iteritems(conn_dict): + conn_list.append('%s=%s' % (param, val)) + + return ' '.join(conn_list) + + +def convert_subscr_params(params_dict): + """Converts the passed params dictionary to string. + + Args: + params_dict (list): Dictionary which needs to be converted. + + Returns: + Parameters string. + """ + params_list = [] + for (param, val) in iteritems(params_dict): + if val is False: + val = 'false' + elif val is True: + val = 'true' + + params_list.append('%s = %s' % (param, val)) + + return ', '.join(params_list) + + +class PgSubscription(): + """Class to work with PostgreSQL subscription. 
+ + Args: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + name (str): The name of the subscription. + db (str): The database name the subscription will be associated with. + + Attributes: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + name (str): Name of subscription. + executed_queries (list): List of executed queries. + attrs (dict): Dict with subscription attributes. + exists (bool): Flag indicates the subscription exists or not. + """ + + def __init__(self, module, cursor, name, db): + self.module = module + self.cursor = cursor + self.name = name + self.db = db + self.executed_queries = [] + self.attrs = { + 'owner': None, + 'enabled': None, + 'synccommit': None, + 'conninfo': {}, + 'slotname': None, + 'publications': [], + } + self.empty_attrs = deepcopy(self.attrs) + self.exists = self.check_subscr() + + def get_info(self): + """Refresh the subscription information. + + Returns: + ``self.attrs``. + """ + self.exists = self.check_subscr() + return self.attrs + + def check_subscr(self): + """Check the subscription and refresh ``self.attrs`` subscription attribute. + + Returns: + True if the subscription with ``self.name`` exists, False otherwise. 
+ """ + + subscr_info = self.__get_general_subscr_info() + + if not subscr_info: + # The subscription does not exist: + self.attrs = deepcopy(self.empty_attrs) + return False + + self.attrs['owner'] = subscr_info.get('rolname') + self.attrs['enabled'] = subscr_info.get('subenabled') + self.attrs['synccommit'] = subscr_info.get('subenabled') + self.attrs['slotname'] = subscr_info.get('subslotname') + self.attrs['publications'] = subscr_info.get('subpublications') + if subscr_info.get('subconninfo'): + for param in subscr_info['subconninfo'].split(' '): + tmp = param.split('=') + try: + self.attrs['conninfo'][tmp[0]] = int(tmp[1]) + except ValueError: + self.attrs['conninfo'][tmp[0]] = tmp[1] + + return True + + def create(self, connparams, publications, subsparams, check_mode=True): + """Create the subscription. + + Args: + connparams (str): Connection string in libpq style. + publications (list): Publications on the master to use. + subsparams (str): Parameters string in WITH () clause style. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if the subscription has been created, otherwise False. + """ + query_fragments = [] + query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' " + "PUBLICATION %s" % (self.name, connparams, ', '.join(publications))) + + if subsparams: + query_fragments.append("WITH (%s)" % subsparams) + + changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode) + + return changed + + def update(self, connparams, publications, subsparams, check_mode=True): + """Update the subscription. + + Args: + connparams (str): Connection string in libpq style. + publications (list): Publications on the master to use. + subsparams (dict): Dictionary of optional parameters. 
+ + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if subscription has been updated, otherwise False. + """ + changed = False + + if connparams: + if connparams != self.attrs['conninfo']: + changed = self.__set_conn_params(convert_conn_params(connparams), + check_mode=check_mode) + + if publications: + if sorted(self.attrs['publications']) != sorted(publications): + changed = self.__set_publications(publications, check_mode=check_mode) + + if subsparams: + params_to_update = [] + + for (param, value) in iteritems(subsparams): + if param == 'enabled': + if self.attrs['enabled'] and value is False: + changed = self.enable(enabled=False, check_mode=check_mode) + elif not self.attrs['enabled'] and value is True: + changed = self.enable(enabled=True, check_mode=check_mode) + + elif param == 'synchronous_commit': + if self.attrs['synccommit'] is True and value is False: + params_to_update.append("%s = false" % param) + elif self.attrs['synccommit'] is False and value is True: + params_to_update.append("%s = true" % param) + + elif param == 'slot_name': + if self.attrs['slotname'] and self.attrs['slotname'] != value: + params_to_update.append("%s = %s" % (param, value)) + + else: + self.module.warn("Parameter '%s' is not in params supported " + "for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE)) + + if params_to_update: + changed = self.__set_params(params_to_update, check_mode=check_mode) + + return changed + + def drop(self, cascade=False, check_mode=True): + """Drop the subscription. + + Kwargs: + cascade (bool): Flag indicates that the subscription needs to be deleted + with its dependencies. + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + changed (bool): True if the subscription has been removed, otherwise False. 
+ """ + if self.exists: + query_fragments = ["DROP SUBSCRIPTION %s" % self.name] + if cascade: + query_fragments.append("CASCADE") + + return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode) + + def set_owner(self, role, check_mode=True): + """Set a subscription owner. + + Args: + role (str): Role (user) name that needs to be set as a subscription owner. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role) + return self.__exec_sql(query, check_mode=check_mode) + + def refresh(self, check_mode=True): + """Refresh publication. + + Fetches missing table info from publisher. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name + return self.__exec_sql(query, check_mode=check_mode) + + def __set_params(self, params_to_update, check_mode=True): + """Update optional subscription parameters. + + Args: + params_to_update (list): Parameters with values to update. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update)) + return self.__exec_sql(query, check_mode=check_mode) + + def __set_conn_params(self, connparams, check_mode=True): + """Update connection parameters. + + Args: + connparams (str): Connection string in libpq style. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. 
+ + Returns: + True if successful, False otherwise. + """ + query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams) + return self.__exec_sql(query, check_mode=check_mode) + + def __set_publications(self, publications, check_mode=True): + """Update publications. + + Args: + publications (list): Publications on the master to use. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications)) + return self.__exec_sql(query, check_mode=check_mode) + + def enable(self, enabled=True, check_mode=True): + """Enable or disable the subscription. + + Kwargs: + enable (bool): Flag indicates that the subscription needs + to be enabled or disabled. + check_mode (bool): If True, don't actually change anything, + just make SQL, add it to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + if enabled: + query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name + else: + query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name + + return self.__exec_sql(query, check_mode=check_mode) + + def __get_general_subscr_info(self): + """Get and return general subscription information. + + Returns: + Dict with subscription information if successful, False otherwise. 
+ """ + query = ("SELECT d.datname, r.rolname, s.subenabled, " + "s.subconninfo, s.subslotname, s.subsynccommit, " + "s.subpublications FROM pg_catalog.pg_subscription s " + "JOIN pg_catalog.pg_database d " + "ON s.subdbid = d.oid " + "JOIN pg_catalog.pg_roles AS r " + "ON s.subowner = r.oid " + "WHERE s.subname = %(name)s AND d.datname = %(db)s") + + result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False) + if result: + return result[0] + else: + return False + + def __exec_sql(self, query, check_mode=False): + """Execute SQL query. + + Note: If we need just to get information from the database, + we use ``exec_sql`` function directly. + + Args: + query (str): Query that needs to be executed. + + Kwargs: + check_mode (bool): If True, don't actually change anything, + just add ``query`` to ``self.executed_queries`` and return True. + + Returns: + True if successful, False otherwise. + """ + if check_mode: + self.executed_queries.append(query) + return True + else: + return exec_sql(self, query, return_bool=True) + + +# =========================================== +# Module execution. 
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + name=dict(type='str', required=True), + db=dict(type='str', required=True, aliases=['login_db']), + state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']), + publications=dict(type='list', elements='str'), + connparams=dict(type='dict'), + cascade=dict(type='bool', default=False), + owner=dict(type='str'), + subsparams=dict(type='dict'), + session_role=dict(type='str'), + trust_input=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + # Parameters handling: + db = module.params['db'] + name = module.params['name'] + state = module.params['state'] + publications = module.params['publications'] + cascade = module.params['cascade'] + owner = module.params['owner'] + subsparams = module.params['subsparams'] + connparams = module.params['connparams'] + session_role = module.params['session_role'] + trust_input = module.params['trust_input'] + + if not trust_input: + # Check input for potentially dangerous elements: + if not subsparams: + subsparams_str = None + else: + subsparams_str = convert_subscr_params(subsparams) + + if not connparams: + connparams_str = None + else: + connparams_str = convert_conn_params(connparams) + + check_input(module, name, publications, owner, session_role, + connparams_str, subsparams_str) + + if state == 'present' and cascade: + module.warn('parameter "cascade" is ignored when state is not absent') + + if state != 'present': + if owner: + module.warn("parameter 'owner' is ignored when state is not 'present'") + if publications: + module.warn("parameter 'publications' is ignored when state is not 'present'") + if connparams: + module.warn("parameter 'connparams' is ignored when state is not 'present'") + if subsparams: + module.warn("parameter 'subsparams' is ignored when state is not 'present'") + + # Connect to DB and make cursor object: 
+ pg_conn_params = get_conn_params(module, module.params) + # We check subscription state without DML queries execution, so set autocommit: + db_connection = connect_to_db(module, pg_conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Check version: + if cursor.connection.server_version < SUPPORTED_PG_VERSION: + module.fail_json(msg="PostgreSQL server version should be 10.0 or greater") + + # Set defaults: + changed = False + initial_state = {} + final_state = {} + + ################################### + # Create object and do rock'n'roll: + subscription = PgSubscription(module, cursor, name, db) + + if subscription.exists: + initial_state = deepcopy(subscription.attrs) + final_state = deepcopy(initial_state) + + if state == 'present': + if not subscription.exists: + if subsparams: + subsparams = convert_subscr_params(subsparams) + + if connparams: + connparams = convert_conn_params(connparams) + + changed = subscription.create(connparams, + publications, + subsparams, + check_mode=module.check_mode) + + else: + changed = subscription.update(connparams, + publications, + subsparams, + check_mode=module.check_mode) + + if owner and subscription.attrs['owner'] != owner: + changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed + + elif state == 'absent': + changed = subscription.drop(cascade, check_mode=module.check_mode) + + elif state == 'refresh': + if not subscription.exists: + module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name) + + # Always returns True: + changed = subscription.refresh(check_mode=module.check_mode) + + # Get final subscription info: + final_state = subscription.get_info() + + # Connection is not needed any more: + cursor.close() + db_connection.close() + + # Return ret values and exit: + module.exit_json(changed=changed, + name=name, + exists=subscription.exists, + queries=subscription.executed_queries, + initial_state=initial_state, + 
final_state=final_state) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py new file mode 100644 index 00000000..97194d43 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py @@ -0,0 +1,611 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_table +short_description: Create, drop, or modify a PostgreSQL table +description: +- Allows to create, drop, rename, truncate a table, or change some table attributes. +options: + table: + description: + - Table name. + required: true + aliases: + - name + type: str + state: + description: + - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged), + I(like), I(including), I(columns), I(truncate), I(storage_params) and, I(rename). + type: str + default: present + choices: [ absent, present ] + tablespace: + description: + - Set a tablespace for the table. + type: str + owner: + description: + - Set a table owner. + type: str + unlogged: + description: + - Create an unlogged table. + type: bool + default: no + like: + description: + - Create a table like another table (with similar DDL). + Mutually exclusive with I(columns), I(rename), and I(truncate). + type: str + including: + description: + - Keywords that are used with like parameter, may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL. + Needs I(like) specified. Mutually exclusive with I(columns), I(rename), and I(truncate). 
+ type: str + columns: + description: + - Columns that are needed. + type: list + elements: str + rename: + description: + - New table name. Mutually exclusive with I(tablespace), I(owner), + I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params). + type: str + truncate: + description: + - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged), + I(like), I(including), I(columns), I(rename), and I(storage_params). + type: bool + default: no + storage_params: + description: + - Storage parameters like fillfactor, autovacuum_vacuum_treshold, etc. + Mutually exclusive with I(rename) and I(truncate). + type: list + elements: str + db: + description: + - Name of database to connect and where the table will be created. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. + The specified session_role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + cascade: + description: + - Automatically drop objects that depend on the table (such as views). + Used with I(state=absent) only. + type: bool + default: no + trust_input: + description: + - If C(no), check whether values of parameters are potentially dangerous. + - It makes sense to use C(no) only when SQL injections are possible. + type: bool + default: yes + version_added: '0.2.0' +notes: +- Supports C(check_mode). +- If you do not pass db parameter, tables will be created in the database + named postgres. +- PostgreSQL allows to create columnless table, so columns param is optional. +- Unlogged tables are available from PostgreSQL server version 9.1. 
+seealso: +- module: community.postgresql.postgresql_sequence +- module: community.postgresql.postgresql_idx +- module: community.postgresql.postgresql_info +- module: community.postgresql.postgresql_tablespace +- module: community.postgresql.postgresql_owner +- module: community.postgresql.postgresql_privs +- module: community.postgresql.postgresql_copy +- name: CREATE TABLE reference + description: Complete reference of the CREATE TABLE command documentation. + link: https://www.postgresql.org/docs/current/sql-createtable.html +- name: ALTER TABLE reference + description: Complete reference of the ALTER TABLE command documentation. + link: https://www.postgresql.org/docs/current/sql-altertable.html +- name: DROP TABLE reference + description: Complete reference of the DROP TABLE command documentation. + link: https://www.postgresql.org/docs/current/sql-droptable.html +- name: PostgreSQL data types + description: Complete reference of the PostgreSQL data types documentation. + link: https://www.postgresql.org/docs/current/datatype.html +author: +- Andrei Klychkov (@Andersson007) +extends_documentation_fragment: +- community.postgresql.postgres + +''' + +EXAMPLES = r''' +- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner + community.postgresql.postgresql_table: + db: acme + name: tbl2 + like: tbl1 + owner: testuser + +- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes + community.postgresql.postgresql_table: + db: acme + table: tbl2 + like: tbl1 + including: comments, indexes + tablespace: ssd + +- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1 + community.postgresql.postgresql_table: + name: test_table + columns: + - id bigserial primary key + - num bigint + - stories text + tablespace: ssd + storage_params: + - fillfactor=10 + - autovacuum_analyze_threshold=1 + +- name: Create an unlogged table 
in schema acme + community.postgresql.postgresql_table: + name: acme.useless_data + columns: waste_id int + unlogged: true + +- name: Rename table foo to bar + community.postgresql.postgresql_table: + table: foo + rename: bar + +- name: Rename table foo from schema acme to bar + community.postgresql.postgresql_table: + name: acme.foo + rename: bar + +- name: Set owner to someuser + community.postgresql.postgresql_table: + name: foo + owner: someuser + +- name: Change tablespace of foo table to new_tablespace and set owner to new_user + community.postgresql.postgresql_table: + name: foo + tablespace: new_tablespace + owner: new_user + +- name: Truncate table foo + community.postgresql.postgresql_table: + name: foo + truncate: yes + +- name: Drop table foo from schema acme + community.postgresql.postgresql_table: + name: acme.foo + state: absent + +- name: Drop table bar cascade + community.postgresql.postgresql_table: + name: bar + state: absent + cascade: yes +''' + +RETURN = r''' +table: + description: Name of a table. + returned: always + type: str + sample: 'foo' +state: + description: Table state. + returned: always + type: str + sample: 'present' +owner: + description: Table owner. + returned: always + type: str + sample: 'postgres' +tablespace: + description: Tablespace. + returned: always + type: str + sample: 'ssd_tablespace' +queries: + description: List of executed queries. + returned: always + type: str + sample: [ 'CREATE TABLE "test_table" (id bigint)' ] +storage_params: + description: Storage parameters. 
+ returned: always + type: list + sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ] +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, + pg_quote_identifier, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +# =========================================== +# PostgreSQL module specific support methods. +# + +class Table(object): + def __init__(self, name, module, cursor): + self.name = name + self.module = module + self.cursor = cursor + self.info = { + 'owner': '', + 'tblspace': '', + 'storage_params': [], + } + self.exists = False + self.__exists_in_db() + self.executed_queries = [] + + def get_info(self): + """Getter to refresh and get table info""" + self.__exists_in_db() + + def __exists_in_db(self): + """Check table exists and refresh info""" + if "." 
in self.name: + schema = self.name.split('.')[-2] + tblname = self.name.split('.')[-1] + else: + schema = 'public' + tblname = self.name + + query = ("SELECT t.tableowner, t.tablespace, c.reloptions " + "FROM pg_tables AS t " + "INNER JOIN pg_class AS c ON c.relname = t.tablename " + "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid " + "WHERE t.tablename = %(tblname)s " + "AND n.nspname = %(schema)s") + res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema}, + add_to_executed=False) + if res: + self.exists = True + self.info = dict( + owner=res[0][0], + tblspace=res[0][1] if res[0][1] else '', + storage_params=res[0][2] if res[0][2] else [], + ) + + return True + else: + self.exists = False + return False + + def create(self, columns='', params='', tblspace='', + unlogged=False, owner=''): + """ + Create table. + If table exists, check passed args (params, tblspace, owner) and, + if they're different from current, change them. + Arguments: + params - storage params (passed by "WITH (...)" in SQL), + comma separated. + tblspace - tablespace. + owner - table owner. + unlogged - create unlogged table. + columns - column string (comma separated). 
+ """ + name = pg_quote_identifier(self.name, 'table') + + changed = False + + if self.exists: + if tblspace == 'pg_default' and self.info['tblspace'] is None: + pass # Because they have the same meaning + elif tblspace and self.info['tblspace'] != tblspace: + self.set_tblspace(tblspace) + changed = True + + if owner and self.info['owner'] != owner: + self.set_owner(owner) + changed = True + + if params: + param_list = [p.strip(' ') for p in params.split(',')] + + new_param = False + for p in param_list: + if p not in self.info['storage_params']: + new_param = True + + if new_param: + self.set_stor_params(params) + changed = True + + if changed: + return True + return False + + query = "CREATE" + if unlogged: + query += " UNLOGGED TABLE %s" % name + else: + query += " TABLE %s" % name + + if columns: + query += " (%s)" % columns + else: + query += " ()" + + if params: + query += " WITH (%s)" % params + + if tblspace: + query += ' TABLESPACE "%s"' % tblspace + + if exec_sql(self, query, return_bool=True): + changed = True + + if owner: + changed = self.set_owner(owner) + + return changed + + def create_like(self, src_table, including='', tblspace='', + unlogged=False, params='', owner=''): + """ + Create table like another table (with similar DDL). + Arguments: + src_table - source table. + including - corresponds to optional INCLUDING expression + in CREATE TABLE ... LIKE statement. + params - storage params (passed by "WITH (...)" in SQL), + comma separated. + tblspace - tablespace. + owner - table owner. + unlogged - create unlogged table. 
+ """ + changed = False + + name = pg_quote_identifier(self.name, 'table') + + query = "CREATE" + if unlogged: + query += " UNLOGGED TABLE %s" % name + else: + query += " TABLE %s" % name + + query += " (LIKE %s" % pg_quote_identifier(src_table, 'table') + + if including: + including = including.split(',') + for i in including: + query += " INCLUDING %s" % i + + query += ')' + + if params: + query += " WITH (%s)" % params + + if tblspace: + query += ' TABLESPACE "%s"' % tblspace + + if exec_sql(self, query, return_bool=True): + changed = True + + if owner: + changed = self.set_owner(owner) + + return changed + + def truncate(self): + query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table') + return exec_sql(self, query, return_bool=True) + + def rename(self, newname): + query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'), + pg_quote_identifier(newname, 'table')) + return exec_sql(self, query, return_bool=True) + + def set_owner(self, username): + query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.name, 'table'), username) + return exec_sql(self, query, return_bool=True) + + def drop(self, cascade=False): + if not self.exists: + return False + + query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table') + if cascade: + query += " CASCADE" + return exec_sql(self, query, return_bool=True) + + def set_tblspace(self, tblspace): + query = 'ALTER TABLE %s SET TABLESPACE "%s"' % (pg_quote_identifier(self.name, 'table'), tblspace) + return exec_sql(self, query, return_bool=True) + + def set_stor_params(self, params): + query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params) + return exec_sql(self, query, return_bool=True) + + +# =========================================== +# Module execution. 
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + table=dict(type='str', required=True, aliases=['name']), + state=dict(type='str', default='present', choices=['absent', 'present']), + db=dict(type='str', default='', aliases=['login_db']), + tablespace=dict(type='str'), + owner=dict(type='str'), + unlogged=dict(type='bool', default=False), + like=dict(type='str'), + including=dict(type='str'), + rename=dict(type='str'), + truncate=dict(type='bool', default=False), + columns=dict(type='list', elements='str'), + storage_params=dict(type='list', elements='str'), + session_role=dict(type='str'), + cascade=dict(type='bool', default=False), + trust_input=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + table = module.params['table'] + state = module.params['state'] + tablespace = module.params['tablespace'] + owner = module.params['owner'] + unlogged = module.params['unlogged'] + like = module.params['like'] + including = module.params['including'] + newname = module.params['rename'] + storage_params = module.params['storage_params'] + truncate = module.params['truncate'] + columns = module.params['columns'] + cascade = module.params['cascade'] + session_role = module.params['session_role'] + trust_input = module.params['trust_input'] + + if not trust_input: + # Check input for potentially dangerous elements: + check_input(module, table, tablespace, owner, like, including, + newname, storage_params, columns, session_role) + + if state == 'present' and cascade: + module.warn("cascade=true is ignored when state=present") + + # Check mutual exclusive parameters: + if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including): + module.fail_json(msg="%s: state=absent is mutually exclusive with: " + "truncate, rename, columns, tablespace, " + "including, like, storage_params, 
unlogged, owner" % table) + + if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including): + module.fail_json(msg="%s: truncate is mutually exclusive with: " + "rename, columns, like, unlogged, including, " + "storage_params, owner, tablespace" % table) + + if newname and (columns or like or unlogged or storage_params or owner or tablespace or including): + module.fail_json(msg="%s: rename is mutually exclusive with: " + "columns, like, unlogged, including, " + "storage_params, owner, tablespace" % table) + + if like and columns: + module.fail_json(msg="%s: like and columns params are mutually exclusive" % table) + if including and not like: + module.fail_json(msg="%s: including param needs like param specified" % table) + + conn_params = get_conn_params(module, module.params) + db_connection = connect_to_db(module, conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + if storage_params: + storage_params = ','.join(storage_params) + + if columns: + columns = ','.join(columns) + + ############## + # Do main job: + table_obj = Table(table, module, cursor) + + # Set default returned values: + changed = False + kw = {} + kw['table'] = table + kw['state'] = '' + if table_obj.exists: + kw = dict( + table=table, + state='present', + owner=table_obj.info['owner'], + tablespace=table_obj.info['tblspace'], + storage_params=table_obj.info['storage_params'], + ) + + if state == 'absent': + changed = table_obj.drop(cascade=cascade) + + elif truncate: + changed = table_obj.truncate() + + elif newname: + changed = table_obj.rename(newname) + q = table_obj.executed_queries + table_obj = Table(newname, module, cursor) + table_obj.executed_queries = q + + elif state == 'present' and not like: + changed = table_obj.create(columns, storage_params, + tablespace, unlogged, owner) + + elif state == 'present' and like: + changed = table_obj.create_like(like, including, tablespace, + unlogged, 
storage_params) + + if changed: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + # Refresh table info for RETURN. + # Note, if table has been renamed, it gets info by newname: + table_obj.get_info() + db_connection.commit() + if table_obj.exists: + kw = dict( + table=table, + state='present', + owner=table_obj.info['owner'], + tablespace=table_obj.info['tblspace'], + storage_params=table_obj.info['storage_params'], + ) + else: + # We just change the table state here + # to keep other information about the dropped table: + kw['state'] = 'absent' + + kw['queries'] = table_obj.executed_queries + kw['changed'] = changed + db_connection.close() + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py new file mode 100644 index 00000000..397bf7b3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py @@ -0,0 +1,541 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Flavien Chantelot (@Dorn-) +# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell) +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_tablespace +short_description: Add or remove PostgreSQL tablespaces from remote hosts +description: +- Adds or removes PostgreSQL tablespaces from remote hosts. +options: + tablespace: + description: + - Name of the tablespace to add or remove. 
+ required: true + type: str + aliases: + - name + location: + description: + - Path to the tablespace directory in the file system. + - Ensure that the location exists and has right privileges. + type: path + aliases: + - path + state: + description: + - Tablespace state. + - I(state=present) implies the tablespace must be created if it doesn't exist. + - I(state=absent) implies the tablespace must be removed if present. + I(state=absent) is mutually exclusive with I(location), I(owner), i(set). + - See the Notes section for information about check mode restrictions. + type: str + default: present + choices: [ absent, present ] + owner: + description: + - Name of the role to set as an owner of the tablespace. + - If this option is not specified, the tablespace owner is a role that creates the tablespace. + type: str + set: + description: + - Dict of tablespace options to set. Supported from PostgreSQL 9.0. + - For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html). + - When reset is passed as an option's value, if the option was set previously, it will be removed. + type: dict + rename_to: + description: + - New name of the tablespace. + - The new name cannot begin with pg_, as such names are reserved for system tablespaces. + type: str + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + db: + description: + - Name of database to connect to and run queries against. + type: str + aliases: + - login_db + trust_input: + description: + - If C(no), check whether values of parameters I(tablespace), I(location), I(owner), + I(rename_to), I(session_role), I(settings_list) are potentially dangerous. 
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible. + type: bool + default: yes + version_added: '0.2.0' + +notes: +- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not + support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands + can not be run inside the transaction block. + +seealso: +- name: PostgreSQL tablespaces + description: General information about PostgreSQL tablespaces. + link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html +- name: CREATE TABLESPACE reference + description: Complete reference of the CREATE TABLESPACE command documentation. + link: https://www.postgresql.org/docs/current/sql-createtablespace.html +- name: ALTER TABLESPACE reference + description: Complete reference of the ALTER TABLESPACE command documentation. + link: https://www.postgresql.org/docs/current/sql-altertablespace.html +- name: DROP TABLESPACE reference + description: Complete reference of the DROP TABLESPACE command documentation. 
+ link: https://www.postgresql.org/docs/current/sql-droptablespace.html + +author: +- Flavien Chantelot (@Dorn-) +- Antoine Levy-Lambert (@antoinell) +- Andrew Klychkov (@Andersson007) + +extends_documentation_fragment: +- community.postgresql.postgres + +''' + +EXAMPLES = r''' +- name: Create a new tablespace called acme and set bob as an its owner + community.postgresql.postgresql_tablespace: + name: acme + owner: bob + location: /data/foo + +- name: Create a new tablespace called bar with tablespace options + community.postgresql.postgresql_tablespace: + name: bar + set: + random_page_cost: 1 + seq_page_cost: 1 + +- name: Reset random_page_cost option + community.postgresql.postgresql_tablespace: + name: bar + set: + random_page_cost: reset + +- name: Rename the tablespace from bar to pcie_ssd + community.postgresql.postgresql_tablespace: + name: bar + rename_to: pcie_ssd + +- name: Drop tablespace called bloat + community.postgresql.postgresql_tablespace: + name: bloat + state: absent +''' + +RETURN = r''' +queries: + description: List of queries that was tried to be executed. + returned: always + type: str + sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ] +tablespace: + description: Tablespace name. + returned: always + type: str + sample: 'ssd' +owner: + description: Tablespace owner. + returned: always + type: str + sample: 'Bob' +options: + description: Tablespace options. + returned: always + type: dict + sample: { 'random_page_cost': 1, 'seq_page_cost': 1 } +location: + description: Path to the tablespace in the file system. + returned: always + type: str + sample: '/incredible/fast/ssd' +newname: + description: New tablespace name. + returned: if existent + type: str + sample: new_ssd +state: + description: Tablespace state at the end of execution. 
+ returned: always + type: str + sample: 'present' +''' + +try: + from psycopg2 import __version__ as PSYCOPG2_VERSION + from psycopg2.extras import DictCursor + from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT + from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import iteritems + +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, + pg_quote_identifier, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) + + +class PgTablespace(object): + + """Class for working with PostgreSQL tablespaces. + + Args: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + name (str) -- name of the tablespace + + Attrs: + module (AnsibleModule) -- object of AnsibleModule class + cursor (cursor) -- cursor object of psycopg2 library + name (str) -- name of the tablespace + exists (bool) -- flag the tablespace exists in the DB or not + owner (str) -- tablespace owner + location (str) -- path to the tablespace directory in the file system + executed_queries (list) -- list of executed queries + new_name (str) -- new name for the tablespace + opt_not_supported (bool) -- flag indicates a tablespace option is supported or not + """ + + def __init__(self, module, cursor, name): + self.module = module + self.cursor = cursor + self.name = name + self.exists = False + self.owner = '' + self.settings = {} + self.location = '' + self.executed_queries = [] + self.new_name = '' + self.opt_not_supported = False + # Collect info: + self.get_info() + + def get_info(self): + """Get tablespace information.""" + # Check that 
spcoptions exists: + opt = exec_sql(self, "SELECT 1 FROM information_schema.columns " + "WHERE table_name = 'pg_tablespace' " + "AND column_name = 'spcoptions'", add_to_executed=False) + + # For 9.1 version and earlier: + location = exec_sql(self, "SELECT 1 FROM information_schema.columns " + "WHERE table_name = 'pg_tablespace' " + "AND column_name = 'spclocation'", add_to_executed=False) + if location: + location = 'spclocation' + else: + location = 'pg_tablespace_location(t.oid)' + + if not opt: + self.opt_not_supported = True + query = ("SELECT r.rolname, (SELECT Null), %s " + "FROM pg_catalog.pg_tablespace AS t " + "JOIN pg_catalog.pg_roles AS r " + "ON t.spcowner = r.oid " % location) + else: + query = ("SELECT r.rolname, t.spcoptions, %s " + "FROM pg_catalog.pg_tablespace AS t " + "JOIN pg_catalog.pg_roles AS r " + "ON t.spcowner = r.oid " % location) + + res = exec_sql(self, query + "WHERE t.spcname = %(name)s", + query_params={'name': self.name}, add_to_executed=False) + + if not res: + self.exists = False + return False + + if res[0][0]: + self.exists = True + self.owner = res[0][0] + + if res[0][1]: + # Options exist: + for i in res[0][1]: + i = i.split('=') + self.settings[i[0]] = i[1] + + if res[0][2]: + # Location exists: + self.location = res[0][2] + + def create(self, location): + """Create tablespace. + + Return True if success, otherwise, return False. + + args: + location (str) -- tablespace directory path in the FS + """ + query = ('CREATE TABLESPACE "%s" LOCATION \'%s\'' % (self.name, location)) + return exec_sql(self, query, return_bool=True) + + def drop(self): + """Drop tablespace. + + Return True if success, otherwise, return False. + """ + return exec_sql(self, 'DROP TABLESPACE "%s"' % self.name, return_bool=True) + + def set_owner(self, new_owner): + """Set tablespace owner. + + Return True if success, otherwise, return False. 
+ + args: + new_owner (str) -- name of a new owner for the tablespace" + """ + if new_owner == self.owner: + return False + + query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.name, new_owner) + return exec_sql(self, query, return_bool=True) + + def rename(self, newname): + """Rename tablespace. + + Return True if success, otherwise, return False. + + args: + newname (str) -- new name for the tablespace" + """ + query = 'ALTER TABLESPACE "%s" RENAME TO "%s"' % (self.name, newname) + self.new_name = newname + return exec_sql(self, query, return_bool=True) + + def set_settings(self, new_settings): + """Set tablespace settings (options). + + If some setting has been changed, set changed = True. + After all settings list is handling, return changed. + + args: + new_settings (list) -- list of new settings + """ + # settings must be a dict {'key': 'value'} + if self.opt_not_supported: + return False + + changed = False + + # Apply new settings: + for i in new_settings: + if new_settings[i] == 'reset': + if i in self.settings: + changed = self.__reset_setting(i) + self.settings[i] = None + + elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]): + changed = self.__set_setting("%s = '%s'" % (i, new_settings[i])) + + return changed + + def __reset_setting(self, setting): + """Reset tablespace setting. + + Return True if success, otherwise, return False. + + args: + setting (str) -- string in format "setting_name = 'setting_value'" + """ + query = 'ALTER TABLESPACE "%s" RESET (%s)' % (self.name, setting) + return exec_sql(self, query, return_bool=True) + + def __set_setting(self, setting): + """Set tablespace setting. + + Return True if success, otherwise, return False. + + args: + setting (str) -- string in format "setting_name = 'setting_value'" + """ + query = 'ALTER TABLESPACE "%s" SET (%s)' % (self.name, setting) + return exec_sql(self, query, return_bool=True) + + +# =========================================== +# Module execution. 
+# + + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + tablespace=dict(type='str', required=True, aliases=['name']), + state=dict(type='str', default="present", choices=["absent", "present"]), + location=dict(type='path', aliases=['path']), + owner=dict(type='str'), + set=dict(type='dict'), + rename_to=dict(type='str'), + db=dict(type='str', aliases=['login_db']), + session_role=dict(type='str'), + trust_input=dict(type='bool', default=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=(('positional_args', 'named_args'),), + supports_check_mode=True, + ) + + tablespace = module.params["tablespace"] + state = module.params["state"] + location = module.params["location"] + owner = module.params["owner"] + rename_to = module.params["rename_to"] + settings = module.params["set"] + session_role = module.params["session_role"] + trust_input = module.params["trust_input"] + + if state == 'absent' and (location or owner or rename_to or settings): + module.fail_json(msg="state=absent is mutually exclusive location, " + "owner, rename_to, and set") + + if not trust_input: + # Check input for potentially dangerous elements: + if not settings: + settings_list = None + else: + settings_list = ['%s = %s' % (k, v) for k, v in iteritems(settings)] + + check_input(module, tablespace, location, owner, + rename_to, session_role, settings_list) + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params, autocommit=True) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + # Change autocommit to False if check_mode: + if module.check_mode: + if PSYCOPG2_VERSION >= '2.4.2': + db_connection.set_session(autocommit=False) + else: + db_connection.set_isolation_level(READ_COMMITTED) + + # Set defaults: + autocommit = False + changed = False + + ############## + # Create PgTablespace object and do main job: + tblspace = 
PgTablespace(module, cursor, tablespace) + + # If tablespace exists with different location, exit: + if tblspace.exists and location and location != tblspace.location: + module.fail_json(msg="Tablespace '%s' exists with " + "different location '%s'" % (tblspace.name, tblspace.location)) + + # Create new tablespace: + if not tblspace.exists and state == 'present': + if rename_to: + module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace) + + if not location: + module.fail_json(msg="'location' parameter must be passed with " + "state=present if the tablespace doesn't exist") + + # Because CREATE TABLESPACE can not be run inside the transaction block: + autocommit = True + if PSYCOPG2_VERSION >= '2.4.2': + db_connection.set_session(autocommit=True) + else: + db_connection.set_isolation_level(AUTOCOMMIT) + + changed = tblspace.create(location) + + # Drop non-existing tablespace: + elif not tblspace.exists and state == 'absent': + # Nothing to do: + module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name) + + # Drop existing tablespace: + elif tblspace.exists and state == 'absent': + # Because DROP TABLESPACE can not be run inside the transaction block: + autocommit = True + if PSYCOPG2_VERSION >= '2.4.2': + db_connection.set_session(autocommit=True) + else: + db_connection.set_isolation_level(AUTOCOMMIT) + + changed = tblspace.drop() + + # Rename tablespace: + elif tblspace.exists and rename_to: + if tblspace.name != rename_to: + changed = tblspace.rename(rename_to) + + if state == 'present': + # Refresh information: + tblspace.get_info() + + # Change owner and settings: + if state == 'present' and tblspace.exists: + if owner: + changed = tblspace.set_owner(owner) + + if settings: + changed = tblspace.set_settings(settings) + + tblspace.get_info() + + # Rollback if it's possible and check_mode: + if not autocommit: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + cursor.close() 
+ db_connection.close() + + # Make return values: + kw = dict( + changed=changed, + state='present', + tablespace=tblspace.name, + owner=tblspace.owner, + queries=tblspace.executed_queries, + options=tblspace.settings, + location=tblspace.location, + ) + + if state == 'present': + kw['state'] = 'present' + + if tblspace.new_name: + kw['newname'] = tblspace.new_name + + elif state == 'absent': + kw['state'] = 'absent' + + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py new file mode 100644 index 00000000..d56c9924 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py @@ -0,0 +1,998 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_user +short_description: Create, alter, or remove a user (role) from a PostgreSQL server instance +description: +- Creates, alters, or removes a user (role) from a PostgreSQL server instance + ("cluster" in PostgreSQL terminology) and, optionally, + grants the user access to an existing database or tables. +- A user is a role with login privilege. +- You can also use it to grant or revoke user's privileges in a particular database. +- You cannot remove a user while it still has any privileges granted to it in any database. +- Set I(fail_on_user) to C(no) to make the module ignore failures when trying to remove a user. + In this case, the module reports if changes happened as usual and separately reports + whether the user has been removed or not. 
+options: + name: + description: + - Name of the user (role) to add or remove. + type: str + required: true + aliases: + - user + password: + description: + - Set the user's password, before 1.4 this was required. + - Password can be passed unhashed or hashed (MD5-hashed). + - An unhashed password is automatically hashed when saved into the + database if I(encrypted) is set, otherwise it is saved in + plain text format. + - When passing an MD5-hashed password, you must generate it with the format + C('str["md5"] + md5[ password + username ]'), resulting in a total of + 35 characters. An easy way to do this is + C(echo "md5`echo -n 'verysecretpasswordJOE' | md5sum | awk '{print $1}'`"). + - Note that if the provided password string is already in MD5-hashed + format, then it is used as-is, regardless of I(encrypted) option. + type: str + db: + description: + - Name of database to connect to and where user's permissions are granted. + type: str + aliases: + - login_db + fail_on_user: + description: + - If C(yes), fails when the user (role) cannot be removed. Otherwise just log and continue. + default: yes + type: bool + aliases: + - fail_on_role + priv: + description: + - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where + you can define the user's privileges for the database ( allowed options - 'CREATE', + 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or + for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE', + 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example + C(table:SELECT) ). Mixed example of this string: + C(CONNECT/CREATE/table1:SELECT/table2:INSERT)." + type: str + role_attr_flags: + description: + - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER." + - Note that '[NO]CREATEUSER' is deprecated. + - To create a simple role for using it like a group, use C(NOLOGIN) flag. 
+ type: str + choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB', + '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ] + session_role: + description: + - Switch to session role after connecting. + - The specified session role must be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though the session role + were the one that had logged in originally. + type: str + state: + description: + - The user (role) state. + type: str + default: present + choices: [ absent, present ] + encrypted: + description: + - Whether the password is stored hashed in the database. + - You can specify an unhashed password, and PostgreSQL ensures + the stored password is hashed when I(encrypted=yes) is set. + If you specify a hashed password, the module uses it as-is, + regardless of the setting of I(encrypted). + - "Note: Postgresql 10 and newer does not support unhashed passwords." + - Previous to Ansible 2.6, this was C(no) by default. + default: yes + type: bool + expires: + description: + - The date at which the user's password is to expire. + - If set to C('infinity'), user's password never expires. + - Note that this value must be a valid SQL date and time type. + type: str + no_password_changes: + description: + - If C(yes), does not inspect the database for password changes. + If the user already exists, skips all password related checks. + Useful when C(pg_authid) is not accessible (such as in AWS RDS). + Otherwise, makes password changes as necessary. + default: no + type: bool + conn_limit: + description: + - Specifies the user (role) connection limit. + type: int + ssl_mode: + description: + - Determines how an SSL session is negotiated with the server. + - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes. + - Default of C(prefer) matches libpq default. 
+ type: str + default: prefer + choices: [ allow, disable, prefer, require, verify-ca, verify-full ] + ca_cert: + description: + - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). + - If the file exists, verifies that the server's certificate is signed by one of these authorities. + type: str + aliases: [ ssl_rootcert ] + groups: + description: + - The list of groups (roles) that you want to grant to the user. + type: list + elements: str + comment: + description: + - Adds a comment on the user (equivalent to the C(COMMENT ON ROLE) statement). + type: str + version_added: '0.2.0' + trust_input: + description: + - If C(no), checks whether values of options I(name), I(password), I(privs), I(expires), + I(role_attr_flags), I(groups), I(comment), I(session_role) are potentially dangerous. + - It makes sense to use C(no) only when SQL injections through the options are possible. + type: bool + default: yes + version_added: '0.2.0' +notes: +- The module creates a user (role) with login privilege by default. + Use C(NOLOGIN) I(role_attr_flags) to change this behaviour. +- If you specify C(PUBLIC) as the user (role), then the privilege changes apply to all users (roles). + You may not specify password or role_attr_flags when the C(PUBLIC) user is specified. +- SCRAM-SHA-256-hashed passwords (SASL Authentication) require PostgreSQL version 10 or newer. + On the previous versions the whole hashed string is used as a password. +- 'Working with SCRAM-SHA-256-hashed passwords, be sure you use the I(environment:) variable + C(PGOPTIONS: "-c password_encryption=scram-sha-256") (see the provided example).' +- On some systems (such as AWS RDS), C(pg_authid) is not accessible, thus, the module cannot compare + the current and desired C(password). In this case, the module assumes that the passwords are + different and changes it reporting that the state has been changed. 
+ To skip all password related checks for existing users, use I(no_password_changes=yes). +- Supports ``check_mode``. +seealso: +- module: community.postgresql.postgresql_privs +- module: community.postgresql.postgresql_membership +- module: community.postgresql.postgresql_owner +- name: PostgreSQL database roles + description: Complete reference of the PostgreSQL database roles documentation. + link: https://www.postgresql.org/docs/current/user-manag.html +- name: PostgreSQL SASL Authentication + description: Complete reference of the PostgreSQL SASL Authentication. + link: https://www.postgresql.org/docs/current/sasl-authentication.html +author: +- Ansible Core Team +extends_documentation_fragment: +- community.postgresql.postgres + +''' + +EXAMPLES = r''' +- name: Connect to acme database, create django user, and grant access to database and products table + community.postgresql.postgresql_user: + db: acme + name: django + password: ceec4eif7ya + priv: "CONNECT/products:ALL" + expires: "Jan 31 2020" + +- name: Add a comment on django user + community.postgresql.postgresql_user: + db: acme + name: django + comment: This is a test user + +# Connect to default database, create rails user, set its password (MD5-hashed), +# and grant privilege to create other databases and demote rails from super user status if user exists +- name: Create rails user, set MD5-hashed password, grant privs + community.postgresql.postgresql_user: + name: rails + password: md59543f1d82624df2b31672ec0f7050460 + role_attr_flags: CREATEDB,NOSUPERUSER + +- name: Connect to acme database and remove test user privileges from there + community.postgresql.postgresql_user: + db: acme + name: test + priv: "ALL/products:ALL" + state: absent + fail_on_user: no + +- name: Connect to test database, remove test user from cluster + community.postgresql.postgresql_user: + db: test + name: test + priv: ALL + state: absent + +- name: Connect to acme database and set user's password with no expire date + 
community.postgresql.postgresql_user: + db: acme + name: django + password: mysupersecretword + priv: "CONNECT/products:ALL" + expires: infinity + +# Example privileges string format +# INSERT,UPDATE/table:SELECT/anothertable:ALL + +- name: Connect to test database and remove an existing user's password + community.postgresql.postgresql_user: + db: test + user: test + password: "" + +- name: Create user test and grant group user_ro and user_rw to it + community.postgresql.postgresql_user: + name: test + groups: + - user_ro + - user_rw + +# Create user with a cleartext password if it does not exist or update its password. +# The password will be encrypted with SCRAM algorithm (available since PostgreSQL 10) +- name: Create appclient user with SCRAM-hashed password + community.postgresql.postgresql_user: + name: appclient + password: "secret123" + environment: + PGOPTIONS: "-c password_encryption=scram-sha-256" +''' + +RETURN = r''' +queries: + description: List of executed queries. + returned: always + type: list + sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"'] +''' + +import itertools +import re +import traceback +from hashlib import md5, sha256 +import hmac +from base64 import b64decode + +try: + import psycopg2 + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + pg_quote_identifier, + SQLParseError, + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + get_conn_params, + PgMembership, + postgres_common_argument_spec, +) +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.six import iteritems +import ansible_collections.community.postgresql.plugins.module_utils.saslprep as saslprep + 
+try: + # pbkdf2_hmac is missing on python 2.6, we can safely assume, + # that postresql 10 capable instance have at least python 2.7 installed + from hashlib import pbkdf2_hmac + pbkdf2_found = True +except ImportError: + pbkdf2_found = False + + +FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION') +FLAGS_BY_VERSION = {'BYPASSRLS': 90500} + +SCRAM_SHA256_REGEX = r'^SCRAM-SHA-256\$(\d+):([A-Za-z0-9+\/=]+)\$([A-Za-z0-9+\/=]+):([A-Za-z0-9+\/=]+)$' + +VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')), + database=frozenset( + ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')), + ) + +# map to cope with idiosyncracies of SUPERUSER and LOGIN +PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole', + CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin', + REPLICATION='rolreplication', BYPASSRLS='rolbypassrls') + +executed_queries = [] + + +class InvalidFlagsError(Exception): + pass + + +class InvalidPrivsError(Exception): + pass + +# =========================================== +# PostgreSQL module specific support methods. 
+# + + +def user_exists(cursor, user): + # The PUBLIC user is a special case that is always there + if user == 'PUBLIC': + return True + query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s" + cursor.execute(query, {'user': user}) + return cursor.rowcount > 0 + + +def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit): + """Create a new database user (role).""" + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a + # literal + query_password_data = dict(password=password, expires=expires) + query = ['CREATE USER "%(user)s"' % + {"user": user}] + if password is not None and password != '': + query.append("WITH %(crypt)s" % {"crypt": encrypted}) + query.append("PASSWORD %(password)s") + if expires is not None: + query.append("VALID UNTIL %(expires)s") + if conn_limit is not None: + query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + query.append(role_attr_flags) + query = ' '.join(query) + executed_queries.append(query) + cursor.execute(query, query_password_data) + return True + + +def user_should_we_change_password(current_role_attrs, user, password, encrypted): + """Check if we should change the user's password. + + Compare the proposed password with the existing one, comparing + hashes if encrypted. If we can't access it assume yes. + """ + + if current_role_attrs is None: + # on some databases, E.g. AWS RDS instances, there is no access to + # the pg_authid relation to check the pre-existing password, so we + # just assume password is different + return True + + # Do we actually need to do anything? + pwchanging = False + if password is not None: + # Empty password means that the role shouldn't have a password, which + # means we need to check if the current password is None. 
+ if password == '': + if current_role_attrs['rolpassword'] is not None: + pwchanging = True + + # SCRAM hashes are represented as a special object, containing hash data: + # `SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey>` + # for reference, see https://www.postgresql.org/docs/current/catalog-pg-authid.html + elif current_role_attrs['rolpassword'] is not None \ + and pbkdf2_found \ + and re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']): + + r = re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']) + try: + # extract SCRAM params from rolpassword + it = int(r.group(1)) + salt = b64decode(r.group(2)) + server_key = b64decode(r.group(4)) + # we'll never need `storedKey` as it is only used for server auth in SCRAM + # storedKey = b64decode(r.group(3)) + + # from RFC5802 https://tools.ietf.org/html/rfc5802#section-3 + # SaltedPassword := Hi(Normalize(password), salt, i) + # ServerKey := HMAC(SaltedPassword, "Server Key") + normalized_password = saslprep.saslprep(to_text(password)) + salted_password = pbkdf2_hmac('sha256', to_bytes(normalized_password), salt, it) + + server_key_verifier = hmac.new(salted_password, digestmod=sha256) + server_key_verifier.update(b'Server Key') + + if server_key_verifier.digest() != server_key: + pwchanging = True + except Exception: + # We assume the password is not scram encrypted + # or we cannot check it properly, e.g. due to missing dependencies + pwchanging = True + + # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits + # 3: The size of the 'md5' prefix + # When the provided password looks like a MD5-hash, value of + # 'encrypted' is ignored. 
+ elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED': + if password != current_role_attrs['rolpassword']: + pwchanging = True + elif encrypted == 'ENCRYPTED': + hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest()) + if hashed_password != current_role_attrs['rolpassword']: + pwchanging = True + + return pwchanging + + +def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit): + """Change user password and/or attributes. Return True if changed, False otherwise.""" + changed = False + + cursor = db_connection.cursor(cursor_factory=DictCursor) + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a + # literal + if user == 'PUBLIC': + if password is not None: + module.fail_json(msg="cannot change the password for PUBLIC user") + elif role_attr_flags != '': + module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user") + else: + return False + + # Handle passwords. + if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None): + # Select password and all flag-like columns in order to verify changes. + try: + select = "SELECT * FROM pg_authid where rolname=%(user)s" + cursor.execute(select, {"user": user}) + # Grab current role attributes. 
+ current_role_attrs = cursor.fetchone() + except psycopg2.ProgrammingError: + current_role_attrs = None + db_connection.rollback() + + pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted) + + if current_role_attrs is None: + try: + # AWS RDS instances does not allow user to access pg_authid + # so try to get current_role_attrs from pg_roles tables + select = "SELECT * FROM pg_roles where rolname=%(user)s" + cursor.execute(select, {"user": user}) + # Grab current role attributes from pg_roles + current_role_attrs = cursor.fetchone() + except psycopg2.ProgrammingError as e: + db_connection.rollback() + module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e)) + + role_attr_flags_changing = False + if role_attr_flags: + role_attr_flags_dict = {} + for r in role_attr_flags.split(' '): + if r.startswith('NO'): + role_attr_flags_dict[r.replace('NO', '', 1)] = False + else: + role_attr_flags_dict[r] = True + + for role_attr_name, role_attr_value in role_attr_flags_dict.items(): + if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value: + role_attr_flags_changing = True + + if expires is not None: + cursor.execute("SELECT %s::timestamptz;", (expires,)) + expires_with_tz = cursor.fetchone()[0] + expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil') + else: + expires_changing = False + + conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit']) + + if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing: + return False + + alter = ['ALTER USER "%(user)s"' % {"user": user}] + if pwchanging: + if password != '': + alter.append("WITH %(crypt)s" % {"crypt": encrypted}) + alter.append("PASSWORD %(password)s") + else: + alter.append("WITH PASSWORD NULL") + alter.append(role_attr_flags) + elif role_attr_flags: + alter.append('WITH %s' % role_attr_flags) + if expires is not None: 
+ alter.append("VALID UNTIL %(expires)s") + if conn_limit is not None: + alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit}) + + query_password_data = dict(password=password, expires=expires) + try: + cursor.execute(' '.join(alter), query_password_data) + changed = True + except psycopg2.InternalError as e: + if e.pgcode == '25006': + # Handle errors due to read-only transactions indicated by pgcode 25006 + # ERROR: cannot execute ALTER ROLE in a read-only transaction + changed = False + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + return changed + else: + raise psycopg2.InternalError(e) + except psycopg2.NotSupportedError as e: + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + + elif no_password_changes and role_attr_flags != '': + # Grab role information from pg_roles instead of pg_authid + select = "SELECT * FROM pg_roles where rolname=%(user)s" + cursor.execute(select, {"user": user}) + # Grab current role attributes. 
+ current_role_attrs = cursor.fetchone() + + role_attr_flags_changing = False + + if role_attr_flags: + role_attr_flags_dict = {} + for r in role_attr_flags.split(' '): + if r.startswith('NO'): + role_attr_flags_dict[r.replace('NO', '', 1)] = False + else: + role_attr_flags_dict[r] = True + + for role_attr_name, role_attr_value in role_attr_flags_dict.items(): + if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value: + role_attr_flags_changing = True + + if not role_attr_flags_changing: + return False + + alter = ['ALTER USER "%(user)s"' % + {"user": user}] + if role_attr_flags: + alter.append('WITH %s' % role_attr_flags) + + try: + cursor.execute(' '.join(alter)) + except psycopg2.InternalError as e: + if e.pgcode == '25006': + # Handle errors due to read-only transactions indicated by pgcode 25006 + # ERROR: cannot execute ALTER ROLE in a read-only transaction + changed = False + module.fail_json(msg=e.pgerror, exception=traceback.format_exc()) + return changed + else: + raise psycopg2.InternalError(e) + + # Grab new role attributes. + cursor.execute(select, {"user": user}) + new_role_attrs = cursor.fetchone() + + # Detect any differences between current_ and new_role_attrs. + changed = current_role_attrs != new_role_attrs + + return changed + + +def user_delete(cursor, user): + """Try to remove a user. Returns True if successful otherwise False""" + cursor.execute("SAVEPOINT ansible_pgsql_user_delete") + try: + query = 'DROP USER "%s"' % user + executed_queries.append(query) + cursor.execute(query) + except Exception: + cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete") + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") + return False + + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") + return True + + +def has_table_privileges(cursor, user, table, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. 
+ + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_table_privileges(cursor, user, table) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) + + +def get_table_privileges(cursor, user, table): + if '.' in table: + schema, table = table.split('.', 1) + else: + schema = 'public' + query = ("SELECT privilege_type FROM information_schema.role_table_grants " + "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s") + cursor.execute(query, {'user': user, 'table': table, 'schema': schema}) + return frozenset([x[0] for x in cursor.fetchall()]) + + +def grant_table_privileges(cursor, user, table, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + query = 'GRANT %s ON TABLE %s TO "%s"' % ( + privs, pg_quote_identifier(table, 'table'), user) + executed_queries.append(query) + cursor.execute(query) + + +def revoke_table_privileges(cursor, user, table, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + query = 'REVOKE %s ON TABLE %s FROM "%s"' % ( + privs, pg_quote_identifier(table, 'table'), user) + executed_queries.append(query) + cursor.execute(query) + + +def get_database_privileges(cursor, user, db): + priv_map = { + 'C': 'CREATE', + 'T': 'TEMPORARY', + 'c': 'CONNECT', + } + query = 'SELECT datacl FROM pg_database WHERE datname = %s' + cursor.execute(query, (db,)) + datacl = cursor.fetchone()[0] + if datacl is None: + return set() + r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' 
% user, datacl) + if r is None: + return set() + o = set() + for v in r.group(1): + o.add(priv_map[v]) + return normalize_privileges(o, 'database') + + +def has_database_privileges(cursor, user, db, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. + + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_database_privileges(cursor, user, db) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) + + +def grant_database_privileges(cursor, user, db, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + if user == "PUBLIC": + query = 'GRANT %s ON DATABASE %s TO PUBLIC' % ( + privs, pg_quote_identifier(db, 'database')) + else: + query = 'GRANT %s ON DATABASE %s TO "%s"' % ( + privs, pg_quote_identifier(db, 'database'), user) + + executed_queries.append(query) + cursor.execute(query) + + +def revoke_database_privileges(cursor, user, db, privs): + # Note: priv escaped by parse_privs + privs = ', '.join(privs) + if user == "PUBLIC": + query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % ( + privs, pg_quote_identifier(db, 'database')) + else: + query = 'REVOKE %s ON DATABASE %s FROM "%s"' % ( + privs, pg_quote_identifier(db, 'database'), user) + + executed_queries.append(query) + cursor.execute(query) + + +def revoke_privileges(cursor, user, privs): + if privs is None: + return False + + revoke_funcs = dict(table=revoke_table_privileges, + database=revoke_database_privileges) + check_funcs = dict(table=has_table_privileges, + database=has_database_privileges) + + changed = False + for type_ in privs: + for name, privileges in iteritems(privs[type_]): + # Check that any of the privileges requested to be 
removed are + # currently granted to the user + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[0]: + revoke_funcs[type_](cursor, user, name, privileges) + changed = True + return changed + + +def grant_privileges(cursor, user, privs): + if privs is None: + return False + + grant_funcs = dict(table=grant_table_privileges, + database=grant_database_privileges) + check_funcs = dict(table=has_table_privileges, + database=has_database_privileges) + + changed = False + for type_ in privs: + for name, privileges in iteritems(privs[type_]): + # Check that any of the privileges requested for the user are + # currently missing + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[2]: + grant_funcs[type_](cursor, user, name, privileges) + changed = True + return changed + + +def parse_role_attrs(cursor, role_attr_flags): + """ + Parse role attributes string for user creation. + Format: + + attributes[,attributes,...] + + Where: + + attributes := CREATEDB,CREATEROLE,NOSUPERUSER,... + [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB", + "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION", + "[NO]BYPASSRLS" ] + + Note: "[NO]BYPASSRLS" role attribute introduced in 9.5 + Note: "[NO]CREATEUSER" role attribute is deprecated. 
+ + """ + flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role) + + valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor))) + valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags))) + + if not flags.issubset(valid_flags): + raise InvalidFlagsError('Invalid role_attr_flags specified: %s' % + ' '.join(flags.difference(valid_flags))) + + return ' '.join(flags) + + +def normalize_privileges(privs, type_): + new_privs = set(privs) + if 'ALL' in new_privs: + new_privs.update(VALID_PRIVS[type_]) + new_privs.remove('ALL') + if 'TEMP' in new_privs: + new_privs.add('TEMPORARY') + new_privs.remove('TEMP') + + return new_privs + + +def parse_privs(privs, db): + """ + Parse privilege string to determine permissions for database db. + Format: + + privileges[/privileges/...] + + Where: + + privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] | + TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...] + """ + if privs is None: + return privs + + o_privs = { + 'database': {}, + 'table': {} + } + for token in privs.split('/'): + if ':' not in token: + type_ = 'database' + name = db + priv_set = frozenset(x.strip().upper() + for x in token.split(',') if x.strip()) + else: + type_ = 'table' + name, privileges = token.split(':', 1) + priv_set = frozenset(x.strip().upper() + for x in privileges.split(',') if x.strip()) + + if not priv_set.issubset(VALID_PRIVS[type_]): + raise InvalidPrivsError('Invalid privs specified for %s: %s' % + (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_])))) + + priv_set = normalize_privileges(priv_set, type_) + o_privs[type_][name] = priv_set + + return o_privs + + +def get_valid_flags_by_version(cursor): + """ + Some role attributes were introduced after certain versions. We want to + compile a list of valid flags against the current Postgres version. 
+ """ + current_version = cursor.connection.server_version + + return [ + flag + for flag, version_introduced in FLAGS_BY_VERSION.items() + if current_version >= version_introduced + ] + + +def get_comment(cursor, user): + """Get user's comment.""" + query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') " + "FROM pg_catalog.pg_roles r " + "WHERE r.rolname = %(user)s") + cursor.execute(query, {'user': user}) + return cursor.fetchone()[0] + + +def add_comment(cursor, user, comment): + """Add comment on user.""" + if comment != get_comment(cursor, user): + query = 'COMMENT ON ROLE "%s" IS ' % user + cursor.execute(query + '%(comment)s', {'comment': comment}) + executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment})) + return True + else: + return False + + +# =========================================== +# Module execution. +# + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + user=dict(type='str', required=True, aliases=['name']), + password=dict(type='str', default=None, no_log=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + priv=dict(type='str', default=None), + db=dict(type='str', default='', aliases=['login_db']), + fail_on_user=dict(type='bool', default=True, aliases=['fail_on_role']), + role_attr_flags=dict(type='str', default=''), + encrypted=dict(type='bool', default=True), + no_password_changes=dict(type='bool', default=False, no_log=False), + expires=dict(type='str', default=None), + conn_limit=dict(type='int', default=None), + session_role=dict(type='str'), + groups=dict(type='list', elements='str'), + comment=dict(type='str', default=None), + trust_input=dict(type='bool', default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + user = module.params["user"] + password = module.params["password"] + state = module.params["state"] + fail_on_user = module.params["fail_on_user"] + if 
module.params['db'] == '' and module.params["priv"] is not None: + module.fail_json(msg="privileges require a database to be specified") + privs = parse_privs(module.params["priv"], module.params["db"]) + no_password_changes = module.params["no_password_changes"] + if module.params["encrypted"]: + encrypted = "ENCRYPTED" + else: + encrypted = "UNENCRYPTED" + expires = module.params["expires"] + conn_limit = module.params["conn_limit"] + role_attr_flags = module.params["role_attr_flags"] + groups = module.params["groups"] + if groups: + groups = [e.strip() for e in groups] + comment = module.params["comment"] + session_role = module.params['session_role'] + + trust_input = module.params['trust_input'] + if not trust_input: + # Check input for potentially dangerous elements: + check_input(module, user, password, privs, expires, + role_attr_flags, groups, comment, session_role) + + conn_params = get_conn_params(module, module.params, warn_db_default=False) + db_connection = connect_to_db(module, conn_params) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + try: + role_attr_flags = parse_role_attrs(cursor, role_attr_flags) + except InvalidFlagsError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + kw = dict(user=user) + changed = False + user_removed = False + + if state == "present": + if user_exists(cursor, user): + try: + changed = user_alter(db_connection, module, user, password, + role_attr_flags, encrypted, expires, no_password_changes, conn_limit) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + else: + try: + changed = user_add(cursor, user, password, + role_attr_flags, encrypted, expires, conn_limit) + except psycopg2.ProgrammingError as e: + module.fail_json(msg="Unable to add user with given requirement " + "due to : %s" % to_native(e), + exception=traceback.format_exc()) + except SQLParseError as e: + module.fail_json(msg=to_native(e), 
exception=traceback.format_exc()) + try: + changed = grant_privileges(cursor, user, privs) or changed + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + if groups: + target_roles = [] + target_roles.append(user) + pg_membership = PgMembership(module, cursor, groups, target_roles) + changed = pg_membership.grant() or changed + executed_queries.extend(pg_membership.executed_queries) + + if comment is not None: + try: + changed = add_comment(cursor, user, comment) or changed + except Exception as e: + module.fail_json(msg='Unable to add comment on role: %s' % to_native(e), + exception=traceback.format_exc()) + + else: + if user_exists(cursor, user): + if module.check_mode: + changed = True + kw['user_removed'] = True + else: + try: + changed = revoke_privileges(cursor, user, privs) + user_removed = user_delete(cursor, user) + except SQLParseError as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + changed = changed or user_removed + if fail_on_user and not user_removed: + msg = "Unable to remove user" + module.fail_json(msg=msg) + kw['user_removed'] = user_removed + + if changed: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + kw['changed'] = changed + kw['queries'] = executed_queries + module.exit_json(**kw) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py new file mode 100644 index 00000000..06eff530 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py @@ -0,0 +1,336 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: postgresql_user_obj_stat_info +short_description: Gather statistics about PostgreSQL user objects +description: +- Gathers statistics about PostgreSQL user objects. +version_added: '0.2.0' +options: + filter: + description: + - Limit the collected information by comma separated string or YAML list. + - Allowable values are C(functions), C(indexes), C(tables). + - By default, collects all subsets. + - Unsupported values are ignored. + type: list + elements: str + schema: + description: + - Restrict the output by certain schema. + type: str + db: + description: + - Name of database to connect. + type: str + aliases: + - login_db + session_role: + description: + - Switch to session_role after connecting. The specified session_role must + be a role that the current login_user is a member of. + - Permissions checking for SQL commands is carried out as though + the session_role were the one that had logged in originally. + type: str + trust_input: + description: + - If C(no), check the value of I(session_role) is potentially dangerous. + - It makes sense to use C(no) only when SQL injections via I(session_role) are possible. + type: bool + default: yes + version_added: '0.2.0' + +notes: +- C(size) and C(total_size) returned values are presented in bytes. +- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled. + See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information. +- Supports C(check_mode). +seealso: +- module: community.postgresql.postgresql_info +- module: community.postgresql.postgresql_ping +- name: PostgreSQL statistics collector reference + description: Complete reference of the PostgreSQL statistics collector documentation. 
+ link: https://www.postgresql.org/docs/current/monitoring-stats.html +author: +- Andrew Klychkov (@Andersson007) +- Thomas O'Donnell (@andytom) +extends_documentation_fragment: +- community.postgresql.postgres + +''' + +EXAMPLES = r''' +- name: Collect information about all supported user objects of the acme database + community.postgresql.postgresql_user_obj_stat_info: + db: acme + +- name: Collect information about all supported user objects in the custom schema of the acme database + community.postgresql.postgresql_user_obj_stat_info: + db: acme + schema: custom + +- name: Collect information about user tables and indexes in the acme database + community.postgresql.postgresql_user_obj_stat_info: + db: acme + filter: tables, indexes +''' + +RETURN = r''' +indexes: + description: User index statistics. + returned: always + type: dict + sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}} +tables: + description: User table statistics. + returned: always + type: dict + sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}} +functions: + description: User function statistics. 
+ returned: always + type: dict + sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}} +''' + +try: + from psycopg2.extras import DictCursor +except ImportError: + # psycopg2 is checked by connect_to_db() + # from ansible.module_utils.postgres + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.postgresql.plugins.module_utils.database import ( + check_input, +) +from ansible_collections.community.postgresql.plugins.module_utils.postgres import ( + connect_to_db, + exec_sql, + get_conn_params, + postgres_common_argument_spec, +) +from ansible.module_utils.six import iteritems + + +# =========================================== +# PostgreSQL module specific support methods. +# + + +class PgUserObjStatInfo(): + """Class to collect information about PostgreSQL user objects. + + Args: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + + Attributes: + module (AnsibleModule): Object of AnsibleModule class. + cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL. + executed_queries (list): List of executed queries. + info (dict): Statistics dictionary. + obj_func_mapping (dict): Mapping of object types to corresponding functions. + schema (str): Name of a schema to restrict stat collecting. + """ + + def __init__(self, module, cursor): + self.module = module + self.cursor = cursor + self.info = { + 'functions': {}, + 'indexes': {}, + 'tables': {}, + } + self.obj_func_mapping = { + 'functions': self.get_func_stat, + 'indexes': self.get_idx_stat, + 'tables': self.get_tbl_stat, + } + self.schema = None + + def collect(self, filter_=None, schema=None): + """Collect statistics information of user objects. + + Kwargs: + filter_ (list): List of subsets which need to be collected. + schema (str): Restrict stat collecting by certain schema. + + Returns: + ``self.info``. 
+ """ + if schema: + self.set_schema(schema) + + if filter_: + for obj_type in filter_: + obj_type = obj_type.strip() + obj_func = self.obj_func_mapping.get(obj_type) + + if obj_func is not None: + obj_func() + else: + self.module.warn("Unknown filter option '%s'" % obj_type) + + else: + for obj_func in self.obj_func_mapping.values(): + obj_func() + + return self.info + + def get_func_stat(self): + """Get function statistics and fill out self.info dictionary.""" + query = "SELECT * FROM pg_stat_user_functions" + if self.schema: + query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s" + + result = exec_sql(self, query, query_params=(self.schema,), + add_to_executed=False) + + if not result: + return + + self.__fill_out_info(result, + info_key='functions', + schema_key='schemaname', + name_key='funcname') + + def get_idx_stat(self): + """Get index statistics and fill out self.info dictionary.""" + query = "SELECT * FROM pg_stat_user_indexes" + if self.schema: + query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s" + + result = exec_sql(self, query, query_params=(self.schema,), + add_to_executed=False) + + if not result: + return + + self.__fill_out_info(result, + info_key='indexes', + schema_key='schemaname', + name_key='indexrelname') + + def get_tbl_stat(self): + """Get table statistics and fill out self.info dictionary.""" + query = "SELECT * FROM pg_stat_user_tables" + if self.schema: + query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s" + + result = exec_sql(self, query, query_params=(self.schema,), + add_to_executed=False) + + if not result: + return + + self.__fill_out_info(result, + info_key='tables', + schema_key='schemaname', + name_key='relname') + + def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None): + # Convert result to list of dicts to handle it easier: + result = [dict(row) for row in result] + + for elem in result: + # Add schema name as a key if not presented: + if not 
self.info[info_key].get(elem[schema_key]): + self.info[info_key][elem[schema_key]] = {} + + # Add object name key as a subkey + # (they must be uniq over a schema, so no need additional checks): + self.info[info_key][elem[schema_key]][elem[name_key]] = {} + + # Add other other attributes to a certain index: + for key, val in iteritems(elem): + if key not in (schema_key, name_key): + self.info[info_key][elem[schema_key]][elem[name_key]][key] = val + + if info_key in ('tables', 'indexes'): + schemaname = elem[schema_key] + if self.schema: + schemaname = self.schema + + relname = '%s.%s' % (schemaname, elem[name_key]) + + result = exec_sql(self, "SELECT pg_relation_size (%s)", + query_params=(relname,), + add_to_executed=False) + + self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0] + + if info_key == 'tables': + result = exec_sql(self, "SELECT pg_total_relation_size (%s)", + query_params=(relname,), + add_to_executed=False) + + self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0] + + def set_schema(self, schema): + """If schema exists, sets self.schema, otherwise fails.""" + query = ("SELECT 1 FROM information_schema.schemata " + "WHERE schema_name = %s") + result = exec_sql(self, query, query_params=(schema,), + add_to_executed=False) + + if result and result[0][0]: + self.schema = schema + else: + self.module.fail_json(msg="Schema '%s' does not exist" % (schema)) + + +# =========================================== +# Module execution. 
+# + +def main(): + argument_spec = postgres_common_argument_spec() + argument_spec.update( + db=dict(type='str', aliases=['login_db']), + filter=dict(type='list', elements='str'), + session_role=dict(type='str'), + schema=dict(type='str'), + trust_input=dict(type="bool", default=True), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + filter_ = module.params["filter"] + schema = module.params["schema"] + + if not module.params["trust_input"]: + check_input(module, module.params['session_role']) + + # Connect to DB and make cursor object: + pg_conn_params = get_conn_params(module, module.params) + # We don't need to commit anything, so, set it to False: + db_connection = connect_to_db(module, pg_conn_params, autocommit=False) + cursor = db_connection.cursor(cursor_factory=DictCursor) + + ############################ + # Create object and do work: + pg_obj_info = PgUserObjStatInfo(module, cursor) + + info_dict = pg_obj_info.collect(filter_, schema) + + # Clean up: + cursor.close() + db_connection.close() + + # Return information: + module.exit_json(**info_dict) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/shippable.yml b/collections-debian-merged/ansible_collections/community/postgresql/shippable.yml new file mode 100644 index 00000000..72eb91e0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/shippable.yml @@ -0,0 +1,59 @@ +language: python + +env: + matrix: + - T=none + +matrix: + exclude: + - env: T=none + include: + - env: T=devel/sanity/1 + - env: T=devel/sanity/extra + + - env: T=2.10/sanity/1 + + - env: T=2.9/sanity/1 + + - env: T=devel/units/1 + + - env: T=2.10/units/1 + + - env: T=2.9/units/1 + + - env: T=devel/rhel/7.8/1 + - env: T=devel/rhel/8.2/1 + - env: T=devel/freebsd/11.1/1 + - env: T=devel/freebsd/12.1/1 + - env: T=devel/linux/centos6/1 + - env: T=devel/linux/centos7/1 + #- env: 
T=devel/linux/centos8/1 + - env: T=devel/linux/fedora31/1 + - env: T=devel/linux/fedora32/1 + #- env: T=devel/linux/opensuse15py2/1 + #- env: T=devel/linux/opensuse15/1 + - env: T=devel/linux/ubuntu1604/1 + - env: T=devel/linux/ubuntu1804/1 + + - env: T=2.10/rhel/8.2/1 + + - env: T=2.9/rhel/8.2/1 + +branches: + except: + - "*-patch-*" + - "revert-*-*" + - "patchback/backports/*" + +build: + ci: + - tests/utils/shippable/timing.sh tests/utils/shippable/shippable.sh $T + +integrations: + notifications: + - integrationName: email + type: email + on_success: never + on_failure: never + on_start: never + on_pull_request: never diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/main.yml new file mode 100644 index 00000000..359c5d3b --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/main.yml @@ -0,0 +1,8 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_copy module +- import_tasks: postgresql_copy_initial.yml + when: postgres_version_resp.stdout is version('9.4', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml new file mode 100644 index 00000000..cd9981e9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml @@ -0,0 +1,278 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# The file for testing postgresql_copy module. 
+ +- vars: + test_table: acme + data_file_txt: /tmp/data.txt + data_file_csv: /tmp/data.csv + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: postgres + + block: + # Test preparation: + - name: postgresql_copy - create test table + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_table }}' + columns: + - id int + - name text + + # Insert the data: + - name: postgresql_copy - insert rows into test table + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "INSERT INTO {{ test_table }} (id, name) VALUES (1, 'first')" + + - name: postgresql_copy - ensure that test data files don't exist + <<: *task_parameters + file: + path: '{{ item }}' + state: absent + with_items: + - '{{ data_file_csv }}' + - '{{ data_file_txt }}' + + # ############## + # Do main tests: + + # check_mode - if it's OK, must always return changed=True: + - name: postgresql_copy - check_mode, copy test table content to data_file_txt + check_mode: yes + <<: *task_parameters + postgresql_copy: + <<: *pg_parameters + copy_to: '{{ data_file_txt }}' + src: '{{ test_table }}' + trust_input: no + + - assert: + that: + - result is changed + + # check that nothing changed after the previous step: + - name: postgresql_copy - check that data_file_txt doesn't exist + <<: *task_parameters + ignore_errors: yes + shell: head -n 1 '{{ data_file_txt }}' + + - assert: + that: + - result.failed == true + - result.rc == 1 + + # check_mode - if it's OK, must always return changed=True: + - name: postgresql_copy - check_mode, copy test table content from data_file_txt + check_mode: yes + <<: *task_parameters + postgresql_copy: + <<: *pg_parameters + copy_from: '{{ data_file_txt }}' + dst: '{{ test_table }}' + trust_input: no + + - assert: + that: + - result is changed + + # check that nothing changed after the previous step: + - name: postgresql_copy - 
check that test table continue to have one row + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: 'SELECT * FROM {{ test_table }}' + + - assert: + that: + - result.rowcount == 1 + + # check_mode - test must fail because test table doesn't exist: + - name: postgresql_copy - check_mode, copy non existent table to data_file_txt + check_mode: yes + ignore_errors: yes + <<: *task_parameters + postgresql_copy: + <<: *pg_parameters + copy_to: '{{ data_file_txt }}' + src: non_existent_table + trust_input: no + + - assert: + that: + - result.failed == true + - result.queries is not defined + + - name: postgresql_copy - check trust_input + <<: *task_parameters + postgresql_copy: + <<: *pg_parameters + copy_to: '{{ data_file_txt }}' + src: '{{ test_table }}' + session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + trust_input: no + ignore_errors: yes + + - assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') + + - name: postgresql_copy - copy test table data to data_file_txt + <<: *task_parameters + postgresql_copy: + <<: *pg_parameters + copy_to: '{{ data_file_txt }}' + src: '{{ test_table }}' + trust_input: no + + - assert: + that: + - result is changed + - result.queries == ["COPY \"{{ test_table }}\" TO '{{ data_file_txt }}'"] + - result.src == '{{ test_table }}' + - result.dst == '{{ data_file_txt }}' + + # check the prev test + - name: postgresql_copy - check data_file_txt exists and not empty + <<: *task_parameters + shell: 'head -n 1 {{ data_file_txt }}' + + - assert: + that: + - result.stdout == '1\tfirst' + + # test different options and columns + - name: postgresql_copy - copy test table data to data_file_csv with options and columns + <<: *task_parameters + postgresql_copy: + <<: *pg_parameters + copy_to: '{{ data_file_csv }}' + src: '{{ test_table }}' + columns: + - id + - name + options: + format: csv + trust_input: no + + - assert: + that: + - result is changed + - 
result.queries == ["COPY \"{{ test_table }}\" (id,name) TO '{{ data_file_csv }}' (format csv)"] + - result.src == '{{ test_table }}' + - result.dst == '{{ data_file_csv }}' + + # check the prev test + - name: postgresql_copy - check data_file_csv exists and not empty + <<: *task_parameters + shell: 'head -n 1 {{ data_file_csv }}' + + - assert: + that: + - result.stdout == '1,first' + + - name: postgresql_copy - copy from data_file_csv to test table + <<: *task_parameters + postgresql_copy: + <<: *pg_parameters + copy_from: '{{ data_file_csv }}' + dst: '{{ test_table }}' + columns: + - id + - name + options: + format: csv + trust_input: no + + - assert: + that: + - result is changed + - result.queries == ["COPY \"{{ test_table }}\" (id,name) FROM '{{ data_file_csv }}' (format csv)"] + - result.dst == '{{ test_table }}' + - result.src == '{{ data_file_csv }}' + + - name: postgresql_copy - check that there are two rows in test table after the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT * FROM {{ test_table }} WHERE id = '1' AND name = 'first'" + + - assert: + that: + - result.rowcount == 2 + + - name: postgresql_copy - test program option, copy to program + <<: *task_parameters + postgresql_copy: + <<: *pg_parameters + src: '{{ test_table }}' + copy_to: '/bin/true' + program: yes + columns: id, name + options: + delimiter: '|' + trust_input: no + when: ansible_distribution != 'FreeBSD' + + - assert: + that: + - result is changed + - result.queries == ["COPY \"{{ test_table }}\" (id, name) TO PROGRAM '/bin/true' (delimiter '|')"] + - result.src == '{{ test_table }}' + - result.dst == '/bin/true' + when: ansible_distribution != 'FreeBSD' + + - name: postgresql_copy - test program option, copy from program + <<: *task_parameters + postgresql_copy: + <<: *pg_parameters + dst: '{{ test_table }}' + copy_from: 'echo 1,first' + program: yes + columns: id, name + options: + delimiter: ',' + trust_input: no + + - assert: + that: + 
- result is changed + - result.queries == ["COPY \"{{ test_table }}\" (id, name) FROM PROGRAM 'echo 1,first' (delimiter ',')"] + - result.dst == '{{ test_table }}' + - result.src == 'echo 1,first' + when: ansible_distribution != 'FreeBSD' + + - name: postgresql_copy - check that there are three rows in test table after the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT * FROM {{ test_table }} WHERE id = '1' AND name = 'first'" + + - assert: + that: + - result.rowcount == 3 + + # clean up + - name: postgresql_copy - remove test table + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_table }}' + state: absent + + - name: postgresql_copy - remove test data files + <<: *task_parameters + file: + path: '{{ item }}' + state: absent + with_items: + - '{{ data_file_csv }}' + - '{{ data_file_txt }}' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/aliases new file mode 100644 index 00000000..2f88eca0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group1 +postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml new file mode 100644 index 00000000..766feeec --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/defaults/main.yml @@ -0,0 +1,11 @@ +db_name: 'ansible_db' +db_user1: 'ansible.db.user1' +db_user2: 'ansible.db.user2' +tmp_dir: '/tmp' +db_session_role1: 'session_role1' +db_session_role2: 
'session_role2' + +# To test trust_input parameter and +# possibility to create a database with dots in its name +db_name_with_dot: 'db.name' +suspicious_db_name: '{{ db_name_with_dot }}"; --' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml new file mode 100644 index 00000000..56b56aec --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/main.yml @@ -0,0 +1,36 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- import_tasks: postgresql_db_session_role.yml + +# Initial tests of postgresql_db module: +- import_tasks: postgresql_db_initial.yml + +# General tests: +- import_tasks: postgresql_db_general.yml + +# Dump/restore tests per format: +- include_tasks: state_dump_restore.yml + vars: + test_fixture: user + file: '{{ loop_item }}' + loop: + - dbdata.sql + - dbdata.sql.gz + - dbdata.sql.bz2 + - dbdata.sql.xz + - dbdata.tar + - dbdata.tar.gz + - dbdata.tar.bz2 + - dbdata.tar.xz + - dbdata.pgc + loop_control: + loop_var: loop_item + +# Dump/restore tests per other 
logins: +- import_tasks: state_dump_restore.yml + vars: + file: dbdata.tar + test_fixture: admin diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml new file mode 100644 index 00000000..6a178bea --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml @@ -0,0 +1,152 @@ +- become_user: '{{ pg_user }}' + become: true + vars: + db_tablespace: bar + tblspc_location: /ssd + db_name: acme + block_parameters: + become_user: '{{ pg_user }}' + become: true + task_parameters: + register: result + pg_parameters: + login_user: '{{ pg_user }}' + block: + - name: postgresql_db - drop dir for test tablespace + become: true + become_user: root + file: + path: '{{ tblspc_location }}' + state: absent + ignore_errors: true + - name: postgresql_db - disable selinux + become: true + become_user: root + shell: setenforce 0 + ignore_errors: true + - name: postgresql_db - create dir for test tablespace + become: true + become_user: root + file: + path: '{{ tblspc_location }}' + state: directory + owner: '{{ pg_user }}' + group: '{{ pg_user }}' + mode: '0700' + - name: postgresql_db_ - create a new tablespace + postgresql_tablespace: + login_user: '{{ pg_user }}' + login_db: postgres + name: '{{ db_tablespace }}' + location: '{{ tblspc_location }}' + - register: result + name: postgresql_db_tablespace - Create DB with tablespace option in check mode + check_mode: true + postgresql_db: + login_user: '{{ pg_user }}' + maintenance_db: postgres + name: '{{ db_name }}' + tablespace: '{{ db_tablespace }}' + - assert: + that: + - result is changed + - register: result + name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 0 because 
actually nothing changed + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}'' + + ' + - assert: + that: + - result.rowcount == 0 + - register: result + name: postgresql_db_tablespace - Create DB with tablespace option + postgresql_db: + login_user: '{{ pg_user }}' + maintenance_db: postgres + name: '{{ db_name }}' + tablespace: '{{ db_tablespace }}' + - assert: + that: + - result is changed + - result.executed_commands == ['CREATE DATABASE "{{ db_name }}" TABLESPACE "{{ db_tablespace }}"'] + - register: result + name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1 + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}'' + + ' + - assert: + that: + - result.rowcount == 1 + - register: result + name: postgresql_db_tablespace - The same DB with tablespace option again + postgresql_db: + login_user: '{{ pg_user }}' + maintenance_db: postgres + name: '{{ db_name }}' + tablespace: '{{ db_tablespace }}' + - assert: + that: + - result is not changed + - register: result + name: postgresql_db_tablespace - Change tablespace in check_mode + check_mode: true + postgresql_db: + login_user: '{{ pg_user }}' + maintenance_db: postgres + name: '{{ db_name }}' + tablespace: pg_default + - assert: + that: + - result is changed + - register: result + name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1 because actually nothing changed + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}'' + 
+ ' + - assert: + that: + - result.rowcount == 1 + - register: result + name: postgresql_db_tablespace - Change tablespace in actual mode + postgresql_db: + login_user: '{{ pg_user }}' + maintenance_db: postgres + name: '{{ db_name }}' + tablespace: pg_default + - assert: + that: + - result is changed + - register: result + name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1 + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''pg_default'' + + ' + - assert: + that: + - result.rowcount == 1 + - register: result + name: postgresql_db_tablespace - Drop test DB + postgresql_db: + login_user: '{{ pg_user }}' + maintenance_db: postgres + name: '{{ db_name }}' + state: absent + - register: result + name: postgresql_db_tablespace - Remove tablespace + postgresql_tablespace: + login_user: '{{ pg_user }}' + login_db: postgres + name: '{{ db_tablespace }}' + state: absent diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml new file mode 100644 index 00000000..851c19f4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml @@ -0,0 +1,366 @@ +# +# Create and destroy db +# +- name: Create DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ db_name }}" + login_user: "{{ pg_user }}" + register: result + +- name: assert that module reports the db was created + assert: + that: + - result is changed + - result.db == "{{ db_name }}" + - result.executed_commands == ['CREATE DATABASE "{{ db_name }}"'] + +- name: Check 
that database created + become_user: "{{ pg_user }}" + become: yes + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- name: Run create on an already created db + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ db_name }}" + login_user: "{{ pg_user }}" + register: result + +- name: assert that module reports the db was unchanged + assert: + that: + - result is not changed + +- name: Destroy DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: absent + name: "{{ db_name }}" + login_user: "{{ pg_user }}" + register: result + +- name: assert that module reports the db was changed + assert: + that: + - result is changed + - result.executed_commands == ['DROP DATABASE "{{ db_name }}"'] + +- name: Check that database was destroyed + become_user: "{{ pg_user }}" + become: yes + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Destroy DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: absent + name: "{{ db_name }}" + login_user: "{{ pg_user }}" + register: result + +- name: assert that removing an already removed db makes no change + assert: + that: + - result is not changed + + +# This corner case works to add but not to drop. 
This is sufficiently crazy +# that I'm not going to attempt to fix it unless someone lets me know that they +# need the functionality +# +# - postgresql_db: +# state: 'present' +# name: '"silly.""name"' +# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql +# register: result +# +# - assert: +# that: "result.stdout_lines[-1] == '(1 row)'" +# - postgresql_db: +# state: absent +# name: '"silly.""name"' +# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql +# register: result +# +# - assert: +# that: "result.stdout_lines[-1] == '(0 rows)'" + +# +# Test conn_limit, encoding, collate, ctype, template options +# +- name: Create a DB with conn_limit, encoding, collate, ctype, and template options + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: '{{ db_name }}' + state: 'present' + conn_limit: '100' + encoding: 'LATIN1' + lc_collate: 'pt_BR{{ locale_latin_suffix }}' + lc_ctype: 'es_ES{{ locale_latin_suffix }}' + template: 'template0' + login_user: "{{ pg_user }}" + register: result + +- assert: + that: + - result is changed + - result.executed_commands == ["CREATE DATABASE \"{{ db_name }}\" TEMPLATE \"template0\" ENCODING 'LATIN1' LC_COLLATE 'pt_BR{{ locale_latin_suffix }}' LC_CTYPE 'es_ES{{ locale_latin_suffix }}' CONNECTION LIMIT 100"] or result.executed_commands == ["CREATE DATABASE \"{{ db_name }}\" TEMPLATE \"template0\" ENCODING E'LATIN1' LC_COLLATE E'pt_BR{{ locale_latin_suffix }}' LC_CTYPE E'es_ES{{ locale_latin_suffix }}' CONNECTION LIMIT 100"] + +- name: Check that the DB has all of our options + become_user: "{{ pg_user }}" + become: yes + shell: echo "select datname, datconnlimit, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql -d postgres + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'LATIN1' in result.stdout_lines[-2]" + - "'pt_BR' in result.stdout_lines[-2]" + - 
"'es_ES' in result.stdout_lines[-2]" + - "'UTF8' not in result.stdout_lines[-2]" + - "'en_US' not in result.stdout_lines[-2]" + - "'100' in result.stdout_lines[-2]" + +- name: Check that running db creation with options a second time does nothing + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: '{{ db_name }}' + state: 'present' + conn_limit: '100' + encoding: 'LATIN1' + lc_collate: 'pt_BR{{ locale_latin_suffix }}' + lc_ctype: 'es_ES{{ locale_latin_suffix }}' + template: 'template0' + login_user: "{{ pg_user }}" + register: result + +- assert: + that: + - result is not changed + + +- name: Check that attempting to change encoding returns an error + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: '{{ db_name }}' + state: 'present' + encoding: 'UTF8' + lc_collate: 'pt_BR{{ locale_utf8_suffix }}' + lc_ctype: 'es_ES{{ locale_utf8_suffix }}' + template: 'template0' + login_user: "{{ pg_user }}" + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + +- name: Check that changing the conn_limit actually works + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: '{{ db_name }}' + state: 'present' + conn_limit: '200' + encoding: 'LATIN1' + lc_collate: 'pt_BR{{ locale_latin_suffix }}' + lc_ctype: 'es_ES{{ locale_latin_suffix }}' + template: 'template0' + login_user: "{{ pg_user }}" + register: result + +- assert: + that: + - result is changed + - result.executed_commands == ['ALTER DATABASE "{{ db_name }}" CONNECTION LIMIT 200'] + +- name: Check that conn_limit has actually been set / updated to 200 + become_user: "{{ pg_user }}" + become: yes + shell: echo "SELECT datconnlimit AS conn_limit FROM pg_database WHERE datname = '{{ db_name }}';" | psql -d postgres + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'200' == '{{ result.stdout_lines[-2] | trim }}'" + +- name: Cleanup test DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: 
'{{ db_name }}' + state: 'absent' + login_user: "{{ pg_user }}" + +- shell: echo "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql -d postgres + become_user: "{{ pg_user }}" + become: yes + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +# +# Test db ownership +# +- name: Create an unprivileged user to own a DB + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ item }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + login_user: "{{ pg_user }}" + db: postgres + loop: + - "{{ db_user1 }}" + - "{{ db_user2 }}" + +- name: Create db with user ownership + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: "{{ db_name }}" + state: "present" + owner: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + register: result + +- assert: + that: + - result is changed + - result.executed_commands == ['CREATE DATABASE "{{ db_name }}" OWNER "{{ db_user1 }}"'] + +- name: Check that the user owns the newly created DB + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: > + SELECT 1 FROM pg_catalog.pg_database + WHERE datname = '{{ db_name }}' + AND pg_catalog.pg_get_userbyid(datdba) = '{{ db_user1 }}' + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: Change the owner on an existing db, username with dots + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: "{{ db_name }}" + state: "present" + owner: "{{ db_user2 }}" + login_user: "{{ pg_user }}" + register: result + +- assert: + that: + - result is changed + - result.executed_commands == ['ALTER DATABASE "{{ db_name }}" OWNER TO "{{ db_user2 }}"'] + +- name: Check the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + login_user: "{{ pg_user }}" + db: postgres + query: > + SELECT 1 FROM pg_catalog.pg_database + WHERE datname 
= '{{ db_name }}' + AND pg_catalog.pg_get_userbyid(datdba) = '{{ db_user2 }}' + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: Change the owner on an existing db + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: "{{ db_name }}" + state: "present" + owner: "{{ pg_user }}" + login_user: "{{ pg_user }}" + register: result + +- name: assert that ansible says it changed the db + assert: + that: + - result is changed + +- name: Check that the user owns the newly created DB + become_user: "{{ pg_user }}" + become: yes + shell: echo "select pg_catalog.pg_get_userbyid(datdba) from pg_catalog.pg_database where datname = '{{ db_name }}';" | psql -d postgres + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'{{ pg_user }}' == '{{ result.stdout_lines[-2] | trim }}'" + +- name: Cleanup db + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: "{{ db_name }}" + state: "absent" + login_user: "{{ pg_user }}" + +- name: Check that database was destroyed + become_user: "{{ pg_user }}" + become: yes + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Cleanup test user + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ db_user1 }}" + state: 'absent' + login_user: "{{ pg_user }}" + db: postgres + +- name: Check that they were removed + become_user: "{{ pg_user }}" + become: yes + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml new file mode 100644 index 00000000..4cdef73f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml @@ -0,0 +1,80 @@ +- name: Check that becoming an non-existing user throws an error + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: must_fail + login_user: "{{ pg_user }}" + session_role: "{{ db_session_role1 }}" + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + +- name: Create a high privileged user + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ db_session_role1 }}" + state: "present" + password: "password" + role_attr_flags: "CREATEDB,LOGIN,CREATEROLE" + login_user: "{{ pg_user }}" + db: postgres + +- name: Create a low privileged user using the newly created user + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ db_session_role2 }}" + state: "present" + password: "password" + role_attr_flags: "LOGIN" + login_user: "{{ pg_user }}" + session_role: "{{ db_session_role1 }}" + db: postgres + +- name: Create DB as session_role + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + session_role: "{{ db_session_role1 }}" + register: result + +- name: Check that database created and is owned by correct user + become_user: "{{ pg_user }}" + become: yes + shell: echo "select rolname from pg_database join pg_roles on datdba = pg_roles.oid where datname = '{{ db_session_role1 }}';" | psql -AtXq postgres + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '{{ db_session_role1 }}'" + +- name: Fail when creating database as low privileged user + become_user: "{{ pg_user }}" + become: yes + 
postgresql_db: + state: present + name: "{{ db_session_role2 }}" + login_user: "{{ pg_user }}" + session_role: "{{ db_session_role2 }}" + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + +- name: Drop test db + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: absent + name: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml new file mode 100644 index 00000000..6c62cce6 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml @@ -0,0 +1,235 @@ +# test code for state dump and restore for postgresql_db module +# copied from mysql_db/tasks/state_dump_import.yml +# (c) 2014, Wayne Rosario <wrosario@ansible.com> + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +# ============================================================ + +- name: Create a test user + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ db_user1 }}" + state: "present" + encrypted: 'yes' + password: "password" + role_attr_flags: "CREATEDB,LOGIN,CREATEROLE" + login_user: "{{ pg_user }}" + db: postgres + +- set_fact: db_file_name="{{tmp_dir}}/{{file}}" + +- set_fact: + admin_str: "psql -U {{ pg_user }}" + +- set_fact: + user_str: "env PGPASSWORD=password psql -h localhost -U {{ db_user1 }} {{ db_name }}" + when: test_fixture == "user" + # "-n public" is required to work around pg_restore issues with plpgsql + +- set_fact: + user_str: "psql -U {{ pg_user }} {{ db_name }}" + when: test_fixture == "admin" + + + +- set_fact: + sql_create: "create table employee(id int, name varchar(100));" + sql_insert: "insert into employee values (47,'Joe Smith');" + sql_select: "select * from employee;" + +- name: state dump/restore - create database + postgresql_db: + state: present + name: "{{ db_name }}" + owner: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + +- name: state dump/restore - create table employee + command: '{{ user_str }} -c "{{ sql_create }}"' + +- name: state dump/restore - insert data into table employee + command: '{{ user_str }} -c "{{ sql_insert }}"' + +- name: state dump/restore - file name should not exist + file: name={{ db_file_name }} state=absent + +- name: test state=dump to backup the database (expect changed=true) + postgresql_db: + name: "{{ db_name }}" + target: "{{ db_file_name }}" + owner: "{{ db_user1 }}" + login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}' + target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}' + login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}' + login_password: '{{(test_fixture == "user")|ternary("password", omit)}}' + state: dump + dump_extra_args: --exclude-table=fake + register: result + become_user: "{{ pg_user }}" + become: yes + 
+- name: assert output message backup the database + assert: + that: + - result is changed + - result.executed_commands[0] is search("--exclude-table=fake") + +- name: assert database was backed up successfully + command: file {{ db_file_name }} + register: result + +- name: state dump/restore - remove database for restore + postgresql_db: + name: "{{ db_name }}" + target: "{{ db_file_name }}" + owner: "{{ db_user1 }}" + login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}' + target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}' + login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}' + login_password: '{{(test_fixture == "user")|ternary("password", omit)}}' + state: absent + +- name: state dump/restore - re-create database + postgresql_db: + state: present + name: "{{ db_name }}" + owner: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + +- name: test state=restore to restore the database (expect changed=true) + postgresql_db: + name: "{{ db_name }}" + target: "{{ db_file_name }}" + owner: "{{ db_user1 }}" + login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}' + target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}' + login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}' + login_password: '{{(test_fixture == "user")|ternary("password", omit)}}' + state: restore + register: result + become_user: "{{ pg_user }}" + become: yes + +- name: assert output message restore the database + assert: + that: + - result is changed + +- name: select data from table employee + command: '{{ user_str }} -c "{{ sql_select }}"' + register: result + +- name: assert data in database is from the restore database + assert: + that: + - "'47' in result.stdout" + - "'Joe Smith' in result.stdout" + +############################ +# 1. Test trust_input parameter +# 2. 
Test db name containing dots + +- name: state dump/restore - create database, trust_input no + become: yes + become_user: "{{ pg_user }}" + postgresql_db: + state: present + name: "{{ suspicious_db_name }}" + owner: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + - result.msg == 'Passed input \'{{ suspicious_db_name }}\' is potentially dangerous' + +- name: state dump/restore - create database, trust_input yes explicitly + become: yes + become_user: "{{ pg_user }}" + postgresql_db: + state: present + name: "{{ suspicious_db_name }}" + owner: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + trust_input: yes + register: result + +- assert: + that: + - result is changed + +- name: test state=restore to restore the database (expect changed=true) + become: yes + become_user: "{{ pg_user }}" + postgresql_db: + name: "{{ db_name_with_dot }}" + target: "{{ db_file_name }}" + owner: "{{ db_user1 }}" + login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}' + target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}' + login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}' + login_password: '{{(test_fixture == "user")|ternary("password", omit)}}' + state: restore + register: result + +- name: assert output message restore the database + assert: + that: + - result is changed + +- name: state dump/restore - remove databases + become: yes + become_user: "{{ pg_user }}" + postgresql_db: + state: absent + name: "{{ db_name_with_dot }}" + owner: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + trust_input: yes + register: result + +- assert: + that: + - result is changed + +# Clean up +- name: state dump/restore - remove database name + postgresql_db: + name: "{{ db_name }}" + target: "{{ db_file_name }}" + owner: "{{ db_user1 }}" + login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}' + target_opts: '{{(test_fixture == 
"user")|ternary("-n public", omit)}}' + login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}' + login_password: '{{(test_fixture == "user")|ternary("password", omit)}}' + state: absent + +- name: remove file name + file: name={{ db_file_name }} state=absent + +- name: Remove the test user + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ db_user1 }}" + state: "absent" + login_user: "{{ pg_user }}" + db: postgres diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/aliases new file mode 100644 index 00000000..142e8aa0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group1 +skip/freebsd diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/defaults/main.yml new file mode 100644 index 00000000..05bac61d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/defaults/main.yml @@ -0,0 +1,2 @@ +db_session_role1: 'session_role1' +db_session_role2: 'session_role2' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/meta/main.yml new file mode 100644 index 00000000..0ec7d2fc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - setup_pkg_mgr + - 
setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/main.yml new file mode 100644 index 00000000..1fa365be --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/main.yml @@ -0,0 +1,26 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- import_tasks: postgresql_ext_session_role.yml + +# Initial CI tests of postgresql_ext module. +# pg_extension system view is available from PG 9.1. +# The tests are restricted by Fedora because there will be errors related with +# attempts to change the environment during postgis installation or +# missing postgis package in repositories. +# Anyway, these tests completely depend on Postgres version, +# not specific distributions. +- import_tasks: postgresql_ext_initial.yml + when: + - postgres_version_resp.stdout is version('9.1', '>=') + - ansible_distribution == 'Fedora' + +# CI tests of "version" option. +# It uses a mock extension, see test/integration/targets/setup_postgresql_db/. +# TODO: change postgresql_ext_initial.yml to use the mock extension too. 
+- import_tasks: postgresql_ext_version_opt.yml + when: + - ansible_distribution == 'Ubuntu' + - postgres_version_resp.stdout is version('9.1', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml new file mode 100644 index 00000000..cabf1f49 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml @@ -0,0 +1,208 @@ +--- +- name: postgresql_ext - install postgis on Linux + package: name=postgis state=present + when: ansible_os_family != "Windows" + +- name: postgresql_ext - create schema schema1 + become_user: '{{ pg_user }}' + become: true + postgresql_schema: + database: postgres + name: schema1 + state: present + +- name: postgresql_ext - drop extension if exists + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: DROP EXTENSION IF EXISTS postgis + ignore_errors: true + +- name: postgresql_ext - create extension postgis in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_ext: + login_db: postgres + login_port: 5432 + name: postgis + check_mode: true + ignore_errors: true + register: result + +- assert: + that: + - result is changed + - result.queries == [] + +- name: postgresql_ext - check that extension doesn't exist after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: SELECT extname FROM pg_extension WHERE extname='postgis' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_ext - create extension postgis + become_user: '{{ pg_user }}' + become: true + postgresql_ext: + login_db: postgres + login_port: 5432 + name: postgis + ignore_errors: 
true + register: result + +- assert: + that: + - result is changed + - result.queries == ['CREATE EXTENSION "postgis"'] + +- name: postgresql_ext - check that extension exists after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: SELECT extname FROM pg_extension WHERE extname='postgis' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_ext - drop extension postgis + become_user: '{{ pg_user }}' + become: true + postgresql_ext: + db: postgres + name: postgis + state: absent + ignore_errors: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['DROP EXTENSION "postgis"'] + +- name: postgresql_ext - check that extension doesn't exist after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: SELECT extname FROM pg_extension WHERE extname='postgis' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_ext - create extension postgis + become_user: '{{ pg_user }}' + become: true + postgresql_ext: + db: postgres + name: postgis + schema: schema1 + ignore_errors: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['CREATE EXTENSION "postgis" WITH SCHEMA "schema1"'] + +- name: postgresql_ext - check that extension exists after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: "SELECT extname FROM pg_extension AS e LEFT JOIN pg_catalog.pg_namespace AS n \nON n.oid = e.extnamespace WHERE e.extname='postgis' AND n.nspname='schema1'\n" + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_ext - drop extension postgis cascade + become_user: '{{ pg_user }}' + become: true + postgresql_ext: + db: postgres + name: postgis + state: absent + cascade: true + ignore_errors: 
true + register: result + +- assert: + that: + - result is changed + - result.queries == ['DROP EXTENSION "postgis" CASCADE'] + +- name: postgresql_ext - check that extension doesn't exist after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: SELECT extname FROM pg_extension WHERE extname='postgis' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_ext - create extension postgis cascade + become_user: '{{ pg_user }}' + become: true + postgresql_ext: + db: postgres + name: postgis + cascade: true + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('9.6', '<=') + +- assert: + that: + - result is changed + - result.queries == ['CREATE EXTENSION "postgis" CASCADE'] + when: postgres_version_resp.stdout is version('9.6', '<=') + +- name: postgresql_ext - check that extension exists after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + query: SELECT extname FROM pg_extension WHERE extname='postgis' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('9.6', '<=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('9.6', '<=') + +- name: postgresql_ext - check that using a dangerous name fails + postgresql_ext: + db: postgres + name: postgis + session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + trust_input: no + ignore_errors: true + register: result + +- assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml new file mode
100644 index 00000000..c1fed5bf --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml @@ -0,0 +1,114 @@ +- name: Create a high privileged user + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ db_session_role1 }}" + state: "present" + password: "password" + role_attr_flags: "CREATEDB,LOGIN,CREATEROLE" + login_user: "{{ pg_user }}" + db: postgres + +- name: Create DB as session_role + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + session_role: "{{ db_session_role1 }}" + register: result + +- name: Check that pg_extension exists (PostgreSQL >= 9.1) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select count(*) from pg_class where relname='pg_extension' and relkind='r'" | psql -AtXq postgres + register: pg_extension + +- name: Remove plpgsql from testdb using postgresql_ext + become_user: "{{ pg_user }}" + become: yes + postgresql_ext: + name: plpgsql + db: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + state: absent + when: + "pg_extension.stdout_lines[-1] == '1'" + +- name: Fail when trying to create an extension as a mere mortal user + become_user: "{{ pg_user }}" + become: yes + postgresql_ext: + name: plpgsql + db: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + session_role: "{{ db_session_role2 }}" + ignore_errors: yes + register: result + when: + "pg_extension.stdout_lines[-1] == '1'" + +- assert: + that: + - result is failed + when: + "pg_extension.stdout_lines[-1] == '1'" + +- name: Install extension as session_role + become_user: "{{ pg_user }}" + become: yes + postgresql_ext: + name: plpgsql + db: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + session_role: "{{ db_session_role1 }}" + trust_input: no + when: + "pg_extension.stdout_lines[-1] == '1'" + +- name: Check that extension is 
created and is owned by session_role + become_user: "{{ pg_user }}" + become: yes + shell: echo "select rolname from pg_extension join pg_roles on extowner=pg_roles.oid where extname='plpgsql';" | psql -AtXq "{{ db_session_role1 }}" + register: result + when: + "pg_extension.stdout_lines[-1] == '1'" + +- assert: + that: + - "result.stdout_lines[-1] == '{{ db_session_role1 }}'" + when: + "pg_extension.stdout_lines[-1] == '1'" + +- name: Remove plpgsql from testdb using postgresql_ext + become_user: "{{ pg_user }}" + become: yes + postgresql_ext: + name: plpgsql + db: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + state: absent + trust_input: no + when: + "pg_extension.stdout_lines[-1] == '1'" + +- name: Drop test db + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: absent + name: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + +- name: Drop test users + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ item }}" + state: absent + login_user: "{{ pg_user }}" + db: postgres + with_items: + - "{{ db_session_role1 }}" + - "{{ db_session_role2 }}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml new file mode 100644 index 00000000..f90340c5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml @@ -0,0 +1,364 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Tests for postgresql_ext version option + +- vars: + test_ext: dummy + test_schema: schema1 + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: 
yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: postgres + + block: + # Preparation: + - name: postgresql_ext_version - create schema schema1 + <<: *task_parameters + postgresql_schema: + <<: *pg_parameters + name: "{{ test_schema }}" + + # Do tests: + - name: postgresql_ext_version - create extension of specific version, check mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: '1.0' + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + + - name: postgresql_ext_version - check that nothing was actually changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}'" + + - assert: + that: + - result.rowcount == 0 + + - name: postgresql_ext_version - create extension of specific version + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: '1.0' + trust_input: no + + - assert: + that: + - result is changed + - result.queries == ["CREATE EXTENSION \"{{ test_ext }}\" WITH SCHEMA \"{{ test_schema }}\" VERSION '1.0'"] + + - name: postgresql_ext_version - check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'" + + - assert: + that: + - result.rowcount == 1 + + - name: postgresql_ext_version - try to create extension of the same version again in check_mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: '1.0' + trust_input: no + check_mode: yes + + - assert: + that: + - result is not changed + + - name: postgresql_ext_version - check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = 
'1.0'" + + - assert: + that: + - result.rowcount == 1 + + - name: postgresql_ext_version - try to create extension of the same version again in actual mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: '1.0' + trust_input: no + + - assert: + that: + - result is not changed + + - name: postgresql_ext_version - check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'" + + - assert: + that: + - result.rowcount == 1 + + - name: postgresql_ext_version - update the extension to the next version in check_mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: '2.0' + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + + - name: postgresql_ext_version - check, the version must be 1.0 + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'" + + - assert: + that: + - result.rowcount == 1 + + - name: postgresql_ext_version - update the extension to the next version + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: '2.0' + trust_input: no + + - assert: + that: + - result is changed + - result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE TO '2.0'"] + + - name: postgresql_ext_version - check, the version must be 2.0 + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '2.0'" + + - assert: + that: + - result.rowcount == 1 + + - name: postgresql_ext_version - check that version won't be changed if version won't be passed + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + 
schema: "{{ test_schema }}" + trust_input: no + + - assert: + that: + - result is not changed + + - name: postgresql_ext_version - check, the version must be 2.0 + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '2.0'" + + - assert: + that: + - result.rowcount == 1 + + - name: postgresql_ext_version - update the extension to the latest version + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: latest + trust_input: no + + - assert: + that: + - result is changed + - result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE TO '3.0'"] + + - name: postgresql_ext_version - check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '3.0'" + + - assert: + that: + - result.rowcount == 1 + + - name: postgresql_ext_version - try to update the extension to the latest version again + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: latest + trust_input: no + + - assert: + that: + - result is not changed + + - name: postgresql_ext_version - try to downgrade the extension version, must fail + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + schema: "{{ test_schema }}" + version: '1.0' + trust_input: no + ignore_errors: yes + + - assert: + that: + - result.failed == true + + - name: postgresql_ext_version - drop the extension in check_mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + state: absent + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + + - name: postgresql_ext_version - check that extension exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE 
extname = '{{ test_ext }}' AND extversion = '3.0'" + + - assert: + that: + - result.rowcount == 1 + + - name: postgresql_ext_version - drop the extension in actual mode + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + state: absent + trust_input: no + + - assert: + that: + - result is changed + + - name: postgresql_ext_version - check that extension doesn't exist after the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}'" + + - assert: + that: + - result.rowcount == 0 + + - name: postgresql_ext_version - try to drop the non-existent extension again + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + state: absent + trust_input: no + + - assert: + that: + - result is not changed + + - name: postgresql_ext_version - create the extension without passing version + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + trust_input: no + + - assert: + that: + - result is changed + - result.queries == ["CREATE EXTENSION \"{{ test_ext }}\""] + + - name: postgresql_ext_version - check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '3.0'" + + - assert: + that: + - result.rowcount == 1 + + - name: postgresql_ext_version - try to install non-existent version + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: non_existent + trust_input: no + ignore_errors: yes + + - assert: + that: + - result.failed == true + - result.msg == "Extension non_existent is not installed" + + ###################################################################### + # https://github.com/ansible-collections/community.general/issues/1095 + - name: Install postgis + package: + name: postgis + + - name: Create postgis extension + <<: *task_parameters + postgresql_ext: + <<: 
*pg_parameters + name: postgis + version: latest + + - assert: + that: + - result is changed + + # Cleanup: + - name: postgresql_ext_version - drop the extension + <<: *task_parameters + postgresql_ext: + <<: *pg_parameters + name: "{{ test_ext }}" + state: absent + trust_input: no + + - name: postgresql_ext_version - drop the schema + <<: *task_parameters + postgresql_schema: + <<: *pg_parameters + name: "{{ test_schema }}" + state: absent diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/main.yml new file mode 100644 index 00000000..2f594561 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/main.yml @@ -0,0 +1,7 @@ +#################################################################### +# WARNING: These are designed 
specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_idx module +- import_tasks: postgresql_idx_initial.yml diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml new file mode 100644 index 00000000..31d16627 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml @@ -0,0 +1,377 @@ +- name: postgresql_idx - create test table called test_table + become_user: '{{ pg_user }}' + become: true + shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE test_table (id int, story text);" + ignore_errors: true + +- name: postgresql_idx - drop test tablespace called ssd if exists + become_user: '{{ pg_user }}' + become: true + shell: psql postgres -U "{{ pg_user }}" -t -c "DROP TABLESPACE IF EXISTS ssd;" + ignore_errors: true + +- name: postgresql_idx - drop dir for test tablespace + become: true + file: + path: /mnt/ssd + state: absent + ignore_errors: true + +- name: postgresql_idx - create dir for test tablespace + become: true + file: + path: /mnt/ssd + state: directory + owner: '{{ pg_user }}' + mode: '0755' + ignore_errors: true + +- name: postgresql_idx - create test tablespace called ssd + become_user: '{{ pg_user }}' + become: true + shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLESPACE ssd LOCATION '/mnt/ssd';" + ignore_errors: true + register: tablespace + +- name: postgresql_idx - create test schema + become_user: '{{ pg_user }}' + become: true + shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE SCHEMA foo;" + ignore_errors: true + +- name: 
postgresql_idx - create table in non-default schema + become_user: '{{ pg_user }}' + become: true + shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE foo.foo_table (id int, story text);" + ignore_errors: true + +- name: postgresql_idx - create btree index in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + table: test_table + columns: id, story + idxname: Test0_idx + check_mode: true + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.tblname == '' + - result.name == 'Test0_idx' + - result.state == 'absent' + - result.valid != '' + - result.tblspace == '' + - result.storage_params == [] + - result.schema == '' + - result.query == '' + +- name: postgresql_idx - check nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_indexes WHERE indexname = 'Test0_idx' + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_idx - create btree index concurrently + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + table: test_table + columns: id, story + idxname: Test0_idx + trust_input: no + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.tblname == 'test_table' + - result.name == 'Test0_idx' + - result.state == 'present' + - result.valid != '' + - result.tblspace == '' + - result.storage_params == [] + - result.schema == 'public' + - result.query == 'CREATE INDEX CONCURRENTLY "Test0_idx" ON "public"."test_table" USING BTREE (id, story)' + +- name: postgresql_idx - check the index exists after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_indexes WHERE indexname = 'Test0_idx' + register: result 
+ +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_idx - try to create existing index again + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + table: test_table + columns: id, story + idxname: Test0_idx + register: result + ignore_errors: true + +- assert: + that: + - result is not changed + - result.tblname == 'test_table' + - result.name == 'Test0_idx' + - result.state == 'present' + - result.valid != '' + - result.tblspace == '' + - result.storage_params == [] + - result.schema == 'public' + - result.query == '' + +- name: postgresql_idx - create btree index - non-default schema, tablespace, storage parameter + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + schema: foo + table: foo_table + columns: + - id + - story + idxname: foo_test_idx + tablespace: ssd + storage_params: fillfactor=90 + trust_input: no + register: result + ignore_errors: true + when: tablespace.rc == 0 + +- assert: + that: + - result is changed + - result.tblname == 'foo_table' + - result.name == 'foo_test_idx' + - result.state == 'present' + - result.valid != '' + - result.tblspace == 'ssd' + - result.storage_params == [ "fillfactor=90" ] + - result.schema == 'foo' + - result.query == 'CREATE INDEX CONCURRENTLY "foo_test_idx" ON "foo"."foo_table" USING BTREE (id,story) WITH (fillfactor=90) TABLESPACE "ssd"' + when: tablespace.rc == 0 + +- name: postgresql_idx - create brin index not concurrently + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + schema: public + table: test_table + state: present + type: brin + columns: id + idxname: test_brin_idx + concurrent: false + trust_input: no + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.tblname == 'test_table' + - result.name == 'test_brin_idx' + - result.state == 'present' + - result.valid != '' + - 
result.tblspace == '' + - result.storage_params == [] + - result.schema == 'public' + - result.query == 'CREATE INDEX "test_brin_idx" ON "public"."test_table" USING brin (id)' + when: postgres_version_resp.stdout is version('9.5', '>=') + +- name: postgresql_idx - create index with condition + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + table: test_table + columns: id + idxname: test1_idx + cond: id > 1 AND id != 10 + trust_input: no + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.tblname == 'test_table' + - result.name == 'test1_idx' + - result.state == 'present' + - result.valid != '' + - result.tblspace == '' + - result.storage_params == [] + - result.schema == 'public' + - result.query == 'CREATE INDEX CONCURRENTLY "test1_idx" ON "public"."test_table" USING BTREE (id) WHERE id > 1 AND id != 10' + +- name: postgresql_idx - create unique index + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + table: test_table + columns: story + idxname: test_unique0_idx + unique: true + trust_input: no + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.tblname == 'test_table' + - result.name == 'test_unique0_idx' + - result.state == 'present' + - result.valid != '' + - result.tblspace == '' + - result.storage_params == [] + - result.schema == 'public' + - result.query == 'CREATE UNIQUE INDEX CONCURRENTLY "test_unique0_idx" ON "public"."test_table" USING BTREE (story)' + +- name: postgresql_idx - avoid unique index with type different of btree + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + table: test_table + columns: story + idxname: test_unique0_idx + unique: true + concurrent: false + type: brin + register: result + ignore_errors: true + +- assert: + that: + - result is not changed + - result.msg == 
'Only btree currently supports unique indexes' + +- name: postgresql_idx - drop index from specific schema cascade in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + schema: foo + name: foo_test_idx + cascade: true + state: absent + concurrent: false + trust_input: yes + check_mode: true + register: result + ignore_errors: true + when: tablespace.rc == 0 + +- assert: + that: + - result is changed + - result.name == 'foo_test_idx' + - result.state == 'present' + - result.schema == 'foo' + - result.query == '' + when: tablespace.rc == 0 + +- name: postgresql_idx - check the index exists after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_indexes WHERE indexname = 'foo_test_idx' AND schemaname = 'foo' + register: result + when: tablespace.rc == 0 + +- assert: + that: + - result.rowcount == 1 + when: tablespace.rc == 0 + +- name: postgresql_idx - drop index from specific schema cascade + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + schema: foo + name: foo_test_idx + cascade: true + state: absent + concurrent: false + register: result + ignore_errors: true + when: tablespace.rc == 0 + +- assert: + that: + - result is changed + - result.name == 'foo_test_idx' + - result.state == 'absent' + - result.schema == 'foo' + - result.query == 'DROP INDEX "foo"."foo_test_idx" CASCADE' + when: tablespace.rc == 0 + +- name: postgresql_idx - check the index doesn't exist after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_indexes WHERE indexname = 'foo_test_idx' and schemaname = 'foo' + register: result + when: tablespace.rc == 0 + +- assert: + that: + - result.rowcount == 0 + when: tablespace.rc == 0 + +- name: postgresql_idx - try 
to drop not existing index + become_user: '{{ pg_user }}' + become: true + postgresql_idx: + db: postgres + login_user: '{{ pg_user }}' + schema: foo + name: foo_test_idx + state: absent + register: result + ignore_errors: true + +- assert: + that: + - result is not changed + - result.query == '' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/aliases new file mode 100644 index 00000000..786e0531 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/aliases @@ -0,0 +1,4 @@ +destructive +shippable/posix/group1 +skip/freebsd +skip/rhel diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml new file mode 100644 index 00000000..000532ae --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/defaults/main.yml @@ -0,0 +1,15 @@ +--- +pg_user: postgres +db_default: postgres +master_port: 5433 +replica_port: 5434 + +test_table1: acme1 +test_pub: first_publication +test_pub2: second_publication +replication_role: logical_replication +replication_pass: alsdjfKJKDf1# +test_db: acme_db +test_subscription: test +test_subscription2: test2 +conn_timeout: 100 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml new file mode 100644 index 00000000..d72e4d23 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_replication diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml new file mode 100644 index 00000000..04c7788a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/main.yml @@ -0,0 +1,12 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# For testing getting publication and subscription info +- import_tasks: setup_publication.yml + when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18' + +# Initial CI tests of postgresql_info module +- import_tasks: postgresql_info_initial.yml + when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml new file mode 100644 index 00000000..0a117b75 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml @@ -0,0 +1,177 @@ +# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 
+ +- vars: + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: '{{ db_default }}' + + block: + + - name: Create test subscription + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + login_db: '{{ test_db }}' + state: present + publications: '{{ test_pub }}' + connparams: + host: 127.0.0.1 + port: '{{ master_port }}' + user: '{{ replication_role }}' + password: '{{ replication_pass }}' + dbname: '{{ test_db }}' + + - name: Create test subscription + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription2 }}' + login_db: '{{ test_db }}' + state: present + publications: '{{ test_pub2 }}' + connparams: + host: 127.0.0.1 + port: '{{ master_port }}' + user: '{{ replication_role }}' + password: '{{ replication_pass }}' + dbname: '{{ test_db }}' + + - name: postgresql_info - create role to check session_role + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + login_port: '{{ replica_port }}' + login_user: "{{ pg_user }}" + name: session_superuser + role_attr_flags: SUPERUSER + + - name: postgresql_info - test return values and session_role param + <<: *task_parameters + postgresql_info: + <<: *pg_parameters + login_port: '{{ replica_port }}' + session_role: session_superuser + + - assert: + that: + - result.version != {} + - result.in_recovery == false + - result.databases.{{ db_default }}.collate + - result.databases.{{ db_default }}.languages + - result.databases.{{ db_default }}.namespaces + - result.databases.{{ db_default }}.extensions + - result.databases.{{ test_db }}.subscriptions.{{ test_subscription }} + - result.databases.{{ test_db }}.subscriptions.{{ test_subscription2 }} + - result.settings + - result.tablespaces + - result.roles + + - name: postgresql_info - 
check filter param passed by list + <<: *task_parameters + postgresql_info: + <<: *pg_parameters + login_port: '{{ replica_port }}' + filter: + - ver* + - rol* + - in_recov* + + - assert: + that: + - result.version != {} + - result.roles + - result.in_recovery == false + - result.databases == {} + - result.repl_slots == {} + - result.replications == {} + - result.settings == {} + - result.tablespaces == {} + + - name: postgresql_info - check filter param passed by string + <<: *task_parameters + postgresql_info: + <<: *pg_parameters + filter: ver*,role* + + - assert: + that: + - result.version != {} + - result.roles + - result.databases == {} + - result.repl_slots == {} + - result.replications == {} + - result.settings == {} + - result.tablespaces == {} + + - name: postgresql_info - check filter param passed by string + <<: *task_parameters + postgresql_info: + <<: *pg_parameters + filter: ver* + + - assert: + that: + - result.version + - result.roles == {} + + - name: postgresql_info - check excluding filter param passed by list + <<: *task_parameters + postgresql_info: + <<: *pg_parameters + filter: + - "!ver*" + - "!rol*" + - "!in_rec*" + + - assert: + that: + - result.version == {} + - result.in_recovery == None + - result.roles == {} + - result.databases + + - name: postgresql_info - test return publication info + <<: *task_parameters + postgresql_info: + <<: *pg_parameters + login_db: '{{ test_db }}' + login_port: '{{ master_port }}' + trust_input: yes + + - assert: + that: + - result.version != {} + - result.in_recovery == false + - result.databases.{{ db_default }}.collate + - result.databases.{{ db_default }}.languages + - result.databases.{{ db_default }}.namespaces + - result.databases.{{ db_default }}.extensions + - result.databases.{{ test_db }}.publications.{{ test_pub }}.ownername == '{{ pg_user }}' + - result.databases.{{ test_db }}.publications.{{ test_pub2 }}.puballtables == true + - result.settings + - result.tablespaces + - result.roles + + - 
name: postgresql_info - test trust_input parameter + <<: *task_parameters + postgresql_info: + <<: *pg_parameters + login_db: '{{ test_db }}' + login_port: '{{ master_port }}' + trust_input: no + session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + register: result + ignore_errors: yes + + - assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/setup_publication.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/setup_publication.yml new file mode 100644 index 00000000..0d7df0d7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_info/tasks/setup_publication.yml @@ -0,0 +1,61 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Preparation for further tests of postgresql_subscription module. 
+ +- vars: + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: '{{ test_db }}' + + block: + - name: Create test db + <<: *task_parameters + postgresql_db: + login_user: '{{ pg_user }}' + login_port: '{{ master_port }}' + maintenance_db: '{{ db_default }}' + name: '{{ test_db }}' + + - name: Create test role + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + login_port: '{{ master_port }}' + name: '{{ replication_role }}' + password: '{{ replication_pass }}' + role_attr_flags: LOGIN,REPLICATION + + - name: Create test table + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + login_port: '{{ master_port }}' + name: '{{ test_table1 }}' + columns: + - id int + + - name: Master - dump schema + <<: *task_parameters + shell: pg_dumpall -p '{{ master_port }}' -s > /tmp/schema.sql + + - name: Replicat restore schema + <<: *task_parameters + shell: psql -p '{{ replica_port }}' -f /tmp/schema.sql + + - name: Create publication + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + login_port: '{{ master_port }}' + name: '{{ test_pub }}' + + - name: Create publication + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + login_port: '{{ master_port }}' + name: '{{ test_pub2 }}' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/meta/main.yml 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/main.yml new file mode 100644 index 00000000..79950143 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/main.yml @@ -0,0 +1,25 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Include distribution specific variables + include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + files: + - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml" + - default.yml + paths: + - vars + +# Only run on CentOS 7 because there is a stack trace on CentOS 8 because the module +# is looking for the incorrect version of plpython. 
+# https://gist.github.com/samdoran/8fc1b4ae834d3e66d1895d087419b8d8 +- name: Initial CI tests of postgresql_lang module + when: + - ansible_facts.distribution == 'CentOS' + - ansible_facts.distribution_major_version is version ('7', '==') + block: + - include_tasks: postgresql_lang_initial.yml + - include_tasks: postgresql_lang_add_owner_param.yml diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml new file mode 100644 index 00000000..5d21db56 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml @@ -0,0 +1,199 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- vars: + test_user1: alice + test_user2: bob + test_lang: plperl + non_existent_role: fake_role + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: postgres + + block: + - name: Create roles for tests + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ item }}' + loop: + - '{{ test_user1 }}' + - '{{ test_user2 }}' + + - name: Create lang with owner in check_mode + <<: *task_parameters + postgresql_lang: + <<: *pg_parameters + name: '{{ test_lang }}' + owner: '{{ test_user1 }}' + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + - result.queries == [] + + - name: Check that nothing was actually changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT r.rolname FROM pg_language l + JOIN pg_roles r ON 
l.lanowner = r.oid + WHERE l.lanname = '{{ test_lang }}' + AND r.rolname = '{{ test_user1 }}' + + - assert: + that: + - result.rowcount == 0 + + - name: Create lang with owner + <<: *task_parameters + postgresql_lang: + <<: *pg_parameters + name: '{{ test_lang }}' + owner: '{{ test_user1 }}' + trust_input: no + + - assert: + that: + - result is changed + - result.queries == ['CREATE LANGUAGE "{{ test_lang }}"', 'ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user1 }}"'] + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT r.rolname FROM pg_language l + JOIN pg_roles r ON l.lanowner = r.oid + WHERE l.lanname = '{{ test_lang }}' + AND r.rolname = '{{ test_user1 }}' + + - assert: + that: + - result.rowcount == 1 + + - name: Change lang owner in check_mode + <<: *task_parameters + postgresql_lang: + <<: *pg_parameters + name: '{{ test_lang }}' + owner: '{{ test_user2 }}' + trust_input: yes + check_mode: yes + + - assert: + that: + - result is changed + - result.queries == ['ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user2 }}"'] + + - name: Check that nothing was actually changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT r.rolname FROM pg_language l + JOIN pg_roles r ON l.lanowner = r.oid + WHERE l.lanname = '{{ test_lang }}' + AND r.rolname = '{{ test_user2 }}' + + - assert: + that: + - result.rowcount == 0 + + - name: Change lang owner + <<: *task_parameters + postgresql_lang: + <<: *pg_parameters + name: '{{ test_lang }}' + owner: '{{ test_user2 }}' + + - assert: + that: + - result is changed + # TODO: the first elem of the returned list below + # looks like a bug, not related with the option owner, needs to be checked + - result.queries == ["UPDATE pg_language SET lanpltrusted = false WHERE lanname = '{{ test_lang }}'", 'ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user2 }}"'] + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + 
query: > + SELECT r.rolname FROM pg_language l + JOIN pg_roles r ON l.lanowner = r.oid + WHERE l.lanname = '{{ test_lang }}' + AND r.rolname = '{{ test_user2 }}' + + - assert: + that: + - result.rowcount == 1 + + - name: Try to change lang owner again to the same role + <<: *task_parameters + postgresql_lang: + <<: *pg_parameters + name: '{{ test_lang }}' + owner: '{{ test_user2 }}' + + - assert: + that: + - result is not changed + - result.queries == [] + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT r.rolname FROM pg_language l + JOIN pg_roles r ON l.lanowner = r.oid + WHERE l.lanname = '{{ test_lang }}' + AND r.rolname = '{{ test_user2 }}' + + - assert: + that: + - result.rowcount == 1 + + - name: Drop test lang with owner, must ignore + <<: *task_parameters + postgresql_lang: + <<: *pg_parameters + name: '{{ test_lang }}' + state: absent + owner: '{{ non_existent_role }}' + + - assert: + that: + - result is changed + - result.queries == ["DROP LANGUAGE \"{{ test_lang }}\""] + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT r.rolname FROM pg_language l + JOIN pg_roles r ON l.lanowner = r.oid + WHERE l.lanname = '{{ test_lang }}' + + - assert: + that: + - result.rowcount == 0 + + # Clean up + - name: Drop test roles + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ item }}' + state: absent + loop: + - '{{ test_user1 }}' + - '{{ test_user2 }}' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml new file mode 100644 index 00000000..66023de8 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml @@ -0,0 +1,231 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Preparation for tests: +- name: Install PostgreSQL support packages + become: yes + action: "{{ ansible_facts.pkg_mgr }}" + args: + name: "{{ postgresql_lang_packages }}" + state: present + +############### +# Do main tests +# + +# Create language in check_mode: +- name: postgresql_lang - create plperl in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_lang: + db: postgres + login_user: "{{ pg_user }}" + name: plperl + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result is changed + - result.queries == [] + +- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0 + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'" + register: result + +- assert: + that: + - result.rowcount == 0 + +# Create language: +- name: postgresql_lang - create plperl + become_user: "{{ pg_user }}" + become: yes + postgresql_lang: + db: postgres + login_user: "{{ pg_user }}" + name: plperl + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.queries == ['CREATE LANGUAGE "plperl"'] + +- name: postgresql_lang - check that lang exists after previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'" + register: result + +- assert: + that: + - result.rowcount == 1 + +# Drop language in check_mode: +- name: postgresql_lang - drop plperl in check_mode + become_user: "{{ pg_user }}" + 
become: yes + postgresql_lang: + db: postgres + login_user: "{{ pg_user }}" + name: plperl + state: absent + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result is changed + - result.queries == [] + +- name: postgresql_lang - check that lang exists after previous step, rowcount must be 1 + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'" + register: result + +- assert: + that: + - result.rowcount == 1 + +# Drop language: +- name: postgresql_lang - drop plperl + become_user: "{{ pg_user }}" + become: yes + postgresql_lang: + db: postgres + login_user: "{{ pg_user }}" + name: plperl + state: absent + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.queries == ['DROP LANGUAGE "plperl"'] + +- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0 + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'" + register: result + +- assert: + that: + - result.rowcount == 0 + +# Check fail_on_drop yes +- name: postgresql_lang - drop c language to check fail_on_drop yes + become_user: "{{ pg_user }}" + become: yes + postgresql_lang: + db: postgres + login_user: "{{ pg_user }}" + name: c + state: absent + fail_on_drop: yes + register: result + ignore_errors: yes + +- assert: + that: + - result.failed == true + +# Check fail_on_drop no +- name: postgresql_lang - drop c language to check fail_on_drop no + become_user: "{{ pg_user }}" + become: yes + postgresql_lang: + db: postgres + login_user: "{{ pg_user }}" + name: c + state: absent + fail_on_drop: no + register: result + ignore_errors: yes + +- assert: + that: + - result.failed == false + +# Create trusted language: +- name: postgresql_lang - create plpythonu + become_user: "{{ 
pg_user }}" + become: yes + postgresql_lang: + db: postgres + login_user: "{{ pg_user }}" + name: plpythonu + trust: yes + force_trust: yes + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.queries == ['CREATE TRUSTED LANGUAGE "plpythonu"', "UPDATE pg_language SET lanpltrusted = true WHERE lanname = 'plpythonu'"] + +- name: postgresql_lang - check that lang exists and it's trusted after previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_language WHERE lanname = 'plpythonu' AND lanpltrusted = 't'" + register: result + +- assert: + that: + - result.rowcount == 1 + +# Drop language cascade, tests of aliases: +- name: postgresql_lang - drop plpythonu cascade + become_user: "{{ pg_user }}" + become: yes + postgresql_lang: + login_db: postgres + login_user: "{{ pg_user }}" + login_port: 5432 + lang: plpythonu + state: absent + cascade: yes + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.queries == ['DROP LANGUAGE "plpythonu" CASCADE'] + +- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0 + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_language WHERE lanname = 'plpythonu'" + register: result + +- assert: + that: + - result.rowcount == 0 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml new file mode 100644 index 00000000..8d4bcc7e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml @@ -0,0 +1,3 @@ +postgresql_lang_packages: + - postgresql-plperl 
+ - postgresql-plpython diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml new file mode 100644 index 00000000..5da004c8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml @@ -0,0 +1,3 @@ +postgresql_lang_packages: + - postgresql-plperl + - postgresql-plpython3 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/default.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/default.yml new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_lang/vars/default.yml diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/defaults/main.yml new file mode 100644 index 00000000..7b1d49e4 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/defaults/main.yml @@ -0,0 +1,6 @@ +test_group1: group1 +test_group2: group2 +test_group3: group.with.dots +test_user1: user1 +test_user2: user.with.dots +dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/main.yml new file mode 100644 index 00000000..ea058d08 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/main.yml @@ -0,0 +1,7 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_membership module +- import_tasks: postgresql_membership_initial.yml diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml new file mode 100644 index 00000000..d8d7bb6b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml @@ -0,0 +1,390 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#################### +# Prepare for tests: + +# Create test roles: +- name: postgresql_membership - create test roles + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + login_user: "{{ pg_user }}" + db: postgres + name: "{{ item }}" + ignore_errors: yes + with_items: + - "{{ test_group1 }}" + - "{{ test_group2 }}" + - "{{ test_group3 }}" + - "{{ test_user1 }}" + - "{{ test_user2 }}" + +################ +# Do main tests: + +### Test check_mode +# Grant test_group1 to test_user1 in check_mode: +- name: postgresql_membership - grant test_group1 to test_user1 in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: "{{ test_group1 }}" + user: "{{ test_user1 }}" + state: present + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result is changed + - result.groups == ["{{ test_group1 }}"] + - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""] + - result.granted.{{ test_group1 }} == ["{{ test_user1 }}"] + - result.state == "present" + - result.target_roles == ["{{ test_user1 }}"] + +# Try to revoke test_group1 from test_user1 to check that +# nothing actually changed in check_mode at the previous step: +- name: postgresql_membership - try to revoke test_group1 from test_user1 for checking check_mode + become_user: "{{ pg_user }}" + become: yes + 
postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: "{{ test_group1 }}" + user: "{{ test_user1 }}" + state: absent + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result is not changed + - result.groups == ["{{ test_group1 }}"] + - result.queries == [] + - result.revoked.{{ test_group1 }} == [] + - result.state == "absent" + - result.target_roles == ["{{ test_user1 }}"] +### End of test check_mode + +# Grant test_group1 to test_user1: +- name: postgresql_membership - grant test_group1 to test_user1 + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: "{{ test_group1 }}" + user: "{{ test_user1 }}" + state: present + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.groups == ["{{ test_group1 }}"] + - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""] + - result.granted.{{ test_group1 }} == ["{{ test_user1 }}"] + - result.state == "present" + - result.target_roles == ["{{ test_user1 }}"] + +# Grant test_group1 to test_user1 again to check that nothing changes: +- name: postgresql_membership - grant test_group1 to test_user1 again + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: "{{ test_group1 }}" + user: "{{ test_user1 }}" + state: present + register: result + ignore_errors: yes + +- assert: + that: + - result is not changed + - result.groups == ["{{ test_group1 }}"] + - result.queries == [] + - result.granted.{{ test_group1 }} == [] + - result.state == "present" + - result.target_roles == ["{{ test_user1 }}"] + +# Revoke test_group1 from test_user1: +- name: postgresql_membership - revoke test_group1 from test_user1 + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: "{{ test_group1 }}" + user: "{{ test_user1 }}" + state: absent 
+ register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.groups == ["{{ test_group1 }}"] + - result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\""] + - result.revoked.{{ test_group1 }} == ["{{ test_user1 }}"] + - result.state == "absent" + - result.target_roles == ["{{ test_user1 }}"] + +# Revoke test_group1 from test_user1 again to check that nothing changes: +- name: postgresql_membership - revoke test_group1 from test_user1 again + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: "{{ test_group1 }}" + user: "{{ test_user1 }}" + state: absent + register: result + ignore_errors: yes + +- assert: + that: + - result is not changed + - result.groups == ["{{ test_group1 }}"] + - result.queries == [] + - result.revoked.{{ test_group1 }} == [] + - result.state == "absent" + - result.target_roles == ["{{ test_user1 }}"] + +# Grant test_group1 and test_group2 to test_user1 and test_user2: +- name: postgresql_membership - grant two groups to two users + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: + - "{{ test_group1 }}" + - "{{ test_group2 }}" + user: + - "{{ test_user1 }}" + - "{{ test_user2 }}" + state: present + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"] + - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group1 }}\" TO \"{{ test_user2 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user2 }}\""] + - result.granted.{{ test_group1 }} == ["{{ test_user1 }}", "{{ test_user2 }}"] + - result.granted.{{ test_group2 }} == ["{{ test_user1 }}", "{{ test_user2 }}"] + - result.state == "present" + - result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"] + +# Grant 
test_group1 and test_group2 to test_user1 and test_user2 again to check that nothing changes: +- name: postgresql_membership - grant two groups to two users again + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: + - "{{ test_group1 }}" + - "{{ test_group2 }}" + user: + - "{{ test_user1 }}" + - "{{ test_user2 }}" + state: present + register: result + ignore_errors: yes + +- assert: + that: + - result is not changed + - result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"] + - result.queries == [] + - result.granted.{{ test_group1 }} == [] + - result.granted.{{ test_group2 }} == [] + - result.state == "present" + - result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"] + +# Revoke only test_group1 from test_user1: +- name: postgresql_membership - revoke one group from one user + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: "{{ test_group1 }}" + user: "{{ test_user1 }}" + state: absent + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.groups == ["{{ test_group1 }}"] + - result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\""] + - result.revoked.{{ test_group1 }} == ["{{ test_user1 }}"] + - result.state == "absent" + - result.target_roles == ["{{ test_user1 }}"] + +# Try to grant test_group1 and test_group2 to test_user1 and test_user2 again +# to check that nothing changes with test_user2: +- name: postgresql_membership - grant two groups to two users again + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: + - "{{ test_group1 }}" + - "{{ test_group2 }}" + user: + - "{{ test_user1 }}" + - "{{ test_user2 }}" + state: present + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"] + 
- result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""] + - result.granted.{{ test_group1 }} == ["{{ test_user1 }}"] + - result.granted.{{ test_group2 }} == [] + - result.state == "present" + - result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"] + +##################### +# Check fail_on_role: + +# Try to grant non existent group to non existent role with fail_on_role=yes: +- name: postgresql_membership - revoke non existen group from non existen role + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: fake_group + user: fake_user + state: present + fail_on_role: yes + register: result + ignore_errors: yes + +- assert: + that: + - result is not changed + +# Try to grant non existent group to non existent role with fail_on_role=no: +- name: postgresql_membership - revoke non existen group from non existen role + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: fake_group + user: fake_user + state: present + fail_on_role: no + register: result + ignore_errors: yes + +- assert: + that: + - result is not changed + - result.granted == {} + - result.groups == [] + - result.target_roles == [] + - result.state == 'present' + +# Try to revoke non existent group from non existent role with fail_on_role=no: +- name: postgresql_membership - revoke non existen group from non existen role + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: fake_group + user: fake_user + state: absent + fail_on_role: no + register: result + ignore_errors: yes + +- assert: + that: + - result is not changed + - result.revoked == {} + - result.groups == [] + - result.target_roles == [] + - result.state == 'absent' + +# Grant test_group3 with a name containing dots to test_user1. 
+- name: postgresql_membership - grant test_group3 with dots to test_user1 + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: "{{ test_group3 }}" + user: "{{ test_user1 }}" + state: present + register: result + +- assert: + that: + - result is changed + - result.queries == ["GRANT \"{{ test_group3 }}\" TO \"{{ test_user1 }}\""] + +############################# +# Check trust_input parameter + +- name: postgresql_membership - try to use dangerous input, don't trust + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: + - "{{ test_group3}}" + - "{{ dangerous_name }}" + user: "{{ test_user1 }}" + state: present + trust_input: no + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous' + +- name: postgresql_membership - try to use dangerous input, trust explicitly + become_user: "{{ pg_user }}" + become: yes + postgresql_membership: + login_user: "{{ pg_user }}" + db: postgres + group: + - "{{ test_group3}}" + - "{{ dangerous_name }}" + user: "{{ test_user1 }}" + state: present + trust_input: yes + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + - result.msg == 'Role {{ dangerous_name }} does not exist' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/defaults/main.yml new file mode 100644 index 00000000..e43723c4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/defaults/main.yml @@ -0,0 +1,3 @@ +test_tablespace_path: "/ssd" + +dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/main.yml new file mode 100644 index 00000000..4b2f5751 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/main.yml @@ -0,0 +1,9 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_owner module +- import_tasks: postgresql_owner_initial.yml + when: + - postgres_version_resp.stdout is version('9.4', '>=') 
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml new file mode 100644 index 00000000..65623675 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml @@ -0,0 +1,1073 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +#################### +# Prepare for tests: + +# Create test roles: +- name: postgresql_owner - create test roles + become_user: '{{ pg_user }}' + become: true + postgresql_user: + login_user: '{{ pg_user }}' + db: postgres + name: '{{ item }}' + ignore_errors: true + with_items: + - alice + - bob + +- name: postgresql_owner - create test database + become_user: '{{ pg_user }}' + become: true + postgresql_db: + login_user: '{{ pg_user }}' + db: acme + +- name: postgresql_owner - create test table + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: CREATE TABLE my_table (id int) + +- name: postgresql_owner - set owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: my_table + obj_type: table + +- name: postgresql_owner - create test sequence + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: CREATE SEQUENCE test_seq + +- name: postgresql_owner - create test function + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: > + CREATE FUNCTION increment(integer) RETURNS integer AS 'select $1 + 1;' + 
LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; + +- name: postgresql_owner - create test schema + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: CREATE SCHEMA test_schema + +- name: postgresql_owner - create test view + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: CREATE VIEW test_view AS SELECT * FROM my_table + +- name: postgresql_owner - create test materialized view + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: acme + query: CREATE MATERIALIZED VIEW test_mat_view AS SELECT * FROM my_table + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_owner - drop dir for test tablespace + become: true + file: + path: '{{ test_tablespace_path }}' + state: absent + ignore_errors: true + +- name: postgresql_owner - disable selinux + become: true + shell: setenforce 0 + ignore_errors: true + +- name: postgresql_owner - create dir for test tablespace + become: true + file: + path: '{{ test_tablespace_path }}' + state: directory + owner: '{{ pg_user }}' + group: '{{ pg_user }}' + mode: '0700' + ignore_errors: true + +- name: > + postgresql_owner - create a new tablespace called acme and + set bob as an its owner + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: acme + login_user: '{{ pg_user }}' + name: acme + owner: alice + location: '{{ test_tablespace_path }}' + +################ +# Do main tests: + +# +# check reassign_owned_by param +# +# try to reassign ownership to non existent user: +- name: postgresql_owner - reassign_owned_by to non existent user + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: non_existent + reassign_owned_by: bob + register: result + ignore_errors: true + +- assert: + that: + - result.failed == true + +- name: postgresql_owner - 
reassign_owned_by, check fail_on_role + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: alice + reassign_owned_by: non_existent + fail_on_role: false + register: result + +- assert: + that: + - result.failed == false + +- name: postgresql_owner - reassign_owned_by in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: alice + reassign_owned_by: bob + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['REASSIGN OWNED BY "bob" TO "alice"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_tables WHERE tablename = 'my_table' + AND tableowner = 'alice' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - reassign_owned_by + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: alice + reassign_owned_by: bob + trust_input: yes + register: result + +- assert: + that: + - result is changed + - result.queries == ['REASSIGN OWNED BY "bob" TO "alice"'] + +- name: postgresql_owner - check that ownership has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_tables WHERE tablename = 'my_table' AND tableowner = 'alice' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +########################### +# Test trust_inpt parameter + +- name: postgresql_owner - reassign_owned_by, trust_input no + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: '{{ dangerous_name }}' + 
reassign_owned_by: alice + trust_input: no + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous' + +- name: postgresql_owner - reassign_owned_by, trust_input yes by default + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: '{{ dangerous_name }}' + reassign_owned_by: alice + register: result + ignore_errors: yes + +- assert: + that: + - result is not changed + - result.msg is search('does not exist') +# End of testing trust_input + +# +# Check obj_type for each type +# + +# ############################# +# check_mode obj_type: database +- name: postgresql_owner - set db owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: acme + obj_type: database + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER DATABASE "acme" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_database AS d JOIN pg_roles AS r + ON d.datdba = r.oid WHERE d.datname = 'acme' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set db owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: acme + obj_type: database + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER DATABASE "acme" OWNER TO "bob"'] + +- name: postgresql_owner - check that db owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + 
login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_database AS d JOIN pg_roles AS r + ON d.datdba = r.oid WHERE d.datname = 'acme' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set db owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: acme + obj_type: database + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that db owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_database AS d JOIN pg_roles AS r + ON d.datdba = r.oid WHERE d.datname = 'acme' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set table owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: my_table + obj_type: table + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TABLE "my_table" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_tables WHERE tablename = 'my_table' + AND tableowner = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set db owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: my_table + obj_type: table + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TABLE "my_table" OWNER TO "bob"'] + 
+- name: postgresql_owner - check that table owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_tables WHERE tablename = 'my_table' + AND tableowner = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set db owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: my_table + obj_type: table + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that table owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_tables WHERE tablename = 'my_table' + AND tableowner = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set sequence owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_seq + obj_type: sequence + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER SEQUENCE "test_seq" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_class AS c JOIN pg_roles AS r + ON c.relowner = r.oid WHERE c.relkind = 'S' + AND c.relname = 'test_seq' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set db owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: 
bob + obj_name: test_seq + obj_type: sequence + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER SEQUENCE "test_seq" OWNER TO "bob"'] + +- name: postgresql_owner - check that table owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_class AS c JOIN pg_roles AS r + ON c.relowner = r.oid WHERE c.relkind = 'S' + AND c.relname = 'test_seq' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set db owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_seq + obj_type: sequence + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that sequence owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_class AS c JOIN pg_roles AS r + ON c.relowner = r.oid WHERE c.relkind = 'S' + AND c.relname = 'test_seq' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set function owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: increment + obj_type: function + check_mode: true + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER FUNCTION increment OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('10', '>=') + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + 
login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('10', '>=') + +- name: postgresql_owner - set func owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: increment + obj_type: function + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER FUNCTION increment OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('10', '>=') + +- name: postgresql_owner - check that func owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('10', '>=') + +- name: postgresql_owner - set func owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: increment + obj_type: function + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('10', '>=') + +- name: postgresql_owner - check that function owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM 
pg_proc AS f JOIN pg_roles AS r + ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('10', '>=') + +- name: postgresql_owner - set schema owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_schema + obj_type: schema + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER SCHEMA "test_schema" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM information_schema.schemata + WHERE schema_name = 'test_schema' AND schema_owner = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set schema owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_schema + obj_type: schema + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER SCHEMA "test_schema" OWNER TO "bob"'] + +- name: postgresql_owner - check that schema owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM information_schema.schemata + WHERE schema_name = 'test_schema' AND schema_owner = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set schema owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + 
new_owner: bob + obj_name: test_seq + obj_type: sequence + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that schema owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM information_schema.schemata + WHERE schema_name = 'test_schema' AND schema_owner = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set view owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_view + obj_type: view + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER VIEW "test_view" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_views WHERE viewname = 'test_view' AND viewowner = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set view owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_view + obj_type: view + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER VIEW "test_view" OWNER TO "bob"'] + +- name: postgresql_owner - check that view owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_views WHERE viewname = 'test_view' AND viewowner = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set view owner again 
+ become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_view + obj_type: view + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that view owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_views WHERE viewname = 'test_view' AND viewowner = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set matview owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_mat_view + obj_type: matview + check_mode: true + register: result + when: postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER MATERIALIZED VIEW "test_mat_view" OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_view' AND matviewowner = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_owner - set matview owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_mat_view + obj_type: matview + register: result + when: postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result is changed + - result.queries == ['ALTER MATERIALIZED VIEW "test_mat_view" 
OWNER TO "bob"'] + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_owner - check that matview owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_mat_view' AND matviewowner = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_owner - set matview owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: test_mat_view + obj_type: matview + register: result + when: postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_owner - check that matview owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_mat_view' AND matviewowner = 'bob' + ignore_errors: true + register: result + when: postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_owner - set tablespace owner in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: acme + obj_type: tablespace + check_mode: true + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TABLESPACE "acme" OWNER TO "bob"'] + +- name: postgresql_owner - check that nothing changed after the previous step + become_user: '{{ pg_user }}' + become: true 
+ postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_tablespace AS t JOIN pg_roles AS r + ON t.spcowner = r.oid WHERE t.spcname = 'acme' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_owner - set tablespace owner + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: acme + obj_type: tablespace + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TABLESPACE "acme" OWNER TO "bob"'] + +- name: postgresql_owner - check that tablespace owner has been changed after the previous step + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_tablespace AS t JOIN pg_roles AS r + ON t.spcowner = r.oid WHERE t.spcname = 'acme' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_owner - set tablespace owner again + become_user: '{{ pg_user }}' + become: true + postgresql_owner: + login_user: '{{ pg_user }}' + db: acme + new_owner: bob + obj_name: acme + obj_type: tablespace + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +- name: postgresql_owner - check that tablespace owner is bob + become_user: '{{ pg_user }}' + become: true + postgresql_query: + db: acme + login_user: '{{ pg_user }}' + query: > + SELECT 1 FROM pg_tablespace AS t JOIN pg_roles AS r + ON t.spcowner = r.oid WHERE t.spcname = 'acme' AND r.rolname = 'bob' + ignore_errors: true + register: result + +- assert: + that: + - result.rowcount == 1 + +# +# Crean up +# +- name: postgresql_owner - drop test database + become_user: '{{ pg_user }}' + become: true + postgresql_db: + login_user: '{{ pg_user }}' + db: acme + state: absent + +- name: postgresql_owner - drop test tablespace 
+ become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + state: absent diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/defaults/main.yml new file mode 100644 index 00000000..4e1fe7dd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/defaults/main.yml @@ -0,0 +1,23 @@ +--- +pg_hba_test_ips: +- contype: local + users: 'all,postgres,test' +- source: '0000:ffff::' + netmask: 'ffff:fff0::' +- source: '192.168.0.0/24' + netmask: '' + databases: 'all,replication' +- source: '192.168.1.0/24' + netmask: '' + databases: 'all' + method: reject +- source: '127.0.0.1/32' + netmask: '' +- source: '::1/128' + netmask: '' +- source: '0000:ff00::' + netmask: 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff00' + method: scram-sha-256 +- source: '172.16.0.0' + netmask: '255.255.0.0' + method: trust diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/main.yml new file mode 100644 index 00000000..d6d298d5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/main.yml @@ -0,0 +1,7 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_pg_hba module +- import_tasks: postgresql_pg_hba_initial.yml diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml new file mode 100644 index 00000000..478d8936 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml @@ -0,0 +1,183 @@ +- name: Make sure file does not exist + file: + dest: /tmp/pg_hba.conf + state: absent + +- name: check_mode run + postgresql_pg_hba: + dest: /tmp/pg_hba.conf + contype: host + source: '0000:ffff::' + netmask: 'ffff:fff0::' + method: md5 + backup: 'True' + order: sud + state: "{{item}}" + check_mode: yes + with_items: + - present + - absent + +- name: check_mode check + stat: + path: /tmp/pg_hba.conf + register: 
pg_hba_checkmode_check + +- name: Remove several ip addresses for idempotency check + postgresql_pg_hba: + contype: "{{item.contype|default('host')}}" + databases: "{{item.databases|default('all')}}" + dest: /tmp/pg_hba.conf + method: "{{item.method|default('md5')}}" + netmask: "{{item.netmask|default('')}}" + order: sud + source: "{{item.source|default('')}}" + state: absent + users: "{{item.users|default('all')}}" + with_items: "{{pg_hba_test_ips}}" + register: pg_hba_idempotency_check1 + +- name: idempotency not creating file check + stat: + path: /tmp/pg_hba.conf + register: pg_hba_idempotency_file_check + +- name: Add several ip addresses + postgresql_pg_hba: + backup: 'True' + contype: "{{item.contype|default('host')}}" + create: 'True' + databases: "{{item.databases|default('all')}}" + dest: /tmp/pg_hba.conf + method: "{{item.method|default('md5')}}" + netmask: "{{item.netmask|default('')}}" + order: sud + source: "{{item.source|default('')}}" + state: present + users: "{{item.users|default('all')}}" + register: pg_hba_change + with_items: "{{pg_hba_test_ips}}" + +- name: Able to add options on rule without + postgresql_pg_hba: + dest: "/tmp/pg_hba.conf" + users: "+some" + order: "sud" + state: "present" + contype: "local" + method: "cert" + options: "{{ item }}" + address: "" + with_items: + - "" + - "clientcert=1" + +- name: Retain options even if they contain spaces + postgresql_pg_hba: + dest: "/tmp/pg_hba.conf" + users: "+some" + order: "sud" + state: "present" + contype: "{{ item.contype }}" + method: "{{ item.method }}" + options: "{{ item.options }}" + address: "{{ item.address }}" + with_items: + - { address: "", contype: "local", method: "ldap", options: "ldapserver=example.com ldapport=389 ldapprefix=\"cn=\"" } + - { address: "red", contype: "hostssl", method: "cert", options: "clientcert=1 map=mymap" } + - { address: "blue", contype: "hostssl", method: "cert", options: "clientcert=1 map=mymap" } + register: pg_hba_options + +- name: read pg_hba 
rules + postgresql_pg_hba: + dest: /tmp/pg_hba.conf + register: pg_hba + +- name: Add several ip addresses again for idempotency check + postgresql_pg_hba: + contype: "{{item.contype|default('host')}}" + databases: "{{item.databases|default('all')}}" + dest: /tmp/pg_hba.conf + method: "{{item.method|default('md5')}}" + netmask: "{{item.netmask|default('')}}" + order: sud + source: "{{item.source|default('')}}" + state: present + users: "{{item.users|default('all')}}" + with_items: "{{pg_hba_test_ips}}" + register: pg_hba_idempotency_check2 + +- name: pre-backup stat + stat: + path: /tmp/pg_hba.conf + register: prebackupstat + +- name: Add new ip address for backup check and netmask_sameas_prefix check + postgresql_pg_hba: + backup: 'True' + contype: host + dest: /tmp/pg_hba.conf + method: md5 + netmask: 255.255.255.0 + order: sud + source: '172.21.0.0' + state: present + register: pg_hba_backup_check2 + +- name: Add new ip address for netmask_sameas_prefix check + postgresql_pg_hba: + backup: 'True' + contype: host + dest: /tmp/pg_hba.conf + method: md5 + order: sud + source: '172.21.0.0/24' + state: present + register: netmask_sameas_prefix_check + +- name: post-backup stat + stat: + path: "{{pg_hba_backup_check2.backup_file}}" + register: postbackupstat + +- name: Dont allow netmask for src in [all, samehost, samenet] + postgresql_pg_hba: + contype: host + dest: /tmp/pg_hba.conf + method: md5 + netmask: '255.255.255.255' + order: sud + source: all + state: present + register: pg_hba_fail_src_all_with_netmask + ignore_errors: yes + +- debug: + var: pg_hba.pg_hba +- assert: + that: + - 'pg_hba.pg_hba == [ + { "db": "all", "method": "ldap", "type": "local", "usr": "+some", "options": "ldapserver=example.com ldapport=389 ldapprefix=\"cn=\"" }, + { "db": "all", "method": "md5", "type": "local", "usr": "postgres" }, + { "db": "all", "method": "md5", "type": "local", "usr": "test" }, + { "db": "all", "method": "md5", "type": "local", "usr": "all" }, + { "db": "all", 
"method": "cert", "src": "blue", "type": "hostssl", "usr": "+some", "options": "clientcert=1 map=mymap" }, + { "db": "all", "method": "cert", "src": "red", "type": "hostssl", "usr": "+some", "options": "clientcert=1 map=mymap" }, + { "db": "all", "method": "md5", "src": "127.0.0.1/32", "type": "host", "usr": "all" }, + { "db": "all", "method": "md5", "src": "::1/128", "type": "host", "usr": "all" }, + { "db": "all", "method": "scram-sha-256", "src": "0:ff00::/120", "type": "host", "usr": "all" }, + { "db": "replication", "method": "md5", "src": "192.168.0.0/24", "type": "host", "usr": "all" }, + { "db": "all", "method": "md5", "src": "192.168.0.0/24", "type": "host", "usr": "all" }, + { "db": "all", "method": "reject", "src": "192.168.1.0/24", "type": "host", "usr": "all" }, + { "db": "all", "method": "trust", "src": "172.16.0.0/16", "type": "host", "usr": "all" }, + { "db": "all", "method": "md5", "src": "0:fff0::/28", "type": "host", "usr": "all" } + ]' + - 'pg_hba_change is changed' + - 'pg_hba_checkmode_check.stat.exists == false' + - 'not pg_hba_idempotency_check1 is changed' + - 'not pg_hba_idempotency_check2 is changed' + - 'pg_hba_idempotency_file_check.stat.exists == false' + - 'prebackupstat.stat.checksum == postbackupstat.stat.checksum' + - 'pg_hba_fail_src_all_with_netmask is failed' + - 'not netmask_sameas_prefix_check is changed' + - 'pg_hba_options is changed' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/defaults/main.yml new file mode 100644 index 00000000..73eb55ae --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/defaults/main.yml @@ -0,0 +1,2 @@ +--- +db_default: postgres diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml new file mode 100644 index 00000000..bcb18d2f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/main.yml @@ -0,0 +1,9 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_ping module +- import_tasks: postgresql_ping_initial.yml + vars: + db_name_nonexist: fake_db diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml new file mode 100644 index 00000000..9d35d91c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml @@ -0,0 +1,75 @@ +# Test code for the postgresql_ping module +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: postgresql_ping - test return values + become_user: "{{ pg_user }}" + become: yes + postgresql_ping: + db: "{{ db_default }}" + login_user: "{{ pg_user }}" + register: result + ignore_errors: yes + +- assert: + that: + - result.is_available == true + - result.server_version != {} + - result.server_version.major != false + - result.server_version.minor != false + - result is not changed + +- name: postgresql_ping - check ping of non-existing database doesn't return anything + become_user: "{{ pg_user }}" + become: yes + postgresql_ping: + db: "{{ db_name_nonexist }}" + login_user: "{{ pg_user }}" + register: result + ignore_errors: yes + +- assert: + that: + - result.is_available == false + - result.server_version == {} + - result is not changed + +- name: postgresql_ping - ping DB with SSL + become_user: "{{ pg_user }}" + become: yes + postgresql_ping: + db: "{{ ssl_db }}" + login_user: "{{ ssl_user }}" + login_password: "{{ ssl_pass }}" + login_host: 127.0.0.1 + login_port: 5432 + ssl_mode: require + ca_cert: '{{ ssl_rootcert }}' + trust_input: yes + register: result + when: + - ansible_os_family == 'Debian' + - postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result.is_available == true + when: + - 
ansible_os_family == 'Debian' + - postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_ping - check trust_input + become_user: "{{ pg_user }}" + become: yes + postgresql_ping: + db: "{{ db_default }}" + login_user: "{{ pg_user }}" + trust_input: no + session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/defaults/main.yml new file mode 100644 index 00000000..e03dd494 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/defaults/main.yml @@ -0,0 +1,12 @@ +db_name: ansible_db +db_user1: ansible_db_user1 +db_user2: ansible_db_user2 +db_user3: ansible_db_user3 +db_user_with_dots1: role.with.dots1 +db_user_with_dots2: role.with.dots2 +db_name_with_hyphens: ansible-db +db_user_with_hyphens: ansible-db-user +db_schema_with_hyphens: ansible-db-schema +db_session_role1: session_role1 +db_session_role2: session_role2 +dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/main.yml new file mode 100644 index 00000000..cf7b6352 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/main.yml @@ -0,0 +1,19 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- include_tasks: postgresql_privs_session_role.yml + when: postgres_version_resp.stdout is version('9.4', '>=') + +# Initial CI tests of postgresql_privs module: +- include_tasks: postgresql_privs_initial.yml + when: postgres_version_resp.stdout is version('9.4', '>=') + +# General tests: +- include_tasks: postgresql_privs_general.yml + when: postgres_version_resp.stdout is version('9.4', '>=') + +# Tests default_privs with target_role: +- include_tasks: test_target_role.yml + when: postgres_version_resp.stdout is version('9.4', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml new file mode 100644 index 00000000..f5d502d0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml @@ -0,0 +1,50 @@ +- name: "Admin user is allowed to access pg_authid relation: password comparison will succeed, password won't be updated" + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md5{{ (db_password ~ db_user1) | hash('md5')}}" + db: "{{ db_name }}" + priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP' + login_user: "{{ pg_user }}" + register: redo_as_admin + +- name: "Check that task succeeded without any change" + assert: + that: + - 'redo_as_admin is not failed' + - 'redo_as_admin is not changed' + - 'redo_as_admin is successful' + +- name: "Check that normal user isn't allowed to access pg_authid" + shell: 'psql -c "select * from pg_authid;" {{ db_name }} {{ db_user1 }}' + environment: + PGPASSWORD: '{{ db_password }}' + ignore_errors: yes + register: pg_authid + +- assert: + that: + - 'pg_authid is failed' + - pg_authid.stderr is search('permission denied for (relation|table) pg_authid') + +- name: "Normal user isn't allowed to access pg_authid relation: password comparison will fail, password will be updated" + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md5{{ (db_password ~ db_user1) | hash('md5')}}" + db: "{{ db_name }}" + priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP' + login_user: "{{ db_user1 }}" + login_password: "{{ db_password }}" + register: redo_as_normal_user + +- name: "Check that task succeeded and that result is 
changed" + assert: + that: + - 'redo_as_normal_user is not failed' + - 'redo_as_normal_user is changed' + - 'redo_as_normal_user is successful' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml new file mode 100644 index 00000000..530e0d1e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml @@ -0,0 +1,1533 @@ +# Setup +- name: Create DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ db_name }}" + login_user: "{{ pg_user }}" + +- name: Create a user to be owner of objects + postgresql_user: + name: "{{ db_user3 }}" + state: present + encrypted: yes + password: password + role_attr_flags: CREATEDB,LOGIN + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + +- name: Create a user to be given permissions and other tests + postgresql_user: + name: "{{ db_user2 }}" + state: present + encrypted: yes + password: password + role_attr_flags: LOGIN + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + +############################# +# Test of solving bug 656 # +############################# +- name: Create DB with hyphen in the name + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ db_name_with_hyphens }}" + login_user: "{{ pg_user }}" + register: result + +- assert: + that: + - result is changed + +- name: Create a user with hyphen in the name + postgresql_user: + name: "{{ db_user_with_hyphens }}" + state: present + encrypted: yes + password: password + role_attr_flags: CREATEDB,LOGIN + db: "{{ db_name_with_hyphens }}" + login_user: "{{ pg_user }}" + register: result + +- assert: + that: + - result is changed + +- name: Create 
schema with hyphen in the name + postgresql_schema: + login_user: "{{ pg_user }}" + login_password: password + db: "{{ db_name_with_hyphens }}" + name: "{{ db_schema_with_hyphens }}" + state: present + register: result + +- assert: + that: + - result is changed + +# Also covers https://github.com/ansible-collections/community.general/issues/884 +- name: Set table default privs on the schema with hyphen in the name + postgresql_privs: + login_user: "{{ pg_user }}" + password: password + db: "{{ db_name_with_hyphens }}" + schema: "{{ db_schema_with_hyphens }}" + role: "{{ db_user_with_hyphens }}" + type: default_privs + obj: TABLES + privs: all + state: present + usage_on_types: yes + register: result + check_mode: yes + +- assert: + that: + - result is changed + - result.queries is search('ON TYPES') + +# Also covers https://github.com/ansible-collections/community.general/issues/884 +- name: Set table default privs on the schema with hyphen in the name + postgresql_privs: + login_user: "{{ pg_user }}" + password: password + db: "{{ db_name_with_hyphens }}" + schema: "{{ db_schema_with_hyphens }}" + role: "{{ db_user_with_hyphens }}" + type: default_privs + obj: TABLES + privs: all + state: present + usage_on_types: no + register: result + +- assert: + that: + - result is changed + - result.queries is not search('ON TYPES') + +- name: Delete table default privs on the schema with hyphen in the name + postgresql_privs: + login_user: "{{ pg_user }}" + password: password + db: "{{ db_name_with_hyphens }}" + schema: "{{ db_schema_with_hyphens }}" + role: "{{ db_user_with_hyphens }}" + type: default_privs + obj: TABLES + privs: all + state: absent + register: result + +- assert: + that: + - result is changed + +- name: Delete schema with hyphen in the name + postgresql_schema: + login_user: "{{ pg_user }}" + login_password: password + db: "{{ db_name_with_hyphens }}" + name: "{{ db_schema_with_hyphens }}" + state: absent + register: result + +- assert: + that: + - result 
is changed + +- name: Delete a user with hyphen in the name + postgresql_user: + name: "{{ db_user_with_hyphens }}" + state: absent + encrypted: yes + password: password + role_attr_flags: CREATEDB,LOGIN + db: "{{ db_name_with_hyphens }}" + login_user: "{{ pg_user }}" + register: result + +- assert: + that: + - result is changed + +- name: Delete DB with hyphen in the name + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: absent + name: "{{ db_name_with_hyphens }}" + login_user: "{{ pg_user }}" + register: result + +- assert: + that: + - result is changed + +############################# +# Test of solving bug 27327 # +############################# + +# Create the test table and view: +- name: Create table + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + login_user: "{{ pg_user }}" + db: postgres + name: test_table1 + columns: + - id int + +- name: Create view + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + login_user: "{{ pg_user }}" + db: postgres + query: "CREATE VIEW test_view AS SELECT id FROM test_table1" + +# Test check_mode: +- name: Grant SELECT on test_view, check_mode + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + login_user: "{{ pg_user }}" + db: postgres + state: present + privs: SELECT + type: table + objs: test_view + roles: "{{ db_user2 }}" + trust_input: no + check_mode: yes + register: result + +- assert: + that: + - result is changed + +# Check: +- name: Check that nothing was changed after the prev step + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + login_user: "{{ pg_user }}" + db: postgres + query: "SELECT grantee FROM information_schema.role_table_grants WHERE table_name='test_view' AND grantee = '{{ db_user2 }}'" + register: result + +- assert: + that: + - result.rowcount == 0 + +# Test true mode: +- name: Grant SELECT on test_view + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + login_user: "{{ pg_user }}" + db: postgres + 
state: present + privs: SELECT + type: table + objs: test_view + roles: "{{ db_user2 }}" + trust_input: no + register: result + +- assert: + that: + - result is changed + +# Check: +- name: Check that nothing was changed after the prev step + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + login_user: "{{ pg_user }}" + db: postgres + query: "SELECT grantee FROM information_schema.role_table_grants WHERE table_name='test_view' AND grantee = '{{ db_user2 }}'" + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test true mode: +- name: Try to grant SELECT again + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + login_user: "{{ pg_user }}" + db: postgres + state: present + privs: SELECT + type: table + objs: test_view + roles: "{{ db_user2 }}" + trust_input: no + register: result + +- assert: + that: + - result is not changed + +# Cleanup: +- name: Drop test view + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + login_user: "{{ pg_user }}" + db: postgres + query: "DROP VIEW test_view" + +- name: Drop test table + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + login_user: "{{ pg_user }}" + db: postgres + name: test_table1 + state: absent + +###################################################### +# Test foreign data wrapper and foreign server privs # +###################################################### + +# Foreign data wrapper setup +- name: Create foreign data wrapper extension + become: yes + become_user: "{{ pg_user }}" + shell: echo "CREATE EXTENSION postgres_fdw" | psql -d "{{ db_name }}" + +- name: Create dummy foreign data wrapper + become: yes + become_user: "{{ pg_user }}" + shell: echo "CREATE FOREIGN DATA WRAPPER dummy" | psql -d "{{ db_name }}" + +- name: Create foreign server + become: yes + become_user: "{{ pg_user }}" + shell: echo "CREATE SERVER dummy_server FOREIGN DATA WRAPPER dummy" | psql -d "{{ db_name }}" + +# Test +- name: Grant foreign data wrapper privileges 
+ postgresql_privs: + state: present + type: foreign_data_wrapper + roles: "{{ db_user2 }}" + privs: ALL + objs: dummy + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: + - result is changed + +- name: Get foreign data wrapper privileges + become: yes + become_user: "{{ pg_user }}" + shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}" + vars: + fdw_query: > + SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper + WHERE fdwname = ANY (ARRAY['dummy']) ORDER BY fdwname + register: fdw_result + +- assert: + that: + - "fdw_result.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user2 }}' in fdw_result.stdout_lines[-2]" + +# Test +- name: Grant foreign data wrapper privileges second time + postgresql_privs: + state: present + type: foreign_data_wrapper + roles: "{{ db_user2 }}" + privs: ALL + objs: dummy + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: + - result is not changed + +# Test +- name: Revoke foreign data wrapper privileges + postgresql_privs: + state: absent + type: foreign_data_wrapper + roles: "{{ db_user2 }}" + privs: ALL + objs: dummy + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: + - result is changed + +- name: Get foreign data wrapper privileges + become: yes + become_user: "{{ pg_user }}" + shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}" + vars: + fdw_query: > + SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper + WHERE fdwname = ANY (ARRAY['dummy']) ORDER BY fdwname + register: fdw_result + +- assert: + that: + - "fdw_result.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user2 }}' not in fdw_result.stdout_lines[-2]" + +# Test +- name: Revoke foreign data wrapper privileges for second time + postgresql_privs: + state: absent + type: foreign_data_wrapper + roles: "{{ 
db_user2 }}" + privs: ALL + objs: dummy + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: + - result is not changed + +# Test +- name: Grant foreign server privileges + postgresql_privs: + state: present + type: foreign_server + roles: "{{ db_user2 }}" + privs: ALL + objs: dummy_server + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: + - result is changed + +- name: Get foreign server privileges + become: yes + become_user: "{{ pg_user }}" + shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}" + vars: + fdw_query: > + SELECT srvacl FROM pg_catalog.pg_foreign_server + WHERE srvname = ANY (ARRAY['dummy_server']) ORDER BY srvname + register: fs_result + +- assert: + that: + - "fs_result.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user2 }}' in fs_result.stdout_lines[-2]" + +# Test +- name: Grant foreign server privileges for second time + postgresql_privs: + state: present + type: foreign_server + roles: "{{ db_user2 }}" + privs: ALL + objs: dummy_server + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: + - result is not changed + +# Test +- name: Revoke foreign server privileges + postgresql_privs: + state: absent + type: foreign_server + roles: "{{ db_user2 }}" + privs: ALL + objs: dummy_server + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: + - result is changed + +- name: Get foreign server privileges + become: yes + become_user: "{{ pg_user }}" + shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}" + vars: + fdw_query: > + SELECT srvacl FROM pg_catalog.pg_foreign_server + WHERE srvname = ANY (ARRAY['dummy_server']) ORDER BY srvname + register: fs_result + +- assert: + that: + - 
"fs_result.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user2 }}' not in fs_result.stdout_lines[-2]" + +# Test +- name: Revoke foreign server privileges for second time + postgresql_privs: + state: absent + type: foreign_server + roles: "{{ db_user2 }}" + privs: ALL + objs: dummy_server + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: + - result is not changed + +# Foreign data wrapper cleanup +- name: Drop foreign server + become: yes + become_user: "{{ pg_user }}" + shell: echo "DROP SERVER dummy_server" | psql -d "{{ db_name }}" + +- name: Drop dummy foreign data wrapper + become: yes + become_user: "{{ pg_user }}" + shell: echo "DROP FOREIGN DATA WRAPPER dummy" | psql -d "{{ db_name }}" + +- name: Drop foreign data wrapper extension + become: yes + become_user: "{{ pg_user }}" + shell: echo "DROP EXTENSION postgres_fdw" | psql -d "{{ db_name }}" + +########################################## +# Test ALL_IN_SCHEMA for 'function' type # +########################################## + +# Function ALL_IN_SCHEMA Setup +- name: Create function for test + postgresql_query: + query: CREATE FUNCTION public.a() RETURNS integer LANGUAGE SQL AS 'SELECT 2'; + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + +# Test +- name: Grant execute to all functions + postgresql_privs: + type: function + state: present + privs: EXECUTE + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: result is changed + +- name: Check that all functions have execute privileges + become: yes + become_user: "{{ pg_user }}" + shell: psql {{ db_name }} -c "SELECT proacl FROM pg_proc WHERE proname = 'a'" -t + register: result + +- assert: + that: "'{{ db_user2 }}=X/{{ db_user3 }}' in '{{ result.stdout_lines[0] 
}}'" + +# Test +- name: Grant execute to all functions again + postgresql_privs: + type: function + state: present + privs: EXECUTE + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: result is not changed + +# Test +- name: Revoke execute to all functions + postgresql_privs: + type: function + state: absent + privs: EXECUTE + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: result is changed + +# Test +- name: Revoke execute to all functions again + postgresql_privs: + type: function + state: absent + privs: EXECUTE + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + trust_input: no + register: result + ignore_errors: yes + +- assert: + that: result is not changed + +# Function ALL_IN_SCHEMA cleanup +- name: Remove function for test + postgresql_query: + query: DROP FUNCTION public.a(); + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + +# Issue https://github.com/ansible-collections/community.general/issues/994 +- name: Create a procedure for tests + postgresql_query: + query: "CREATE PROCEDURE mock_procedure() LANGUAGE SQL AS $$ SELECT 1; $$;" + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + when: postgres_version_resp.stdout is version('11', '>=') + +# Issue https://github.com/ansible-collections/community.general/issues/994 +- name: Try to run module against a procedure, not function + postgresql_privs: + type: function + state: present + privs: ALL + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + 
login_user: "{{ db_user3 }}" + login_password: password + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is not changed + when: postgres_version_resp.stdout is version('11', '>=') + +########################### +# Test for procedure type # +########################### +- name: Create another procedure for tests + postgresql_query: + query: "CREATE PROCEDURE mock_procedure1(int, int) LANGUAGE SQL AS $$ SELECT 1; $$;" + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + when: postgres_version_resp.stdout is version('11', '>=') + +- name: Grant privs on procedure + postgresql_privs: + type: procedure + state: present + privs: EXECUTE + roles: "{{ db_user2 }}" + objs: 'mock_procedure1(int:int)' + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + when: postgres_version_resp.stdout is version('11', '>=') + +- name: Grant privs on procedure again + postgresql_privs: + type: procedure + state: present + privs: EXECUTE + roles: "{{ db_user2 }}" + objs: 'mock_procedure1(int:int)' + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is not changed + when: postgres_version_resp.stdout is version('11', '>=') + +- name: Revoke procedure privs + postgresql_privs: + type: procedure + state: absent + privs: EXECUTE + roles: "{{ db_user2 }}" + objs: 'mock_procedure1(int:int)' + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + when: postgres_version_resp.stdout is version('11', '>=') + +- name: Revoke 
procedure privs again + postgresql_privs: + type: procedure + state: absent + privs: EXECUTE + roles: "{{ db_user2 }}" + objs: 'mock_procedure1(int:int)' + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is not changed + when: postgres_version_resp.stdout is version('11', '>=') + +- name: Grant procedure privs for all object in schema + postgresql_privs: + type: procedure + state: present + privs: ALL + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + when: postgres_version_resp.stdout is version('11', '>=') + +- name: Grant procedure privs for all object in schema again + postgresql_privs: + type: procedure + state: present + privs: ALL + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is not changed + when: postgres_version_resp.stdout is version('11', '>=') + +- name: Revoke procedure privs for all object in schema + postgresql_privs: + type: procedure + state: absent + privs: ALL + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + register: result + when: postgres_version_resp.stdout is version('11', '>=') + +- assert: + that: + - result is changed + when: postgres_version_resp.stdout is version('11', '>=') + +################################################# +# Test ALL_IN_SCHEMA for 'partioned tables type # +################################################# + +# Partitioning tables is a feature 
introduced in Postgresql 10. +# (see https://www.postgresql.org/docs/10/ddl-partitioning.html ) +# The test below check for this version + +# Function ALL_IN_SCHEMA Setup +- name: Create partioned table for test purpose + postgresql_query: + query: CREATE TABLE public.testpt (id int not null, logdate date not null) PARTITION BY RANGE (logdate); + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + when: postgres_version_resp.stdout is version('10', '>=') + +# Test +- name: Grant execute to all tables in check mode + postgresql_privs: + type: table + state: present + privs: SELECT + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + trust_input: no + register: result + ignore_errors: yes + when: postgres_version_resp.stdout is version('10', '>=') + check_mode: yes + +# Checks +- name: Check that all partitioned tables don't have select privileges after the check mode task + postgresql_query: + query: SELECT grantee, privilege_type FROM information_schema.role_table_grants WHERE table_name='testpt' and privilege_type='SELECT' and grantee = %(grantuser)s + db: "{{ db_name }}" + login_user: '{{ db_user2 }}' + login_password: password + named_args: + grantuser: '{{ db_user2 }}' + become: yes + become_user: "{{ pg_user }}" + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('10', '>=') + +# Test +- name: Grant execute with grant option on pg_create_restore_point function + postgresql_privs: + privs: EXECUTE + type: function + schema: pg_catalog + obj: pg_create_restore_point(text) + db: "{{ db_name }}" + roles: "{{ db_user2 }}" + login_user: "{{ pg_user }}" + grant_option: yes + state: present + become: yes + become_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: result is changed + +- name: Check that user 
has GRANT privilege on the function + postgresql_query: + query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point' + db: "{{ db_name }}" + login_user: "{{ db_user2 }}" + login_password: password + become: yes + become_user: "{{ pg_user }}" + register: result + +- assert: + that: "'{{ db_user2 }}=X*/{{ pg_user }}' in result.query_result[0].proacl" + +# Test +- name: Grant execute without specifying grant_option to check idempotence + postgresql_privs: + privs: EXECUTE + type: function + schema: pg_catalog + obj: pg_create_restore_point(text) + db: "{{ db_name }}" + roles: "{{ db_user2 }}" + login_user: "{{ pg_user }}" + state: present + become: yes + become_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: result is not changed + +- name: Check that user has GRANT privilege on the function + postgresql_query: + query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point' + db: "{{ db_name }}" + login_user: "{{ db_user2 }}" + login_password: password + become: yes + become_user: "{{ pg_user }}" + register: result + +- assert: + that: "'{{ db_user2 }}=X*/{{ pg_user }}' in result.query_result[0].proacl" + +# Test +- name: Revoke grant option on pg_create_restore_point function + postgresql_privs: + privs: EXECUTE + type: function + schema: pg_catalog + obj: pg_create_restore_point(text) + db: "{{ db_name }}" + roles: "{{ db_user2 }}" + login_user: "{{ pg_user }}" + grant_option: no + state: present + become: yes + become_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: result is changed + +- name: Check that user does not have GRANT privilege on the function + postgresql_query: + query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point' + db: "{{ db_name }}" + login_user: "{{ db_user2 }}" + login_password: password + become: yes + become_user: "{{ pg_user }}" + register: result + +- assert: + that: "'{{ db_user2 }}=X/{{ pg_user }}' in result.query_result[0].proacl" + +# Test +- 
name: Revoke execute on pg_create_restore_point function + postgresql_privs: + privs: EXECUTE + type: function + schema: pg_catalog + obj: pg_create_restore_point(text) + db: "{{ db_name }}" + roles: "{{ db_user2 }}" + login_user: "{{ pg_user }}" + state: absent + become: yes + become_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: result is changed + +- name: Check that user does not have EXECUTE privilege on the function + postgresql_query: + query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point' + db: "{{ db_name }}" + login_user: "{{ db_user2 }}" + login_password: password + become: yes + become_user: "{{ pg_user }}" + register: result + +- assert: + that: "'{{ db_user2 }}' not in result.query_result[0].proacl" + +# Test +- name: Grant execute to all tables + postgresql_privs: + type: table + state: present + privs: SELECT + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + trust_input: no + register: result + ignore_errors: yes + when: postgres_version_resp.stdout is version('10', '>=') + +# Checks +- assert: + that: result is changed + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Check that all partitioned tables have select privileges + postgresql_query: + query: SELECT grantee, privilege_type FROM information_schema.role_table_grants WHERE table_name='testpt' and privilege_type='SELECT' and grantee = %(grantuser)s + db: "{{ db_name }}" + login_user: '{{ db_user2 }}' + login_password: password + named_args: + grantuser: '{{ db_user2 }}' + become: yes + become_user: "{{ pg_user }}" + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('10', '>=') + +# Test +- name: Grant execute to all tables again to see no changes are reported + postgresql_privs: + type: table + state: present + 
privs: SELECT + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + trust_input: no + register: result + ignore_errors: yes + when: postgres_version_resp.stdout is version('10', '>=') + +# Checks +- assert: + that: result is not changed + when: postgres_version_resp.stdout is version('10', '>=') + +# Test +- name: Revoke SELECT to all tables + postgresql_privs: + type: table + state: absent + privs: SELECT + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + trust_input: no + register: result + ignore_errors: yes + when: postgres_version_resp.stdout is version('10', '>=') + +# Checks +- assert: + that: result is changed + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Check that all partitioned tables don't have select privileges + postgresql_query: + query: SELECT grantee, privilege_type FROM information_schema.role_table_grants WHERE table_name='testpt' and privilege_type='SELECT' and grantee = %(grantuser)s + db: "{{ db_name }}" + login_user: '{{ db_user2 }}' + login_password: password + named_args: + grantuser: '{{ db_user2 }}' + become: yes + become_user: "{{ pg_user }}" + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('10', '>=') + +# Test +- name: Revoke SELECT to all tables and no changes are reported + postgresql_privs: + type: table + state: absent + privs: SELECT + roles: "{{ db_user2 }}" + objs: ALL_IN_SCHEMA + schema: public + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + trust_input: no + register: result + ignore_errors: yes + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: result is not changed + when: postgres_version_resp.stdout is version('10', '>=') + +# 
Table ALL_IN_SCHEMA cleanup +- name: Remove table for test + postgresql_query: + query: DROP TABLE public.testpt; + db: "{{ db_name }}" + login_user: "{{ db_user3 }}" + login_password: password + trust_input: no + ignore_errors: yes + when: postgres_version_resp.stdout is version('10', '>=') + +########################################### +# Test for 'type' value of type parameter # +########################################### + +# Test +- name: Grant type privileges + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + state: present + type: type + roles: "{{ db_user2 }}" + privs: ALL + objs: numeric + schema: pg_catalog + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +# Checks +- assert: + that: + - result is changed + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Get type privileges + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric'; + register: typ_result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - "'{{ db_user2 }}' in typ_result.query_result[0].typacl" + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Grant type privileges again using check_mode + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + state: present + type: type + roles: "{{ db_user2 }}" + privs: ALL + objs: numeric + schema: pg_catalog + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + check_mode: yes + when: postgres_version_resp.stdout is version('10', '>=') + +# Checks +- assert: + that: + - result is not changed + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Get type privileges + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + login_user: "{{ pg_user }}" + 
login_db: "{{ db_name }}" + query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric'; + register: typ_result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - "'{{ db_user2 }}' in typ_result.query_result[0].typacl" + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Grant type privileges again + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + state: present + type: type + roles: "{{ db_user2 }}" + privs: ALL + objs: numeric + schema: pg_catalog + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +# Checks +- assert: + that: + - result is not changed + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Get type privileges + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric'; + register: typ_result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - "'{{ db_user2 }}' in typ_result.query_result[0].typacl" + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Revoke type privileges in check_mode + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + state: absent + type: type + roles: "{{ db_user2 }}" + privs: ALL + objs: numeric + schema: pg_catalog + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + check_mode: yes + when: postgres_version_resp.stdout is version('10', '>=') + +# Checks +- assert: + that: + - result is changed + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Get type privileges + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric'; + register: 
typ_result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - "'{{ db_user2 }}' in typ_result.query_result[0].typacl" + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Revoke type privileges + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + state: absent + type: type + roles: "{{ db_user2 }}" + privs: ALL + objs: numeric + schema: pg_catalog + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +# Checks +- assert: + that: + - result is changed + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Get type privileges + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric'; + register: typ_result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - "'{{ db_user2 }}' not in typ_result.query_result[0].typacl" + when: postgres_version_resp.stdout is version('10', '>=') + +# type with default schema (public): +- name: Create custom type in schema public + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + query: "CREATE TYPE compfoo AS (f1 int, f2 text)" + when: postgres_version_resp.stdout is version('10', '>=') + +# Test +- name: Grant type privileges with default schema + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + state: present + type: type + roles: "{{ db_user2 }}" + privs: ALL + objs: compfoo + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +# Checks +- assert: + that: + - result is changed + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Get type privileges + become: yes + become_user: "{{ pg_user 
}}" + postgresql_query: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + query: > + SELECT t.typacl FROM pg_catalog.pg_type t JOIN pg_catalog.pg_namespace n + ON n.oid = t.typnamespace WHERE t.typname = 'compfoo' AND n.nspname = 'public'; + register: typ_result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - "'{{ db_user2 }}' in typ_result.query_result[0].typacl" + when: postgres_version_resp.stdout is version('10', '>=') + +###################################################################### +# https://github.com/ansible-collections/community.general/issues/1058 +- name: Create user for test + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + name: "test-role" + role_attr_flags: "NOLOGIN,NOSUPERUSER,INHERIT,NOCREATEDB,NOCREATEROLE,NOREPLICATION" + +- name: Test community.general/issue/1058 GRANT with hyphen + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + roles: "test-role" + objs: "{{ pg_user }}" + type: "group" + register: result + +- assert: + that: + - result is changed + - result.queries == ["GRANT \"{{ pg_user }}\" TO \"test-role\";"] + +- name: Test community.general/issue/1058 REVOKE + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + roles: "test-role" + objs: "{{ pg_user }}" + type: "group" + state: absent + register: result + +- assert: + that: + - result is changed + - result.queries == ["REVOKE \"{{ pg_user }}\" FROM \"test-role\";"] + +- name: Test community.general/issue/1058 GRANT without hyphen + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + roles: "{{ db_user3 }}" + objs: "{{ pg_user }}" + type: "group" + register: result + +- assert: + that: + - result is changed + - result.queries == ["GRANT \"{{ 
pg_user }}\" TO \"{{ db_user3 }}\";"] + +- name: Test community.general/issue/1058 GRANT with hyphen as an object + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + roles: "{{ db_user3 }}" + objs: "test-role,{{ db_user2 }}" + type: "group" + register: result + +- assert: + that: + - result is changed + - result.queries == ["GRANT \"test-role\",\"{{ db_user2 }}\" TO \"{{ db_user3 }}\";"] + +- name: Test community.general/issue/1058 GRANT with hyphen as an object + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + login_user: "{{ pg_user }}" + login_db: "{{ db_name }}" + roles: "{{ db_user3 }}" + objs: "test-role" + type: "group" + register: result + +- assert: + that: + - result is not changed + +# Cleanup +- name: Remove privs + become: yes + become_user: "{{ pg_user }}" + postgresql_privs: + state: absent + type: type + roles: "{{ db_user2 }}" + privs: ALL + objs: compfoo + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Reassign ownership + become_user: "{{ pg_user }}" + become: yes + postgresql_owner: + login_user: "{{ pg_user }}" + db: "{{ db_name }}" + new_owner: "{{ pg_user }}" + reassign_owned_by: "{{ item }}" + loop: + - "{{ db_user2 }}" + - "{{ db_user3 }}" + +- name: Remove user given permissions + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ db_user2 }}" + state: absent + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + +- name: Remove user owner of objects + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ item }}" + state: absent + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + loop: + - '{{ db_user3 }}' + - 'test-role' + +- name: Destroy DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: absent + name: "{{ db_name }}" + login_user: "{{ pg_user }}" diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml new file mode 100644 index 00000000..8aa6b409 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml @@ -0,0 +1,407 @@ +# The tests below were added initially and moved here +# from the shared target called ``postgresql`` by @Andersson007 <aaklychkov@mail.ru>. +# You can see modern examples of CI tests in postgresql_publication directory, for example. + +# +# Test settings privileges +# +- name: Create db + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: "{{ db_name }}" + state: "present" + login_user: "{{ pg_user }}" + +- name: Create some tables on the db + become_user: "{{ pg_user }}" + become: yes + shell: echo "create table test_table1 (field text);" | psql {{ db_name }} + +- become_user: "{{ pg_user }}" + become: yes + shell: echo "create table test_table2 (field text);" | psql {{ db_name }} + +- vars: + db_password: 'secretù' # use UTF-8 + block: + - name: Create a user with some permissions on the db + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md5{{ (db_password ~ db_user1) | hash('md5')}}" + db: "{{ db_name }}" + priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP' + login_user: "{{ pg_user }}" + + - include_tasks: pg_authid_not_readable.yml + +- name: Check that the user has the requested permissions (table1) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ 
db_name }} + register: result_table1 + +- name: Check that the user has the requested permissions (table2) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- name: Check that the user has the requested permissions (database) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(7 rows)'" + - "'INSERT' in result_table1.stdout" + - "'SELECT' in result_table1.stdout" + - "'UPDATE' in result_table1.stdout" + - "'DELETE' in result_table1.stdout" + - "'TRUNCATE' in result_table1.stdout" + - "'REFERENCES' in result_table1.stdout" + - "'TRIGGER' in result_table1.stdout" + - "result_table2.stdout_lines[-1] == '(1 row)'" + - "'INSERT' == '{{ result_table2.stdout_lines[-2] | trim }}'" + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}=CTc/{{ pg_user }}' in result_database.stdout_lines[-2]" + +- name: Add another permission for the user + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ db_user1 }}" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + db: "{{ db_name }}" + priv: 'test_table2:select' + login_user: "{{ pg_user }}" + register: result + +- name: Check that ansible reports it changed the user + assert: + that: + - result is changed + +- name: Check that the user has the requested permissions (table2) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table2.stdout_lines[-1] == '(2 rows)'" + - "'INSERT' in result_table2.stdout" + - 
"'SELECT' in result_table2.stdout" + +# +# Test priv setting via postgresql_privs module +# (Depends on state from previous _user privs tests) +# + +- name: Revoke a privilege + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + type: "table" + state: "absent" + roles: "{{ db_user1 }}" + privs: "INSERT" + objs: "test_table2" + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + +- name: Check that ansible reports it changed the user + assert: + that: + - result is changed + +- name: Check that the user has the requested permissions (table2) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table2.stdout_lines[-1] == '(1 row)'" + - "'SELECT' == '{{ result_table2.stdout_lines[-2] | trim }}'" + +- name: Revoke many privileges on multiple tables + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + state: "absent" + roles: "{{ db_user1 }}" + privs: "INSERT,select,UPDATE,TRUNCATE,REFERENCES,TRIGGER,delete" + objs: "test_table2,test_table1" + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + +- name: Check that ansible reports it changed the user + assert: + that: + - result is changed + +- name: Check that permissions were revoked (table1) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- name: Check that permissions were revoked (table2) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- 
assert: + that: + - "result_table1.stdout_lines[-1] == '(0 rows)'" + - "result_table2.stdout_lines[-1] == '(0 rows)'" + +- name: Revoke database privileges + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + type: "database" + state: "absent" + roles: "{{ db_user1 }}" + privs: "Create,connect,TEMP" + objs: "{{ db_name }}" + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + +- name: Check that the user has the requested permissions (database) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}' not in result_database.stdout" + +- name: Grant database privileges + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + type: "database" + state: "present" + roles: "{{ db_user1 }}" + privs: "CREATE,connect" + objs: "{{ db_name }}" + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + register: result + +- name: Check that ansible reports it changed the user + assert: + that: + - result is changed + +- name: Check that the user has the requested permissions (database) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- assert: + that: + - "result_database.stdout_lines[-1] == '(1 row)'" + - "'{{ db_user1 }}=Cc' in result_database.stdout" + +- name: Grant a single privilege on a table + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + state: "present" + roles: "{{ db_user1 }}" + privs: "INSERT" + objs: "test_table1" + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + +- name: Check that permissions were added (table1) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select privilege_type from information_schema.role_table_grants 
where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(1 row)'" + - "'{{ result_table1.stdout_lines[-2] | trim }}' == 'INSERT'" + +- name: Grant many privileges on multiple tables + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + state: "present" + roles: "{{ db_user1 }}" + privs: 'INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,trigger' + objs: "test_table2,test_table1" + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + +- name: Check that permissions were added (table1) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }} + register: result_table1 + +- name: Check that permissions were added (table2) + become_user: "{{ pg_user }}" + become: yes + shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }} + register: result_table2 + +- assert: + that: + - "result_table1.stdout_lines[-1] == '(7 rows)'" + - "'INSERT' in result_table1.stdout" + - "'SELECT' in result_table1.stdout" + - "'UPDATE' in result_table1.stdout" + - "'DELETE' in result_table1.stdout" + - "'TRUNCATE' in result_table1.stdout" + - "'REFERENCES' in result_table1.stdout" + - "'TRIGGER' in result_table1.stdout" + - "result_table2.stdout_lines[-1] == '(7 rows)'" + - "'INSERT' in result_table2.stdout" + - "'SELECT' in result_table2.stdout" + - "'UPDATE' in result_table2.stdout" + - "'DELETE' in result_table2.stdout" + - "'TRUNCATE' in result_table2.stdout" + - "'REFERENCES' in result_table2.stdout" + - "'TRIGGER' in result_table2.stdout" + +# Check passing roles with dots +# https://github.com/ansible/ansible/issues/63204 +- name: Create roles for further tests + become_user: "{{ pg_user }}" + 
become: yes + postgresql_user: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: "{{ item }}" + loop: + - "{{ db_user_with_dots1 }}" + - "{{ db_user_with_dots2 }}" + +- name: Pass role with dots in its name to roles parameter + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + state: "present" + roles: "{{ db_user_with_dots1 }}" + privs: "INSERT" + objs: "test_table1" + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + trust_input: no + +- name: Check that permissions were added (table1) + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user_with_dots1 }}' and table_name='test_table1'" + register: result + +- assert: + that: + - result.rowcount == 1 + +# We don't need to check anything here, only that nothing failed +- name: Pass role with dots in its name to target_roles parameter + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + state: "present" + roles: "{{ db_user_with_dots1 }}" + privs: "INSERT" + objs: TABLES + type: default_privs + target_roles: "{{ db_user_with_dots2 }}" + trust_input: no + +# Bugfix for https://github.com/ansible-collections/community.general/issues/857 +- name: Test passing lowercase PUBLIC role + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + type: 'database' + privs: 'connect' + role: 'public' + register: result + +- assert: + that: + - result is changed + - result.queries == ["GRANT CONNECT ON database \"{{ db_name }}\" TO PUBLIC;"] + +# +# Cleanup +# +- name: Cleanup db + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + name: "{{ db_name }}" + state: "absent" + login_user: "{{ pg_user }}" + +- name: Check that database was destroyed + become_user: "{{ pg_user }}" + become: yes + shell: echo "select 
datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Cleanup test user + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ item }}" + state: 'absent' + login_user: "{{ pg_user }}" + db: postgres + loop: + - "{{ db_user1 }}" + - "{{ db_user2 }}" + - "{{ db_user3 }}" + - "{{ db_user_with_dots1 }}" + - "{{ db_user_with_dots2 }}" + +- name: Check that they were removed + become_user: "{{ pg_user }}" + become: yes + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres + register: result + +- assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml new file mode 100644 index 00000000..9a06c9d2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml @@ -0,0 +1,102 @@ +- name: Create a high privileged user + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ db_session_role1 }}" + state: "present" + password: "password" + role_attr_flags: "CREATEDB,LOGIN,CREATEROLE" + login_user: "{{ pg_user }}" + db: postgres + +- name: Create a low privileged user using the newly created user + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ db_session_role2 }}" + state: "present" + password: "password" + role_attr_flags: "LOGIN" + login_user: "{{ pg_user }}" + session_role: "{{ db_session_role1 }}" + db: postgres + +- name: Create DB as session_role + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ 
db_session_role1 }}" + login_user: "{{ pg_user }}" + session_role: "{{ db_session_role1 }}" + register: result + +- name: Create table to be able to grant privileges + become_user: "{{ pg_user }}" + become: yes + shell: echo "CREATE TABLE test(i int); CREATE TABLE test2(i int);" | psql -AtXq "{{ db_session_role1 }}" + +- name: Grant all privileges on test1 table to low privileged user + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + db: "{{ db_session_role1 }}" + type: table + objs: test + roles: "{{ db_session_role2 }}" + login_user: "{{ pg_user }}" + privs: select + admin_option: yes + +- name: Verify admin option was successful for grants + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + db: "{{ db_session_role1 }}" + type: table + objs: test + roles: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + privs: select + session_role: "{{ db_session_role2 }}" + +- name: Verify no grants can be granted for test2 table + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + db: "{{ db_session_role1 }}" + type: table + objs: test2 + roles: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + privs: update + session_role: "{{ db_session_role2 }}" + ignore_errors: yes + register: result + +- assert: + that: + - result is failed + +######################## +# Test trust_input param + +- name: Verify trust_input parameter + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + db: "{{ db_session_role1 }}" + type: table + objs: test2 + roles: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + privs: update + session_role: "{{ dangerous_name }}" + trust_input: no + ignore_errors: yes + register: result + +- assert: + that: + - result is failed + - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml new file mode 100644 index 00000000..a1d2805a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml @@ -0,0 +1,120 @@ +# Setup +- name: Create a test user + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + db: postgres + +- name: Create DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ db_name }}" + owner: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + +- name: Create a user to be given permissions and other tests + postgresql_user: + name: "{{ db_user2 }}" + state: present + encrypted: yes + password: password + role_attr_flags: LOGIN + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + +####################################### +# Test default_privs with target_role # +####################################### + +# Test +- name: Grant default privileges for new table objects + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + db: "{{ db_name }}" + objs: TABLES + privs: SELECT + type: default_privs + role: "{{ db_user2 }}" + target_roles: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: result is changed + +- name: Check that default privileges are set + become: yes + become_user: "{{ pg_user }}" + shell: psql {{ db_name }} -c "SELECT defaclrole, defaclobjtype, defaclacl FROM pg_default_acl a JOIN pg_roles b ON a.defaclrole=b.oid;" -t + register: result + +- assert: + that: "'{{ db_user2 }}=r/{{ db_user1 }}' in '{{ result.stdout_lines[0] }}'" + +# Test +- name: Revoke default privileges for new table objects + become_user: "{{ pg_user }}" + become: yes + postgresql_privs: + db: "{{ db_name }}" + state: absent + objs: TABLES + privs: SELECT + type: 
default_privs + role: "{{ db_user2 }}" + target_roles: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: result is changed + +# Cleanup +- name: Remove user given permissions + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ db_user2 }}" + state: absent + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + +- name: Remove user owner of objects + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ db_user3 }}" + state: absent + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + +- name: Destroy DBs + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: absent + name: "{{ item }}" + login_user: "{{ pg_user }}" + loop: + - "{{ db_name }}" + - "{{ db_session_role1 }}" + +- name: Remove test users + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ item }}" + state: absent + db: postgres + login_user: "{{ pg_user }}" + loop: + - "{{ db_user1 }}" + - "{{ db_session_role1 }}" + - "{{ db_session_role2 }}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/aliases new file mode 100644 index 00000000..142e8aa0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group1 +skip/freebsd diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/main.yml new file mode 100644 index 00000000..507c1e23 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/main.yml @@ -0,0 +1,8 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_publication module +- import_tasks: postgresql_publication_initial.yml + when: postgres_version_resp.stdout is version('10', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml new file mode 100644 index 00000000..0300fc07 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml @@ -0,0 +1,436 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# The file for testing postgresql_copy module. 
+ +- vars: + test_table1: acme1 + test_table2: acme2 + test_table3: acme3 + test_pub: acme_publ + test_role: alice + dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + test_schema: acme_schema + test_db: acme_db + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: '{{ test_db }}' + + block: + ################################################# + # Test preparation, create database test objects: + - name: postgresql_publication - create test db + <<: *task_parameters + postgresql_db: + login_user: '{{ pg_user }}' + maintenance_db: postgres + name: '{{ test_db }}' + + - name: postgresql_publication - create test schema + <<: *task_parameters + postgresql_schema: + <<: *pg_parameters + name: '{{ test_schema }}' + + - name: postgresql_publication - create test role + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_role }}' + role_attr_flags: SUPERUSER + + - name: postgresql_publication - create test tables + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ item }}' + columns: + - id int + loop: + - '{{ test_table1 }}' + - '{{ test_schema }}.{{ test_table2 }}' + - '{{ test_table3 }}' + + + ################ + # Do main tests: + + # Test + - name: postgresql_publication - create publication, check_mode + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + - result.exists == false + - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES"] + + # Check + - name: postgresql_publication - check that nothing has been changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' + + - assert: + that: + - result.rowcount == 0 + + # Test + - name: 
postgresql_publication - create publication + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + trust_input: no + + - assert: + that: + - result is changed + - result.exists == true + - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES"] + - result.owner == '{{ pg_user }}' + - result.alltables == true + - result.tables == [] + - result.parameters.publish != {} + + # Check + - name: postgresql_publication - check that nothing has been changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' + AND pubowner = '10' AND puballtables = 't' + + - assert: + that: + - result.rowcount == 1 + + # Test + - name: postgresql_publication - drop publication, check_mode + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + state: absent + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + - result.exists == true + - result.queries == ["DROP PUBLICATION \"{{ test_pub }}\""] + - result.owner == '{{ pg_user }}' + - result.alltables == true + - result.tables == [] + - result.parameters.publish != {} + + # Check + - name: postgresql_publication - check that nothing has been changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' + + - assert: + that: + - result.rowcount == 1 + + # Test + - name: postgresql_publication - drop publication + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + state: absent + cascade: yes + + - assert: + that: + - result is changed + - result.exists == false + - result.queries == ["DROP PUBLICATION \"{{ test_pub }}\" CASCADE"] + + # Check + - name: postgresql_publication - check that publication does not exist + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM 
pg_publication WHERE pubname = '{{ test_pub }}' + + - assert: + that: + - result.rowcount == 0 + + # Test + - name: postgresql_publication - create publication with tables, owner, params + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + owner: '{{ test_role }}' + tables: + - '{{ test_table1 }}' + - '{{ test_schema }}.{{ test_table2 }}' + parameters: + publish: 'insert' + trust_input: no + + - assert: + that: + - result is changed + - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR TABLE \"public\".\"{{ test_table1 }}\", \"{{ test_schema }}\".\"{{ test_table2 }}\" WITH (publish = 'insert')", "ALTER PUBLICATION \"{{ test_pub }}\" OWNER TO \"{{ test_role }}\""] + - result.owner == '{{ test_role }}' + - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\""] + - result.parameters.publish.insert == true + - result.parameters.publish.delete == false + + # Check 1 + - name: postgresql_publication - check that test publication exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' + AND pubowner != '10' AND puballtables = 'f' AND pubinsert = 't' AND pubdelete = 'f' + + - assert: + that: + - result.rowcount == 1 + + # Check 2 + - name: postgresql_publication - check that test_table1 from schema public is in publication + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}' AND schemaname = 'public' + + - assert: + that: + - result.rowcount == 1 + + # Check 3 + - name: postgresql_publication - check that test_table2 from test schema is in publication + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}' AND schemaname = '{{ test_schema }}' + + - assert: + that: + - result.rowcount == 1 + + # Test + - name: 
postgresql_publication - test trust_input parameter + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + session_role: '{{ dangerous_name }}' + owner: '{{ dangerous_name }}' + trust_input: no + ignore_errors: yes + + - assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') + + # Test + - name: postgresql_publication - add table to publication, change owner, check_mode + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + owner: '{{ pg_user }}' + tables: + - '{{ test_table1 }}' + - '{{ test_schema }}.{{ test_table2 }}' + - '{{ test_table3 }}' + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" ADD TABLE \"public\".\"{{ test_table3 }}\"", "ALTER PUBLICATION \"{{ test_pub }}\" OWNER TO \"{{ pg_user }}\""] + - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\""] + + # Check + - name: postgresql_publication - check that nothing changes after the previous step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' + AND pubowner != '10' AND puballtables = 'f' AND pubinsert = 't' AND pubupdate = 't' + + - assert: + that: + - result.rowcount == 0 + + # Check + - name: postgresql_publication - check that 2 tables are in publication + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}' + + - assert: + that: + - result.rowcount == 2 + + # Test + - name: postgresql_publication - add table to publication, change owner + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + owner: '{{ pg_user }}' + tables: + - '{{ test_table1 }}' + - '{{ test_schema }}.{{ test_table2 }}' + - '{{ test_table3 }}' + trust_input: no + + 
- assert: + that: + - result is changed + - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" ADD TABLE \"public\".\"{{ test_table3 }}\"", "ALTER PUBLICATION \"{{ test_pub }}\" OWNER TO \"{{ pg_user }}\""] + - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\"", "\"public\".\"{{ test_table3 }}\""] + + # Check 1 + - name: postgresql_publication - check owner has been changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' AND pubowner = '10' + + - assert: + that: + - result.rowcount == 1 + + # Check 2 + - name: postgresql_publication - check that 3 tables are in publication + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}' + + - assert: + that: + - result.rowcount == 3 + + # Test + - name: postgresql_publication - remove table from publication, check_mode + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + tables: + - '{{ test_table1 }}' + - '{{ test_schema }}.{{ test_table2 }}' + parameters: + publish: 'insert' + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" DROP TABLE \"public\".\"{{ test_table3 }}\""] + - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\"", "\"public\".\"{{ test_table3 }}\""] + + # Check 1 + - name: postgresql_publication - check that 3 tables are in publication + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}' + + - assert: + that: + - result.rowcount == 3 + + # Check 2 + - name: postgresql_publication - check no parameters have been changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM 
pg_publication WHERE pubname = '{{ test_pub }}' AND pubinsert = 't' + + - assert: + that: + - result.rowcount == 1 + + # Test + - name: postgresql_publication - remove table from publication + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + name: '{{ test_pub }}' + tables: + - '{{ test_table1 }}' + - '{{ test_schema }}.{{ test_table2 }}' + parameters: + publish: 'delete' + trust_input: no + + - assert: + that: + - result is changed + - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" DROP TABLE \"public\".\"{{ test_table3 }}\"", "ALTER PUBLICATION \"{{ test_pub }}\" SET (publish = 'delete')"] + - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\""] + + # Check 1 + - name: postgresql_publication - check that 2 tables are in publication + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}' + + - assert: + that: + - result.rowcount == 2 + + # Check 2 + - name: postgresql_publication - check parameter has been changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' AND pubinsert = 'f' + + - assert: + that: + - result.rowcount == 1 + + always: + ########### + # Clean up: + + - name: postgresql_publication - remove test db + <<: *task_parameters + postgresql_db: + login_user: '{{ pg_user }}' + maintenance_db: postgres + name: '{{ test_db }}' + state: absent + + - name: postgresql_publication - remove test role + <<: *task_parameters + postgresql_user: + login_user: '{{ pg_user }}' + login_db: postgres + name: '{{ test_role }}' + state: absent diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/aliases new file mode 100644 index 
00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test0.sql b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test0.sql new file mode 100644 index 00000000..fb9ce516 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test0.sql @@ -0,0 +1,4 @@ +SELECT version(); + +SELECT story FROM test_table + WHERE id = %s OR story = 'Данные'; diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test1.sql b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test1.sql new file mode 100644 index 00000000..028c192d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/files/test1.sql @@ -0,0 +1,10 @@ +CREATE FUNCTION add(integer, integer) RETURNS integer + AS 'select $1 + $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; + +SELECT story FROM test_table + WHERE id = %s OR story = 'Данные'; + +SELECT version(); diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/main.yml new file mode 100644 index 00000000..7b24dbf9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/main.yml @@ -0,0 +1,7 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_query module +- import_tasks: postgresql_query_initial.yml diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml new file mode 100644 index 00000000..dd064078 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml @@ -0,0 +1,607 @@ +- name: postgresql_query - drop test table if exists + become_user: '{{ pg_user }}' + become: true + shell: psql postgres -U "{{ pg_user }}" -t -c "DROP TABLE IF EXISTS test_table;" + ignore_errors: true + +- name: postgresql_query - create test table called test_table + become_user: '{{ pg_user }}' + become: true + shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE test_table (id int, story text);" + ignore_errors: true + +- name: postgresql_query - insert some data into test_table + become_user: '{{ pg_user }}' + become: true + shell: psql postgres -U "{{ pg_user }}" -t -c "INSERT INTO test_table (id, story) VALUES 
(1, 'first'), (2, 'second'), (3, 'third');" + ignore_errors: true + +- name: Copy script files + become: yes + copy: + src: '{{ item }}' + dest: '~{{ pg_user }}/{{ item }}' + owner: '{{ pg_user }}' + force: yes + loop: + - test0.sql + - test1.sql + register: sql_file_created + ignore_errors: yes + +- name: postgresql_query - analyze test_table + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: ANALYZE test_table + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.query == 'ANALYZE test_table' + - result.query_list == ['ANALYZE test_table'] + - result.rowcount == 0 + - result.statusmessage == 'ANALYZE' + - result.query_result == {} + - result.query_all_results == [{}] + +- name: postgresql_query - run queries from SQL script + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + path_to_script: ~{{ pg_user }}/test0.sql + positional_args: + - 1 + encoding: UTF-8 + register: result + ignore_errors: true + when: sql_file_created + +- assert: + that: + - result is not changed + - result.query == "\n\nSELECT story FROM test_table\n WHERE id = 1 OR story = 'Данные'" + - result.query_result[0].story == 'first' + - result.query_all_results[0][0].version is search('PostgreSQL') + - result.query_all_results[1][0].story == 'first' + - result.rowcount == 2 + - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT' + when: sql_file_created + +- name: postgresql_query - simple select query to test_table + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: SELECT * FROM test_table + register: result + ignore_errors: true + +- assert: + that: + - result is not changed + - result.query == 'SELECT * FROM test_table' + - result.rowcount == 3 + - result.statusmessage == 'SELECT 3' or result.statusmessage == 'SELECT' + - 
result.query_result[0].id == 1 + - result.query_result[1].id == 2 + - result.query_result[2].id == 3 + - result.query_result[0].story == 'first' + - result.query_result[1].story == 'second' + - result.query_result[2].story == 'third' + +- name: postgresql_query - select query with named args + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: SELECT id FROM test_table WHERE id = %(id_val)s AND story = %(story_val)s + named_args: + id_val: 1 + story_val: first + register: result + ignore_errors: true + +- assert: + that: + - result is not changed + - result.query == "SELECT id FROM test_table WHERE id = 1 AND story = 'first'" or result.query == "SELECT id FROM test_table WHERE id = 1 AND story = E'first'" + - result.rowcount == 1 + - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT' + - result.query_result[0].id == 1 + +- name: postgresql_query - select query with positional arguments + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: SELECT story FROM test_table WHERE id = %s AND story = %s + positional_args: + - 2 + - second + register: result + ignore_errors: true + +- assert: + that: + - result is not changed + - result.query == "SELECT story FROM test_table WHERE id = 2 AND story = 'second'" or result.query == "SELECT story FROM test_table WHERE id = 2 AND story = E'second'" + - result.rowcount == 1 + - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT' + - result.query_result[0].story == 'second' + +- name: postgresql_query - simple update query + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: UPDATE test_table SET story = 'new' WHERE id = 3 + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.query == "UPDATE test_table SET story = 'new' WHERE id = 3" + - result.rowcount == 
1 + - result.statusmessage == 'UPDATE 1' + - result.query_result == {} + +- name: check the previous update + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: SELECT * FROM test_table WHERE story = 'new' AND id = 3 + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_query - simple update query in check_mode + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: UPDATE test_table SET story = 'CHECK_MODE' WHERE id = 3 + register: result + check_mode: true + +- assert: + that: + - result is changed + - result.query == "UPDATE test_table SET story = 'CHECK_MODE' WHERE id = 3" + - result.rowcount == 1 + - result.statusmessage == 'UPDATE 1' + - result.query_result == {} + +- name: check the previous update that nothing has been changed + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: SELECT * FROM test_table WHERE story = 'CHECK_MODE' AND id = 3 + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_query - try to update not existing row + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: UPDATE test_table SET story = 'new' WHERE id = 100 + register: result + ignore_errors: true + +- assert: + that: + - result is not changed + - result.query == "UPDATE test_table SET story = 'new' WHERE id = 100" + - result.rowcount == 0 + - result.statusmessage == 'UPDATE 0' + - result.query_result == {} + +- name: postgresql_query - insert query + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: INSERT INTO test_table (id, story) VALUES (%s, %s) + positional_args: + - 4 + - fourth + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.query == 
"INSERT INTO test_table (id, story) VALUES (4, 'fourth')" or result.query == "INSERT INTO test_table (id, story) VALUES (4, E'fourth')" + - result.rowcount == 1 + - result.statusmessage == 'INSERT 0 1' + - result.query_result == {} + +- name: postgresql_query - truncate test_table + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: TRUNCATE test_table + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.query == "TRUNCATE test_table" + - result.rowcount == 0 + - result.statusmessage == 'TRUNCATE TABLE' + - result.query_result == {} + +- name: postgresql_query - alter test_table + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: ALTER TABLE test_table ADD COLUMN foo int + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.query == "ALTER TABLE test_table ADD COLUMN foo int" + - result.rowcount == 0 + - result.statusmessage == 'ALTER TABLE' + +- name: postgresql_query - vacuum without autocommit must fail + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: VACUUM + register: result + ignore_errors: true + +- assert: + that: + - result.failed == true + +- name: postgresql_query - autocommit in check_mode must fail + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: VACUUM + autocommit: true + check_mode: true + register: result + ignore_errors: true + +- assert: + that: + - result.failed == true + - result.msg == "Using autocommit is mutually exclusive with check_mode" + +- name: postgresql_query - vacuum with autocommit + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: VACUUM + autocommit: true + register: result + +- assert: + that: + - result is 
changed + - result.query == "VACUUM" + - result.rowcount == 0 + - result.statusmessage == 'VACUUM' + - result.query_result == {} + +- name: postgresql_query - create test table for issue 59955 + become_user: '{{ pg_user }}' + become: true + postgresql_table: + login_user: '{{ pg_user }}' + login_db: postgres + name: test_array_table + columns: + - arr_col int[] + when: postgres_version_resp.stdout is version('9.4', '>=') + +- set_fact: + my_list: + - 1 + - 2 + - 3 + my_arr: '{1, 2, 3}' + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_query - insert array into test table by positional args + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: INSERT INTO test_array_table (arr_col) VALUES (%s) + positional_args: + - '{{ my_list }}' + register: result + when: postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result is changed + - result.query == "INSERT INTO test_array_table (arr_col) VALUES ('{1, 2, 3}')" + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_query - select array from test table by passing positional_args + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: SELECT * FROM test_array_table WHERE arr_col = %s + positional_args: + - '{{ my_list }}' + register: result + when: postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result is not changed + - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'" + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_query - select array from test table by passing named_args + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: SELECT * FROM test_array_table WHERE arr_col = %(arr_val)s + named_args: + arr_val: + 
- '{{ my_list }}' + register: result + when: postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result is not changed + - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'" + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_query - select array from test table by passing positional_args as a string + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: SELECT * FROM test_array_table WHERE arr_col = %s + positional_args: + - '{{ my_arr|string }}' + trust_input: yes + register: result + when: postgres_version_resp.stdout is version('9.4', '>=') + +- assert: + that: + - result is not changed + - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'" + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('9.4', '>=') + +- name: postgresql_query - test trust_input parameter + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + query: SELECT version() + trust_input: no + ignore_errors: yes + register: result + +- assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') + +- name: postgresql_query - clean up + become_user: '{{ pg_user }}' + become: true + postgresql_table: + login_user: '{{ pg_user }}' + login_db: postgres + name: test_array_table + state: absent + when: postgres_version_resp.stdout is version('9.4', '>=') + +############################# +# Check search_path parameter + +- name: postgresql_set - create test schemas + become_user: '{{ pg_user }}' + become: true + postgresql_schema: + login_user: '{{ pg_user }}' + login_db: postgres + name: '{{ item }}' + loop: + - query_test1 + - query_test2 + +- name: postgresql_set - create test tables + become_user: '{{ 
pg_user }}' + become: true + postgresql_table: + login_user: '{{ pg_user }}' + login_db: postgres + name: '{{ item }}' + columns: + - id int + loop: + - 'query_test1.test1' + - 'query_test2.test2' + +- name: postgresql_query - insert data + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: 'INSERT INTO {{ item }} (id) VALUES (1)' + search_path: + - query_test1 + - query_test2 + loop: + - test1 + - test2 + +- name: postgresql_query - get data + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: 'SELECT id FROM test1' + search_path: + - query_test1 + - query_test2 + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_query - get data, must fail + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + login_db: postgres + query: 'SELECT id FROM test1' + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + +# Tests for the as_single_query option +- name: Run queries from SQL script as a single query + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + path_to_script: ~{{ pg_user }}/test1.sql + positional_args: + - 1 + encoding: UTF-8 + as_single_query: yes + register: result + +- name: > + Must pass. 
Not changed because we can only + check statusmessage of the last query + assert: + that: + - result is not changed + - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT' + - result.query_list[0] == "CREATE FUNCTION add(integer, integer) RETURNS integer\n AS 'select $1 + $2;'\n LANGUAGE SQL\n IMMUTABLE\n RETURNS NULL ON NULL INPUT;\n\nSELECT story FROM test_table\n WHERE id = %s OR story = 'Данные';\n\nSELECT version();\n" + +############################################################################# +# Issue https://github.com/ansible-collections/community.postgresql/issues/45 +- name: Create table containing a decimal value + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: CREATE TABLE blabla (id int, num decimal) + +- name: Insert data + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: INSERT INTO blabla (id, num) VALUES (1, 1::decimal) + +- name: Get data + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: SELECT * FROM blabla + register: result + +- assert: + that: + - result.rowcount == 1 + +############################################################################# +# Issue https://github.com/ansible-collections/community.postgresql/issues/47 +- name: Get datetime.timedelta value + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: "SELECT EXTRACT(epoch from make_interval(secs => 3))" + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result.rowcount == 1 + - result.query_result[0]["date_part"] == 3 + when: postgres_version_resp.stdout is version('10', '>=') + +- name: Get interval value + become_user: '{{ pg_user }}' + become: true + postgresql_query: + login_user: '{{ pg_user }}' + db: postgres + query: "SELECT 
make_interval(secs => 3)" + register: result + when: postgres_version_resp.stdout is version('10', '>=') + +- assert: + that: + - result.rowcount == 1 + - result.query_result[0]["make_interval"] == "0:00:03" + when: postgres_version_resp.stdout is version('10', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/defaults/main.yml new file mode 100644 index 00000000..ff6dd5cb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/defaults/main.yml @@ -0,0 +1,7 @@ +--- +db_name: 'ansible_db' +db_user1: 'ansible_db_user1' +db_user2: 'ansible_db_user2' +dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' +db_session_role1: 'session_role1' +db_session_role2: 'session_role2' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff 
--git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/main.yml new file mode 100644 index 00000000..d894dd04 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/main.yml @@ -0,0 +1,9 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- import_tasks: postgresql_schema_session_role.yml + +# Initial CI tests of postgresql_schema module +- import_tasks: postgresql_schema_initial.yml diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml new file mode 100644 index 00000000..7d73ddb5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml @@ -0,0 +1,331 @@ +--- + +# Setup +- name: Create test roles + postgresql_user: + name: "{{ item }}" + state: present + encrypted: yes + password: password + role_attr_flags: LOGIN + db: postgres + login_user: "{{ pg_user }}" + loop: + - "{{ db_user1 }}" + - "{{ db_user2 }}" + +- name: Create DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ db_name }}" + owner: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + +# Test: CREATE SCHEMA in checkmode +- name: Create a new schema with name "acme" in check_mode + become_user: "{{ pg_user 
}}" + become: yes + check_mode: yes + postgresql_schema: + database: "{{ db_name }}" + name: acme + login_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: + - result is changed + - result.schema == 'acme' + +- name: Check that the new schema "acme" not exists + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'" + register: result + +- assert: + that: + - result.rowcount == 0 + +# Test: CREATE SCHEMA +- name: Create a new schema with name "acme" + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: "{{ db_name }}" + name: acme + login_user: "{{ pg_user }}" + trust_input: yes + register: result + +# Checks +- assert: + that: + - result is changed + - result.schema == 'acme' + - result.queries == [ 'CREATE SCHEMA "acme"' ] + +- name: Check that the new schema "acme" exists + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'" + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test: DROP SCHEMA in checkmode +- name: Drop schema "acme" in check_mode + become_user: "{{ pg_user }}" + become: yes + check_mode: yes + postgresql_schema: + database: "{{ db_name }}" + name: acme + state: absent + login_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: + - result is not changed + +- name: Check that the new schema "acme" still exists + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'" + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test: DROP SCHEMA +- name: Drop schema "acme" + become_user: "{{ pg_user }}" + become: 
yes + postgresql_schema: + database: "{{ db_name }}" + name: acme + state: absent + login_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: + - result is changed + - result.queries == [ 'DROP SCHEMA "acme"' ] + +- name: Check that no schema "acme" exists + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'" + register: result + ignore_errors: yes + +- assert: + that: + - result.rowcount == 0 + +# Test: trust_input parameter +- name: Create a new schema with potentially dangerous owner name + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: "{{ db_name }}" + name: acme + login_user: "{{ pg_user }}" + owner: "{{ dangerous_name }}" + trust_input: no + register: result + ignore_errors: yes + +# Checks +- assert: + that: + - result is failed + - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous' + +# Test: CREATE SCHEMA; WITH TABLE for DROP CASCADE test +- name: Create a new schema "acme" + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: "{{ db_name }}" + name: acme + login_user: "{{ pg_user }}" + register: result + +- name: Create table in schema for DROP CASCADE check + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "CREATE TABLE acme.table1()" + register: result2 + +# Checks +- assert: + that: + - result is changed + - result.schema == 'acme' + - result.queries == [ 'CREATE SCHEMA "acme"' ] + - result2.changed == true + - result2.statusmessage == 'CREATE TABLE' + +- name: Check that the new schema "acme" exists + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT schema_name,schema_owner FROM information_schema.schemata WHERE schema_name = 'acme'" + register: 
result + +- assert: + that: + - result.rowcount == 1 + +- name: Check that the new table "table1" in schema 'acme' exists + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'acme' AND tablename = 'table1')" + register: result + +- assert: + that: + - result.rowcount == 1 + +# Test: DROP SCHEMA ... CASCADE; +- name: Drop schema "acme" with cascade + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: "{{ db_name }}" + name: acme + state: absent + cascade_drop: yes + login_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: + - result is changed + - result.queries == [ 'DROP SCHEMA "acme" CASCADE' ] + +- name: Check that no schema "acme" exists + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'" + register: result + ignore_errors: yes + +- assert: + that: + - result.rowcount == 0 + +# Test: CREATE SCHEMA WITH OWNER ...; +- name: Create a new schema "acme" with a user "{{ db_user2 }}" who will own it + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: "{{ db_name }}" + name: acme + owner: "{{ db_user2 }}" + login_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: + - result is changed + - result.schema == 'acme' + - result.queries == [ 'CREATE SCHEMA "acme" AUTHORIZATION "{{ db_user2 }}"' ] + +- name: Check that the new schema "acme" exists and "{{ db_user2 }}" own it + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT schema_name,schema_owner FROM information_schema.schemata WHERE schema_name = 'acme' AND schema_owner = '{{ db_user2 }}'" + register: result + ignore_errors: yes + +- assert: + that: + - 
result.rowcount == 1 + +# Test: DROP SCHEMA +- name: Drop schema "acme" + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: "{{ db_name }}" + name: acme + state: absent + login_user: "{{ pg_user }}" + register: result + +# Checks +- assert: + that: + - result is changed + - result.queries == [ 'DROP SCHEMA "acme"' ] + +- name: Check that no schema "acme" exists + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'" + register: result + ignore_errors: yes + +- assert: + that: + - result.rowcount == 0 + + +# Cleanup +- name: Remove user + postgresql_user: + name: "{{ db_user2 }}" + state: absent + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + +- name: Destroy DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: absent + name: "{{ db_name }}" + login_user: "{{ pg_user }}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml new file mode 100644 index 00000000..6aaefff5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml @@ -0,0 +1,78 @@ +- name: Create a high privileged user + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ db_session_role1 }}" + state: "present" + password: "password" + role_attr_flags: "CREATEDB,LOGIN,CREATEROLE" + login_user: "{{ pg_user }}" + db: postgres + +- name: Create DB as session_role + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + 
session_role: "{{ db_session_role1 }}" + register: result + +- name: Create schema in own database + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + name: "{{ db_session_role1 }}" + session_role: "{{ db_session_role1 }}" + +- name: Create schema in own database, should be owned by session_role + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + name: "{{ db_session_role1 }}" + owner: "{{ db_session_role1 }}" + register: result + +- assert: + that: + - result is not changed + +- name: Fail when creating schema in postgres database as a regular user + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: postgres + login_user: "{{ pg_user }}" + name: "{{ db_session_role1 }}" + session_role: "{{ db_session_role1 }}" + ignore_errors: yes + register: result + +- assert: + that: + - result is failed + +- name: Drop test db + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: absent + name: "{{ db_session_role1 }}" + login_user: "{{ pg_user }}" + +- name: Drop test users + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ item }}" + state: absent + login_user: "{{ pg_user }}" + db: postgres + with_items: + - "{{ db_session_role1 }}" + - "{{ db_session_role2 }}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/defaults/main.yml new file mode 100644 index 00000000..049b5531 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/defaults/main.yml @@ -0,0 +1,5 @@ +--- +db_name: 'ansible_db' +db_user1: 'ansible_db_user1' +db_user2: 'ansible_db_user2' +db_default: 'postgres' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/main.yml new file mode 100644 index 00000000..b5306900 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/main.yml @@ -0,0 +1,8 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_sequence module +- import_tasks: postgresql_sequence_initial.yml + when: 
postgres_version_resp.stdout is version('9.4', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml new file mode 100644 index 00000000..f3672f26 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml @@ -0,0 +1,730 @@ +--- +# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Preparation for tests. +- name: postgresql_sequence - create a user to be owner of a database + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ db_user1 }}" + state: present + encrypted: yes + password: password + role_attr_flags: LOGIN + db: "{{ db_default }}" + login_user: "{{ pg_user }}" + +- name: postgresql_sequence - create DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: present + name: "{{ db_name }}" + owner: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + +- name: Create a user to be owner of a sequence + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + name: "{{ db_user2 }}" + state: present + encrypted: yes + password: password + role_attr_flags: LOGIN + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + +- name: postgresql_sequence - create a schema + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar_schema + +#################### +# Test: create sequence in checkmode +- name: postgresql_sequence - create a new sequence with name "foobar" in check_mode + become_user: "{{ pg_user }}" + become: yes + check_mode: yes + postgresql_sequence: + db: "{{ 
db_name }}" + login_user: "{{ pg_user }}" + name: foobar + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar' + - result.queries == ["CREATE SEQUENCE \"public\".\"foobar\""] + +# Real SQL check +- name: postgresql_sequence - check that the new sequence "foobar" not exists + become: yes + become_user: "{{ pg_user }}" + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 0 + - result.statusmessage == 'SELECT 0' + +#################### +# Test: create sequence +- name: postgresql_sequence - create a new sequence with name "foobar" + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar' + - result.queries == ["CREATE SEQUENCE \"public\".\"foobar\""] + +# Real SQL check +- name: postgresql_sequence - check that the new sequence "foobar" exists + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: drop sequence in checkmode +- name: postgresql_sequence - drop a sequence called foobar + become_user: "{{ pg_user }}" + become: yes + check_mode: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar + state: absent + register: result + +# Checks +- 
name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar' + - result.queries == ["DROP SEQUENCE \"public\".\"foobar\""] + +# Real SQL check +- name: postgresql_sequence - check that the sequence "foobar" still exists + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: drop sequence +- name: postgresql_sequence - drop a sequence called foobar + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar + state: absent + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar' + - result.queries == ["DROP SEQUENCE \"public\".\"foobar\""] + +# Real SQL check +- name: postgresql_sequence - check that the sequence "foobar" not exists + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 0 + +#################### +# Test: drop nonexistent sequence +- name: postgresql_sequence - drop a sequence called foobar which does not exists + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar + state: absent + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is not changed + - result.sequence == 
'foobar' + - result.queries == [] + +# Real SQL check +- name: postgresql_sequence - check that the sequence "foobar" not exists + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 0 + +#################### +# Test: create sequence with options +- name: postgresql_sequence - create an descending sequence called foobar_desc, starting at 101 and which cycle between 1 to 1000 + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar_desc + increment: -1 + start: 101 + minvalue: 1 + maxvalue: 1000 + cycle: yes + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar_desc' + - result.increment == '-1' + - result.minvalue == '1' + - result.maxvalue == '1000' + - result.cycle == 'YES' + - result.queries == ["CREATE SEQUENCE \"public\".\"foobar_desc\" INCREMENT BY -1 MINVALUE 1 MAXVALUE 1000 START WITH 101 CYCLE"] + +# Real SQL check +- name: postgresql_sequence - check that the new sequence "foobar_desc" exists + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar_desc'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: rename a sequence in checkmode +- name: postgresql_sequence - rename an existing sequence named foobar_desc to foobar_with_options + become_user: "{{ pg_user }}" + become: yes + check_mode: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: 
foobar_desc + rename_to: foobar_with_options + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar_desc' + - result.newname == 'foobar_with_options' + - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_desc\" RENAME TO \"foobar_with_options\""] + +# Real SQL check +- name: postgresql_sequence - check that the sequence "foobar_desc" still exists and is not renamed + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar_desc'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: rename a sequence +- name: postgresql_sequence - rename an existing sequence named foobar_desc to foobar_with_options + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar_desc + rename_to: foobar_with_options + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar_desc' + - result.newname == 'foobar_with_options' + - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_desc\" RENAME TO \"foobar_with_options\""] + +# Real SQL check +- name: postgresql_sequence - check that the renamed sequence "foobar_with_options" exists + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar_with_options'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: change schema of a sequence in checkmode 
+- name: postgresql_sequence - change schema of an existing sequence from public to foobar_schema + become_user: "{{ pg_user }}" + become: yes + check_mode: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar_with_options + newschema: foobar_schema + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar_with_options' + - result.schema == 'public' + - result.newschema == 'foobar_schema' + - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_with_options\" SET SCHEMA \"foobar_schema\""] + +# Real SQL check +- name: postgresql_sequence - check that the sequence "foobar_with_options" still exists in the old schema + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name,sequence_schema FROM information_schema.sequences WHERE sequence_name = 'foobar_with_options' AND sequence_schema = 'public'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: change schema of a sequence +- name: postgresql_sequence - change schema of an existing sequence from public to foobar_schema + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar_with_options + newschema: foobar_schema + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar_with_options' + - result.schema == 'public' + - result.newschema == 'foobar_schema' + - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_with_options\" SET SCHEMA \"foobar_schema\""] + +# Real SQL check +- name: postgresql_sequence - check that the sequence "foobar_with_options" exists in new schema + become_user: "{{ pg_user 
}}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name,sequence_schema FROM information_schema.sequences WHERE sequence_name = 'foobar_with_options' AND sequence_schema = 'foobar_schema'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: change owner of a sequence in checkmode +- name: postgresql_sequence - change owner of an existing sequence from "{{ pg_user }}" to "{{ db_user1 }}" + become_user: "{{ pg_user }}" + become: yes + check_mode: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar_with_options + schema: foobar_schema + owner: "{{ db_user1 }}" + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar_with_options' + - result.owner == "{{ pg_user }}" + - result.queries == ["ALTER SEQUENCE \"foobar_schema\".\"foobar_with_options\" OWNER TO \"{{ db_user1 }}\""] + +# Real SQL check +- name: postgresql_sequence - check that the sequence "foobar_with_options" has still the old owner + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT c.relname,a.rolname,n.nspname + FROM pg_class as c + JOIN pg_authid as a on (c.relowner = a.oid) + JOIN pg_namespace as n on (c.relnamespace = n.oid) + WHERE c.relkind = 'S' and + c.relname = 'foobar_with_options' and + n.nspname = 'foobar_schema' and + a.rolname = '{{ pg_user }}'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: change owner of a sequence +- name: postgresql_sequence - change owner of an existing sequence from "{{ pg_user }}" to "{{ db_user1 }}" + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + 
db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar_with_options + schema: foobar_schema + owner: "{{ db_user1 }}" + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar_with_options' + - result.owner == "{{ pg_user }}" + - result.queries == ["ALTER SEQUENCE \"foobar_schema\".\"foobar_with_options\" OWNER TO \"{{ db_user1 }}\""] + +# Real SQL check +- name: postgresql_sequence - check that the sequence "foobar_with_options" has a new owner + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT c.relname,a.rolname,n.nspname + FROM pg_class as c + JOIN pg_authid as a on (c.relowner = a.oid) + JOIN pg_namespace as n on (c.relnamespace = n.oid) + WHERE c.relkind = 'S' and + c.relname = 'foobar_with_options' and + n.nspname = 'foobar_schema' and + a.rolname = '{{ db_user1 }}'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: drop sequence with cascade + +# CREATE SEQUENCE seq1; +# CREATE TABLE t1 (f1 INT NOT NULL DEFAULT nextval('seq1')); +# DROP SEQUENCE seq1 CASCADE; +- name: postgresql_sequence - create sequence for drop cascade test + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: seq1 + +- name: postgresql_sequence - create table which use sequence for drop cascade test + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: t1 + columns: + - f1 INT NOT NULL DEFAULT nextval('seq1') + +#################### +# Test: drop sequence with cascade in checkmode +- name: postgresql_sequence - drop with cascade a sequence called seq1 + become_user: "{{ pg_user }}" + become: yes + check_mode: yes + postgresql_sequence: + db: "{{ 
db_name }}" + login_user: "{{ pg_user }}" + name: seq1 + state: absent + cascade: yes + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'seq1' + - result.queries == ["DROP SEQUENCE \"public\".\"seq1\" CASCADE"] + +# Real SQL check +- name: postgresql_sequence - check that the sequence "seq1" still exists + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'seq1'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: drop sequence with cascade +- name: postgresql_sequence - drop with cascade a sequence called seq1 + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: seq1 + state: absent + cascade: yes + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'seq1' + - result.queries == ["DROP SEQUENCE \"public\".\"seq1\" CASCADE"] + +# Real SQL check +- name: postgresql_sequence - check that the sequence "seq1" not exists + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'seq1'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 0 + +#################### +# Test: create sequence with owner in checkmode +- name: postgresql_sequence - create a new sequence with name "foobar2" with owner "{{ db_user2 }}" + become_user: "{{ pg_user }}" + become: yes + check_mode: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ 
pg_user }}" + name: foobar2 + owner: "{{ db_user2 }}" + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar2' + - result.queries == ["CREATE SEQUENCE \"public\".\"foobar2\"", "ALTER SEQUENCE \"public\".\"foobar2\" OWNER TO \"ansible_db_user2\""] + +# Real SQL check +- name: postgresql_sequence - check that the new sequence "foobar2" does not exists + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar2'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 0 + +#################### +# Test: create sequence with owner +- name: postgresql_sequence - create a new sequence with name "foobar2" with owner "{{ db_user2 }}" + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: foobar2 + owner: "{{ db_user2 }}" + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is changed + - result.sequence == 'foobar2' + - result.queries == ["CREATE SEQUENCE \"public\".\"foobar2\"", "ALTER SEQUENCE \"public\".\"foobar2\" OWNER TO \"ansible_db_user2\""] + +# Real SQL check +- name: postgresql_sequence - check that the new sequence "foobar2" exists + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar2'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +- name: postgresql_sequence - check that the sequence "foobar2" has owner "{{ db_user2 }}" + become_user: "{{ pg_user }}" + become: yes + 
postgresql_query: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + query: "SELECT c.relname,a.rolname,n.nspname + FROM pg_class as c + JOIN pg_authid as a on (c.relowner = a.oid) + JOIN pg_namespace as n on (c.relnamespace = n.oid) + WHERE c.relkind = 'S' and + c.relname = 'foobar2' and + n.nspname = 'public' and + a.rolname = '{{ db_user2 }}'" + register: result + +- name: postgresql_sequence - check with assert the output + assert: + that: + - result.rowcount == 1 + +#################### +# Test: create sequence with trust_input +- name: postgresql_sequence - check that trust_input works as expected + become_user: "{{ pg_user }}" + become: yes + postgresql_sequence: + db: "{{ db_name }}" + login_user: "{{ pg_user }}" + name: 'just_a_name"; SELECT * FROM information_schema.tables; --' + trust_input: no + owner: "{{ db_user2 }}" + ignore_errors: yes + register: result + +# Checks +- name: postgresql_sequence - check with assert the output + assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') + +# Cleanup +- name: postgresql_sequence - destroy DB + become_user: "{{ pg_user }}" + become: yes + postgresql_db: + state: absent + name: "{{ db_name }}" + login_user: "{{ pg_user }}" + +- name: remove test roles + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + state: absent + login_db: "{{ db_default }}" + login_user: "{{ pg_user }}" + name: "{{ item }}" + loop: + - "{{ db_user1 }}" + - "{{ db_user2 }}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/main.yml new file mode 100644 index 00000000..3f16eb0d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/main.yml @@ -0,0 +1,11 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of the postgresql_set module +- include_tasks: postgresql_set_initial.yml + when: postgres_version_resp.stdout is version('9.6', '>=') + +- include_tasks: options_coverage.yml + when: postgres_version_resp.stdout is version('9.6', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml new file mode 100644 index 00000000..c41d3184 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/options_coverage.yml @@ -0,0 +1,55 @@ +# 
Test code for the postgresql_set module +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Notice: assertions are different for Ubuntu 16.04 and FreeBSD because they don't work +# correctly for these tests. There are some strange behaviors in Shippable CI. +# However, I checked it manually for all points (including Ubuntu 16.04 and FreeBSD) +# and it worked as expected. + +- vars: + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: postgres + + block: + - name: Define a test setting map + set_fact: + setting_map: + allow_system_table_mods: on + archive_command: /bin/true + archive_timeout: 10min + autovacuum_work_mem: '-1' + backend_flush_after: 0 + autovacuum_vacuum_scale_factor: 0.5 + client_encoding: UTF-8 + bgwriter_delay: 400 + maintenance_work_mem: 32mb + effective_cache_size: 1024kB + shared_buffers: 1GB + stats_temp_directory: pg_stat_tmp + wal_level: replica + log_statement: mod + track_functions: none + + # Check mode: + - name: Set settings in check mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: '{{ item.key }}' + value: '{{ item.value }}' + check_mode: yes + with_dict: '{{ setting_map }}' + + # Actual mode: + - name: Set settings in actual mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: '{{ item.key }}' + value: '{{ item.value }}' + with_dict: '{{ setting_map }}' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml new file mode 100644 index 00000000..125e1dab --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml @@ -0,0 +1,442 @@ +# Test code for the postgresql_set module +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# Notice: assertions are different for Ubuntu 16.04 and FreeBSD because they don't work +# correctly for these tests. There are some strange behaviors in Shippable CI. +# However, I checked it manually for all points (including Ubuntu 16.04 and FreeBSD) +# and it worked as expected. + +- vars: + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: postgres + + block: + - name: postgresql_set - preparation to the next step + <<: *task_parameters + become_user: "{{ pg_user }}" + become: yes + postgresql_set: + <<: *pg_parameters + name: work_mem + reset: yes + + ##################### + # Testing check_mode: + - name: postgresql_set - get work_mem initial value + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SHOW work_mem + register: before + + - name: postgresql_set - set work_mem (restart is not required), check_mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: work_mem + value: 12MB + register: set_wm + check_mode: yes + + - assert: + that: + - set_wm.name == 'work_mem' + - set_wm.changed == true + - set_wm.prev_val_pretty == before.query_result[0].work_mem + - set_wm.value_pretty == '12MB' + - set_wm.restart_required == false + + - name: postgresql_set - get work_mem value to check, must be the same as initial + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SHOW work_mem + register: after + + - assert: + that: + - before.query_result[0].work_mem == after.query_result[0].work_mem + ###### + # + + - name: postgresql_set - 
set work_mem (restart is not required) + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: work_mem + value: 12mb + register: set_wm + + - assert: + that: + - set_wm.name == 'work_mem' + - set_wm.changed == true + - set_wm.value_pretty == '12MB' + - set_wm.value_pretty != set_wm.prev_val_pretty + - set_wm.restart_required == false + - set_wm.value.value == 12582912 + - set_wm.value.unit == 'b' + when: + - ansible_distribution != "Ubuntu" + - ansible_distribution_major_version != '16' + - ansible_distribution != "FreeBSD" + + - assert: + that: + - set_wm.name == 'work_mem' + - set_wm.changed == true + - set_wm.restart_required == false + when: + - ansible_distribution == "Ubuntu" + - ansible_distribution_major_version == '16' + + - name: postgresql_set - reset work_mem (restart is not required) + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: work_mem + reset: yes + register: reset_wm + + - assert: + that: + - reset_wm.name == 'work_mem' + - reset_wm.changed == true + - reset_wm.value_pretty != reset_wm.prev_val_pretty + - reset_wm.restart_required == false + - reset_wm.value.value != '12582912' + when: + - ansible_distribution != "Ubuntu" + - ansible_distribution_major_version != '16' + - ansible_distribution != "FreeBSD" + + - assert: + that: + - reset_wm.name == 'work_mem' + - reset_wm.changed == true + - reset_wm.restart_required == false + when: + - ansible_distribution == "Ubuntu" + - ansible_distribution_major_version == '16' + + - name: postgresql_set - reset work_mem again to check that nothing changed (restart is not required) + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: work_mem + reset: yes + register: reset_wm2 + + - assert: + that: + - reset_wm2.name == 'work_mem' + - reset_wm2.changed == false + - reset_wm2.value_pretty == reset_wm2.prev_val_pretty + - reset_wm2.restart_required == false + when: + - ansible_distribution != "Ubuntu" + - ansible_distribution_major_version != '16' + + - 
assert: + that: + - reset_wm2.name == 'work_mem' + - reset_wm2.changed == false + - reset_wm2.restart_required == false + when: + - ansible_distribution == "Ubuntu" + - ansible_distribution_major_version == '16' + + - name: postgresql_set - preparation to the next step + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: work_mem + value: 14MB + + - name: postgresql_set - set work_mem to initial state (restart is not required) + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: work_mem + value: default + register: def_wm + + - assert: + that: + - def_wm.name == 'work_mem' + - def_wm.changed == true + - def_wm.value_pretty != def_wm.prev_val_pretty + - def_wm.restart_required == false + - def_wm.value.value != '14680064' + when: + - ansible_distribution != "Ubuntu" + - ansible_distribution_major_version != '16' + - ansible_distribution != 'FreeBSD' + + - assert: + that: + - def_wm.name == 'work_mem' + - def_wm.changed == true + - def_wm.restart_required == false + when: + - ansible_distribution == "Ubuntu" + - ansible_distribution_major_version == '16' + - ansible_distribution != 'FreeBSD' + + - name: postgresql_set - set shared_buffers (restart is required) + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: shared_buffers + value: 111MB + register: set_shb + + - assert: + that: + - set_shb.name == 'shared_buffers' + - set_shb.changed == true + - set_shb.restart_required == true + + # We don't check value.unit because it is none + - name: postgresql_set - set autovacuum (enabled by default, restart is not required) + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: autovacuum + value: off + register: set_aut + + - assert: + that: + - set_aut.name == 'autovacuum' + - set_aut.changed == true + - set_aut.restart_required == false + - set_aut.value.value == 'off' + + # Test check_mode, step 1. 
At the previous test we set autovacuum = 'off' + - name: postgresql - try to change autovacuum again in check_mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: autovacuum + value: on + register: set_aut + check_mode: yes + + - assert: + that: + - set_aut.name == 'autovacuum' + - set_aut.changed == true + - set_aut.restart_required == false + - set_aut.value.value == 'off' + + # Test check_mode, step 2 + - name: postgresql - check that autovacuum wasn't actually changed after change in check_mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: autovacuum + value: off + register: set_aut + check_mode: yes + + - assert: + that: + - set_aut.name == 'autovacuum' + - set_aut.changed == false + - set_aut.restart_required == false + - set_aut.value.value == 'off' + + # Additional check by SQL query: + - name: postgresql_set - get autovacuum value to check, must be off + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: SHOW autovacuum + register: result + + - assert: + that: + - result.query_result[0].autovacuum == 'off' + + # Test check_mode, step 3. It is different from + # the prev test - it runs without check_mode: yes. + # Before the check_mode tests autovacuum was off + - name: postgresql - check that autovacuum wasn't actually changed after change in check_mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: autovacuum + value: off + register: set_aut + + - assert: + that: + - set_aut.name == 'autovacuum' + - set_aut.changed == false + - set_aut.restart_required == false + - set_aut.value.value == 'off' + + ################# + # Bugfix of 67377 + - name: archive command with mb + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + trust_input: yes + name: archive_command + value: 'test ! 
-f /mnt/postgres/mb/%f && cp %p /mnt/postgres/mb/%f' + + # Check: + - name: check value + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: select reset_val from pg_settings where name = 'archive_command' + register: result + + - assert: + that: + - result.query_result.0.reset_val == "test ! -f /mnt/postgres/mb/%f && cp %p /mnt/postgres/mb/%f" + + ############################# + # Check trust_input parameter + - name: postgresql_set - check trust_input + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: shared_buffers + value: 111MB + trust_input: no + session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + register: result + ignore_errors: yes + + - assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') + + ############################################################################### + # Bugfix of https://github.com/ansible-collections/community.general/issues/775 + - name: postgresql_set - turn on archive mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: archive_mode + value: 'on' + + - name: Restart PostgreSQL + become: yes + service: + name: "{{ postgresql_service }}" + state: restarted + + - name: postgresql_set - set empty string as value + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: archive_command + value: '' + register: result + + - assert: + that: + - result is changed + + - name: postgresql_set - set empty string as value again + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: archive_command + value: '' + register: result + + - assert: + that: + - result is not changed + + - name: postgresql_set - set empty string as value again in check mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: archive_command + value: '' + register: result + check_mode: yes + + - assert: + that: + - result is not changed + + - name: Pass non-existent parameter + <<: *task_parameters + 
postgresql_set: + <<: *pg_parameters + name: Timezone + value: utc + register: result + ignore_errors: yes + + - assert: + that: + - result is failed + - result.msg is search('No such parameter') + + ####################################################################### + # https://github.com/ansible-collections/community.postgresql/issues/48 + - name: Pass a parameter containing b in check_mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: archive_command + value: '/usr/bin/touch %f' + register: result + check_mode: yes + + - assert: + that: + - result is changed + + - name: Pass a parameter containing b + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: archive_command + value: '/usr/bin/touch %f' + register: result + + - assert: + that: + - result is changed + + - name: Pass another parameter containing B in check_mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: track_activity_query_size + value: '4096B' + register: result + check_mode: yes + + - assert: + that: + - result is changed + + - name: Pass another parameter containing b in check_mode + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: track_activity_query_size + value: '2048b' + register: result + check_mode: yes + + - assert: + that: + - result is changed diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/meta/main.yml 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/main.yml new file mode 100644 index 00000000..d44aab9d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/main.yml @@ -0,0 +1,9 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_slot module +# Physical replication slots are available since PostgreSQL 9.4 +- import_tasks: postgresql_slot_initial.yml + when: postgres_version_resp.stdout is version('9.4', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml new file mode 100644 index 00000000..4f009ac0 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml @@ -0,0 +1,735 @@ +--- +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: postgresql_slot - set max_replication_slots + become_user: "{{ pg_user }}" + become: yes + postgresql_set: + login_user: "{{ pg_user }}" + db: postgres + name: max_replication_slots + value: '10' + +- name: postgresql_slot - set wal_level to logical + become_user: "{{ pg_user }}" + become: yes + postgresql_set: + login_user: "{{ pg_user }}" + db: postgres + name: wal_level + value: logical + +# To avoid CI timeouts +- name: Kill all postgres processes + shell: 'pkill -u {{ pg_user }}' + become: yes + when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8' + ignore_errors: yes + +- name: postgresql_slot - stop PostgreSQL + become: yes + service: + name: "{{ postgresql_service }}" + state: stopped + when: (ansible_facts.distribution_major_version != '8' and ansible_facts.distribution == 'CentOS') or ansible_facts.distribution != 'CentOS' + +- name: postgresql_slot - pause between stop and start PostgreSQL + ansible.builtin.pause: + seconds: 5 + +- name: postgresql_slot - start PostgreSQL + become: yes + service: + name: "{{ postgresql_service }}" + state: started + +# +# state=present +# + +# check_mode +- name: postgresql_slot - create slot in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot0 + check_mode: yes + register: result + +- assert: + that: + - result is changed + - result.queries == [] + +# Check, rowcount must be 0 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 0 + +# true mode +- name: postgresql_slot - create physical slot + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + 
login_user: "{{ pg_user }}" + db: postgres + name: slot0 + register: result + +- assert: + that: + - result is changed + - result.queries == ["SELECT pg_create_physical_replication_slot('slot0', false)"] + when: postgres_version_resp.stdout is version('9.6', '>=') + +- assert: + that: + - result is changed + - result.queries == ["SELECT pg_create_physical_replication_slot('slot0')"] + when: postgres_version_resp.stdout is version('9.6', '<') + +# Check, rowcount must be 1 +- name: postgresql_slot - check that the slot exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0' and slot_type = 'physical'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# check mode +- name: postgresql_slot - try create physical slot again in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot0 + check_mode: yes + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +# Check, rowcount must be 1 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0' and slot_type = 'physical'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# true mode +- name: postgresql_slot - try create physical slot again + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot0 + slot_type: physical + register: result + +- assert: + that: + - result is not changed + - result.queries == [] + +# Check, rowcount must be 1 +- name: postgresql_slot - check that nothing changed after the previous step + 
become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0' and slot_type = 'physical'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# +# immediately_reserve +# + +- name: postgresql_slot - create physical slot with immediately_reserve + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot1 + immediately_reserve: yes + register: result + when: postgres_version_resp.stdout is version('9.6', '>=') + +- assert: + that: + - result is changed + - result.queries == ["SELECT pg_create_physical_replication_slot('slot1', true)"] + when: postgres_version_resp.stdout is version('9.6', '>=') + +# Check, rowcount must be 1 +- name: postgresql_slot - check that the slot exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical' and restart_lsn is not NULL" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('9.6', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('9.6', '>=') + +# +# slot_type: logical +# +# available from postgresql 10 +# +# on RedHat family tests failed: +# ERROR: could not access file "test_decoding": No such file or directory +# "Your distrib did not compile the test decoder." +# So the tests are restricted to Ubuntu, even though the module functionality +# depends only on the PostgreSQL server version. 
+ +# check_mode +- name: postgresql_slot - create slot in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot2 + slot_type: logical + check_mode: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result is changed + - result.queries == [] + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# Check, rowcount must be 0 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# true mode +- name: postgresql_slot - create logical slot + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot2 + slot_type: logical + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result is changed + - result.queries == ["SELECT pg_create_logical_replication_slot('slot2', 'test_decoding')"] + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# Check, rowcount must be 1 +- name: postgresql_slot - check that the slot exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'" + ignore_errors: yes + register: result + 
when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# check mode +- name: postgresql_slot - try create logical slot again in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot2 + slot_type: logical + check_mode: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# Check, rowcount must be 1 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# true mode +- name: postgresql_slot - try create logical slot again + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot2 + slot_type: logical + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# Check, rowcount must be 1 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + 
postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# +# output_plugin: test_decoding +# + +- name: postgresql_slot - create logical slot with output_plugin + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot3 + slot_type: logical + output_plugin: test_decoding + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result is changed + - result.queries == ["SELECT pg_create_logical_replication_slot('slot3', 'test_decoding')"] + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# Check, rowcount must be 1 +- name: postgresql_slot - check that the slot exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot3' and slot_type = 'logical' and plugin = 'test_decoding'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# +# state: absent for logical slots +# + +# check_mode +- name: postgresql_slot - drop logical slot in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot2 + state: absent + check_mode: yes + register: result + 
when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result is changed + - result.queries == [] + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# Check, rowcount must be 1 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# true mode +- name: postgresql_slot - drop logical slot + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot2 + state: absent + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result is changed + - result.queries == ["SELECT pg_drop_replication_slot('slot2')"] + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# Check, rowcount must be 0 +- name: postgresql_slot - check that the slot does not exist after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# check mode +- name: postgresql_slot 
- try drop logical slot again in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot2 + state: absent + check_mode: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# Check, rowcount must be 0 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# true mode +- name: postgresql_slot - try drop logical slot again + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot2 + state: absent + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# Check, rowcount must be 0 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +- 
assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + +# +# state=absent for physical slots +# + +# check_mode +- name: postgresql_slot - drop physical slot in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot1 + state: absent + check_mode: yes + register: result + +- assert: + that: + - result is changed + - result.queries == [] + when: postgres_version_resp.stdout is version('9.6', '>=') + +# Check, rowcount must be 1 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('9.6', '>=') + +- assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('9.6', '>=') + +# true mode +- name: postgresql_slot - drop physical slot + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot1 + state: absent + register: result + when: postgres_version_resp.stdout is version('9.6', '>=') + +- assert: + that: + - result is changed + - result.queries == ["SELECT pg_drop_replication_slot('slot1')"] + when: postgres_version_resp.stdout is version('9.6', '>=') + +# Check, rowcount must be 0 +- name: postgresql_slot - check that the slot does not exist after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('9.6', '>=') + +- assert: + that: + - result.rowcount == 0 + when: 
postgres_version_resp.stdout is version('9.6', '>=') + +# check mode +- name: postgresql_slot - try drop physical slot again in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot1 + state: absent + check_mode: yes + register: result + when: postgres_version_resp.stdout is version('9.6', '>=') + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('9.6', '>=') + +# Check, rowcount must be 0 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('9.6', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('9.6', '>=') + +# true mode +- name: postgresql_slot - try drop physical slot again + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: slot1 + state: absent + register: result + when: postgres_version_resp.stdout is version('9.6', '>=') + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('9.6', '>=') + +# Check, rowcount must be 0 +- name: postgresql_slot - check that nothing changed after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('9.6', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('9.6', '>=') + +# Check 
trust input +- name: postgresql_slot - try using a bad name + postgresql_slot: + session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + db: postgres + name: slot1 + trust_input: no + register: result + ignore_errors: true + when: postgres_version_resp.stdout is version('9.6', '>=') + +- name: postgresql_slot - check that using a dangerous name fails + assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') + when: postgres_version_resp.stdout is version('9.6', '>=') + +# +# clean up +# +- name: postgresql_slot - clean up + become_user: "{{ pg_user }}" + become: yes + postgresql_slot: + login_user: "{{ pg_user }}" + db: postgres + name: "{{ item }}" + state: absent + ignore_errors: yes + when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu' + with_items: + - slot0 + - slot3 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/aliases new file mode 100644 index 00000000..786e0531 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/aliases @@ -0,0 +1,4 @@ +destructive +shippable/posix/group1 +skip/freebsd +skip/rhel diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml new file mode 100644 index 00000000..e1433f9f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/defaults/main.yml @@ -0,0 +1,15 @@ +pg_user: postgres +db_default: postgres +master_port: 5433 +replica_port: 
5434 + +test_table1: acme1 +test_pub: first_publication +test_pub2: second_publication +replication_role: logical_replication +replication_pass: alsdjfKJKDf1# +test_db: acme_db +test_subscription: test +test_role1: alice +test_role2: bob +conn_timeout: 100 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml new file mode 100644 index 00000000..d72e4d23 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_replication diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml new file mode 100644 index 00000000..e440e8c8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/main.yml @@ -0,0 +1,12 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial tests of postgresql_subscription module: + +- import_tasks: setup_publication.yml + when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18' + +- import_tasks: postgresql_subscription_initial.yml + when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18' diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml new file mode 100644 index 00000000..695edd0e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml @@ -0,0 +1,672 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- vars: + dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: '{{ test_db }}' + + block: + + - name: Create roles to test owner parameter + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ item }}' + role_attr_flags: SUPERUSER,LOGIN + loop: + - '{{ test_role1 }}' + - '{{ test_role2 }}' + + #################### + # Test mode: present + #################### + - name: Create subscription + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + publications: '{{ test_pub }}' + connparams: + host: 127.0.0.1 + port: '{{ master_port }}' + user: '{{ replication_role }}' + password: '{{ replication_pass }}' + dbname: '{{ test_db }}' + trust_input: no + + - assert: + that: + - result is changed + - result.name == '{{ test_subscription }}' + - result.queries == ["CREATE SUBSCRIPTION test CONNECTION 'host=127.0.0.1 port={{ master_port }} user={{ replication_role }} password={{ replication_pass 
}} dbname={{ test_db }}' PUBLICATION {{ test_pub }}"] + - result.exists == true + - result.initial_state == {} + - result.final_state.owner == '{{ pg_user }}' + - result.final_state.enabled == true + - result.final_state.publications == ["{{ test_pub }}"] + - result.final_state.synccommit == true + - result.final_state.slotname == '{{ test_subscription }}' + - result.final_state.conninfo.dbname == '{{ test_db }}' + - result.final_state.conninfo.host == '127.0.0.1' + - result.final_state.conninfo.port == {{ master_port }} + - result.final_state.conninfo.user == '{{ replication_role }}' + - result.final_state.conninfo.password == '{{ replication_pass }}' + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'" + + - assert: + that: + - result.rowcount == 1 + + ################### + # Test mode: absent + ################### + + - name: Drop subscription in check mode + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: absent + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }}"] + - result.final_state == result.initial_state + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Drop subscription + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: absent + + - assert: + that: + - result is changed + - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }}"] + - result.final_state != result.initial_state + + - name: Check + <<: 
*task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'" + + - assert: + that: + - result.rowcount == 0 + + ################## + # Test owner param + ################## + + - name: Create with owner + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + publications: '{{ test_pub }}' + owner: '{{ test_role1 }}' + connparams: + host: 127.0.0.1 + port: '{{ master_port }}' + user: '{{ replication_role }}' + password: '{{ replication_pass }}' + dbname: '{{ test_db }}' + trust_input: no + + - assert: + that: + - result.final_state.owner == '{{ test_role1 }}' + - result.queries[1] == 'ALTER SUBSCRIPTION {{ test_subscription }} OWNER TO "{{ test_role1 }}"' + + - name: Try to set this owner again + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + publications: '{{ test_pub }}' + owner: '{{ test_role1 }}' + trust_input: no + + - assert: + that: + - result is not changed + - result.initial_state == result.final_state + - result.final_state.owner == '{{ test_role1 }}' + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: > + SELECT subname FROM pg_subscription AS s + JOIN pg_catalog.pg_roles AS r ON s.subowner = r.oid + WHERE subname = '{{ test_subscription }}' and r.rolname = '{{ test_role1 }}' + + - assert: + that: + - result.rowcount == 1 + + - name: Set another owner in check mode + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + publications: '{{ test_pub }}' + owner: '{{ test_role2 }}' + trust_input: no + check_mode: yes + + - assert: + that: + - result is 
changed + - result.initial_state == result.final_state + - result.final_state.owner == '{{ test_role1 }}' + - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} OWNER TO "{{ test_role2 }}"'] + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: > + SELECT subname FROM pg_subscription AS s + JOIN pg_catalog.pg_roles AS r ON s.subowner = r.oid + WHERE subname = '{{ test_subscription }}' and r.rolname = '{{ test_role1 }}' + + - assert: + that: + - result.rowcount == 1 + + - name: Set another owner + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + publications: '{{ test_pub }}' + owner: '{{ test_role2 }}' + trust_input: no + + - assert: + that: + - result is changed + - result.initial_state != result.final_state + - result.final_state.owner == '{{ test_role2 }}' + - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} OWNER TO "{{ test_role2 }}"'] + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: > + SELECT subname FROM pg_subscription AS s + JOIN pg_catalog.pg_roles AS r ON s.subowner = r.oid + WHERE subname = '{{ test_subscription }}' and r.rolname = '{{ test_role2 }}' + + - assert: + that: + - result.rowcount == 1 + + ########################## + # Test trust_input param # + ########################## + + - name: Test trust_input parameter + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + publications: '{{ test_pub }}' + session_role: '{{ dangerous_name }}' + owner: '{{ test_role1 }}' + trust_input: no + connparams: + host: 127.0.0.1 + port: '{{ master_port }}' + user: '{{ replication_role }}' + password: '{{ replication_pass }}' + dbname: '{{ test_db }}' + ignore_errors: yes + + - 
assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') + + ############## + # Test cascade + ############## + + - name: Drop subscription cascade in check mode + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: absent + cascade: yes + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }} CASCADE"] + - result.final_state == result.initial_state + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Drop subscription cascade + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: absent + cascade: yes + + - assert: + that: + - result is changed + - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }} CASCADE"] + - result.final_state != result.initial_state + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'" + + - assert: + that: + - result.rowcount == 0 + + ########################### + # Test subsparams parameter + ########################### + + - name: Create subscription with subsparams + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + publications: '{{ test_pub }}' + connparams: + host: 127.0.0.1 + port: '{{ master_port }}' + user: '{{ replication_role }}' + password: '{{ replication_pass }}' + dbname: '{{ test_db }}' + subsparams: + enabled: no + synchronous_commit: no + 
trust_input: no + + - assert: + that: + - result is changed + - result.name == '{{ test_subscription }}' + - result.queries == ["CREATE SUBSCRIPTION test CONNECTION 'host=127.0.0.1 port={{ master_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }}' PUBLICATION {{ test_pub }} WITH (enabled = false, synchronous_commit = false)"] + - result.exists == true + - result.final_state.enabled == false + - result.final_state.synccommit == false + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: > + SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}' + AND subenabled = 'f' AND subsynccommit = 'false' + + - assert: + that: + - result.rowcount == 1 + + - name: Enable changed params + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + publications: '{{ test_pub }}' + subsparams: + enabled: yes + synchronous_commit: yes + trust_input: no + + - assert: + that: + - result is changed + - result.name == '{{ test_subscription }}' + - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} ENABLE", "ALTER SUBSCRIPTION {{ test_subscription }} SET (synchronous_commit = true)"] + - result.exists == true + - result.final_state.enabled == true + - result.final_state.synccommit == true + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: > + SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}' + AND subenabled = 't' AND subsynccommit = 'true' + + - assert: + that: + - result.rowcount == 1 + + - name: Enable the same params again + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + publications: '{{ test_pub }}' + subsparams: + enabled: yes + synchronous_commit: yes + trust_input: 
no + + - assert: + that: + - result is not changed + - result.name == '{{ test_subscription }}' + - result.queries == [] + - result.exists == true + - result.final_state == result.initial_state + - result.final_state.enabled == true + - result.final_state.synccommit == true + + ########################## + # Test change publications + ########################## + + - name: Change publications in check mode + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + publications: + - '{{ test_pub }}' + - '{{ test_pub2 }}' + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + - result.name == '{{ test_subscription }}' + - result.final_state.publications == result.initial_state.publications + - result.final_state.publications == ['{{ test_pub }}'] + - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} SET PUBLICATION {{ test_pub }}, {{ test_pub2 }}'] + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: > + SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}' + AND subpublications = '{"{{ test_pub }}"}' + + - assert: + that: + - result.rowcount == 1 + + - name: Change publications + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + publications: + - '{{ test_pub }}' + - '{{ test_pub2 }}' + trust_input: no + + - assert: + that: + - result is changed + - result.name == '{{ test_subscription }}' + - result.final_state.publications != result.initial_state.publications + - result.final_state.publications == ['{{ test_pub }}', '{{ test_pub2 }}'] + - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} SET PUBLICATION {{ test_pub }}, {{ test_pub2 }}'] + + - name: Check + <<: *task_parameters + postgresql_query: + <<: 
*pg_parameters + login_port: '{{ replica_port }}' + query: > + SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}' + AND subpublications = '{"{{ test_pub }}", "{{ test_pub2 }}"}' + + - assert: + that: + - result.rowcount == 1 + + - name: Change publications with the same values again + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + publications: + - '{{ test_pub }}' + - '{{ test_pub2 }}' + trust_input: no + + - assert: + that: + - result is not changed + - result.name == '{{ test_subscription }}' + - result.final_state.publications == result.initial_state.publications + - result.final_state.publications == ['{{ test_pub }}', '{{ test_pub2 }}'] + - result.queries == [] + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: > + SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}' + AND subpublications = '{"{{ test_pub }}", "{{ test_pub2 }}"}' + + - assert: + that: + - result.rowcount == 1 + + ###################### + # Test update conninfo + ###################### + + - name: Change conninfo in check mode + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + connparams: + host: 127.0.0.1 + port: '{{ master_port }}' + user: '{{ replication_role }}' + password: '{{ replication_pass }}' + dbname: '{{ test_db }}' + connect_timeout: '{{ conn_timeout }}' + trust_input: no + check_mode: yes + + - assert: + that: + - result is changed + - result.name == '{{ test_subscription }}' + - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} CONNECTION 'host=127.0.0.1 port={{ master_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }} connect_timeout={{ conn_timeout }}'"] + - result.initial_state.conninfo 
== result.final_state.conninfo + + - name: Change conninfo + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + connparams: + host: 127.0.0.1 + port: '{{ master_port }}' + user: '{{ replication_role }}' + password: '{{ replication_pass }}' + dbname: '{{ test_db }}' + connect_timeout: '{{ conn_timeout }}' + trust_input: no + + - assert: + that: + - result is changed + - result.name == '{{ test_subscription }}' + - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} CONNECTION 'host=127.0.0.1 port={{ master_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }} connect_timeout={{ conn_timeout }}'"] + - result.initial_state.conninfo != result.final_state.conninfo + + - name: Check + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ replica_port }}' + query: "SELECT * FROM pg_subscription WHERE subname = '{{ test_subscription }}'" + + - assert: + that: + - result.query_result[0].subconninfo == "host=127.0.0.1 port={{ master_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }} connect_timeout={{ conn_timeout }}" + + - name: Try to change conninfo again with the same values + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: present + connparams: + host: 127.0.0.1 + port: '{{ master_port }}' + user: '{{ replication_role }}' + password: '{{ replication_pass }}' + dbname: '{{ test_db }}' + connect_timeout: '{{ conn_timeout }}' + trust_input: no + + - assert: + that: + - result is not changed + - result.name == '{{ test_subscription }}' + - result.queries == [] + - result.initial_state.conninfo == result.final_state.conninfo + - result.final_state.conninfo.connect_timeout == {{ conn_timeout }} + + #################### + # Test state refresh + #################### + 
+ - name: Refresh in check mode + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: refresh + check_mode: yes + + - assert: + that: + - result is changed + - result.name == '{{ test_subscription }}' + - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} REFRESH PUBLICATION"] + + - name: Refresh + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: refresh + + - assert: + that: + - result is changed + - result.name == '{{ test_subscription }}' + - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} REFRESH PUBLICATION"] + + ########## + # Clean up + ########## + - name: Drop subscription + <<: *task_parameters + postgresql_subscription: + <<: *pg_parameters + login_port: '{{ replica_port }}' + name: '{{ test_subscription }}' + state: absent diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml new file mode 100644 index 00000000..dc99f89d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml @@ -0,0 +1,84 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Preparation for further tests of postgresql_subscription module. 
+ +- vars: + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: '{{ test_db }}' + + block: + - name: postgresql_publication - create test db + <<: *task_parameters + postgresql_db: + login_user: '{{ pg_user }}' + login_port: '{{ master_port }}' + maintenance_db: '{{ db_default }}' + name: '{{ test_db }}' + + - name: postgresql_publication - create test role + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + login_port: '{{ master_port }}' + name: '{{ replication_role }}' + password: '{{ replication_pass }}' + role_attr_flags: LOGIN,REPLICATION + + - name: postgresql_publication - create test table + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + login_port: '{{ master_port }}' + name: '{{ test_table1 }}' + columns: + - id int + + - name: Master - dump schema + <<: *task_parameters + shell: pg_dumpall -p '{{ master_port }}' -s > /tmp/schema.sql + + - name: Replicat restore schema + <<: *task_parameters + shell: psql -p '{{ replica_port }}' -f /tmp/schema.sql + + - name: postgresql_publication - create publication + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + login_port: '{{ master_port }}' + name: '{{ test_pub }}' + + - assert: + that: + - result is changed + - result.exists == true + - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES"] + - result.owner == '{{ pg_user }}' + - result.alltables == true + - result.tables == [] + - result.parameters.publish != {} + + - name: postgresql_publication - create one more publication + <<: *task_parameters + postgresql_publication: + <<: *pg_parameters + login_port: '{{ master_port }}' + name: '{{ test_pub2 }}' + + - name: postgresql_publication - check the publication was created + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + login_port: '{{ master_port }}' + query: > + SELECT * FROM pg_publication WHERE 
pubname = '{{ test_pub }}' + AND pubowner = '10' AND puballtables = 't' + + - assert: + that: + - result.rowcount == 1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/aliases new file mode 100644 index 00000000..0d91b7de --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group5 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/main.yml new file mode 100644 index 00000000..3534c73b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/main.yml @@ -0,0 +1,7 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_table module +- import_tasks: postgresql_table_initial.yml diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml new file mode 100644 index 00000000..c06403a4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml @@ -0,0 +1,899 @@ +# Test code for the postgresql_set module + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Create a role for tests: +- name: postgresql_table - create a role for tests + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + db: postgres + login_user: "{{ pg_user }}" + name: alice + +- name: postgresql_table - create test schema + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: postgres + login_user: "{{ pg_user }}" + name: acme + +# +# Check table creation +# + +# Create a simple table in check_mode: +- name: postgresql_table - create table in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + login_db: postgres + login_port: 5432 + login_user: "{{ pg_user }}" + name: test1 + owner: alice + columns: id int + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result is changed + - result.table == 'test1' + - result.queries == ['CREATE TABLE "test1" (id int)', 'ALTER TABLE "test1" OWNER TO "alice"'] + - result.state == 'absent' + +# Check that the table doesn't exist after the previous step, rowcount must be 0 +- name: postgresql_table - check that table doesn't exist after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM 
pg_stat_all_tables WHERE relname = 'test1'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 0 + +# Create a simple table: +- name: postgresql_table - create table + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + login_db: postgres + login_port: 5432 + login_user: "{{ pg_user }}" + name: test1 + owner: alice + columns: id int + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.table == 'test1' + - result.queries == ['CREATE TABLE "test1" (id int)', 'ALTER TABLE "test1" OWNER TO "alice"'] + - result.state == 'present' + - result.storage_params == [] + - result.tablespace == "" + - result.owner == "alice" + +# Check that the table exists after the previous step, rowcount must be 1 +- name: postgresql_table - check that table exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test1'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# Check that the tableowner is alice +- name: postgresql_table - check that table owner is alice + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_tables WHERE tablename = 'test1' AND tableowner = 'alice'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# +# Check create table like another table +# + +# Create a table LIKE another table without any additional parameters in check_mode: +- name: postgresql_table - create table like in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test2 + like: test1 + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result is changed + - result.table == 'test2' + - result.queries == ['CREATE 
TABLE "test2" (LIKE "test1")'] + - result.state == 'absent' + +# Check that the table doesn't exist after the previous step, rowcount must be 0 +- name: postgresql_table - check that table doesn't exist after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 0 + +# Create a table LIKE another table without any additional parameters: +- name: postgresql_table - create table like + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test2 + like: test1 + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.table == 'test2' + - result.queries == ['CREATE TABLE "test2" (LIKE "test1")'] + - result.state == 'present' + - result.storage_params == [] + - result.tablespace == "" + - result.owner == "{{ pg_user }}" + +# Check that the table exists after the previous step, rowcount must be 1 +- name: postgresql_table - check that table exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# +# Check drop table +# + +# Drop a table in check_mode: +- name: postgresql_table - drop table in check_mode + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test2 + state: absent + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result is changed + - result.queries == ['DROP TABLE "test2"'] + - result.state == 'present' + - result.storage_params == [] + - result.tablespace == "" + - result.owner == "{{ pg_user }}" + +# 
Check that the table exists after the previous step, rowcount must be 1 +- name: postgresql_table - check that table exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# Drop a table: +- name: postgresql_table - drop table + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test2 + state: absent + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.queries == ['DROP TABLE "test2"'] + - result.state == 'absent' + +# Check that the table doesn't exist after the previous step, rowcount must be 0 +- name: postgresql_table - check that table doesn't exist after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 0 + +# Create a table like another table including: +- name: postgresql_table - create table like with including indexes + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test2 + like: test1 + including: indexes + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.queries == ['CREATE TABLE "test2" (LIKE "test1" INCLUDING indexes)'] + - result.state == 'present' + - result.storage_params == [] + - result.tablespace == "" + - result.owner == "{{ pg_user }}" + +# Check to create table if it exists: +- name: postgresql_table - try to create existing table again + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test2 + 
like: test1 + including: indexes + register: result + ignore_errors: yes + +- assert: + that: + - result is not changed + +# Drop the table to prepare for the next step: +- name: postgresql_table - drop table + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test2 + state: absent + register: result + ignore_errors: yes + +# Try to drop non existing table: +- name: postgresql_table - try drop dropped table again + become_user: "{{ pg_user }}" + become: yes + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test2 + state: absent + register: result + ignore_errors: yes + +- assert: + that: + - result is not changed + +# +# Change ownership +# + +# Create user to prepare for the next step: +- name: postgresql_table - create the new user test_user + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + login_user: "{{ pg_user }}" + db: postgres + name: test_user + state: present + ignore_errors: yes + +# Try to change owner to test_user in check_mode +- name: postgresql_table - change table ownership to test_user in check_mode + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test1 + owner: test_user + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result.owner == 'alice' + - result.queries == ['ALTER TABLE "test1" OWNER TO "test_user"'] + - result.state == 'present' + - result is changed + +# Check that the tableowner was not changed to test_user +- name: postgresql_table - check that table owner was not changed + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_tables WHERE tablename = 'test1' AND tableowner = 'test_user'" + ignore_errors: yes + register: result + +- assert: + that: + - result is not changed + +# Try to change owner to test_user +- name: postgresql_table - change table 
ownership to test_user + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test1 + owner: test_user + register: result + ignore_errors: yes + +- assert: + that: + - result.owner == 'test_user' + - result.queries == ['ALTER TABLE "test1" OWNER TO "test_user"'] + - result.state == 'present' + - result is changed + +# Check that the tableowner was changed to test_user +- name: postgresql_table - check that table owner was changed + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_tables WHERE tablename = 'test1' AND tableowner = 'test_user'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# +# Additional storage parameters +# + +# Create a table with additional storage parameters: +- name: postgresql_table - create table with storage_params + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test3 + columns: + - id int + - name text + storage_params: + - fillfactor=10 + - autovacuum_analyze_threshold=1 + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.state == 'present' + - result.queries == ['CREATE TABLE "test3" (id int,name text) WITH (fillfactor=10,autovacuum_analyze_threshold=1)'] + - result.storage_params == [ "fillfactor=10", "autovacuum_analyze_threshold=1" ] + +# Check storage parameters +- name: postgresql_table - check storage parameters + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT reloptions FROM pg_class WHERE relname = 'test3'" + ignore_errors: yes + register: result + +- assert: + that: + - result.query_result[0].reloptions == ["fillfactor=10", "autovacuum_analyze_threshold=1"] +# +# Check truncate table +# + +# Insert a row to test table: +- name: postgresql_table - insert a 
row + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "INSERT INTO test3 (id, name) VALUES (1, 'first')" + +# Truncate a table in check_mode: +- name: postgresql_table - truncate table + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test3 + truncate: yes + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result is changed + - result.queries == ['TRUNCATE TABLE "test3"'] + - result.state == "present" + +# Check the row exists: +- name: postgresql_table - check that row exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT * FROM test3 WHERE id = '1'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# Truncate a table. It always returns changed == true +# because it always creates a new table with the same schema and drop the old table: +- name: postgresql_table - truncate table + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test3 + truncate: yes + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.queries == ['TRUNCATE TABLE "test3"'] + - result.state == "present" + +# Check the row exists: +- name: postgresql_table - check that row doesn't exist after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT * FROM test3 WHERE id = '1'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 0 + +# +# Check rename table +# + +# Rename a table in check_mode. 
+# In check_mode test4 won't be exist after the following playbook, +# so result.changed == 'absent' for the table with this name +- name: postgresql_table - rename table in check_mode + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test3 + rename: test4 + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result is changed + - result.queries == ['ALTER TABLE "test3" RENAME TO "test4"'] + - result.state == "absent" + +# Check that the table exists after the previous step, rowcount must be 1 +- name: postgresql_table - check that table exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test3'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# Rename a table: +- name: postgresql_table - rename table + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test3 + rename: test4 + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.queries == ['ALTER TABLE "test3" RENAME TO "test4"'] + - result.state == "present" + +# Check that the table test 3 doesn't exist after the previous step, rowcount must be - 0 +- name: postgresql_table - check that table exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test3'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 0 + +# Check that the table test 4 exists after the previous step, rowcount must be - 1 +- name: postgresql_table - check that table exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + 
login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test4'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +# +# Check create unlogged table +# + +# Create unlogged table in check_mode: +- name: postgresql_table - create unlogged table in check_mode + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test5 + unlogged: yes + register: result + ignore_errors: yes + check_mode: yes + +- assert: + that: + - result is changed + - result.queries == ['CREATE UNLOGGED TABLE "test5" ()'] + when: postgres_version_resp.stdout is version('9.1', '>=') + +# Check that the table doesn't exist after the previous step, rowcount must be - 0 +- name: postgresql_table - check that table doesn't exist after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test5'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 0 + +# Create unlogged table: +- name: postgresql_table - create unlogged table + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test5 + unlogged: yes + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.queries == ['CREATE UNLOGGED TABLE "test5" ()'] + when: postgres_version_resp.stdout is version('9.1', '>=') + +# Check that the table exists after the previous step, rowcount must be - 1 +- name: postgresql_table - check that table exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test5'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('9.1', '>=') + +- assert: + that: + 
- result.rowcount == 1 + when: postgres_version_resp.stdout is version('9.1', '>=') + +# Drop table CASCADE: +- name: postgresql_table - drop table cascade + become: yes + become_user: "{{ pg_user }}" + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test5 + state: absent + cascade: yes + register: result + ignore_errors: yes + +- assert: + that: + - result is changed + - result.queries == ['DROP TABLE "test5" CASCADE'] + when: postgres_version_resp.stdout is version('9.1', '>=') + +# Check that the table doesn't exist after the previous step, rowcount must be - 0 +- name: postgresql_table - check that table doesn't exist after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test5'" + ignore_errors: yes + register: result + when: postgres_version_resp.stdout is version('9.1', '>=') + +- assert: + that: + - result.rowcount == 0 + when: postgres_version_resp.stdout is version('9.1', '>=') + +# +# Create, drop, and rename table in a specific schema: +# +- name: postgresql_table - create table in a specific schema + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: acme.test_schema_table + register: result + +- assert: + that: + - result is changed + - result.queries == ['CREATE TABLE "acme"."test_schema_table" ()'] + +- name: postgresql_table - check that table exists after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test_schema_table' and schemaname = 'acme'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_table - try to create a table with the same name and schema again + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: acme.test_schema_table + register: result + 
+- assert: + that: + - result is not changed + +- name: postgresql_table - create a table in the default schema for the next test + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: test_schema_table + register: result + +- assert: + that: + - result is changed + +- name: postgresql_table - drop the table from schema acme + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: postgres.acme.test_schema_table + state: absent + register: result + +- assert: + that: + - result is changed + - result.queries == ['DROP TABLE "postgres"."acme"."test_schema_table"'] + +- name: postgresql_table - check that the table doesn't exist after the previous step + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test_schema_table' and schemaname = 'acme'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 0 + +- name: postgresql_table - try to drop the table from schema acme again + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: acme.test_schema_table + state: absent + register: result + +- assert: + that: + - result is not changed + +- name: postgresql_table - check that the table with the same name in schema public exists + become_user: "{{ pg_user }}" + become: yes + postgresql_query: + db: postgres + login_user: "{{ pg_user }}" + query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test_schema_table' and schemaname = 'public'" + ignore_errors: yes + register: result + +- assert: + that: + - result.rowcount == 1 + +- name: postgresql_table - rename the table that contents a schema name + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: public.test_schema_table + rename: new_test_schema_table + trust_input: yes + register: result + +- assert: + that: + - result is changed + - result.queries == ['ALTER TABLE "public"."test_schema_table" RENAME 
TO "new_test_schema_table"'] + +############################ +# Test trust_input parameter +- name: postgresql_table - check trust_input + postgresql_table: + db: postgres + login_user: "{{ pg_user }}" + name: postgres.acme.test_schema_table + state: absent + trust_input: no + session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + register: result + ignore_errors: yes + +- assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') + +# +# Clean up +# +- name: postgresql_table - drop test schema + become_user: "{{ pg_user }}" + become: yes + postgresql_schema: + database: postgres + login_user: "{{ pg_user }}" + name: acme + state: absent + cascade_drop: yes + +- name: postgresql_table - drop test role + become_user: "{{ pg_user }}" + become: yes + postgresql_user: + db: postgres + login_user: "{{ pg_user }}" + name: "{{ item }}" + state: absent + loop: + - test_user + - alice + ignore_errors: yes diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/defaults/main.yml new file mode 100644 index 00000000..1eb5b843 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/defaults/main.yml @@ -0,0 +1,3 @@ +--- +test_tablespace_path: 
"/ssd" +dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/main.yml new file mode 100644 index 00000000..21a47ee3 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/main.yml @@ -0,0 +1,7 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_tablespace module +- import_tasks: postgresql_tablespace_initial.yml diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml new file mode 100644 index 00000000..f5884d99 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml @@ -0,0 +1,245 @@ +- name: postgresql_tablespace - drop dir for test tablespace + become: true + file: + path: '{{ test_tablespace_path }}' + state: absent + ignore_errors: true + +- name: postgresql_tablespace - disable selinux + become: true + shell: setenforce 0 + ignore_errors: true + +- name: postgresql_tablespace - create dir for test tablespace + become: true + file: + path: '{{ test_tablespace_path }}' + state: directory + owner: '{{ pg_user }}' + group: '{{ pg_user }}' + mode: '0700' + ignore_errors: true + +- name: postgresql_tablespace - create test role to test change ownership + become_user: '{{ pg_user }}' + become: true + postgresql_user: + db: postgres + login_user: '{{ pg_user }}' + name: bob + state: present + ignore_errors: true + +- name: postgresql_tablespace - create test role to test change ownership + become_user: '{{ pg_user }}' + become: true + postgresql_user: + db: postgres + login_user: '{{ pg_user }}' + name: alice + state: present + ignore_errors: true + +- name: postgresql_tablespace - create a new tablespace called acme and set bob as an its owner + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + owner: bob + location: /ssd + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.owner == 'bob' + - result.queries == ["CREATE TABLESPACE \"acme\" LOCATION '/ssd'", "ALTER TABLESPACE \"acme\" OWNER TO \"bob\""] + - result.state == 'present' + - result.tablespace == 'acme' + - result.options == {} + - result.location == '/ssd' + +- name: postgresql_tablespace - try to create the same tablespace with different location + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + location: 
/another-ssd + register: result + ignore_errors: true + +- assert: + that: + - result is not changed + - result.msg == "Tablespace 'acme' exists with different location '/ssd'" + +- name: postgresql_tablespace - change tablespace owner to alice + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + owner: alice + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.owner == 'alice' + - result.queries == ["ALTER TABLESPACE \"acme\" OWNER TO \"alice\""] + - result.state == 'present' + - result.tablespace == 'acme' + - result.options == {} + +- name: postgresql_tablespace - try to change tablespace owner to alice again to be sure that nothing changes + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + owner: alice + register: result + ignore_errors: true + +- assert: + that: + - result is not changed + - result.owner == 'alice' + - result.queries == [] + - result.state == 'present' + - result.tablespace == 'acme' + - result.options == {} + +- name: postgresql_tablespace - change tablespace options + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + set: + seq_page_cost: 4 + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.owner == 'alice' + - result.queries == ["ALTER TABLESPACE \"acme\" SET (seq_page_cost = '4')"] + - result.state == 'present' + - result.tablespace == 'acme' + - result.options.seq_page_cost == '4' + when: postgres_version_resp.stdout is version('9.0', '>=') + +- name: postgresql_tablespace - reset seq_page_cost option + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + login_db: postgres + login_user: '{{ pg_user }}' + name: acme + set: + seq_page_cost: reset + register: result + ignore_errors: true + +- assert: + 
that: + - result is changed + - result.queries == ["ALTER TABLESPACE \"acme\" RESET (seq_page_cost)"] + when: postgres_version_resp.stdout is version('9.0', '>=') + +- name: postgresql_tablespace - reset seq_page_cost option again + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + set: + seq_page_cost: reset + register: result + ignore_errors: true + +- assert: + that: + - result is not changed + - result.queries == [] + when: postgres_version_resp.stdout is version('9.0', '>=') + +- name: postgresql_tablespace - rename tablespace + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: acme + rename_to: foo + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.newname == 'foo' + - result.queries == ["ALTER TABLESPACE \"acme\" RENAME TO \"foo\""] + +- name: postgresql_tablespace - rename tablespace to potentially dangerous name + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: foo + rename_to: '{{ dangerous_name }}' + trust_input: no + register: result + ignore_errors: true + +- assert: + that: + - result is failed + - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous' + +- name: postgresql_tablespace - drop tablespace + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: foo + state: absent + trust_input: yes + register: result + ignore_errors: true + +- assert: + that: + - result is changed + - result.state == 'absent' + - result.queries == ["DROP TABLESPACE \"foo\""] + +- name: postgresql_tablespace - try to drop nonexistent tablespace + become_user: '{{ pg_user }}' + become: true + postgresql_tablespace: + db: postgres + login_user: '{{ pg_user }}' + name: foo + state: absent + register: result + 
ignore_errors: true + +- assert: + that: + - result is not changed + - result.msg == "Tries to drop nonexistent tablespace 'foo'" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/aliases new file mode 100644 index 00000000..a4c92ef8 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/aliases @@ -0,0 +1,2 @@ +destructive +shippable/posix/group1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml new file mode 100644 index 00000000..dbcbea12 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/defaults/main.yml @@ -0,0 +1,4 @@ +db_name: 'ansible_db' +db_user1: 'ansible_db_user1' +db_user2: 'ansible_db_user2' +dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml new file mode 100644 index 00000000..183494ed --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/main.yml @@ -0,0 +1,12 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial CI tests of postgresql_user module +- import_tasks: postgresql_user_initial.yml + when: postgres_version_resp.stdout is version('9.4', '>=') + +# General tests: +- import_tasks: postgresql_user_general.yml + when: postgres_version_resp.stdout is version('9.4', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml new file mode 100644 index 00000000..b007492d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml @@ -0,0 +1,775 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# Integration tests for postgresql_user module. 
+ +- vars: + test_user: hello.user.with.dots + test_user2: hello + test_group1: group1 + test_group2: group2 + test_table: test + test_comment1: 'comment1' + test_comment2: 'comment2' + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: postgres + + block: + # + # Common tests + # + - name: Create role in check_mode + <<: *task_parameters + check_mode: yes + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + trust_input: no + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: check that the user doesn't exist + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 0 + + - name: Create role in actual mode + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: check that the user exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Add a comment on the user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + comment: '{{ test_comment1 }}' + + - assert: + that: + - result is changed + - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment1 }}'"] + + - name: check the comment + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment + FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}' + + - assert: + that: + - result.rowcount == 1 + - result.query_result[0].comment == '{{ test_comment1 }}' + + - name: Try to add the same comment on the user + 
<<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + comment: '{{ test_comment1 }}' + trust_input: no + + - assert: + that: + - result is not changed + + - name: Try to add another comment on the user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + comment: '{{ test_comment2 }}' + + - assert: + that: + - result is changed + - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment2 }}'"] + + - name: check the comment + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment + FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}' + + - assert: + that: + - result.rowcount == 1 + - result.query_result[0].comment == '{{ test_comment2 }}' + + - name: Try to create role again in check_mode + <<: *task_parameters + check_mode: yes + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: check that the user exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Try to create role again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: check that the user exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Drop role in check_mode + <<: *task_parameters + check_mode: yes + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + state: absent + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: check 
that the user actually exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Drop role in actual mode + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + state: absent + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: check that the user doesn't exist + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'" + + - assert: + that: + - result.rowcount == 0 + + - name: Try to drop role in check mode again + <<: *task_parameters + check_mode: yes + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + state: absent + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: Try to drop role in actual mode again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + state: absent + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + # + # password, no_password_changes, encrypted, expires parameters + # + + - name: Create role with password, passed as hashed md5 + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + password: md59543f1d82624df2b31672ec0f7050460 + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: Check that the user exist with a proper password + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'" + + - assert: + that: + - result.rowcount == 1 + + - name: Test no_password_changes + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + password: u123 + no_password_changes: yes + + - 
assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + + - name: Check that nothing changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'" + + - assert: + that: + - result.rowcount == 1 + + # Storing unencrypted passwords is not available from PostgreSQL 10 + - name: Change password, passed as unencrypted + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + password: myunencryptedpass + encrypted: no + when: postgres_version_resp.stdout is version('10', '<') + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + when: postgres_version_resp.stdout is version('10', '<') + + - name: Check that the user exist with the unencrypted password + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'myunencryptedpass'" + when: postgres_version_resp.stdout is version('10', '<') + + - assert: + that: + - result.rowcount == 1 + when: postgres_version_resp.stdout is version('10', '<') + + - name: Change password, explicit encrypted=yes + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + password: myunencryptedpass + encrypted: yes + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: Check that the user exist with encrypted password + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword != 'myunencryptedpass'" + + - assert: + that: + - result.rowcount == 1 + + - name: Change rolvaliduntil attribute + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + expires: 'Jan 31 2020' + trust_input: no + + - assert: + that: + - result is 
changed + - result.user == '{{ test_user }}' + + - name: Check the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolvaliduntil::text like '2020-01-31%' + + - assert: + that: + - result.rowcount == 1 + + - name: Try to set the same rolvaliduntil value again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + expires: 'Jan 31 2020' + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: Check that nothing changed + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolvaliduntil::text like '2020-01-31%' + + - assert: + that: + - result.rowcount == 1 + + # + # role_attr_flags + # + - name: Set role attributes + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + role_attr_flags: CREATEROLE,CREATEDB + trust_input: no + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: Check the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolcreaterole = 't' and rolcreatedb = 't' + + - assert: + that: + - result.rowcount == 1 + + - name: Set the same role attributes again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + role_attr_flags: CREATEROLE,CREATEDB + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: Check the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolcreaterole = 't' and rolcreatedb = 't' + + - name: Set role attributes + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + 
role_attr_flags: NOCREATEROLE,NOCREATEDB + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + + - name: Check the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolcreaterole = 'f' and rolcreatedb = 'f' + + - assert: + that: + - result.rowcount == 1 + + - name: Set role attributes + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + role_attr_flags: NOCREATEROLE,NOCREATEDB + + - assert: + that: + - result is not changed + - result.user == '{{ test_user }}' + + - name: Check the prev step + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: > + SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' + AND rolcreaterole = 'f' and rolcreatedb = 'f' + + # + # priv + # + - name: Create test table + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_table }}' + columns: + - id int + + - name: Insert data to test table + <<: *task_parameters + postgresql_query: + query: "INSERT INTO {{ test_table }} (id) VALUES ('1')" + <<: *pg_parameters + + - name: Check that test_user is not allowed to read the data + <<: *task_parameters + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + session_role: '{{ test_user }}' + query: 'SELECT * FROM {{ test_table }}' + ignore_errors: yes + + - assert: + that: + - result is failed + - "'permission denied' in result.msg" + + - name: Grant privileges + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + priv: '{{ test_table }}:SELECT' + trust_input: no + + - assert: + that: + - result is changed + + - name: Check that test_user is allowed to read the data + <<: *task_parameters + postgresql_query: + db: postgres + login_user: '{{ pg_user }}' + session_role: '{{ test_user }}' + query: 'SELECT * FROM {{ test_table }}' + + - assert: + that: + - result.rowcount == 1 
+ + - name: Grant the same privileges again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + priv: '{{ test_table }}:SELECT' + + - assert: + that: + - result is not changed + + - name: Remove test table + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_table }}' + state: absent + + # + # fail_on_user + # + - name: Create role for test + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user2 }}' + + - name: Create test table, set owner as test_user + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_table }}' + owner: '{{ test_user2 }}' + + - name: Test fail_on_user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user2 }}' + state: absent + ignore_errors: yes + + - assert: + that: + - result is failed + - result.msg == 'Unable to remove user' + + - name: Test fail_on_user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + fail_on_user: no + + - assert: + that: + - result is not changed + + # + # Test groups parameter + # + - name: Create test group + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_group2 }}' + role_attr_flags: NOLOGIN + + - name: Create role test_group1 and grant test_group2 to test_group1 in check_mode + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_group1 }}' + groups: '{{ test_group2 }}' + role_attr_flags: NOLOGIN + check_mode: yes + + - assert: + that: + - result is changed + - result.user == '{{ test_group1 }}' + - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"'] + + - name: check that the user doesn't exist + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'" + + - assert: + that: + - result.rowcount == 0 + + - name: check membership 
+ <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'" + + - assert: + that: + - result.rowcount == 0 + + - name: Create role test_group1 and grant test_group2 to test_group1 + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_group1 }}' + groups: '{{ test_group2 }}' + role_attr_flags: NOLOGIN + trust_input: no + + - assert: + that: + - result is changed + - result.user == '{{ test_group1 }}' + - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"'] + + - name: check that the user exists + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'" + + - assert: + that: + - result.rowcount == 1 + + - name: check membership + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Grant test_group2 to test_group1 again + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_group1 }}' + groups: '{{ test_group2 }}' + + - assert: + that: + - result is not changed + - result.user == '{{ test_group1 }}' + + - name: check membership + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'" + + - assert: + that: + - result.rowcount == 1 + + - name: Grant groups to existent role + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ test_user }}' + groups: + - '{{ test_group1 }}' + - '{{ test_group2 }}' + trust_input: no + + - assert: + that: + - result is changed + - result.user == '{{ test_user }}' + - result.queries == ['GRANT "{{ test_group1 }}" TO "{{ test_user }}"', 'GRANT "{{ test_group2 }}" TO "{{ 
test_user }}"'] + + - name: check membership + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: "SELECT * FROM pg_group WHERE groname in ('{{ test_group1 }}', '{{ test_group2 }}') AND grolist != '{}'" + + - assert: + that: + - result.rowcount == 2 + + ######################## + # Test trust_input param + + - name: Create role with potentially dangerous name, don't trust + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ dangerous_name }}' + trust_input: no + ignore_errors: yes + + - assert: + that: + - result is failed + - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous' + + - name: Create role with potentially dangerous name, trust + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ dangerous_name }}' + + - assert: + that: + - result is changed + + always: + # + # Clean up + # + - name: Drop test table + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_table }}' + state: absent + + - name: Drop test user + <<: *task_parameters + postgresql_user: + <<: *pg_parameters + name: '{{ item }}' + state: absent + loop: + - '{{ test_user }}' + - '{{ test_user2 }}' + - '{{ test_group1 }}' + - '{{ test_group2 }}' + - '{{ dangerous_name }}' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml new file mode 100644 index 00000000..79be2237 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml @@ -0,0 +1,156 @@ +# +# Create and destroy user, test 'password' and 'encrypted' parameters +# +# unencrypted values are not supported on newer versions +# do not run the encrypted: no tests if on 10+ +- 
ansible.builtin.set_fact: + encryption_values: + - 'yes' + +- ansible.builtin.set_fact: + encryption_values: '{{ encryption_values + ["no"]}}' + when: postgres_version_resp.stdout is version('10', '<=') + +- include_tasks: test_password.yml + vars: + encrypted: '{{ loop_item }}' + db_password1: 'secretù' # use UTF-8 + loop: '{{ encryption_values }}' + loop_control: + loop_var: loop_item + +# BYPASSRLS role attribute was introduced in PostgreSQL 9.5, so +# we want to test attribute management differently depending +# on the version. +- ansible.builtin.set_fact: + bypassrls_supported: "{{ postgres_version_resp.stdout is version('9.5.0', '>=') }}" + +# test 'no_password_change' and 'role_attr_flags' parameters +- include_tasks: test_no_password_change.yml + vars: + no_password_changes: '{{ loop_item }}' + loop: + - 'yes' + - 'no' + loop_control: + loop_var: loop_item + +### TODO: fail_on_user + +# +# Test login_user functionality +# +- name: Create a user to test login module parameters + become: yes + become_user: "{{ pg_user }}" + postgresql_user: + name: "{{ db_user1 }}" + state: "present" + encrypted: 'yes' + password: "password" + role_attr_flags: "CREATEDB,LOGIN,CREATEROLE" + login_user: "{{ pg_user }}" + trust_input: no + db: postgres + +- name: Create db + postgresql_db: + name: "{{ db_name }}" + state: "present" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + +- name: Check that database created + become: yes + become_user: "{{ pg_user }}" + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres + register: result + +- ansible.builtin.assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- name: Create a user + postgresql_user: + name: "{{ db_user2 }}" + state: "present" + encrypted: 'yes' + password: "md55c8ccfd9d6711fc69a7eae647fc54f51" + db: "{{ db_name }}" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + trust_input: no + +- 
name: Check that it was created + become: yes + become_user: "{{ pg_user }}" + shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres + register: result + +- ansible.builtin.assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + +- name: Grant database privileges + postgresql_privs: + type: "database" + state: "present" + roles: "{{ db_user2 }}" + privs: "CREATE,connect" + objs: "{{ db_name }}" + db: "{{ db_name }}" + login: "{{ db_user1 }}" + password: "password" + host: "localhost" + +- name: Check that the user has the requested permissions (database) + become: yes + become_user: "{{ pg_user }}" + shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }} + register: result_database + +- ansible.builtin.assert: + that: + - "result_database.stdout_lines[-1] == '(1 row)'" + - "db_user2 ~ '=Cc' in result_database.stdout" + +- name: Remove user + postgresql_user: + name: "{{ db_user2 }}" + state: 'absent' + priv: "ALL" + db: "{{ db_name }}" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + trust_input: no + +- name: Check that they were removed + become: yes + become_user: "{{ pg_user }}" + shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres + register: result + +- ansible.builtin.assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + +- name: Destroy DB + postgresql_db: + state: absent + name: "{{ db_name }}" + login_user: "{{ db_user1 }}" + login_password: "password" + login_host: "localhost" + +- name: Check that database was destroyed + become: yes + become_user: "{{ pg_user }}" + shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres + register: result + +- ansible.builtin.assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml new file mode 100644 index 00000000..c296c0ea --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml @@ -0,0 +1,167 @@ +- vars: + task_parameters: &task_parameters + become_user: "{{ pg_user }}" + become: yes + register: result + postgresql_parameters: ¶meters + db: postgres + name: "{{ db_user1 }}" + login_user: "{{ pg_user }}" + + block: + + - name: Create a user with all role attributes + <<: *task_parameters + postgresql_user: + <<: *parameters + state: "present" + role_attr_flags: "SUPERUSER,CREATEROLE,CREATEDB,INHERIT,LOGIN{{ bypassrls_supported | ternary(',BYPASSRLS', '') }}" + no_password_changes: '{{ no_password_changes }}' # no_password_changes is ignored when user doesn't already exist + + - name: Check that the user has the requested role attributes + <<: *task_parameters + shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin {{ bypassrls_supported | ternary(\", 'bypassrls:'||rolbypassrls\", '') }} from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres" + + - assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'super:t' in result.stdout_lines[-2]" + - "'createrole:t' in result.stdout_lines[-2]" + - "'create:t' in result.stdout_lines[-2]" + - "'inherit:t' in result.stdout_lines[-2]" + - "'login:t' in result.stdout_lines[-2]" + + - block: + - name: Check that the user has the requested role attribute BYPASSRLS + <<: *task_parameters + shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres" + + 
- assert: + that: + - "not bypassrls_supported or 'bypassrls:t' in result.stdout_lines[-2]" + when: bypassrls_supported + + - name: Modify a user to have no role attributes + <<: *task_parameters + postgresql_user: + <<: *parameters + state: "present" + role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }}" + no_password_changes: '{{ no_password_changes }}' + + - name: Check that ansible reports it modified the role + assert: + that: + - result is changed + + - name: "Check that the user doesn't have any attribute" + <<: *task_parameters + shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres" + + - assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'super:f' in result.stdout_lines[-2]" + - "'createrole:f' in result.stdout_lines[-2]" + - "'create:f' in result.stdout_lines[-2]" + - "'inherit:f' in result.stdout_lines[-2]" + - "'login:f' in result.stdout_lines[-2]" + + - block: + - name: Check that the user has the requested role attribute BYPASSRLS + <<: *task_parameters + shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres" + + - assert: + that: + - "not bypassrls_supported or 'bypassrls:f' in result.stdout_lines[-2]" + when: bypassrls_supported + + - name: Try to add an invalid attribute + <<: *task_parameters + postgresql_user: + <<: *parameters + state: "present" + role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }},INVALID" + no_password_changes: '{{ no_password_changes }}' + ignore_errors: yes + + - name: Check that ansible reports failure + assert: + that: + - result is not changed + - result is failed + - "result.msg == 'Invalid role_attr_flags specified: INVALID'" + + - name: Modify a 
single role attribute on a user + <<: *task_parameters + postgresql_user: + <<: *parameters + state: "present" + role_attr_flags: "LOGIN" + no_password_changes: '{{ no_password_changes }}' + + - name: Check that ansible reports it modified the role + assert: + that: + - result is changed + + - name: Check the role attributes + <<: *task_parameters + shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres + + - assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + - "'super:f' in result.stdout_lines[-2]" + - "'createrole:f' in result.stdout_lines[-2]" + - "'create:f' in result.stdout_lines[-2]" + - "'inherit:f' in result.stdout_lines[-2]" + - "'login:t' in result.stdout_lines[-2]" + + - block: + - name: Check the role attribute BYPASSRLS + <<: *task_parameters + shell: echo "select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres + + - assert: + that: + - "( postgres_version_resp.stdout is version('9.5.0', '<')) or 'bypassrls:f' in result.stdout_lines[-2]" + when: bypassrls_supported + + - name: Check that using same attribute a second time does nothing + <<: *task_parameters + postgresql_user: + <<: *parameters + state: "present" + role_attr_flags: "LOGIN" + no_password_changes: '{{ no_password_changes }}' + environment: + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - name: Check there isn't any update reported + assert: + that: + - result is not changed + + - name: Cleanup the user + <<: *task_parameters + postgresql_user: + <<: *parameters + state: 'absent' + no_password_changes: '{{ no_password_changes }}' # user deletion: no_password_changes is ignored + + - name: Check that user was removed + <<: *task_parameters + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres + + - 
assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + + always: + - name: Cleanup the user + <<: *task_parameters + postgresql_user: + <<: *parameters + state: 'absent' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_password.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_password.yml new file mode 100644 index 00000000..0f1edcff --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user/tasks/test_password.yml @@ -0,0 +1,429 @@ +- vars: + task_parameters: &task_parameters + become_user: "{{ pg_user }}" + become: yes + register: result + postgresql_query_parameters: &query_parameters + db: postgres + login_user: "{{ pg_user }}" + postgresql_parameters: ¶meters + <<: *query_parameters + name: "{{ db_user1 }}" + + block: + - name: 'Check that PGOPTIONS environment variable is effective (1/2)' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '{{ db_password1 }}' + ignore_errors: true + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - name: 'Check that PGOPTIONS environment variable is effective (2/2)' + assert: + that: + - "{{ result is failed }}" + + - name: 'Create a user (password encrypted: {{ encrypted }})' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '{{ db_password1 }}' + encrypted: '{{ encrypted }}' + environment: + PGCLIENTENCODING: 'UTF8' + + - block: &changed # block is only used here in order to be able to define YAML anchor + - name: Check that ansible reports it was created + assert: + that: + - "{{ result is changed }}" + + - name: Check that it was created + <<: *task_parameters + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres + + - 
assert: + that: + - "result.stdout_lines[-1] == '(1 row)'" + + - name: Check that creating user a second time does nothing + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '{{ db_password1 }}' + encrypted: '{{ encrypted }}' + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - block: ¬_changed # block is only used here in order to be able to define YAML anchor + - name: Check that ansible reports no change + assert: + that: + - "{{ result is not changed }}" + + - name: 'Define an expiration time' + <<: *task_parameters + postgresql_user: + <<: *parameters + expires: '2025-01-01' + environment: + PGCLIENTENCODING: 'UTF8' + + - <<: *changed + + - name: 'Redefine the same expiration time' + <<: *task_parameters + postgresql_user: + expires: '2025-01-01' + <<: *parameters + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - block: + + - name: 'Using MD5-hashed password: check that password not changed when using cleartext password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '{{ db_password1 }}' + encrypted: 'yes' + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'ENCRYPTED'" + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}" + encrypted: 'yes' + environment: + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'UNENCRYPTED'" + <<: *task_parameters + postgresql_user: + <<: *parameters + 
password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}" + encrypted: 'no' + environment: + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: 'Redefine the same expiration time and password (encrypted)' + <<: *task_parameters + postgresql_user: + <<: *parameters + encrypted: 'yes' + password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}" + expires: '2025-01-01' + environment: + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: 'Using MD5-hashed password: check that password changed when using another cleartext password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: 'prefix{{ db_password1 }}' + encrypted: 'yes' + environment: + PGCLIENTENCODING: 'UTF8' + + - <<: *changed + + - name: "Using MD5-hashed password: check that password changed when using another md5 hash with 'ENCRYPTED'" + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "md5{{ ('prefix1' ~ db_password1 ~ db_user1) | hash('md5')}}" + encrypted: 'yes' + + - <<: *changed + + - name: "Using MD5-hashed password: check that password changed when using md5 hash with 'UNENCRYPTED'" + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "md5{{ ('prefix2' ~ db_password1 ~ db_user1) | hash('md5')}}" + encrypted: 'no' + register: change_pass_unencrypted + failed_when: + - change_pass_unencrypted is failed + # newer version of psycopg2 no longer supported unencrypted password, we ignore the error + - '"UNENCRYPTED PASSWORD is no longer supported" not in change_pass_unencrypted.msg' + + - <<: *changed + + - name: 'Using MD5-hashed password: check that password changed when clearing the password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: 'yes' + environment: + PGCLIENTENCODING: 'UTF8' + + - <<: *changed + + - name: 'Using MD5-hashed password: check 
that password not changed when clearing the password again' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: 'yes' + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: 'Using cleartext password: check that password not changed when clearing the password again' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: 'no' + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: 'Using MD5-hashed password: check that password changed when using a cleartext password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '{{ db_password1 }}' + encrypted: 'yes' + environment: + PGCLIENTENCODING: 'UTF8' + + - <<: *changed + + when: encrypted == 'yes' + + - block: + + - name: 'Using cleartext password: check that password not changed when using cleartext password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "{{ db_password1 }}" + encrypted: 'no' + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: 'Redefine the same expiration time and password (not encrypted)' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "{{ db_password1 }}" + encrypted: 'no' + expires: '2025-01-01' + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: 'Using cleartext password: check that password changed when using another cleartext password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "changed{{ db_password1 }}" + encrypted: 'no' + environment: + PGCLIENTENCODING: 'UTF8' + + 
- <<: *changed + + - name: 'Using cleartext password: check that password changed when clearing the password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: 'no' + environment: + PGCLIENTENCODING: 'UTF8' + + - <<: *changed + + - name: 'Using cleartext password: check that password not changed when clearing the password again' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: 'no' + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: 'Using MD5-hashed password: check that password not changed when clearing the password again' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: 'yes' + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + - name: 'Using cleartext password: check that password changed when using cleartext password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "{{ db_password1 }}" + encrypted: 'no' + environment: + PGCLIENTENCODING: 'UTF8' + + - <<: *changed + + when: encrypted == 'no' + + # start of block scram-sha-256 + # scram-sha-256 password encryption type is supported since PostgreSQL 10 + - when: postgres_version_resp.stdout is version('10', '>=') + block: + + - name: 'Using cleartext password with scram-sha-256: resetting password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "" + encrypted: "{{ encrypted }}" + environment: + PGCLIENTENCODING: 'UTF8' + + - name: 'Using cleartext password with scram-sha-256: check that password is changed when using cleartext password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "{{ db_password1 }}" + encrypted: "{{ encrypted }}" + environment: + PGCLIENTENCODING: 'UTF8' + # ansible postgresql_user 
module interface does not (yet) support forcing password_encryption + # type value, we'll have to hack it in env variable to force correct encryption + PGOPTIONS: "-c password_encryption=scram-sha-256" + + - <<: *changed + + - name: 'Using cleartext password with scram-sha-256: ensure password is properly encrypted' + <<: *task_parameters + postgresql_query: + <<: *query_parameters + query: select * from pg_authid where rolname=%s and rolpassword like %s + positional_args: + - '{{ db_user1 }}' + - 'SCRAM-SHA-256$%' + + - assert: + that: + - result.rowcount == 1 + + - name: 'Using cleartext password with scram-sha-256: check that password is not changed when using the same password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "{{ db_password1 }}" + encrypted: "{{ encrypted }}" + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: "-c password_encryption=scram-sha-256" + + - <<: *not_changed + + - name: 'Using cleartext password with scram-sha-256: check that password is changed when using another cleartext password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: "changed{{ db_password1 }}" + encrypted: "{{ encrypted }}" + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: "-c password_encryption=scram-sha-256" + + - <<: *changed + + - name: 'Using cleartext password with scram-sha-256: check that password is changed when clearing the password' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: "{{ encrypted }}" + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: "-c password_encryption=scram-sha-256" + + - <<: *changed + + - name: 'Using cleartext password with scram-sha-256: check that password is not changed when clearing the password again' + <<: *task_parameters + postgresql_user: + <<: *parameters + password: '' + encrypted: "{{ encrypted }}" + environment: + PGCLIENTENCODING: 'UTF8' + PGOPTIONS: "-c password_encryption=scram-sha-256" + + - <<: *not_changed + + # end 
of block scram-sha-256 + + - name: Remove user + <<: *task_parameters + postgresql_user: + state: 'absent' + <<: *parameters + + - <<: *changed + + - name: Check that they were removed + <<: *task_parameters + shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres + environment: + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - assert: + that: + - "result.stdout_lines[-1] == '(0 rows)'" + + - name: Check that removing user a second time does nothing + <<: *task_parameters + postgresql_user: + state: 'absent' + <<: *parameters + environment: + PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed + + - <<: *not_changed + + always: + - name: Remove user + <<: *task_parameters + postgresql_user: + state: 'absent' + <<: *parameters diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/aliases b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/aliases new file mode 100644 index 00000000..786e0531 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/aliases @@ -0,0 +1,4 @@ +destructive +shippable/posix/group1 +skip/freebsd +skip/rhel diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml new file mode 100644 index 00000000..f697cefd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml @@ -0,0 +1,12 @@ +pg_user: postgres +db_default: postgres + +test_table1: acme1 
+test_table2: acme2 +test_table3: acme3 +test_idx1: idx1 +test_idx2: idx2 +test_func1: func1 +test_func2: func2 +test_func3: func3 +test_schema1: schema1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml new file mode 100644 index 00000000..4ce5a583 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_postgresql_db diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml new file mode 100644 index 00000000..fa47fdc5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml @@ -0,0 +1,8 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Initial tests of postgresql_user_obj_stat_info module: +- import_tasks: postgresql_user_obj_stat_info.yml + when: postgres_version_resp.stdout is version('9.4', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml 
new file mode 100644 index 00000000..6e6ff212 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml @@ -0,0 +1,222 @@ +--- +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- vars: + task_parameters: &task_parameters + become_user: '{{ pg_user }}' + become: yes + register: result + pg_parameters: &pg_parameters + login_user: '{{ pg_user }}' + login_db: '{{ db_default }}' + + block: + # Preparation: + # 0. create test schema + # 1. create test tables + # 2. create test indexes + # 3. create test functions + # 4. enable track_functions and restart + + - name: Create schema + <<: *task_parameters + postgresql_schema: + <<: *pg_parameters + name: '{{ test_schema1 }}' + + - name: Create test tables + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ item }}' + columns: + - id int + loop: + - '{{ test_table1 }}' + - '{{ test_table2 }}' + + - name: Create test table in another schema + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ test_schema1 }}.{{ test_table3 }}' + + - name: Create test indexes + <<: *task_parameters + postgresql_idx: + <<: *pg_parameters + name: '{{ item }}' + table: '{{ test_table1 }}' + columns: + - id + loop: + - '{{ test_idx1 }}' + - '{{ test_idx2 }}' + + - name: Set track_function (restart is required) + <<: *task_parameters + postgresql_set: + <<: *pg_parameters + name: track_functions + value: all + + # To avoid CI timeouts + - name: Kill all postgres processes + shell: 'pkill -u {{ pg_user }}' + become: yes + when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8' + ignore_errors: yes + + - name: Stop PostgreSQL + become: yes + service: + name: "{{ postgresql_service }}" + state: stopped + when: 
(ansible_facts.distribution_major_version != '8' and ansible_facts.distribution == 'CentOS') or ansible_facts.distribution != 'CentOS' + + - name: Pause between stop and start PosgreSQL + pause: + seconds: 5 + + - name: Start PostgreSQL + become: yes + service: + name: "{{ postgresql_service }}" + state: started + + - name: Create test functions + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: 'CREATE FUNCTION {{ item }}() RETURNS boolean AS $$ BEGIN RETURN 1; END; $$ LANGUAGE PLPGSQL' + loop: + - '{{ test_func1 }}' + - '{{ test_func2 }}' + - '{{ test_schema1 }}.{{ test_func3 }}' + + - name: Touch test functions + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: 'SELECT {{ item }}()' + loop: + - '{{ test_func1 }}' + - '{{ test_func2 }}' + - '{{ test_schema1 }}.{{ test_func3 }}' + + ####### + # Tests + ####### + # 0. Without filter + - name: Collect all stats + <<: *task_parameters + postgresql_user_obj_stat_info: + <<: *pg_parameters + + - assert: + that: + - result is not changed + - result.tables.public.{{ test_table1 }}.size == 0 + - result.tables.public.{{ test_table1 }}.size == 0 + - result.tables.{{ test_schema1 }}.{{ test_table3 }}.size == 0 + - result.functions.public.{{ test_func1 }}.calls == 1 + - result.functions.public.{{ test_func2 }}.calls == 1 + - result.functions.{{ test_schema1 }}.{{ test_func3 }}.calls == 1 + - result.indexes.public.{{ test_idx1 }}.idx_scan == 0 + - result.indexes.public.{{ test_idx2 }}.idx_scan == 0 + + # 1. 
With filter + - name: Collect stats with filter + <<: *task_parameters + postgresql_user_obj_stat_info: + <<: *pg_parameters + filter: tables, indexes + + - assert: + that: + - result is not changed + - result.tables.public.{{ test_table1 }}.size == 0 + - result.tables.public.{{ test_table1 }}.size == 0 + - result.tables.{{ test_schema1 }}.{{ test_table3 }}.size == 0 + - result.functions == {} + - result.indexes.public.{{ test_idx1 }}.idx_scan == 0 + - result.indexes.public.{{ test_idx2 }}.idx_scan == 0 + + # 2. With schema + - name: Collect stats for objects in certain schema + <<: *task_parameters + postgresql_user_obj_stat_info: + <<: *pg_parameters + schema: public + + - assert: + that: + - result is not changed + - result.tables.public.{{ test_table1 }}.size == 0 + - result.tables.public.{{ test_table1 }}.size == 0 + - result.indexes.public.{{ test_idx1 }}.idx_scan == 0 + - result.indexes.public.{{ test_idx2 }}.idx_scan == 0 + - result.functions.public.{{ test_func1 }}.calls == 1 + - result.functions.public.{{ test_func2 }}.calls == 1 + - result.tables.{{ test_schema1 }} is not defined + + + # 3. With wrong schema + - name: Try to collect data in nonexistent schema + <<: *task_parameters + postgresql_user_obj_stat_info: + <<: *pg_parameters + schema: nonexistent + ignore_errors: yes + + - assert: + that: + - result is failed + - result.msg == "Schema 'nonexistent' does not exist" + + # 4. 
Test Trust Input + - name: Try running with SQL injection + <<: *task_parameters + postgresql_user_obj_stat_info: + <<: *pg_parameters + session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --' + trust_input: no + ignore_errors: yes + + - assert: + that: + - result is failed + - result.msg is search('is potentially dangerous') + + ########## + # Clean up + ########## + - name: Drop schema + <<: *task_parameters + postgresql_schema: + <<: *pg_parameters + name: '{{ test_schema1 }}' + state: absent + cascade_drop: yes + + - name: Drop test tables + <<: *task_parameters + postgresql_table: + <<: *pg_parameters + name: '{{ item }}' + state: absent + loop: + - '{{ test_table1 }}' + - '{{ test_table2 }}' + + - name: Drop test functions + <<: *task_parameters + postgresql_query: + <<: *pg_parameters + query: 'DROP FUNCTION {{ item }}()' + loop: + - '{{ test_func1 }}' + - '{{ test_func2 }}' + - '{{ test_schema1 }}.{{ test_func3 }}' + ignore_errors: yes diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_pkg_mgr/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_pkg_mgr/tasks/main.yml new file mode 100644 index 00000000..24d02228 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_pkg_mgr/tasks/main.yml @@ -0,0 +1,17 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- set_fact: + pkg_mgr: community.general.pkgng + ansible_pkg_mgr: community.general.pkgng + cacheable: yes + when: ansible_os_family == "FreeBSD" + +- set_fact: + pkg_mgr: community.general.zypper + ansible_pkg_mgr: community.general.zypper + cacheable: yes + when: 
ansible_os_family == "Suse" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml new file mode 100644 index 00000000..aea02442 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/defaults/main.yml @@ -0,0 +1,17 @@ +postgresql_service: postgresql + +postgresql_packages: + - postgresql-server + - python-psycopg2 + +pg_user: postgres +pg_group: root + +locale_latin_suffix: +locale_utf8_suffix: + +# defaults for test SSL +ssl_db: 'ssl_db' +ssl_user: 'ssl_user' +ssl_pass: 'ssl_pass' +ssl_rootcert: '~{{ pg_user }}/root.crt' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql new file mode 100644 index 00000000..53c79666 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql @@ -0,0 +1,2 @@ +CREATE OR REPLACE FUNCTION dummy_display_ext_version() +RETURNS text LANGUAGE SQL AS 'SELECT (''1.0'')::text'; diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql new file mode 100644 index 00000000..227ba1b4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql @@ -0,0 +1,2 @@ +CREATE OR REPLACE FUNCTION dummy_display_ext_version() 
+RETURNS text LANGUAGE SQL AS 'SELECT (''2.0'')::text'; diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql new file mode 100644 index 00000000..7d6a60e5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql @@ -0,0 +1,2 @@ +CREATE OR REPLACE FUNCTION dummy_display_ext_version() +RETURNS text LANGUAGE SQL AS 'SELECT (''3.0'')::text'; diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy.control b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy.control new file mode 100644 index 00000000..4f8553c2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/dummy.control @@ -0,0 +1,3 @@ +comment = 'dummy extension used to test postgresql_ext Ansible module' +default_version = '3.0' +relocatable = true diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf new file mode 100644 index 00000000..58de3607 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf @@ -0,0 +1,10 @@ +# !!! This file managed by Ansible. Any local changes may be overwritten. !!! 
+ +# Database administrative login by UNIX sockets +# note: you may wish to restrict this further later +local all {{ pg_user }} trust + +# TYPE DATABASE USER CIDR-ADDRESS METHOD +local all all md5 +host all all 127.0.0.1/32 md5 +host all all ::1/128 md5 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/meta/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/meta/main.yml new file mode 100644 index 00000000..5438ced5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_pkg_mgr diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml new file mode 100644 index 00000000..3723c5ef --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/main.yml @@ -0,0 +1,247 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Exit when Suse because it causes CI problems +- meta: end_play + when: ansible_os_family == 'Suse' + +# To avoid hangings on service start/stop postgres during CI runs: +- meta: end_play + when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8' + +- name: python 2 + set_fact: + python_suffix: '' + when: ansible_python_version is version('3', '<') + +- name: python 3 + set_fact: + python_suffix: -py3 + when: 
ansible_python_version is version('3', '>=') + +- name: Include distribution and Python version specific variables + include_vars: '{{ lookup(''first_found'', params) }}' + vars: + params: + files: + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml' + - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml' + - '{{ ansible_os_family }}{{ python_suffix }}.yml' + - default{{ python_suffix }}.yml + paths: + - '{{ role_path }}/vars' + +- name: make sure the dbus service is started under systemd + systemd: + name: dbus + state: started + when: ansible_service_mgr == 'systemd' and ansible_distribution == 'Fedora' + +- name: Kill all postgres processes + shell: 'pkill -u {{ pg_user }}' + become: yes + when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8' + ignore_errors: yes + +- name: stop postgresql service + service: name={{ postgresql_service }} state=stopped + ignore_errors: true + +- name: remove old db (RedHat or Suse) + file: + path: '{{ pg_dir }}' + state: absent + ignore_errors: true + when: ansible_os_family == "RedHat" or ansible_os_family == "Suse" + +- name: remove old db (FreeBSD) + file: + path: '{{ pg_dir }}' + state: absent + ignore_errors: true + when: ansible_os_family == "FreeBSD" + +- name: remove old db config and files (debian) + file: + path: '{{ loop_item }}' + state: absent + ignore_errors: true + when: ansible_os_family == "Debian" + loop: + - /etc/postgresql + - /var/lib/postgresql + loop_control: + loop_var: loop_item + +- name: install dependencies for postgresql test + package: + name: '{{ postgresql_package_item }}' + state: present + with_items: '{{ postgresql_packages }}' + loop_control: + loop_var: postgresql_package_item + +- name: initialize postgres (FreeBSD) + command: /usr/local/etc/rc.d/postgresql oneinitdb + when: ansible_os_family == "FreeBSD" + +- name: Initialize postgres (RedHat systemd) + command: 
postgresql-setup initdb + when: ansible_os_family == "RedHat" and ansible_service_mgr == "systemd" + +- name: Initialize postgres (RedHat sysv) + command: /sbin/service postgresql initdb + when: ansible_os_family == "RedHat" and ansible_service_mgr != "systemd" + +- name: Initialize postgres (Debian) + shell: . /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main + args: + creates: /etc/postgresql/{{ pg_ver }}/ + when: ansible_os_family == 'Debian' + +- name: Initialize postgres (Suse) + service: name=postgresql state=stopped + when: ansible_os_family == 'Suse' + +- name: Pause between stop and start postgresql + pause: + seconds: 5 + when: ansible_os_family == 'Suse' + +- name: Initialize postgres (Suse) + service: name=postgresql state=started + when: ansible_os_family == 'Suse' + +- name: Copy pg_hba into place + template: + src: files/pg_hba.conf + dest: '{{ pg_hba_location }}' + owner: '{{ pg_user }}' + group: '{{ pg_group }}' + mode: '0644' + +- name: Generate locales (Debian) + locale_gen: + name: '{{ item }}' + state: present + with_items: + - pt_BR + - es_ES + when: ansible_os_family == 'Debian' + +- block: + - name: Install langpacks (RHEL8) + yum: + name: + - glibc-langpack-es + - glibc-langpack-pt + - glibc-all-langpacks + state: present + when: ansible_distribution_major_version is version('8', '>=') + + - name: Check if locales need to be generated (RedHat) + shell: localedef --list-archive | grep -a -q '^{{ locale }}$' + register: locale_present + ignore_errors: true + with_items: + - es_ES + - pt_BR + loop_control: + loop_var: locale + + - name: Reinstall internationalization files + shell: yum -y reinstall glibc-common || yum -y install glibc-common + args: + warn: false + when: locale_present is failed + + - name: Generate locale (RedHat) + command: localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }} + when: item is failed + with_items: '{{ 
locale_present.results }}' + when: ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora' + +- name: Install glibc langpacks (Fedora >= 24) + package: + name: '{{ item }}' + state: latest + with_items: + - glibc-langpack-es + - glibc-langpack-pt + when: ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('24', '>=') + +- name: enable postgresql service (FreeBSD) + lineinfile: + path: /etc/rc.conf + line: postgresql_enable="YES" + when: ansible_os_family == "FreeBSD" + +- name: start postgresql service + service: name={{ postgresql_service }} state=started + +- name: Pause between start and stop + pause: + seconds: 5 + +- name: Kill all postgres processes + shell: 'pkill -u {{ pg_user }}' + become: yes + when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8' + ignore_errors: yes + register: terminate + +- name: Stop postgresql service + service: name={{ postgresql_service }} state=stopped + when: terminate is not succeeded + +- name: Pause between stop and start + pause: + seconds: 5 + +- name: Start postgresql service + service: name={{ postgresql_service }} state=started + +- name: copy control file for dummy ext + copy: + src: dummy.control + dest: /usr/share/postgresql/{{ pg_ver }}/extension/dummy.control + mode: '0444' + when: ansible_os_family == 'Debian' + +- name: copy version files for dummy ext + copy: + src: '{{ item }}' + dest: /usr/share/postgresql/{{ pg_ver }}/extension/{{ item }} + mode: '0444' + with_items: + - dummy--1.0.sql + - dummy--2.0.sql + - dummy--3.0.sql + when: ansible_os_family == 'Debian' + +- name: add update paths + file: + path: /usr/share/postgresql/{{ pg_ver }}/extension/{{ item }} + mode: '0444' + state: touch + with_items: + - dummy--1.0--2.0.sql + - dummy--2.0--3.0.sql + when: ansible_os_family == 'Debian' + +- name: Get PostgreSQL version + become_user: '{{ pg_user }}' + become: true + shell: echo 'SHOW SERVER_VERSION' | psql --tuples-only 
--no-align --dbname postgres + register: postgres_version_resp + +- name: Print PostgreSQL server version + debug: + msg: '{{ postgres_version_resp.stdout }}' + +- import_tasks: ssl.yml + when: + - ansible_os_family == 'Debian' + - postgres_version_resp.stdout is version('9.4', '>=') diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml new file mode 100644 index 00000000..1bc4411d --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml @@ -0,0 +1,66 @@ +- name: postgresql SSL - create database + become_user: '{{ pg_user }}' + become: true + postgresql_db: + name: '{{ ssl_db }}' + +- name: postgresql SSL - create role + become_user: '{{ pg_user }}' + become: true + postgresql_user: + name: '{{ ssl_user }}' + role_attr_flags: SUPERUSER + password: '{{ ssl_pass }}' + +- name: postgresql SSL - install openssl + become: true + package: name=openssl state=present + +- name: postgresql SSL - create certs 1 + become_user: root + become: true + shell: openssl req -new -nodes -text -out ~{{ pg_user }}/root.csr \ -keyout ~{{ pg_user }}/root.key -subj "/CN=localhost.local" + +- name: postgresql SSL - create certs 2 + become_user: root + become: true + shell: openssl x509 -req -in ~{{ pg_user }}/root.csr -text -days 3650 \ -extensions v3_ca -signkey ~{{ pg_user }}/root.key -out ~{{ pg_user }}/root.crt + +- name: postgresql SSL - create certs 3 + become_user: root + become: true + shell: openssl req -new -nodes -text -out ~{{ pg_user }}/server.csr \ -keyout ~{{ pg_user }}/server.key -subj "/CN=localhost.local" + +- name: postgresql SSL - create certs 4 + become_user: root + become: true + shell: openssl x509 -req -in ~{{ pg_user }}/server.csr -text -days 365 \ -CA ~{{ pg_user 
}}/root.crt -CAkey ~{{ pg_user }}/root.key -CAcreateserial -out server.crt + +- name: postgresql SSL - set right permissions to files + become_user: root + become: true + file: + path: '{{ item }}' + mode: '0600' + owner: '{{ pg_user }}' + group: '{{ pg_user }}' + with_items: + - ~{{ pg_user }}/root.key + - ~{{ pg_user }}/server.key + - ~{{ pg_user }}/root.crt + - ~{{ pg_user }}/server.csr + +- name: postgresql SSL - enable SSL + become_user: '{{ pg_user }}' + become: true + postgresql_set: + login_user: '{{ pg_user }}' + db: postgres + name: ssl + value: true + +- name: postgresql SSL - reload PostgreSQL to enable ssl on + become: true + service: + name: '{{ postgresql_service }}' + state: reloaded diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml new file mode 100644 index 00000000..c5c6795e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: "/etc/postgresql/9.4/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.4/main" +pg_ver: 9.4 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml new file mode 100644 index 00000000..2f6b0d98 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql95-server + - py36-psycopg2 + +pg_dir: 
/usr/local/pgsql/data +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 9.5 +pg_user: pgsql +pg_group: pgsql + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml new file mode 100644 index 00000000..efb0603b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql95-server + - py27-psycopg2 + +pg_dir: /usr/local/pgsql/data +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 9.5 +pg_user: pgsql +pg_group: pgsql + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml new file mode 100644 index 00000000..2f6b0d98 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql95-server + - py36-psycopg2 + +pg_dir: /usr/local/pgsql/data +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 9.5 +pg_user: pgsql +pg_group: pgsql + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml new file mode 100644 
index 00000000..1fe66782 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql96-server + - py27-psycopg2 + +pg_dir: /usr/local/pgsql/data +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 9.6 +pg_user: pgsql +pg_group: pgsql + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml new file mode 100644 index 00000000..cd7c83a4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql11-server + - py36-psycopg2 + +pg_dir: /var/db/postgres/data11 +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 11 +pg_user: postgres +pg_group: postgres + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml new file mode 100644 index 00000000..0b1ab5b2 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml @@ -0,0 +1,12 @@ +postgresql_packages: + - postgresql11-server + - py27-psycopg2 + +pg_dir: /var/db/postgres/data11 +pg_hba_location: "{{ pg_dir }}/pg_hba.conf" +pg_ver: 11 +pg_user: postgres +pg_group: postgres + +locale_latin_suffix: .ISO8859-1 +locale_utf8_suffix: .UTF-8 diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml new file mode 100644 index 00000000..ee083722 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql-server" + - "python3-psycopg2" + - "bzip2" + - "xz" + +pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" +pg_dir: "/var/lib/pgsql/data" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml new file mode 100644 index 00000000..20c4b1f5 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml @@ -0,0 +1,7 @@ +postgresql_packages: + - "postgresql-server" + - "python-psycopg2" + - "bzip2" + +pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" +pg_dir: "/var/lib/pgsql/data" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml new file mode 100644 index 00000000..4b6e744b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: "/etc/postgresql/9.1/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.1/main" +pg_ver: 9.1 
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml new file mode 100644 index 00000000..ffcc8dd4 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: "/etc/postgresql/9.3/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.3/main" +pg_ver: 9.3 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml new file mode 100644 index 00000000..b088c310 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python3-psycopg2" + +pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.5/main" +pg_ver: 9.5 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml new file mode 100644 index 00000000..897efd2c --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python-psycopg2" + +pg_hba_location: 
"/etc/postgresql/9.5/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/9.5/main" +pg_ver: 9.5 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml new file mode 100644 index 00000000..10453bdf --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml @@ -0,0 +1,8 @@ +postgresql_packages: + - "postgresql" + - "postgresql-common" + - "python3-psycopg2" + +pg_hba_location: "/etc/postgresql/10/main/pg_hba.conf" +pg_dir: "/var/lib/postgresql/10/main" +pg_ver: 10 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml new file mode 100644 index 00000000..19152a64 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml @@ -0,0 +1,6 @@ +postgresql_packages: + - "postgresql-server" + - "python3-psycopg2" + +pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" +pg_dir: "/var/lib/pgsql/data" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml new file mode 100644 index 00000000..ab36dd9f --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_db/vars/default.yml @@ -0,0 +1,6 @@ +postgresql_packages: + - "postgresql-server" + - "python-psycopg2" 
+ +pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" +pg_dir: "/var/lib/pgsql/data" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/defaults/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/defaults/main.yml new file mode 100644 index 00000000..1b1d8b41 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/defaults/main.yml @@ -0,0 +1,30 @@ +# General: +pg_user: postgres +db_default: postgres + +pg_package_list: +- apt-utils +- postgresql +- postgresql-contrib +- python3-psycopg2 + +packages_to_remove: +- postgresql +- postgresql-contrib +- postgresql-server +- postgresql-libs +- python3-psycopg2 + +# Master specific defaults: +master_root_dir: '/var/lib/pgsql/master' +master_data_dir: '{{ master_root_dir }}/data' +master_postgresql_conf: '{{ master_data_dir }}/postgresql.conf' +master_pg_hba_conf: '{{ master_data_dir }}/pg_hba.conf' +master_port: 5433 + +# Replica specific defaults: +replica_root_dir: '/var/lib/pgsql/replica' +replica_data_dir: '{{ replica_root_dir }}/data' +replica_postgresql_conf: '{{ replica_data_dir }}/postgresql.conf' +replica_pg_hba_conf: '{{ replica_data_dir }}/pg_hba.conf' +replica_port: 5434 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/handlers/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/handlers/main.yml new file mode 100644 index 00000000..7f4dc5cc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/handlers/main.yml @@ -0,0 +1,23 @@ +- name: Stop services + become: yes + become_user: '{{ pg_user }}' + shell: '{{ 
pg_ctl }} -D {{ item.datadir }} -o "-p {{ item.port }}" -m immediate stop' + loop: + - { datadir: '{{ master_data_dir }}', port: '{{ master_port }}' } + - { datadir: '{{ replica_data_dir }}', port: '{{ replica_port }}' } + listen: stop postgresql + +- name: Remove packages + apt: + name: '{{ packages_to_remove }}' + state: absent + listen: cleanup postgresql + +- name: Remove FS objects + file: + state: absent + path: "{{ item }}" + loop: + - "{{ master_root_dir }}" + - "{{ replica_root_dir }}" + listen: cleanup postgresql diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/main.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/main.yml new file mode 100644 index 00000000..21f6ffa9 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/main.yml @@ -0,0 +1,13 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Setup PostgreSQL master-standby replication into one container: +- import_tasks: setup_postgresql_cluster.yml + when: + - ansible_distribution == 'Ubuntu' + - ansible_distribution_major_version >= '18' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml new file mode 100644 index 00000000..9ef657c7 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml @@ -0,0 +1,84 @@ +- name: Install packages + apt: + name: '{{ pg_package_list }}' + notify: cleanup postgresql +- name: Create root dirs + file: + state: directory + path: '{{ item }}' + owner: postgres + group: postgres + mode: '0700' + loop: + - '{{ master_root_dir }}' + - '{{ master_data_dir }}' + - '{{ replica_root_dir }}' + - '{{ replica_data_dir }}' + notify: cleanup postgresql +- name: Find initdb + shell: find /usr/lib -type f -name "initdb" + register: result +- name: Set path to initdb + set_fact: + initdb: '{{ result.stdout }}' +- name: Initialize databases + become: true + become_user: '{{ pg_user }}' + shell: '{{ initdb }} --pgdata {{ item }}' + loop: + - '{{ master_data_dir }}' + - '{{ replica_data_dir }}' +- name: Copy config templates + template: + src: '{{ item.conf_templ }}' + dest: '{{ item.conf_dest }}' + owner: postgres + group: postgres + force: true + loop: + - conf_templ: master_postgresql.conf.j2 + conf_dest: '{{ master_postgresql_conf }}' + - conf_templ: replica_postgresql.conf.j2 + conf_dest: '{{ replica_postgresql_conf }}' + - conf_templ: pg_hba.conf.j2 + conf_dest: '{{ master_pg_hba_conf }}' + - conf_templ: pg_hba.conf.j2 + conf_dest: '{{ replica_pg_hba_conf }}' +- name: Find pg_ctl + shell: find /usr/lib -type f -name "pg_ctl" + register: result +- name: Set path to initdb + set_fact: + pg_ctl: '{{ result.stdout }}' +- name: Start servers + become: true + become_user: '{{ pg_user }}' + shell: '{{ pg_ctl }} -D {{ item.datadir }} -o "-p {{ item.port }}" start' + loop: + - datadir: '{{ master_data_dir }}' + port: '{{ master_port }}' + - datadir: '{{ 
replica_data_dir }}' + port: '{{ replica_port }}' + notify: stop postgresql +- name: Check connectivity to the master and get PostgreSQL version + become: true + become_user: '{{ pg_user }}' + postgresql_ping: + db: '{{ db_default }}' + login_user: '{{ pg_user }}' + login_port: '{{ master_port }}' + register: result +- name: Check connectivity to the replica and get PostgreSQL version + become: true + become_user: '{{ pg_user }}' + postgresql_ping: + db: '{{ db_default }}' + login_user: '{{ pg_user }}' + login_port: '{{ replica_port }}' +- name: Define server version + set_fact: + pg_major_version: '{{ result.server_version.major }}' + pg_minor_version: '{{ result.server_version.minor }}' +- name: Print PostgreSQL version + debug: + msg: PostgreSQL version is {{ pg_major_version }}.{{ pg_minor_version }} diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/master_postgresql.conf.j2 b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/master_postgresql.conf.j2 new file mode 100644 index 00000000..744243ff --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/master_postgresql.conf.j2 @@ -0,0 +1,28 @@ +# Important parameters: +listen_addresses='*' +port = {{ master_port }} +wal_level = logical +max_wal_senders = 8 +track_commit_timestamp = on +max_replication_slots = 10 + +# Unimportant parameters: +max_connections=10 +shared_buffers=8MB +dynamic_shared_memory_type=posix +log_destination='stderr' +logging_collector=on +log_directory='log' +log_filename='postgresql-%a.log' +log_truncate_on_rotation=on +log_rotation_age=1d +log_rotation_size=0 +log_line_prefix='%m[%p]' +log_timezone='W-SU' +datestyle='iso,mdy' +timezone='W-SU' +lc_messages='en_US.UTF-8' +lc_monetary='en_US.UTF-8' 
+lc_numeric='en_US.UTF-8' +lc_time='en_US.UTF-8' +default_text_search_config='pg_catalog.english' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2 b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2 new file mode 100644 index 00000000..62e05ffc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2 @@ -0,0 +1,7 @@ +local all all trust +local replication logical_replication trust +host replication logical_replication 127.0.0.1/32 trust +host replication logical_replication 0.0.0.0/0 trust +local all logical_replication trust +host all logical_replication 127.0.0.1/32 trust +host all logical_replication 0.0.0.0/0 trust diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2 b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2 new file mode 100644 index 00000000..206ab2eb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2 @@ -0,0 +1,28 @@ +# Important parameters: +listen_addresses='*' +port = {{ replica_port }} +wal_level = logical +max_wal_senders = 8 +track_commit_timestamp = on +max_replication_slots = 10 + +# Unimportant parameters: +max_connections=10 +shared_buffers=8MB +dynamic_shared_memory_type=posix +log_destination='stderr' +logging_collector=on +log_directory='log' +log_filename='postgresql-%a.log' +log_truncate_on_rotation=on +log_rotation_age=1d +log_rotation_size=0 +log_line_prefix='%m[%p]' 
+log_timezone='W-SU' +datestyle='iso,mdy' +timezone='W-SU' +lc_messages='en_US.UTF-8' +lc_monetary='en_US.UTF-8' +lc_numeric='en_US.UTF-8' +lc_time='en_US.UTF-8' +default_text_search_config='pg_catalog.english' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/requirements.yml b/collections-debian-merged/ansible_collections/community/postgresql/tests/requirements.yml new file mode 100644 index 00000000..5a2c9c80 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/requirements.yml @@ -0,0 +1,3 @@ +integration_tests_dependencies: +- community.general +unit_tests_dependencies: [] diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.json b/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.json new file mode 100644 index 00000000..c789a7fd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.json @@ -0,0 +1,7 @@ +{ + "include_symlinks": true, + "prefixes": [ + "plugins/" + ], + "output": "path-message" +} diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py b/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py new file mode 100755 index 00000000..49806f2e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/extra/no-unwanted-files.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Prevent unwanted files from being added to the source tree.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + + +def main(): + """Main entry point.""" + paths = sys.argv[1:] or 
sys.stdin.read().splitlines() + + allowed_extensions = ( + '.cs', + '.ps1', + '.psm1', + '.py', + ) + + skip_paths = set([ + ]) + + skip_directories = ( + ) + + for path in paths: + if path in skip_paths: + continue + + if any(path.startswith(skip_directory) for skip_directory in skip_directories): + continue + + ext = os.path.splitext(path)[1] + + if ext not in allowed_extensions: + print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions))) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/ignore-2.10.txt new file mode 100644 index 00000000..b6eb3028 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/ignore-2.10.txt @@ -0,0 +1,7 @@ +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang +plugins/modules/postgresql_db.py use-argspec-type-path +plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen +plugins/modules/postgresql_privs.py validate-modules:parameter-documented-multiple-times +plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown +plugins/modules/postgresql_user.py validate-modules:doc-choices-do-not-match-spec diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/ignore-2.11.txt b/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/ignore-2.11.txt new file mode 100644 index 00000000..b6eb3028 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/ignore-2.11.txt @@ -0,0 +1,7 @@ +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang +plugins/modules/postgresql_db.py use-argspec-type-path +plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen 
+plugins/modules/postgresql_privs.py validate-modules:parameter-documented-multiple-times +plugins/modules/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown +plugins/modules/postgresql_user.py validate-modules:doc-choices-do-not-match-spec diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/ignore-2.9.txt b/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/ignore-2.9.txt new file mode 100644 index 00000000..cd5ab7eb --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/sanity/ignore-2.9.txt @@ -0,0 +1,9 @@ +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang +plugins/modules/postgresql_db.py use-argspec-type-path +plugins/modules/postgresql_db.py validate-modules:parameter-type-not-in-doc +plugins/modules/postgresql_db.py validate-modules:use-run-command-not-popen +plugins/modules/postgresql_ext.py validate-modules:parameter-type-not-in-doc +plugins/modules/postgresql_schema.py validate-modules:parameter-type-not-in-doc +plugins/modules/postgresql_user.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/postgresql_user.py validate-modules:parameter-type-not-in-doc diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/__init__.py diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py b/collections-debian-merged/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py new file mode 100644 index 00000000..a9fb9600 --- /dev/null +++ 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_postgres.py @@ -0,0 +1,325 @@ +# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +import ansible_collections.community.postgresql.plugins.module_utils.postgres as pg + + +INPUT_DICT = dict( + session_role=dict(default=''), + login_user=dict(default='postgres'), + login_password=dict(default='test', no_log=True), + login_host=dict(default='test'), + login_unix_socket=dict(default=''), + port=dict(type='int', default=5432, aliases=['login_port']), + ssl_mode=dict( + default='prefer', + choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full'] + ), + ca_cert=dict(aliases=['ssl_rootcert']), +) + +EXPECTED_DICT = dict( + user=dict(default='postgres'), + password=dict(default='test', no_log=True), + host=dict(default='test'), + port=dict(type='int', default=5432, aliases=['login_port']), + sslmode=dict( + default='prefer', + choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full'] + ), + sslrootcert=dict(aliases=['ssl_rootcert']), +) + + +class TestPostgresCommonArgSpec(): + + """ + Namespace for testing postgresql_common_arg_spec() function. + """ + + def test_postgres_common_argument_spec(self): + """ + Test for postgresql_common_arg_spec() function. + + The tested function just returns a dictionary with the default + parameters and their values for PostgreSQL modules. + The return and expected dictionaries must be compared. 
+ """ + expected_dict = dict( + login_user=dict(default='postgres'), + login_password=dict(default='', no_log=True), + login_host=dict(default=''), + login_unix_socket=dict(default=''), + port=dict(type='int', default=5432, aliases=['login_port']), + ssl_mode=dict( + default='prefer', + choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full'] + ), + ca_cert=dict(aliases=['ssl_rootcert']), + ) + assert pg.postgres_common_argument_spec() == expected_dict + + +@pytest.fixture +def m_psycopg2(): + """Return mock object for psycopg2 emulation.""" + global Cursor + Cursor = None + + class Cursor(): + def __init__(self): + self.passed_query = None + + def execute(self, query): + self.passed_query = query + + def close(self): + pass + + global DbConnection + DbConnection = None + + class DbConnection(): + def __init__(self): + pass + + def cursor(self, cursor_factory=None): + return Cursor() + + def set_session(self, autocommit=None): + pass + + def set_isolation_level(self, isolevel): + pass + + class Extras(): + def __init__(self): + self.DictCursor = True + + class Extensions(): + def __init__(self): + self.ISOLATION_LEVEL_AUTOCOMMIT = True + + class DummyPsycopg2(): + def __init__(self): + self.__version__ = '2.4.3' + self.extras = Extras() + self.extensions = Extensions() + + def connect(self, host=None, port=None, user=None, + password=None, sslmode=None, sslrootcert=None): + if user == 'Exception': + raise Exception() + + return DbConnection() + + return DummyPsycopg2() + + +class TestEnsureReqLibs(): + + """ + Namespace for testing ensure_required_libs() function. + + If there is something wrong with libs, the function invokes fail_json() + method of AnsibleModule object passed as an argument called 'module'. + Therefore we must check: + 1. value of err_msg attribute of m_ansible_module mock object. 
+ """ + + @pytest.fixture(scope='class') + def m_ansible_module(self): + """Return an object of dummy AnsibleModule class.""" + class Dummym_ansible_module(): + def __init__(self): + self.params = {'ca_cert': False} + self.err_msg = '' + + def fail_json(self, msg): + self.err_msg = msg + + return Dummym_ansible_module() + + def test_ensure_req_libs_has_not_psycopg2(self, m_ansible_module): + """Test ensure_required_libs() with psycopg2 is None.""" + # HAS_PSYCOPG2 is False by default + pg.ensure_required_libs(m_ansible_module) + assert 'Failed to import the required Python library (psycopg2)' in m_ansible_module.err_msg + + def test_ensure_req_libs_has_psycopg2(self, m_ansible_module, monkeypatch): + """Test ensure_required_libs() with psycopg2 is not None.""" + monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True) + + pg.ensure_required_libs(m_ansible_module) + assert m_ansible_module.err_msg == '' + + def test_ensure_req_libs_ca_cert(self, m_ansible_module, m_psycopg2, monkeypatch): + """ + Test with module.params['ca_cert'], psycopg2 version is suitable. + """ + m_ansible_module.params['ca_cert'] = True + monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True) + monkeypatch.setattr(pg, 'psycopg2', m_psycopg2) + + pg.ensure_required_libs(m_ansible_module) + assert m_ansible_module.err_msg == '' + + def test_ensure_req_libs_ca_cert_low_psycopg2_ver(self, m_ansible_module, m_psycopg2, monkeypatch): + """ + Test with module.params['ca_cert'], psycopg2 version is wrong. 
+ """ + m_ansible_module.params['ca_cert'] = True + monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True) + # Set wrong psycopg2 version number: + psycopg2 = m_psycopg2 + psycopg2.__version__ = '2.4.2' + monkeypatch.setattr(pg, 'psycopg2', psycopg2) + + pg.ensure_required_libs(m_ansible_module) + assert 'psycopg2 must be at least 2.4.3' in m_ansible_module.err_msg + + +@pytest.fixture(scope='class') +def m_ansible_module(): + """Return an object of dummy AnsibleModule class.""" + class DummyAnsibleModule(): + def __init__(self): + self.params = pg.postgres_common_argument_spec() + self.err_msg = '' + self.warn_msg = '' + + def fail_json(self, msg): + self.err_msg = msg + + def warn(self, msg): + self.warn_msg = msg + + return DummyAnsibleModule() + + +class TestConnectToDb(): + + """ + Namespace for testing connect_to_db() function. + + When some connection errors occure connect_to_db() caught any of them + and invoke fail_json() or warn() methods of AnsibleModule object + depending on the passed parameters. + connect_to_db may return db_connection object or None if errors occured. + Therefore we must check: + 1. Values of err_msg and warn_msg attributes of m_ansible_module mock object. + 2. Types of return objects (db_connection and cursor). 
+ """ + + def test_connect_to_db(self, m_ansible_module, monkeypatch, m_psycopg2): + """Test connect_to_db(), common test.""" + monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True) + monkeypatch.setattr(pg, 'psycopg2', m_psycopg2) + + conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params) + db_connection = pg.connect_to_db(m_ansible_module, conn_params) + cursor = db_connection.cursor() + # if errors, db_connection returned as None: + assert isinstance(db_connection, DbConnection) + assert isinstance(cursor, Cursor) + assert m_ansible_module.err_msg == '' + # The default behaviour, normal in this case: + assert 'Database name has not been passed' in m_ansible_module.warn_msg + + def test_session_role(self, m_ansible_module, monkeypatch, m_psycopg2): + """Test connect_to_db(), switch on session_role.""" + monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True) + monkeypatch.setattr(pg, 'psycopg2', m_psycopg2) + + m_ansible_module.params['session_role'] = 'test_role' + conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params) + db_connection = pg.connect_to_db(m_ansible_module, conn_params) + cursor = db_connection.cursor() + # if errors, db_connection returned as None: + assert isinstance(db_connection, DbConnection) + assert isinstance(cursor, Cursor) + assert m_ansible_module.err_msg == '' + # The default behaviour, normal in this case: + assert 'Database name has not been passed' in m_ansible_module.warn_msg + + def test_fail_on_conn_true(self, m_ansible_module, monkeypatch, m_psycopg2): + """ + Test connect_to_db(), fail_on_conn arg passed as True (the default behavior). 
+ """ + monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True) + monkeypatch.setattr(pg, 'psycopg2', m_psycopg2) + + m_ansible_module.params['login_user'] = 'Exception' # causes Exception + + conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params) + db_connection = pg.connect_to_db(m_ansible_module, conn_params, fail_on_conn=True) + + assert 'unable to connect to database' in m_ansible_module.err_msg + assert db_connection is None + + def test_fail_on_conn_false(self, m_ansible_module, monkeypatch, m_psycopg2): + """ + Test connect_to_db(), fail_on_conn arg passed as False. + """ + monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True) + monkeypatch.setattr(pg, 'psycopg2', m_psycopg2) + + m_ansible_module.params['login_user'] = 'Exception' # causes Exception + + conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params) + db_connection = pg.connect_to_db(m_ansible_module, conn_params, fail_on_conn=False) + + assert m_ansible_module.err_msg == '' + assert 'PostgreSQL server is unavailable' in m_ansible_module.warn_msg + assert db_connection is None + + def test_autocommit_true(self, m_ansible_module, monkeypatch, m_psycopg2): + """ + Test connect_to_db(), autocommit arg passed as True (the default is False). 
+ """ + monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True) + + # case 1: psycopg2.__version >= 2.4.2 (the default in m_psycopg2) + monkeypatch.setattr(pg, 'psycopg2', m_psycopg2) + + conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params) + db_connection = pg.connect_to_db(m_ansible_module, conn_params, autocommit=True) + cursor = db_connection.cursor() + + # if errors, db_connection returned as None: + assert isinstance(db_connection, DbConnection) + assert isinstance(cursor, Cursor) + assert m_ansible_module.err_msg == '' + + # case 2: psycopg2.__version < 2.4.2 + m_psycopg2.__version__ = '2.4.1' + monkeypatch.setattr(pg, 'psycopg2', m_psycopg2) + + conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params) + db_connection = pg.connect_to_db(m_ansible_module, conn_params, autocommit=True) + cursor = db_connection.cursor() + + # if errors, db_connection returned as None: + assert isinstance(db_connection, DbConnection) + assert isinstance(cursor, Cursor) + assert 'psycopg2 must be at least 2.4.3' in m_ansible_module.err_msg + + +class TestGetConnParams(): + + """Namespace for testing get_conn_params() function.""" + + def test_get_conn_params_def(self, m_ansible_module): + """Test get_conn_params(), warn_db_default kwarg is default.""" + assert pg.get_conn_params(m_ansible_module, INPUT_DICT) == EXPECTED_DICT + assert m_ansible_module.warn_msg == 'Database name has not been passed, used default database to connect to.' 
+ + def test_get_conn_params_warn_db_def_false(self, m_ansible_module): + """Test get_conn_params(), warn_db_default kwarg is False.""" + assert pg.get_conn_params(m_ansible_module, INPUT_DICT, warn_db_default=False) == EXPECTED_DICT + assert m_ansible_module.warn_msg == '' diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py b/collections-debian-merged/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py new file mode 100644 index 00000000..62a1704a --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/unit/plugins/module_utils/test_saslprep.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, Andrey Tuzhilin <andrei.tuzhilin@gmail.com> +# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.community.postgresql.plugins.module_utils.saslprep import saslprep + + +VALID = [ + (u'', u''), + (u'\u00A0', u' '), + (u'a', u'a'), + (u'й', u'й'), + (u'\u30DE\u30C8\u30EA\u30C3\u30AF\u30B9', u'\u30DE\u30C8\u30EA\u30C3\u30AF\u30B9'), + (u'The\u00ADM\u00AAtr\u2168', u'TheMatrIX'), + (u'I\u00ADX', u'IX'), + (u'user', u'user'), + (u'USER', u'USER'), + (u'\u00AA', u'a'), + (u'\u2168', u'IX'), + (u'\u05BE\u00A0\u05BE', u'\u05BE\u0020\u05BE'), +] + +INVALID = [ + (None, TypeError), + (b'', TypeError), + (u'\u0221', ValueError), + (u'\u0007', ValueError), + (u'\u0627\u0031', ValueError), + (u'\uE0001', ValueError), + (u'\uE0020', ValueError), + (u'\uFFF9', ValueError), + (u'\uFDD0', ValueError), + (u'\u0000', ValueError), + (u'\u06DD', ValueError), + (u'\uFFFFD', ValueError), + (u'\uD800', ValueError), + (u'\u200E', ValueError), + (u'\u05BE\u00AA\u05BE', ValueError), +] + + +@pytest.mark.parametrize('source,target', VALID) +def test_saslprep_conversions(source, 
target): + assert saslprep(source) == target + + +@pytest.mark.parametrize('source,exception', INVALID) +def test_saslprep_exceptions(source, exception): + with pytest.raises(exception) as ex: + saslprep(source) diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/constraints.txt b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/constraints.txt new file mode 100644 index 00000000..ae6000ae --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/constraints.txt @@ -0,0 +1,52 @@ +coverage >= 4.2, < 5.0.0, != 4.3.2 ; python_version <= '3.7' # features in 4.2+ required, avoid known bug in 4.3.2 on python 2.6, coverage 5.0+ incompatible +coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible +cryptography < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6 +deepdiff < 4.0.0 ; python_version < '3' # deepdiff 4.0.0 and later require python 3 +jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later +urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later +pywinrm >= 0.3.0 # message encryption support +sphinx < 1.6 ; python_version < '2.7' # sphinx 1.6 and later require python 2.7 or later +sphinx < 1.8 ; python_version >= '2.7' # sphinx 1.8 and later are currently incompatible with rstcheck 3.3 +pygments >= 2.4.0 # Pygments 2.4.0 includes bugfixes for YAML and YAML+Jinja lexers +wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later +yamllint != 1.8.0, < 1.14.0 ; python_version < '2.7' # yamllint 1.8.0 and 1.14.0+ require python 2.7+ +pycrypto >= 2.6 # Need features found in 2.6 and greater +ncclient >= 0.5.2 # Need features added in 0.5.2 and greater +idna < 2.6, >= 2.5 # linode requires idna < 2.9, >= 2.5, requests 
requires idna < 2.6, but cryptography will cause the latest version to be installed instead +paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6 +pytest < 3.3.0 ; python_version < '2.7' # pytest 3.3.0 drops support for python 2.6 +pytest < 5.0.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7 +pytest-forked < 1.0.2 ; python_version < '2.7' # pytest-forked 1.0.2 and later require python 2.7 or later +pytest-forked >= 1.0.2 ; python_version >= '2.7' # pytest-forked before 1.0.2 does not work with pytest 4.2.0+ (which requires python 2.7+) +ntlm-auth >= 1.3.0 # message encryption support using cryptography +requests < 2.20.0 ; python_version < '2.7' # requests 2.20.0 drops support for python 2.6 +requests-ntlm >= 1.1.0 # message encryption support +requests-credssp >= 0.1.0 # message encryption support +voluptuous >= 0.11.0 # Schema recursion via Self +openshift >= 0.6.2, < 0.9.0 # merge_type support +virtualenv < 16.0.0 ; python_version < '2.7' # virtualenv 16.0.0 and later require python 2.7 or later +pathspec < 0.6.0 ; python_version < '2.7' # pathspec 0.6.0 and later require python 2.7 or later +pyopenssl < 18.0.0 ; python_version < '2.7' # pyOpenSSL 18.0.0 and later require python 2.7 or later +pyfmg == 0.6.1 # newer versions do not pass current unit tests +pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later +pycparser < 2.19 ; python_version < '2.7' # pycparser 2.19 and later require python 2.7 or later +mock >= 2.0.0 # needed for features backported from Python 3.6 unittest.mock (assert_called, assert_called_once...) 
+pytest-mock >= 1.4.0 # needed for mock_use_standalone_module pytest option +xmltodict < 0.12.0 ; python_version < '2.7' # xmltodict 0.12.0 and later require python 2.7 or later +lxml < 4.3.0 ; python_version < '2.7' # lxml 4.3.0 and later require python 2.7 or later +pyvmomi < 6.0.0 ; python_version < '2.7' # pyvmomi 6.0.0 and later require python 2.7 or later +pyone == 1.1.9 # newer versions do not pass current integration tests +boto3 < 1.11 ; python_version < '2.7' # boto3 1.11 drops Python 2.6 support +botocore >= 1.10.0, < 1.14 ; python_version < '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca; botocore 1.14 drops Python 2.6 support +botocore >= 1.10.0 ; python_version >= '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca +setuptools < 45 ; python_version <= '2.7' # setuptools 45 and later require python 3.5 or later +cffi >= 1.14.2, != 1.14.3 # Yanked version which older versions of pip will still install: + +# freeze pylint and its requirements for consistent test results +astroid == 2.2.5 +isort == 4.3.15 +lazy-object-proxy == 1.3.1 +mccabe == 0.6.1 +pylint == 2.3.1 +typed-ast == 1.4.0 # 1.4.0 is required to compile on Python 3.8 +wrapt == 1.11.1 diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/aix.sh b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/aix.sh new file mode 100755 index 00000000..cd3014cc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/aix.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +platform="${args[0]}" +version="${args[1]}" + +if [ "${#args[@]}" -gt 2 ]; then + target="shippable/posix/group${args[2]}/" +else + target="shippable/posix/" +fi + +stage="${S:-prod}" +provider="${P:-default}" + +# shellcheck disable=SC2086 +ansible-test integration 
--color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ + --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py new file mode 100755 index 00000000..608db692 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/check_matrix.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python +"""Verify the currently executing Shippable test matrix matches the one defined in the "shippable.yml" file.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import datetime +import json +import os +import re +import sys +import time + +try: + from typing import NoReturn +except ImportError: + NoReturn = None + +try: + # noinspection PyCompatibility + from urllib2 import urlopen # pylint: disable=ansible-bad-import-from +except ImportError: + # noinspection PyCompatibility + from urllib.request import urlopen + + +def main(): # type: () -> None + """Main entry point.""" + repo_full_name = os.environ['REPO_FULL_NAME'] + required_repo_full_name = 'ansible-collections/community.postgresql' + + if repo_full_name != required_repo_full_name: + sys.stderr.write('Skipping matrix check on repo "%s" which is not "%s".\n' % (repo_full_name, required_repo_full_name)) + return + + with open('shippable.yml', 'rb') as yaml_file: + yaml = yaml_file.read().decode('utf-8').splitlines() + + defined_matrix = [match.group(1) for match in [re.search(r'^ *- env: T=(.*)$', line) for line in yaml] if match and match.group(1) != 'none'] + + if not defined_matrix: + fail('No matrix entries found in the "shippable.yml" file.', + 'Did you modify the "shippable.yml" file?') + + run_id = 
os.environ['SHIPPABLE_BUILD_ID'] + sleep = 1 + jobs = [] + + for attempts_remaining in range(4, -1, -1): + try: + jobs = json.loads(urlopen('https://api.shippable.com/jobs?runIds=%s' % run_id).read()) + + if not isinstance(jobs, list): + raise Exception('Shippable run %s data is not a list.' % run_id) + + break + except Exception as ex: + if not attempts_remaining: + fail('Unable to retrieve Shippable run %s matrix.' % run_id, + str(ex)) + + sys.stderr.write('Unable to retrieve Shippable run %s matrix: %s\n' % (run_id, ex)) + sys.stderr.write('Trying again in %d seconds...\n' % sleep) + time.sleep(sleep) + sleep *= 2 + + if len(jobs) != len(defined_matrix): + if len(jobs) == 1: + hint = '\n\nMake sure you do not use the "Rebuild with SSH" option.' + else: + hint = '' + + fail('Shippable run %s has %d jobs instead of the expected %d jobs.' % (run_id, len(jobs), len(defined_matrix)), + 'Try re-running the entire matrix.%s' % hint) + + actual_matrix = dict((job.get('jobNumber'), dict(tuple(line.split('=', 1)) for line in job.get('env', [])).get('T', '')) for job in jobs) + errors = [(job_number, test, actual_matrix.get(job_number)) for job_number, test in enumerate(defined_matrix, 1) if actual_matrix.get(job_number) != test] + + if len(errors): + error_summary = '\n'.join('Job %s expected "%s" but found "%s" instead.' % (job_number, expected, actual) for job_number, expected, actual in errors) + + fail('Shippable run %s has a job matrix mismatch.' % run_id, + 'Try re-running the entire matrix.\n\n%s' % error_summary) + + +def fail(message, output): # type: (str, str) -> NoReturn + # Include a leading newline to improve readability on Shippable "Tests" tab. + # Without this, the first line becomes indented. 
+ output = '\n' + output.strip() + + timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + + # hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers + xml = ''' +<?xml version="1.0" encoding="utf-8"?> +<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0"> +\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None"> +\t\t<testcase classname="timeout" name="timeout"> +\t\t\t<error message="%s" type="error">%s</error> +\t\t</testcase> +\t</testsuite> +</testsuites> +''' % (timestamp, message, output) + + path = 'shippable/testresults/check-matrix.xml' + dir_path = os.path.dirname(path) + + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + with open(path, 'w') as junit_fd: + junit_fd.write(xml.lstrip()) + + sys.stderr.write(message + '\n') + sys.stderr.write(output + '\n') + + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/freebsd.sh b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/freebsd.sh new file mode 100755 index 00000000..cd3014cc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/freebsd.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +platform="${args[0]}" +version="${args[1]}" + +if [ "${#args[@]}" -gt 2 ]; then + target="shippable/posix/group${args[2]}/" +else + target="shippable/posix/" +fi + +stage="${S:-prod}" +provider="${P:-default}" + +# shellcheck disable=SC2086 +ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ + --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" 
--remote-provider "${provider}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/linux.sh b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/linux.sh new file mode 100755 index 00000000..9cc2f966 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/linux.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +image="${args[1]}" + +if [ "${#args[@]}" -gt 2 ]; then + target="shippable/posix/group${args[2]}/" +else + target="shippable/posix/" +fi + +# shellcheck disable=SC2086 +ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ + --docker "${image}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/remote.sh b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/remote.sh new file mode 100755 index 00000000..cd3014cc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/remote.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +platform="${args[0]}" +version="${args[1]}" + +if [ "${#args[@]}" -gt 2 ]; then + target="shippable/posix/group${args[2]}/" +else + target="shippable/posix/" +fi + +stage="${S:-prod}" +provider="${P:-default}" + +# shellcheck disable=SC2086 +ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ + --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/rhel.sh 
b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/rhel.sh new file mode 100755 index 00000000..cd3014cc --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/rhel.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +platform="${args[0]}" +version="${args[1]}" + +if [ "${#args[@]}" -gt 2 ]; then + target="shippable/posix/group${args[2]}/" +else + target="shippable/posix/" +fi + +stage="${S:-prod}" +provider="${P:-default}" + +# shellcheck disable=SC2086 +ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ + --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/sanity.sh b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/sanity.sh new file mode 100755 index 00000000..c216220e --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/sanity.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +group="${args[1]}" + +if [ "${BASE_BRANCH:-}" ]; then + base_branch="origin/${BASE_BRANCH}" +else + base_branch="" +fi + +if [ "${group}" == "extra" ]; then + # ansible-galaxy -vvv collection install community.internal_test_tools + git clone --single-branch --depth 1 https://github.com/ansible-collections/community.internal_test_tools.git ../internal_test_tools + + ../internal_test_tools/tools/run.py --color + exit +fi + +# shellcheck disable=SC2086 +ansible-test sanity --color -v --junit ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \ + --docker --base-branch "${base_branch}" \ + --allow-disabled diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/shippable.sh b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/shippable.sh new file mode 100755 index 00000000..2eb224bd --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/shippable.sh @@ -0,0 +1,207 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +ansible_version="${args[0]}" +script="${args[1]}" + +function join { + local IFS="$1"; + shift; + echo "$*"; +} + +test="$(join / "${args[@]:1}")" + +docker images ansible/ansible +docker images quay.io/ansible/* +docker ps + +for container in $(docker ps --format '{{.Image}} {{.ID}}' | grep -v -e '^drydock/' -e '^quay.io/ansible/azure-pipelines-test-container:' | sed 's/^.* //'); do + docker rm -f "${container}" || true # ignore errors +done + +docker ps + +if [ -d /home/shippable/cache/ ]; then + ls -la /home/shippable/cache/ +fi + +command -v python +python -V + +function retry +{ + # shellcheck disable=SC2034 + for repetition in 1 2 3; do + set +e + "$@" + result=$? + set -e + if [ ${result} == 0 ]; then + return ${result} + fi + echo "@* -> ${result}" + done + echo "Command '@*' failed 3 times!" 
+ exit -1 +} + +command -v pip +pip --version +pip list --disable-pip-version-check +if [ "${ansible_version}" == "devel" ]; then + retry pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check +else + retry pip install "https://github.com/ansible/ansible/archive/stable-${ansible_version}.tar.gz" --disable-pip-version-check +fi + +if [ "${SHIPPABLE_BUILD_ID:-}" ]; then + export ANSIBLE_COLLECTIONS_PATHS="${HOME}/.ansible" + SHIPPABLE_RESULT_DIR="$(pwd)/shippable" + TEST_DIR="${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/postgresql" + mkdir -p "${TEST_DIR}" + cp -aT "${SHIPPABLE_BUILD_DIR}" "${TEST_DIR}" + cd "${TEST_DIR}" +else + export ANSIBLE_COLLECTIONS_PATHS="${PWD}/../../../" +fi + +# START: HACK install dependencies for integration tests +if [ "${script}" != "units" ] && [ "${script}" != "sanity" ] && [ "${ansible_version}" != "2.9" ]; then + retry ansible-galaxy -vvv collection install community.general +fi +# END: HACK + +export PYTHONIOENCODING='utf-8' + +if [ "${JOB_TRIGGERED_BY_NAME:-}" == "nightly-trigger" ]; then + COVERAGE=yes + COMPLETE=yes +fi + +if [ -n "${COVERAGE:-}" ]; then + # on-demand coverage reporting triggered by setting the COVERAGE environment variable to a non-empty value + export COVERAGE="--coverage" +elif [[ "${COMMIT_MESSAGE}" =~ ci_coverage ]]; then + # on-demand coverage reporting triggered by having 'ci_coverage' in the latest commit message + export COVERAGE="--coverage" +else + # on-demand coverage reporting disabled (default behavior, always-on coverage reporting remains enabled) + export COVERAGE="--coverage-check" +fi + +if [ -n "${COMPLETE:-}" ]; then + # disable change detection triggered by setting the COMPLETE environment variable to a non-empty value + export CHANGED="" +elif [[ "${COMMIT_MESSAGE}" =~ ci_complete ]]; then + # disable change detection triggered by having 'ci_complete' in the latest commit message + export CHANGED="" +else + # enable change detection 
(default behavior) + export CHANGED="--changed" +fi + +if [ "${IS_PULL_REQUEST:-}" == "true" ]; then + # run unstable tests which are targeted by focused changes on PRs + export UNSTABLE="--allow-unstable-changed" +else + # do not run unstable tests outside PRs + export UNSTABLE="" +fi + +# remove empty core/extras module directories from PRs created prior to the repo-merge +find plugins -type d -empty -print -delete + +function cleanup +{ + # for complete on-demand coverage generate a report for all files with no coverage on the "sanity/5" job so we only have one copy + if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ] && [ "${test}" == "sanity/5" ]; then + stub="--stub" + # trigger coverage reporting for stubs even if no other coverage data exists + mkdir -p tests/output/coverage/ + else + stub="" + fi + + if [ -d tests/output/coverage/ ]; then + if find tests/output/coverage/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then + process_coverage='yes' # process existing coverage files + elif [ "${stub}" ]; then + process_coverage='yes' # process coverage when stubs are enabled + else + process_coverage='' + fi + + if [ "${process_coverage}" ]; then + # use python 3.7 for coverage to avoid running out of memory during coverage xml processing + # only use it for coverage to avoid the additional overhead of setting up a virtual environment for a potential no-op job + virtualenv --python /usr/bin/python3.7 ~/ansible-venv + set +ux + . 
~/ansible-venv/bin/activate + set -ux + + # shellcheck disable=SC2086 + ansible-test coverage xml --color -v --requirements --group-by command --group-by version ${stub:+"$stub"} + cp -a tests/output/reports/coverage=*.xml "$SHIPPABLE_RESULT_DIR/codecoverage/" + + if [ "${ansible_version}" != "2.9" ]; then + # analyze and capture code coverage aggregated by integration test target + ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json" + fi + + # upload coverage report to codecov.io only when using complete on-demand coverage + if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ]; then + for file in tests/output/reports/coverage=*.xml; do + flags="${file##*/coverage=}" + flags="${flags%-powershell.xml}" + flags="${flags%.xml}" + # remove numbered component from stub files when converting to tags + flags="${flags//stub-[0-9]*/stub}" + flags="${flags//=/,}" + flags="${flags//[^a-zA-Z0-9_,]/_}" + + bash <(curl -s https://codecov.io/bash) \ + -f "${file}" \ + -F "${flags}" \ + -n "${test}" \ + -t 00c0f9fa-ac1b-43d3-addf-99de803232c1 \ + -X coveragepy \ + -X gcov \ + -X fix \ + -X search \ + -X xcode \ + || echo "Failed to upload code coverage report to codecov.io: ${file}" + done + fi + fi + fi + + if [ -d tests/output/junit/ ]; then + cp -aT tests/output/junit/ "$SHIPPABLE_RESULT_DIR/testresults/" + fi + + if [ -d tests/output/data/ ]; then + cp -a tests/output/data/ "$SHIPPABLE_RESULT_DIR/testresults/" + fi + + if [ -d tests/output/bot/ ]; then + cp -aT tests/output/bot/ "$SHIPPABLE_RESULT_DIR/testresults/" + fi +} + +if [ "${SHIPPABLE_BUILD_ID:-}" ]; then trap cleanup EXIT; fi + +if [[ "${COVERAGE:-}" == "--coverage" ]]; then + timeout=60 +else + timeout=50 +fi + +ansible-test env --dump --show --timeout "${timeout}" --color -v + +if [ "${SHIPPABLE_BUILD_ID:-}" ]; then "tests/utils/shippable/check_matrix.py"; fi +"tests/utils/shippable/${script}.sh" "${test}" diff --git 
a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/timing.py b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/timing.py new file mode 100755 index 00000000..fb538271 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/timing.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3.7 +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import time + +start = time.time() + +sys.stdin.reconfigure(errors='surrogateescape') +sys.stdout.reconfigure(errors='surrogateescape') + +for line in sys.stdin: + seconds = time.time() - start + sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line)) + sys.stdout.flush() diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/timing.sh b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/timing.sh new file mode 100755 index 00000000..77e25783 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/timing.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -o pipefail -eu + +"$@" 2>&1 | "$(dirname "$0")/timing.py" diff --git a/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/units.sh b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/units.sh new file mode 100755 index 00000000..f204dc87 --- /dev/null +++ b/collections-debian-merged/ansible_collections/community/postgresql/tests/utils/shippable/units.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +group="${args[1]}" + +if [[ "${COVERAGE:-}" == "--coverage" ]]; then + timeout=90 +else + timeout=30 +fi + +group1=() + +case "${group}" in + 1) options=("${group1[@]:+${group1[@]}}") ;; +esac + +ansible-test env --timeout 
"${timeout}" --color -v + +# shellcheck disable=SC2086 +ansible-test units --color -v --docker default ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \ + "${options[@]:+${options[@]}}" \ |