summaryrefslogtreecommitdiffstats
path: root/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules
diff options
context:
space:
mode:
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/postgresql/plugins/modules')
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py419
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py673
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py444
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py589
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py1032
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py365
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py229
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py454
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py746
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py171
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py1172
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py683
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py524
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py294
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py628
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py480
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py305
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py718
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py611
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py541
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py998
-rw-r--r--collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py336
22 files changed, 12412 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py
new file mode 100644
index 00000000..6f083f4a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py
@@ -0,0 +1,419 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_copy
+short_description: Copy data between a file/program and a PostgreSQL table
+description:
+- Copy data between a file/program and a PostgreSQL table.
+
+options:
+ copy_to:
+ description:
+ - Copy the contents of a table to a file.
+ - Can also copy the results of a SELECT query.
+ - Mutually exclusive with I(copy_from) and I(dst).
+ type: path
+ aliases: [ to ]
+ copy_from:
+ description:
+ - Copy data from a file to a table (appending the data to whatever is in the table already).
+ - Mutually exclusive with I(copy_to) and I(src).
+ type: path
+ aliases: [ from ]
+ src:
+ description:
+ - Copy data from I(copy_from) to I(src=tablename).
+ - Used with I(copy_to) only.
+ type: str
+ aliases: [ source ]
+ dst:
+ description:
+ - Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file).
+ - Used with I(copy_from) only.
+ type: str
+ aliases: [ destination ]
+ columns:
+ description:
+ - List of column names for the src/dst table to COPY FROM/TO.
+ type: list
+ elements: str
+ aliases: [ column ]
+ program:
+ description:
+ - Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
+ - See block Examples and PROGRAM arg description U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: bool
+ default: no
+ options:
+ description:
+ - Options of COPY command.
+ - See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: dict
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases: [ login_db ]
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supports PostgreSQL version 9.4+.
+- COPY command is only allowed to database superusers.
+- If I(check_mode=yes), we just check the src/dst table availability
+ and return the COPY query that actually has not been executed.
+- If I(check_mode=yes) and the source has been passed as SQL, the module
+  will execute it and roll the transaction back, but pay attention:
+  it can affect database performance (e.g., if SQL collects a lot of data).
+
+seealso:
+- name: COPY command reference
+ description: Complete reference of the COPY command documentation.
+ link: https://www.postgresql.org/docs/current/sql-copy.html
+
+author:
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
+ community.postgresql.postgresql_copy:
+ copy_from: /tmp/data.txt
+ dst: acme
+
+- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
+ community.postgresql.postgresql_copy:
+ copy_from: /tmp/data.csv
+ dst: acme
+ columns: id,name
+ options:
+ format: csv
+
+- name: >
+ Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
+ The NULL values are specified as N
+ community.postgresql.postgresql_copy:
+ copy_from: /tmp/data.csv
+ dst: bar
+ options:
+ delimiter: '|'
+ null: 'N'
+
+- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
+ community.postgresql.postgresql_copy:
+ src: acme
+ copy_to: /tmp/data.txt
+
+- name: Copy data from SELECT query to/tmp/data.csv in CSV format
+ community.postgresql.postgresql_copy:
+ src: 'SELECT * FROM acme'
+ copy_to: /tmp/data.csv
+ options:
+ format: csv
+
+- name: Copy CSV data from my_table to gzip
+ community.postgresql.postgresql_copy:
+ src: my_table
+ copy_to: 'gzip > /tmp/data.csv.gz'
+ program: yes
+ options:
+ format: csv
+
+- name: >
+ Copy data from columns id, name of table bar to /tmp/data.txt.
+ Output format is text, vertical-bar-separated, NULL as N
+ community.postgresql.postgresql_copy:
+ src: bar
+ columns:
+ - id
+ - name
+ copy_to: /tmp/data.csv
+ options:
+ delimiter: '|'
+ null: 'N'
+'''
+
+RETURN = r'''
+queries:
+  description: List of executed queries.
+  returned: always
+  type: list
+  sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
+src:
+ description: Data source.
+ returned: always
+ type: str
+ sample: "mytable"
+dst:
+ description: Data destination.
+ returned: always
+ type: str
+ sample: "/tmp/data.csv"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+class PgCopyData(object):
+
+    """Implements behavior of COPY FROM, COPY TO PostgreSQL command.
+
+    Arguments:
+        module (AnsibleModule) -- object of AnsibleModule class
+        cursor (cursor) -- cursor object of psycopg2 library
+
+    Attributes:
+        module (AnsibleModule) -- object of AnsibleModule class
+        cursor (cursor) -- cursor object of psycopg2 library
+        changed (bool) -- something was changed after execution or not
+        executed_queries (list) -- executed queries
+        dst (str) -- data destination table (when copy_from)
+        src (str) -- data source table (when copy_to)
+        opt_need_quotes (tuple) -- values of these options must be passed
+            to SQL in quotes
+    """
+
+    def __init__(self, module, cursor):
+        self.module = module
+        self.cursor = cursor
+        self.executed_queries = []
+        self.changed = False
+        self.dst = ''
+        self.src = ''
+        # COPY options whose values are string-like and therefore must be
+        # single-quoted when rendered into the options clause by
+        # __transform_options() (compared case-insensitively via key.upper()):
+        self.opt_need_quotes = (
+            'DELIMITER',
+            'NULL',
+            'QUOTE',
+            'ESCAPE',
+            'ENCODING',
+        )
+
+    def copy_from(self):
+        """Implements COPY FROM command behavior."""
+        self.src = self.module.params['copy_from']
+        self.dst = self.module.params['dst']
+
+        # Destination table name is quoted as an identifier; the source path
+        # (or program, when program=yes) is interpolated as a quoted literal.
+        query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')]
+
+        if self.module.params.get('columns'):
+            query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
+
+        query_fragments.append('FROM')
+
+        if self.module.params.get('program'):
+            query_fragments.append('PROGRAM')
+
+        query_fragments.append("'%s'" % self.src)
+
+        if self.module.params.get('options'):
+            query_fragments.append(self.__transform_options())
+
+        # Note: check mode is implemented here:
+        if self.module.check_mode:
+            # Only verify the destination table is reachable and report the
+            # query that would have run; nothing is copied.
+            self.changed = self.__check_table(self.dst)
+
+            if self.changed:
+                self.executed_queries.append(' '.join(query_fragments))
+        else:
+            if exec_sql(self, ' '.join(query_fragments), return_bool=True):
+                self.changed = True
+
+    def copy_to(self):
+        """Implements COPY TO command behavior."""
+        self.src = self.module.params['src']
+        self.dst = self.module.params['copy_to']
+
+        # Substring test decides whether src is a SELECT statement or a
+        # table name (a table literally named like "...SELECT ..." would be
+        # misclassified, but COPY (query) requires this distinction).
+        if 'SELECT ' in self.src.upper():
+            # If src is SQL SELECT statement:
+            query_fragments = ['COPY (%s)' % self.src]
+        else:
+            # If src is a table:
+            query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')]
+
+        if self.module.params.get('columns'):
+            query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
+
+        query_fragments.append('TO')
+
+        if self.module.params.get('program'):
+            query_fragments.append('PROGRAM')
+
+        query_fragments.append("'%s'" % self.dst)
+
+        if self.module.params.get('options'):
+            query_fragments.append(self.__transform_options())
+
+        # Note: check mode is implemented here:
+        if self.module.check_mode:
+            self.changed = self.__check_table(self.src)
+
+            if self.changed:
+                self.executed_queries.append(' '.join(query_fragments))
+        else:
+            if exec_sql(self, ' '.join(query_fragments), return_bool=True):
+                self.changed = True
+
+    def __transform_options(self):
+        """Transform options dict into a suitable string."""
+        # Mutates module.params['options'] in place to add quoting where
+        # required, then renders "(key value, ...)".
+        for (key, val) in iteritems(self.module.params['options']):
+            if key.upper() in self.opt_need_quotes:
+                self.module.params['options'][key] = "'%s'" % val
+
+        opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])]
+        return '(%s)' % ', '.join(opt)
+
+    def __check_table(self, table):
+        """Check table or SQL in transaction mode for check_mode.
+
+        Return True if it is OK.
+
+        Arguments:
+            table (str) - Table name that needs to be checked.
+                It can be SQL SELECT statement that was passed
+                instead of the table name.
+        """
+        if 'SELECT ' in table.upper():
+            # In this case table is actually SQL SELECT statement.
+            # If SQL fails, it's handled by exec_sql():
+            exec_sql(self, table, add_to_executed=False)
+            # If exec_sql was passed, it means all is OK:
+            return True
+
+        exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
+                 add_to_executed=False)
+        # If SQL was executed successfully:
+        return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    """Entry point: build the arg spec, validate input, connect, and run COPY."""
+    argument_spec = postgres_common_argument_spec()
+    argument_spec.update(
+        copy_to=dict(type='path', aliases=['to']),
+        copy_from=dict(type='path', aliases=['from']),
+        src=dict(type='str', aliases=['source']),
+        dst=dict(type='str', aliases=['destination']),
+        columns=dict(type='list', elements='str', aliases=['column']),
+        options=dict(type='dict'),
+        program=dict(type='bool', default=False),
+        db=dict(type='str', aliases=['login_db']),
+        session_role=dict(type='str'),
+        trust_input=dict(type='bool', default=True),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[
+            ['copy_from', 'copy_to'],
+            ['copy_from', 'src'],
+            ['copy_to', 'dst'],
+        ]
+    )
+
+    if not module.params['trust_input']:
+        # Check input for potentially dangerous elements:
+        opt_list = None
+        if module.params['options']:
+            # Render options as "key value" strings so check_input can scan
+            # both keys and values:
+            opt_list = ['%s %s' % (key, val) for (key, val) in iteritems(module.params['options'])]
+
+        check_input(module,
+                    module.params['copy_to'],
+                    module.params['copy_from'],
+                    module.params['src'],
+                    module.params['dst'],
+                    opt_list,
+                    module.params['columns'],
+                    module.params['session_role'])
+
+    # Note: we don't need to check mutually exclusive params here, because they are
+    # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
+    if module.params.get('copy_from') and not module.params.get('dst'):
+        module.fail_json(msg='dst param is necessary with copy_from')
+
+    elif module.params.get('copy_to') and not module.params.get('src'):
+        module.fail_json(msg='src param is necessary with copy_to')
+
+    # Connect to DB and make cursor object:
+    # autocommit=False so check mode can roll the transaction back below.
+    conn_params = get_conn_params(module, module.params)
+    db_connection = connect_to_db(module, conn_params, autocommit=False)
+    cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+    ##############
+    # Create the object and do main job:
+    data = PgCopyData(module, cursor)
+
+    # Note: parameters like dst, src, etc. are got
+    # from module object into data object of PgCopyData class.
+    # Therefore not need to pass args to the methods below.
+    # Note: check mode is implemented inside the methods below
+    # by checking passed module.check_mode arg.
+    if module.params.get('copy_to'):
+        data.copy_to()
+
+    elif module.params.get('copy_from'):
+        data.copy_from()
+
+    # Finish:
+    if module.check_mode:
+        db_connection.rollback()
+    else:
+        db_connection.commit()
+
+    cursor.close()
+    db_connection.close()
+
+    # Return some values:
+    module.exit_json(
+        changed=data.changed,
+        queries=data.executed_queries,
+        src=data.src,
+        dst=data.dst,
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py
new file mode 100644
index 00000000..4a50176a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_db.py
@@ -0,0 +1,673 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_db
+short_description: Add or remove PostgreSQL databases from a remote host
+description:
+ - Add or remove PostgreSQL databases from a remote host.
+options:
+ name:
+ description:
+ - Name of the database to add or remove.
+ type: str
+ required: true
+ aliases: [ db ]
+ port:
+ description:
+ - Database port to connect (if needed).
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ owner:
+ description:
+ - Name of the role to set as owner of the database.
+ type: str
+ template:
+ description:
+ - Template used to create the database.
+ type: str
+ encoding:
+ description:
+ - Encoding of the database.
+ type: str
+ lc_collate:
+ description:
+ - Collation order (LC_COLLATE) to use in the database
+ must match collation order of template database unless C(template0) is used as template.
+ type: str
+ lc_ctype:
+ description:
+ - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...).
+ - Must match LC_CTYPE of template database unless C(template0) is used as template.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database state.
+ - C(present) implies that the database should be created if necessary.
+ - C(absent) implies that the database should be removed if present.
+ - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+ Note that in some PostgreSQL versions of pg_dump, which is an embedded PostgreSQL utility and is used by the module,
+ returns rc 0 even when errors occurred (e.g. the connection is forbidden by pg_hba.conf, etc.),
+ so the module returns changed=True but the dump has not actually been done. Please, be sure that your version of
+ pg_dump returns rc 1 in this case.
+ - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4).
+ - The format of the backup will be detected based on the target name.
+ - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz).
+ - Supported formats for dump and restore include C(.sql) and C(.tar).
+ - "Restore program is selected by target file format: C(.tar) and C(.pgc) are handled by pg_restore, other with pgsql."
+ type: str
+ choices: [ absent, dump, present, restore ]
+ default: present
+ target:
+ description:
+ - File to back up or restore from.
+ - Used when I(state) is C(dump) or C(restore).
+ type: path
+ target_opts:
+ description:
+ - Additional arguments for pg_dump or restore program (pg_restore or psql, depending on target's format).
+ - Used when I(state) is C(dump) or C(restore).
+ type: str
+ maintenance_db:
+ description:
+ - The value specifies the initial database (which is also called as maintenance DB) that Ansible connects to.
+ type: str
+ default: postgres
+ conn_limit:
+ description:
+ - Specifies the database connection limit.
+ type: str
+ tablespace:
+ description:
+ - The tablespace to set for the database
+ U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+ - If you want to move the database back to the default tablespace,
+ explicitly set this to pg_default.
+ type: path
+ dump_extra_args:
+ description:
+ - Provides additional arguments when I(state) is C(dump).
+ - Cannot be used with dump-file-format-related arguments like ``--format=d``.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(owner), I(conn_limit), I(encoding),
+ I(db), I(template), I(tablespace), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: CREATE DATABASE reference
+ description: Complete reference of the CREATE DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createdatabase.html
+- name: DROP DATABASE reference
+ description: Complete reference of the DROP DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
+- name: pg_dump reference
+ description: Complete reference of pg_dump documentation.
+ link: https://www.postgresql.org/docs/current/app-pgdump.html
+- name: pg_restore reference
+ description: Complete reference of pg_restore documentation.
+ link: https://www.postgresql.org/docs/current/app-pgrestore.html
+- module: community.postgresql.postgresql_tablespace
+- module: community.postgresql.postgresql_info
+- module: community.postgresql.postgresql_ping
+notes:
+- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
+- Supports C(check_mode).
+author: "Ansible Core Team"
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name "acme"
+ community.postgresql.postgresql_db:
+ name: acme
+
+# Note: If a template different from "template0" is specified,
+# encoding and locale settings must match those of the template.
+- name: Create a new database with name "acme" and specific encoding and locale # settings
+ community.postgresql.postgresql_db:
+ name: acme
+ encoding: UTF-8
+ lc_collate: de_DE.UTF-8
+ lc_ctype: de_DE.UTF-8
+ template: template0
+
+# Note: Default limit for the number of concurrent connections to
+# a specific database is "-1", which means "unlimited"
+- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
+ community.postgresql.postgresql_db:
+ name: acme
+ conn_limit: "100"
+
+- name: Dump an existing database to a file
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+
+- name: Dump an existing database to a file excluding the test table
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ dump_extra_args: --exclude-table=test
+
+- name: Dump an existing database to a file (with compression)
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql.gz
+
+- name: Dump a single schema for an existing database
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ target_opts: "-n public"
+
+- name: Dump only table1 and table2 from the acme database
+ community.postgresql.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/table1_table2.sql
+ target_opts: "-t table1 -t table2"
+
+# Note: In the example below, if database foo exists and has another tablespace
+# the tablespace will be changed to foo. Access to the database will be locked
+# until the copying of database files is finished.
+- name: Create a new database called foo in tablespace bar
+ community.postgresql.postgresql_db:
+ name: foo
+ tablespace: bar
+'''
+
+RETURN = r'''
+executed_commands:
+ description: List of commands which tried to run.
+ returned: always
+ type: list
+ sample: ["CREATE DATABASE acme"]
+ version_added: '0.2.0'
+'''
+
+
+import os
+import subprocess
+import traceback
+
+try:
+ import psycopg2
+ import psycopg2.extras
+except ImportError:
+ HAS_PSYCOPG2 = False
+else:
+ HAS_PSYCOPG2 = True
+
+import ansible_collections.community.postgresql.plugins.module_utils.postgres as pgutils
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ SQLParseError,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+
+class NotSupportedError(Exception):
+    """Raised when a requested change cannot be applied to an existing database
+    (see db_create: encoding, LC_COLLATE and LC_CTYPE cannot be altered)."""
+    pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def set_owner(cursor, db, owner):
+    """ALTER DATABASE db OWNER TO owner; record the query and return True."""
+    # db and owner are interpolated as double-quoted identifiers; they are
+    # only validated upstream when trust_input=no.
+    query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (db, owner)
+    executed_commands.append(query)
+    cursor.execute(query)
+    return True
+
+
+def set_conn_limit(cursor, db, conn_limit):
+    """ALTER DATABASE db CONNECTION LIMIT conn_limit; record the query and return True."""
+    query = 'ALTER DATABASE "%s" CONNECTION LIMIT %s' % (db, conn_limit)
+    executed_commands.append(query)
+    cursor.execute(query)
+    return True
+
+
+def get_encoding_id(cursor, encoding):
+    """Return the numeric encoding id for an encoding name via pg_char_to_encoding()."""
+    query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
+    cursor.execute(query, {'encoding': encoding})
+    return cursor.fetchone()['encoding_id']
+
+
+def get_db_info(cursor, db):
+    """Return one row with owner, encoding, encoding_id, lc_collate, lc_ctype,
+    conn_limit and tablespace for database *db* (None if no such database)."""
+    query = """
+    SELECT rolname AS owner,
+    pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
+    datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
+    spcname AS tablespace
+    FROM pg_database
+    JOIN pg_roles ON pg_roles.oid = pg_database.datdba
+    JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
+    WHERE datname = %(db)s
+    """
+    cursor.execute(query, {'db': db})
+    return cursor.fetchone()
+
+
+def db_exists(cursor, db):
+    """Return True if exactly one pg_database row named *db* exists."""
+    query = "SELECT * FROM pg_database WHERE datname=%(db)s"
+    cursor.execute(query, {'db': db})
+    return cursor.rowcount == 1
+
+
+def db_delete(cursor, db):
+    """DROP DATABASE db if it exists. Return True if dropped, False if absent."""
+    if db_exists(cursor, db):
+        query = 'DROP DATABASE "%s"' % db
+        executed_commands.append(query)
+        cursor.execute(query)
+        return True
+    else:
+        return False
+
+
+def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+    """Create database *db* or reconcile an existing one.
+
+    If *db* is absent, CREATE DATABASE with the given attributes and return True.
+    If it exists, raise NotSupportedError for immutable attributes
+    (encoding, LC_COLLATE, LC_CTYPE) that differ; otherwise adjust
+    owner, connection limit and tablespace as needed and return whether
+    anything changed.
+    """
+    # Locale/encoding values go through the driver as bound parameters;
+    # identifiers (db, owner, template, tablespace) are double-quoted inline.
+    params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
+    if not db_exists(cursor, db):
+        query_fragments = ['CREATE DATABASE "%s"' % db]
+        if owner:
+            query_fragments.append('OWNER "%s"' % owner)
+        if template:
+            query_fragments.append('TEMPLATE "%s"' % template)
+        if encoding:
+            query_fragments.append('ENCODING %(enc)s')
+        if lc_collate:
+            query_fragments.append('LC_COLLATE %(collate)s')
+        if lc_ctype:
+            query_fragments.append('LC_CTYPE %(ctype)s')
+        if tablespace:
+            query_fragments.append('TABLESPACE "%s"' % tablespace)
+        if conn_limit:
+            query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+        query = ' '.join(query_fragments)
+        # mogrify renders the final SQL (with bound values) for reporting:
+        executed_commands.append(cursor.mogrify(query, params))
+        cursor.execute(query, params)
+        return True
+    else:
+        db_info = get_db_info(cursor, db)
+        if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+            raise NotSupportedError(
+                'Changing database encoding is not supported. '
+                'Current encoding: %s' % db_info['encoding']
+            )
+        elif lc_collate and lc_collate != db_info['lc_collate']:
+            raise NotSupportedError(
+                'Changing LC_COLLATE is not supported. '
+                'Current LC_COLLATE: %s' % db_info['lc_collate']
+            )
+        elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+            raise NotSupportedError(
+                'Changing LC_CTYPE is not supported.'
+                'Current LC_CTYPE: %s' % db_info['lc_ctype']
+            )
+        else:
+            changed = False
+
+            # NOTE(review): each reassignment below overwrites `changed`;
+            # set_* helpers always return True, so the final value reflects
+            # the last attribute that differed -- confirm all three can't
+            # mask an earlier True (they can't here, since each returns True).
+            if owner and owner != db_info['owner']:
+                changed = set_owner(cursor, db, owner)
+
+            if conn_limit and conn_limit != str(db_info['conn_limit']):
+                changed = set_conn_limit(cursor, db, conn_limit)
+
+            if tablespace and tablespace != db_info['tablespace']:
+                changed = set_tablespace(cursor, db, tablespace)
+
+            return changed
+
+
+def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+    """Return True if database *db* exists and every requested (non-empty)
+    attribute matches its current value; used for check mode."""
+    if not db_exists(cursor, db):
+        return False
+    else:
+        db_info = get_db_info(cursor, db)
+        # Empty/falsy arguments mean "not requested" and are skipped:
+        if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+            return False
+        elif lc_collate and lc_collate != db_info['lc_collate']:
+            return False
+        elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+            return False
+        elif owner and owner != db_info['owner']:
+            return False
+        elif conn_limit and conn_limit != str(db_info['conn_limit']):
+            return False
+        elif tablespace and tablespace != db_info['tablespace']:
+            return False
+        else:
+            return True
+
+
+def db_dump(module, target, target_opts="",
+            db=None,
+            dump_extra_args=None,
+            user=None,
+            password=None,
+            host=None,
+            port=None,
+            **kw):
+    """Dump *db* to *target* with pg_dump, selecting format and optional
+    compression program from the target's file extension.
+
+    Returns the (rc, stdout, stderr, cmd) tuple from do_with_password().
+    """
+
+    flags = login_flags(db, host, port, user, db_prefix=False)
+    cmd = module.get_bin_path('pg_dump', True)
+    comp_prog_path = None
+
+    # Only the last extension is inspected, so e.g. "x.sql.gz" selects
+    # compression but plain-text format:
+    if os.path.splitext(target)[-1] == '.tar':
+        flags.append(' --format=t')
+    elif os.path.splitext(target)[-1] == '.pgc':
+        flags.append(' --format=c')
+    if os.path.splitext(target)[-1] == '.gz':
+        if module.get_bin_path('pigz'):
+            # Prefer parallel gzip when available:
+            comp_prog_path = module.get_bin_path('pigz', True)
+        else:
+            comp_prog_path = module.get_bin_path('gzip', True)
+    elif os.path.splitext(target)[-1] == '.bz2':
+        comp_prog_path = module.get_bin_path('bzip2', True)
+    elif os.path.splitext(target)[-1] == '.xz':
+        comp_prog_path = module.get_bin_path('xz', True)
+
+    cmd += "".join(flags)
+
+    if dump_extra_args:
+        cmd += " {0} ".format(dump_extra_args)
+
+    if target_opts:
+        cmd += " {0} ".format(target_opts)
+
+    if comp_prog_path:
+        # Use a fifo to be notified of an error in pg_dump
+        # Using shell pipe has no way to return the code of the first command
+        # in a portable way.
+        fifo = os.path.join(module.tmpdir, 'pg_fifo')
+        os.mkfifo(fifo)
+        # Compressor reads from the fifo in the background while pg_dump
+        # writes into it; the shell's exit status is pg_dump's.
+        cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
+    else:
+        cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
+
+    return do_with_password(module, cmd, password)
+
+
+def db_restore(module, target, target_opts="",
+               db=None,
+               user=None,
+               password=None,
+               host=None,
+               port=None,
+               **kw):
+    """Restore *db* from *target*; psql for .sql, pg_restore for .tar/.pgc,
+    and a decompressor pipe for .gz/.bz2/.xz.
+
+    Returns a (rc, stdout, stderr, cmd) tuple; the cmd is masked ('****')
+    on the compressed-pipe path.
+    """
+
+    flags = login_flags(db, host, port, user)
+    comp_prog_path = None
+    cmd = module.get_bin_path('psql', True)
+
+    if os.path.splitext(target)[-1] == '.sql':
+        flags.append(' --file={0}'.format(target))
+
+    elif os.path.splitext(target)[-1] == '.tar':
+        flags.append(' --format=Tar')
+        cmd = module.get_bin_path('pg_restore', True)
+
+    elif os.path.splitext(target)[-1] == '.pgc':
+        flags.append(' --format=Custom')
+        cmd = module.get_bin_path('pg_restore', True)
+
+    elif os.path.splitext(target)[-1] == '.gz':
+        comp_prog_path = module.get_bin_path('zcat', True)
+
+    elif os.path.splitext(target)[-1] == '.bz2':
+        comp_prog_path = module.get_bin_path('bzcat', True)
+
+    elif os.path.splitext(target)[-1] == '.xz':
+        comp_prog_path = module.get_bin_path('xzcat', True)
+
+    cmd += "".join(flags)
+    if target_opts:
+        cmd += " {0} ".format(target_opts)
+
+    if comp_prog_path:
+        env = os.environ.copy()
+        if password:
+            # NOTE(review): this replaces the environment copied above with a
+            # dict holding only PGPASSWORD, so PATH etc. are not inherited by
+            # the restore process -- confirm this is intended.
+            env = {"PGPASSWORD": password}
+        p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
+        (stdout2, stderr2) = p2.communicate()
+        # Close our handle so the decompressor sees a broken pipe if the
+        # restore process exited early:
+        p1.stdout.close()
+        p1.wait()
+        if p1.returncode != 0:
+            # Decompression failed; report its stderr rather than psql's.
+            stderr1 = p1.stderr.read()
+            return p1.returncode, '', stderr1, 'cmd: ****'
+        else:
+            return p2.returncode, '', stderr2, 'cmd: ****'
+    else:
+        cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
+
+    return do_with_password(module, cmd, password)
+
+
+def login_flags(db, host, port, user, db_prefix=True):
+    """
+    returns a list of connection argument strings each prefixed
+    with a space and quoted where necessary to later be combined
+    in a single shell string with `"".join(rv)`
+
+    db_prefix determines if "--dbname" is prefixed to the db argument,
+    since the argument was introduced in 9.3.
+    """
+    flags = []
+    if db:
+        if db_prefix:
+            flags.append(' --dbname={0}'.format(shlex_quote(db)))
+        else:
+            flags.append(' {0}'.format(shlex_quote(db)))
+    # NOTE(review): only db is shell-quoted; host/port/user are interpolated
+    # as-is -- presumably assumed shell-safe, verify against callers.
+    if host:
+        flags.append(' --host={0}'.format(host))
+    if port:
+        flags.append(' --port={0}'.format(port))
+    if user:
+        flags.append(' --username={0}'.format(user))
+    return flags
+
+
+def do_with_password(module, cmd, password):
+    """Run shell command *cmd*, exporting the password via PGPASSWORD if set."""
+    env = {}
+    if password:
+        env = {"PGPASSWORD": password}
+    executed_commands.append(cmd)
+    # NOTE(review): AnsibleModule.run_command() returns (rc, stdout, stderr);
+    # the local names below are transposed, so the tuple returned here is
+    # actually (rc, stdout, stderr, cmd) despite the labels -- callers must
+    # unpack accordingly.
+    rc, stderr, stdout = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
+    return rc, stderr, stdout, cmd
+
+
def set_tablespace(cursor, db, tablespace):
    """Move database *db* to *tablespace* via ALTER DATABASE.

    The executed statement is recorded in executed_commands.
    Always returns True so the caller reports a change.
    """
    sql = 'ALTER DATABASE "{0}" SET TABLESPACE "{1}"'.format(db, tablespace)
    executed_commands.append(sql)
    cursor.execute(sql)
    return True
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_db module.

    Depending on ``state`` it creates/drops a database (present/absent)
    or shells out to pg_dump/pg_restore (dump/restore), then exits via
    module.exit_json()/fail_json().
    """
    # Build the argument spec on top of the shared postgres options
    # (login_user, login_password, port, ssl_mode, ca_cert, ...).
    argument_spec = pgutils.postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type='str', required=True, aliases=['name']),
        owner=dict(type='str', default=''),
        template=dict(type='str', default=''),
        encoding=dict(type='str', default=''),
        lc_collate=dict(type='str', default=''),
        lc_ctype=dict(type='str', default=''),
        state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
        target=dict(type='path', default=''),
        target_opts=dict(type='str', default=''),
        maintenance_db=dict(type='str', default="postgres"),
        session_role=dict(type='str'),
        conn_limit=dict(type='str', default=''),
        tablespace=dict(type='path', default=''),
        dump_extra_args=dict(type='str', default=None),
        trust_input=dict(type='bool', default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    db = module.params["db"]
    owner = module.params["owner"]
    template = module.params["template"]
    encoding = module.params["encoding"]
    lc_collate = module.params["lc_collate"]
    lc_ctype = module.params["lc_ctype"]
    target = module.params["target"]
    target_opts = module.params["target_opts"]
    state = module.params["state"]
    changed = False
    maintenance_db = module.params['maintenance_db']
    session_role = module.params["session_role"]
    conn_limit = module.params['conn_limit']
    tablespace = module.params['tablespace']
    dump_extra_args = module.params['dump_extra_args']
    trust_input = module.params['trust_input']

    # Check input
    if not trust_input:
        # Check input for potentially dangerous elements:
        # several of these values are later interpolated into SQL or
        # shell commands, so reject suspicious characters up front.
        check_input(module, owner, conn_limit, encoding, db, template, tablespace, session_role)

    # dump/restore talk to the server through the CLI tools only, so no
    # psycopg2 connection (and no psycopg2 requirement) is needed then.
    raw_connection = state in ("dump", "restore")

    if not raw_connection:
        pgutils.ensure_required_libs(module)

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }
    kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
              if k in params_map and v != '' and v is not None)

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"

    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]

    # Default dump/restore target: <cwd>/<db>.sql
    if target == "":
        target = "{0}/{1}.sql".format(os.getcwd(), db)
    target = os.path.expanduser(target)

    if not raw_connection:
        try:
            db_connection = psycopg2.connect(database=maintenance_db, **kw)

            # Enable autocommit so we can create databases
            # (CREATE DATABASE cannot run inside a transaction block).
            if psycopg2.__version__ >= '2.4.2':
                db_connection.autocommit = True
            else:
                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
            cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

        except TypeError as e:
            # Old psycopg2 raises TypeError for the unknown sslrootcert
            # keyword; turn that into a helpful message.
            if 'sslrootcert' in e.args[0]:
                module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
                                 exception=traceback.format_exc())
            module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())

        except Exception as e:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())

        if session_role:
            try:
                cursor.execute('SET ROLE "%s"' % session_role)
            except Exception as e:
                module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())

    try:
        # Check mode exits here for every state.  NOTE(review): for
        # dump/restore this reports changed=False without simulating
        # anything -- confirm that is the intended check-mode contract.
        if module.check_mode:
            if state == "absent":
                changed = db_exists(cursor, db)
            elif state == "present":
                changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
            module.exit_json(changed=changed, db=db, executed_commands=executed_commands)

        if state == "absent":
            try:
                changed = db_delete(cursor, db)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state == "present":
            try:
                changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state in ("dump", "restore"):
            # Pre-ternary "cond and a or b" idiom: pick the helper.
            method = state == "dump" and db_dump or db_restore
            try:
                if state == 'dump':
                    rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
                else:
                    rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)

                if rc != 0:
                    module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
                else:
                    # dump/restore is always reported as a change.
                    module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
                                     executed_commands=executed_commands)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except SystemExit:
        # Avoid catching this on Python 2.4
        raise
    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())

    module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py
new file mode 100644
index 00000000..7514a0c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ext.py
@@ -0,0 +1,444 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ext
+short_description: Add or remove PostgreSQL extensions from a database
+description:
+- Add or remove PostgreSQL extensions from a database.
+options:
+ name:
+ description:
+ - Name of the extension to add or remove.
+ required: true
+ type: str
+ aliases:
+ - ext
+ db:
+ description:
+ - Name of the database to add or remove the extension to/from.
+ required: true
+ type: str
+ aliases:
+ - login_db
+ schema:
+ description:
+ - Name of the schema to add the extension to.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database extension state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ cascade:
+ description:
+ - Automatically install/remove any extensions that this extension depends on
+ that are not already installed/removed (supported since PostgreSQL 9.6).
+ type: bool
+ default: no
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ version:
+ description:
+ - Extension version to add or update to. Has effect with I(state=present) only.
+ - If not specified, the latest extension version will be created.
+ - It can't downgrade an extension version.
+ When version downgrade is needed, remove the extension and create new one with appropriate version.
+ - Set I(version=latest) to update the extension to the latest available version.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(ext), I(schema),
+ I(version), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL extensions
+ description: General information about PostgreSQL extensions.
+ link: https://www.postgresql.org/docs/current/external-extensions.html
+- name: CREATE EXTENSION reference
+ description: Complete reference of the CREATE EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createextension.html
+- name: ALTER EXTENSION reference
+ description: Complete reference of the ALTER EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterextension.html
+- name: DROP EXTENSION reference
+ description: Complete reference of the DROP EXTENSION command documentation.
+  link: https://www.postgresql.org/docs/current/sql-dropextension.html
+notes:
+- Supports C(check_mode).
+- The default authentication assumes that you are either logging in as
+ or sudo'ing to the C(postgres) account on the host.
+- This module uses I(psycopg2), a Python PostgreSQL database adapter.
+- You must ensure that C(psycopg2) is installed on the host before using this module.
+- If the remote host is the PostgreSQL server (which is the default case),
+ then PostgreSQL must also be installed on the remote host.
+- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
+ and C(python-psycopg2) packages on the remote host before using this module.
+- Incomparable versions, for example PostGIS ``unpackaged``, cannot be installed.
+requirements: [ psycopg2 ]
+author:
+- Daniel Schep (@dschep)
+- Thomas O'Donnell (@andytom)
+- Sandro Santilli (@strk)
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Adds postgis extension to the database acme in the schema foo
+ community.postgresql.postgresql_ext:
+ name: postgis
+ db: acme
+ schema: foo
+
+- name: Removes postgis extension to the database acme
+ community.postgresql.postgresql_ext:
+ name: postgis
+ db: acme
+ state: absent
+
+- name: Adds earthdistance extension to the database template1 cascade
+ community.postgresql.postgresql_ext:
+ name: earthdistance
+ db: template1
+ cascade: true
+
+# In the example below, if earthdistance extension is installed,
+# it will be removed too because it depends on cube:
+- name: Removes cube extension from the database acme cascade
+ community.postgresql.postgresql_ext:
+ name: cube
+ db: acme
+ cascade: yes
+ state: absent
+
+- name: Create extension foo of version 1.2 or update it if it's already created
+ community.postgresql.postgresql_ext:
+ db: acme
+ name: foo
+ version: 1.2
+
+- name: Assuming extension foo is created, update it to the latest version
+ community.postgresql.postgresql_ext:
+ db: acme
+ name: foo
+ version: latest
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["DROP EXTENSION \"acme\""]
+
+'''
+
+import traceback
+
+from distutils.version import LooseVersion
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+def ext_exists(cursor, ext):
+ query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
+ cursor.execute(query, {'ext': ext})
+ return cursor.rowcount == 1
+
+
def ext_delete(cursor, ext, cascade):
    """Drop extension *ext* if it exists.

    Appends the executed statement to executed_queries.
    Returns True when the extension was dropped, False when absent.
    """
    # Guard clause: nothing to do when the extension is not created.
    if not ext_exists(cursor, ext):
        return False

    query = "DROP EXTENSION \"%s\"" % ext
    if cascade:
        query += " CASCADE"
    cursor.execute(query)
    executed_queries.append(query)
    return True
+
+
def ext_update_version(cursor, ext, version):
    """Update extension version.

    Return True if success.

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
        version (str) -- target version; 'latest' omits the TO clause so
            the server picks its default (newest) version
    """
    params = {}
    query = "ALTER EXTENSION \"%s\" UPDATE" % ext

    if version != 'latest':
        # Bind the version as a query parameter rather than
        # interpolating it into the SQL string.
        params['ver'] = version
        query += " TO %(ver)s"

    cursor.execute(query, params)
    # Record the fully-rendered statement for the module's return value.
    executed_queries.append(cursor.mogrify(query, params))
    return True
+
+
def ext_create(cursor, ext, schema, cascade, version):
    """CREATE EXTENSION *ext*, optionally pinned to a schema and version,
    optionally with CASCADE. Returns True.

    The rendered statement is appended to executed_queries.
    """
    params = {}
    pieces = ["CREATE EXTENSION \"%s\"" % ext]

    if schema:
        pieces.append(" WITH SCHEMA \"%s\"" % schema)
    if version:
        pieces.append(" VERSION %(ver)s")
        params['ver'] = version
    if cascade:
        pieces.append(" CASCADE")

    query = "".join(pieces)
    cursor.execute(query, params)
    executed_queries.append(cursor.mogrify(query, params))
    return True
+
+
def ext_get_versions(cursor, ext):
    """
    Get the current created extension version and available versions.

    Return tuple (current_version, [list of available versions]).

    current_version is False when the extension is not created.
    The list contains only versions strictly higher than the current
    one; when the extension is not created it contains all available
    versions.

    Args:
        cursor (cursor) -- cursor object of psycopg2 library
        ext (str) -- extension name
    """
    # 1. Current version; the '0' sentinel sorts below any real version,
    # so every available version counts as "higher" when not created.
    cursor.execute("SELECT extversion FROM pg_catalog.pg_extension "
                   "WHERE extname = %(ext)s", {'ext': ext})
    row = cursor.fetchone()
    current_version = row[0] if row else '0'

    # 2. Versions the server can install, filtered and sorted.
    cursor.execute("SELECT version FROM pg_available_extension_versions "
                   "WHERE name = %(ext)s", {'ext': ext})
    available_versions = parse_ext_versions(current_version, cursor.fetchall())

    if current_version == '0':
        current_version = False

    return (current_version, available_versions)
+
+
def parse_ext_versions(current_version, ext_ver_list):
    """Return a sorted list of versions higher than *current_version*.

    Args:
        current_version (str) -- version to compare candidates against
        ext_ver_list (list) -- rows (dict-like) with a 'version' key

    The special value "unpackaged" (e.g. PostGIS) and any version that
    LooseVersion cannot compare are skipped, per the module docs.
    """
    higher = []

    for row in ext_ver_list:
        candidate = row['version']

        if candidate == 'unpackaged':
            continue

        try:
            if LooseVersion(candidate) > LooseVersion(current_version):
                higher.append(candidate)
        except Exception:
            # Incomparable version string -- silently skip it.
            continue

    return sorted(higher, key=LooseVersion)
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_ext module.

    Creates, updates (version upgrade only) or drops an extension and
    exits via module.exit_json()/fail_json().
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type="str", required=True, aliases=["login_db"]),
        ext=dict(type="str", required=True, aliases=["name"]),
        schema=dict(type="str"),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        cascade=dict(type="bool", default=False),
        session_role=dict(type="str"),
        version=dict(type="str"),
        trust_input=dict(type="bool", default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    ext = module.params["ext"]
    schema = module.params["schema"]
    state = module.params["state"]
    cascade = module.params["cascade"]
    version = module.params["version"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]
    changed = False

    if not trust_input:
        # These values are interpolated into SQL below; screen them.
        check_input(module, ext, schema, version, session_role)

    if version and state == 'absent':
        module.warn("Parameter version is ignored when state=absent")

    conn_params = get_conn_params(module, module.params)
    # Autocommit: CREATE/DROP EXTENSION should not run in a transaction
    # held open by the module.
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    try:
        # Get extension info and available versions:
        # curr_version is False when the extension is not created;
        # available_versions holds only versions higher than current.
        curr_version, available_versions = ext_get_versions(cursor, ext)

        if state == "present":
            # Resolve 'latest' to a concrete version (or '' when there
            # is nothing newer to move to).
            if version == 'latest':
                if available_versions:
                    version = available_versions[-1]
                else:
                    version = ''

            if version:
                # If the specific version is passed and it is not available for update:
                if version not in available_versions:
                    if not curr_version:
                        module.fail_json(msg="Passed version '%s' is not available" % version)

                    elif LooseVersion(curr_version) == LooseVersion(version):
                        # Already at the requested version: no-op.
                        changed = False

                    else:
                        # Downgrades are not supported (see module docs).
                        module.fail_json(msg="Passed version '%s' is lower than "
                                             "the current created version '%s' or "
                                             "the passed version is not available" % (version, curr_version))

                # If the specific version is passed and it is higher than the current version:
                if curr_version:
                    if LooseVersion(curr_version) < LooseVersion(version):
                        if module.check_mode:
                            changed = True
                        else:
                            changed = ext_update_version(cursor, ext, version)

                    # If the specific version is passed and it is already the created one:
                    if curr_version == version:
                        changed = False

                # If the ext doesn't exist but is installed on the server:
                elif not curr_version and available_versions:
                    if module.check_mode:
                        changed = True
                    else:
                        changed = ext_create(cursor, ext, schema, cascade, version)

            # If version is not passed:
            else:
                if not curr_version:
                    # If the ext doesn't exist but is installed on the server:
                    if available_versions:
                        if module.check_mode:
                            changed = True
                        else:
                            # Empty version -> server default (latest).
                            changed = ext_create(cursor, ext, schema, cascade, version)

                    # If the ext doesn't exist and is not installed:
                    else:
                        module.fail_json(msg="Extension %s is not installed" % ext)

        elif state == "absent":
            if curr_version:
                if module.check_mode:
                    changed = True
                else:
                    changed = ext_delete(cursor, ext, cascade)
            else:
                changed = False

    except Exception as e:
        db_connection.close()
        module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())

    db_connection.close()
    module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py
new file mode 100644
index 00000000..d798b74c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_idx.py
@@ -0,0 +1,589 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_idx
+short_description: Create or drop indexes from a PostgreSQL database
+description:
+- Create or drop indexes from a PostgreSQL database.
+
+options:
+ idxname:
+ description:
+ - Name of the index to create or drop.
+ type: str
+ required: true
+ aliases:
+ - name
+ db:
+ description:
+ - Name of database to connect to and where the index will be created/dropped.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ schema:
+ description:
+ - Name of a database schema where the index will be created.
+ type: str
+ state:
+ description:
+ - Index state.
+ - C(present) implies the index will be created if it does not exist.
+ - C(absent) implies the index will be dropped if it exists.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ table:
+ description:
+    - Table to create the index on.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ columns:
+ description:
+ - List of index columns that need to be covered by index.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ aliases:
+ - column
+ cond:
+ description:
+ - Index conditions.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ idxtype:
+ description:
+ - Index type (like btree, gist, gin, etc.).
+ - Mutually exclusive with I(state=absent).
+ type: str
+ aliases:
+ - type
+ concurrent:
+ description:
+ - Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
+ - Pay attention, if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the building process.
+ For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
+    - If the building process was interrupted for any reason when I(concurrent=yes), the index becomes invalid.
+ In this case it should be dropped and created again.
+ - Mutually exclusive with I(cascade=yes).
+ type: bool
+ default: yes
+ unique:
+ description:
+ - Enable unique index.
+ - Only btree currently supports unique indexes.
+ type: bool
+ default: no
+ version_added: '0.2.0'
+ tablespace:
+ description:
+ - Set a tablespace for the index.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the index,
+ and in turn all objects that depend on those objects.
+    - It is used only with I(state=absent).
+ - Mutually exclusive with I(concurrent=yes).
+ type: bool
+ default: no
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(idxname), I(session_role),
+ I(schema), I(table), I(columns), I(tablespace), I(storage_params),
+ I(cond) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+seealso:
+- module: community.postgresql.postgresql_table
+- module: community.postgresql.postgresql_tablespace
+- name: PostgreSQL indexes reference
+ description: General information about PostgreSQL indexes.
+ link: https://www.postgresql.org/docs/current/indexes.html
+- name: CREATE INDEX reference
+ description: Complete reference of the CREATE INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createindex.html
+- name: ALTER INDEX reference
+ description: Complete reference of the ALTER INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterindex.html
+- name: DROP INDEX reference
+ description: Complete reference of the DROP INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropindex.html
+
+notes:
+- Supports C(check_mode).
+- The index building process can affect database performance.
+- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
+
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
+ community.postgresql.postgresql_idx:
+ db: acme
+ table: products
+ columns: id,name
+ name: test_idx
+
+- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
+ community.postgresql.postgresql_idx:
+ db: acme
+ table: products
+ columns:
+ - id
+ - name
+ idxname: test_idx
+ tablespace: ssd
+ storage_params:
+ - fillfactor=90
+
+- name: Create gist index test_gist_idx concurrently on column geo_data of table map
+ community.postgresql.postgresql_idx:
+ db: somedb
+ table: map
+ idxtype: gist
+ columns: geo_data
+ idxname: test_gist_idx
+
+# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
+- name: Create gin index gin0_idx not concurrently on column comment of table test
+ community.postgresql.postgresql_idx:
+ idxname: gin0_idx
+ table: test
+ columns: comment gin_trgm_ops
+ concurrent: no
+ idxtype: gin
+
+- name: Drop btree test_idx concurrently
+ community.postgresql.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+
+- name: Drop test_idx cascade
+ community.postgresql.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+ cascade: yes
+ concurrent: no
+
+- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
+ community.postgresql.postgresql_idx:
+ db: mydb
+ table: test
+ columns: id,comment
+ idxname: test_idx
+ cond: id > 1
+
+- name: Create unique btree index if not exists test_unique_idx on column name of table products
+ community.postgresql.postgresql_idx:
+ db: acme
+ table: products
+ columns: name
+ name: test_unique_idx
+ unique: yes
+ concurrent: no
+'''
+
+RETURN = r'''
+name:
+ description: Index name.
+ returned: always
+ type: str
+ sample: 'foo_idx'
+state:
+ description: Index state.
+ returned: always
+ type: str
+ sample: 'present'
+schema:
+ description: Schema where index exists.
+ returned: always
+ type: str
+ sample: 'public'
+tablespace:
+ description: Tablespace where index exists.
+ returned: always
+ type: str
+ sample: 'ssd'
+query:
+ description: Query that was tried to be executed.
+ returned: always
+ type: str
+ sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
+storage_params:
+ description: Index storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=90" ]
+valid:
+ description: Index validity.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
class Index(object):

    """Class for working with PostgreSQL indexes.

    TODO:
    1. Add possibility to change ownership
    2. Add possibility to change tablespace
    3. Add list called executed_queries (executed_query should be left too)
    4. Use self.module instead of passing arguments to the methods whenever possible

    Args:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        schema (str) -- name of the index schema
        name (str) -- name of the index

    Attrs:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        schema (str) -- name of the index schema
        name (str) -- name of the index
        exists (bool) -- flag the index exists in the DB or not
        info (dict) -- dict that contents information about the index
        executed_query (str) -- executed query
    """

    def __init__(self, module, cursor, schema, name):
        self.name = name
        # Default to the "public" schema when none is given.
        if schema:
            self.schema = schema
        else:
            self.schema = 'public'
        self.module = module
        self.cursor = cursor
        # Baseline info for a non-existent index; overwritten by
        # __exists_in_db() below when the index is found.
        self.info = {
            'name': self.name,
            'state': 'absent',
            'schema': '',
            'tblname': '',
            'tblspace': '',
            'valid': True,
            'storage_params': [],
        }
        self.exists = False
        self.__exists_in_db()
        self.executed_query = ''

    def get_info(self):
        """Refresh index info.

        Return self.info dict.
        """
        self.__exists_in_db()
        return self.info

    def __exists_in_db(self):
        """Check index existence, collect info, add it to self.info dict.

        Return True if the index exists, otherwise, return False.
        """
        # NOTE(review): the lookup filters by index name only, not by
        # schema -- an identically-named index in another schema would
        # match first; confirm whether schema-qualification is needed.
        query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
                 "pi.indisvalid, c.reloptions "
                 "FROM pg_catalog.pg_indexes AS i "
                 "JOIN pg_catalog.pg_class AS c "
                 "ON i.indexname = c.relname "
                 "JOIN pg_catalog.pg_index AS pi "
                 "ON c.oid = pi.indexrelid "
                 "WHERE i.indexname = %(name)s")

        res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
        if res:
            self.exists = True
            self.info = dict(
                name=self.name,
                state='present',
                schema=res[0][0],
                tblname=res[0][1],
                tblspace=res[0][2] if res[0][2] else '',
                valid=res[0][3],
                storage_params=res[0][4] if res[0][4] else [],
            )
            return True

        else:
            self.exists = False
            return False

    def create(self, tblname, idxtype, columns, cond, tblspace,
               storage_params, concurrent=True, unique=False):
        """Create PostgreSQL index.

        Return True if success, otherwise, return False.

        Args:
            tblname (str) -- name of a table for the index
            idxtype (str) -- type of the index like BTREE, BRIN, etc;
                defaults to BTREE when None
            columns (str) -- string of comma-separated columns that need to be covered by index
            cond (str) -- predicate for a partial index (WHERE clause), or None
            tblspace (str) -- tablespace for storing the index
            storage_params (str) -- string of comma-separated storage parameters

        Kwargs:
            concurrent (bool) -- build index in concurrent mode, default True
            unique (bool) -- create a UNIQUE index, default False
        """
        # No-op when an index with this name already exists.
        if self.exists:
            return False

        if idxtype is None:
            idxtype = "BTREE"

        query = 'CREATE'

        if unique:
            query += ' UNIQUE'

        query += ' INDEX'

        if concurrent:
            query += ' CONCURRENTLY'

        query += ' "%s"' % self.name

        query += ' ON "%s"."%s" ' % (self.schema, tblname)

        # columns/cond/storage_params are interpolated verbatim;
        # they are screened upstream by check_input() when
        # trust_input=no.
        query += 'USING %s (%s)' % (idxtype, columns)

        if storage_params:
            query += ' WITH (%s)' % storage_params

        if tblspace:
            query += ' TABLESPACE "%s"' % tblspace

        if cond:
            query += ' WHERE %s' % cond

        self.executed_query = query

        return exec_sql(self, query, return_bool=True, add_to_executed=False)

    def drop(self, cascade=False, concurrent=True):
        """Drop PostgreSQL index.

        Return True if success, otherwise, return False.

        Kwargs:
            cascade (bool) -- automatically drop objects that depend on the index,
                default False
            concurrent (bool) -- drop index in concurrent mode, default True
        """
        # No-op when the index does not exist.
        if not self.exists:
            return False

        query = 'DROP INDEX'

        if concurrent:
            query += ' CONCURRENTLY'

        query += ' "%s"."%s"' % (self.schema, self.name)

        if cascade:
            query += ' CASCADE'

        self.executed_query = query

        return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_idx module.

    Parses and validates parameters, connects to the database and
    creates or drops the index according to ``state``.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        idxname=dict(type='str', required=True, aliases=['name']),
        db=dict(type='str', aliases=['login_db']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        concurrent=dict(type='bool', default=True),
        unique=dict(type='bool', default=False),
        table=dict(type='str'),
        idxtype=dict(type='str', aliases=['type']),
        columns=dict(type='list', elements='str', aliases=['column']),
        cond=dict(type='str'),
        session_role=dict(type='str'),
        tablespace=dict(type='str'),
        storage_params=dict(type='list', elements='str'),
        cascade=dict(type='bool', default=False),
        schema=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    idxname = module.params["idxname"]
    state = module.params["state"]
    concurrent = module.params["concurrent"]
    unique = module.params["unique"]
    table = module.params["table"]
    idxtype = module.params["idxtype"]
    columns = module.params["columns"]
    cond = module.params["cond"]
    tablespace = module.params["tablespace"]
    storage_params = module.params["storage_params"]
    cascade = module.params["cascade"]
    schema = module.params["schema"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, idxname, session_role, schema, table, columns,
                    tablespace, storage_params, cond)

    # CONCURRENTLY cannot run inside a transaction block while CASCADE
    # needs one, so the two options cannot be combined.
    if concurrent and cascade:
        module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive")

    # Only btree supports UNIQUE indexes. Compare case-insensitively:
    # the type check below accepts values like 'BTREE' via .upper(), so
    # a case-sensitive comparison here would wrongly reject them.
    if unique and (idxtype and idxtype.lower() != 'btree'):
        module.fail_json(msg="Only btree currently supports unique indexes")

    if state == 'present':
        if not table:
            module.fail_json(msg="Table must be specified")
        if not columns:
            module.fail_json(msg="At least one column must be specified")
    else:
        if table or columns or cond or idxtype or tablespace:
            module.fail_json(msg="Index %s is going to be removed, so it does not "
                                 "make sense to pass a table name, columns, conditions, "
                                 "index type, or tablespace" % idxname)

    if cascade and state != 'absent':
        module.fail_json(msg="cascade parameter used only with state=absent")

    conn_params = get_conn_params(module, module.params)
    # autocommit=True is required because CREATE/DROP INDEX CONCURRENTLY
    # cannot run inside a transaction block.
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Set defaults:
    changed = False

    # Do job:
    index = Index(module, cursor, schema, idxname)
    kw = index.get_info()
    kw['query'] = ''

    # In check mode, only report whether something would change:
    # a change is needed exactly when the desired presence differs
    # from the current existence of the index.
    if module.check_mode:
        kw['changed'] = (state == 'present') != index.exists
        module.exit_json(**kw)

    if state == "present":
        if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
            module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))

        # The Index methods expect comma-separated strings:
        columns = ','.join(columns)

        if storage_params:
            storage_params = ','.join(storage_params)

        changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique)

        if changed:
            # Refresh the facts after creation:
            kw = index.get_info()
            kw['state'] = 'present'
            kw['query'] = index.executed_query

    else:
        changed = index.drop(cascade, concurrent)

        if changed:
            kw['state'] = 'absent'
            kw['query'] = index.executed_query

    # An index built concurrently may end up invalid; report and roll back.
    if not kw['valid']:
        db_connection.rollback()
        module.warn("Index %s is invalid! ROLLBACK" % idxname)

    if not concurrent:
        db_connection.commit()

    kw['changed'] = changed
    db_connection.close()
    module.exit_json(**kw)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py
new file mode 100644
index 00000000..ac412c49
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_info.py
@@ -0,0 +1,1032 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_info
+short_description: Gather information about PostgreSQL servers
+description:
+- Gathers information about PostgreSQL servers.
+options:
+ filter:
+ description:
+ - Limit the collected information by comma separated string or YAML list.
+ - Allowable values are C(version),
+ C(databases), C(in_recovery), C(settings), C(tablespaces), C(roles),
+ C(replications), C(repl_slots).
+ - By default, collects all subsets.
+ - You can use shell-style (fnmatch) wildcard to pass groups of values (see Examples).
+ - You can use '!' before value (for example, C(!settings)) to exclude it from the information.
+ - If you pass including and excluding values to the filter, for example, I(filter=!settings,ver),
+ the excluding values will be ignored.
+ type: list
+ elements: str
+ db:
+ description:
+ - Name of database to connect.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.postgresql.postgresql_ping
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+notes:
+- Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+# Display info from postgres hosts.
+# ansible postgres -m postgresql_info
+
+# Display only databases and roles info from all hosts using shell-style wildcards:
+# ansible all -m postgresql_info -a 'filter=dat*,rol*'
+
+# Display only replications and repl_slots info from standby hosts using shell-style wildcards:
+# ansible standby -m postgresql_info -a 'filter=repl*'
+
+# Display all info from databases hosts except settings:
+# ansible databases -m postgresql_info -a 'filter=!settings'
+
+- name: Collect PostgreSQL version and extensions
+ become: yes
+ become_user: postgres
+ community.postgresql.postgresql_info:
+ filter: ver*,ext*
+
+- name: Collect all info except settings and roles
+ become: yes
+ become_user: postgres
+ community.postgresql.postgresql_info:
+ filter: "!settings,!roles"
+
+# On FreeBSD with PostgreSQL 9.5 version and lower use pgsql user to become
+# and pass "postgres" as a database to connect to
+- name: Collect tablespaces and repl_slots info
+ become: yes
+ become_user: pgsql
+ community.postgresql.postgresql_info:
+ db: postgres
+ filter:
+ - tablesp*
+ - repl_sl*
+
+- name: Collect all info except databases
+ become: yes
+ become_user: postgres
+ community.postgresql.postgresql_info:
+ filter:
+ - "!databases"
+'''
+
+RETURN = r'''
+version:
+ description: Database server version U(https://www.postgresql.org/support/versioning/).
+ returned: always
+ type: dict
+ sample: { "version": { "major": 10, "minor": 6 } }
+ contains:
+ major:
+ description: Major server version.
+ returned: always
+ type: int
+ sample: 11
+ minor:
+ description: Minor server version.
+ returned: always
+ type: int
+ sample: 1
+in_recovery:
+ description: Indicates if the service is in recovery mode or not.
+ returned: always
+ type: bool
+ sample: false
+databases:
+ description: Information about databases.
+ returned: always
+ type: dict
+ sample:
+ - { "postgres": { "access_priv": "", "collate": "en_US.UTF-8",
+ "ctype": "en_US.UTF-8", "encoding": "UTF8", "owner": "postgres", "size": "7997 kB" } }
+ contains:
+ database_name:
+ description: Database name.
+ returned: always
+ type: dict
+ sample: template1
+ contains:
+ access_priv:
+ description: Database access privileges.
+ returned: always
+ type: str
+ sample: "=c/postgres_npostgres=CTc/postgres"
+ collate:
+ description:
+ - Database collation U(https://www.postgresql.org/docs/current/collation.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ ctype:
+ description:
+ - Database LC_CTYPE U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ encoding:
+ description:
+ - Database encoding U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: UTF8
+ owner:
+ description:
+ - Database owner U(https://www.postgresql.org/docs/current/sql-createdatabase.html).
+ returned: always
+ type: str
+ sample: postgres
+ size:
+ description: Database size in bytes.
+ returned: always
+ type: str
+ sample: 8189415
+ extensions:
+ description:
+ - Extensions U(https://www.postgresql.org/docs/current/sql-createextension.html).
+ returned: always
+ type: dict
+ sample:
+ - { "plpgsql": { "description": "PL/pgSQL procedural language",
+ "extversion": { "major": 1, "minor": 0 } } }
+ contains:
+ extdescription:
+ description: Extension description.
+ returned: if existent
+ type: str
+ sample: PL/pgSQL procedural language
+ extversion:
+      description: Extension version information.
+ returned: always
+ type: dict
+ contains:
+ major:
+ description: Extension major version.
+ returned: always
+ type: int
+ sample: 1
+ minor:
+ description: Extension minor version.
+ returned: always
+ type: int
+ sample: 0
+ nspname:
+ description: Namespace where the extension is.
+ returned: always
+ type: str
+ sample: pg_catalog
+ languages:
+ description: Procedural languages U(https://www.postgresql.org/docs/current/xplang.html).
+ returned: always
+ type: dict
+ sample: { "sql": { "lanacl": "", "lanowner": "postgres" } }
+ contains:
+ lanacl:
+ description:
+ - Language access privileges
+ U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ lanowner:
+ description:
+ - Language owner U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: postgres
+ namespaces:
+ description:
+ - Namespaces (schema) U(https://www.postgresql.org/docs/current/sql-createschema.html).
+ returned: always
+ type: dict
+ sample: { "pg_catalog": { "nspacl": "{postgres=UC/postgres,=U/postgres}", "nspowner": "postgres" } }
+ contains:
+ nspacl:
+ description:
+ - Access privileges U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ nspowner:
+ description:
+ - Schema owner U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: postgres
+ publications:
+ description:
+ - Information about logical replication publications (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-publication.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample: { "pub1": { "ownername": "postgres", "puballtables": true, "pubinsert": true, "pubupdate": true } }
+ version_added: '0.2.0'
+ subscriptions:
+ description:
+ - Information about replication subscriptions (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-subscription.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample:
+ - { "my_subscription": {"ownername": "postgres", "subenabled": true, "subpublications": ["first_publication"] } }
+ version_added: '0.2.0'
+repl_slots:
+ description:
+ - Replication slots (available in 9.4 and later)
+ U(https://www.postgresql.org/docs/current/view-pg-replication-slots.html).
+ returned: if existent
+ type: dict
+ sample: { "slot0": { "active": false, "database": null, "plugin": null, "slot_type": "physical" } }
+ contains:
+ active:
+ description:
+ - True means that a receiver has connected to it, and it is currently reserving archives.
+ returned: always
+ type: bool
+ sample: true
+ database:
+ description: Database name this slot is associated with, or null.
+ returned: always
+ type: str
+ sample: acme
+ plugin:
+ description:
+ - Base name of the shared object containing the output plugin
+ this logical slot is using, or null for physical slots.
+ returned: always
+ type: str
+ sample: pgoutput
+ slot_type:
+ description: The slot type - physical or logical.
+ returned: always
+ type: str
+ sample: logical
+replications:
+ description:
+ - Information about the current replications by process PIDs
+ U(https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-STATS-VIEWS-TABLE).
+ returned: if pg_stat_replication view existent
+ type: dict
+ sample:
+ - { "76580": { "app_name": "standby1", "backend_start": "2019-02-03 00:14:33.908593+03",
+ "client_addr": "10.10.10.2", "client_hostname": "", "state": "streaming", "usename": "postgres" } }
+ contains:
+ usename:
+ description:
+ - Name of the user logged into this WAL sender process ('usename' is a column name in pg_stat_replication view).
+ returned: always
+ type: str
+ sample: replication_user
+ app_name:
+ description: Name of the application that is connected to this WAL sender.
+ returned: if existent
+ type: str
+ sample: acme_srv
+ client_addr:
+ description:
+ - IP address of the client connected to this WAL sender.
+ - If this field is null, it indicates that the client is connected
+ via a Unix socket on the server machine.
+ returned: always
+ type: str
+ sample: 10.0.0.101
+ client_hostname:
+ description:
+ - Host name of the connected client, as reported by a reverse DNS lookup of client_addr.
+ - This field will only be non-null for IP connections, and only when log_hostname is enabled.
+ returned: always
+ type: str
+ sample: dbsrv1
+ backend_start:
+ description: Time when this process was started, i.e., when the client connected to this WAL sender.
+ returned: always
+ type: str
+ sample: "2019-02-03 00:14:33.908593+03"
+ state:
+ description: Current WAL sender state.
+ returned: always
+ type: str
+ sample: streaming
+tablespaces:
+ description:
+ - Information about tablespaces U(https://www.postgresql.org/docs/current/catalog-pg-tablespace.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test": { "spcacl": "{postgres=C/postgres,andreyk=C/postgres}", "spcoptions": [ "seq_page_cost=1" ],
+ "spcowner": "postgres" } }
+ contains:
+ spcacl:
+ description: Tablespace access privileges.
+ returned: always
+ type: str
+ sample: "{postgres=C/postgres,andreyk=C/postgres}"
+ spcoptions:
+ description: Tablespace-level options.
+ returned: always
+ type: list
+ sample: [ "seq_page_cost=1" ]
+ spcowner:
+ description: Owner of the tablespace.
+ returned: always
+ type: str
+ sample: test_user
+roles:
+ description:
+ - Information about roles U(https://www.postgresql.org/docs/current/user-manag.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test_role": { "canlogin": true, "member_of": [ "user_ro" ], "superuser": false,
+ "valid_until": "9999-12-31T23:59:59.999999+00:00" } }
+ contains:
+ canlogin:
+ description: Login privilege U(https://www.postgresql.org/docs/current/role-attributes.html).
+ returned: always
+ type: bool
+ sample: true
+ member_of:
+ description:
+ - Role membership U(https://www.postgresql.org/docs/current/role-membership.html).
+ returned: always
+ type: list
+ sample: [ "read_only_users" ]
+ superuser:
+ description: User is a superuser or not.
+ returned: always
+ type: bool
+ sample: false
+ valid_until:
+ description:
+ - Password expiration date U(https://www.postgresql.org/docs/current/sql-alterrole.html).
+ returned: always
+ type: str
+ sample: "9999-12-31T23:59:59.999999+00:00"
+pending_restart_settings:
+ description:
+ - List of settings that are pending restart to be set.
+ returned: always
+ type: list
+ sample: [ "shared_buffers" ]
+settings:
+ description:
+ - Information about run-time server parameters
+ U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: dict
+ sample:
+ - { "work_mem": { "boot_val": "4096", "context": "user", "max_val": "2147483647",
+ "min_val": "64", "setting": "8192", "sourcefile": "/var/lib/pgsql/10/data/postgresql.auto.conf",
+ "unit": "kB", "vartype": "integer", "val_in_bytes": 4194304 } }
+ contains:
+ setting:
+ description: Current value of the parameter.
+ returned: always
+ type: str
+ sample: 49152
+ unit:
+ description: Implicit unit of the parameter.
+ returned: always
+ type: str
+ sample: kB
+ boot_val:
+ description:
+ - Parameter value assumed at server startup if the parameter is not otherwise set.
+ returned: always
+ type: str
+ sample: 4096
+ min_val:
+ description:
+ - Minimum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 64
+ max_val:
+ description:
+ - Maximum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 2147483647
+ sourcefile:
+ description:
+ - Configuration file the current value was set in.
+ - Null for values set from sources other than configuration files,
+        or when examined by a user who is neither a superuser nor a member of pg_read_all_settings.
+ - Helpful when using include directives in configuration files.
+ returned: always
+ type: str
+ sample: /var/lib/pgsql/10/data/postgresql.auto.conf
+ context:
+ description:
+ - Context required to set the parameter's value.
+ - For more information see U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: str
+ sample: user
+ vartype:
+ description:
+ - Parameter type (bool, enum, integer, real, or string).
+ returned: always
+ type: str
+ sample: integer
+ val_in_bytes:
+ description:
+ - Current value of the parameter in bytes.
+ returned: if supported
+ type: int
+ sample: 2147483647
+ pretty_val:
+ description:
+ - Value presented in the pretty form.
+ returned: always
+ type: str
+ sample: 2MB
+ pending_restart:
+ description:
+ - True if the value has been changed in the configuration file but needs a restart; or false otherwise.
+ - Returns only if C(settings) is passed.
+ returned: always
+ type: bool
+ sample: false
+'''
+
+from fnmatch import fnmatch
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
class PgDbConn(object):
    """Auxiliary class for working with PostgreSQL connection objects.

    Arguments:
        module (AnsibleModule): Object of AnsibleModule class that
            contains connection parameters.
    """

    def __init__(self, module):
        self.module = module
        # Populated lazily by connect():
        self.db_conn = None
        self.cursor = None

    def connect(self):
        """Connect to a PostgreSQL database and return a cursor object.

        Note: connection parameters are passed by self.module object.
        """
        conninfo = get_conn_params(self.module, self.module.params, warn_db_default=False)
        self.db_conn = connect_to_db(self.module, conninfo)
        cursor = self.db_conn.cursor(cursor_factory=DictCursor)
        return cursor

    def reconnect(self, dbname):
        """Reconnect to another database and return a PostgreSQL cursor object.

        Arguments:
            dbname (string): Database name to connect to.
        """
        # Drop the current connection before switching databases.
        self.db_conn.close()
        self.module.params['database'] = dbname
        return self.connect()
+
+
class PgClusterInfo(object):
    """Class for collection information about a PostgreSQL instance.

    Arguments:
        module (AnsibleModule): Object of AnsibleModule class.
        db_conn_obj (psycopg2.connect): PostgreSQL connection object.
    """

    def __init__(self, module, db_conn_obj):
        self.module = module
        self.db_obj = db_conn_obj
        self.cursor = db_conn_obj.connect()
        # Skeleton of the returned facts; each get_* method fills its key.
        self.pg_info = {
            "version": {},
            "in_recovery": None,
            "tablespaces": {},
            "databases": {},
            "replications": {},
            "repl_slots": {},
            "settings": {},
            "roles": {},
            "pending_restart_settings": [],
        }

    def collect(self, val_list=False):
        """Collect information based on 'filter' option.

        Kwargs:
            val_list (list) -- filter values from the 'filter' option;
                plain values/wildcards include a subset, values prefixed
                with '!' exclude it. Default False means collect all.
        """
        # Subset name -> collector method; filter values are matched
        # against these keys with shell-style (fnmatch) wildcards.
        subset_map = {
            "version": self.get_pg_version,
            "in_recovery": self.get_recovery_state,
            "tablespaces": self.get_tablespaces,
            "databases": self.get_db_info,
            "replications": self.get_repl_info,
            "repl_slots": self.get_rslot_info,
            "settings": self.get_settings,
            "roles": self.get_role_info,
        }

        incl_list = []
        excl_list = []
        # Notice: incl_list and excl_list
        # don't make sense together, therefore,
        # if incl_list is not empty, we collect
        # only values from it:
        if val_list:
            for i in val_list:
                if i[0] != '!':
                    incl_list.append(i)
                else:
                    excl_list.append(i.lstrip('!'))

        if incl_list:
            for s in subset_map:
                for i in incl_list:
                    if fnmatch(s, i):
                        # Collect the subset once even if several
                        # include patterns match it.
                        subset_map[s]()
                        break
        elif excl_list:
            found = False
            # Collect info:
            for s in subset_map:
                for e in excl_list:
                    if fnmatch(s, e):
                        found = True

                if not found:
                    subset_map[s]()
                else:
                    # Reset the flag for the next subset.
                    found = False

        # Default behaviour, if include or exclude is not passed:
        else:
            # Just collect info for each item:
            for s in subset_map:
                subset_map[s]()

        return self.pg_info

    def get_pub_info(self):
        """Get publication statistics.

        Returns a dict keyed by publication name (empty dict when there
        are no publications). Called per-database from get_db_info().
        """
        query = ("SELECT p.*, r.rolname AS ownername "
                 "FROM pg_catalog.pg_publication AS p "
                 "JOIN pg_catalog.pg_roles AS r "
                 "ON p.pubowner = r.oid")

        result = self.__exec_sql(query)

        if result:
            result = [dict(row) for row in result]
        else:
            return {}

        publications = {}

        for elem in result:
            if not publications.get(elem['pubname']):
                publications[elem['pubname']] = {}

            # Copy every column except the key itself:
            for key, val in iteritems(elem):
                if key != 'pubname':
                    publications[elem['pubname']][key] = val

        return publications

    def get_subscr_info(self):
        """Get subscription statistics.

        Returns a nested dict {dbname: {subname: {...}}} (empty dict
        when there are no subscriptions).
        """
        query = ("SELECT s.*, r.rolname AS ownername, d.datname AS dbname "
                 "FROM pg_catalog.pg_subscription s "
                 "JOIN pg_catalog.pg_database d "
                 "ON s.subdbid = d.oid "
                 "JOIN pg_catalog.pg_roles AS r "
                 "ON s.subowner = r.oid")

        result = self.__exec_sql(query)

        if result:
            result = [dict(row) for row in result]
        else:
            return {}

        subscr_info = {}

        for elem in result:
            if not subscr_info.get(elem['dbname']):
                subscr_info[elem['dbname']] = {}

            if not subscr_info[elem['dbname']].get(elem['subname']):
                subscr_info[elem['dbname']][elem['subname']] = {}

            # Copy every column except the two used as dict keys:
            for key, val in iteritems(elem):
                if key not in ('subname', 'dbname'):
                    subscr_info[elem['dbname']][elem['subname']][key] = val

        return subscr_info

    def get_tablespaces(self):
        """Get information about tablespaces.

        Fills self.pg_info["tablespaces"].
        """
        # Check spcoption exists:
        # (the spcoptions column is absent on old server versions)
        opt = self.__exec_sql("SELECT column_name "
                              "FROM information_schema.columns "
                              "WHERE table_name = 'pg_tablespace' "
                              "AND column_name = 'spcoptions'")

        if not opt:
            query = ("SELECT s.spcname, a.rolname, s.spcacl "
                     "FROM pg_tablespace AS s "
                     "JOIN pg_authid AS a ON s.spcowner = a.oid")
        else:
            query = ("SELECT s.spcname, a.rolname, s.spcacl, s.spcoptions "
                     "FROM pg_tablespace AS s "
                     "JOIN pg_authid AS a ON s.spcowner = a.oid")

        res = self.__exec_sql(query)
        ts_dict = {}
        for i in res:
            ts_name = i[0]
            ts_info = dict(
                spcowner=i[1],
                spcacl=i[2] if i[2] else '',
            )
            if opt:
                ts_info['spcoptions'] = i[3] if i[3] else []

            ts_dict[ts_name] = ts_info

        self.pg_info["tablespaces"] = ts_dict

    def get_ext_info(self):
        """Get information about existing extensions.

        Returns a dict keyed by extension name, or True when the server
        has no pg_extension catalog (pre-9.1). Called per-database from
        get_db_info().
        """
        # Check that pg_extension exists:
        res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
                              "information_schema.tables "
                              "WHERE table_name = 'pg_extension')")
        if not res[0][0]:
            return True

        query = ("SELECT e.extname, e.extversion, n.nspname, c.description "
                 "FROM pg_catalog.pg_extension AS e "
                 "LEFT JOIN pg_catalog.pg_namespace AS n "
                 "ON n.oid = e.extnamespace "
                 "LEFT JOIN pg_catalog.pg_description AS c "
                 "ON c.objoid = e.oid "
                 "AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass")
        res = self.__exec_sql(query)
        ext_dict = {}
        for i in res:
            # NOTE(review): assumes extversion is always 'major.minor'
            # with numeric parts; a single-component or non-numeric
            # version string would raise IndexError/ValueError — confirm.
            ext_ver = i[1].split('.')

            ext_dict[i[0]] = dict(
                extversion=dict(
                    major=int(ext_ver[0]),
                    minor=int(ext_ver[1]),
                ),
                nspname=i[2],
                description=i[3],
            )

        return ext_dict

    def get_role_info(self):
        """Get information about roles (in PgSQL groups and users are roles).

        Fills self.pg_info["roles"]; system roles (pg_*) are excluded.
        """
        query = ("SELECT r.rolname, r.rolsuper, r.rolcanlogin, "
                 "r.rolvaliduntil, "
                 "ARRAY(SELECT b.rolname "
                 "FROM pg_catalog.pg_auth_members AS m "
                 "JOIN pg_catalog.pg_roles AS b ON (m.roleid = b.oid) "
                 "WHERE m.member = r.oid) AS memberof "
                 "FROM pg_catalog.pg_roles AS r "
                 "WHERE r.rolname !~ '^pg_'")

        res = self.__exec_sql(query)
        rol_dict = {}
        for i in res:
            rol_dict[i[0]] = dict(
                superuser=i[1],
                canlogin=i[2],
                valid_until=i[3] if i[3] else '',
                member_of=i[4] if i[4] else [],
            )

        self.pg_info["roles"] = rol_dict

    def get_rslot_info(self):
        """Get information about replication slots if exist.

        Fills self.pg_info["repl_slots"]; returns True early when the
        view is absent (pre-9.4) or there are no slots.
        """
        # Check that pg_replication_slots exists:
        res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
                              "information_schema.tables "
                              "WHERE table_name = 'pg_replication_slots')")
        if not res[0][0]:
            return True

        query = ("SELECT slot_name, plugin, slot_type, database, "
                 "active FROM pg_replication_slots")
        res = self.__exec_sql(query)

        # If there is no replication:
        if not res:
            return True

        rslot_dict = {}
        for i in res:
            rslot_dict[i[0]] = dict(
                plugin=i[1],
                slot_type=i[2],
                database=i[3],
                active=i[4],
            )

        self.pg_info["repl_slots"] = rslot_dict

    def get_settings(self):
        """Get server settings.

        Fills self.pg_info["settings"] and appends to
        self.pg_info["pending_restart_settings"].
        """
        # Check pending restart column exists:
        # (the pending_restart column appeared in newer versions)
        pend_rest_col_exists = self.__exec_sql("SELECT 1 FROM information_schema.columns "
                                               "WHERE table_name = 'pg_settings' "
                                               "AND column_name = 'pending_restart'")
        if not pend_rest_col_exists:
            query = ("SELECT name, setting, unit, context, vartype, "
                     "boot_val, min_val, max_val, sourcefile "
                     "FROM pg_settings")
        else:
            query = ("SELECT name, setting, unit, context, vartype, "
                     "boot_val, min_val, max_val, sourcefile, pending_restart "
                     "FROM pg_settings")

        res = self.__exec_sql(query)

        set_dict = {}
        for i in res:
            val_in_bytes = None
            setting = i[1]
            if i[2]:
                unit = i[2]
            else:
                unit = ''

            # Normalize memory-unit settings to bytes:
            if unit == 'kB':
                val_in_bytes = int(setting) * 1024

            elif unit == '8kB':
                val_in_bytes = int(setting) * 1024 * 8

            elif unit == 'MB':
                val_in_bytes = int(setting) * 1024 * 1024

            # Clamp negative sentinel values (e.g. "disabled") to zero:
            if val_in_bytes is not None and val_in_bytes < 0:
                val_in_bytes = 0

            setting_name = i[0]
            pretty_val = self.__get_pretty_val(setting_name)

            pending_restart = None
            if pend_rest_col_exists:
                pending_restart = i[9]

            set_dict[setting_name] = dict(
                setting=setting,
                unit=unit,
                context=i[3],
                vartype=i[4],
                boot_val=i[5] if i[5] else '',
                min_val=i[6] if i[6] else '',
                max_val=i[7] if i[7] else '',
                sourcefile=i[8] if i[8] else '',
                pretty_val=pretty_val,
            )
            if val_in_bytes is not None:
                set_dict[setting_name]['val_in_bytes'] = val_in_bytes

            if pending_restart is not None:
                set_dict[setting_name]['pending_restart'] = pending_restart
                if pending_restart:
                    self.pg_info["pending_restart_settings"].append(setting_name)

        self.pg_info["settings"] = set_dict

    def get_repl_info(self):
        """Get information about replication if the server is a master.

        Fills self.pg_info["replications"]; returns True early when the
        pg_stat_replication view is absent or empty.
        """
        # Check that pg_replication_slots exists:
        res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
                              "information_schema.tables "
                              "WHERE table_name = 'pg_stat_replication')")
        if not res[0][0]:
            return True

        query = ("SELECT r.pid, a.rolname, r.application_name, r.client_addr, "
                 "r.client_hostname, r.backend_start::text, r.state "
                 "FROM pg_stat_replication AS r "
                 "JOIN pg_authid AS a ON r.usesysid = a.oid")
        res = self.__exec_sql(query)

        # If there is no replication:
        if not res:
            return True

        repl_dict = {}
        for i in res:
            # Keyed by WAL sender process PID:
            repl_dict[i[0]] = dict(
                usename=i[1],
                app_name=i[2] if i[2] else '',
                client_addr=i[3],
                client_hostname=i[4] if i[4] else '',
                backend_start=i[5],
                state=i[6],
            )

        self.pg_info["replications"] = repl_dict

    def get_lang_info(self):
        """Get information about current supported languages.

        Returns a dict keyed by language name. Called per-database from
        get_db_info().
        """
        query = ("SELECT l.lanname, a.rolname, l.lanacl "
                 "FROM pg_language AS l "
                 "JOIN pg_authid AS a ON l.lanowner = a.oid")
        res = self.__exec_sql(query)
        lang_dict = {}
        for i in res:
            lang_dict[i[0]] = dict(
                lanowner=i[1],
                lanacl=i[2] if i[2] else '',
            )

        return lang_dict

    def get_namespaces(self):
        """Get information about namespaces.

        Returns a dict keyed by namespace (schema) name. Called
        per-database from get_db_info().
        """
        query = ("SELECT n.nspname, a.rolname, n.nspacl "
                 "FROM pg_catalog.pg_namespace AS n "
                 "JOIN pg_authid AS a ON a.oid = n.nspowner")
        res = self.__exec_sql(query)

        nsp_dict = {}
        for i in res:
            nsp_dict[i[0]] = dict(
                nspowner=i[1],
                nspacl=i[2] if i[2] else '',
            )

        return nsp_dict

    def get_pg_version(self):
        """Get major and minor PostgreSQL server version.

        Fills self.pg_info["version"].
        """
        query = "SELECT version()"
        # version() returns e.g. 'PostgreSQL 10.6 on x86_64-...';
        # take the second token and split it on dots.
        # NOTE(review): non-numeric tails (e.g. '10beta1') would raise
        # ValueError here — confirm how pre-release servers are handled.
        raw = self.__exec_sql(query)[0][0]
        raw = raw.split()[1].split('.')
        self.pg_info["version"] = dict(
            major=int(raw[0]),
            minor=int(raw[1].rstrip(',')),
        )

    def get_recovery_state(self):
        """Get if the service is in recovery mode."""
        self.pg_info["in_recovery"] = self.__exec_sql("SELECT pg_is_in_recovery()")[0][0]

    def get_db_info(self):
        """Get information about the current database.

        Fills self.pg_info["databases"]; reconnects to every database
        (except template0) to collect its namespaces, extensions,
        languages and, on PostgreSQL 10+, publications/subscriptions.
        """
        # Following query returns:
        # Name, Owner, Encoding, Collate, Ctype, Access Priv, Size
        query = ("SELECT d.datname, "
                 "pg_catalog.pg_get_userbyid(d.datdba), "
                 "pg_catalog.pg_encoding_to_char(d.encoding), "
                 "d.datcollate, "
                 "d.datctype, "
                 "pg_catalog.array_to_string(d.datacl, E'\n'), "
                 "CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') "
                 "THEN pg_catalog.pg_database_size(d.datname)::text "
                 "ELSE 'No Access' END, "
                 "t.spcname "
                 "FROM pg_catalog.pg_database AS d "
                 "JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid "
                 "WHERE d.datname != 'template0'")

        res = self.__exec_sql(query)

        db_dict = {}
        for i in res:
            db_dict[i[0]] = dict(
                owner=i[1],
                encoding=i[2],
                collate=i[3],
                ctype=i[4],
                access_priv=i[5] if i[5] else '',
                size=i[6],
            )

        # server_version >= 100000 means PostgreSQL 10 or higher:
        if self.cursor.connection.server_version >= 100000:
            subscr_info = self.get_subscr_info()

        for datname in db_dict:
            # Switch the connection to the database being inspected:
            self.cursor = self.db_obj.reconnect(datname)
            db_dict[datname]['namespaces'] = self.get_namespaces()
            db_dict[datname]['extensions'] = self.get_ext_info()
            db_dict[datname]['languages'] = self.get_lang_info()
            if self.cursor.connection.server_version >= 100000:
                db_dict[datname]['publications'] = self.get_pub_info()
                db_dict[datname]['subscriptions'] = subscr_info.get(datname, {})

        self.pg_info["databases"] = db_dict

    def __get_pretty_val(self, setting):
        """Get setting's value represented by SHOW command."""
        return self.__exec_sql("SHOW %s" % setting)[0][0]

    def __exec_sql(self, query):
        """Execute SQL and return the result.

        Fails the module on any execution error; returns False when the
        query yields no rows.
        """
        try:
            self.cursor.execute(query)
            res = self.cursor.fetchall()
            if res:
                return res
        except Exception as e:
            self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
        # NOTE(review): reached only when the query returned no rows;
        # closing the cursor here would break any subsequent query on
        # this instance — confirm this is intended.
        self.cursor.close()
        return False
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_info module."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type='str', aliases=['login_db']),
        filter=dict(type='list', elements='str'),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    subset_filter = module.params['filter']

    if not module.params['trust_input']:
        # Check input for potentially dangerous elements:
        check_input(module, module.params['session_role'])

    # Wrap the connection, gather the requested subsets and return them:
    conn_wrapper = PgDbConn(module)
    info_collector = PgClusterInfo(module, conn_wrapper)
    facts = info_collector.collect(subset_filter)

    module.exit_json(**facts)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py
new file mode 100644
index 00000000..338522d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_lang.py
@@ -0,0 +1,365 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014, Jens Depuydt <http://www.jensd.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_lang
+short_description: Adds, removes or changes procedural languages with a PostgreSQL database
+description:
+- Adds, removes or changes procedural languages with a PostgreSQL database.
+- This module allows you to add a language, remove a language or change the trust
+ relationship with a PostgreSQL database.
+- The module can be used on the machine where executed or on a remote host.
+- When removing a language from a database, it is possible that dependencies prevent
+ the language from being removed. In that case, you can specify I(cascade=yes) to
+ automatically drop objects that depend on the language (such as functions in the
+ language).
+- In case the language can't be deleted because it is required by the
+ database system, you can specify I(fail_on_drop=no) to ignore the error.
+- Be careful when marking a language as trusted since this could be a potential
+ security breach. Untrusted languages allow only users with the PostgreSQL superuser
+ privilege to use this language to create new functions.
+options:
+ lang:
+ description:
+ - Name of the procedural language to add, remove or change.
+ required: true
+ type: str
+ aliases:
+ - name
+ trust:
+ description:
+ - Make this language trusted for the selected db.
+ type: bool
+ default: 'no'
+ db:
+ description:
+ - Name of database to connect to and where the language will be added, removed or changed.
+ type: str
+ aliases:
+ - login_db
+ required: true
+ force_trust:
+ description:
+ - Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
+ - Use with care!
+ type: bool
+ default: 'no'
+ fail_on_drop:
+ description:
+ - If C(yes), fail when removing a language. Otherwise just log and continue.
+ - In some cases, it is not possible to remove a language (used by the db-system).
+ - When dependencies block the removal, consider using I(cascade).
+ type: bool
+ default: 'yes'
+ cascade:
+ description:
+ - When dropping a language, also delete objects that depend on this language.
+ - Only used when I(state=absent).
+ type: bool
+ default: 'no'
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified I(session_role) must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though the
+ I(session_role) were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The state of the language for the selected database.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ owner:
+ description:
+ - Set an owner for the language.
+ - Ignored when I(state=absent).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(lang), I(session_role),
+ I(owner) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL languages
+ description: General information about PostgreSQL languages.
+ link: https://www.postgresql.org/docs/current/xplang.html
+- name: CREATE LANGUAGE reference
+ description: Complete reference of the CREATE LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createlanguage.html
+- name: ALTER LANGUAGE reference
+ description: Complete reference of the ALTER LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterlanguage.html
+- name: DROP LANGUAGE reference
+ description: Complete reference of the DROP LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droplanguage.html
+author:
+- Jens Depuydt (@jensdepuydt)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+notes:
+- Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Add language pltclu to database testdb if it doesn't exist
+ community.postgresql.postgresql_lang: db=testdb lang=pltclu state=present
+
+# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
+# Marks the language as trusted if it exists but isn't trusted yet.
+# force_trust makes sure that the language will be marked as trusted
+- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
+ community.postgresql.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: present
+ trust: yes
+ force_trust: yes
+
+- name: Remove language pltclu from database testdb
+ community.postgresql.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+
+- name: Remove language pltclu from database testdb and remove all dependencies
+ community.postgresql.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ cascade: yes
+
+- name: Remove language pltclu from database testdb but ignore errors if something prevents the removal
+ community.postgresql.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ fail_on_drop: no
+
+- name: In testdb change owner of mylang to alice
+ community.postgresql.postgresql_lang:
+ db: testdb
+ lang: mylang
+ owner: alice
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE LANGUAGE "acme"']
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+executed_queries = []
+
+
+def lang_exists(cursor, lang):
+ """Checks if language exists for db"""
+ query = "SELECT lanname FROM pg_language WHERE lanname = %(lang)s"
+ cursor.execute(query, {'lang': lang})
+ return cursor.rowcount > 0
+
+
+def lang_istrusted(cursor, lang):
+ """Checks if language is trusted for db"""
+ query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s"
+ cursor.execute(query, {'lang': lang})
+ return cursor.fetchone()[0]
+
+
+def lang_altertrust(cursor, lang, trust):
+ """Changes if language is trusted for db"""
+ query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s"
+ cursor.execute(query, {'trust': trust, 'lang': lang})
+ executed_queries.append(cursor.mogrify(query, {'trust': trust, 'lang': lang}))
+ return True
+
+
+def lang_add(cursor, lang, trust):
+ """Adds language for db"""
+ if trust:
+ query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
+ else:
+ query = 'CREATE LANGUAGE "%s"' % lang
+ executed_queries.append(query)
+ cursor.execute(query)
+ return True
+
+
+def lang_drop(cursor, lang, cascade):
+ """Drops language for db"""
+ cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
+ try:
+ if cascade:
+ query = "DROP LANGUAGE \"%s\" CASCADE" % lang
+ else:
+ query = "DROP LANGUAGE \"%s\"" % lang
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return False
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return True
+
+
+def get_lang_owner(cursor, lang):
+ """Get language owner.
+
+ Args:
+ cursor (cursor): psycopg2 cursor object.
+ lang (str): language name.
+ """
+ query = ("SELECT r.rolname FROM pg_language l "
+ "JOIN pg_roles r ON l.lanowner = r.oid "
+ "WHERE l.lanname = %(lang)s")
+ cursor.execute(query, {'lang': lang})
+ return cursor.fetchone()[0]
+
+
+def set_lang_owner(cursor, lang, owner):
+ """Set language owner.
+
+ Args:
+ cursor (cursor): psycopg2 cursor object.
+ lang (str): language name.
+ owner (str): name of new owner.
+ """
+ query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner)
+ executed_queries.append(query)
+ cursor.execute(query)
+ return True
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", required=True, aliases=["login_db"]),
+ lang=dict(type="str", required=True, aliases=["name"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ trust=dict(type="bool", default="no"),
+ force_trust=dict(type="bool", default="no"),
+ cascade=dict(type="bool", default="no"),
+ fail_on_drop=dict(type="bool", default="yes"),
+ session_role=dict(type="str"),
+ owner=dict(type="str"),
+ trust_input=dict(type="bool", default="yes")
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ db = module.params["db"]
+ lang = module.params["lang"]
+ state = module.params["state"]
+ trust = module.params["trust"]
+ force_trust = module.params["force_trust"]
+ cascade = module.params["cascade"]
+ fail_on_drop = module.params["fail_on_drop"]
+ owner = module.params["owner"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, lang, session_role, owner)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor()
+
+ changed = False
+ kw = {'db': db, 'lang': lang, 'trust': trust}
+
+ if state == "present":
+ if lang_exists(cursor, lang):
+ lang_trusted = lang_istrusted(cursor, lang)
+ if (lang_trusted and not trust) or (not lang_trusted and trust):
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_altertrust(cursor, lang, trust)
+ else:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_add(cursor, lang, trust)
+ if force_trust:
+ changed = lang_altertrust(cursor, lang, trust)
+
+ else:
+ if lang_exists(cursor, lang):
+ if module.check_mode:
+ changed = True
+ kw['lang_dropped'] = True
+ else:
+ changed = lang_drop(cursor, lang, cascade)
+ if fail_on_drop and not changed:
+ msg = ("unable to drop language, use cascade "
+ "to delete dependencies or fail_on_drop=no to ignore")
+ module.fail_json(msg=msg)
+ kw['lang_dropped'] = changed
+
+ if owner and state == 'present':
+ if lang_exists(cursor, lang):
+ if owner != get_lang_owner(cursor, lang):
+ changed = set_lang_owner(cursor, lang, owner)
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py
new file mode 100644
index 00000000..f9423220
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_membership.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_membership
+short_description: Add or remove PostgreSQL roles from groups
+description:
+- Adds or removes PostgreSQL roles from groups (other roles).
+- Users are roles with login privilege.
+- Groups are PostgreSQL roles usually without LOGIN privilege.
+- "Common use case:"
+- 1) add a new group (groups) by M(community.postgresql.postgresql_user) module with I(role_attr_flags=NOLOGIN)
+- 2) grant them desired privileges by M(community.postgresql.postgresql_privs) module
+- 3) add desired PostgreSQL users to the new group (groups) by this module
+options:
+ groups:
+ description:
+ - The list of groups (roles) that need to be granted to or revoked from I(target_roles).
+ required: yes
+ type: list
+ elements: str
+ aliases:
+ - group
+ - source_role
+ - source_roles
+ target_roles:
+ description:
+ - The list of target roles (groups will be granted to them).
+ required: yes
+ type: list
+ elements: str
+ aliases:
+ - target_role
+ - users
+ - user
+ fail_on_role:
+ description:
+ - If C(yes), fail when group or target_role doesn't exist. If C(no), just warn and continue.
+ default: yes
+ type: bool
+ state:
+ description:
+ - Membership state.
+ - I(state=present) implies the I(groups) must be granted to I(target_roles).
+ - I(state=absent) implies the I(groups) must be revoked from I(target_roles).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(groups),
+ I(target_roles), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.postgresql.postgresql_user
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_owner
+- name: PostgreSQL role membership reference
+ description: Complete reference of the PostgreSQL role membership documentation.
+ link: https://www.postgresql.org/docs/current/role-membership.html
+- name: PostgreSQL role attributes reference
+ description: Complete reference of the PostgreSQL role attributes documentation.
+ link: https://www.postgresql.org/docs/current/role-attributes.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.postgresql.postgres
+notes:
+- Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Grant role read_only to alice and bob
+ community.postgresql.postgresql_membership:
+ group: read_only
+ target_roles:
+ - alice
+ - bob
+ state: present
+
+# you can also use target_roles: alice,bob,etc to pass the role list
+
+- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist
+ community.postgresql.postgresql_membership:
+ groups:
+ - read_only
+ - exec_func
+ target_role: bob
+ fail_on_role: no
+ state: absent
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ "GRANT \"user_ro\" TO \"alice\"" ]
+granted:
+ description: Dict of granted groups and roles.
+ returned: if I(state=present)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+revoked:
+ description: Dict of revoked groups and roles.
+ returned: if I(state=absent)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+state:
+ description: Membership state that tried to be set.
+ returned: always
+ type: str
+ sample: "present"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']),
+ target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']),
+ fail_on_role=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ groups = module.params['groups']
+ target_roles = module.params['target_roles']
+ fail_on_role = module.params['fail_on_role']
+ state = module.params['state']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, groups, target_roles, session_role)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+
+ pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role)
+
+ if state == 'present':
+ pg_membership.grant()
+
+ elif state == 'absent':
+ pg_membership.revoke()
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ return_dict = dict(
+ changed=pg_membership.changed,
+ state=state,
+ groups=pg_membership.groups,
+ target_roles=pg_membership.target_roles,
+ queries=pg_membership.executed_queries,
+ )
+
+ if state == 'present':
+ return_dict['granted'] = pg_membership.granted
+ elif state == 'absent':
+ return_dict['revoked'] = pg_membership.revoked
+
+ module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py
new file mode 100644
index 00000000..62b968cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_owner.py
@@ -0,0 +1,454 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_owner
+short_description: Change an owner of PostgreSQL database object
+description:
+- Change an owner of PostgreSQL database object.
+- Also allows to reassign the ownership of database objects owned by a database role to another role.
+
+options:
+ new_owner:
+ description:
+ - Role (user/group) to set as an I(obj_name) owner.
+ type: str
+ required: yes
+ obj_name:
+ description:
+ - Name of a database object to change ownership.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ obj_type:
+ description:
+ - Type of a database object.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ choices: [ database, function, matview, sequence, schema, table, tablespace, view ]
+ aliases:
+ - type
+ reassign_owned_by:
+ description:
+ - The list of role names. The ownership of all the objects within the current database,
+ and of all shared objects (databases, tablespaces), owned by this role(s) will be reassigned to I(new_owner).
+ - Pay attention - it reassigns all objects owned by this role(s) in the I(db)!
+ - If role(s) exists, always returns changed True.
+ - Cannot reassign ownership of objects that are required by the database system.
+ - Mutually exclusive with C(obj_type).
+ type: list
+ elements: str
+ fail_on_role:
+ description:
+ - If C(yes), fail when I(reassign_owned_by) role does not exist.
+ Otherwise just warn and continue.
+ - Mutually exclusive with I(obj_name) and I(obj_type).
+ default: yes
+ type: bool
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(new_owner), I(obj_name),
+ I(reassign_owned_by), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.postgresql.postgresql_user
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_membership
+- name: PostgreSQL REASSIGN OWNED command reference
+ description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation.
+ link: https://www.postgresql.org/docs/current/sql-reassign-owned.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.postgresql.postgres
+notes:
+- Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+# Set owner as alice for function myfunc in database bar by ansible ad-hoc command:
+# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function"
+
+- name: The same as above by playbook
+ community.postgresql.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: myfunc
+ obj_type: function
+
+- name: Set owner as bob for table acme in database bar
+ community.postgresql.postgresql_owner:
+ db: bar
+ new_owner: bob
+ obj_name: acme
+ obj_type: table
+
+- name: Set owner as alice for view test_view in database bar
+ community.postgresql.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: test_view
+ obj_type: view
+
+- name: Set owner as bob for tablespace ssd in database foo
+ community.postgresql.postgresql_owner:
+ db: foo
+ new_owner: bob
+ obj_name: ssd
+ obj_type: tablespace
+
+- name: Reassign all objects in database bar owned by bob to alice
+ community.postgresql.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by: bob
+
+- name: Reassign all objects in database bar owned by bob and bill to alice
+ community.postgresql.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by:
+ - bob
+ - bill
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgOwnership(object):
+
+ """Class for changing ownership of PostgreSQL objects.
+
+ Arguments:
+ module (AnsibleModule): Object of Ansible module class.
+ cursor (psycopg2.connect.cursor): Cursor object for interaction with the database.
+ role (str): Role name to set as a new owner of objects.
+
+ Important:
+ If you want to add handling of a new type of database objects:
+ 1. Add a specific method for this like self.__set_db_owner(), etc.
+ 2. Add a condition with a check of ownership for new type objects to self.__is_owner()
+ 3. Add a condition with invocation of the specific method to self.set_owner()
+ 4. Add the information to the module documentation
+ That's all.
+ """
+
+ def __init__(self, module, cursor, role):
+ self.module = module
+ self.cursor = cursor
+ self.check_role_exists(role)
+ self.role = role
+ self.changed = False
+ self.executed_queries = []
+ self.obj_name = ''
+ self.obj_type = ''
+
+ def check_role_exists(self, role, fail_on_role=True):
+ """Check the role exists or not.
+
+ Arguments:
+ role (str): Role name.
+ fail_on_role (bool): If True, fail when the role does not exist.
+ Otherwise just warn and continue.
+ """
+ if not self.__role_exists(role):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % role)
+ else:
+ self.module.warn("Role '%s' does not exist, pass" % role)
+
+ return False
+
+ else:
+ return True
+
+ def reassign(self, old_owners, fail_on_role):
+ """Implements REASSIGN OWNED BY command.
+
+ If success, set self.changed as True.
+
+ Arguments:
+ old_owners (list): The ownership of all the objects within
+ the current database, and of all shared objects (databases, tablespaces),
+ owned by these roles will be reassigned to self.role.
+ fail_on_role (bool): If True, fail when a role from old_owners does not exist.
+ Otherwise just warn and continue.
+ """
+ roles = []
+ for r in old_owners:
+ if self.check_role_exists(r, fail_on_role):
+ roles.append('"%s"' % r)
+
+ # Roles do not exist, nothing to do, exit:
+ if not roles:
+ return False
+
+ old_owners = ','.join(roles)
+
+ query = ['REASSIGN OWNED BY']
+ query.append(old_owners)
+ query.append('TO "%s"' % self.role)
+ query = ' '.join(query)
+
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def set_owner(self, obj_type, obj_name):
+ """Change owner of a database object.
+
+ Arguments:
+ obj_type (str): Type of object (like database, table, view, etc.).
+ obj_name (str): Object name.
+ """
+ self.obj_name = obj_name
+ self.obj_type = obj_type
+
+ # if a new_owner is the object owner now,
+ # nothing to do:
+ if self.__is_owner():
+ return False
+
+ if obj_type == 'database':
+ self.__set_db_owner()
+
+ elif obj_type == 'function':
+ self.__set_func_owner()
+
+ elif obj_type == 'sequence':
+ self.__set_seq_owner()
+
+ elif obj_type == 'schema':
+ self.__set_schema_owner()
+
+ elif obj_type == 'table':
+ self.__set_table_owner()
+
+ elif obj_type == 'tablespace':
+ self.__set_tablespace_owner()
+
+ elif obj_type == 'view':
+ self.__set_view_owner()
+
+ elif obj_type == 'matview':
+ self.__set_mat_view_owner()
+
+ def __is_owner(self):
+ """Return True if self.role is the current object owner."""
+ if self.obj_type == 'table':
+ query = ("SELECT 1 FROM pg_tables "
+ "WHERE tablename = %(obj_name)s "
+ "AND tableowner = %(role)s")
+
+ elif self.obj_type == 'database':
+ query = ("SELECT 1 FROM pg_database AS d "
+ "JOIN pg_roles AS r ON d.datdba = r.oid "
+ "WHERE d.datname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'function':
+ query = ("SELECT 1 FROM pg_proc AS f "
+ "JOIN pg_roles AS r ON f.proowner = r.oid "
+ "WHERE f.proname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'sequence':
+ query = ("SELECT 1 FROM pg_class AS c "
+ "JOIN pg_roles AS r ON c.relowner = r.oid "
+ "WHERE c.relkind = 'S' AND c.relname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'schema':
+ query = ("SELECT 1 FROM information_schema.schemata "
+ "WHERE schema_name = %(obj_name)s "
+ "AND schema_owner = %(role)s")
+
+ elif self.obj_type == 'tablespace':
+ query = ("SELECT 1 FROM pg_tablespace AS t "
+ "JOIN pg_roles AS r ON t.spcowner = r.oid "
+ "WHERE t.spcname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'view':
+ query = ("SELECT 1 FROM pg_views "
+ "WHERE viewname = %(obj_name)s "
+ "AND viewowner = %(role)s")
+
+ elif self.obj_type == 'matview':
+ query = ("SELECT 1 FROM pg_matviews "
+ "WHERE matviewname = %(obj_name)s "
+ "AND matviewowner = %(role)s")
+
+ query_params = {'obj_name': self.obj_name, 'role': self.role}
+ return exec_sql(self, query, query_params, add_to_executed=False)
+
+ def __set_db_owner(self):
+ """Set the database owner."""
+ query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_func_owner(self):
+ """Set the function owner."""
+ query = 'ALTER FUNCTION %s OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_seq_owner(self):
+ """Set the sequence owner."""
+ query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_schema_owner(self):
+ """Set the schema owner."""
+ query = 'ALTER SCHEMA %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'schema'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_table_owner(self):
+ """Set the table owner."""
+ query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_tablespace_owner(self):
+ """Set the tablespace owner."""
+ query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_view_owner(self):
+ """Set the view owner."""
+ query = 'ALTER VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_mat_view_owner(self):
+ """Set the materialized view owner."""
+ query = 'ALTER MATERIALIZED VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __role_exists(self, role):
+ """Return True if role exists, otherwise return False."""
+ query_params = {'role': role}
+ query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s"
+ return exec_sql(self, query, query_params, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ new_owner=dict(type='str', required=True),
+ obj_name=dict(type='str'),
+ obj_type=dict(type='str', aliases=['type'], choices=[
+ 'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']),
+ reassign_owned_by=dict(type='list', elements='str'),
+ fail_on_role=dict(type='bool', default=True),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['obj_name', 'reassign_owned_by'],
+ ['obj_type', 'reassign_owned_by'],
+ ['obj_name', 'fail_on_role'],
+ ['obj_type', 'fail_on_role'],
+ ],
+ supports_check_mode=True,
+ )
+
+ new_owner = module.params['new_owner']
+ obj_name = module.params['obj_name']
+ obj_type = module.params['obj_type']
+ reassign_owned_by = module.params['reassign_owned_by']
+ fail_on_role = module.params['fail_on_role']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, new_owner, obj_name, reassign_owned_by, session_role)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ pg_ownership = PgOwnership(module, cursor, new_owner)
+
+ # if we want to change ownership:
+ if obj_name:
+ pg_ownership.set_owner(obj_type, obj_name)
+
+ # if we want to reassign objects owned by roles:
+ elif reassign_owned_by:
+ pg_ownership.reassign(reassign_owned_by, fail_on_role)
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(
+ changed=pg_ownership.changed,
+ queries=pg_ownership.executed_queries,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py
new file mode 100644
index 00000000..de52e9f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_pg_hba.py
@@ -0,0 +1,746 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) <sebastiaan.mannem@enterprisedb.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+'''
+This module is used to manage postgres pg_hba files with Ansible.
+'''
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_pg_hba
+short_description: Add, remove or modify a rule in a pg_hba file
+description:
+ - The fundamental function of the module is to create, or delete lines in pg_hba files.
+ - The lines in the file should be in a typical pg_hba form and lines should be unique per key (type, databases, users, source).
+ If they are not unique and the SID is 'the one to change', only one for I(state=present) or
+ none for I(state=absent) of the SID's will remain.
+extends_documentation_fragment: files
+options:
+ address:
+ description:
+ - The source address/net where the connections could come from.
+ - Will not be used for entries of I(type)=C(local).
+ - You can also use keywords C(all), C(samehost), and C(samenet).
+ default: samehost
+ type: str
+ aliases: [ source, src ]
+ backup:
+ description:
+ - If set, create a backup of the C(pg_hba) file before it is modified.
+ The location of the backup is returned in the C(backup_file) variable by this module.
+ default: false
+ type: bool
+ backup_file:
+ description:
+ - Write backup to a specific backupfile rather than a temp file.
+ type: str
+ create:
+ description:
+ - Create a C(pg_hba) file if none exists.
+ - When set to false, an error is raised when the C(pg_hba) file doesn't exist.
+ default: false
+ type: bool
+ contype:
+ description:
+ - Type of the rule. If not set, C(postgresql_pg_hba) will only return contents.
+ type: str
+ choices: [ local, host, hostnossl, hostssl ]
+ databases:
+ description:
+ - Databases this line applies to.
+ default: all
+ type: str
+ dest:
+ description:
+ - Path to C(pg_hba) file to modify.
+ type: path
+ required: true
+ method:
+ description:
+ - Authentication method to be used.
+ type: str
+ choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256, sspi, trust ]
+ default: md5
+ netmask:
+ description:
+ - The netmask of the source address.
+ type: str
+ options:
+ description:
+ - Additional options for the authentication I(method).
+ type: str
+ order:
+ description:
+ - The entries will be written out in a specific order.
+ With this option you can control by which field they are ordered first, second and last.
+ s=source, d=databases, u=users.
+ This option is deprecated since 2.9 and will be removed in community.postgresql 3.0.0.
+ Sortorder is now hardcoded to sdu.
+ type: str
+ default: sdu
+ choices: [ sdu, sud, dsu, dus, usd, uds ]
+ state:
+ description:
+ - The lines will be added/modified when C(state=present) and removed when C(state=absent).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ users:
+ description:
+ - Users this line applies to.
+ type: str
+ default: all
+
+notes:
+ - The default authentication assumes that on the host, you are either logging in as or
+ sudo'ing to an account with appropriate permissions to read and modify the file.
+ - This module also returns the pg_hba info. You can use this module to only retrieve it by only specifying I(dest).
+ The info can be found in the returned data under key pg_hba, being a list, containing a dict per rule.
+ - This module will sort resulting C(pg_hba) files if a rule change is required.
+ This could give unexpected results with manual created hba files, if it was improperly sorted.
+ For example a rule was created for a net first and for a ip in that net range next.
+ In that situation, the 'ip specific rule' will never hit, it is in the C(pg_hba) file obsolete.
+ After the C(pg_hba) file is rewritten by the M(community.postgresql.postgresql_pg_hba) module, the ip specific rule will be sorted above the range rule.
+ And then it will hit, which will give unexpected results.
+ - With the 'order' parameter you can control which field is used to sort first, next and last.
+ - The module supports a check mode and a diff mode.
+
+seealso:
+- name: PostgreSQL pg_hba.conf file reference
+ description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
+ link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
+
+requirements:
+ - ipaddress
+
+author: Sebastiaan Mannem (@sebasmannem)
+'''
+
+EXAMPLES = '''
+- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication
+ community.postgresql.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: joe,simon
+ source: ::1
+ databases: sales,logistics
+ method: peer
+ create: true
+
+- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication
+ community.postgresql.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: replication
+ source: 192.168.0.100/24
+ databases: replication
+ method: cert
+
+- name: Revoke access from local user mary on database mydb
+ community.postgresql.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: local
+ users: mary
+ databases: mydb
+ state: absent
+'''
+
+RETURN = r'''
+msgs:
+ description: List of textual messages what was done.
+ returned: always
+ type: list
+ sample:
+ "msgs": [
+ "Removing",
+ "Changed",
+ "Writing"
+ ]
+backup_file:
+ description: File that the original pg_hba file was backed up to.
+ returned: changed
+ type: str
+ sample: /tmp/pg_hba_jxobj_p
+pg_hba:
+ description: List of the pg_hba rules as they are configured in the specified hba file.
+ returned: always
+ type: list
+ sample:
+ "pg_hba": [
+ {
+ "db": "all",
+ "method": "md5",
+ "src": "samehost",
+ "type": "host",
+ "usr": "all"
+ }
+ ]
+'''
+
+import os
+import re
+import traceback
+
+IPADDRESS_IMP_ERR = None
+try:
+ import ipaddress
+except ImportError:
+ IPADDRESS_IMP_ERR = traceback.format_exc()
+
+import tempfile
+import shutil
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+# from ansible.module_utils.postgres import postgres_common_argument_spec
+
# Authentication methods PostgreSQL accepts in a pg_hba rule
# (see the pg_hba.conf documentation for their meaning).
PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer",
                  "ldap", "radius", "cert", "pam", "scram-sha-256"]
# Connection types accepted in the first column of a pg_hba rule.
PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl"]
# Accepted values for the (deprecated) 'order' option: s=source, d=databases, u=users.
PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"]
# Canonical column order of a pg_hba rule; also used as dict keys on PgHbaRule.
PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options']

# Used both to detect blank lines and to split a rule line into columns.
WHITESPACES_RE = re.compile(r'\s+')
+
+
class PgHbaError(Exception):
    '''
    Base exception, raised when parsing or handling a pg_hba file fails.
    '''
+
+
class PgHbaRuleError(PgHbaError):
    '''
    This exception is raised when a single pg_hba rule is missing fields or invalid.
    '''
+
+
class PgHbaRuleChanged(PgHbaRuleError):
    '''
    This exception is raised when a new parsed rule is a changed version of an existing rule.
    '''
+
+
class PgHbaValueError(PgHbaError):
    '''
    This exception is raised when a value in the pg_hba file is invalid
    (e.g. a malformed line, address, or netmask).
    '''
+
+
class PgHbaRuleValueError(PgHbaRuleError):
    '''
    This exception is raised when a rule contains an invalid value
    (e.g. an unknown connection type or authentication method).
    '''
+
+
class PgHba(object):
    """
    In-memory representation of a pg_hba file to read/write entries to/from.

    Rules are stored in a dict keyed by (source, db, usr), so at most one rule
    exists per key. Comments found in the file are preserved and re-emitted on
    write. pg_hba_file - the pg_hba file, usually in the PostgreSQL data dir.
    """
    def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False):
        if order not in PG_HBA_ORDERS:
            msg = "invalid order setting {0} (should be one of '{1}')."
            raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS)))
        self.pg_hba_file = pg_hba_file
        self.rules = None
        self.comment = None
        self.order = order
        self.backup = backup
        self.last_backup = None
        self.create = create
        self.unchanged()
        # self.databases will be updated by add_rule and gives some idea of the number
        # of databases (at least that are handled by this pg_hba)
        self.databases = set(['postgres', 'template0', 'template1'])

        # self.users will be updated by add_rule and gives some idea of the number of
        # users (at least that are handled by this pg_hba); since these might also be
        # groups with multiple users, this might be totally off, but at least it is
        # some info...
        self.users = set(['postgres'])

        self.read()

    def unchanged(self):
        '''
        This method resets self.diff to an empty default (no pending changes).
        '''
        self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []},
                     'after': {'file': self.pg_hba_file, 'pg_hba': []}}

    def read(self):
        '''
        Read in the pg_hba file from the system.

        A missing/unreadable file is treated as empty (IOError is swallowed),
        which supports the 'create' workflow.
        '''
        self.rules = {}
        self.comment = []
        # read the pg_hba file
        try:
            with open(self.pg_hba_file, 'r') as hba_fh:
                for line in hba_fh:
                    line = line.strip()
                    # Strip a trailing comment, but remember it so it can be
                    # written back out later.
                    if '#' in line:
                        line, comment = line.split('#', 1)
                        self.comment.append('#' + comment)
                    try:
                        self.add_rule(PgHbaRule(line=line))
                    except PgHbaRuleError:
                        # Unparsable (e.g. empty) lines are skipped.
                        pass
            self.unchanged()
        except IOError:
            pass

    def write(self, backup_file=''):
        '''
        This method writes the PgHba rules (back) to a file.

        Returns False when there is nothing to write, True after a successful
        write. When self.backup is set, the original file is copied first
        (to backup_file if given, else to a fresh temp file recorded in
        self.last_backup).
        '''
        if not self.changed():
            return False

        contents = self.render()
        if self.pg_hba_file:
            if not (os.path.isfile(self.pg_hba_file) or self.create):
                raise PgHbaError("pg_hba file '{0}' doesn't exist. "
                                 "Use create option to autocreate.".format(self.pg_hba_file))
            if self.backup and os.path.isfile(self.pg_hba_file):
                if backup_file:
                    self.last_backup = backup_file
                else:
                    __backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba')
                shutil.copy(self.pg_hba_file, self.last_backup)
            fileh = open(self.pg_hba_file, 'w')
        else:
            # No destination configured: write to a throw-away temp file.
            filed, __path = tempfile.mkstemp(prefix='pg_hba')
            fileh = os.fdopen(filed, 'w')

        # Context manager guarantees the handle is closed even if the write
        # raises (the previous code leaked the handle on exceptions).
        with fileh:
            fileh.write(contents)
        self.unchanged()
        return True

    def add_rule(self, rule):
        '''
        This method can be used to add a rule to the list of rules in this PgHba object.

        An existing rule with the same key is replaced when any non-key field
        differs; known databases and users are tracked as a side effect.
        '''
        key = rule.key()
        try:
            try:
                oldrule = self.rules[key]
            except KeyError:
                raise PgHbaRuleChanged
            ekeys = set(list(oldrule.keys()) + list(rule.keys()))
            # 'line' is a cached rendering, not part of the rule contents.
            ekeys.remove('line')
            for k in ekeys:
                if oldrule.get(k) != rule.get(k):
                    raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule))
        except PgHbaRuleChanged:
            self.rules[key] = rule
            self.diff['after']['pg_hba'].append(rule.line())
            if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']:
                databases = set(rule['db'].split(','))
                self.databases.update(databases)
            if rule['usr'] != 'all':
                user = rule['usr']
                if user[0] == '+':
                    # Strip the group marker; track the bare name.
                    user = user[1:]
                self.users.add(user)

    def remove_rule(self, rule):
        '''
        This method can be used to find and remove a rule. It doesn't look for the exact rule, only
        the rule with the same key.
        '''
        keys = rule.key()
        try:
            del self.rules[keys]
            self.diff['before']['pg_hba'].append(rule.line())
        except KeyError:
            pass

    def get_rules(self, with_lines=False):
        '''
        This method yields all the rules of the PgHba object as plain dicts,
        sorted, optionally including their rendered 'line'.
        '''
        rules = sorted(self.rules.values())
        for rule in rules:
            ret = {}
            for key, value in rule.items():
                ret[key] = value
            if not with_lines:
                if 'line' in ret:
                    del ret['line']
            else:
                ret['line'] = rule.line()

            yield ret

    def render(self):
        '''
        This method renders the content of the PgHba rules and comments.
        The returned value can be used directly to write to a new file.
        '''
        comment = '\n'.join(self.comment)
        rule_lines = '\n'.join([rule['line'] for rule in self.get_rules(with_lines=True)])
        result = comment + '\n' + rule_lines
        # End it properly with a linefeed (if not already).
        if result and result[-1] not in ['\n', '\r']:
            result += '\n'
        return result

    def changed(self):
        '''
        This method can be called to detect if the PgHba file has been changed
        (rules added or removed since the last read/write).
        '''
        return bool(self.diff['before']['pg_hba'] or self.diff['after']['pg_hba'])
+
+
class PgHbaRule(dict):
    '''
    This class represents one rule as defined in a line in a PgHbaFile.

    The rule is stored as a dict holding (a subset of) the PG_HBA_HDR keys;
    optional columns (src, mask, options) are simply absent when unset.
    '''

    def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None,
                 method=None, options=None, line=None):
        '''
        Build a rule from separate columns, or parse it from a pg_hba line.

        If I(line) is given it is parsed first; any explicitly passed column
        then overrides the parsed value. Raises a PgHbaRuleError subclass
        when mandatory columns are missing or values are invalid.
        '''

        super(PgHbaRule, self).__init__()

        if line:
            # Read values from line if parsed
            self.fromline(line)

        # read rule cols from parsed items
        rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options]))
        for key, value in rule.items():
            if value:
                self[key] = value

        # Some sanity checks
        for key in ['method', 'type']:
            if key not in self:
                raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self))

        if self['method'] not in PG_HBA_METHODS:
            msg = "invalid method {0} (should be one of '{1}')."
            raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS)))

        if self['type'] not in PG_HBA_TYPES:
            msg = "invalid connection type {0} (should be one of '{1}')."
            raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES)))

        if self['type'] == 'local':
            # Unix-socket rules carry no address/netmask columns.
            self.unset('src')
            self.unset('mask')
        elif 'src' not in self:
            # Bugfix: the format spec used '{1}' with a single argument, which
            # raised IndexError instead of the intended PgHbaRuleError.
            raise PgHbaRuleError('Missing src in rule {0}'.format(self))
        elif '/' in self['src']:
            # CIDR notation already contains the prefix; a separate mask is redundant.
            self.unset('mask')
        else:
            # Normalize a bare address (+ optional netmask) to network notation.
            self['src'] = str(self.source())
            self.unset('mask')

    def unset(self, key):
        '''
        This method is used to unset certain columns if they exist.
        '''
        if key in self:
            del self[key]

    def line(self):
        '''
        This method can be used to return (or generate and cache) the line.
        '''
        try:
            return self['line']
        except KeyError:
            self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()])
            return self['line']

    def fromline(self, line):
        '''
        Split a pg_hba line into 'type', 'db', 'usr', 'src', 'mask', 'method',
        'options' columns and store the non-empty ones on self.
        '''
        if WHITESPACES_RE.sub('', line) == '':
            # empty line. skip this one...
            return
        cols = WHITESPACES_RE.split(line)
        if len(cols) < 4:
            msg = "Rule {0} has too few columns."
            raise PgHbaValueError(msg.format(line))
        if cols[0] not in PG_HBA_TYPES:
            msg = "Rule {0} has unknown type: {1}."
            raise PgHbaValueError(msg.format(line, cols[0]))
        if cols[0] == 'local':
            cols.insert(3, None)  # No address
            cols.insert(3, None)  # No IP-mask
        if len(cols) < 6:
            cols.insert(4, None)  # No IP-mask
        elif cols[5] not in PG_HBA_METHODS:
            cols.insert(4, None)  # No IP-mask
        if cols[5] not in PG_HBA_METHODS:
            raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5]))

        if len(cols) < 7:
            cols.insert(6, None)  # No auth-options
        else:
            cols[6] = " ".join(cols[6:])  # combine all auth-options
        rule = dict(zip(PG_HBA_HDR, cols[:7]))
        for key, value in rule.items():
            if value:
                self[key] = value

    def key(self):
        '''
        This method can be used to get the key from a rule:
        (source, db, usr), where source is 'local' for unix-socket rules.
        '''
        if self['type'] == 'local':
            source = 'local'
        else:
            source = str(self.source())
        return (source, self['db'], self['usr'])

    def source(self):
        '''
        This method is used to get the source of a rule as an ipaddress object if possible.
        Falls back to the raw string for keywords like 'all'/'samehost'/'samenet'
        and for hostnames.
        '''
        if 'mask' in self.keys():
            try:
                ipaddress.ip_address(u'{0}'.format(self['src']))
            except ValueError:
                raise PgHbaValueError('Mask was specified, but source "{0}" '
                                      'is no valid ip'.format(self['src']))
            # ipaddress module cannot work with ipv6 netmask, so lets convert it to prefixlen
            # furthermore ipv4 with bad netmask throws 'Rule {} doesn't seem to be an ip, but has a
            # mask error that doesn't seem to describe what is going on.
            try:
                mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask']))
            except ValueError:
                raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask']))
            binvalue = "{0:b}".format(int(mask_as_ip))
            if '01' in binvalue:
                raise PgHbaValueError('IP mask {0} seems invalid '
                                      '(binary value has 1 after 0)'.format(self['mask']))
            prefixlen = binvalue.count('1')
            sourcenw = '{0}/{1}'.format(self['src'], prefixlen)
            try:
                return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False)
            except ValueError:
                raise PgHbaValueError('{0} is no valid address range'.format(sourcenw))

        try:
            return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False)
        except ValueError:
            return self['src']

    def __lt__(self, other):
        """This function helps sorted to decide how to sort.

        It just checks itself against the other and decides on some key values
        if it should be sorted higher or lower in the list.
        The way it works:
        For networks, every 1 in 'netmask in binary' makes the subnet more specific.
        Therefore I chose to use prefix as the weight.
        So a single IP (/32) should have twice the weight of a /16 network.
        To keep everything in the same weight scale,
        - for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip)
        - for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip)
        Therefore for ipv4, we use prefixlen (0-32) * 4 for weight,
        which corresponds to ipv6 (0-128).
        """
        myweight = self.source_weight()
        hisweight = other.source_weight()
        if myweight != hisweight:
            # More specific sources sort first.
            return myweight > hisweight

        myweight = self.db_weight()
        hisweight = other.db_weight()
        if myweight != hisweight:
            return myweight < hisweight

        myweight = self.user_weight()
        hisweight = other.user_weight()
        if myweight != hisweight:
            return myweight < hisweight
        try:
            return self['src'] < other['src']
        except TypeError:
            # Mixed types (e.g. IPv4Network vs str) cannot be compared directly.
            return self.source_type_weight() < other.source_type_weight()
        except Exception:
            # When all else fails, just compare the exact line.
            return self.line() < other.line()

    def source_weight(self):
        """Report the weight of this source net.

        Basically this is the netmask, where IPv4 is normalized to IPv6
        (IPv4/32 has the same weight as IPv6/128).
        """
        if self['type'] == 'local':
            return 130

        sourceobj = self.source()
        if isinstance(sourceobj, ipaddress.IPv4Network):
            return sourceobj.prefixlen * 4
        if isinstance(sourceobj, ipaddress.IPv6Network):
            return sourceobj.prefixlen
        if isinstance(sourceobj, str):
            # You can also write all to match any IP address,
            # samehost to match any of the server's own IP addresses,
            # or samenet to match any address in any subnet that the server is connected to.
            if sourceobj == 'all':
                # (all is considered the full range of all ips, which has a weight of 0)
                return 0
            if sourceobj == 'samehost':
                # (sort samehost second after local)
                return 129
            if sourceobj == 'samenet':
                # Might write some fancy code to determine all prefix's
                # from all interfaces and find a sane value for this one.
                # For now, let's assume IPv4/24 or IPv6/96 (both have weight 96).
                return 96
            if sourceobj[0] == '.':
                # suffix matching (domain name), let's assume a very large scale
                # and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64).
                return 64
            # hostname, let's assume only one host matches, which is
            # IPv4/32 or IPv6/128 (both have weight 128)
            return 128
        # Bugfix: '{1}' with one format argument raised IndexError here.
        raise PgHbaValueError('Cannot deduct the source weight of this source {0}'.format(sourceobj))

    def source_type_weight(self):
        """Give a weight on the type of this source.

        Basically make sure that IPv6Networks are sorted higher than IPv4Networks.
        This is a 'when all else fails' solution in __lt__.
        """
        if self['type'] == 'local':
            return 3

        sourceobj = self.source()
        if isinstance(sourceobj, ipaddress.IPv4Network):
            return 2
        if isinstance(sourceobj, ipaddress.IPv6Network):
            return 1
        if isinstance(sourceobj, str):
            return 0
        raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj))

    def db_weight(self):
        """Report the weight of the database.

        Normally, just 1, but for replication this is 0, and for 'all', this is more than 2.
        """
        if self['db'] == 'all':
            return 100000
        if self['db'] == 'replication':
            return 0
        if self['db'] in ['samerole', 'samegroup']:
            return 1
        return 1 + self['db'].count(',')

    def user_weight(self):
        """Report weight when comparing users ('all' sorts last)."""
        if self['usr'] == 'all':
            return 1000000
        return 1
+
+
def main():
    '''
    Entry point of the module: parse parameters, load the pg_hba file,
    apply the requested rule additions/removals, and write it back.
    '''
    # argument_spec = postgres_common_argument_spec()
    argument_spec = dict()
    argument_spec.update(
        address=dict(type='str', default='samehost', aliases=['source', 'src']),
        backup=dict(type='bool', default=False),
        backup_file=dict(type='str'),
        contype=dict(type='str', default=None, choices=PG_HBA_TYPES),
        create=dict(type='bool', default=False),
        databases=dict(type='str', default='all'),
        dest=dict(type='path', required=True),
        method=dict(type='str', default='md5', choices=PG_HBA_METHODS),
        netmask=dict(type='str'),
        options=dict(type='str'),
        order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS,
                   removed_in_version='3.0.0', removed_from_collection='community.postgresql'),
        state=dict(type='str', default="present", choices=["absent", "present"]),
        users=dict(type='str', default='all')
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        supports_check_mode=True
    )
    # Fail early if the optional 'ipaddress' dependency failed to import.
    if IPADDRESS_IMP_ERR is not None:
        module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)

    contype = module.params["contype"]
    # In check mode, pretend the file may be created and never back it up,
    # so no file-system changes happen.
    create = bool(module.params["create"] or module.check_mode)
    if module.check_mode:
        backup = False
    else:
        backup = module.params['backup']
    backup_file = module.params['backup_file']
    databases = module.params["databases"]
    dest = module.params["dest"]

    method = module.params["method"]
    netmask = module.params["netmask"]
    options = module.params["options"]
    order = module.params["order"]
    source = module.params["address"]
    state = module.params["state"]
    users = module.params["users"]

    ret = {'msgs': []}
    try:
        pg_hba = PgHba(dest, order, backup=backup, create=create)
    except PgHbaError as error:
        module.fail_json(msg='Error reading file:\n{0}'.format(error))

    # Without contype the module only reports the current rules.
    if contype:
        try:
            # One rule is generated per (database, user) combination.
            for database in databases.split(','):
                for user in users.split(','):
                    rule = PgHbaRule(contype, database, user, source, netmask, method, options)
                    if state == "present":
                        ret['msgs'].append('Adding')
                        pg_hba.add_rule(rule)
                    else:
                        ret['msgs'].append('Removing')
                        pg_hba.remove_rule(rule)
        except PgHbaError as error:
            module.fail_json(msg='Error modifying rules:\n{0}'.format(error))
        file_args = module.load_file_common_arguments(module.params)
        ret['changed'] = changed = pg_hba.changed()
        if changed:
            ret['msgs'].append('Changed')
            ret['diff'] = pg_hba.diff

            if not module.check_mode:
                ret['msgs'].append('Writing')
                try:
                    # Only adjust owner/mode/etc. after an actual write.
                    if pg_hba.write(backup_file):
                        module.set_fs_attributes_if_different(file_args, True, pg_hba.diff,
                                                              expand=False)
                except PgHbaError as error:
                    module.fail_json(msg='Error writing file:\n{0}'.format(error))
                if pg_hba.last_backup:
                    ret['backup_file'] = pg_hba.last_backup

    ret['pg_hba'] = list(pg_hba.get_rules())
    module.exit_json(**ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py
new file mode 100644
index 00000000..2a7aa0cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_ping.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ping
+short_description: Check remote PostgreSQL server availability
+description:
+- Simple module to check remote PostgreSQL server availability.
+options:
+ db:
+ description:
+ - Name of a database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.postgresql.postgresql_info
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.postgresql.postgres
+notes:
+- Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+# PostgreSQL ping dbsrv server from the shell:
+# ansible dbsrv -m postgresql_ping
+
+# In the example below you need to generate certificates previously.
+# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
+- name: PostgreSQL ping dbsrv server using not default credentials and ssl
+ community.postgresql.postgresql_ping:
+ db: protected_db
+ login_host: dbsrv
+ login_user: secret
+ login_password: secret_pass
+ ca_cert: /root/root.crt
+ ssl_mode: verify-full
+'''
+
+RETURN = r'''
+is_available:
+ description: PostgreSQL server availability.
+ returned: always
+ type: bool
+ sample: true
+server_version:
+ description: PostgreSQL server version.
+ returned: always
+ type: dict
+ sample: { major: 10, minor: 1 }
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
class PgPing(object):
    """Probe a PostgreSQL server through an open cursor and report
    availability plus the server version."""

    def __init__(self, module, cursor):
        self.module = module
        self.cursor = cursor
        self.is_available = False
        self.version = {}

    def do(self):
        """Run the check and return the (is_available, version) pair."""
        self.get_pg_version()
        return self.is_available, self.version

    def get_pg_version(self):
        """Query ``SELECT version()``; on a non-empty answer, mark the server
        available and parse major/minor numbers out of the version string."""
        answer = exec_sql(self, "SELECT version()", add_to_executed=False)[0][0]
        if not answer:
            return
        self.is_available = True
        # e.g. "PostgreSQL 10.1 on x86_64-..." -> ["10", "1"]
        parts = answer.split()[1].split('.')
        self.version = {
            'major': int(parts[0]),
            'minor': int(parts[1].rstrip(',')),
        }
+ )
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    '''
    Entry point of the module: connect to the server (without failing hard)
    and report availability plus the server version.
    '''
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type='str', aliases=['login_db']),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not module.params['trust_input']:
        # Check input for potentially dangerous elements:
        check_input(module, module.params['session_role'])

    # Set some default values:
    cursor = False
    db_connection = False
    result = dict(
        changed=False,
        is_available=False,
        server_version=dict(),
    )

    # fail_on_conn=False: an unreachable server is a valid 'not available'
    # answer for this module, not an error.
    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    db_connection = connect_to_db(module, conn_params, fail_on_conn=False)

    if db_connection is not None:
        cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Do job:
    pg_ping = PgPing(module, cursor)
    if cursor:
        # If connection established:
        result["is_available"], result["server_version"] = pg_ping.do()
        # Read-only check; discard the implicit transaction.
        db_connection.rollback()

    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py
new file mode 100644
index 00000000..9cd1b155
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_privs.py
@@ -0,0 +1,1172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_privs
+short_description: Grant or revoke privileges on PostgreSQL database objects
+description:
+- Grant or revoke privileges on PostgreSQL database objects.
+- This module is basically a wrapper around most of the functionality of
+ PostgreSQL's GRANT and REVOKE statements with detection of changes
+ (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)).
+options:
+ database:
+ description:
+ - Name of database to connect to.
+ required: yes
+ type: str
+ aliases:
+ - db
+ - login_db
+ state:
+ description:
+ - If C(present), the specified privileges are granted, if C(absent) they are revoked.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ privs:
+ description:
+ - Comma separated list of privileges to grant/revoke.
+ type: str
+ aliases:
+ - priv
+ type:
+ description:
+ - Type of database object to set privileges on.
+ - The C(default_privs) choice is available starting at version 2.7.
+ - The C(foreign_data_wrapper) and C(foreign_server) object types are available since Ansible version 2.8.
+ - The C(type) choice is available since Ansible version 2.10.
+ - The C(procedure) is supported since collection version 1.3.0 and PostgreSQL 11.
+ type: str
+ default: table
+ choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
+ group, language, table, tablespace, schema, sequence, type, procedure]
+ objs:
+ description:
+ - Comma separated list of database objects to set privileges on.
+ - If I(type) is C(table), C(partition table), C(sequence), C(function) or C(procedure),
+ the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
+ database objects of type I(type) in the schema specified via I(schema).
+ (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
+ for C(function) and C(partition table) since Ansible 2.8).
+ - C(procedure) is supported since PostgreSQL 11 and M(community.postgresql) collection 1.3.0.
+ - If I(type) is C(database), this parameter can be omitted, in which case
+ privileges are set for the database specified via I(database).
+ - If I(type) is I(function) or I(procedure), colons (":") in object names will be
+ replaced with commas (needed to specify signatures, see examples).
+ type: str
+ aliases:
+ - obj
+ schema:
+ description:
+ - Schema that contains the database objects specified via I(objs).
+ - May only be provided if I(type) is C(table), C(sequence), C(function), C(procedure), C(type),
+ or C(default_privs). Defaults to C(public) in these cases.
+ - Pay attention, for embedded types when I(type=type)
+ I(schema) can be C(pg_catalog) or C(information_schema) respectively.
+ type: str
+ roles:
+ description:
+ - Comma separated list of role (user/group) names to set permissions for.
+ - The special value C(PUBLIC) can be provided instead to set permissions
+ for the implicitly defined PUBLIC group.
+ type: str
+ required: yes
+ aliases:
+ - role
+ fail_on_role:
+ description:
+ - If C(yes), fail when target role (for whom privs need to be granted) does not exist.
+ Otherwise just warn and continue.
+ default: yes
+ type: bool
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ target_roles:
+ description:
+ - A list of existing role (user/group) names to set as the
+ default permissions for database objects subsequently created by them.
+ - Parameter I(target_roles) is only available with C(type=default_privs).
+ type: str
+ grant_option:
+ description:
+ - Whether C(role) may grant/revoke the specified privileges/group memberships to others.
+ - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes.
+ - I(grant_option) only has an effect if I(state) is C(present).
+ type: bool
+ aliases:
+ - admin_option
+ host:
+ description:
+ - Database host address. If unspecified, connect via Unix socket.
+ type: str
+ aliases:
+ - login_host
+ port:
+ description:
+ - Database port to connect to.
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ aliases:
+ - login_unix_socket
+ login:
+ description:
+ - The username to authenticate with.
+ type: str
+ default: postgres
+ aliases:
+ - login_user
+ password:
+ description:
+ - The password to authenticate with.
+ type: str
+ aliases:
+ - login_password
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases:
+ - ssl_rootcert
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(roles), I(target_roles), I(session_role),
+ I(schema) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ usage_on_types:
+ description:
+ - When adding default privileges, the module always implicitly adds ``USAGE ON TYPES``.
+ - To avoid this behavior, set I(usage_on_types) to C(no).
+ - Added to save backwards compatibility.
+ - Used only when adding default privileges, ignored otherwise.
+ type: bool
+ default: yes
+ version_added: '1.2.0'
+
+notes:
+- Supports C(check_mode).
+- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
+ have singular alias names (I(priv), I(obj), I(role)).
+- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
+ C(present) and I(grant_option) to C(no) (see examples).
+- Note that when revoking privileges from a role R, this role may still have
+ access via privileges granted to any role R is a member of including C(PUBLIC).
+- Note that when you use C(PUBLIC) role, the module always reports that the state has been changed.
+- Note that when revoking privileges from a role R, you do so as the user
+ specified via I(login). If R has been granted the same privileges by
+ another user also, R can still access database objects via these privileges.
+- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
+
+seealso:
+- module: community.postgresql.postgresql_user
+- module: community.postgresql.postgresql_owner
+- module: community.postgresql.postgresql_membership
+- name: PostgreSQL privileges
+ description: General information about PostgreSQL privileges.
+ link: https://www.postgresql.org/docs/current/ddl-priv.html
+- name: PostgreSQL GRANT command reference
+ description: Complete reference of the PostgreSQL GRANT command documentation.
+ link: https://www.postgresql.org/docs/current/sql-grant.html
+- name: PostgreSQL REVOKE command reference
+ description: Complete reference of the PostgreSQL REVOKE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-revoke.html
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+
+author:
+- Bernhard Weitzhofer (@b6d)
+- Tobias Birkefeld (@tcraxs)
+'''
+
+EXAMPLES = r'''
+# On database "library":
+# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
+# TO librarian, reader WITH GRANT OPTION
+- name: Grant privs to librarian and reader on database library
+ community.postgresql.postgresql_privs:
+ database: library
+ state: present
+ privs: SELECT,INSERT,UPDATE
+ type: table
+ objs: books,authors
+ schema: public
+ roles: librarian,reader
+ grant_option: yes
+
+- name: Same as above leveraging default values
+ community.postgresql.postgresql_privs:
+ db: library
+ privs: SELECT,INSERT,UPDATE
+ objs: books,authors
+ roles: librarian,reader
+ grant_option: yes
+
+# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
+# Note that role "reader" will be *granted* INSERT privilege itself if this
+# isn't already the case (since state: present).
+- name: Revoke privs from reader
+ community.postgresql.postgresql_privs:
+ db: library
+ state: present
+ priv: INSERT
+ obj: books
+ role: reader
+ grant_option: no
+
+# "public" is the default schema. This also works for PostgreSQL 8.x.
+- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
+ community.postgresql.postgresql_privs:
+ db: library
+ state: absent
+ privs: INSERT,UPDATE
+ objs: ALL_IN_SCHEMA
+ role: reader
+
+- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
+ community.postgresql.postgresql_privs:
+ db: library
+ privs: ALL
+ type: schema
+ objs: public,math
+ role: librarian
+
+# Note the separation of arguments with colons.
+- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
+ community.postgresql.postgresql_privs:
+ db: library
+ privs: ALL
+ type: function
+ obj: add(int:int)
+ schema: math
+ roles: librarian,reader
+
+# Note that group role memberships apply cluster-wide and therefore are not
+# restricted to database "library" here.
+- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
+ community.postgresql.postgresql_privs:
+ db: library
+ type: group
+ objs: librarian,reader
+ roles: alice,bob
+ admin_option: yes
+
+# Note that here "db: postgres" specifies the database to connect to, not the
+# database to grant privileges on (which is specified via the "objs" param)
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.postgresql.postgresql_privs:
+ db: postgres
+ privs: ALL
+ type: database
+ obj: library
+ role: librarian
+
+# If objs is omitted for type "database", it defaults to the database
+# to which the connection is established
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.postgresql.postgresql_privs:
+ db: library
+ privs: ALL
+ type: database
+ role: librarian
+
+# Available since version 2.7
+# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
+ community.postgresql.postgresql_privs:
+ db: library
+ objs: ALL_DEFAULT
+ privs: ALL
+ type: default_privs
+ role: librarian
+ grant_option: yes
+
+# Available since version 2.7
+# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
+ community.postgresql.postgresql_privs:
+ db: library
+ objs: TABLES,SEQUENCES
+ privs: SELECT
+ type: default_privs
+ role: reader
+
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
+ community.postgresql.postgresql_privs:
+ db: library
+ objs: TYPES
+ privs: USAGE
+ type: default_privs
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
+ community.postgresql.postgresql_privs:
+ db: test
+ objs: fdw
+ privs: ALL
+ type: foreign_data_wrapper
+ role: reader
+
+# Available since community.postgresql 0.2.0
+- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
+ community.postgresql.postgresql_privs:
+ db: test
+ objs: customtype
+ privs: ALL
+ type: type
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
+ community.postgresql.postgresql_privs:
+ db: test
+ objs: fdw_server
+ privs: ALL
+ type: foreign_server
+ role: reader
+
+# Available since version 2.8
+# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
+- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
+ community.postgresql.postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since collection version 1.3.0
+# Grant 'execute' permissions on all procedures in schema 'common' to role 'caller'
+# Needs PostgreSQL 11 or higher and community.postgresql 1.3.0 or higher
+- name: GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA common TO caller
+ community.postgresql.postgresql_privs:
+ type: procedure
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# GRANT SELECT privileges for new TABLES objects created by librarian as
+# default to the role reader.
+# For specific
+- name: ALTER privs
+ community.postgresql.postgresql_privs:
+ db: library
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
+# REVOKE SELECT privileges for new TABLES objects created by librarian as
+# default from the role reader.
+# For specific
+- name: ALTER privs
+ community.postgresql.postgresql_privs:
+ db: library
+ state: absent
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since community.postgresql 0.2.0
+- name: Grant type privileges for pg_catalog.numeric type to alice
+ community.postgresql.postgresql_privs:
+ type: type
+ roles: alice
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: acme
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+ import psycopg2
+ import psycopg2.extensions
+except ImportError:
+ PSYCOPG2_IMP_ERR = traceback.format_exc()
+ psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ pg_quote_identifier,
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import postgres_common_argument_spec
+from ansible.module_utils._text import to_native
+
# Privilege keywords accepted by PostgreSQL GRANT/REVOKE statements.
# (The original literal listed 'USAGE' twice; frozenset deduplicates, so
# removing the duplicate is purely a cleanup.)
VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
                         'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
                         'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
# Privileges that are valid for each ALTER DEFAULT PRIVILEGES object class.
VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
                      'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
                      'FUNCTIONS': ('ALL', 'EXECUTE'),
                      'TYPES': ('ALL', 'USAGE')}

# Module-level log of every SQL statement executed, returned to the caller.
executed_queries = []
+
+
class Error(Exception):
    """Module-specific error raised for invalid input or missing objects."""
+
+
def role_exists(module, cursor, rolname):
    """Return True if a role with the given name exists in pg_roles.

    :param module: AnsibleModule instance, used only to fail on SQL errors.
    :param cursor: open psycopg2 cursor.
    :param rolname: role name to look up.

    Fix: the role name is now passed as a bound query parameter instead of
    being interpolated into the SQL string, preventing SQL injection and
    matching the parameterized style used by every other query in this file.
    """
    query = "SELECT 1 FROM pg_roles WHERE rolname = %s"
    try:
        cursor.execute(query, (rolname,))
        return cursor.rowcount > 0

    except Exception as e:
        module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))

    # Defensive fallback: fail_json normally exits, but keep the contract.
    return False
+
+
+# We don't have functools.partial in Python < 2.5
def partial(f, *args, **kwargs):
    """Partial function application (kept for very old Pythons without
    functools.partial).

    Returns a callable g such that g(*a, **kw) == f(*(args + a), **merged)
    where merged is kwargs overridden by kw.

    Fix: the original computed ``new_kwargs`` (the pre-bound keyword
    arguments merged with the call-time ones) but then passed ``**g_kwargs``,
    silently discarding every keyword argument bound at partial() time.
    """

    def g(*g_args, **g_kwargs):
        new_kwargs = kwargs.copy()
        new_kwargs.update(g_kwargs)
        return f(*(args + g_args), **new_kwargs)

    # Expose the wrapped callable and bound arguments for introspection.
    g.f = f
    g.args = args
    g.kwargs = kwargs
    return g
+
+
class Connection(object):
    """Wrapper around a psycopg2 connection with some convenience methods"""

    def __init__(self, params, module):
        # params is a namespace object built from module.params; module is
        # the AnsibleModule, kept for fail/warn reporting.
        self.database = params.database
        self.module = module
        # To use defaults values, keyword arguments must be absent, so
        # check which values are empty and don't include in the **kw
        # dictionary
        params_map = {
            "host": "host",
            "login": "user",
            "password": "password",
            "port": "port",
            "database": "database",
            "ssl_mode": "sslmode",
            "ca_cert": "sslrootcert"
        }

        kw = dict((params_map[k], getattr(params, k)) for k in params_map
                  if getattr(params, k) != '' and getattr(params, k) is not None)

        # If a unix_socket is specified, incorporate it here.
        is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
        if is_localhost and params.unix_socket != "":
            kw["host"] = params.unix_socket

        # sslrootcert support requires psycopg2 >= 2.4.3.
        sslrootcert = params.ca_cert
        if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
            raise ValueError('psycopg2 must be at least 2.4.3 in order to user the ca_cert parameter')

        self.connection = psycopg2.connect(**kw)
        self.cursor = self.connection.cursor()
        # Integer server version, e.g. 110005 for 11.5.
        self.pg_version = self.connection.server_version

    def commit(self):
        """Commit the current transaction."""
        self.connection.commit()

    def rollback(self):
        """Roll back the current transaction."""
        self.connection.rollback()

    @property
    def encoding(self):
        """Connection encoding in Python-compatible form"""
        return psycopg2.extensions.encodings[self.connection.encoding]

    # Methods for querying database objects

    # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
    # phrases in GRANT or REVOKE statements, therefore alternative methods are
    # provided here.

    def schema_exists(self, schema):
        """Return True if the named schema exists."""
        query = """SELECT count(*)
                   FROM pg_catalog.pg_namespace WHERE nspname = %s"""
        self.cursor.execute(query, (schema,))
        return self.cursor.fetchone()[0] > 0

    def get_all_tables_in_schema(self, schema):
        """Return names of tables, views, matviews and partitioned tables
        in the schema; raises Error if the schema does not exist."""
        if not self.schema_exists(schema):
            raise Error('Schema "%s" does not exist.' % schema)
        query = """SELECT relname
                   FROM pg_catalog.pg_class c
                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                   WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
        self.cursor.execute(query, (schema,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_all_sequences_in_schema(self, schema):
        """Return names of all sequences (relkind 'S') in the schema."""
        if not self.schema_exists(schema):
            raise Error('Schema "%s" does not exist.' % schema)
        query = """SELECT relname
                   FROM pg_catalog.pg_class c
                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                   WHERE nspname = %s AND relkind = 'S'"""
        self.cursor.execute(query, (schema,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_all_functions_in_schema(self, schema):
        """Return "name(argtypes)" signatures of all functions in the schema.

        On PostgreSQL >= 11 the prokind filter excludes procedures so they
        are handled only by get_all_procedures_in_schema.
        """
        if not self.schema_exists(schema):
            raise Error('Schema "%s" does not exist.' % schema)

        query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
                 "FROM pg_catalog.pg_proc p "
                 "JOIN pg_namespace n ON n.oid = p.pronamespace "
                 "WHERE nspname = %s")

        if self.pg_version >= 110000:
            query += " and p.prokind = 'f'"

        self.cursor.execute(query, (schema,))
        return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]

    def get_all_procedures_in_schema(self, schema):
        """Return "name(argtypes)" signatures of all procedures in the schema.

        Procedures only exist on PostgreSQL >= 11, so older servers fail.
        """
        if self.pg_version < 110000:
            # NOTE(review): typo "verion" in the message, left untouched
            # because it is a runtime string users/tests may match on.
            raise Error("PostgreSQL verion must be >= 11 for type=procedure. Exit")

        if not self.schema_exists(schema):
            raise Error('Schema "%s" does not exist.' % schema)

        query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
                 "FROM pg_catalog.pg_proc p "
                 "JOIN pg_namespace n ON n.oid = p.pronamespace "
                 "WHERE nspname = %s and p.prokind = 'p'")

        self.cursor.execute(query, (schema,))
        return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]

    # Methods for getting access control lists and group membership info

    # To determine whether anything has changed after granting/revoking
    # privileges, we compare the access control lists of the specified database
    # objects before and afterwards. Python's list/string comparison should
    # suffice for change detection, we should not actually have to parse ACLs.
    # The same should apply to group membership information.

    def get_table_acls(self, schema, tables):
        """Return relacl entries for the named tables, ordered by relname."""
        query = """SELECT relacl
                   FROM pg_catalog.pg_class c
                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                   WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s)
                   ORDER BY relname"""
        self.cursor.execute(query, (schema, tables))
        return [t[0] for t in self.cursor.fetchall()]

    def get_sequence_acls(self, schema, sequences):
        """Return relacl entries for the named sequences, ordered by relname."""
        query = """SELECT relacl
                   FROM pg_catalog.pg_class c
                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                   WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
                   ORDER BY relname"""
        self.cursor.execute(query, (schema, sequences))
        return [t[0] for t in self.cursor.fetchall()]

    def get_function_acls(self, schema, function_signatures):
        """Return proacl entries for functions matching the signatures'
        names (argument lists are stripped before the lookup)."""
        funcnames = [f.split('(', 1)[0] for f in function_signatures]
        query = """SELECT proacl
                   FROM pg_catalog.pg_proc p
                   JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
                   WHERE nspname = %s AND proname = ANY (%s)
                   ORDER BY proname, proargtypes"""
        self.cursor.execute(query, (schema, funcnames))
        return [t[0] for t in self.cursor.fetchall()]

    def get_schema_acls(self, schemas):
        """Return nspacl entries for the named schemas."""
        query = """SELECT nspacl FROM pg_catalog.pg_namespace
                   WHERE nspname = ANY (%s) ORDER BY nspname"""
        self.cursor.execute(query, (schemas,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_language_acls(self, languages):
        """Return lanacl entries for the named procedural languages."""
        query = """SELECT lanacl FROM pg_catalog.pg_language
                   WHERE lanname = ANY (%s) ORDER BY lanname"""
        self.cursor.execute(query, (languages,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_tablespace_acls(self, tablespaces):
        """Return spcacl entries for the named tablespaces."""
        query = """SELECT spcacl FROM pg_catalog.pg_tablespace
                   WHERE spcname = ANY (%s) ORDER BY spcname"""
        self.cursor.execute(query, (tablespaces,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_database_acls(self, databases):
        """Return datacl entries for the named databases."""
        query = """SELECT datacl FROM pg_catalog.pg_database
                   WHERE datname = ANY (%s) ORDER BY datname"""
        self.cursor.execute(query, (databases,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_group_memberships(self, groups):
        """Return (roleid, grantor, member, admin_option) rows for the
        named group roles."""
        query = """SELECT roleid, grantor, member, admin_option
                   FROM pg_catalog.pg_auth_members am
                   JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
                   WHERE r.rolname = ANY(%s)
                   ORDER BY roleid, grantor, member"""
        self.cursor.execute(query, (groups,))
        return self.cursor.fetchall()

    def get_default_privs(self, schema, *args):
        """Return defaclacl entries for the schema; extra args are ignored
        so this can be called through the same get_status interface."""
        query = """SELECT defaclacl
                   FROM pg_default_acl a
                   JOIN pg_namespace b ON a.defaclnamespace=b.oid
                   WHERE b.nspname = %s;"""
        self.cursor.execute(query, (schema,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_foreign_data_wrapper_acls(self, fdws):
        """Return fdwacl entries for the named foreign data wrappers."""
        query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
                   WHERE fdwname = ANY (%s) ORDER BY fdwname"""
        self.cursor.execute(query, (fdws,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_foreign_server_acls(self, fs):
        """Return srvacl entries for the named foreign servers."""
        query = """SELECT srvacl FROM pg_catalog.pg_foreign_server
                   WHERE srvname = ANY (%s) ORDER BY srvname"""
        self.cursor.execute(query, (fs,))
        return [t[0] for t in self.cursor.fetchall()]

    def get_type_acls(self, schema, types):
        """Return typacl entries for the named types in the schema."""
        query = """SELECT t.typacl FROM pg_catalog.pg_type t
                   JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
                   WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname"""
        self.cursor.execute(query, (schema, types))
        return [t[0] for t in self.cursor.fetchall()]

    # Manipulating privileges

    def manipulate_privs(self, obj_type, privs, objs, roles, target_roles,
                         state, grant_option, schema_qualifier=None, fail_on_role=True, usage_on_types=True):
        """Manipulate database object privileges.

        :param obj_type: Type of database object to grant/revoke
                         privileges for.
        :param privs: Either a list of privileges to grant/revoke
                      or None if type is "group".
        :param objs: List of database objects to grant/revoke
                     privileges for.
        :param roles: Either a list of role names or "PUBLIC"
                      for the implicitly defined "PUBLIC" group
        :param target_roles: List of role names to grant/revoke
                             default privileges as.
        :param state: "present" to grant privileges, "absent" to revoke.
        :param grant_option: Only for state "present": If True, set
                             grant/admin option. If False, revoke it.
                             If None, don't change grant option.
        :param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
                                 "FUNCTION") must be qualified by schema.
                                 Ignored for other Types.
        :param fail_on_role: If True, fail when a target role does not
                             exist; otherwise warn and skip it.
        :param usage_on_types: If True, implicitly add USAGE ON TYPES when
                               granting default privileges.
        :returns: True if the relevant ACLs changed (always True for
                  PUBLIC), False otherwise.
        """
        # get_status: function to get current status
        if obj_type == 'table':
            get_status = partial(self.get_table_acls, schema_qualifier)
        elif obj_type == 'sequence':
            get_status = partial(self.get_sequence_acls, schema_qualifier)
        elif obj_type in ('function', 'procedure'):
            get_status = partial(self.get_function_acls, schema_qualifier)
        elif obj_type == 'schema':
            get_status = self.get_schema_acls
        elif obj_type == 'language':
            get_status = self.get_language_acls
        elif obj_type == 'tablespace':
            get_status = self.get_tablespace_acls
        elif obj_type == 'database':
            get_status = self.get_database_acls
        elif obj_type == 'group':
            get_status = self.get_group_memberships
        elif obj_type == 'default_privs':
            get_status = partial(self.get_default_privs, schema_qualifier)
        elif obj_type == 'foreign_data_wrapper':
            get_status = self.get_foreign_data_wrapper_acls
        elif obj_type == 'foreign_server':
            get_status = self.get_foreign_server_acls
        elif obj_type == 'type':
            get_status = partial(self.get_type_acls, schema_qualifier)
        else:
            raise Error('Unsupported database object type "%s".' % obj_type)

        # Return False (nothing has changed) if there are no objs to work on.
        if not objs:
            return False

        # obj_ids: quoted db object identifiers (sometimes schema-qualified)
        if obj_type in ('function', 'procedure'):
            obj_ids = []
            for obj in objs:
                try:
                    # Split "name(arg1:arg2)" into name and argument list.
                    f, args = obj.split('(', 1)
                except Exception:
                    raise Error('Illegal function / procedure signature: "%s".' % obj)
                obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
        elif obj_type in ['table', 'sequence', 'type']:
            obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
        else:
            obj_ids = ['"%s"' % o for o in objs]

        # set_what: SQL-fragment specifying what to set for the target roles:
        # Either group membership or privileges on objects of a certain type
        if obj_type == 'group':
            set_what = ','.join(obj_ids)
        elif obj_type == 'default_privs':
            # We don't want privs to be quoted here
            set_what = ','.join(privs)
        else:
            # function types are already quoted above
            if obj_type not in ('function', 'procedure'):
                obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
            # Note: obj_type has been checked against a set of string literals
            # and privs was escaped when it was parsed
            # Note: Underscores are replaced with spaces to support multi-word obj_type
            set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '),
                                        ','.join(obj_ids))

        # for_whom: SQL-fragment specifying for whom to set the above
        if roles == 'PUBLIC':
            for_whom = 'PUBLIC'
        else:
            for_whom = []
            for r in roles:
                if not role_exists(self.module, self.cursor, r):
                    if fail_on_role:
                        self.module.fail_json(msg="Role '%s' does not exist" % r.strip())

                    else:
                        self.module.warn("Role '%s' does not exist, pass it" % r.strip())
                else:
                    for_whom.append('"%s"' % r)

            # All requested roles were missing (and only warned about):
            # nothing to do.
            if not for_whom:
                return False

            for_whom = ','.join(for_whom)

        # as_who:
        as_who = None
        if target_roles:
            as_who = ','.join('"%s"' % r for r in target_roles)

        if schema_qualifier:
            schema_qualifier = '"%s"' % schema_qualifier

        # Snapshot ACLs before the change for change detection.
        status_before = get_status(objs)

        query = QueryBuilder(state) \
            .for_objtype(obj_type) \
            .with_grant_option(grant_option) \
            .for_whom(for_whom) \
            .as_who(as_who) \
            .for_schema(schema_qualifier) \
            .set_what(set_what) \
            .for_objs(objs) \
            .usage_on_types(usage_on_types) \
            .build()

        executed_queries.append(query)
        self.cursor.execute(query)
        # PUBLIC grants don't show up in the ACL snapshots the same way,
        # so always report a change.
        if roles == 'PUBLIC':
            return True

        status_after = get_status(objs)

        def nonesorted(e):
            # For python 3+ that can fail trying
            # to compare NoneType elements by sort method.
            if e is None:
                return ''
            return e

        status_before.sort(key=nonesorted)
        status_after.sort(key=nonesorted)
        return status_before != status_after
+
+
class QueryBuilder(object):
    """Fluent builder that assembles GRANT / REVOKE /
    ALTER DEFAULT PRIVILEGES statements into a single SQL string."""

    def __init__(self, state):
        self._grant_option = None
        self._for_whom = None
        self._as_who = None
        self._set_what = None
        self._obj_type = None
        self._state = state
        self._schema = None
        self._objs = None
        self._usage_on_types = None
        self.query = []

    def _store(self, attr, value):
        # Generic fluent setter shared by all builder methods below.
        setattr(self, attr, value)
        return self

    def for_objs(self, objs):
        return self._store('_objs', objs)

    def for_schema(self, schema):
        return self._store('_schema', schema)

    def with_grant_option(self, option):
        return self._store('_grant_option', option)

    def for_whom(self, who):
        return self._store('_for_whom', who)

    def usage_on_types(self, usage_on_types):
        return self._store('_usage_on_types', usage_on_types)

    def as_who(self, target_roles):
        return self._store('_as_who', target_roles)

    def set_what(self, what):
        return self._store('_set_what', what)

    def for_objtype(self, objtype):
        return self._store('_obj_type', objtype)

    def build(self):
        """Assemble the queued statements and return them newline-joined."""
        if self._state == 'present':
            self.build_present()
        else:
            # 'absent' and any unexpected state both fall back to revocation,
            # matching the original builder's behavior.
            self.build_absent()
        return '\n'.join(self.query)

    def _default_prefix(self):
        # Leading clause shared by every ALTER DEFAULT PRIVILEGES statement.
        if self._as_who:
            return 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1}'.format(self._as_who, self._schema)
        return 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0}'.format(self._schema)

    def add_default_revoke(self):
        """Queue statements revoking all default privileges per object class."""
        for obj in self._objs:
            self.query.append('{0} REVOKE ALL ON {1} FROM {2};'.format(
                self._default_prefix(), obj, self._for_whom))

    def add_grant_option(self):
        """Terminate the last queued statement, honouring the grant_option
        tri-state (True: add option, False: revoke it, None: no change)."""
        if self._grant_option:
            suffix = ' WITH ADMIN OPTION;' if self._obj_type == 'group' else ' WITH GRANT OPTION;'
            self.query[-1] += suffix
            return
        self.query[-1] += ';'
        if self._grant_option is False:
            if self._obj_type == 'group':
                self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
            elif self._obj_type != 'default_privs':
                self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))

    def add_default_priv(self):
        """Queue default-privilege GRANTs, plus the implicit USAGE ON TYPES
        when usage_on_types is enabled."""
        for obj in self._objs:
            self.query.append('{0} GRANT {1} ON {2} TO {3}'.format(
                self._default_prefix(), self._set_what, obj, self._for_whom))
            self.add_grant_option()

        if self._usage_on_types:
            self.query.append('{0} GRANT USAGE ON TYPES TO {1}'.format(
                self._default_prefix(), self._for_whom))
            self.add_grant_option()

    def build_present(self):
        """Queue the statements implementing state=present."""
        if self._obj_type == 'default_privs':
            # Reset existing default privileges before granting the new set.
            self.add_default_revoke()
            self.add_default_priv()
        else:
            self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom))
            self.add_grant_option()

    def build_absent(self):
        """Queue the statements implementing state=absent."""
        if self._obj_type == 'default_privs':
            self.query = []
            for obj in ['TABLES', 'SEQUENCES', 'TYPES']:
                self.query.append('{0} REVOKE ALL ON {1} FROM {2};'.format(
                    self._default_prefix(), obj, self._for_whom))
        else:
            self.query.append('REVOKE {0} FROM {1};'.format(self._set_what, self._for_whom))
+
+
def main():
    """Entry point of the postgresql_privs module.

    Parses and validates module parameters, connects to the database,
    normalizes the privilege/object/role specifications, and applies the
    requested privilege changes, committing only when something changed
    and check mode is off.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        database=dict(required=True, aliases=['db', 'login_db']),
        state=dict(default='present', choices=['present', 'absent']),
        privs=dict(required=False, aliases=['priv']),
        type=dict(default='table',
                  choices=['table',
                           'sequence',
                           'function',
                           'procedure',
                           'database',
                           'schema',
                           'language',
                           'tablespace',
                           'group',
                           'default_privs',
                           'foreign_data_wrapper',
                           'foreign_server',
                           'type', ]),
        objs=dict(required=False, aliases=['obj']),
        schema=dict(required=False),
        roles=dict(required=True, aliases=['role']),
        session_role=dict(required=False),
        target_roles=dict(required=False),
        grant_option=dict(required=False, type='bool',
                          aliases=['admin_option']),
        host=dict(default='', aliases=['login_host']),
        unix_socket=dict(default='', aliases=['login_unix_socket']),
        login=dict(default='postgres', aliases=['login_user']),
        password=dict(default='', aliases=['login_password'], no_log=True),
        fail_on_role=dict(type='bool', default=True),
        trust_input=dict(type='bool', default=True),
        usage_on_types=dict(type='bool', default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    fail_on_role = module.params['fail_on_role']
    usage_on_types = module.params['usage_on_types']

    # Create type object as namespace for module params
    p = type('Params', (), module.params)
    # param "schema": default, allowed depends on param "type"
    if p.type in ['table', 'sequence', 'function', 'procedure', 'type', 'default_privs']:
        p.schema = p.schema or 'public'
    elif p.schema:
        module.fail_json(msg='Argument "schema" is not allowed '
                             'for type "%s".' % p.type)

    # param "objs": default, required depends on param "type"
    if p.type == 'database':
        p.objs = p.objs or p.database
    elif not p.objs:
        module.fail_json(msg='Argument "objs" is required '
                             'for type "%s".' % p.type)

    # param "privs": allowed, required depends on param "type"
    if p.type == 'group':
        if p.privs:
            module.fail_json(msg='Argument "privs" is not allowed '
                                 'for type "group".')
    elif not p.privs:
        module.fail_json(msg='Argument "privs" is required '
                             'for type "%s".' % p.type)

    # Check input
    if not p.trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, p.roles, p.target_roles, p.session_role, p.schema)

    # Connect to Database
    if not psycopg2:
        module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
    try:
        conn = Connection(p, module)
    except psycopg2.Error as e:
        module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc())
    except TypeError as e:
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
        module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
    except ValueError as e:
        # We raise this when the psycopg library is too old
        module.fail_json(msg=to_native(e))

    if p.session_role:
        try:
            conn.cursor.execute('SET ROLE "%s"' % p.session_role)
        except Exception as e:
            module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc())

    try:
        # privs
        if p.privs:
            privs = frozenset(pr.upper() for pr in p.privs.split(','))
            if not privs.issubset(VALID_PRIVS):
                module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
        else:
            privs = None
        # objs:
        if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_tables_in_schema(p.schema)
        elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_sequences_in_schema(p.schema)
        elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_functions_in_schema(p.schema)
        elif p.type == 'procedure' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_procedures_in_schema(p.schema)
        elif p.type == 'default_privs':
            if p.objs == 'ALL_DEFAULT':
                objs = frozenset(VALID_DEFAULT_OBJS.keys())
            else:
                objs = frozenset(obj.upper() for obj in p.objs.split(','))
                if not objs.issubset(VALID_DEFAULT_OBJS):
                    module.fail_json(
                        msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys()))
            # Again, do we have valid privs specified for object type:
            valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
            if not valid_objects_for_priv == objs:
                module.fail_json(
                    msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
                        valid_objects_for_priv, objs))
        else:
            objs = p.objs.split(',')

            # function signatures are encoded using ':' to separate args
            if p.type in ('function', 'procedure'):
                objs = [obj.replace(':', ',') for obj in objs]

        # roles
        if p.roles.upper() == 'PUBLIC':
            roles = 'PUBLIC'
        else:
            roles = p.roles.split(',')

            if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
                # BUGFIX: previously module.exit_json(changed=False) was
                # called *before* this branch, which made the fail_on_role
                # handling below unreachable dead code. Fail or warn first,
                # then exit.
                if fail_on_role:
                    module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
                else:
                    module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
                module.exit_json(changed=False, queries=executed_queries)

        # check if target_roles is set with type: default_privs
        if p.target_roles and not p.type == 'default_privs':
            module.warn('"target_roles" will be ignored '
                        'Argument "type: default_privs" is required for usage of "target_roles".')

        # target roles
        if p.target_roles:
            target_roles = p.target_roles.split(',')
        else:
            target_roles = None

        changed = conn.manipulate_privs(
            obj_type=p.type,
            privs=privs,
            objs=objs,
            roles=roles,
            target_roles=target_roles,
            state=p.state,
            grant_option=p.grant_option,
            schema_qualifier=p.schema,
            fail_on_role=fail_on_role,
            usage_on_types=usage_on_types,
        )

    except Error as e:
        conn.rollback()
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    except psycopg2.Error as e:
        conn.rollback()
        module.fail_json(msg=to_native(e))

    # Nothing is persisted in check mode or when no change was made.
    if module.check_mode or not changed:
        conn.rollback()
    else:
        conn.commit()
    module.exit_json(changed=changed, queries=executed_queries)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py
new file mode 100644
index 00000000..06692c09
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_publication.py
@@ -0,0 +1,683 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_publication
+short_description: Add, update, or remove PostgreSQL publication
+description:
+- Add, update, or remove PostgreSQL publication.
+options:
+ name:
+ description:
+ - Name of the publication to add, update, or remove.
+ required: true
+ type: str
+ db:
+ description:
+ - Name of the database to connect to and where
+ the publication state will be changed.
+ aliases: [ login_db ]
+ type: str
+ tables:
+ description:
+ - List of tables to add to the publication.
+ - If no value is set all tables are targeted.
+ - If the publication already exists for specific tables and I(tables) is not passed,
+ nothing will be changed.
+ - If you need to add all tables to the publication with the same name,
+ drop existent and create new without passing I(tables).
+ type: list
+ elements: str
+ state:
+ description:
+ - The publication state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ parameters:
+ description:
+ - Dictionary with optional publication parameters.
+ - Available parameters depend on PostgreSQL version.
+ type: dict
+ owner:
+ description:
+ - Publication owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ type: str
+ cascade:
+ description:
+ - Drop publication dependencies. Has effect with I(state=absent) only.
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(name), I(tables), I(owner),
      I(session_role), I(parameters) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- PostgreSQL version must be 10 or greater.
+- Supports C(check_mode).
+seealso:
+- name: CREATE PUBLICATION reference
+ description: Complete reference of the CREATE PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createpublication.html
+- name: ALTER PUBLICATION reference
+ description: Complete reference of the ALTER PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterpublication.html
+- name: DROP PUBLICATION reference
+ description: Complete reference of the DROP PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droppublication.html
+author:
+- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+extends_documentation_fragment:
+- community.postgresql.postgres
+'''
+
+EXAMPLES = r'''
+- name: Create a new publication with name "acme" targeting all tables in database "test"
+ community.postgresql.postgresql_publication:
+ db: test
+ name: acme
+
+- name: Create publication "acme" publishing only prices and vehicles tables
+ community.postgresql.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+
+- name: >
+ Create publication "acme", set user alice as an owner, targeting all tables
+ Allowable DML operations are INSERT and UPDATE only
+ community.postgresql.postgresql_publication:
+ name: acme
+ owner: alice
+ parameters:
+ publish: 'insert,update'
+
+- name: >
+ Assuming publication "acme" exists and there are targeted
+ tables "prices" and "vehicles", add table "stores" to the publication
+ community.postgresql.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+ - stores
+
+- name: Remove publication "acme" if exists in database "test"
+ community.postgresql.postgresql_publication:
+ db: test
+ name: acme
+ state: absent
+'''
+
+RETURN = r'''
+exists:
+ description:
+ - Flag indicates the publication exists or not at the end of runtime.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
  type: list
+ sample: [ 'DROP PUBLICATION "acme" CASCADE' ]
+owner:
+ description: Owner of the publication at the end of runtime.
+ returned: if publication exists
+ type: str
+ sample: "alice"
+tables:
+ description:
+ - List of tables in the publication at the end of runtime.
+ - If all tables are published, returns empty list.
+ returned: if publication exists
+ type: list
+ sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""]
+alltables:
+ description:
+ - Flag indicates that all tables are published.
+ returned: if publication exists
+ type: bool
+ sample: false
+parameters:
+ description: Publication parameters at the end of runtime.
+ returned: if publication exists
+ type: dict
+ sample: {'publish': {'insert': false, 'delete': false, 'update': true}}
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+SUPPORTED_PG_VERSION = 10000
+
+
+################################
+# Module functions and classes #
+################################
+
def transform_tables_representation(tbl_list):
    """Normalize table names for use in SQL statements.

    Prepends the 'public' schema to any table name lacking an explicit
    schema qualifier and quotes every resulting identifier.

    Args:
        tbl_list (list): List of table names.

    Returns:
        tbl_list (list): The same list, modified in place.
    """
    for idx in range(len(tbl_list)):
        name = tbl_list[idx].strip()
        if '.' not in name:
            # No schema qualifier given - assume the default 'public' schema.
            name = 'public.%s' % name
        tbl_list[idx] = pg_quote_identifier(name, 'table')

    return tbl_list
+
+
class PgPublication():
    """Class to work with PostgreSQL publication.

    Args:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        name (str): The name of the publication.

    Attributes:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        name (str): Name of the publication.
        executed_queries (list): List of executed queries.
        attrs (dict): Dict with publication attributes.
        exists (bool): Flag indicates the publication exists or not.
    """

    def __init__(self, module, cursor, name):
        self.module = module
        self.cursor = cursor
        self.name = name
        self.executed_queries = []
        self.attrs = {
            'alltables': False,
            'tables': [],
            'parameters': {},
            'owner': '',
        }
        self.exists = self.check_pub()

    def get_info(self):
        """Refresh the publication information.

        Returns:
            ``self.attrs``.
        """
        self.exists = self.check_pub()
        return self.attrs

    def check_pub(self):
        """Check the publication and refresh ``self.attrs`` publication attribute.

        Returns:
            True if the publication with ``self.name`` exists, False otherwise.
        """

        pub_info = self.__get_general_pub_info()

        if not pub_info:
            # Publication does not exist:
            return False

        self.attrs['owner'] = pub_info.get('pubowner')

        # Publication DML operations:
        self.attrs['parameters']['publish'] = {}
        self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False)
        self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False)
        self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False)
        if pub_info.get('pubtruncate'):
            self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate')

        # If alltables flag is False, get the list of targeted tables:
        if not pub_info.get('puballtables'):
            table_info = self.__get_tables_pub_info()
            # Join sublists [['schema', 'table'], ...] to ['schema.table', ...]
            # for better representation:
            for i, schema_and_table in enumerate(table_info):
                table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table')

            self.attrs['tables'] = table_info
        else:
            self.attrs['alltables'] = True

        # Publication exists:
        return True

    def create(self, tables, params, owner, check_mode=True):
        """Create the publication.

        Args:
            tables (list): List with names of the tables that need to be added to the publication.
            params (dict): Dict contains optional publication parameters and their values.
            owner (str): Name of the publication owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if publication has been created, otherwise False.
        """
        changed = True

        query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')]

        if tables:
            query_fragments.append("FOR TABLE %s" % ', '.join(tables))
        else:
            query_fragments.append("FOR ALL TABLES")

        if params:
            params_list = []
            # Make list ["param = 'value'", ...] from params dict:
            for (key, val) in iteritems(params):
                params_list.append("%s = '%s'" % (key, val))

            # Add the list to query_fragments:
            query_fragments.append("WITH (%s)" % ', '.join(params_list))

        changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)

        if owner:
            # If check_mode, just add possible SQL to
            # executed_queries and return:
            self.__pub_set_owner(owner, check_mode=check_mode)

        return changed

    def update(self, tables, params, owner, check_mode=True):
        """Update the publication.

        Args:
            tables (list): List with names of the tables that need to be presented in the publication.
            params (dict): Dict contains optional publication parameters and their values.
            owner (str): Name of the publication owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if publication has been updated, otherwise False.
        """
        changed = False

        # Add or drop tables from published tables suit:
        if tables and not self.attrs['alltables']:

            # 1. If needs to add table to the publication:
            for tbl in tables:
                if tbl not in self.attrs['tables']:
                    # If needs to add table to the publication:
                    changed = self.__pub_add_table(tbl, check_mode=check_mode)

            # 2. if there is a table in targeted tables
            # that's not presented in the passed tables:
            for tbl in self.attrs['tables']:
                if tbl not in tables:
                    changed = self.__pub_drop_table(tbl, check_mode=check_mode)

        elif tables and self.attrs['alltables']:
            changed = self.__pub_set_tables(tables, check_mode=check_mode)

        # Update pub parameters:
        if params:
            for key, val in iteritems(params):
                if self.attrs['parameters'].get(key):

                    # In PostgreSQL 10/11 only 'publish' optional parameter is presented.
                    if key == 'publish':
                        # 'publish' value can be only a string with comma-separated items
                        # of allowed DML operations like 'insert,update' or
                        # 'insert,update,delete', etc.
                        # Make dictionary to compare with current attrs later:
                        val_dict = self.attrs['parameters']['publish'].copy()
                        val_list = val.split(',')
                        for v in val_dict:
                            if v in val_list:
                                val_dict[v] = True
                            else:
                                val_dict[v] = False

                        # Compare val_dict and the dict with current 'publish' parameters,
                        # if they're different, set new values:
                        if val_dict != self.attrs['parameters']['publish']:
                            changed = self.__pub_set_param(key, val, check_mode=check_mode)

                    # Default behavior for other cases:
                    elif self.attrs['parameters'][key] != val:
                        changed = self.__pub_set_param(key, val, check_mode=check_mode)

                else:
                    # If the parameter was not set before:
                    changed = self.__pub_set_param(key, val, check_mode=check_mode)

        # Update pub owner:
        if owner:
            if owner != self.attrs['owner']:
                changed = self.__pub_set_owner(owner, check_mode=check_mode)

        return changed

    def drop(self, cascade=False, check_mode=True):
        """Drop the publication.

        Kwargs:
            cascade (bool): Flag indicates that publication needs to be deleted
                with its dependencies.
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if publication has been updated, otherwise False.
        """
        if self.exists:
            query_fragments = []
            query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication'))
            if cascade:
                query_fragments.append("CASCADE")

            return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)

        # BUGFIX: previously the method implicitly returned None when the
        # publication did not exist, leaking changed=None to the caller.
        return False

    def __get_general_pub_info(self):
        """Get and return general publication information.

        Returns:
            Dict with publication information if successful, False otherwise.
        """
        # Check pg_publication.pubtruncate exists (supported from PostgreSQL 11):
        pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns "
                                      "WHERE table_name = 'pg_publication' "
                                      "AND column_name = 'pubtruncate'"), add_to_executed=False)

        if pgtrunc_sup:
            query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
                     "p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p "
                     "JOIN pg_catalog.pg_roles AS r "
                     "ON p.pubowner = r.oid "
                     "WHERE p.pubname = %(pname)s")
        else:
            query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
                     "p.pubupdate , p.pubdelete FROM pg_publication AS p "
                     "JOIN pg_catalog.pg_roles AS r "
                     "ON p.pubowner = r.oid "
                     "WHERE p.pubname = %(pname)s")

        result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
        if result:
            return result[0]
        else:
            return False

    def __get_tables_pub_info(self):
        """Get and return tables that are published by the publication.

        Returns:
            List of dicts with published tables.
        """
        query = ("SELECT schemaname, tablename "
                 "FROM pg_publication_tables WHERE pubname = %(pname)s")
        return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)

    def __pub_add_table(self, table, check_mode=False):
        """Add a table to the publication.

        Args:
            table (str): Table name.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
                                                        pg_quote_identifier(table, 'table')))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_drop_table(self, table, check_mode=False):
        """Drop a table from the publication.

        Args:
            table (str): Table name.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
                                                         pg_quote_identifier(table, 'table')))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_set_tables(self, tables, check_mode=False):
        """Set a table suit that need to be published by the publication.

        Args:
            tables (list): List of tables.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        quoted_tables = [pg_quote_identifier(t, 'table') for t in tables]
        query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
                                                        ', '.join(quoted_tables)))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_set_param(self, param, value, check_mode=False):
        """Set an optional publication parameter.

        Args:
            param (str): Name of the parameter.
            value (str): Parameter value.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'),
                                                           param, value))
        return self.__exec_sql(query, check_mode=check_mode)

    def __pub_set_owner(self, role, check_mode=False):
        """Set a publication owner.

        Args:
            role (str): Role (user) name that needs to be set as a publication owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = ('ALTER PUBLICATION %s '
                 'OWNER TO "%s"' % (pg_quote_identifier(self.name, 'publication'), role))
        return self.__exec_sql(query, check_mode=check_mode)

    def __exec_sql(self, query, check_mode=False):
        """Execute SQL query.

        Note: If we need just to get information from the database,
            we use ``exec_sql`` function directly.

        Args:
            query (str): Query that needs to be executed.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just add ``query`` to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        if check_mode:
            self.executed_queries.append(query)
            return True
        else:
            return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_publication module.

    Parses parameters, connects to the database, ensures the publication
    is in the requested state, and returns the final publication info.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        name=dict(required=True),
        db=dict(type='str', aliases=['login_db']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        tables=dict(type='list', elements='str'),
        parameters=dict(type='dict'),
        owner=dict(type='str'),
        cascade=dict(type='bool', default=False),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # Parameters handling:
    name = module.params['name']
    state = module.params['state']
    tables = module.params['tables']
    params = module.params['parameters']
    owner = module.params['owner']
    cascade = module.params['cascade']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']

    if not trust_input:
        # Check input for potentially dangerous elements:
        # Flatten the parameters dict into "key = value" strings so
        # check_input can scan the values as well as the keys.
        if not params:
            params_list = None
        else:
            params_list = ['%s = %s' % (k, v) for k, v in iteritems(params)]

        check_input(module, name, tables, owner, session_role, params_list)

    # Warn about parameters that have no effect in the requested state:
    if state == 'absent':
        if tables:
            module.warn('parameter "tables" is ignored when "state=absent"')
        if params:
            module.warn('parameter "parameters" is ignored when "state=absent"')
        if owner:
            module.warn('parameter "owner" is ignored when "state=absent"')

    if state == 'present' and cascade:
        module.warn('parameter "cascade" is ignored when "state=present"')

    # Connect to DB and make cursor object:
    conn_params = get_conn_params(module, module.params)
    # We check publication state without DML queries execution, so set autocommit:
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Check version:
    if cursor.connection.server_version < SUPPORTED_PG_VERSION:
        module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")

    # Nothing was changed by default:
    changed = False

    ###################################
    # Create object and do rock'n'roll:
    publication = PgPublication(module, cursor, name)

    if tables:
        tables = transform_tables_representation(tables)

    # If module.check_mode=True, nothing will be changed:
    if state == 'present':
        if not publication.exists:
            changed = publication.create(tables, params, owner, check_mode=module.check_mode)

        else:
            changed = publication.update(tables, params, owner, check_mode=module.check_mode)

    elif state == 'absent':
        changed = publication.drop(cascade=cascade, check_mode=module.check_mode)

    # Get final publication info:
    # After a real (non-check-mode) drop the publication is gone, so only
    # refresh info when it can still exist.
    pub_fin_info = {}
    if state == 'present' or (state == 'absent' and module.check_mode):
        pub_fin_info = publication.get_info()
    elif state == 'absent' and not module.check_mode:
        publication.exists = False

    # Connection is not needed any more:
    cursor.close()
    db_connection.close()

    # Update publication info and return ret values:
    module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py
new file mode 100644
index 00000000..259a8d48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_query.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Felix Archambault
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_query
+short_description: Run PostgreSQL queries
+description:
+- Runs arbitrary PostgreSQL queries.
+- Can run queries from SQL script files.
+- Does not run against backup files. Use M(community.postgresql.postgresql_db) with I(state=restore)
+ to run queries on files made by pg_dump/pg_dumpall utilities.
+options:
+ query:
+ description:
+ - SQL query to run. Variables can be escaped with psycopg2 syntax
+ U(http://initd.org/psycopg/docs/usage.html).
+ type: str
+ positional_args:
+ description:
+ - List of values to be passed as positional arguments to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(named_args).
+ type: list
+ elements: raw
+ named_args:
+ description:
+ - Dictionary of key-value arguments to pass to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(positional_args).
+ type: dict
+ path_to_script:
+ description:
+ - Path to a SQL script on the target machine.
+ - If the script contains several queries, they must be semicolon-separated.
+ - To run scripts containing objects with semicolons
+ (for example, function and procedure definitions), use I(as_single_query=yes).
+ - To upload dumps or to execute other complex scripts, the preferable way
+ is to use the M(community.postgresql.postgresql_db) module with I(state=restore).
+ - Mutually exclusive with I(query).
+ type: path
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ autocommit:
+ description:
+ - Execute in autocommit mode when the query can't be run inside a transaction block
+ (e.g., VACUUM).
+ - Mutually exclusive with I(check_mode).
+ type: bool
+ default: no
+ encoding:
+ description:
+ - Set the client encoding for the current session (e.g. C(UTF-8)).
+ - The default is the encoding defined by the database.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ search_path:
+ description:
+ - List of schema names to look in.
+ type: list
+ elements: str
+ version_added: '1.0.0'
+ as_single_query:
+ description:
+ - If C(yes), when reading from the I(path_to_script) file,
+ executes its whole content in a single query.
+ - When C(yes), the C(query_all_results) return value
+ contains only the result of the last statement.
+ - Whether the state is reported as changed or not
+ is determined by the last statement of the file.
+ - Used only when I(path_to_script) is specified, otherwise ignored.
+ - If set to C(no), the script can contain only semicolon-separated queries.
+ (see the I(path_to_script) option documentation).
+ - The default value is C(no).
+ type: bool
+ version_added: '1.1.0'
+seealso:
+- module: community.postgresql.postgresql_db
+- name: PostgreSQL Schema reference
+ description: Complete reference of the PostgreSQL schema documentation.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+author:
+- Felix Archambault (@archf)
+- Andrew Klychkov (@Andersson007)
+- Will Rouesnel (@wrouesnel)
+extends_documentation_fragment:
+- community.postgresql.postgres
+notes:
+- Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Simple select query to acme db
+ community.postgresql.postgresql_query:
+ db: acme
+ query: SELECT version()
+
+- name: Select query to db acme with positional arguments and non-default credentials
+ community.postgresql.postgresql_query:
+ db: acme
+ login_user: django
+ login_password: mysecretpass
+ query: SELECT * FROM acme WHERE id = %s AND story = %s
+ positional_args:
+ - 1
+ - test
+
+- name: Select query to test_db with named_args
+ community.postgresql.postgresql_query:
+ db: test_db
+ query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
+ named_args:
+ id_val: 1
+ story_val: test
+
+- name: Insert query to test_table in db test_db
+ community.postgresql.postgresql_query:
+ db: test_db
+ query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
+
+# If your script contains semicolons as parts of separate objects
+# like functions, procedures, and so on, use "as_single_query: yes"
+- name: Run queries from SQL script using UTF-8 client encoding for session
+ community.postgresql.postgresql_query:
+ db: test_db
+ path_to_script: /var/lib/pgsql/test.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+
+- name: Example of using autocommit parameter
+ community.postgresql.postgresql_query:
+ db: test_db
+ query: VACUUM
+ autocommit: yes
+
+- name: >
+ Insert data to the column of array type using positional_args.
+ Note that we use quotes here, the same as for passing JSON, etc.
+ community.postgresql.postgresql_query:
+ query: INSERT INTO test_table (array_column) VALUES (%s)
+ positional_args:
+ - '{1,2,3}'
+
+# Pass list and string vars as positional_args
+- name: Set vars
+ ansible.builtin.set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+
+- name: Select from test table by passing positional_args as arrays
+ community.postgresql.postgresql_query:
+ query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
+ positional_args:
+ - '{{ my_list }}'
+ - '{{ my_arr|string }}'
+
+# Select from test table looking into app1 schema first, then,
+# if the schema doesn't exist or the table hasn't been found there,
+# try to find it in the schema public
+- name: Select from test using search_path
+ community.postgresql.postgresql_query:
+ query: SELECT * FROM test_array_table
+ search_path:
+ - app1
+ - public
+
+# If you use a variable in positional_args / named_args that can
+# be undefined and you wish to set it as NULL, the constructions like
+# "{{ my_var if (my_var is defined) else none | default(none) }}"
+# will not work as expected substituting an empty string instead of NULL.
+# If possible, we suggest to use Ansible's DEFAULT_JINJA2_NATIVE configuration
+# (https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-jinja2-native).
+# Enabling it fixes this problem. If you cannot enable it, the following workaround
+# can be used.
+# You should precheck such a value and define it as NULL when undefined.
+# For example:
+- name: When undefined, set to NULL
+ set_fact:
+ my_var: NULL
+ when: my_var is undefined
+
+# Then:
+- name: Insert a value using positional arguments
+ community.postgresql.postgresql_query:
+ query: INSERT INTO test_table (col1) VALUES (%s)
+ positional_args:
+ - '{{ my_var }}'
+'''
+
+RETURN = r'''
+query:
+ description:
+ - Executed query.
+ - When reading several queries from a file, it contains only the last one.
+ returned: always
+ type: str
+ sample: 'SELECT * FROM bar'
+statusmessage:
+ description:
+ - Attribute containing the message returned by the command.
+ - When reading several queries from a file, it contains a message of the last one.
+ returned: always
+ type: str
+ sample: 'INSERT 0 1'
+query_result:
+ description:
+ - List of dictionaries in column:value form representing returned rows.
+ - When running queries from a file, returns result of the last query.
+ returned: always
+ type: list
+ elements: dict
+ sample: [{"Column": "Value1"},{"Column": "Value2"}]
+query_list:
+ description:
+ - List of executed queries.
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: str
+ sample: ['SELECT * FROM foo', 'SELECT * FROM bar']
+query_all_results:
+ description:
+ - List containing results of all queries executed (one sublist for every query).
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: list
+ sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"Column": "Value1"},{"Column": "Value2"}]]
+rowcount:
+ description:
+ - Number of produced or affected rows.
+ - When using a script with multiple queries,
+ it contains a total number of produced or affected rows.
+ returned: changed
+ type: int
+ sample: 5
+'''
+
+try:
+ from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # it is needed for checking 'no result to fetch' in main(),
+ # psycopg2 availability will be checked by connect_to_db() into
+ # ansible.module_utils.postgres
+ pass
+
+import datetime
+import decimal
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# Module execution.
+#
+
def list_to_pg_array(elem):
    """Convert the passed list to PostgreSQL array
    represented as a string.

    Args:
        elem (list): List that needs to be converted.

    Returns:
        str: String representation of PostgreSQL array, e.g. '{1, 2, 3}'.
    """
    # str(list) always renders exactly one leading '[' and one trailing ']',
    # so slice those off.  The previous strip('[]') removed *every* leading
    # and trailing bracket character, which corrupted nested lists
    # (e.g. [[1], [2]] became '{1], [2}' instead of '{[1], [2]}').
    return '{' + str(elem)[1:-1] + '}'
+
+
def convert_elements_to_pg_arrays(obj):
    """Replace list-valued items of ``obj`` with PostgreSQL array strings.

    Mutates ``obj`` in place and returns it. Objects that are neither
    dicts nor lists are returned untouched.

    Args:
        obj (dict or list): Container whose list-valued items are converted.

    Returns:
        dict or list: The same object with its list items converted.
    """
    if isinstance(obj, dict):
        for key in obj:
            if isinstance(obj[key], list):
                obj[key] = list_to_pg_array(obj[key])

    elif isinstance(obj, list):
        for idx in range(len(obj)):
            if isinstance(obj[idx], list):
                obj[idx] = list_to_pg_array(obj[idx])

    return obj
+
+
def set_search_path(cursor, search_path):
    """Apply a search_path to the current session.

    Args:
        cursor (Psycopg2 cursor): Database cursor object.
        search_path (str): Comma-separated schema names.
    """
    # NOTE(review): the value is interpolated into the statement rather than
    # bound as a query parameter; callers pass schema names from module options.
    cursor.execute('SET search_path TO {0}'.format(search_path))
+
+
def main():
    """Entry point of the postgresql_query module.

    Builds the argument spec, reads the query (or script), executes each
    query against the target database, derives 'changed' from the command
    status, and exits via module.exit_json()/fail_json().
    """
    # Start from the shared PostgreSQL connection options and add the
    # module-specific parameters.
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        query=dict(type='str'),
        db=dict(type='str', aliases=['login_db']),
        positional_args=dict(type='list', elements='raw'),
        named_args=dict(type='dict'),
        session_role=dict(type='str'),
        path_to_script=dict(type='path'),
        autocommit=dict(type='bool', default=False),
        encoding=dict(type='str'),
        trust_input=dict(type='bool', default=True),
        search_path=dict(type='list', elements='str'),
        as_single_query=dict(type='bool'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=(('positional_args', 'named_args'),),
        supports_check_mode=True,
    )

    query = module.params["query"]
    positional_args = module.params["positional_args"]
    named_args = module.params["named_args"]
    path_to_script = module.params["path_to_script"]
    autocommit = module.params["autocommit"]
    encoding = module.params["encoding"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]
    search_path = module.params["search_path"]
    as_single_query = module.params["as_single_query"]

    # Splitting a script on ';' (the as_single_query=False behavior) breaks
    # scripts that contain semicolons inside object bodies, so nudge users
    # to choose explicitly.
    if path_to_script and as_single_query is None:
        module.warn('You use the "path_to_script" option with the "as_single_query" '
                    'option unset. The default is false. '
                    'To avoid crashes, please read the documentation '
                    'and define the "as_single_query" option explicitly.')

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, session_role)

    # Autocommit cannot be rolled back, so it is incompatible with check_mode.
    if autocommit and module.check_mode:
        module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")

    if path_to_script and query:
        module.fail_json(msg="path_to_script is mutually exclusive with query")

    # List arguments are rendered as PostgreSQL array literals ('{...}').
    if positional_args:
        positional_args = convert_elements_to_pg_arrays(positional_args)

    elif named_args:
        named_args = convert_elements_to_pg_arrays(named_args)

    # Build the list of queries to run: either the content of the script
    # (whole or split on ';') or the single inline query.
    query_list = []
    if path_to_script:
        try:
            with open(path_to_script, 'rb') as f:
                query = to_native(f.read())

            if not as_single_query:
                if ';' in query:
                    query_list = [q for q in query.split(';') if q != '\n']
                else:
                    query_list.append(query)
            else:
                query_list.append(query)

        except Exception as e:
            module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
    else:
        query_list.append(query)

    conn_params = get_conn_params(module, module.params)
    db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
    if encoding is not None:
        db_connection.set_client_encoding(encoding)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    if search_path:
        set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path]))

    # Prepare args:
    # NOTE: these are re-read from module.params, so the list->array
    # conversion above (done in place) is still reflected here.
    if module.params.get("positional_args"):
        arguments = module.params["positional_args"]
    elif module.params.get("named_args"):
        arguments = module.params["named_args"]
    else:
        arguments = None

    # Set defaults:
    changed = False

    query_all_results = []
    rowcount = 0
    statusmessage = ''

    # Execute query:
    for query in query_list:
        try:
            cursor.execute(query, arguments)
            statusmessage = cursor.statusmessage
            # rowcount accumulates across all queries of a script.
            if cursor.rowcount > 0:
                rowcount += cursor.rowcount

            query_result = []
            try:
                for row in cursor.fetchall():
                    # Ansible engine does not support decimals.
                    # An explicit conversion is required on the module's side
                    row = dict(row)
                    for (key, val) in iteritems(row):
                        if isinstance(val, decimal.Decimal):
                            row[key] = float(val)

                        elif isinstance(val, datetime.timedelta):
                            row[key] = str(val)

                    query_result.append(row)

            except Psycopg2ProgrammingError as e:
                # Non-returning statements (INSERT, DDL, ...) have no rows.
                # NOTE(review): this assigns an empty dict rather than an
                # empty list; consumers of query_result see {} in that case.
                if to_native(e) == 'no results to fetch':
                    query_result = {}

            except Exception as e:
                module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))

            query_all_results.append(query_result)

            # Derive 'changed' from the command tag, e.g. 'INSERT 0 1'
            # (3 tokens, affected count last) or 'UPDATE 5' (2 tokens).
            # Plain SELECTs never mark the task as changed.
            if 'SELECT' not in statusmessage:
                if re.search(re.compile(r'(UPDATE|INSERT|DELETE)'), statusmessage):
                    s = statusmessage.split()
                    if len(s) == 3:
                        if s[2] != '0':
                            changed = True

                    elif len(s) == 2:
                        if s[1] != '0':
                            changed = True

                    else:
                        changed = True

                else:
                    # Any other non-SELECT command (DDL etc.) counts as a change.
                    changed = True

        except Exception as e:
            # Undo everything done so far in this transaction, then fail.
            if not autocommit:
                db_connection.rollback()

            cursor.close()
            db_connection.close()
            module.fail_json(msg="Cannot execute SQL '%s' %s: %s, query list: %s" % (query, arguments, to_native(e), query_list))

    # In check mode nothing may persist; otherwise commit unless autocommit
    # already did.
    if module.check_mode:
        db_connection.rollback()
    else:
        if not autocommit:
            db_connection.commit()

    # query_result here holds the result of the last executed query.
    kw = dict(
        changed=changed,
        query=cursor.query,
        query_list=query_list,
        statusmessage=statusmessage,
        query_result=query_result,
        query_all_results=query_all_results,
        rowcount=rowcount,
    )

    cursor.close()
    db_connection.close()

    module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py
new file mode 100644
index 00000000..6e511376
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_schema.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_schema
+short_description: Add or remove PostgreSQL schema
+description:
+- Add or remove PostgreSQL schema.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ type: str
+ aliases:
+ - schema
+ database:
+ description:
+ - Name of the database to connect to and add or remove the schema.
+ type: str
+ default: postgres
+ aliases:
+ - db
+ - login_db
+ owner:
+ description:
+ - Name of the role to set as owner of the schema.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The schema state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ cascade_drop:
+ description:
+ - Drop schema with CASCADE to remove child objects.
+ type: bool
+ default: false
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(schema), I(owner), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL schemas
+ description: General information about PostgreSQL schemas.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+- name: CREATE SCHEMA reference
+ description: Complete reference of the CREATE SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createschema.html
+- name: ALTER SCHEMA reference
+ description: Complete reference of the ALTER SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterschema.html
+- name: DROP SCHEMA reference
+ description: Complete reference of the DROP SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropschema.html
+author:
+- Flavien Chantelot (@Dorn-) <contact@flavien.io>
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.postgresql.postgres
+notes:
+- Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Create a new schema with name acme in test database
+ community.postgresql.postgresql_schema:
+ db: test
+ name: acme
+
+- name: Create a new schema acme with a user bob who will own it
+ community.postgresql.postgresql_schema:
+ name: acme
+ owner: bob
+
+- name: Drop schema "acme" with cascade
+ community.postgresql.postgresql_schema:
+ name: acme
+ state: absent
+ cascade_drop: yes
+'''
+
+RETURN = r'''
+schema:
+ description: Name of the schema.
+ returned: success, changed
+ type: str
+ sample: "acme"
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["CREATE SCHEMA \"acme\""]
+'''
+
+import traceback
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+ SQLParseError,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
class NotSupportedError(Exception):
    """Raised for operations this module does not support; caught in main()."""
    pass
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
def set_owner(cursor, schema, owner):
    """Reassign ownership of ``schema`` to ``owner``.

    Always runs the ALTER statement and returns True so the caller
    reports a change.
    """
    quoted_schema = pg_quote_identifier(schema, 'schema')
    query = 'ALTER SCHEMA {0} OWNER TO "{1}"'.format(quoted_schema, owner)
    cursor.execute(query)
    executed_queries.append(query)
    return True
+
+
def get_schema_info(cursor, schema):
    """Return a row with the schema's owner, or None when it does not exist."""
    sql = ("SELECT schema_owner AS owner "
           "FROM information_schema.schemata "
           "WHERE schema_name = %(schema)s")
    params = {'schema': schema}
    cursor.execute(sql, params)
    return cursor.fetchone()
+
+
def schema_exists(cursor, schema):
    """Return True when exactly one schema named ``schema`` exists."""
    sql = ("SELECT schema_name FROM information_schema.schemata "
           "WHERE schema_name = %(schema)s")
    cursor.execute(sql, {'schema': schema})
    return cursor.rowcount == 1
+
+
def schema_delete(cursor, schema, cascade):
    """Drop ``schema`` if present.

    Returns True when a DROP was executed, False when the schema
    was already absent.
    """
    if not schema_exists(cursor, schema):
        return False

    query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
    if cascade:
        query = "%s CASCADE" % query
    cursor.execute(query)
    executed_queries.append(query)
    return True
+
+
def schema_create(cursor, schema, owner):
    """Create ``schema`` or, when it already exists, adjust its owner.

    Returns True when anything was changed, False otherwise.
    """
    if schema_exists(cursor, schema):
        # Schema already present -- at most the owner may need updating.
        current = get_schema_info(cursor, schema)
        if owner and owner != current['owner']:
            return set_owner(cursor, schema, owner)
        return False

    query = 'CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')
    if owner:
        query = '%s AUTHORIZATION "%s"' % (query, owner)
    cursor.execute(query)
    executed_queries.append(query)
    return True
+
+
def schema_matches(cursor, schema, owner):
    """Return True when the schema exists and, if ``owner`` is given,
    is already owned by ``owner``.
    """
    if not schema_exists(cursor, schema):
        return False
    info = get_schema_info(cursor, schema)
    return not (owner and owner != info['owner'])
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_schema module.

    Connects in autocommit mode (DDL cannot always run in a transaction
    block), then creates, drops, or re-owns the requested schema and
    exits via module.exit_json()/fail_json().
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        schema=dict(type="str", required=True, aliases=['name']),
        owner=dict(type="str", default=""),
        database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
        cascade_drop=dict(type="bool", default=False),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        session_role=dict(type="str"),
        trust_input=dict(type="bool", default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    schema = module.params["schema"]
    owner = module.params["owner"]
    state = module.params["state"]
    cascade_drop = module.params["cascade_drop"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, schema, owner, session_role)

    changed = False

    conn_params = get_conn_params(module, module.params)
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    try:
        if module.check_mode:
            if state == "absent":
                # BUGFIX: this previously was 'not schema_exists(...)',
                # which reported a change when the schema was already
                # absent. Dropping only changes something when the
                # schema currently exists (matching schema_delete()).
                changed = schema_exists(cursor, schema)
            elif state == "present":
                # A change is needed unless the schema exists with the
                # requested owner (matching schema_create()).
                changed = not schema_matches(cursor, schema, owner)
            module.exit_json(changed=changed, schema=schema)

        if state == "absent":
            try:
                changed = schema_delete(cursor, schema, cascade_drop)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state == "present":
            try:
                changed = schema_create(cursor, schema, owner)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except SystemExit:
        # Avoid catching this on Python 2.4
        raise
    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())

    # Close the cursor as well as the connection before exiting.
    cursor.close()
    db_connection.close()
    module.exit_json(changed=changed, schema=schema, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py
new file mode 100644
index 00000000..229068ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_sequence.py
@@ -0,0 +1,628 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_sequence
+short_description: Create, drop, or alter a PostgreSQL sequence
+description:
+- Allows to create, drop or change the definition of a sequence generator.
+options:
+ sequence:
+ description:
+ - The name of the sequence.
+ required: true
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - The sequence state.
+ - If I(state=absent) other options will be ignored except of I(name) and
+ I(schema).
+ default: present
+ choices: [ absent, present ]
+ type: str
+ data_type:
+ description:
+ - Specifies the data type of the sequence. Valid types are bigint, integer,
+ and smallint. bigint is the default. The data type determines the default
+ minimum and maximum values of the sequence. For more info see the
+ documentation
+ U(https://www.postgresql.org/docs/current/sql-createsequence.html).
+ - Supported from PostgreSQL 10.
+ choices: [ bigint, integer, smallint ]
+ type: str
+ increment:
+ description:
+ - Increment specifies which value is added to the current sequence value
+ to create a new value.
+ - A positive value will make an ascending sequence, a negative one a
+ descending sequence. The default value is 1.
+ type: int
+ minvalue:
+ description:
+ - Minvalue determines the minimum value a sequence can generate. The
+ default for an ascending sequence is 1. The default for a descending
+ sequence is the minimum value of the data type.
+ type: int
+ aliases:
+ - min
+ maxvalue:
+ description:
+ - Maxvalue determines the maximum value for the sequence. The default for
+ an ascending sequence is the maximum
+ value of the data type. The default for a descending sequence is -1.
+ type: int
+ aliases:
+ - max
+ start:
+ description:
+ - Start allows the sequence to begin anywhere. The default starting value
+ is I(minvalue) for ascending sequences and I(maxvalue) for descending
+ ones.
+ type: int
+ cache:
+ description:
+ - Cache specifies how many sequence numbers are to be preallocated and
+ stored in memory for faster access. The minimum value is 1 (only one
+ value can be generated at a time, i.e., no cache), and this is also
+ the default.
+ type: int
+ cycle:
+ description:
+ - The cycle option allows the sequence to wrap around when the I(maxvalue)
+ or I(minvalue) has been reached by an ascending or descending sequence
+ respectively. If the limit is reached, the next number generated will be
+ the minvalue or maxvalue, respectively.
+ - If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
+ has reached its maximum value will return an error. False (NO CYCLE) is
+ the default.
+ type: bool
+ default: no
+ cascade:
+ description:
+ - Automatically drop objects that depend on the sequence, and in turn all
+ objects that depend on those objects.
+ - Ignored if I(state=present).
+ - Only used with I(state=absent).
+ type: bool
+ default: no
+ rename_to:
+ description:
+ - The new name for the I(sequence).
+ - Works only for existing sequences.
+ type: str
+ owner:
+ description:
+ - Set the owner for the I(sequence).
+ type: str
+ schema:
+ description:
+    - The schema of the I(sequence). This is used to create and relocate
+      a I(sequence) in the given schema.
+ default: public
+ type: str
+ newschema:
+ description:
+ - The new schema for the I(sequence). Will be used for moving a
+ I(sequence) to another I(schema).
+ - Works only for existing sequences.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified I(session_role)
+ must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the I(session_role) were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - database
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(sequence), I(schema), I(rename_to),
+ I(owner), I(newschema), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supports C(check_mode).
+- If you do not pass db parameter, sequence will be created in the database
+ named postgres.
+seealso:
+- module: community.postgresql.postgresql_table
+- module: community.postgresql.postgresql_owner
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_tablespace
+- name: CREATE SEQUENCE reference
+ description: Complete reference of the CREATE SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsequence.html
+- name: ALTER SEQUENCE reference
+ description: Complete reference of the ALTER SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersequence.html
+- name: DROP SEQUENCE reference
+ description: Complete reference of the DROP SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsequence.html
+author:
+- Tobias Birkefeld (@tcraxs)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create an ascending bigint sequence called foobar in the default
+ database
+ community.postgresql.postgresql_sequence:
+ name: foobar
+
+- name: Create an ascending integer sequence called foobar, starting at 101
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ data_type: integer
+ start: 101
+
+- name: Create a descending sequence called foobar, starting at 101 and
+    preallocated 10 sequence numbers in cache
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ increment: -1
+ cache: 10
+ start: 101
+
+- name: Create an ascending sequence called foobar, which cycles between 1 and 10
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ cycle: yes
+ min: 1
+ max: 10
+
+- name: Create an ascending bigint sequence called foobar in the default
+ database with owner foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Rename an existing sequence named foo to bar
+ community.postgresql.postgresql_sequence:
+ name: foo
+ rename_to: bar
+
+- name: Change the schema of an existing sequence to foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ newschema: foobar
+
+- name: Change the owner of an existing sequence to foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Drop a sequence called foobar
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ state: absent
+
+- name: Drop a sequence called foobar with cascade
+ community.postgresql.postgresql_sequence:
+ name: foobar
+ cascade: yes
+ state: absent
+'''
+
+RETURN = r'''
+state:
+ description: Sequence state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+sequence:
+ description: Sequence name.
+ returned: always
+ type: str
+ sample: 'foobar'
+queries:
+ description: List of queries that the module tried to execute.
+ returned: always
+ type: str
+ sample: [ "CREATE SEQUENCE \"foo\"" ]
+schema:
+ description: Name of the schema of the sequence.
+ returned: always
+ type: str
+ sample: 'foo'
+data_type:
+ description: Shows the current data type of the sequence.
+ returned: always
+ type: str
+ sample: 'bigint'
+increment:
+ description: The value of increment of the sequence. A positive value will
+ make an ascending sequence, a negative one a descending
+ sequence.
+ returned: always
+ type: int
+ sample: '-1'
+minvalue:
+ description: The value of minvalue of the sequence.
+ returned: always
+ type: int
+ sample: '1'
+maxvalue:
+ description: The value of maxvalue of the sequence.
+ returned: always
+ type: int
+ sample: '9223372036854775807'
+start:
+ description: The value of start of the sequence.
+ returned: always
+ type: int
+ sample: '12'
+cycle:
+ description: Shows whether the sequence cycles or not.
+ returned: always
+ type: str
+ sample: 'NO'
+owner:
+ description: Shows the current owner of the sequence
+ after the successful run of the task.
+ returned: always
+ type: str
+ sample: 'postgres'
+newname:
+ description: Shows the new sequence name after rename.
+ returned: on success
+ type: str
+ sample: 'barfoo'
+newschema:
+ description: Shows the new schema of the sequence after schema change.
+ returned: on success
+ type: str
+ sample: 'foobar'
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
class Sequence(object):
    """Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command.

    Arguments:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library

    Attributes:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        changed (bool) -- something was changed after execution or not
        executed_queries (list) -- executed queries
        name (str) -- name of the sequence
        owner (str) -- name of the owner of the sequence
        schema (str) -- name of the schema (default: public)
        data_type (str) -- data type of the sequence
        start_value (int) -- value of the sequence start
        minvalue (int) -- minimum value of the sequence
        maxvalue (int) -- maximum value of the sequence
        increment (int) -- increment value of the sequence
        cycle (bool) -- sequence can cycle or not
        new_name (str) -- name of the renamed sequence
        new_schema (str) -- name of the new schema
        exists (bool) -- sequence exists or not
    """

    def __init__(self, module, cursor):
        self.module = module
        self.cursor = cursor
        # Statements run through exec_sql() are appended here and reported
        # back to the caller via the 'queries' return value.
        self.executed_queries = []
        self.name = self.module.params['sequence']
        # The attributes below are refreshed from the catalog by get_info()
        # when the sequence exists; empty strings mean "not yet known".
        self.owner = ''
        self.schema = self.module.params['schema']
        self.data_type = ''
        self.start_value = ''
        self.minvalue = ''
        self.maxvalue = ''
        self.increment = ''
        self.cycle = ''
        self.new_name = ''
        self.new_schema = ''
        self.exists = False
        # Collect info
        self.get_info()

    def get_info(self):
        """Getter to refresh and get sequence info.

        Sets self.exists and, when the sequence is found, refreshes the
        schema/name/owner/data_type/start/min/max/increment/cycle
        attributes from the catalog.
        Returns False when the sequence does not exist; returns None
        (implicitly) otherwise.
        """
        # NOTE(review): pg_class is joined on relname only, while the WHERE
        # clause constrains the information_schema side by schema; a
        # same-named sequence in another schema could still match on the
        # pg_class side -- confirm whether the join needs an additional
        # 'AND n.nspname = s.sequence_schema' predicate.
        query = ("SELECT "
                 "s.sequence_schema AS schemaname, "
                 "s.sequence_name AS sequencename, "
                 "pg_get_userbyid(c.relowner) AS sequenceowner, "
                 "s.data_type::regtype AS data_type, "
                 "s.start_value AS start_value, "
                 "s.minimum_value AS min_value, "
                 "s.maximum_value AS max_value, "
                 "s.increment AS increment_by, "
                 "s.cycle_option AS cycle "
                 "FROM information_schema.sequences s "
                 "JOIN pg_class c ON c.relname = s.sequence_name "
                 "LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
                 "WHERE NOT pg_is_other_temp_schema(n.oid) "
                 "AND c.relkind = 'S'::\"char\" "
                 "AND sequence_name = %(name)s "
                 "AND sequence_schema = %(schema)s")

        res = exec_sql(self, query,
                       query_params={'name': self.name, 'schema': self.schema},
                       add_to_executed=False)

        if not res:
            self.exists = False
            return False

        if res:
            self.exists = True
            self.schema = res[0]['schemaname']
            self.name = res[0]['sequencename']
            self.owner = res[0]['sequenceowner']
            self.data_type = res[0]['data_type']
            self.start_value = res[0]['start_value']
            self.minvalue = res[0]['min_value']
            self.maxvalue = res[0]['max_value']
            self.increment = res[0]['increment_by']
            self.cycle = res[0]['cycle']

    def create(self):
        """Implements CREATE SEQUENCE command behavior.

        Only options the user explicitly passed are appended to the
        statement; everything else is left to the server's defaults.
        Returns True on success (exec_sql with return_bool=True).
        """
        query = ['CREATE SEQUENCE']
        query.append(self.__add_schema())

        # NOTE(review): these truthiness checks mean a parameter explicitly
        # set to 0 (e.g. minvalue=0 or start=0) is silently omitted from
        # the statement -- confirm whether 'is not None' was intended.
        if self.module.params.get('data_type'):
            query.append('AS %s' % self.module.params['data_type'])

        if self.module.params.get('increment'):
            query.append('INCREMENT BY %s' % self.module.params['increment'])

        if self.module.params.get('minvalue'):
            query.append('MINVALUE %s' % self.module.params['minvalue'])

        if self.module.params.get('maxvalue'):
            query.append('MAXVALUE %s' % self.module.params['maxvalue'])

        if self.module.params.get('start'):
            query.append('START WITH %s' % self.module.params['start'])

        if self.module.params.get('cache'):
            query.append('CACHE %s' % self.module.params['cache'])

        if self.module.params.get('cycle'):
            query.append('CYCLE')

        return exec_sql(self, ' '.join(query), return_bool=True)

    def drop(self):
        """Implements DROP SEQUENCE command behavior.

        Appends CASCADE when requested so dependent objects are dropped
        together with the sequence.
        """
        query = ['DROP SEQUENCE']
        query.append(self.__add_schema())

        if self.module.params.get('cascade'):
            query.append('CASCADE')

        return exec_sql(self, ' '.join(query), return_bool=True)

    def rename(self):
        """Implements ALTER SEQUENCE RENAME TO command behavior."""
        query = ['ALTER SEQUENCE']
        query.append(self.__add_schema())
        query.append('RENAME TO "%s"' % self.module.params['rename_to'])

        return exec_sql(self, ' '.join(query), return_bool=True)

    def set_owner(self):
        """Implements ALTER SEQUENCE OWNER TO command behavior."""
        query = ['ALTER SEQUENCE']
        query.append(self.__add_schema())
        query.append('OWNER TO "%s"' % self.module.params['owner'])

        return exec_sql(self, ' '.join(query), return_bool=True)

    def set_schema(self):
        """Implements ALTER SEQUENCE SET SCHEMA command behavior."""
        query = ['ALTER SEQUENCE']
        query.append(self.__add_schema())
        query.append('SET SCHEMA "%s"' % self.module.params['newschema'])

        return exec_sql(self, ' '.join(query), return_bool=True)

    def __add_schema(self):
        # Fully qualified, double-quoted '"schema"."name"' identifier used
        # by all the statement builders above. Uses the *current* schema and
        # name (refreshed by get_info()), not the raw module params.
        return '"%s"."%s"' % (self.schema, self.name)
+
+
+# ===========================================
+# Module execution.
+#
+
def main():
    """Entry point of the postgresql_sequence module.

    Parses parameters, connects to the database, and creates, alters,
    renames or drops the requested sequence, honoring check mode by
    disabling autocommit and rolling back at the end.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        sequence=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']),
        increment=dict(type='int'),
        minvalue=dict(type='int', aliases=['min']),
        maxvalue=dict(type='int', aliases=['max']),
        start=dict(type='int'),
        cache=dict(type='int'),
        cycle=dict(type='bool', default=False),
        schema=dict(type='str', default='public'),
        cascade=dict(type='bool', default=False),
        rename_to=dict(type='str'),
        owner=dict(type='str'),
        newschema=dict(type='str'),
        db=dict(type='str', default='', aliases=['login_db', 'database']),
        session_role=dict(type='str'),
        trust_input=dict(type="bool", default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # rename_to and cascade are standalone operations, so each of them
        # is declared exclusive with every attribute-changing option.
        mutually_exclusive=[
            ['rename_to', 'data_type'],
            ['rename_to', 'increment'],
            ['rename_to', 'minvalue'],
            ['rename_to', 'maxvalue'],
            ['rename_to', 'start'],
            ['rename_to', 'cache'],
            ['rename_to', 'cycle'],
            ['rename_to', 'cascade'],
            ['rename_to', 'owner'],
            ['rename_to', 'newschema'],
            ['cascade', 'data_type'],
            ['cascade', 'increment'],
            ['cascade', 'minvalue'],
            ['cascade', 'maxvalue'],
            ['cascade', 'start'],
            ['cascade', 'cache'],
            ['cascade', 'cycle'],
            ['cascade', 'owner'],
            ['cascade', 'newschema'],
        ]
    )

    # Guard against SQL injection through identifier-like parameters,
    # since they are interpolated into statements, not bound.
    if not module.params["trust_input"]:
        check_input(
            module,
            module.params['sequence'],
            module.params['schema'],
            module.params['rename_to'],
            module.params['owner'],
            module.params['newschema'],
            module.params['session_role'],
        )

    # Note: we don't need to check mutually exclusive params here, because they are
    # checked automatically by AnsibleModule (mutually_exclusive=[] list above).

    # Change autocommit to False if check_mode:
    # (all statements then run in one transaction that is rolled back below)
    autocommit = not module.check_mode
    # Connect to DB and make cursor object:
    conn_params = get_conn_params(module, module.params)
    db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    ##############
    # Create the object and do main job:
    # (the constructor immediately queries the catalog, so data.exists is valid)
    data = Sequence(module, cursor)

    # Set defaults:
    changed = False

    # Create new sequence
    if not data.exists and module.params['state'] == 'present':
        # rename/newschema require an existing sequence; fail early.
        if module.params.get('rename_to'):
            module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence'])
        if module.params.get('newschema'):
            module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence'])

        changed = data.create()

    # Drop non-existing sequence
    elif not data.exists and module.params['state'] == 'absent':
        # Nothing to do
        changed = False

    # Drop existing sequence
    elif data.exists and module.params['state'] == 'absent':
        changed = data.drop()

    # Rename sequence
    # NOTE(review): data.exists is not refreshed after drop(), so with
    # state=absent combined with rename_to this block would still run
    # against the dropped sequence -- confirm that combination is rejected
    # or harmless upstream.
    if data.exists and module.params.get('rename_to'):
        if data.name != module.params['rename_to']:
            changed = data.rename()
            if changed:
                data.new_name = module.params['rename_to']

    # Refresh information
    if module.params['state'] == 'present':
        data.get_info()

    # Change owner, schema and settings
    if module.params['state'] == 'present' and data.exists:
        # change owner
        if module.params.get('owner'):
            if data.owner != module.params['owner']:
                changed = data.set_owner()

        # Set schema
        if module.params.get('newschema'):
            if data.schema != module.params['newschema']:
                changed = data.set_schema()
                if changed:
                    data.new_schema = module.params['newschema']

    # Rollback if it's possible and check_mode:
    if module.check_mode:
        db_connection.rollback()
    else:
        db_connection.commit()

    cursor.close()
    db_connection.close()

    # Make return values:
    kw = dict(
        changed=changed,
        state='present',
        sequence=data.name,
        queries=data.executed_queries,
        schema=data.schema,
        data_type=data.data_type,
        increment=data.increment,
        minvalue=data.minvalue,
        maxvalue=data.maxvalue,
        start=data.start_value,
        cycle=data.cycle,
        owner=data.owner,
    )

    if module.params['state'] == 'present':
        if data.new_name:
            kw['newname'] = data.new_name
        if data.new_schema:
            kw['newschema'] = data.new_schema

    elif module.params['state'] == 'absent':
        # Overwrite the 'present' default set above.
        kw['state'] = 'absent'

    module.exit_json(**kw)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py
new file mode 100644
index 00000000..4e909a3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_set.py
@@ -0,0 +1,480 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_set
+short_description: Change a PostgreSQL server configuration parameter
+description:
+ - Allows to change a PostgreSQL server configuration parameter.
+ - The module uses ALTER SYSTEM command and applies changes by reload server configuration.
+ - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
+ - It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file.
+ - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
+ which is read in addition to postgresql.conf.
+ - The module allows to reset parameter to boot_val (cluster initial value) by I(reset=yes) or remove parameter
+ string from postgresql.auto.conf and reload I(value=default) (for settings with postmaster context restart is required).
+ - After change you can see in the ansible output the previous and
+ the new parameter value and other information using returned values and M(ansible.builtin.debug) module.
+options:
+ name:
+ description:
+ - Name of PostgreSQL server parameter.
+ type: str
+ required: true
+ value:
+ description:
+ - Parameter value to set.
+ - To remove parameter string from postgresql.auto.conf and
+ reload the server configuration you must pass I(value=default).
+ With I(value=default) the playbook always returns changed is true.
+ type: str
+ reset:
+ description:
+ - Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supported version of PostgreSQL is 9.4 and later.
+- Supports C(check_mode).
+- Pay attention, change setting with 'postmaster' context can return changed is true
+ when actually nothing changes because the same value may be presented in
+ several different forms, for example, 1024MB, 1GB, etc. However, in pg_settings
+ system view it can be defined like 131072 number of 8kB pages.
+ The final check of the parameter value cannot compare it because the server was
+ not restarted and the value in pg_settings is not updated yet.
+- For some parameters restart of PostgreSQL server is required.
+ See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+seealso:
+- module: community.postgresql.postgresql_info
+- name: PostgreSQL server configuration
+ description: General information about PostgreSQL server configuration.
+ link: https://www.postgresql.org/docs/current/runtime-config.html
+- name: PostgreSQL view pg_settings reference
+ description: Complete reference of the pg_settings view documentation.
+ link: https://www.postgresql.org/docs/current/view-pg-settings.html
+- name: PostgreSQL ALTER SYSTEM command reference
+ description: Complete reference of the ALTER SYSTEM command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersystem.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Restore wal_keep_segments parameter to initial state
+ community.postgresql.postgresql_set:
+ name: wal_keep_segments
+ reset: yes
+
+# Set work_mem parameter to 32MB and show what's been changed and restart is required or not
+# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
+- name: Set work mem parameter
+ community.postgresql.postgresql_set:
+ name: work_mem
+ value: 32mb
+ register: set
+
+- name: Print the result if the setting changed
+ ansible.builtin.debug:
+ msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
+ when: set.changed
+# Ensure that the restart of PostgreSQL server must be required for some parameters.
+# In this situation you see the same parameter in prev_val_pretty and value_pretty, but 'changed=True'
+# (If you passed the value that was different from the current server setting).
+
+- name: Set log_min_duration_statement parameter to 1 second
+ community.postgresql.postgresql_set:
+ name: log_min_duration_statement
+ value: 1s
+
+- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
+ community.postgresql.postgresql_set:
+ name: wal_log_hints
+ value: default
+'''
+
+RETURN = r'''
+name:
+ description: Name of PostgreSQL server parameter.
+ returned: always
+ type: str
+ sample: 'shared_buffers'
+restart_required:
+ description: Information about parameter current state.
+ returned: always
+ type: bool
+ sample: true
+prev_val_pretty:
+ description: Information about previous state of the parameter.
+ returned: always
+ type: str
+ sample: '4MB'
+value_pretty:
+ description: Information about current state of the parameter.
+ returned: always
+ type: str
+ sample: '64MB'
+value:
+ description:
+ - Dictionary that contains the current parameter value (at the time of playbook finish).
+ - Pay attention that for real change some parameters restart of PostgreSQL server is required.
+ - Returns the current value in the check mode.
+ returned: always
+ type: dict
+ sample: { "value": 67108864, "unit": "b" }
+context:
+ description:
+ - PostgreSQL setting context.
+ returned: always
+ type: str
+ sample: user
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except Exception:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+PG_REQ_VER = 90400
+
+# To allow to set value like 1mb instead of 1MB, etc:
+LOWERCASE_SIZE_UNITS = ("mb", "gb", "tb")
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
def param_get(cursor, module, name):
    """Read the current state of a server parameter from pg_settings.

    Returns a 5-tuple (pretty_value, raw_value, unit, boot_val, context).
    Fails the module when the parameter cannot be read or does not exist.
    kB/MB-based settings are normalized to plain bytes with unit 'b'.
    """
    query = ("SELECT name, setting, unit, context, boot_val "
             "FROM pg_settings WHERE name = %(name)s")
    try:
        cursor.execute(query, {'name': name})
        info = cursor.fetchall()
        cursor.execute("SHOW %s" % name)
        val = cursor.fetchone()

    except Exception as e:
        module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))

    if not info:
        module.fail_json(msg="No such parameter: %s. "
                         "Please check its spelling or presence in your PostgreSQL version "
                         "(https://www.postgresql.org/docs/current/runtime-config.html)" % name)

    row = info[0]
    raw_val, unit, context, boot_val = row[1], row[2], row[3], row[4]

    # SHOW may render booleans as True/False; normalize to on/off.
    if val[0] == 'True':
        val[0] = 'on'
    elif val[0] == 'False':
        val[0] = 'off'

    # Normalize kB/MB-based settings to plain bytes so that values given
    # in different units can be compared directly.
    factor = {'kB': 1024, 'MB': 1024 * 1024}.get(unit)
    if factor is not None:
        if int(raw_val) > 0:
            raw_val = int(raw_val) * factor
        if int(boot_val) > 0:
            boot_val = int(boot_val) * factor

        unit = 'b'

    return (val[0], raw_val, unit, boot_val, context)
+
+
def pretty_to_bytes(pretty_val):
    """Convert a human-readable size like '1024kB', '32MB' or '1GB' to bytes.

    Returns an int number of bytes when *pretty_val* starts with digits and
    ends with a recognized size suffix ('B', 'kB', 'MB', 'GB', 'TB').
    Any other value (empty, None, 'on', '100', unknown suffix) is returned
    unchanged so callers can still compare non-size settings directly.
    """
    # It's sometimes possible to have an empty value
    if not pretty_val:
        return pretty_val

    # If the first char is not a digit ('on', 'replica', ...) or the last
    # char is not a letter ('100'), there is no size suffix to parse.
    if not pretty_val[0].isdigit():
        return pretty_val

    if not pretty_val[-1].isalpha():
        return pretty_val

    # Extract the leading digits, e.g. '1024' from '1024kB'.
    num_part = []
    for c in pretty_val:
        if not c.isdigit():
            break
        num_part.append(c)

    # Bug fix: the numeric part must be an int. Previously it stayed a
    # string, so 'num_part * 1024' performed string repetition instead of
    # arithmetic, and comparisons such as '1MB' == '1024kB' never matched.
    num = int(''.join(num_part))

    suffix_factor = {
        'kB': 1024,
        'MB': 1024 ** 2,
        'GB': 1024 ** 3,
        'TB': 1024 ** 4,
    }

    if len(pretty_val) >= 2 and pretty_val[-2:] in suffix_factor:
        return num * suffix_factor[pretty_val[-2:]]

    # For cases like "1B"
    if pretty_val[-1] == 'B':
        return num

    # Unknown suffix (e.g. '1Mb'): pass the value through untouched.
    return pretty_val
+
+
def param_set(cursor, module, name, value, context):
    """Set server parameter *name* to *value* via ALTER SYSTEM.

    With value 'default' (case-insensitive) the parameter line is removed
    from postgresql.auto.conf. For parameters whose context is not
    'postmaster' the configuration is reloaded immediately; 'postmaster'
    parameters only take effect after a server restart, so the reload is
    skipped. Returns True on success, fails the module otherwise.
    """
    try:
        # NOTE: name/value are interpolated because ALTER SYSTEM does not
        # accept bind parameters; input is vetted by check_input() when
        # trust_input is off.
        if str(value).lower() == 'default':
            query = "ALTER SYSTEM SET %s = DEFAULT" % name
        else:
            query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
        cursor.execute(query)

        if context != 'postmaster':
            cursor.execute("SELECT pg_reload_conf()")

    except Exception as e:
        # Bug fix: this path *sets* a value; the old message said "get".
        module.fail_json(msg="Unable to set %s value due to : %s" % (name, to_native(e)))

    return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_set module.

    Reads the parameter's current state, applies the requested change or
    reset via ALTER SYSTEM, and (for reloadable contexts) reconnects to
    verify the new effective value.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        name=dict(type='str', required=True),
        db=dict(type='str', aliases=['login_db']),
        value=dict(type='str'),
        reset=dict(type='bool', default=False),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    name = module.params['name']
    value = module.params['value']
    reset = module.params['reset']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']

    if not trust_input:
        # Check input for potentially dangerous elements:
        # (name and value are interpolated into ALTER SYSTEM statements)
        check_input(module, name, value, session_role)

    if value:
        # Convert a value like 1mb (Postgres does not support) to 1MB, etc:
        # NOTE(review): 'kb' is not in LOWERCASE_SIZE_UNITS, so '8kb' passes
        # through unconverted -- confirm whether that is deliberate.
        if len(value) > 2 and value[:-2].isdigit() and value[-2:] in LOWERCASE_SIZE_UNITS:
            value = value.upper()

        # Convert a value like 1b (Postgres does not support) to 1B:
        elif len(value) > 1 and ('b' in value[-1] and value[:-1].isdigit()):
            value = value.upper()

    # value and reset are mutually exclusive, but both are optional, so the
    # combination checks are done by hand instead of via AnsibleModule.
    if value is not None and reset:
        module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)

    if value is None and not reset:
        module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)

    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    kw = {}
    # Check server version (needs 9.4 or later):
    # ALTER SYSTEM only exists from 9.4 on; warn and exit unchanged otherwise.
    ver = db_connection.server_version
    if ver < PG_REQ_VER:
        module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
        kw = dict(
            changed=False,
            restart_required=False,
            value_pretty="",
            prev_val_pretty="",
            value={"value": "", "unit": ""},
        )
        kw['name'] = name
        db_connection.close()
        module.exit_json(**kw)

    # Set default returned values:
    restart_required = False
    changed = False
    kw['name'] = name
    kw['restart_required'] = False

    # Get info about param state:
    res = param_get(cursor, module, name)
    current_value = res[0]
    raw_val = res[1]
    unit = res[2]
    boot_val = res[3]
    context = res[4]

    # Normalize boolean spellings the same way param_get does for SHOW output.
    if value == 'True':
        value = 'on'
    elif value == 'False':
        value = 'off'

    kw['prev_val_pretty'] = current_value
    kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
    kw['context'] = context

    # Do job
    if context == "internal":
        module.fail_json(msg="%s: cannot be changed (internal context). See "
                             "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)

    if context == "postmaster":
        # Changes only take effect after a full server restart.
        restart_required = True

    # If check_mode, just compare and exit:
    # (pretty_to_bytes makes '1MB' and '1024kB' comparable)
    if module.check_mode:
        if pretty_to_bytes(value) == pretty_to_bytes(current_value):
            kw['changed'] = False

        else:
            kw['value_pretty'] = value
            kw['changed'] = True

        # Anyway returns current raw value in the check_mode:
        kw['value'] = dict(
            value=raw_val,
            unit=unit,
        )
        kw['restart_required'] = restart_required
        module.exit_json(**kw)

    # Set param (value can be an empty string):
    # NOTE(review): when context == 'postmaster' and a value is set here,
    # kw['value'] is never populated before exit (the reconnect branch
    # below is skipped) -- confirm against the RETURN doc which says
    # 'value' is returned always.
    if value is not None and value != current_value:
        changed = param_set(cursor, module, name, value, context)

        kw['value_pretty'] = value

    # Reset param:
    elif reset:
        if raw_val == boot_val:
            # nothing to change, exit:
            # (exit_json defaults 'changed' to False here)
            kw['value'] = dict(
                value=raw_val,
                unit=unit,
            )
            module.exit_json(**kw)

        changed = param_set(cursor, module, name, boot_val, context)

    cursor.close()
    db_connection.close()

    # Reconnect and recheck current value:
    # (only meaningful for contexts that take effect on reload)
    if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
        db_connection = connect_to_db(module, conn_params, autocommit=True)
        cursor = db_connection.cursor(cursor_factory=DictCursor)

        res = param_get(cursor, module, name)
        # f_ means 'final'
        f_value = res[0]
        f_raw_val = res[1]

        if raw_val == f_raw_val:
            changed = False

        else:
            changed = True

        kw['value_pretty'] = f_value
        kw['value'] = dict(
            value=f_raw_val,
            unit=unit,
        )

        cursor.close()
        db_connection.close()

    kw['changed'] = changed
    kw['restart_required'] = restart_required

    if restart_required and changed:
        module.warn("Restart of PostgreSQL is required for setting %s" % name)

    module.exit_json(**kw)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py
new file mode 100644
index 00000000..594a0ee8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_slot.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: postgresql_slot
+short_description: Add or remove replication slots from a PostgreSQL database
+description:
+- Add or remove physical or logical replication slots from a PostgreSQL database.
+
+options:
+ name:
+ description:
+ - Name of the replication slot to add or remove.
+ type: str
+ required: yes
+ aliases:
+ - slot_name
+ slot_type:
+ description:
+ - Slot type.
+ type: str
+ default: physical
+ choices: [ logical, physical ]
+ state:
+ description:
+ - The slot state.
+ - I(state=present) implies the slot must be present in the system.
+ - I(state=absent) implies the replication slot must be removed from the system.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ immediately_reserve:
+ description:
+ - Optional parameter that when C(yes) specifies that the LSN for this replication slot be reserved
+ immediately, otherwise the default, C(no), specifies that the LSN is reserved on the first connection
+ from a streaming replication client.
+ - Is available from PostgreSQL version 9.6.
+ - Uses only with I(slot_type=physical).
+ - Mutually exclusive with I(slot_type=logical).
+ type: bool
+ default: no
+ output_plugin:
+ description:
+ - All logical slots must indicate which output plugin decoder they're using.
+ - This parameter does not apply to physical slots.
+ - It will be ignored with I(slot_type=physical).
+ type: str
+ default: "test_decoding"
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- Physical replication slots were introduced to PostgreSQL with version 9.4,
+ while logical replication slots were added beginning with version 10.0.
+- Supports C(check_mode).
+
+seealso:
+- name: PostgreSQL pg_replication_slots view reference
+ description: Complete reference of the PostgreSQL pg_replication_slots view.
+ link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html
+- name: PostgreSQL streaming replication protocol reference
+ description: Complete reference of the PostgreSQL streaming replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-replication.html
+- name: PostgreSQL logical replication protocol reference
+ description: Complete reference of the PostgreSQL logical replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-logical-replication.html
+
+author:
+- John Scalia (@jscalia)
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create physical_one physical slot if doesn't exist
+ become_user: postgres
+ community.postgresql.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+
+- name: Remove physical_one slot if exists
+ become_user: postgres
+ community.postgresql.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+ state: absent
+
+- name: Create logical_one logical slot to the database acme if doesn't exist
+ community.postgresql.postgresql_slot:
+ name: logical_slot_one
+ slot_type: logical
+ state: present
+ output_plugin: custom_decoder_one
+ db: "acme"
+
+- name: Remove logical_one slot if exists from the cluster running on another host and non-standard port
+ community.postgresql.postgresql_slot:
+ name: logical_one
+ login_host: mydatabase.example.org
+ port: 5433
+ login_user: ourSuperuser
+ login_password: thePassword
+ state: absent
+'''
+
+RETURN = r'''
+name:
+ description: Name of the slot.
+ returned: always
+ type: str
+ sample: "physical_one"
+queries:
+ description: List of executed queries.
+ returned: always
+ type: str
+ sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
class PgSlot(object):
    """Implement PostgreSQL replication slot management.

    Args:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): psycopg2 cursor object used to run queries.
        name (str): Name of the replication slot to manage.

    Attributes:
        exists (bool): True if a slot called ``name`` already exists.
        kind (str): Type of the existing slot ('physical' or 'logical'),
            empty string when the slot does not exist.
        changed (bool): True after a state-changing query has been executed.
        executed_queries (list): List of executed queries.
    """

    def __init__(self, module, cursor, name):
        self.module = module
        self.cursor = cursor
        self.name = name
        self.exists = False
        self.kind = ''
        # Fetch the current state of the slot before doing anything else:
        self.__slot_exists()
        self.changed = False
        self.executed_queries = []

    def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False):
        """Create the replication slot if it does not exist yet.

        Kwargs:
            kind (str): Slot type, 'physical' or 'logical'.
            immediately_reserve (bool): Reserve WAL immediately
                (physical slots on PostgreSQL 9.6+ only).
            output_plugin (str): Decoding plugin (logical slots only).
            just_check (bool): When True (check mode), don't run any DDL.

        Returns:
            False if the slot already exists (a warning is issued when its
            type differs), None in check mode, otherwise the changed flag.
        """
        if self.exists:
            if self.kind == kind:
                return False
            else:
                self.module.warn("slot with name '%s' already exists "
                                 "but has another type '%s'" % (self.name, self.kind))
                return False

        if just_check:
            # Check mode: the slot would be created, nothing to execute.
            return None

        if kind == 'physical':
            # The immediately_reserve argument of
            # pg_create_physical_replication_slot() needs PostgreSQL 9.6+:
            if self.cursor.connection.server_version < 96000:
                query = "SELECT pg_create_physical_replication_slot(%(name)s)"

            else:
                query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)"

            self.changed = exec_sql(self, query,
                                    query_params={'name': self.name, 'i_reserve': immediately_reserve},
                                    return_bool=True)

        elif kind == 'logical':
            query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)"
            self.changed = exec_sql(self, query,
                                    query_params={'name': self.name, 'o_plugin': output_plugin},
                                    return_bool=True)

        return self.changed

    def drop(self):
        """Drop the replication slot if it exists.

        Returns:
            False if the slot does not exist, otherwise the changed flag.
        """
        if not self.exists:
            return False

        query = "SELECT pg_drop_replication_slot(%(name)s)"
        self.changed = exec_sql(self, query, query_params={'name': self.name}, return_bool=True)
        return self.changed

    def __slot_exists(self):
        """Set ``self.exists`` and ``self.kind`` from pg_replication_slots."""
        query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s"
        res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
        if res:
            self.exists = True
            self.kind = res[0][0]
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_slot module.

    Parses module parameters, connects to the database, and creates or
    drops the replication slot depending on the ``state`` parameter.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type="str", aliases=["login_db"]),
        name=dict(type="str", required=True, aliases=["slot_name"]),
        slot_type=dict(type="str", default="physical", choices=["logical", "physical"]),
        immediately_reserve=dict(type="bool", default=False),
        session_role=dict(type="str"),
        output_plugin=dict(type="str", default="test_decoding"),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        trust_input=dict(type="bool", default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    name = module.params["name"]
    slot_type = module.params["slot_type"]
    immediately_reserve = module.params["immediately_reserve"]
    state = module.params["state"]
    output_plugin = module.params["output_plugin"]

    # Guard against SQL injection through session_role unless the user
    # explicitly trusts the input:
    if not module.params["trust_input"]:
        check_input(module, module.params['session_role'])

    # immediately_reserve is only meaningful for physical slots:
    if immediately_reserve and slot_type == 'logical':
        module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive")

    # When slot_type is logical and parameter db is not passed,
    # the default database will be used to create the slot and
    # the user should know about this.
    # When the slot type is physical,
    # it doesn't matter which database will be used
    # because physical slots are global objects.
    if slot_type == 'logical':
        warn_db_default = True
    else:
        warn_db_default = False

    conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default)
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    ##################################
    # Create an object and do main job
    pg_slot = PgSlot(module, cursor, name)

    changed = False

    # In check mode only report whether the state would change;
    # no DDL is executed:
    if module.check_mode:
        if state == "present":
            if not pg_slot.exists:
                changed = True

            # Record the query that would run (just_check=True skips execution):
            pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True)

        elif state == "absent":
            if pg_slot.exists:
                changed = True
    else:
        if state == "absent":
            pg_slot.drop()

        elif state == "present":
            pg_slot.create(slot_type, immediately_reserve, output_plugin)

        changed = pg_slot.changed

    db_connection.close()
    module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py
new file mode 100644
index 00000000..037f94af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_subscription.py
@@ -0,0 +1,718 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_subscription
+short_description: Add, update, or remove PostgreSQL subscription
+description:
+- Add, update, or remove PostgreSQL subscription.
+version_added: '0.2.0'
+
+options:
+ name:
+ description:
+ - Name of the subscription to add, update, or remove.
+ type: str
+ required: yes
+ db:
+ description:
+ - Name of the database to connect to and where
+ the subscription state will be changed.
+ aliases: [ login_db ]
+ type: str
+ required: yes
+ state:
+ description:
+ - The subscription state.
+ - C(present) implies that if I(name) subscription doesn't exist, it will be created.
+ - C(absent) implies that if I(name) subscription exists, it will be removed.
+ - C(refresh) implies that if I(name) subscription exists, it will be refreshed.
+      Fetch missing table information from publisher. Always returns ``changed=True``.
+ This will start replication of tables that were added to the subscribed-to publications
+ since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION.
+ The existing data in the publications that are being subscribed to
+ should be copied once the replication starts.
+ - For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html).
+ type: str
+ choices: [ absent, present, refresh ]
+ default: present
+ owner:
+ description:
+ - Subscription owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ - Ignored when I(state) is not C(present).
+ type: str
+ publications:
+ description:
+ - The publication names on the publisher to use for the subscription.
+ - Ignored when I(state) is not C(present).
+ type: list
+ elements: str
+ connparams:
+ description:
+ - The connection dict param-value to connect to the publisher.
+ - For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ cascade:
+ description:
+ - Drop subscription dependencies. Has effect with I(state=absent) only.
+ - Ignored when I(state) is not C(absent).
+ type: bool
+ default: false
+ subsparams:
+ description:
+ - Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc.
+ - For update the subscription allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name).
+ - See available parameters to create a new subscription
+ on U(https://www.postgresql.org/docs/current/sql-createsubscription.html).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(name), I(publications), I(owner),
+ I(session_role), I(connparams), I(subsparams) are potentially dangerous.
+ - It makes sense to use C(yes) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- PostgreSQL version must be 10 or greater.
+- Supports C(check_mode).
+
+seealso:
+- module: community.postgresql.postgresql_publication
+- module: community.postgresql.postgresql_info
+- name: CREATE SUBSCRIPTION reference
+ description: Complete reference of the CREATE SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsubscription.html
+- name: ALTER SUBSCRIPTION reference
+ description: Complete reference of the ALTER SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersubscription.html
+- name: DROP SUBSCRIPTION reference
+ description: Complete reference of the DROP SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsubscription.html
+
+author:
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: >
+ Create acme subscription in mydb database using acme_publication and
+ the following connection parameters to connect to the publisher.
+ Set the subscription owner as alice.
+ community.postgresql.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ publications: acme_publication
+ owner: alice
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ dbname: mydb
+
+- name: Assuming that acme subscription exists, try to change conn parameters
+ community.postgresql.postgresql_subscription:
+ db: mydb
+ name: acme
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ connect_timeout: 100
+
+- name: Refresh acme publication
+ community.postgresql.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: refresh
+
+- name: Drop acme subscription from mydb with dependencies (cascade=yes)
+ community.postgresql.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: absent
+ cascade: yes
+
+- name: Assuming that acme subscription exists and enabled, disable the subscription
+ community.postgresql.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ subsparams:
+ enabled: no
+'''
+
+RETURN = r'''
+name:
+ description:
+ - Name of the subscription.
+ returned: always
+ type: str
+ sample: acme
+exists:
+ description:
+ - Flag indicates the subscription exists or not at the end of runtime.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'DROP SUBSCRIPTION "mysubscription"' ]
+initial_state:
+ description: Subscription configuration at the beginning of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+final_state:
+ description: Subscription configuration at the end of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+'''
+
+from copy import deepcopy
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import check_input
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+SUPPORTED_PG_VERSION = 10000
+
+SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name')
+
+
+################################
+# Module functions and classes #
+################################
+
def convert_conn_params(conn_dict):
    """Convert the passed connection dictionary to a libpq connection string.

    Args:
        conn_dict (dict): Dictionary of param-value pairs which needs
            to be converted, e.g. {'host': '127.0.0.1', 'port': 5432}.

    Returns:
        str: Space-separated connection string, e.g. 'host=127.0.0.1 port=5432'.
    """
    # dict.items() works identically on Python 2 and 3 for iteration,
    # so the six.iteritems shim is unnecessary here:
    conn_list = ['%s=%s' % (param, val) for (param, val) in conn_dict.items()]

    return ' '.join(conn_list)
+
+
def convert_subscr_params(params_dict):
    """Convert the passed params dictionary to a WITH (...) clause string.

    Booleans are rendered as lowercase 'true'/'false' as required by
    the CREATE/ALTER SUBSCRIPTION syntax.

    Args:
        params_dict (dict): Dictionary of param-value pairs which needs
            to be converted, e.g. {'enabled': False, 'slot_name': 'myslot'}.

    Returns:
        str: Comma-separated parameters string, e.g. 'enabled = false'.
    """
    params_list = []
    # dict.items() works identically on Python 2 and 3 for iteration,
    # so the six.iteritems shim is unnecessary here:
    for (param, val) in params_dict.items():
        if val is False:
            val = 'false'
        elif val is True:
            val = 'true'

        params_list.append('%s = %s' % (param, val))

    return ', '.join(params_list)
+
+
class PgSubscription():
    """Class to work with PostgreSQL subscription.

    Args:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        name (str): The name of the subscription.
        db (str): The database name the subscription will be associated with.

    Attributes:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        name (str): Name of subscription.
        executed_queries (list): List of executed queries.
        attrs (dict): Dict with subscription attributes.
        exists (bool): Flag indicates the subscription exists or not.
    """

    def __init__(self, module, cursor, name, db):
        self.module = module
        self.cursor = cursor
        self.name = name
        self.db = db
        self.executed_queries = []
        self.attrs = {
            'owner': None,
            'enabled': None,
            'synccommit': None,
            'conninfo': {},
            'slotname': None,
            'publications': [],
        }
        # Keep a pristine copy so attrs can be reset when the
        # subscription disappears:
        self.empty_attrs = deepcopy(self.attrs)
        self.exists = self.check_subscr()

    def get_info(self):
        """Refresh the subscription information.

        Returns:
            ``self.attrs``.
        """
        self.exists = self.check_subscr()
        return self.attrs

    def check_subscr(self):
        """Check the subscription and refresh ``self.attrs`` subscription attribute.

        Returns:
            True if the subscription with ``self.name`` exists, False otherwise.
        """

        subscr_info = self.__get_general_subscr_info()

        if not subscr_info:
            # The subscription does not exist:
            self.attrs = deepcopy(self.empty_attrs)
            return False

        self.attrs['owner'] = subscr_info.get('rolname')
        self.attrs['enabled'] = subscr_info.get('subenabled')
        # Bug fix: this previously read 'subenabled' (copy-paste error),
        # which made synchronous_commit comparisons in update() wrong:
        self.attrs['synccommit'] = subscr_info.get('subsynccommit')
        self.attrs['slotname'] = subscr_info.get('subslotname')
        self.attrs['publications'] = subscr_info.get('subpublications')
        if subscr_info.get('subconninfo'):
            # Parse the libpq conninfo string into a dict,
            # converting numeric values to int where possible:
            for param in subscr_info['subconninfo'].split(' '):
                tmp = param.split('=')
                try:
                    self.attrs['conninfo'][tmp[0]] = int(tmp[1])
                except ValueError:
                    self.attrs['conninfo'][tmp[0]] = tmp[1]

        return True

    def create(self, connparams, publications, subsparams, check_mode=True):
        """Create the subscription.

        Args:
            connparams (str): Connection string in libpq style.
            publications (list): Publications on the master to use.
            subsparams (str): Parameters string in WITH () clause style.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if the subscription has been created, otherwise False.
        """
        query_fragments = []
        query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' "
                               "PUBLICATION %s" % (self.name, connparams, ', '.join(publications)))

        if subsparams:
            query_fragments.append("WITH (%s)" % subsparams)

        changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)

        return changed

    def update(self, connparams, publications, subsparams, check_mode=True):
        """Update the subscription.

        Args:
            connparams (dict): Connection parameters as a dictionary
                (compared against ``self.attrs['conninfo']``).
            publications (list): Publications on the master to use.
            subsparams (dict): Dictionary of optional parameters.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if subscription has been updated, otherwise False.
        """
        changed = False

        # Accumulate the result with 'or' so an earlier True
        # cannot be overwritten by a later False:
        if connparams:
            if connparams != self.attrs['conninfo']:
                changed = self.__set_conn_params(convert_conn_params(connparams),
                                                 check_mode=check_mode) or changed

        if publications:
            if sorted(self.attrs['publications']) != sorted(publications):
                changed = self.__set_publications(publications, check_mode=check_mode) or changed

        if subsparams:
            params_to_update = []

            for (param, value) in iteritems(subsparams):
                if param == 'enabled':
                    if self.attrs['enabled'] and value is False:
                        changed = self.enable(enabled=False, check_mode=check_mode) or changed
                    elif not self.attrs['enabled'] and value is True:
                        changed = self.enable(enabled=True, check_mode=check_mode) or changed

                elif param == 'synchronous_commit':
                    if self.attrs['synccommit'] is True and value is False:
                        params_to_update.append("%s = false" % param)
                    elif self.attrs['synccommit'] is False and value is True:
                        params_to_update.append("%s = true" % param)

                elif param == 'slot_name':
                    if self.attrs['slotname'] and self.attrs['slotname'] != value:
                        params_to_update.append("%s = %s" % (param, value))

                else:
                    self.module.warn("Parameter '%s' is not in params supported "
                                     "for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE))

            if params_to_update:
                changed = self.__set_params(params_to_update, check_mode=check_mode) or changed

        return changed

    def drop(self, cascade=False, check_mode=True):
        """Drop the subscription.

        Kwargs:
            cascade (bool): Flag indicates that the subscription needs to be deleted
                with its dependencies.
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            changed (bool): True if the subscription has been removed, otherwise False.
        """
        if self.exists:
            query_fragments = ["DROP SUBSCRIPTION %s" % self.name]
            if cascade:
                query_fragments.append("CASCADE")

            return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)

        # Nothing to drop; previously this fell through and returned None:
        return False

    def set_owner(self, role, check_mode=True):
        """Set a subscription owner.

        Args:
            role (str): Role (user) name that needs to be set as a subscription owner.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role)
        return self.__exec_sql(query, check_mode=check_mode)

    def refresh(self, check_mode=True):
        """Refresh publication.

        Fetches missing table info from publisher.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name
        return self.__exec_sql(query, check_mode=check_mode)

    def __set_params(self, params_to_update, check_mode=True):
        """Update optional subscription parameters.

        Args:
            params_to_update (list): Parameters with values to update.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update))
        return self.__exec_sql(query, check_mode=check_mode)

    def __set_conn_params(self, connparams, check_mode=True):
        """Update connection parameters.

        Args:
            connparams (str): Connection string in libpq style.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams)
        return self.__exec_sql(query, check_mode=check_mode)

    def __set_publications(self, publications, check_mode=True):
        """Update publications.

        Args:
            publications (list): Publications on the master to use.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications))
        return self.__exec_sql(query, check_mode=check_mode)

    def enable(self, enabled=True, check_mode=True):
        """Enable or disable the subscription.

        Kwargs:
            enabled (bool): Flag indicates that the subscription needs
                to be enabled or disabled.
            check_mode (bool): If True, don't actually change anything,
                just make SQL, add it to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        if enabled:
            query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name
        else:
            query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name

        return self.__exec_sql(query, check_mode=check_mode)

    def __get_general_subscr_info(self):
        """Get and return general subscription information.

        Returns:
            Dict with subscription information if successful, False otherwise.
        """
        query = ("SELECT d.datname, r.rolname, s.subenabled, "
                 "s.subconninfo, s.subslotname, s.subsynccommit, "
                 "s.subpublications FROM pg_catalog.pg_subscription s "
                 "JOIN pg_catalog.pg_database d "
                 "ON s.subdbid = d.oid "
                 "JOIN pg_catalog.pg_roles AS r "
                 "ON s.subowner = r.oid "
                 "WHERE s.subname = %(name)s AND d.datname = %(db)s")

        result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)
        if result:
            return result[0]
        else:
            return False

    def __exec_sql(self, query, check_mode=False):
        """Execute SQL query.

        Note: If we need just to get information from the database,
            we use ``exec_sql`` function directly.

        Args:
            query (str): Query that needs to be executed.

        Kwargs:
            check_mode (bool): If True, don't actually change anything,
                just add ``query`` to ``self.executed_queries`` and return True.

        Returns:
            True if successful, False otherwise.
        """
        if check_mode:
            self.executed_queries.append(query)
            return True
        else:
            return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_subscription module.

    Parses module parameters, connects to the database, and creates,
    updates, drops, or refreshes the subscription depending on ``state``.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        name=dict(type='str', required=True),
        db=dict(type='str', required=True, aliases=['login_db']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']),
        publications=dict(type='list', elements='str'),
        connparams=dict(type='dict'),
        cascade=dict(type='bool', default=False),
        owner=dict(type='str'),
        subsparams=dict(type='dict'),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # Parameters handling:
    db = module.params['db']
    name = module.params['name']
    state = module.params['state']
    publications = module.params['publications']
    cascade = module.params['cascade']
    owner = module.params['owner']
    subsparams = module.params['subsparams']
    connparams = module.params['connparams']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']

    if not trust_input:
        # Check input for potentially dangerous elements:
        # the dict parameters are flattened to strings first
        # so check_input can scan their contents too.
        if not subsparams:
            subsparams_str = None
        else:
            subsparams_str = convert_subscr_params(subsparams)

        if not connparams:
            connparams_str = None
        else:
            connparams_str = convert_conn_params(connparams)

        check_input(module, name, publications, owner, session_role,
                    connparams_str, subsparams_str)

    # Warn about parameters that are ignored for the requested state:
    if state == 'present' and cascade:
        module.warn('parameter "cascade" is ignored when state is not absent')

    if state != 'present':
        if owner:
            module.warn("parameter 'owner' is ignored when state is not 'present'")
        if publications:
            module.warn("parameter 'publications' is ignored when state is not 'present'")
        if connparams:
            module.warn("parameter 'connparams' is ignored when state is not 'present'")
        if subsparams:
            module.warn("parameter 'subsparams' is ignored when state is not 'present'")

    # Connect to DB and make cursor object:
    pg_conn_params = get_conn_params(module, module.params)
    # We check subscription state without DML queries execution, so set autocommit:
    db_connection = connect_to_db(module, pg_conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Check version:
    # logical replication (and pg_subscription) requires PostgreSQL 10+.
    if cursor.connection.server_version < SUPPORTED_PG_VERSION:
        module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")

    # Set defaults:
    changed = False
    initial_state = {}
    final_state = {}

    ###################################
    # Create object and do rock'n'roll:
    subscription = PgSubscription(module, cursor, name, db)

    if subscription.exists:
        initial_state = deepcopy(subscription.attrs)
        final_state = deepcopy(initial_state)

    if state == 'present':
        if not subscription.exists:
            # Convert the dict parameters to SQL-ready strings
            # only for creation; update() compares dicts itself:
            if subsparams:
                subsparams = convert_subscr_params(subsparams)

            if connparams:
                connparams = convert_conn_params(connparams)

            changed = subscription.create(connparams,
                                          publications,
                                          subsparams,
                                          check_mode=module.check_mode)

        else:
            changed = subscription.update(connparams,
                                          publications,
                                          subsparams,
                                          check_mode=module.check_mode)

        if owner and subscription.attrs['owner'] != owner:
            changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed

    elif state == 'absent':
        changed = subscription.drop(cascade, check_mode=module.check_mode)

    elif state == 'refresh':
        if not subscription.exists:
            module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name)

        # Always returns True:
        changed = subscription.refresh(check_mode=module.check_mode)

    # Get final subscription info:
    final_state = subscription.get_info()

    # Connection is not needed any more:
    cursor.close()
    db_connection.close()

    # Return ret values and exit:
    module.exit_json(changed=changed,
                     name=name,
                     exists=subscription.exists,
                     queries=subscription.executed_queries,
                     initial_state=initial_state,
                     final_state=final_state)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py
new file mode 100644
index 00000000..97194d43
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_table.py
@@ -0,0 +1,611 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_table
+short_description: Create, drop, or modify a PostgreSQL table
+description:
+- Allows to create, drop, rename, truncate a table, or change some table attributes.
+options:
+ table:
+ description:
+ - Table name.
+ required: true
+ aliases:
+ - name
+ type: str
+ state:
+ description:
+ - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(truncate), I(storage_params) and, I(rename).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tablespace:
+ description:
+ - Set a tablespace for the table.
+ type: str
+ owner:
+ description:
+ - Set a table owner.
+ type: str
+ unlogged:
+ description:
+ - Create an unlogged table.
+ type: bool
+ default: no
+ like:
+ description:
+ - Create a table like another table (with similar DDL).
+ Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ including:
+ description:
+ - Keywords that are used with like parameter, may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL.
+ Needs I(like) specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ columns:
+ description:
+ - Columns that are needed.
+ type: list
+ elements: str
+ rename:
+ description:
+ - New table name. Mutually exclusive with I(tablespace), I(owner),
+ I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
+ type: str
+ truncate:
+ description:
+ - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(rename), and I(storage_params).
+ type: bool
+ default: no
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
+ Mutually exclusive with I(rename) and I(truncate).
+ type: list
+ elements: str
+ db:
+ description:
+ - Name of database to connect and where the table will be created.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the table (such as views).
+ Used with I(state=absent) only.
+ type: bool
+ default: no
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supports C(check_mode).
+- If you do not pass db parameter, tables will be created in the database
+ named postgres.
+- PostgreSQL allows creating columnless tables, so the columns param is optional.
+- Unlogged tables are available from PostgreSQL server version 9.1.
+seealso:
+- module: community.postgresql.postgresql_sequence
+- module: community.postgresql.postgresql_idx
+- module: community.postgresql.postgresql_info
+- module: community.postgresql.postgresql_tablespace
+- module: community.postgresql.postgresql_owner
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_copy
+- name: CREATE TABLE reference
+ description: Complete reference of the CREATE TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtable.html
+- name: ALTER TABLE reference
+ description: Complete reference of the ALTER TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertable.html
+- name: DROP TABLE reference
+ description: Complete reference of the DROP TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptable.html
+- name: PostgreSQL data types
+ description: Complete reference of the PostgreSQL data types documentation.
+ link: https://www.postgresql.org/docs/current/datatype.html
+author:
+- Andrei Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
+ community.postgresql.postgresql_table:
+ db: acme
+ name: tbl2
+ like: tbl1
+ owner: testuser
+
+- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
+ community.postgresql.postgresql_table:
+ db: acme
+ table: tbl2
+ like: tbl1
+ including: comments, indexes
+ tablespace: ssd
+
+- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
+ community.postgresql.postgresql_table:
+ name: test_table
+ columns:
+ - id bigserial primary key
+ - num bigint
+ - stories text
+ tablespace: ssd
+ storage_params:
+ - fillfactor=10
+ - autovacuum_analyze_threshold=1
+
+- name: Create an unlogged table in schema acme
+ community.postgresql.postgresql_table:
+ name: acme.useless_data
+ columns: waste_id int
+ unlogged: true
+
+- name: Rename table foo to bar
+ community.postgresql.postgresql_table:
+ table: foo
+ rename: bar
+
+- name: Rename table foo from schema acme to bar
+ community.postgresql.postgresql_table:
+ name: acme.foo
+ rename: bar
+
+- name: Set owner to someuser
+ community.postgresql.postgresql_table:
+ name: foo
+ owner: someuser
+
+- name: Change tablespace of foo table to new_tablespace and set owner to new_user
+ community.postgresql.postgresql_table:
+ name: foo
+ tablespace: new_tablespace
+ owner: new_user
+
+- name: Truncate table foo
+ community.postgresql.postgresql_table:
+ name: foo
+ truncate: yes
+
+- name: Drop table foo from schema acme
+ community.postgresql.postgresql_table:
+ name: acme.foo
+ state: absent
+
+- name: Drop table bar cascade
+ community.postgresql.postgresql_table:
+ name: bar
+ state: absent
+ cascade: yes
+'''
+
+RETURN = r'''
+table:
+ description: Name of a table.
+ returned: always
+ type: str
+ sample: 'foo'
+state:
+ description: Table state.
+ returned: always
+ type: str
+ sample: 'present'
+owner:
+ description: Table owner.
+ returned: always
+ type: str
+ sample: 'postgres'
+tablespace:
+ description: Tablespace.
+ returned: always
+ type: str
+ sample: 'ssd_tablespace'
+queries:
+ description: List of executed queries.
+ returned: always
+ type: str
+ sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
+storage_params:
+ description: Storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
class Table(object):
    """Represent a PostgreSQL table and the operations the module performs on it.

    Args:
        name (str) -- table name, optionally schema-qualified as "schema.table"
            (the schema defaults to "public" otherwise).
        module (AnsibleModule) -- object of AnsibleModule class.
        cursor (cursor) -- psycopg2 cursor object.
    """

    def __init__(self, name, module, cursor):
        self.name = name
        self.module = module
        self.cursor = cursor
        # Current table facts, refreshed by __exists_in_db().
        # NOTE: 'tblspace' is '' (never None) when the table lives
        # in the default tablespace.
        self.info = {
            'owner': '',
            'tblspace': '',
            'storage_params': [],
        }
        self.exists = False
        self.__exists_in_db()
        self.executed_queries = []

    def get_info(self):
        """Refresh table info (see self.info and self.exists)."""
        self.__exists_in_db()

    def __exists_in_db(self):
        """Check that the table exists and refresh self.info.

        Returns True if the table exists, otherwise False.
        """
        if "." in self.name:
            schema = self.name.split('.')[-2]
            tblname = self.name.split('.')[-1]
        else:
            schema = 'public'
            tblname = self.name

        query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
                 "FROM pg_tables AS t "
                 "INNER JOIN pg_class AS c ON c.relname = t.tablename "
                 "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
                 "WHERE t.tablename = %(tblname)s "
                 "AND n.nspname = %(schema)s")
        res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
                       add_to_executed=False)
        if res:
            self.exists = True
            self.info = dict(
                owner=res[0][0],
                tblspace=res[0][1] if res[0][1] else '',
                storage_params=res[0][2] if res[0][2] else [],
            )

            return True
        else:
            self.exists = False
            return False

    def create(self, columns='', params='', tblspace='',
               unlogged=False, owner=''):
        """Create the table or, if it already exists, align its attributes.

        If the table exists, check passed args (params, tblspace, owner) and,
        if they're different from current, change them.

        Arguments:
            params - storage params (passed by "WITH (...)" in SQL),
                comma separated.
            tblspace - tablespace.
            owner - table owner.
            unlogged - create unlogged table.
            columns - column string (comma separated).

        Returns True if anything was changed, otherwise False.
        """
        name = pg_quote_identifier(self.name, 'table')

        changed = False

        if self.exists:
            # __exists_in_db() stores '' (not None) for the default
            # tablespace, so an empty current value and an explicit
            # 'pg_default' mean the same thing.  The previous "is None"
            # comparison could never be true, which made the module run
            # a needless ALTER TABLE ... SET TABLESPACE every time
            # tablespace=pg_default was requested for an existing table.
            if tblspace == 'pg_default' and not self.info['tblspace']:
                pass  # Because they have the same meaning
            elif tblspace and self.info['tblspace'] != tblspace:
                self.set_tblspace(tblspace)
                changed = True

            if owner and self.info['owner'] != owner:
                self.set_owner(owner)
                changed = True

            if params:
                param_list = [p.strip(' ') for p in params.split(',')]

                # Re-apply storage params only if at least one of them
                # differs from the current reloptions:
                if any(p not in self.info['storage_params'] for p in param_list):
                    self.set_stor_params(params)
                    changed = True

            return changed

        query = "CREATE"
        if unlogged:
            query += " UNLOGGED TABLE %s" % name
        else:
            query += " TABLE %s" % name

        # PostgreSQL permits columnless tables, hence the "()" fallback:
        if columns:
            query += " (%s)" % columns
        else:
            query += " ()"

        if params:
            query += " WITH (%s)" % params

        if tblspace:
            query += ' TABLESPACE "%s"' % tblspace

        if exec_sql(self, query, return_bool=True):
            changed = True

        if owner:
            # Keep the CREATE TABLE result even if set_owner()
            # reports no change:
            changed = self.set_owner(owner) or changed

        return changed

    def create_like(self, src_table, including='', tblspace='',
                    unlogged=False, params='', owner=''):
        """Create a table like another table (with similar DDL).

        Arguments:
            src_table - source table.
            including - corresponds to optional INCLUDING expression
                in CREATE TABLE ... LIKE statement.
            params - storage params (passed by "WITH (...)" in SQL),
                comma separated.
            tblspace - tablespace.
            owner - table owner.
            unlogged - create unlogged table.

        Returns True if anything was changed, otherwise False.
        """
        changed = False

        name = pg_quote_identifier(self.name, 'table')

        query = "CREATE"
        if unlogged:
            query += " UNLOGGED TABLE %s" % name
        else:
            query += " TABLE %s" % name

        query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')

        if including:
            # "comments, indexes" -> "... INCLUDING COMMENTS INCLUDING INDEXES":
            for keyword in including.split(','):
                query += " INCLUDING %s" % keyword

        query += ')'

        if params:
            query += " WITH (%s)" % params

        if tblspace:
            query += ' TABLESPACE "%s"' % tblspace

        if exec_sql(self, query, return_bool=True):
            changed = True

        if owner:
            # Keep the CREATE TABLE result even if set_owner()
            # reports no change:
            changed = self.set_owner(owner) or changed

        return changed

    def truncate(self):
        """Truncate the table. Return True on success."""
        query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
        return exec_sql(self, query, return_bool=True)

    def rename(self, newname):
        """Rename the table to newname. Return True on success."""
        query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
                                                 pg_quote_identifier(newname, 'table'))
        return exec_sql(self, query, return_bool=True)

    def set_owner(self, username):
        """Set the table owner to username. Return True on success."""
        query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.name, 'table'), username)
        return exec_sql(self, query, return_bool=True)

    def drop(self, cascade=False):
        """Drop the table (optionally CASCADE). Return True on success,
        False if the table does not exist (nothing to do)."""
        if not self.exists:
            return False

        query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
        if cascade:
            query += " CASCADE"
        return exec_sql(self, query, return_bool=True)

    def set_tblspace(self, tblspace):
        """Move the table to the given tablespace. Return True on success."""
        query = 'ALTER TABLE %s SET TABLESPACE "%s"' % (pg_quote_identifier(self.name, 'table'), tblspace)
        return exec_sql(self, query, return_bool=True)

    def set_stor_params(self, params):
        """Set storage parameters (ALTER TABLE ... SET (...)). Return True on success."""
        query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
        return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_table module.

    Parses parameters, validates mutually exclusive combinations,
    connects to the database, and creates/drops/renames/truncates
    the table or changes its attributes.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        table=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        db=dict(type='str', default='', aliases=['login_db']),
        tablespace=dict(type='str'),
        owner=dict(type='str'),
        unlogged=dict(type='bool', default=False),
        like=dict(type='str'),
        including=dict(type='str'),
        rename=dict(type='str'),
        truncate=dict(type='bool', default=False),
        columns=dict(type='list', elements='str'),
        storage_params=dict(type='list', elements='str'),
        session_role=dict(type='str'),
        cascade=dict(type='bool', default=False),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    table = module.params['table']
    state = module.params['state']
    tablespace = module.params['tablespace']
    owner = module.params['owner']
    unlogged = module.params['unlogged']
    like = module.params['like']
    including = module.params['including']
    newname = module.params['rename']
    storage_params = module.params['storage_params']
    truncate = module.params['truncate']
    columns = module.params['columns']
    cascade = module.params['cascade']
    session_role = module.params['session_role']
    trust_input = module.params['trust_input']

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, table, tablespace, owner, like, including,
                    newname, storage_params, columns, session_role)

    if state == 'present' and cascade:
        module.warn("cascade=true is ignored when state=present")

    # Check mutual exclusive parameters:
    if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
        module.fail_json(msg="%s: state=absent is mutually exclusive with: "
                             "truncate, rename, columns, tablespace, "
                             "including, like, storage_params, unlogged, owner" % table)

    if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
        module.fail_json(msg="%s: truncate is mutually exclusive with: "
                             "rename, columns, like, unlogged, including, "
                             "storage_params, owner, tablespace" % table)

    if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
        module.fail_json(msg="%s: rename is mutually exclusive with: "
                             "columns, like, unlogged, including, "
                             "storage_params, owner, tablespace" % table)

    if like and columns:
        module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
    if including and not like:
        module.fail_json(msg="%s: including param needs like param specified" % table)

    conn_params = get_conn_params(module, module.params)
    db_connection = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Both are passed to SQL as comma-separated strings:
    if storage_params:
        storage_params = ','.join(storage_params)

    if columns:
        columns = ','.join(columns)

    ##############
    # Do main job:
    table_obj = Table(table, module, cursor)

    # Set default returned values:
    changed = False
    kw = {}
    kw['table'] = table
    kw['state'] = ''
    if table_obj.exists:
        kw = dict(
            table=table,
            state='present',
            owner=table_obj.info['owner'],
            tablespace=table_obj.info['tblspace'],
            storage_params=table_obj.info['storage_params'],
        )

    if state == 'absent':
        changed = table_obj.drop(cascade=cascade)

    elif truncate:
        changed = table_obj.truncate()

    elif newname:
        changed = table_obj.rename(newname)
        q = table_obj.executed_queries
        table_obj = Table(newname, module, cursor)
        table_obj.executed_queries = q

    elif state == 'present' and not like:
        changed = table_obj.create(columns, storage_params,
                                   tablespace, unlogged, owner)

    elif state == 'present' and like:
        # Pass owner through as well: previously it was silently dropped
        # here, so combining "like" with "owner" never set the owner
        # (contrary to the documented example):
        changed = table_obj.create_like(like, including, tablespace,
                                        unlogged, storage_params, owner)

    if changed:
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()

        # Refresh table info for RETURN.
        # Note, if table has been renamed, it gets info by newname:
        table_obj.get_info()
        db_connection.commit()
        if table_obj.exists:
            kw = dict(
                table=table,
                state='present',
                owner=table_obj.info['owner'],
                tablespace=table_obj.info['tblspace'],
                storage_params=table_obj.info['storage_params'],
            )
        else:
            # We just change the table state here
            # to keep other information about the dropped table:
            kw['state'] = 'absent'

    kw['queries'] = table_obj.executed_queries
    kw['changed'] = changed
    db_connection.close()
    module.exit_json(**kw)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py
new file mode 100644
index 00000000..397bf7b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_tablespace.py
@@ -0,0 +1,541 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
+# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_tablespace
+short_description: Add or remove PostgreSQL tablespaces from remote hosts
+description:
+- Adds or removes PostgreSQL tablespaces from remote hosts.
+options:
+ tablespace:
+ description:
+ - Name of the tablespace to add or remove.
+ required: true
+ type: str
+ aliases:
+ - name
+ location:
+ description:
+ - Path to the tablespace directory in the file system.
+ - Ensure that the location exists and has right privileges.
+ type: path
+ aliases:
+ - path
+ state:
+ description:
+ - Tablespace state.
+ - I(state=present) implies the tablespace must be created if it doesn't exist.
+ - I(state=absent) implies the tablespace must be removed if present.
+ I(state=absent) is mutually exclusive with I(location), I(owner), I(set).
+ - See the Notes section for information about check mode restrictions.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ owner:
+ description:
+ - Name of the role to set as an owner of the tablespace.
+ - If this option is not specified, the tablespace owner is a role that creates the tablespace.
+ type: str
+ set:
+ description:
+ - Dict of tablespace options to set. Supported from PostgreSQL 9.0.
+ - For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
+ - When reset is passed as an option's value, if the option was set previously, it will be removed.
+ type: dict
+ rename_to:
+ description:
+ - New name of the tablespace.
+ - The new name cannot begin with pg_, as such names are reserved for system tablespaces.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(tablespace), I(location), I(owner),
+ I(rename_to), I(session_role), I(settings_list) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
+ support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
+ can not be run inside the transaction block.
+
+seealso:
+- name: PostgreSQL tablespaces
+ description: General information about PostgreSQL tablespaces.
+ link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html
+- name: CREATE TABLESPACE reference
+ description: Complete reference of the CREATE TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtablespace.html
+- name: ALTER TABLESPACE reference
+ description: Complete reference of the ALTER TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertablespace.html
+- name: DROP TABLESPACE reference
+ description: Complete reference of the DROP TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptablespace.html
+
+author:
+- Flavien Chantelot (@Dorn-)
+- Antoine Levy-Lambert (@antoinell)
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new tablespace called acme and set bob as its owner
+ community.postgresql.postgresql_tablespace:
+ name: acme
+ owner: bob
+ location: /data/foo
+
+- name: Create a new tablespace called bar with tablespace options
+ community.postgresql.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: 1
+ seq_page_cost: 1
+
+- name: Reset random_page_cost option
+ community.postgresql.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: reset
+
+- name: Rename the tablespace from bar to pcie_ssd
+ community.postgresql.postgresql_tablespace:
+ name: bar
+ rename_to: pcie_ssd
+
+- name: Drop tablespace called bloat
+ community.postgresql.postgresql_tablespace:
+ name: bloat
+ state: absent
+'''
+
+RETURN = r'''
+queries:
+ description: List of queries that was tried to be executed.
+ returned: always
+ type: str
+ sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
+tablespace:
+ description: Tablespace name.
+ returned: always
+ type: str
+ sample: 'ssd'
+owner:
+ description: Tablespace owner.
+ returned: always
+ type: str
+ sample: 'Bob'
+options:
+ description: Tablespace options.
+ returned: always
+ type: dict
+ sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
+location:
+ description: Path to the tablespace in the file system.
+ returned: always
+ type: str
+ sample: '/incredible/fast/ssd'
+newname:
+ description: New tablespace name.
+ returned: if existent
+ type: str
+ sample: new_ssd
+state:
+ description: Tablespace state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+'''
+
+try:
+ from psycopg2 import __version__ as PSYCOPG2_VERSION
+ from psycopg2.extras import DictCursor
+ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
+ from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
class PgTablespace(object):

    """Class for working with PostgreSQL tablespaces.

    Args:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        name (str) -- name of the tablespace

    Attrs:
        module (AnsibleModule) -- object of AnsibleModule class
        cursor (cursor) -- cursor object of psycopg2 library
        name (str) -- name of the tablespace
        exists (bool) -- flag the tablespace exists in the DB or not
        owner (str) -- tablespace owner
        location (str) -- path to the tablespace directory in the file system
        executed_queries (list) -- list of executed queries
        new_name (str) -- new name for the tablespace
        opt_not_supported (bool) -- flag indicates a tablespace option is supported or not
    """

    def __init__(self, module, cursor, name):
        self.module = module
        self.cursor = cursor
        self.name = name
        self.exists = False
        self.owner = ''
        self.settings = {}
        self.location = ''
        self.executed_queries = []
        self.new_name = ''
        self.opt_not_supported = False
        # Collect info:
        self.get_info()

    def get_info(self):
        """Get tablespace information and refresh the object's attributes.

        Returns False (and sets self.exists = False) if the tablespace
        does not exist.
        """
        # pg_tablespace.spcoptions appeared in PostgreSQL 9.0; probe for it:
        opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
                             "WHERE table_name = 'pg_tablespace' "
                             "AND column_name = 'spcoptions'", add_to_executed=False)

        # Up to 9.1 the location was a pg_tablespace column, afterwards
        # it is exposed by the pg_tablespace_location() function:
        location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
                                  "WHERE table_name = 'pg_tablespace' "
                                  "AND column_name = 'spclocation'", add_to_executed=False)
        if location:
            location = 'spclocation'
        else:
            location = 'pg_tablespace_location(t.oid)'

        if not opt:
            self.opt_not_supported = True
            query = ("SELECT r.rolname, (SELECT Null), %s "
                     "FROM pg_catalog.pg_tablespace AS t "
                     "JOIN pg_catalog.pg_roles AS r "
                     "ON t.spcowner = r.oid " % location)
        else:
            query = ("SELECT r.rolname, t.spcoptions, %s "
                     "FROM pg_catalog.pg_tablespace AS t "
                     "JOIN pg_catalog.pg_roles AS r "
                     "ON t.spcowner = r.oid " % location)

        res = exec_sql(self, query + "WHERE t.spcname = %(name)s",
                       query_params={'name': self.name}, add_to_executed=False)

        if not res:
            self.exists = False
            return False

        if res[0][0]:
            self.exists = True
            self.owner = res[0][0]

            if res[0][1]:
                # Options exist, stored as a list of 'key=value' strings:
                for raw_opt in res[0][1]:
                    key, _, value = raw_opt.partition('=')
                    self.settings[key] = value

            if res[0][2]:
                # Location exists:
                self.location = res[0][2]

    def create(self, location):
        """Create tablespace.

        Return True if success, otherwise, return False.

        args:
            location (str) -- tablespace directory path in the FS
        """
        query = ('CREATE TABLESPACE "%s" LOCATION \'%s\'' % (self.name, location))
        return exec_sql(self, query, return_bool=True)

    def drop(self):
        """Drop tablespace.

        Return True if success, otherwise, return False.
        """
        return exec_sql(self, 'DROP TABLESPACE "%s"' % self.name, return_bool=True)

    def set_owner(self, new_owner):
        """Set tablespace owner.

        Return True if success, otherwise, return False.

        args:
            new_owner (str) -- name of a new owner for the tablespace
        """
        if new_owner == self.owner:
            return False

        query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.name, new_owner)
        return exec_sql(self, query, return_bool=True)

    def rename(self, newname):
        """Rename tablespace.

        Return True if success, otherwise, return False.

        args:
            newname (str) -- new name for the tablespace
        """
        query = 'ALTER TABLESPACE "%s" RENAME TO "%s"' % (self.name, newname)
        self.new_name = newname
        return exec_sql(self, query, return_bool=True)

    def set_settings(self, new_settings):
        """Set or reset tablespace settings (options).

        Return True if at least one setting has been changed,
        otherwise False.

        args:
            new_settings (dict) -- dict of settings; the special value
                'reset' removes a previously set option
        """
        # Options are not supported before PostgreSQL 9.0:
        if self.opt_not_supported:
            return False

        changed = False

        # Apply new settings.  Accumulate the result with "or" so that an
        # earlier real change is not lost when a later option is a no-op
        # (previously the last iteration simply overwrote `changed`):
        for opt in new_settings:
            if new_settings[opt] == 'reset':
                if opt in self.settings:
                    changed = self.__reset_setting(opt) or changed
                    self.settings[opt] = None

            elif (opt not in self.settings) or (str(new_settings[opt]) != self.settings[opt]):
                changed = self.__set_setting("%s = '%s'" % (opt, new_settings[opt])) or changed

        return changed

    def __reset_setting(self, setting):
        """Reset tablespace setting.

        Return True if success, otherwise, return False.

        args:
            setting (str) -- setting name to reset
        """
        query = 'ALTER TABLESPACE "%s" RESET (%s)' % (self.name, setting)
        return exec_sql(self, query, return_bool=True)

    def __set_setting(self, setting):
        """Set tablespace setting.

        Return True if success, otherwise, return False.

        args:
            setting (str) -- string in format "setting_name = 'setting_value'"
        """
        query = 'ALTER TABLESPACE "%s" SET (%s)' % (self.name, setting)
        return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
def main():
    """Entry point of the postgresql_tablespace module.

    Parses parameters, connects to the database, and creates, drops,
    renames, or alters the tablespace as requested.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        tablespace=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default="present", choices=["absent", "present"]),
        location=dict(type='path', aliases=['path']),
        owner=dict(type='str'),
        set=dict(type='dict'),
        rename_to=dict(type='str'),
        db=dict(type='str', aliases=['login_db']),
        session_role=dict(type='str'),
        trust_input=dict(type='bool', default=True),
    )

    # NOTE: the previous mutually_exclusive=(('positional_args', 'named_args'),)
    # referred to parameters that do not exist in this argument_spec
    # (a copy-paste leftover from postgresql_query), so it was a no-op
    # and has been removed.
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    tablespace = module.params["tablespace"]
    state = module.params["state"]
    location = module.params["location"]
    owner = module.params["owner"]
    rename_to = module.params["rename_to"]
    settings = module.params["set"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]

    if state == 'absent' and (location or owner or rename_to or settings):
        module.fail_json(msg="state=absent is mutually exclusive with location, "
                             "owner, rename_to, and set")

    if not trust_input:
        # Check input for potentially dangerous elements:
        if not settings:
            settings_list = None
        else:
            settings_list = ['%s = %s' % (k, v) for k, v in iteritems(settings)]

        check_input(module, tablespace, location, owner,
                    rename_to, session_role, settings_list)

    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Change autocommit to False if check_mode so that ALTER statements
    # can be rolled back at the end:
    if module.check_mode:
        if PSYCOPG2_VERSION >= '2.4.2':
            db_connection.set_session(autocommit=False)
        else:
            db_connection.set_isolation_level(READ_COMMITTED)

    # Set defaults:
    autocommit = False
    changed = False

    ##############
    # Create PgTablespace object and do main job:
    tblspace = PgTablespace(module, cursor, tablespace)

    # If tablespace exists with different location, exit:
    if tblspace.exists and location and location != tblspace.location:
        module.fail_json(msg="Tablespace '%s' exists with "
                             "different location '%s'" % (tblspace.name, tblspace.location))

    # Create new tablespace:
    if not tblspace.exists and state == 'present':
        if rename_to:
            module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)

        if not location:
            module.fail_json(msg="'location' parameter must be passed with "
                                 "state=present if the tablespace doesn't exist")

        # Because CREATE TABLESPACE can not be run inside the transaction block:
        autocommit = True
        if PSYCOPG2_VERSION >= '2.4.2':
            db_connection.set_session(autocommit=True)
        else:
            db_connection.set_isolation_level(AUTOCOMMIT)

        changed = tblspace.create(location)

    # Drop non-existing tablespace:
    elif not tblspace.exists and state == 'absent':
        # Nothing to do:
        module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)

    # Drop existing tablespace:
    elif tblspace.exists and state == 'absent':
        # Because DROP TABLESPACE can not be run inside the transaction block:
        autocommit = True
        if PSYCOPG2_VERSION >= '2.4.2':
            db_connection.set_session(autocommit=True)
        else:
            db_connection.set_isolation_level(AUTOCOMMIT)

        changed = tblspace.drop()

    # Rename tablespace:
    elif tblspace.exists and rename_to:
        if tblspace.name != rename_to:
            changed = tblspace.rename(rename_to)

    if state == 'present':
        # Refresh information:
        tblspace.get_info()

    # Change owner and settings:
    if state == 'present' and tblspace.exists:
        # Accumulate with "or" so a change made by one step is not lost
        # when a subsequent step is a no-op (previously each assignment
        # simply overwrote `changed`):
        if owner:
            changed = tblspace.set_owner(owner) or changed

        if settings:
            changed = tblspace.set_settings(settings) or changed

        tblspace.get_info()

    # Rollback if it's possible and check_mode:
    if not autocommit:
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()

    cursor.close()
    db_connection.close()

    # Make return values:
    kw = dict(
        changed=changed,
        state='present',
        tablespace=tblspace.name,
        owner=tblspace.owner,
        queries=tblspace.executed_queries,
        options=tblspace.settings,
        location=tblspace.location,
    )

    if state == 'present':
        kw['state'] = 'present'

        if tblspace.new_name:
            kw['newname'] = tblspace.new_name

    elif state == 'absent':
        kw['state'] = 'absent'

    module.exit_json(**kw)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py
new file mode 100644
index 00000000..d56c9924
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user.py
@@ -0,0 +1,998 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Create, alter, or remove a user (role) from a PostgreSQL server instance
+description:
+- Creates, alters, or removes a user (role) from a PostgreSQL server instance
+ ("cluster" in PostgreSQL terminology) and, optionally,
+ grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- You can also use it to grant or revoke user's privileges in a particular database.
+- You cannot remove a user while it still has any privileges granted to it in any database.
+- Set I(fail_on_user) to C(no) to make the module ignore failures when trying to remove a user.
+ In this case, the module reports if changes happened as usual and separately reports
+ whether the user has been removed or not.
+options:
+ name:
+ description:
+ - Name of the user (role) to add or remove.
+ type: str
+ required: true
+ aliases:
+ - user
+ password:
+ description:
+ - Set the user's password, before 1.4 this was required.
+ - Password can be passed unhashed or hashed (MD5-hashed).
+ - An unhashed password is automatically hashed when saved into the
+ database if I(encrypted) is set, otherwise it is saved in
+ plain text format.
+ - When passing an MD5-hashed password, you must generate it with the format
+ C('str["md5"] + md5[ password + username ]'), resulting in a total of
+ 35 characters. An easy way to do this is
+ C(echo "md5`echo -n 'verysecretpasswordJOE' | md5sum | awk '{print $1}'`").
+ - Note that if the provided password string is already in MD5-hashed
+ format, then it is used as-is, regardless of I(encrypted) option.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and where user's permissions are granted.
+ type: str
+ aliases:
+ - login_db
+ fail_on_user:
+ description:
+ - If C(yes), fails when the user (role) cannot be removed. Otherwise just log and continue.
+ default: yes
+ type: bool
+ aliases:
+ - fail_on_role
+ priv:
+ description:
+ - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
+ you can define the user's privileges for the database ( allowed options - 'CREATE',
+ 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
+ for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
+ 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
+ C(table:SELECT) ). Mixed example of this string:
+ C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
+ type: str
+ role_attr_flags:
+ description:
+ - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
+ - Note that '[NO]CREATEUSER' is deprecated.
+ - To create a simple role for using it like a group, use C(NOLOGIN) flag.
+ type: str
+ choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
+ '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
+ session_role:
+ description:
+ - Switch to session role after connecting.
+ - The specified session role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The user (role) state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Whether the password is stored hashed in the database.
+ - You can specify an unhashed password, and PostgreSQL ensures
+ the stored password is hashed when I(encrypted=yes) is set.
+ If you specify a hashed password, the module uses it as-is,
+ regardless of the setting of I(encrypted).
+ - "Note: Postgresql 10 and newer does not support unhashed passwords."
+ - Previous to Ansible 2.6, this was C(no) by default.
+ default: yes
+ type: bool
+ expires:
+ description:
+ - The date at which the user's password is to expire.
+ - If set to C('infinity'), user's password never expires.
+ - Note that this value must be a valid SQL date and time type.
+ type: str
+ no_password_changes:
+ description:
+ - If C(yes), does not inspect the database for password changes.
+ If the user already exists, skips all password related checks.
+ Useful when C(pg_authid) is not accessible (such as in AWS RDS).
+ Otherwise, makes password changes as necessary.
+ default: no
+ type: bool
+ conn_limit:
+ description:
+ - Specifies the user (role) connection limit.
+ type: int
+ ssl_mode:
+ description:
+ - Determines how an SSL session is negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, verifies that the server's certificate is signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ groups:
+ description:
+ - The list of groups (roles) that you want to grant to the user.
+ type: list
+ elements: str
+ comment:
+ description:
+ - Adds a comment on the user (equivalent to the C(COMMENT ON ROLE) statement).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), checks whether values of options I(name), I(password), I(privs), I(expires),
+ I(role_attr_flags), I(groups), I(comment), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections through the options are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- The module creates a user (role) with login privilege by default.
+ Use C(NOLOGIN) I(role_attr_flags) to change this behaviour.
+- If you specify C(PUBLIC) as the user (role), then the privilege changes apply to all users (roles).
+ You may not specify password or role_attr_flags when the C(PUBLIC) user is specified.
+- SCRAM-SHA-256-hashed passwords (SASL Authentication) require PostgreSQL version 10 or newer.
+ On the previous versions the whole hashed string is used as a password.
+- 'Working with SCRAM-SHA-256-hashed passwords, be sure you use the I(environment:) variable
+ C(PGOPTIONS: "-c password_encryption=scram-sha-256") (see the provided example).'
+- On some systems (such as AWS RDS), C(pg_authid) is not accessible, thus, the module cannot compare
+ the current and desired C(password). In this case, the module assumes that the passwords are
+ different and changes it reporting that the state has been changed.
+ To skip all password related checks for existing users, use I(no_password_changes=yes).
+- Supports ``check_mode``.
+seealso:
+- module: community.postgresql.postgresql_privs
+- module: community.postgresql.postgresql_membership
+- module: community.postgresql.postgresql_owner
+- name: PostgreSQL database roles
+ description: Complete reference of the PostgreSQL database roles documentation.
+ link: https://www.postgresql.org/docs/current/user-manag.html
+- name: PostgreSQL SASL Authentication
+ description: Complete reference of the PostgreSQL SASL Authentication.
+ link: https://www.postgresql.org/docs/current/sasl-authentication.html
+author:
+- Ansible Core Team
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Connect to acme database, create django user, and grant access to database and products table
+ community.postgresql.postgresql_user:
+ db: acme
+ name: django
+ password: ceec4eif7ya
+ priv: "CONNECT/products:ALL"
+ expires: "Jan 31 2020"
+
+- name: Add a comment on django user
+ community.postgresql.postgresql_user:
+ db: acme
+ name: django
+ comment: This is a test user
+
+# Connect to default database, create rails user, set its password (MD5-hashed),
+# and grant privilege to create other databases and demote rails from super user status if user exists
+- name: Create rails user, set MD5-hashed password, grant privs
+ community.postgresql.postgresql_user:
+ name: rails
+ password: md59543f1d82624df2b31672ec0f7050460
+ role_attr_flags: CREATEDB,NOSUPERUSER
+
+- name: Connect to acme database and remove test user privileges from there
+ community.postgresql.postgresql_user:
+ db: acme
+ name: test
+ priv: "ALL/products:ALL"
+ state: absent
+ fail_on_user: no
+
+- name: Connect to test database, remove test user from cluster
+ community.postgresql.postgresql_user:
+ db: test
+ name: test
+ priv: ALL
+ state: absent
+
+- name: Connect to acme database and set user's password with no expire date
+ community.postgresql.postgresql_user:
+ db: acme
+ name: django
+ password: mysupersecretword
+ priv: "CONNECT/products:ALL"
+ expires: infinity
+
+# Example privileges string format
+# INSERT,UPDATE/table:SELECT/anothertable:ALL
+
+- name: Connect to test database and remove an existing user's password
+ community.postgresql.postgresql_user:
+ db: test
+ user: test
+ password: ""
+
+- name: Create user test and grant group user_ro and user_rw to it
+ community.postgresql.postgresql_user:
+ name: test
+ groups:
+ - user_ro
+ - user_rw
+
+# Create user with a cleartext password if it does not exist or update its password.
+# The password will be encrypted with SCRAM algorithm (available since PostgreSQL 10)
+- name: Create appclient user with SCRAM-hashed password
+ community.postgresql.postgresql_user:
+ name: appclient
+ password: "secret123"
+ environment:
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
+'''
+
+import itertools
+import re
+import traceback
+from hashlib import md5, sha256
+import hmac
+from base64 import b64decode
+
+try:
+ import psycopg2
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ pg_quote_identifier,
+ SQLParseError,
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import iteritems
+import ansible_collections.community.postgresql.plugins.module_utils.saslprep as saslprep
+
+try:
    # pbkdf2_hmac is missing on Python 2.6; we can safely assume
    # that a PostgreSQL 10 capable instance has at least Python 2.7 installed
+ from hashlib import pbkdf2_hmac
+ pbkdf2_found = True
+except ImportError:
+ pbkdf2_found = False
+
+
+FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
+FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
+
+SCRAM_SHA256_REGEX = r'^SCRAM-SHA-256\$(\d+):([A-Za-z0-9+\/=]+)\$([A-Za-z0-9+\/=]+):([A-Za-z0-9+\/=]+)$'
+
+VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
+ database=frozenset(
+ ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
+ )
+
# map to cope with idiosyncrasies of SUPERUSER and LOGIN
+PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
+ CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
+ REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
+
+executed_queries = []
+
+
class InvalidFlagsError(Exception):
    """Raised by parse_role_attrs() when role_attr_flags contains an unknown flag."""
    pass
+
+
class InvalidPrivsError(Exception):
    """Raised by parse_privs() when a privilege is not valid for its target type."""
    pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
def user_exists(cursor, user):
    """Return True if the given role is present in pg_roles.

    The PUBLIC pseudo-role is always reported as existing without
    querying the catalog.
    """
    if user == 'PUBLIC':
        return True

    cursor.execute("SELECT rolname FROM pg_roles WHERE rolname=%(user)s",
                   {'user': user})
    return cursor.rowcount > 0
+
+
def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
    """Create a new database user (role).

    role_attr_flags has already been validated by parse_role_attrs() and
    encrypted is a SQL keyword literal, so both are interpolated directly;
    password and expires are handed to the driver as bind parameters.
    Always returns True (a change was made).
    """
    parts = ['CREATE USER "%(user)s"' % {"user": user}]
    if password is not None and password != '':
        parts.append("WITH %(crypt)s" % {"crypt": encrypted})
        parts.append("PASSWORD %(password)s")
    if expires is not None:
        parts.append("VALID UNTIL %(expires)s")
    if conn_limit is not None:
        parts.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
    parts.append(role_attr_flags)

    query = ' '.join(parts)
    executed_queries.append(query)
    cursor.execute(query, dict(password=password, expires=expires))
    return True
+
+
def user_should_we_change_password(current_role_attrs, user, password, encrypted):
    """Check if we should change the user's password.

    Compare the proposed password with the existing one, comparing
    hashes if encrypted. If we can't access it assume yes.

    :param current_role_attrs: pg_authid row for the role (dict-like),
        or None when the catalog could not be read.
    :param user: role name (used as salt when building the MD5 hash).
    :param password: proposed password; '' means the role must have no password.
    :param encrypted: the SQL keyword 'ENCRYPTED' or 'UNENCRYPTED'.
    :return: True when an ALTER ... PASSWORD is required.
    """

    if current_role_attrs is None:
        # on some databases, E.g. AWS RDS instances, there is no access to
        # the pg_authid relation to check the pre-existing password, so we
        # just assume password is different
        return True

    # Do we actually need to do anything?
    pwchanging = False
    if password is not None:
        # Empty password means that the role shouldn't have a password, which
        # means we need to check if the current password is None.
        if password == '':
            if current_role_attrs['rolpassword'] is not None:
                pwchanging = True

        # SCRAM hashes are represented as a special object, containing hash data:
        # `SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey>`
        # for reference, see https://www.postgresql.org/docs/current/catalog-pg-authid.html
        elif current_role_attrs['rolpassword'] is not None \
                and pbkdf2_found \
                and re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']):

            r = re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword'])
            try:
                # extract SCRAM params from rolpassword
                it = int(r.group(1))
                salt = b64decode(r.group(2))
                server_key = b64decode(r.group(4))
                # we'll never need `storedKey` as it is only used for server auth in SCRAM
                # storedKey = b64decode(r.group(3))

                # from RFC5802 https://tools.ietf.org/html/rfc5802#section-3
                # SaltedPassword := Hi(Normalize(password), salt, i)
                # ServerKey := HMAC(SaltedPassword, "Server Key")
                normalized_password = saslprep.saslprep(to_text(password))
                salted_password = pbkdf2_hmac('sha256', to_bytes(normalized_password), salt, it)

                # Recompute ServerKey from the candidate password; if it matches
                # the stored ServerKey, the password is already current.
                server_key_verifier = hmac.new(salted_password, digestmod=sha256)
                server_key_verifier.update(b'Server Key')

                if server_key_verifier.digest() != server_key:
                    pwchanging = True
            except Exception:
                # We assume the password is not scram encrypted
                # or we cannot check it properly, e.g. due to missing dependencies
                pwchanging = True

        # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
        # 3: The size of the 'md5' prefix
        # When the provided password looks like a MD5-hash, value of
        # 'encrypted' is ignored.
        elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
            # Plain comparison: stored value is either the same hash or cleartext.
            if password != current_role_attrs['rolpassword']:
                pwchanging = True
        elif encrypted == 'ENCRYPTED':
            # Hash the cleartext candidate the same way PostgreSQL does
            # (md5 of password+username) and compare against the stored hash.
            hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
            if hashed_password != current_role_attrs['rolpassword']:
                pwchanging = True

    return pwchanging
+
+
def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
    """Change user password and/or attributes. Return True if changed, False otherwise.

    Two main paths:
    * default: read pg_authid (falling back to pg_roles, e.g. on AWS RDS),
      compare password/flags/expiry/connection limit, and issue one ALTER USER;
    * no_password_changes=True: only compare and alter role attribute flags,
      reading pg_roles so pg_authid access is never required.
    """
    changed = False

    cursor = db_connection.cursor(cursor_factory=DictCursor)
    # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
    # literal
    if user == 'PUBLIC':
        # PUBLIC is not a real role; only "no-op" requests are acceptable.
        if password is not None:
            module.fail_json(msg="cannot change the password for PUBLIC user")
        elif role_attr_flags != '':
            module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
        else:
            return False

    # Handle passwords.
    if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
        # Select password and all flag-like columns in order to verify changes.
        try:
            select = "SELECT * FROM pg_authid where rolname=%(user)s"
            cursor.execute(select, {"user": user})
            # Grab current role attributes.
            current_role_attrs = cursor.fetchone()
        except psycopg2.ProgrammingError:
            # Insufficient privilege for pg_authid; handled below.
            current_role_attrs = None
            db_connection.rollback()

        pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)

        if current_role_attrs is None:
            try:
                # AWS RDS instances does not allow user to access pg_authid
                # so try to get current_role_attrs from pg_roles tables
                select = "SELECT * FROM pg_roles where rolname=%(user)s"
                cursor.execute(select, {"user": user})
                # Grab current role attributes from pg_roles
                current_role_attrs = cursor.fetchone()
            except psycopg2.ProgrammingError as e:
                db_connection.rollback()
                module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))

        # Translate 'FLAG'/'NOFLAG' tokens to {FLAG: bool} and compare with
        # the rol* catalog columns to see whether any attribute changes.
        role_attr_flags_changing = False
        if role_attr_flags:
            role_attr_flags_dict = {}
            for r in role_attr_flags.split(' '):
                if r.startswith('NO'):
                    role_attr_flags_dict[r.replace('NO', '', 1)] = False
                else:
                    role_attr_flags_dict[r] = True

            for role_attr_name, role_attr_value in role_attr_flags_dict.items():
                if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
                    role_attr_flags_changing = True

        if expires is not None:
            # Let the server normalize the requested expiry to timestamptz so
            # it can be compared with rolvaliduntil.
            cursor.execute("SELECT %s::timestamptz;", (expires,))
            expires_with_tz = cursor.fetchone()[0]
            expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
        else:
            expires_changing = False

        conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])

        if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
            return False

        # Build a single ALTER USER statement covering all requested changes.
        alter = ['ALTER USER "%(user)s"' % {"user": user}]
        if pwchanging:
            if password != '':
                alter.append("WITH %(crypt)s" % {"crypt": encrypted})
                alter.append("PASSWORD %(password)s")
            else:
                # Empty password request removes the stored password.
                alter.append("WITH PASSWORD NULL")
            alter.append(role_attr_flags)
        elif role_attr_flags:
            alter.append('WITH %s' % role_attr_flags)
        if expires is not None:
            alter.append("VALID UNTIL %(expires)s")
        if conn_limit is not None:
            alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})

        query_password_data = dict(password=password, expires=expires)
        try:
            cursor.execute(' '.join(alter), query_password_data)
            changed = True
        except psycopg2.InternalError as e:
            if e.pgcode == '25006':
                # Handle errors due to read-only transactions indicated by pgcode 25006
                # ERROR: cannot execute ALTER ROLE in a read-only transaction
                changed = False
                module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
                return changed
            else:
                raise psycopg2.InternalError(e)
        except psycopg2.NotSupportedError as e:
            module.fail_json(msg=e.pgerror, exception=traceback.format_exc())

    elif no_password_changes and role_attr_flags != '':
        # Grab role information from pg_roles instead of pg_authid
        select = "SELECT * FROM pg_roles where rolname=%(user)s"
        cursor.execute(select, {"user": user})
        # Grab current role attributes.
        current_role_attrs = cursor.fetchone()

        role_attr_flags_changing = False

        if role_attr_flags:
            role_attr_flags_dict = {}
            for r in role_attr_flags.split(' '):
                if r.startswith('NO'):
                    role_attr_flags_dict[r.replace('NO', '', 1)] = False
                else:
                    role_attr_flags_dict[r] = True

            for role_attr_name, role_attr_value in role_attr_flags_dict.items():
                if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
                    role_attr_flags_changing = True

        if not role_attr_flags_changing:
            return False

        alter = ['ALTER USER "%(user)s"' %
                 {"user": user}]
        if role_attr_flags:
            alter.append('WITH %s' % role_attr_flags)

        try:
            cursor.execute(' '.join(alter))
        except psycopg2.InternalError as e:
            if e.pgcode == '25006':
                # Handle errors due to read-only transactions indicated by pgcode 25006
                # ERROR: cannot execute ALTER ROLE in a read-only transaction
                changed = False
                module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
                return changed
            else:
                raise psycopg2.InternalError(e)

        # Grab new role attributes.
        cursor.execute(select, {"user": user})
        new_role_attrs = cursor.fetchone()

        # Detect any differences between current_ and new_role_attrs.
        changed = current_role_attrs != new_role_attrs

    return changed
+
+
def user_delete(cursor, user):
    """Try to remove a user. Returns True if successful otherwise False.

    Runs DROP USER inside a savepoint so a failure (e.g. remaining
    privileges) leaves the enclosing transaction usable.
    """
    cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
    query = 'DROP USER "%s"' % user
    try:
        executed_queries.append(query)
        cursor.execute(query)
    except Exception:
        cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
        cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
        return False

    cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
    return True
+
+
def has_table_privileges(cursor, user, table, privs):
    """Compare the requested table privileges with those currently granted.

    :returns: tuple of:
        * privileges that they have and were requested
        * privileges they currently hold but were not requested
        * privileges requested that they do not hold
    """
    current = get_table_privileges(cursor, user, table)
    return (current & privs, current - privs, privs - current)
+
+
def get_table_privileges(cursor, user, table):
    """Return the frozenset of privileges *user* holds on *table*.

    *table* may be schema-qualified ("schema.table"); otherwise the
    'public' schema is assumed.
    """
    head, sep, tail = table.partition('.')
    schema, tbl = (head, tail) if sep else ('public', table)
    cursor.execute(
        "SELECT privilege_type FROM information_schema.role_table_grants "
        "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s",
        {'user': user, 'table': tbl, 'schema': schema})
    return frozenset(row[0] for row in cursor.fetchall())
+
+
def grant_table_privileges(cursor, user, table, privs):
    """Grant *privs* (already validated by parse_privs) on *table* to *user*."""
    query = 'GRANT %s ON TABLE %s TO "%s"' % (
        ', '.join(privs), pg_quote_identifier(table, 'table'), user)
    executed_queries.append(query)
    cursor.execute(query)
+
+
def revoke_table_privileges(cursor, user, table, privs):
    """Revoke *privs* (already validated by parse_privs) on *table* from *user*."""
    query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
        ', '.join(privs), pg_quote_identifier(table, 'table'), user)
    executed_queries.append(query)
    cursor.execute(query)
+
+
def get_database_privileges(cursor, user, db):
    """Return the set of database-level privileges *user* holds on *db*.

    Parses the pg_database.datacl ACL string. Returns an empty set when
    the database has no explicit ACL or *user* does not appear in it.
    """
    priv_map = {
        'C': 'CREATE',
        'T': 'TEMPORARY',
        'c': 'CONNECT',
    }
    query = 'SELECT datacl FROM pg_database WHERE datname = %s'
    cursor.execute(query, (db,))
    datacl = cursor.fetchone()[0]
    if datacl is None:
        return set()
    # Escape the role name: user names may legally contain regex
    # metacharacters ('(', '+', '.', ...) which would otherwise raise
    # re.error or silently corrupt the match.
    r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % re.escape(user), datacl)
    if r is None:
        return set()
    o = set(priv_map[v] for v in r.group(1))
    return normalize_privileges(o, 'database')
+
+
def has_database_privileges(cursor, user, db, privs):
    """Compare the requested database privileges with those currently granted.

    :returns: tuple of:
        * privileges that they have and were requested
        * privileges they currently hold but were not requested
        * privileges requested that they do not hold
    """
    current = get_database_privileges(cursor, user, db)
    return (current & privs, current - privs, privs - current)
+
+
def grant_database_privileges(cursor, user, db, privs):
    """Grant *privs* (already validated by parse_privs) on database *db* to *user*."""
    priv_str = ', '.join(privs)
    quoted_db = pg_quote_identifier(db, 'database')
    # PUBLIC is a keyword, not a role name, so it must not be quoted.
    if user == "PUBLIC":
        query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (priv_str, quoted_db)
    else:
        query = 'GRANT %s ON DATABASE %s TO "%s"' % (priv_str, quoted_db, user)

    executed_queries.append(query)
    cursor.execute(query)
+
+
def revoke_database_privileges(cursor, user, db, privs):
    """Revoke *privs* (already validated by parse_privs) on database *db* from *user*."""
    priv_str = ', '.join(privs)
    quoted_db = pg_quote_identifier(db, 'database')
    # PUBLIC is a keyword, not a role name, so it must not be quoted.
    if user == "PUBLIC":
        query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (priv_str, quoted_db)
    else:
        query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (priv_str, quoted_db, user)

    executed_queries.append(query)
    cursor.execute(query)
+
+
def revoke_privileges(cursor, user, privs):
    """Revoke from *user* any privileges in *privs* that are currently held.

    Returns True if at least one REVOKE was issued.
    """
    if privs is None:
        return False

    revoke_funcs = dict(table=revoke_table_privileges,
                        database=revoke_database_privileges)
    check_funcs = dict(table=has_table_privileges,
                       database=has_database_privileges)

    changed = False
    for type_ in privs:
        for name, privileges in iteritems(privs[type_]):
            # Only revoke privileges the user actually holds.
            held = check_funcs[type_](cursor, user, name, privileges)[0]
            if held:
                revoke_funcs[type_](cursor, user, name, privileges)
                changed = True
    return changed
+
+
def grant_privileges(cursor, user, privs):
    """Grant to *user* any privileges in *privs* that are currently missing.

    Returns True if at least one GRANT was issued.
    """
    if privs is None:
        return False

    grant_funcs = dict(table=grant_table_privileges,
                       database=grant_database_privileges)
    check_funcs = dict(table=has_table_privileges,
                       database=has_database_privileges)

    changed = False
    for type_ in privs:
        for name, privileges in iteritems(privs[type_]):
            # Only grant privileges the user does not yet hold.
            missing = check_funcs[type_](cursor, user, name, privileges)[2]
            if missing:
                grant_funcs[type_](cursor, user, name, privileges)
                changed = True
    return changed
+
+
def parse_role_attrs(cursor, role_attr_flags):
    """
    Parse role attributes string for user creation.
    Format:

        attributes[,attributes,...]

    Where:

        attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
        [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
                            "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
                            "[NO]BYPASSRLS" ]

    Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
    Note: "[NO]CREATEUSER" role attribute is deprecated.

    Raises InvalidFlagsError for any flag the server does not support.
    """
    flags = frozenset(flag.upper() for flag in role_attr_flags.split(',') if flag)

    # Base flags plus version-gated ones, each also valid with a NO prefix.
    base_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
    valid_flags = base_flags | frozenset('NO%s' % flag for flag in base_flags)

    invalid = flags - valid_flags
    if invalid:
        raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
                                ' '.join(invalid))

    return ' '.join(flags)
+
+
def normalize_privileges(privs, type_):
    """Return a canonical copy of *privs*: expand ALL and map TEMP to TEMPORARY."""
    result = set(privs)
    if 'ALL' in result:
        # Expand first, then drop the keyword (VALID_PRIVS includes 'ALL').
        result.update(VALID_PRIVS[type_])
        result.remove('ALL')
    if 'TEMP' in result:
        result.add('TEMPORARY')
        result.remove('TEMP')

    return result
+
+
def parse_privs(privs, db):
    """
    Parse privilege string to determine permissions for database db.
    Format:

        privileges[/privileges/...]

    Where:

        privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
            TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]

    Returns None when privs is None, otherwise a dict
    {'database': {name: privset}, 'table': {name: privset}}.
    Raises InvalidPrivsError for privileges invalid for their target type.
    """
    if privs is None:
        return privs

    o_privs = {
        'database': {},
        'table': {}
    }
    for token in privs.split('/'):
        if ':' in token:
            # "table:PRIV,PRIV" form.
            type_ = 'table'
            name, dummy, spec = token.partition(':')
        else:
            # Bare privilege list applies to the database itself.
            type_ = 'database'
            name, spec = db, token

        priv_set = frozenset(p.strip().upper() for p in spec.split(',') if p.strip())

        invalid = priv_set - VALID_PRIVS[type_]
        if invalid:
            raise InvalidPrivsError('Invalid privs specified for %s: %s' %
                                    (type_, ' '.join(invalid)))

        o_privs[type_][name] = normalize_privileges(priv_set, type_)

    return o_privs
+
+
def get_valid_flags_by_version(cursor):
    """
    Some role attributes were introduced after certain versions. We want to
    compile a list of valid flags against the current Postgres version.
    """
    server_version = cursor.connection.server_version

    supported = []
    for flag, version_introduced in FLAGS_BY_VERSION.items():
        if server_version >= version_introduced:
            supported.append(flag)
    return supported
+
+
def get_comment(cursor, user):
    """Return the comment attached to the given role (None if unset)."""
    cursor.execute("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
                   "FROM pg_catalog.pg_roles r "
                   "WHERE r.rolname = %(user)s",
                   {'user': user})
    return cursor.fetchone()[0]
+
+
def add_comment(cursor, user, comment):
    """Set the role's comment; return True if it differed and was changed."""
    if comment == get_comment(cursor, user):
        return False

    query = 'COMMENT ON ROLE "%s" IS ' % user
    # The comment text itself is passed as a bind parameter.
    cursor.execute(query + '%(comment)s', {'comment': comment})
    executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
    return True
+
+
+# ===========================================
+# Module execution.
+#
+
def main():
    """Module entry point: build the arg spec, connect, and converge the role state."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        user=dict(type='str', required=True, aliases=['name']),
        password=dict(type='str', default=None, no_log=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        priv=dict(type='str', default=None),
        db=dict(type='str', default='', aliases=['login_db']),
        fail_on_user=dict(type='bool', default=True, aliases=['fail_on_role']),
        role_attr_flags=dict(type='str', default=''),
        encrypted=dict(type='bool', default=True),
        no_password_changes=dict(type='bool', default=False, no_log=False),
        expires=dict(type='str', default=None),
        conn_limit=dict(type='int', default=None),
        session_role=dict(type='str'),
        groups=dict(type='list', elements='str'),
        comment=dict(type='str', default=None),
        trust_input=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    user = module.params["user"]
    password = module.params["password"]
    state = module.params["state"]
    fail_on_user = module.params["fail_on_user"]
    # Privileges are granted per database, so a db is mandatory with priv.
    if module.params['db'] == '' and module.params["priv"] is not None:
        module.fail_json(msg="privileges require a database to be specified")
    privs = parse_privs(module.params["priv"], module.params["db"])
    no_password_changes = module.params["no_password_changes"]
    # Map the boolean option to the SQL keyword used in CREATE/ALTER USER.
    if module.params["encrypted"]:
        encrypted = "ENCRYPTED"
    else:
        encrypted = "UNENCRYPTED"
    expires = module.params["expires"]
    conn_limit = module.params["conn_limit"]
    role_attr_flags = module.params["role_attr_flags"]
    groups = module.params["groups"]
    if groups:
        groups = [e.strip() for e in groups]
    comment = module.params["comment"]
    session_role = module.params['session_role']

    trust_input = module.params['trust_input']
    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, user, password, privs, expires,
                    role_attr_flags, groups, comment, session_role)

    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    db_connection = connect_to_db(module, conn_params)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Validate the flags against the connected server's version.
    try:
        role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
    except InvalidFlagsError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    kw = dict(user=user)
    changed = False
    user_removed = False

    if state == "present":
        # Create or alter the role, then converge privileges, group
        # membership and comment.
        if user_exists(cursor, user):
            try:
                changed = user_alter(db_connection, module, user, password,
                                     role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
        else:
            try:
                changed = user_add(cursor, user, password,
                                   role_attr_flags, encrypted, expires, conn_limit)
            except psycopg2.ProgrammingError as e:
                module.fail_json(msg="Unable to add user with given requirement "
                                     "due to : %s" % to_native(e),
                                 exception=traceback.format_exc())
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
        try:
            changed = grant_privileges(cursor, user, privs) or changed
        except SQLParseError as e:
            module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        if groups:
            target_roles = []
            target_roles.append(user)
            pg_membership = PgMembership(module, cursor, groups, target_roles)
            changed = pg_membership.grant() or changed
            executed_queries.extend(pg_membership.executed_queries)

        if comment is not None:
            try:
                changed = add_comment(cursor, user, comment) or changed
            except Exception as e:
                module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
                                 exception=traceback.format_exc())

    else:
        # state == 'absent': revoke listed privileges, then drop the role.
        if user_exists(cursor, user):
            if module.check_mode:
                changed = True
                kw['user_removed'] = True
            else:
                try:
                    changed = revoke_privileges(cursor, user, privs)
                    user_removed = user_delete(cursor, user)
                except SQLParseError as e:
                    module.fail_json(msg=to_native(e), exception=traceback.format_exc())
                changed = changed or user_removed
                if fail_on_user and not user_removed:
                    msg = "Unable to remove user"
                    module.fail_json(msg=msg)
                kw['user_removed'] = user_removed

    # Roll back in check mode so nothing is persisted.
    if changed:
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()

    kw['changed'] = changed
    kw['queries'] = executed_queries
    module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py
new file mode 100644
index 00000000..06eff530
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user_obj_stat_info
+short_description: Gather statistics about PostgreSQL user objects
+description:
+- Gathers statistics about PostgreSQL user objects.
+version_added: '0.2.0'
+options:
+ filter:
+ description:
+ - Limit the collected information by comma separated string or YAML list.
+ - Allowable values are C(functions), C(indexes), C(tables).
+ - By default, collects all subsets.
+ - Unsupported values are ignored.
+ type: list
+ elements: str
+ schema:
+ description:
+ - Restrict the output to a certain schema.
+ type: str
+ db:
+ description:
+ - Name of database to connect.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), checks whether the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- C(size) and C(total_size) returned values are presented in bytes.
+- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled.
+ See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
+- Supports C(check_mode).
+seealso:
+- module: community.postgresql.postgresql_info
+- module: community.postgresql.postgresql_ping
+- name: PostgreSQL statistics collector reference
+ description: Complete reference of the PostgreSQL statistics collector documentation.
+ link: https://www.postgresql.org/docs/current/monitoring-stats.html
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.postgresql.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Collect information about all supported user objects of the acme database
+ community.postgresql.postgresql_user_obj_stat_info:
+ db: acme
+
+- name: Collect information about all supported user objects in the custom schema of the acme database
+ community.postgresql.postgresql_user_obj_stat_info:
+ db: acme
+ schema: custom
+
+- name: Collect information about user tables and indexes in the acme database
+ community.postgresql.postgresql_user_obj_stat_info:
+ db: acme
+ filter: tables, indexes
+'''
+
+RETURN = r'''
+indexes:
+ description: User index statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
+tables:
+ description: User table statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
+functions:
+ description: User function statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.postgresql.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
class PgUserObjStatInfo():
    """Class to collect information about PostgreSQL user objects.

    Args:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.

    Attributes:
        module (AnsibleModule): Object of AnsibleModule class.
        cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
        info (dict): Statistics dictionary, populated by collect().
        obj_func_mapping (dict): Mapping of object types to corresponding collector methods.
        schema (str): Name of a schema to restrict stat collecting, set by set_schema().
    """

    def __init__(self, module, cursor):
        self.module = module
        self.cursor = cursor
        # Result dictionary returned by collect():
        self.info = {
            'functions': {},
            'indexes': {},
            'tables': {},
        }
        # Dispatch table used by collect() to resolve filter values:
        self.obj_func_mapping = {
            'functions': self.get_func_stat,
            'indexes': self.get_idx_stat,
            'tables': self.get_tbl_stat,
        }
        self.schema = None

    def collect(self, filter_=None, schema=None):
        """Collect statistics information of user objects.

        Kwargs:
            filter_ (list): List of subsets which need to be collected.
            schema (str): Restrict stat collecting by certain schema.

        Returns:
            ``self.info``.
        """
        if schema:
            self.set_schema(schema)

        if filter_:
            for obj_type in filter_:
                obj_type = obj_type.strip()
                obj_func = self.obj_func_mapping.get(obj_type)

                if obj_func is not None:
                    obj_func()
                else:
                    # Unsupported filter values are ignored with a warning
                    # (documented behavior):
                    self.module.warn("Unknown filter option '%s'" % obj_type)

        else:
            # No filter passed -- collect all supported subsets:
            for obj_func in self.obj_func_mapping.values():
                obj_func()

        return self.info

    def get_func_stat(self):
        """Get function statistics and fill out self.info dictionary."""
        self._get_stat('pg_stat_user_functions', 'functions', 'funcname')

    def get_idx_stat(self):
        """Get index statistics and fill out self.info dictionary."""
        self._get_stat('pg_stat_user_indexes', 'indexes', 'indexrelname')

    def get_tbl_stat(self):
        """Get table statistics and fill out self.info dictionary."""
        self._get_stat('pg_stat_user_tables', 'tables', 'relname')

    def _get_stat(self, view, info_key, name_key):
        """Query a pg_stat_user_* view and fill out self.info[info_key].

        Args:
            view (str): Name of the statistics view to query
                (internal constant, never user input).
            info_key (str): Key of self.info to fill out.
            name_key (str): Column of the view holding the object name.
        """
        if self.schema:
            # Only the schema value is parameterized; the view name is a
            # trusted internal constant:
            result = exec_sql(self, "SELECT * FROM %s WHERE schemaname = %%s" % view,
                              query_params=(self.schema,),
                              add_to_executed=False)
        else:
            # Bug fix: the original always passed query_params=(self.schema,),
            # i.e. (None,), with a query containing no placeholder, which
            # psycopg2 rejects. Pass no params when no schema is requested.
            result = exec_sql(self, "SELECT * FROM %s" % view,
                              add_to_executed=False)

        if not result:
            return

        self.__fill_out_info(result,
                             info_key=info_key,
                             schema_key='schemaname',
                             name_key=name_key)

    def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
        """Merge rows of a statistics view into self.info[info_key].

        Rows are grouped as self.info[info_key][schema][object_name] = {col: val}.
        For tables and indexes, a 'size' key (and 'total_size' for tables),
        in bytes, is added via pg_relation_size()/pg_total_relation_size().
        """
        # Convert result rows to plain dicts to handle them easier:
        rows = [dict(row) for row in result]

        for elem in rows:
            schemaname = elem[schema_key]
            objname = elem[name_key]

            # Add schema name as a key if not present:
            if not self.info[info_key].get(schemaname):
                self.info[info_key][schemaname] = {}

            # Add object name as a subkey (object names are unique
            # within a schema, so no additional checks are needed):
            obj_info = self.info[info_key][schemaname][objname] = {}

            # Copy all the remaining columns of the row:
            for key, val in iteritems(elem):
                if key not in (schema_key, name_key):
                    obj_info[key] = val

            if info_key in ('tables', 'indexes'):
                # Prefer the explicitly requested schema when qualifying
                # the relation name (preserves the original behavior):
                relname = '%s.%s' % (self.schema if self.schema else schemaname, objname)

                res = exec_sql(self, "SELECT pg_relation_size (%s)",
                               query_params=(relname,),
                               add_to_executed=False)
                obj_info['size'] = res[0][0]

                if info_key == 'tables':
                    res = exec_sql(self, "SELECT pg_total_relation_size (%s)",
                                   query_params=(relname,),
                                   add_to_executed=False)
                    obj_info['total_size'] = res[0][0]

    def set_schema(self, schema):
        """If schema exists, set self.schema, otherwise fail the module."""
        query = ("SELECT 1 FROM information_schema.schemata "
                 "WHERE schema_name = %s")
        result = exec_sql(self, query, query_params=(schema,),
                          add_to_executed=False)

        if result and result[0][0]:
            self.schema = schema
        else:
            self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
+
+
+# ===========================================
+# Module execution.
+#
+
def main():
    """Module entry point: parse params, connect, collect stats, return them."""
    arg_spec = postgres_common_argument_spec()
    arg_spec.update(
        db=dict(type='str', aliases=['login_db']),
        filter=dict(type='list', elements='str'),
        session_role=dict(type='str'),
        schema=dict(type='str'),
        trust_input=dict(type="bool", default=True),
    )
    module = AnsibleModule(
        argument_spec=arg_spec,
        supports_check_mode=True,
    )

    params = module.params

    if not params["trust_input"]:
        # Guard against potentially dangerous session_role values:
        check_input(module, params['session_role'])

    # Connect to the database and create a dict-style cursor.
    # Nothing needs to be committed, so autocommit stays disabled:
    conn_params = get_conn_params(module, params)
    db_connection = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    # Gather the requested statistics:
    collector = PgUserObjStatInfo(module, cursor)
    info_dict = collector.collect(params["filter"], params["schema"])

    # Release the connection resources:
    cursor.close()
    db_connection.close()

    # Hand the collected information back to Ansible:
    module.exit_json(**info_dict)
+
+
# Run the module when executed directly (as Ansible does):
if __name__ == '__main__':
    main()