summaryrefslogtreecommitdiffstats
path: root/src/test/modules
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-04 12:19:15 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-04 12:19:15 +0000
commit6eb9c5a5657d1fe77b55cc261450f3538d35a94d (patch)
tree657d8194422a5daccecfd42d654b8a245ef7b4c8 /src/test/modules
parentInitial commit. (diff)
downloadpostgresql-13-upstream.tar.xz
postgresql-13-upstream.zip
Adding upstream version 13.4.upstream/13.4upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/test/modules')
-rw-r--r--src/test/modules/Makefile35
-rw-r--r--src/test/modules/README20
-rw-r--r--src/test/modules/brin/.gitignore3
-rw-r--r--src/test/modules/brin/Makefile17
-rw-r--r--src/test/modules/brin/expected/summarization-and-inprogress-insertion.out51
-rw-r--r--src/test/modules/brin/specs/summarization-and-inprogress-insertion.spec44
-rw-r--r--src/test/modules/brin/t/01_workitems.pl41
-rw-r--r--src/test/modules/commit_ts/.gitignore4
-rw-r--r--src/test/modules/commit_ts/Makefile20
-rw-r--r--src/test/modules/commit_ts/commit_ts.conf1
-rw-r--r--src/test/modules/commit_ts/expected/commit_timestamp.out47
-rw-r--r--src/test/modules/commit_ts/expected/commit_timestamp_1.out39
-rw-r--r--src/test/modules/commit_ts/sql/commit_timestamp.sql24
-rw-r--r--src/test/modules/commit_ts/t/001_base.pl33
-rw-r--r--src/test/modules/commit_ts/t/002_standby.pl63
-rw-r--r--src/test/modules/commit_ts/t/003_standby_2.pl64
-rw-r--r--src/test/modules/commit_ts/t/004_restart.pl149
-rw-r--r--src/test/modules/dummy_index_am/.gitignore3
-rw-r--r--src/test/modules/dummy_index_am/Makefile20
-rw-r--r--src/test/modules/dummy_index_am/README12
-rw-r--r--src/test/modules/dummy_index_am/dummy_index_am--1.0.sql19
-rw-r--r--src/test/modules/dummy_index_am/dummy_index_am.c332
-rw-r--r--src/test/modules/dummy_index_am/dummy_index_am.control5
-rw-r--r--src/test/modules/dummy_index_am/expected/reloptions.out145
-rw-r--r--src/test/modules/dummy_index_am/sql/reloptions.sql83
-rw-r--r--src/test/modules/dummy_seclabel/.gitignore4
-rw-r--r--src/test/modules/dummy_seclabel/Makefile20
-rw-r--r--src/test/modules/dummy_seclabel/README41
-rw-r--r--src/test/modules/dummy_seclabel/dummy_seclabel--1.0.sql8
-rw-r--r--src/test/modules/dummy_seclabel/dummy_seclabel.c63
-rw-r--r--src/test/modules/dummy_seclabel/dummy_seclabel.control4
-rw-r--r--src/test/modules/dummy_seclabel/expected/dummy_seclabel.out117
-rw-r--r--src/test/modules/dummy_seclabel/sql/dummy_seclabel.sql115
-rw-r--r--src/test/modules/snapshot_too_old/.gitignore1
-rw-r--r--src/test/modules/snapshot_too_old/Makefile28
-rw-r--r--src/test/modules/snapshot_too_old/expected/sto_using_cursor.out95
-rw-r--r--src/test/modules/snapshot_too_old/expected/sto_using_hash_index.out19
-rw-r--r--src/test/modules/snapshot_too_old/expected/sto_using_select.out73
-rw-r--r--src/test/modules/snapshot_too_old/specs/sto_using_cursor.spec37
-rw-r--r--src/test/modules/snapshot_too_old/specs/sto_using_hash_index.spec31
-rw-r--r--src/test/modules/snapshot_too_old/specs/sto_using_select.spec36
-rw-r--r--src/test/modules/snapshot_too_old/sto.conf2
-rw-r--r--src/test/modules/ssl_passphrase_callback/.gitignore1
-rw-r--r--src/test/modules/ssl_passphrase_callback/Makefile40
-rw-r--r--src/test/modules/ssl_passphrase_callback/server.crt19
-rw-r--r--src/test/modules/ssl_passphrase_callback/server.key30
-rw-r--r--src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c90
-rw-r--r--src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl74
-rw-r--r--src/test/modules/test_bloomfilter/.gitignore4
-rw-r--r--src/test/modules/test_bloomfilter/Makefile23
-rw-r--r--src/test/modules/test_bloomfilter/README68
-rw-r--r--src/test/modules/test_bloomfilter/expected/test_bloomfilter.out22
-rw-r--r--src/test/modules/test_bloomfilter/sql/test_bloomfilter.sql19
-rw-r--r--src/test/modules/test_bloomfilter/test_bloomfilter--1.0.sql11
-rw-r--r--src/test/modules/test_bloomfilter/test_bloomfilter.c138
-rw-r--r--src/test/modules/test_bloomfilter/test_bloomfilter.control4
-rw-r--r--src/test/modules/test_ddl_deparse/.gitignore4
-rw-r--r--src/test/modules/test_ddl_deparse/Makefile43
-rw-r--r--src/test/modules/test_ddl_deparse/README8
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_extension.out0
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_function.out15
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_sequence.out15
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_table.out29
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_ts_config.out8
-rw-r--r--src/test/modules/test_ddl_deparse/expected/alter_type_enum.out7
-rw-r--r--src/test/modules/test_ddl_deparse/expected/comment_on.out23
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_conversion.out6
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_domain.out11
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_extension.out5
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_function.out0
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_operator.out0
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_rule.out30
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_schema.out19
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_sequence_1.out11
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_table.out164
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_transform.out15
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_trigger.out18
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_type.out24
-rw-r--r--src/test/modules/test_ddl_deparse/expected/create_view.out19
-rw-r--r--src/test/modules/test_ddl_deparse/expected/defprivs.out6
-rw-r--r--src/test/modules/test_ddl_deparse/expected/matviews.out8
-rw-r--r--src/test/modules/test_ddl_deparse/expected/opfamily.out67
-rw-r--r--src/test/modules/test_ddl_deparse/expected/test_ddl_deparse.out40
-rw-r--r--src/test/modules/test_ddl_deparse/sql/alter_function.sql17
-rw-r--r--src/test/modules/test_ddl_deparse/sql/alter_sequence.sql15
-rw-r--r--src/test/modules/test_ddl_deparse/sql/alter_table.sql21
-rw-r--r--src/test/modules/test_ddl_deparse/sql/alter_ts_config.sql8
-rw-r--r--src/test/modules/test_ddl_deparse/sql/alter_type_enum.sql6
-rw-r--r--src/test/modules/test_ddl_deparse/sql/comment_on.sql14
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_conversion.sql6
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_domain.sql10
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_extension.sql5
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_rule.sql31
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_schema.sql17
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_sequence_1.sql11
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_table.sql142
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_transform.sql16
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_trigger.sql18
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_type.sql21
-rw-r--r--src/test/modules/test_ddl_deparse/sql/create_view.sql17
-rw-r--r--src/test/modules/test_ddl_deparse/sql/defprivs.sql6
-rw-r--r--src/test/modules/test_ddl_deparse/sql/matviews.sql8
-rw-r--r--src/test/modules/test_ddl_deparse/sql/opfamily.sql52
-rw-r--r--src/test/modules/test_ddl_deparse/sql/test_ddl_deparse.sql42
-rw-r--r--src/test/modules/test_ddl_deparse/test_ddl_deparse--1.0.sql16
-rw-r--r--src/test/modules/test_ddl_deparse/test_ddl_deparse.c296
-rw-r--r--src/test/modules/test_ddl_deparse/test_ddl_deparse.control4
-rw-r--r--src/test/modules/test_extensions/.gitignore4
-rw-r--r--src/test/modules/test_extensions/Makefile26
-rw-r--r--src/test/modules/test_extensions/expected/test_extdepend.out188
-rw-r--r--src/test/modules/test_extensions/expected/test_extensions.out161
-rw-r--r--src/test/modules/test_extensions/sql/test_extdepend.sql90
-rw-r--r--src/test/modules/test_extensions/sql/test_extensions.sql101
-rw-r--r--src/test/modules/test_extensions/test_ext1--1.0.sql3
-rw-r--r--src/test/modules/test_extensions/test_ext1.control5
-rw-r--r--src/test/modules/test_extensions/test_ext2--1.0.sql3
-rw-r--r--src/test/modules/test_extensions/test_ext2.control4
-rw-r--r--src/test/modules/test_extensions/test_ext3--1.0.sql9
-rw-r--r--src/test/modules/test_extensions/test_ext3.control3
-rw-r--r--src/test/modules/test_extensions/test_ext4--1.0.sql3
-rw-r--r--src/test/modules/test_extensions/test_ext4.control4
-rw-r--r--src/test/modules/test_extensions/test_ext5--1.0.sql3
-rw-r--r--src/test/modules/test_extensions/test_ext5.control3
-rw-r--r--src/test/modules/test_extensions/test_ext6--1.0.sql1
-rw-r--r--src/test/modules/test_extensions/test_ext6.control5
-rw-r--r--src/test/modules/test_extensions/test_ext7--1.0--2.0.sql8
-rw-r--r--src/test/modules/test_extensions/test_ext7--1.0.sql13
-rw-r--r--src/test/modules/test_extensions/test_ext7.control4
-rw-r--r--src/test/modules/test_extensions/test_ext8--1.0.sql21
-rw-r--r--src/test/modules/test_extensions/test_ext8.control4
-rw-r--r--src/test/modules/test_extensions/test_ext_cyclic1--1.0.sql3
-rw-r--r--src/test/modules/test_extensions/test_ext_cyclic1.control4
-rw-r--r--src/test/modules/test_extensions/test_ext_cyclic2--1.0.sql3
-rw-r--r--src/test/modules/test_extensions/test_ext_cyclic2.control4
-rw-r--r--src/test/modules/test_extensions/test_ext_evttrig--1.0--2.0.sql7
-rw-r--r--src/test/modules/test_extensions/test_ext_evttrig--1.0.sql16
-rw-r--r--src/test/modules/test_extensions/test_ext_evttrig.control3
-rw-r--r--src/test/modules/test_ginpostinglist/.gitignore4
-rw-r--r--src/test/modules/test_ginpostinglist/Makefile23
-rw-r--r--src/test/modules/test_ginpostinglist/README2
-rw-r--r--src/test/modules/test_ginpostinglist/expected/test_ginpostinglist.out19
-rw-r--r--src/test/modules/test_ginpostinglist/sql/test_ginpostinglist.sql7
-rw-r--r--src/test/modules/test_ginpostinglist/test_ginpostinglist--1.0.sql8
-rw-r--r--src/test/modules/test_ginpostinglist/test_ginpostinglist.c96
-rw-r--r--src/test/modules/test_ginpostinglist/test_ginpostinglist.control4
-rw-r--r--src/test/modules/test_integerset/.gitignore4
-rw-r--r--src/test/modules/test_integerset/Makefile23
-rw-r--r--src/test/modules/test_integerset/README7
-rw-r--r--src/test/modules/test_integerset/expected/test_integerset.out31
-rw-r--r--src/test/modules/test_integerset/sql/test_integerset.sql7
-rw-r--r--src/test/modules/test_integerset/test_integerset--1.0.sql8
-rw-r--r--src/test/modules/test_integerset/test_integerset.c623
-rw-r--r--src/test/modules/test_integerset/test_integerset.control4
-rw-r--r--src/test/modules/test_misc/.gitignore4
-rw-r--r--src/test/modules/test_misc/Makefile14
-rw-r--r--src/test/modules/test_misc/README4
-rw-r--r--src/test/modules/test_misc/t/001_constraint_validation.pl310
-rw-r--r--src/test/modules/test_parser/.gitignore4
-rw-r--r--src/test/modules/test_parser/Makefile23
-rw-r--r--src/test/modules/test_parser/README61
-rw-r--r--src/test/modules/test_parser/expected/test_parser.out44
-rw-r--r--src/test/modules/test_parser/sql/test_parser.sql18
-rw-r--r--src/test/modules/test_parser/test_parser--1.0.sql32
-rw-r--r--src/test/modules/test_parser/test_parser.c127
-rw-r--r--src/test/modules/test_parser/test_parser.control5
-rw-r--r--src/test/modules/test_pg_dump/.gitignore4
-rw-r--r--src/test/modules/test_pg_dump/Makefile21
-rw-r--r--src/test/modules/test_pg_dump/README4
-rw-r--r--src/test/modules/test_pg_dump/expected/test_pg_dump.out95
-rw-r--r--src/test/modules/test_pg_dump/sql/test_pg_dump.sql108
-rw-r--r--src/test/modules/test_pg_dump/t/001_base.pl786
-rw-r--r--src/test/modules/test_pg_dump/test_pg_dump--1.0.sql62
-rw-r--r--src/test/modules/test_pg_dump/test_pg_dump.control3
-rw-r--r--src/test/modules/test_predtest/.gitignore4
-rw-r--r--src/test/modules/test_predtest/Makefile23
-rw-r--r--src/test/modules/test_predtest/README28
-rw-r--r--src/test/modules/test_predtest/expected/test_predtest.out1096
-rw-r--r--src/test/modules/test_predtest/sql/test_predtest.sql442
-rw-r--r--src/test/modules/test_predtest/test_predtest--1.0.sql16
-rw-r--r--src/test/modules/test_predtest/test_predtest.c218
-rw-r--r--src/test/modules/test_predtest/test_predtest.control4
-rw-r--r--src/test/modules/test_rbtree/.gitignore4
-rw-r--r--src/test/modules/test_rbtree/Makefile23
-rw-r--r--src/test/modules/test_rbtree/README13
-rw-r--r--src/test/modules/test_rbtree/expected/test_rbtree.out12
-rw-r--r--src/test/modules/test_rbtree/sql/test_rbtree.sql8
-rw-r--r--src/test/modules/test_rbtree/test_rbtree--1.0.sql8
-rw-r--r--src/test/modules/test_rbtree/test_rbtree.c413
-rw-r--r--src/test/modules/test_rbtree/test_rbtree.control4
-rw-r--r--src/test/modules/test_rls_hooks/.gitignore4
-rw-r--r--src/test/modules/test_rls_hooks/Makefile27
-rw-r--r--src/test/modules/test_rls_hooks/README16
-rw-r--r--src/test/modules/test_rls_hooks/expected/test_rls_hooks.out201
-rw-r--r--src/test/modules/test_rls_hooks/rls_hooks.conf1
-rw-r--r--src/test/modules/test_rls_hooks/sql/test_rls_hooks.sql176
-rw-r--r--src/test/modules/test_rls_hooks/test_rls_hooks.c178
-rw-r--r--src/test/modules/test_rls_hooks/test_rls_hooks.control4
-rw-r--r--src/test/modules/test_rls_hooks/test_rls_hooks.h25
-rw-r--r--src/test/modules/test_shm_mq/.gitignore4
-rw-r--r--src/test/modules/test_shm_mq/Makefile25
-rw-r--r--src/test/modules/test_shm_mq/README49
-rw-r--r--src/test/modules/test_shm_mq/expected/test_shm_mq.out36
-rw-r--r--src/test/modules/test_shm_mq/setup.c316
-rw-r--r--src/test/modules/test_shm_mq/sql/test_shm_mq.sql12
-rw-r--r--src/test/modules/test_shm_mq/test.c266
-rw-r--r--src/test/modules/test_shm_mq/test_shm_mq--1.0.sql19
-rw-r--r--src/test/modules/test_shm_mq/test_shm_mq.control4
-rw-r--r--src/test/modules/test_shm_mq/test_shm_mq.h45
-rw-r--r--src/test/modules/test_shm_mq/worker.c219
-rw-r--r--src/test/modules/unsafe_tests/.gitignore4
-rw-r--r--src/test/modules/unsafe_tests/Makefile14
-rw-r--r--src/test/modules/unsafe_tests/README8
-rw-r--r--src/test/modules/unsafe_tests/expected/alter_system_table.out179
-rw-r--r--src/test/modules/unsafe_tests/expected/rolenames.out1010
-rw-r--r--src/test/modules/unsafe_tests/sql/alter_system_table.sql195
-rw-r--r--src/test/modules/unsafe_tests/sql/rolenames.sql488
-rw-r--r--src/test/modules/worker_spi/.gitignore4
-rw-r--r--src/test/modules/worker_spi/Makefile26
-rw-r--r--src/test/modules/worker_spi/dynamic.conf2
-rw-r--r--src/test/modules/worker_spi/expected/worker_spi.out50
-rw-r--r--src/test/modules/worker_spi/sql/worker_spi.sql35
-rw-r--r--src/test/modules/worker_spi/worker_spi--1.0.sql9
-rw-r--r--src/test/modules/worker_spi/worker_spi.c418
-rw-r--r--src/test/modules/worker_spi/worker_spi.control5
224 files changed, 13600 insertions, 0 deletions
diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile
new file mode 100644
index 0000000..29de73c
--- /dev/null
+++ b/src/test/modules/Makefile
@@ -0,0 +1,35 @@
+# src/test/modules/Makefile
+
+subdir = src/test/modules
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+SUBDIRS = \
+ brin \
+ commit_ts \
+ dummy_index_am \
+ dummy_seclabel \
+ snapshot_too_old \
+ test_bloomfilter \
+ test_ddl_deparse \
+ test_extensions \
+ test_ginpostinglist \
+ test_integerset \
+ test_misc \
+ test_parser \
+ test_pg_dump \
+ test_predtest \
+ test_rbtree \
+ test_rls_hooks \
+ test_shm_mq \
+ unsafe_tests \
+ worker_spi
+
+ifeq ($(with_openssl),yes)
+SUBDIRS += ssl_passphrase_callback
+else
+ALWAYS_SUBDIRS += ssl_passphrase_callback
+endif
+
+$(recurse)
+$(recurse_always)
diff --git a/src/test/modules/README b/src/test/modules/README
new file mode 100644
index 0000000..025ecac
--- /dev/null
+++ b/src/test/modules/README
@@ -0,0 +1,20 @@
+Test extensions and libraries
+=============================
+
+src/test/modules contains PostgreSQL extensions that are primarily or entirely
+intended for testing PostgreSQL and/or to serve as example code. The extensions
+here aren't intended to be installed in a production server and aren't suitable
+for "real work".
+
+Furthermore, while you can do "make install" and "make installcheck" in
+this directory or its children, it is NOT ADVISABLE to do so with a server
+containing valuable data. Some of these tests may have undesirable
+side-effects on roles or other global objects within the tested server.
+"make installcheck-world" at the top level does not recurse into this
+directory.
+
+Most extensions have their own pg_regress tests or isolationtester specs. Some
+are also used by tests elsewhere in the tree.
+
+If you're adding new hooks or other functionality exposed as C-level API this
+is where to add the tests for it.
diff --git a/src/test/modules/brin/.gitignore b/src/test/modules/brin/.gitignore
new file mode 100644
index 0000000..44f600c
--- /dev/null
+++ b/src/test/modules/brin/.gitignore
@@ -0,0 +1,3 @@
+# Generated subdirectories
+/output_iso/
+/tmp_check/
diff --git a/src/test/modules/brin/Makefile b/src/test/modules/brin/Makefile
new file mode 100644
index 0000000..c871593
--- /dev/null
+++ b/src/test/modules/brin/Makefile
@@ -0,0 +1,17 @@
+# src/test/modules/brin/Makefile
+
+EXTRA_INSTALL = contrib/pageinspect
+
+ISOLATION = summarization-and-inprogress-insertion
+TAP_TESTS = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/brin
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/brin/expected/summarization-and-inprogress-insertion.out b/src/test/modules/brin/expected/summarization-and-inprogress-insertion.out
new file mode 100644
index 0000000..2a4755d
--- /dev/null
+++ b/src/test/modules/brin/expected/summarization-and-inprogress-insertion.out
@@ -0,0 +1,51 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s2check s1b s2b s1i s2summ s1c s2c s2check
+step s2check: SELECT * FROM brin_page_items(get_raw_page('brinidx', 2), 'brinidx'::regclass);
+itemoffset|blknum|attnum|allnulls|hasnulls|placeholder|value
+----------+------+------+--------+--------+-----------+--------
+ 1| 0| 1|f |f |f |{1 .. 1}
+(1 row)
+
+step s1b: BEGIN ISOLATION LEVEL REPEATABLE READ;
+step s2b: BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT 1;
+?column?
+--------
+ 1
+(1 row)
+
+step s1i: INSERT INTO brin_iso VALUES (1000);
+step s2summ: SELECT brin_summarize_new_values('brinidx'::regclass);
+brin_summarize_new_values
+-------------------------
+ 1
+(1 row)
+
+step s1c: COMMIT;
+step s2c: COMMIT;
+step s2check: SELECT * FROM brin_page_items(get_raw_page('brinidx', 2), 'brinidx'::regclass);
+itemoffset|blknum|attnum|allnulls|hasnulls|placeholder|value
+----------+------+------+--------+--------+-----------+-----------
+ 1| 0| 1|f |f |f |{1 .. 1}
+ 2| 1| 1|f |f |f |{1 .. 1000}
+(2 rows)
+
+
+starting permutation: s2check s1b s1i s2vacuum s1c s2check
+step s2check: SELECT * FROM brin_page_items(get_raw_page('brinidx', 2), 'brinidx'::regclass);
+itemoffset|blknum|attnum|allnulls|hasnulls|placeholder|value
+----------+------+------+--------+--------+-----------+--------
+ 1| 0| 1|f |f |f |{1 .. 1}
+(1 row)
+
+step s1b: BEGIN ISOLATION LEVEL REPEATABLE READ;
+step s1i: INSERT INTO brin_iso VALUES (1000);
+step s2vacuum: VACUUM brin_iso;
+step s1c: COMMIT;
+step s2check: SELECT * FROM brin_page_items(get_raw_page('brinidx', 2), 'brinidx'::regclass);
+itemoffset|blknum|attnum|allnulls|hasnulls|placeholder|value
+----------+------+------+--------+--------+-----------+-----------
+ 1| 0| 1|f |f |f |{1 .. 1}
+ 2| 1| 1|f |f |f |{1 .. 1000}
+(2 rows)
+
diff --git a/src/test/modules/brin/specs/summarization-and-inprogress-insertion.spec b/src/test/modules/brin/specs/summarization-and-inprogress-insertion.spec
new file mode 100644
index 0000000..19ac18a
--- /dev/null
+++ b/src/test/modules/brin/specs/summarization-and-inprogress-insertion.spec
@@ -0,0 +1,44 @@
+# This test verifies that values inserted in transactions still in progress
+# are considered during concurrent range summarization (either using the
+# brin_summarize_new_values function or regular VACUUM).
+
+setup
+{
+ CREATE TABLE brin_iso (
+ value int
+ ) WITH (fillfactor=10);
+ CREATE INDEX brinidx ON brin_iso USING brin (value) WITH (pages_per_range=1);
+ -- this fills the first page
+ DO $$
+ DECLARE curtid tid;
+ BEGIN
+ LOOP
+ INSERT INTO brin_iso VALUES (1) RETURNING ctid INTO curtid;
+ EXIT WHEN curtid > tid '(1, 0)';
+ END LOOP;
+ END;
+ $$;
+ CREATE EXTENSION IF NOT EXISTS pageinspect;
+}
+
+teardown
+{
+ DROP TABLE brin_iso;
+}
+
+session "s1"
+step "s1b" { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step "s1i" { INSERT INTO brin_iso VALUES (1000); }
+step "s1c" { COMMIT; }
+
+session "s2"
+step "s2b" { BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT 1; }
+step "s2summ" { SELECT brin_summarize_new_values('brinidx'::regclass); }
+step "s2c" { COMMIT; }
+
+step "s2vacuum" { VACUUM brin_iso; }
+
+step "s2check" { SELECT * FROM brin_page_items(get_raw_page('brinidx', 2), 'brinidx'::regclass); }
+
+permutation "s2check" "s1b" "s2b" "s1i" "s2summ" "s1c" "s2c" "s2check"
+permutation "s2check" "s1b" "s1i" "s2vacuum" "s1c" "s2check"
diff --git a/src/test/modules/brin/t/01_workitems.pl b/src/test/modules/brin/t/01_workitems.pl
new file mode 100644
index 0000000..534ab63
--- /dev/null
+++ b/src/test/modules/brin/t/01_workitems.pl
@@ -0,0 +1,41 @@
+# Verify that work items work correctly
+
+use strict;
+use warnings;
+
+use TestLib;
+use Test::More tests => 2;
+use PostgresNode;
+
+my $node = get_new_node('tango');
+$node->init;
+$node->append_conf('postgresql.conf', 'autovacuum_naptime=1s');
+$node->start;
+
+$node->safe_psql('postgres', 'create extension pageinspect');
+
+# Create a table with an autosummarizing BRIN index
+$node->safe_psql(
+ 'postgres',
+ 'create table brin_wi (a int) with (fillfactor = 10);
+ create index brin_wi_idx on brin_wi using brin (a) with (pages_per_range=1, autosummarize=on);
+ '
+);
+my $count = $node->safe_psql('postgres',
+ "select count(*) from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)"
+);
+is($count, '1', "initial index state is correct");
+
+$node->safe_psql('postgres',
+ 'insert into brin_wi select * from generate_series(1, 100)');
+
+$node->poll_query_until(
+ 'postgres',
+ "select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)",
+ 't');
+
+$count = $node->safe_psql('postgres',
+ "select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)"
+);
+is($count, 't', "index got summarized");
+$node->stop;
diff --git a/src/test/modules/commit_ts/.gitignore b/src/test/modules/commit_ts/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/commit_ts/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/commit_ts/Makefile b/src/test/modules/commit_ts/Makefile
new file mode 100644
index 0000000..113bcfa
--- /dev/null
+++ b/src/test/modules/commit_ts/Makefile
@@ -0,0 +1,20 @@
+# src/test/modules/commit_ts/Makefile
+
+REGRESS = commit_timestamp
+REGRESS_OPTS = --temp-config=$(top_srcdir)/src/test/modules/commit_ts/commit_ts.conf
+# Disabled because these tests require "track_commit_timestamp = on",
+# which typical installcheck users do not have (e.g. buildfarm clients).
+NO_INSTALLCHECK = 1
+
+TAP_TESTS = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/commit_ts
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/commit_ts/commit_ts.conf b/src/test/modules/commit_ts/commit_ts.conf
new file mode 100644
index 0000000..e9d3c35
--- /dev/null
+++ b/src/test/modules/commit_ts/commit_ts.conf
@@ -0,0 +1 @@
+track_commit_timestamp = on
diff --git a/src/test/modules/commit_ts/expected/commit_timestamp.out b/src/test/modules/commit_ts/expected/commit_timestamp.out
new file mode 100644
index 0000000..5b7783b
--- /dev/null
+++ b/src/test/modules/commit_ts/expected/commit_timestamp.out
@@ -0,0 +1,47 @@
+--
+-- Commit Timestamp
+--
+SHOW track_commit_timestamp;
+ track_commit_timestamp
+------------------------
+ on
+(1 row)
+
+CREATE TABLE committs_test(id serial, ts timestamptz default now());
+INSERT INTO committs_test DEFAULT VALUES;
+INSERT INTO committs_test DEFAULT VALUES;
+INSERT INTO committs_test DEFAULT VALUES;
+SELECT id,
+ pg_xact_commit_timestamp(xmin) >= ts,
+ pg_xact_commit_timestamp(xmin) <= now(),
+ pg_xact_commit_timestamp(xmin) - ts < '60s' -- 60s should give a lot of reserve
+FROM committs_test
+ORDER BY id;
+ id | ?column? | ?column? | ?column?
+----+----------+----------+----------
+ 1 | t | t | t
+ 2 | t | t | t
+ 3 | t | t | t
+(3 rows)
+
+DROP TABLE committs_test;
+SELECT pg_xact_commit_timestamp('0'::xid);
+ERROR: cannot retrieve commit timestamp for transaction 0
+SELECT pg_xact_commit_timestamp('1'::xid);
+ pg_xact_commit_timestamp
+--------------------------
+
+(1 row)
+
+SELECT pg_xact_commit_timestamp('2'::xid);
+ pg_xact_commit_timestamp
+--------------------------
+
+(1 row)
+
+SELECT x.xid::text::bigint > 0, x.timestamp > '-infinity'::timestamptz, x.timestamp <= now() FROM pg_last_committed_xact() x;
+ ?column? | ?column? | ?column?
+----------+----------+----------
+ t | t | t
+(1 row)
+
diff --git a/src/test/modules/commit_ts/expected/commit_timestamp_1.out b/src/test/modules/commit_ts/expected/commit_timestamp_1.out
new file mode 100644
index 0000000..c10b0ab
--- /dev/null
+++ b/src/test/modules/commit_ts/expected/commit_timestamp_1.out
@@ -0,0 +1,39 @@
+--
+-- Commit Timestamp
+--
+SHOW track_commit_timestamp;
+ track_commit_timestamp
+------------------------
+ off
+(1 row)
+
+CREATE TABLE committs_test(id serial, ts timestamptz default now());
+INSERT INTO committs_test DEFAULT VALUES;
+INSERT INTO committs_test DEFAULT VALUES;
+INSERT INTO committs_test DEFAULT VALUES;
+SELECT id,
+ pg_xact_commit_timestamp(xmin) >= ts,
+ pg_xact_commit_timestamp(xmin) <= now(),
+ pg_xact_commit_timestamp(xmin) - ts < '60s' -- 60s should give a lot of reserve
+FROM committs_test
+ORDER BY id;
+ERROR: could not get commit timestamp data
+HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
+DROP TABLE committs_test;
+SELECT pg_xact_commit_timestamp('0'::xid);
+ERROR: cannot retrieve commit timestamp for transaction 0
+SELECT pg_xact_commit_timestamp('1'::xid);
+ pg_xact_commit_timestamp
+--------------------------
+
+(1 row)
+
+SELECT pg_xact_commit_timestamp('2'::xid);
+ pg_xact_commit_timestamp
+--------------------------
+
+(1 row)
+
+SELECT x.xid::text::bigint > 0, x.timestamp > '-infinity'::timestamptz, x.timestamp <= now() FROM pg_last_committed_xact() x;
+ERROR: could not get commit timestamp data
+HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
diff --git a/src/test/modules/commit_ts/sql/commit_timestamp.sql b/src/test/modules/commit_ts/sql/commit_timestamp.sql
new file mode 100644
index 0000000..4e041a5
--- /dev/null
+++ b/src/test/modules/commit_ts/sql/commit_timestamp.sql
@@ -0,0 +1,24 @@
+--
+-- Commit Timestamp
+--
+SHOW track_commit_timestamp;
+CREATE TABLE committs_test(id serial, ts timestamptz default now());
+
+INSERT INTO committs_test DEFAULT VALUES;
+INSERT INTO committs_test DEFAULT VALUES;
+INSERT INTO committs_test DEFAULT VALUES;
+
+SELECT id,
+ pg_xact_commit_timestamp(xmin) >= ts,
+ pg_xact_commit_timestamp(xmin) <= now(),
+ pg_xact_commit_timestamp(xmin) - ts < '60s' -- 60s should give a lot of reserve
+FROM committs_test
+ORDER BY id;
+
+DROP TABLE committs_test;
+
+SELECT pg_xact_commit_timestamp('0'::xid);
+SELECT pg_xact_commit_timestamp('1'::xid);
+SELECT pg_xact_commit_timestamp('2'::xid);
+
+SELECT x.xid::text::bigint > 0, x.timestamp > '-infinity'::timestamptz, x.timestamp <= now() FROM pg_last_committed_xact() x;
diff --git a/src/test/modules/commit_ts/t/001_base.pl b/src/test/modules/commit_ts/t/001_base.pl
new file mode 100644
index 0000000..f8d5d84
--- /dev/null
+++ b/src/test/modules/commit_ts/t/001_base.pl
@@ -0,0 +1,33 @@
+# Single-node test: value can be set, and is still present after recovery
+
+use strict;
+use warnings;
+
+use TestLib;
+use Test::More tests => 2;
+use PostgresNode;
+
+my $node = get_new_node('foxtrot');
+$node->init;
+$node->append_conf('postgresql.conf', 'track_commit_timestamp = on');
+$node->start;
+
+# Create a table, compare "now()" to the commit TS of its xmin
+$node->safe_psql('postgres',
+ 'create table t as select now from (select now(), pg_sleep(1)) f');
+my $true = $node->safe_psql('postgres',
+ 'select t.now - ts.* < \'1s\' from t, pg_class c, pg_xact_commit_timestamp(c.xmin) ts where relname = \'t\''
+);
+is($true, 't', 'commit TS is set');
+my $ts = $node->safe_psql('postgres',
+ 'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t\''
+);
+
+# Verify that we read the same TS after crash recovery
+$node->stop('immediate');
+$node->start;
+
+my $recovered_ts = $node->safe_psql('postgres',
+ 'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t\''
+);
+is($recovered_ts, $ts, 'commit TS remains after crash recovery');
diff --git a/src/test/modules/commit_ts/t/002_standby.pl b/src/test/modules/commit_ts/t/002_standby.pl
new file mode 100644
index 0000000..f376b59
--- /dev/null
+++ b/src/test/modules/commit_ts/t/002_standby.pl
@@ -0,0 +1,63 @@
+# Test simple scenario involving a standby
+
+use strict;
+use warnings;
+
+use TestLib;
+use Test::More tests => 4;
+use PostgresNode;
+
+my $bkplabel = 'backup';
+my $master = get_new_node('master');
+$master->init(allows_streaming => 1);
+
+$master->append_conf(
+ 'postgresql.conf', qq{
+ track_commit_timestamp = on
+ max_wal_senders = 5
+ });
+$master->start;
+$master->backup($bkplabel);
+
+my $standby = get_new_node('standby');
+$standby->init_from_backup($master, $bkplabel, has_streaming => 1);
+$standby->start;
+
+for my $i (1 .. 10)
+{
+ $master->safe_psql('postgres', "create table t$i()");
+}
+my $master_ts = $master->safe_psql('postgres',
+ qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't10'}
+);
+my $master_lsn =
+ $master->safe_psql('postgres', 'select pg_current_wal_lsn()');
+$standby->poll_query_until('postgres',
+ qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
+ or die "standby never caught up";
+
+my $standby_ts = $standby->safe_psql('postgres',
+ qq{select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = 't10'}
+);
+is($master_ts, $standby_ts, "standby gives same value as master");
+
+$master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
+$master->restart;
+$master->safe_psql('postgres', 'checkpoint');
+$master_lsn = $master->safe_psql('postgres', 'select pg_current_wal_lsn()');
+$standby->poll_query_until('postgres',
+ qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
+ or die "standby never caught up";
+$standby->safe_psql('postgres', 'checkpoint');
+
+# This one should raise an error now
+my ($ret, $standby_ts_stdout, $standby_ts_stderr) = $standby->psql('postgres',
+ 'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t10\''
+);
+is($ret, 3, 'standby errors when master turned feature off');
+is($standby_ts_stdout, '',
+ "standby gives no value when master turned feature off");
+like(
+ $standby_ts_stderr,
+ qr/could not get commit timestamp data/,
+ 'expected error when master turned feature off');
diff --git a/src/test/modules/commit_ts/t/003_standby_2.pl b/src/test/modules/commit_ts/t/003_standby_2.pl
new file mode 100644
index 0000000..9165d50
--- /dev/null
+++ b/src/test/modules/commit_ts/t/003_standby_2.pl
@@ -0,0 +1,64 @@
+# Test master/standby scenario where the track_commit_timestamp GUC is
+# repeatedly toggled on and off.
+use strict;
+use warnings;
+
+use TestLib;
+use Test::More tests => 4;
+use PostgresNode;
+
+my $bkplabel = 'backup';
+my $master = get_new_node('master');
+$master->init(allows_streaming => 1);
+$master->append_conf(
+ 'postgresql.conf', qq{
+ track_commit_timestamp = on
+ max_wal_senders = 5
+ });
+$master->start;
+$master->backup($bkplabel);
+
+my $standby = get_new_node('standby');
+$standby->init_from_backup($master, $bkplabel, has_streaming => 1);
+$standby->start;
+
+for my $i (1 .. 10)
+{
+ $master->safe_psql('postgres', "create table t$i()");
+}
+$master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
+$master->restart;
+$master->safe_psql('postgres', 'checkpoint');
+my $master_lsn =
+ $master->safe_psql('postgres', 'select pg_current_wal_lsn()');
+$standby->poll_query_until('postgres',
+ qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
+ or die "standby never caught up";
+
+$standby->safe_psql('postgres', 'checkpoint');
+$standby->restart;
+
+my ($psql_ret, $standby_ts_stdout, $standby_ts_stderr) = $standby->psql(
+ 'postgres',
+ qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't10'}
+);
+is($psql_ret, 3, 'expect error when getting commit timestamp after restart');
+is($standby_ts_stdout, '', "standby does not return a value after restart");
+like(
+ $standby_ts_stderr,
+ qr/could not get commit timestamp data/,
+ 'expected err msg after restart');
+
+$master->append_conf('postgresql.conf', 'track_commit_timestamp = on');
+$master->restart;
+$master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
+$master->restart;
+
+system_or_bail('pg_ctl', '-D', $standby->data_dir, 'promote');
+
+$standby->safe_psql('postgres', "create table t11()");
+my $standby_ts = $standby->safe_psql('postgres',
+ qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't11'}
+);
+isnt($standby_ts, '',
+ "standby gives valid value ($standby_ts) after promotion");
diff --git a/src/test/modules/commit_ts/t/004_restart.pl b/src/test/modules/commit_ts/t/004_restart.pl
new file mode 100644
index 0000000..39ca25a
--- /dev/null
+++ b/src/test/modules/commit_ts/t/004_restart.pl
@@ -0,0 +1,149 @@
+# Testing of commit timestamps preservation across restarts
+use strict;
+use warnings;
+use PostgresNode;
+use TestLib;
+use Test::More tests => 16;
+
+my $node_master = get_new_node('master');
+$node_master->init(allows_streaming => 1);
+$node_master->append_conf('postgresql.conf', 'track_commit_timestamp = on');
+$node_master->start;
+
+my ($ret, $stdout, $stderr);
+
+($ret, $stdout, $stderr) =
+ $node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('0');]);
+is($ret, 3, 'getting ts of InvalidTransactionId reports error');
+like(
+ $stderr,
+ qr/cannot retrieve commit timestamp for transaction/,
+ 'expected error from InvalidTransactionId');
+
+($ret, $stdout, $stderr) =
+ $node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('1');]);
+is($ret, 0, 'getting ts of BootstrapTransactionId succeeds');
+is($stdout, '', 'timestamp of BootstrapTransactionId is null');
+
+($ret, $stdout, $stderr) =
+ $node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('2');]);
+is($ret, 0, 'getting ts of FrozenTransactionId succeeds');
+is($stdout, '', 'timestamp of FrozenTransactionId is null');
+
+# Since FirstNormalTransactionId was used during initdb, long before commit
+# timestamps were enabled, its timestamp is null: the feature is on now, but
+# no commit timestamp data was recorded for that transaction.
+is( $node_master->safe_psql(
+ 'postgres', qq[SELECT pg_xact_commit_timestamp('3');]),
+ '',
+ 'committs for FirstNormalTransactionId is null');
+
+$node_master->safe_psql('postgres',
+ qq[CREATE TABLE committs_test(x integer, y timestamp with time zone);]);
+
+my $xid = $node_master->safe_psql(
+ 'postgres', qq[
+ BEGIN;
+ INSERT INTO committs_test(x, y) VALUES (1, current_timestamp);
+ SELECT pg_current_xact_id()::xid;
+ COMMIT;
+]);
+
+my $before_restart_ts = $node_master->safe_psql('postgres',
+ qq[SELECT pg_xact_commit_timestamp('$xid');]);
+ok($before_restart_ts ne '' && $before_restart_ts ne 'null',
+ 'commit timestamp recorded');
+
+$node_master->stop('immediate');
+$node_master->start;
+
+my $after_crash_ts = $node_master->safe_psql('postgres',
+ qq[SELECT pg_xact_commit_timestamp('$xid');]);
+is($after_crash_ts, $before_restart_ts,
+ 'timestamps before and after crash are equal');
+
+$node_master->stop('fast');
+$node_master->start;
+
+my $after_restart_ts = $node_master->safe_psql('postgres',
+ qq[SELECT pg_xact_commit_timestamp('$xid');]);
+is($after_restart_ts, $before_restart_ts,
+ 'timestamps before and after restart are equal');
+
+# Now disable commit timestamps
+$node_master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
+$node_master->stop('fast');
+
+# Start the server, which generates an XLOG_PARAMETER_CHANGE record in
+# which the parameter change is registered.
+$node_master->start;
+
+# Now restart the server again so that no XLOG_PARAMETER_CHANGE records are
+# replayed with the follow-up immediate shutdown.
+$node_master->restart;
+
+# Move commit timestamps across page boundaries. Things should still
+# be able to work across restarts with those transactions committed while
+# track_commit_timestamp is disabled.
+$node_master->safe_psql(
+ 'postgres',
+ qq(CREATE PROCEDURE consume_xid(cnt int)
+AS \$\$
+DECLARE
+ i int;
+ BEGIN
+ FOR i in 1..cnt LOOP
+ EXECUTE 'SELECT pg_current_xact_id()';
+ COMMIT;
+ END LOOP;
+ END;
+\$\$
+LANGUAGE plpgsql;
+));
+$node_master->safe_psql('postgres', 'CALL consume_xid(2000)');
+
+($ret, $stdout, $stderr) = $node_master->psql('postgres',
+ qq[SELECT pg_xact_commit_timestamp('$xid');]);
+is($ret, 3, 'no commit timestamp from enable tx when cts disabled');
+like(
+ $stderr,
+ qr/could not get commit timestamp data/,
+ 'expected error from enabled tx when committs disabled');
+
+# Do a tx while cts disabled
+my $xid_disabled = $node_master->safe_psql(
+ 'postgres', qq[
+ BEGIN;
+ INSERT INTO committs_test(x, y) VALUES (2, current_timestamp);
+ SELECT pg_current_xact_id();
+ COMMIT;
+]);
+
+# Should be inaccessible
+($ret, $stdout, $stderr) = $node_master->psql('postgres',
+ qq[SELECT pg_xact_commit_timestamp('$xid_disabled');]);
+is($ret, 3, 'no commit timestamp when disabled');
+like(
+ $stderr,
+ qr/could not get commit timestamp data/,
+ 'expected error from disabled tx when committs disabled');
+
+# Re-enable, restart and ensure we can still get the old timestamps
+$node_master->append_conf('postgresql.conf', 'track_commit_timestamp = on');
+
+# An immediate shutdown is used here. At next startup recovery will
+# replay transactions which committed when track_commit_timestamp was
+# disabled, and the facility should be able to work properly.
+$node_master->stop('immediate');
+$node_master->start;
+
+my $after_enable_ts = $node_master->safe_psql('postgres',
+ qq[SELECT pg_xact_commit_timestamp('$xid');]);
+is($after_enable_ts, '', 'timestamp of enabled tx null after re-enable');
+
+my $after_enable_disabled_ts = $node_master->safe_psql('postgres',
+ qq[SELECT pg_xact_commit_timestamp('$xid_disabled');]);
+is($after_enable_disabled_ts, '',
+ 'timestamp of disabled tx null after re-enable');
+
+$node_master->stop;
diff --git a/src/test/modules/dummy_index_am/.gitignore b/src/test/modules/dummy_index_am/.gitignore
new file mode 100644
index 0000000..44d119c
--- /dev/null
+++ b/src/test/modules/dummy_index_am/.gitignore
@@ -0,0 +1,3 @@
+# Generated subdirectories
+/log/
+/results/
diff --git a/src/test/modules/dummy_index_am/Makefile b/src/test/modules/dummy_index_am/Makefile
new file mode 100644
index 0000000..aaf544a
--- /dev/null
+++ b/src/test/modules/dummy_index_am/Makefile
@@ -0,0 +1,20 @@
+# src/test/modules/dummy_index_am/Makefile
+
+MODULES = dummy_index_am
+
+EXTENSION = dummy_index_am
+DATA = dummy_index_am--1.0.sql
+PGFILEDESC = "dummy_index_am - index access method template"
+
+REGRESS = reloptions
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/dummy_index_am
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/dummy_index_am/README b/src/test/modules/dummy_index_am/README
new file mode 100644
index 0000000..61510f0
--- /dev/null
+++ b/src/test/modules/dummy_index_am/README
@@ -0,0 +1,12 @@
+Dummy Index AM
+==============
+
+Dummy index AM is a module for testing any facility usable by an index
+access method, whose code is kept as simple as possible.
+
+This includes tests for all relation option types:
+- boolean
+- enum
+- integer
+- real
+- strings (with and without NULL as default)
diff --git a/src/test/modules/dummy_index_am/dummy_index_am--1.0.sql b/src/test/modules/dummy_index_am/dummy_index_am--1.0.sql
new file mode 100644
index 0000000..005863d
--- /dev/null
+++ b/src/test/modules/dummy_index_am/dummy_index_am--1.0.sql
@@ -0,0 +1,19 @@
+/* src/test/modules/dummy_index_am/dummy_index_am--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION dummy_index_am" to load this file. \quit
+
+CREATE FUNCTION dihandler(internal)
+RETURNS index_am_handler
+AS 'MODULE_PATHNAME'
+LANGUAGE C;
+
+-- Access method
+CREATE ACCESS METHOD dummy_index_am TYPE INDEX HANDLER dihandler;
+COMMENT ON ACCESS METHOD dummy_index_am IS 'dummy index access method';
+
+-- Operator classes
+CREATE OPERATOR CLASS int4_ops
+DEFAULT FOR TYPE int4 USING dummy_index_am AS
+ OPERATOR 1 = (int4, int4),
+ FUNCTION 1 hashint4(int4);
diff --git a/src/test/modules/dummy_index_am/dummy_index_am.c b/src/test/modules/dummy_index_am/dummy_index_am.c
new file mode 100644
index 0000000..e97a32d
--- /dev/null
+++ b/src/test/modules/dummy_index_am/dummy_index_am.c
@@ -0,0 +1,332 @@
+/*-------------------------------------------------------------------------
+ *
+ * dummy_index_am.c
+ * Index AM template main file.
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/test/modules/dummy_index_am/dummy_index_am.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/amapi.h"
+#include "access/reloptions.h"
+#include "catalog/index.h"
+#include "commands/vacuum.h"
+#include "nodes/pathnodes.h"
+#include "utils/guc.h"
+#include "utils/rel.h"
+
+PG_MODULE_MAGIC;
+
+void _PG_init(void);
+
+/* parse table for fillRelOptions */
+relopt_parse_elt di_relopt_tab[6];
+
+/* Kind of relation options for dummy index */
+relopt_kind di_relopt_kind;
+
+typedef enum DummyAmEnum
+{
+ DUMMY_AM_ENUM_ONE,
+ DUMMY_AM_ENUM_TWO
+} DummyAmEnum;
+
+/* Dummy index options */
+typedef struct DummyIndexOptions
+{
+ int32 vl_len_; /* varlena header (do not touch directly!) */
+ int option_int;
+ double option_real;
+ bool option_bool;
+ DummyAmEnum option_enum;
+ int option_string_val_offset;
+ int option_string_null_offset;
+} DummyIndexOptions;
+
+relopt_enum_elt_def dummyAmEnumValues[] =
+{
+ {"one", DUMMY_AM_ENUM_ONE},
+ {"two", DUMMY_AM_ENUM_TWO},
+ {(const char *) NULL} /* list terminator */
+};
+
+/* Handler for index AM */
+PG_FUNCTION_INFO_V1(dihandler);
+
+/*
+ * Validation function for string relation options.
+ */
+static void
+validate_string_option(const char *value)
+{
+ ereport(NOTICE,
+ (errmsg("new option value for string parameter %s",
+ value ? value : "NULL")));
+}
+
+/*
+ * This function creates a full set of relation option types,
+ * with various patterns.
+ */
+static void
+create_reloptions_table(void)
+{
+ di_relopt_kind = add_reloption_kind();
+
+ add_int_reloption(di_relopt_kind, "option_int",
+ "Integer option for dummy_index_am",
+ 10, -10, 100, AccessExclusiveLock);
+ di_relopt_tab[0].optname = "option_int";
+ di_relopt_tab[0].opttype = RELOPT_TYPE_INT;
+ di_relopt_tab[0].offset = offsetof(DummyIndexOptions, option_int);
+
+ add_real_reloption(di_relopt_kind, "option_real",
+ "Real option for dummy_index_am",
+ 3.1415, -10, 100, AccessExclusiveLock);
+ di_relopt_tab[1].optname = "option_real";
+ di_relopt_tab[1].opttype = RELOPT_TYPE_REAL;
+ di_relopt_tab[1].offset = offsetof(DummyIndexOptions, option_real);
+
+ add_bool_reloption(di_relopt_kind, "option_bool",
+ "Boolean option for dummy_index_am",
+ true, AccessExclusiveLock);
+ di_relopt_tab[2].optname = "option_bool";
+ di_relopt_tab[2].opttype = RELOPT_TYPE_BOOL;
+ di_relopt_tab[2].offset = offsetof(DummyIndexOptions, option_bool);
+
+ add_enum_reloption(di_relopt_kind, "option_enum",
+ "Enum option for dummy_index_am",
+ dummyAmEnumValues,
+ DUMMY_AM_ENUM_ONE,
+ "Valid values are \"one\" and \"two\".",
+ AccessExclusiveLock);
+ di_relopt_tab[3].optname = "option_enum";
+ di_relopt_tab[3].opttype = RELOPT_TYPE_ENUM;
+ di_relopt_tab[3].offset = offsetof(DummyIndexOptions, option_enum);
+
+ add_string_reloption(di_relopt_kind, "option_string_val",
+ "String option for dummy_index_am with non-NULL default",
+ "DefaultValue", &validate_string_option,
+ AccessExclusiveLock);
+ di_relopt_tab[4].optname = "option_string_val";
+ di_relopt_tab[4].opttype = RELOPT_TYPE_STRING;
+ di_relopt_tab[4].offset = offsetof(DummyIndexOptions,
+ option_string_val_offset);
+
+ /*
+ * String option for dummy_index_am with NULL default, and without
+ * description.
+ */
+ add_string_reloption(di_relopt_kind, "option_string_null",
+ NULL, /* description */
+ NULL, &validate_string_option,
+ AccessExclusiveLock);
+ di_relopt_tab[5].optname = "option_string_null";
+ di_relopt_tab[5].opttype = RELOPT_TYPE_STRING;
+ di_relopt_tab[5].offset = offsetof(DummyIndexOptions,
+ option_string_null_offset);
+}
+
+
+/*
+ * Build a new index.
+ */
+static IndexBuildResult *
+dibuild(Relation heap, Relation index, IndexInfo *indexInfo)
+{
+ IndexBuildResult *result;
+
+ result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
+
+ /* let's pretend that no tuples were scanned */
+ result->heap_tuples = 0;
+ /* and no index tuples were created (that is true) */
+ result->index_tuples = 0;
+
+ return result;
+}
+
+/*
+ * Build an empty index for the initialization fork.
+ */
+static void
+dibuildempty(Relation index)
+{
+ /* No need to build an init fork for a dummy index */
+}
+
+/*
+ * Insert new tuple to index AM.
+ */
+static bool
+diinsert(Relation index, Datum *values, bool *isnull,
+ ItemPointer ht_ctid, Relation heapRel,
+ IndexUniqueCheck checkUnique,
+ IndexInfo *indexInfo)
+{
+ /* nothing to do */
+ return false;
+}
+
+/*
+ * Bulk deletion of all index entries pointing to a set of table tuples.
+ */
+static IndexBulkDeleteResult *
+dibulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
+ IndexBulkDeleteCallback callback, void *callback_state)
+{
+ /*
+ * There is nothing to delete. Return NULL as there is nothing to pass to
+ * amvacuumcleanup.
+ */
+ return NULL;
+}
+
+/*
+ * Post-VACUUM cleanup for index AM.
+ */
+static IndexBulkDeleteResult *
+divacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
+{
+ /* Index has not been modified, so returning NULL is fine */
+ return NULL;
+}
+
+/*
+ * Estimate cost of index AM.
+ */
+static void
+dicostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
+ Cost *indexStartupCost, Cost *indexTotalCost,
+ Selectivity *indexSelectivity, double *indexCorrelation,
+ double *indexPages)
+{
+ /* Tell planner to never use this index! */
+ *indexStartupCost = 1.0e10;
+ *indexTotalCost = 1.0e10;
+
+ /* Do not care about the rest */
+ *indexSelectivity = 1;
+ *indexCorrelation = 0;
+ *indexPages = 1;
+}
+
+/*
+ * Parse relation options for index AM, returning a DummyIndexOptions
+ * structure filled with option values.
+ */
+static bytea *
+dioptions(Datum reloptions, bool validate)
+{
+ return (bytea *) build_reloptions(reloptions, validate,
+ di_relopt_kind,
+ sizeof(DummyIndexOptions),
+ di_relopt_tab, lengthof(di_relopt_tab));
+}
+
+/*
+ * Validator for index AM.
+ */
+static bool
+divalidate(Oid opclassoid)
+{
+ /* Index is dummy so we are happy with any opclass */
+ return true;
+}
+
+/*
+ * Begin scan of index AM.
+ */
+static IndexScanDesc
+dibeginscan(Relation r, int nkeys, int norderbys)
+{
+ IndexScanDesc scan;
+
+ /* Let's pretend we are doing something */
+ scan = RelationGetIndexScan(r, nkeys, norderbys);
+ return scan;
+}
+
+/*
+ * Rescan of index AM.
+ */
+static void
+direscan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
+ ScanKey orderbys, int norderbys)
+{
+ /* nothing to do */
+}
+
+/*
+ * End scan of index AM.
+ */
+static void
+diendscan(IndexScanDesc scan)
+{
+ /* nothing to do */
+}
+
+/*
+ * Index AM handler function: returns IndexAmRoutine with access method
+ * parameters and callbacks.
+ */
+Datum
+dihandler(PG_FUNCTION_ARGS)
+{
+ IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);
+
+ amroutine->amstrategies = 0;
+ amroutine->amsupport = 1;
+ amroutine->amcanorder = false;
+ amroutine->amcanorderbyop = false;
+ amroutine->amcanbackward = false;
+ amroutine->amcanunique = false;
+ amroutine->amcanmulticol = false;
+ amroutine->amoptionalkey = false;
+ amroutine->amsearcharray = false;
+ amroutine->amsearchnulls = false;
+ amroutine->amstorage = false;
+ amroutine->amclusterable = false;
+ amroutine->ampredlocks = false;
+ amroutine->amcanparallel = false;
+ amroutine->amcaninclude = false;
+ amroutine->amusemaintenanceworkmem = false;
+ amroutine->amparallelvacuumoptions = VACUUM_OPTION_NO_PARALLEL;
+ amroutine->amkeytype = InvalidOid;
+
+ amroutine->ambuild = dibuild;
+ amroutine->ambuildempty = dibuildempty;
+ amroutine->aminsert = diinsert;
+ amroutine->ambulkdelete = dibulkdelete;
+ amroutine->amvacuumcleanup = divacuumcleanup;
+ amroutine->amcanreturn = NULL;
+ amroutine->amcostestimate = dicostestimate;
+ amroutine->amoptions = dioptions;
+ amroutine->amproperty = NULL;
+ amroutine->ambuildphasename = NULL;
+ amroutine->amvalidate = divalidate;
+ amroutine->ambeginscan = dibeginscan;
+ amroutine->amrescan = direscan;
+ amroutine->amgettuple = NULL;
+ amroutine->amgetbitmap = NULL;
+ amroutine->amendscan = diendscan;
+ amroutine->ammarkpos = NULL;
+ amroutine->amrestrpos = NULL;
+ amroutine->amestimateparallelscan = NULL;
+ amroutine->aminitparallelscan = NULL;
+ amroutine->amparallelrescan = NULL;
+
+ PG_RETURN_POINTER(amroutine);
+}
+
+void
+_PG_init(void)
+{
+ create_reloptions_table();
+}
diff --git a/src/test/modules/dummy_index_am/dummy_index_am.control b/src/test/modules/dummy_index_am/dummy_index_am.control
new file mode 100644
index 0000000..77bdea0
--- /dev/null
+++ b/src/test/modules/dummy_index_am/dummy_index_am.control
@@ -0,0 +1,5 @@
+# dummy_index_am extension
+comment = 'dummy_index_am - index access method template'
+default_version = '1.0'
+module_pathname = '$libdir/dummy_index_am'
+relocatable = true
diff --git a/src/test/modules/dummy_index_am/expected/reloptions.out b/src/test/modules/dummy_index_am/expected/reloptions.out
new file mode 100644
index 0000000..c873a80
--- /dev/null
+++ b/src/test/modules/dummy_index_am/expected/reloptions.out
@@ -0,0 +1,145 @@
+-- Tests for relation options
+CREATE EXTENSION dummy_index_am;
+CREATE TABLE dummy_test_tab (i int4);
+-- Silence validation checks for strings
+SET client_min_messages TO 'warning';
+-- Test with default values.
+CREATE INDEX dummy_test_idx ON dummy_test_tab
+ USING dummy_index_am (i);
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ unnest
+--------
+(0 rows)
+
+DROP INDEX dummy_test_idx;
+-- Test with full set of options.
+-- Allow validation checks for strings, just for the index creation
+SET client_min_messages TO 'notice';
+CREATE INDEX dummy_test_idx ON dummy_test_tab
+ USING dummy_index_am (i) WITH (
+ option_bool = false,
+ option_int = 5,
+ option_real = 3.1,
+ option_enum = 'two',
+ option_string_val = NULL,
+ option_string_null = 'val');
+NOTICE: new option value for string parameter null
+NOTICE: new option value for string parameter val
+-- Silence again validation checks for strings until the end of the test.
+SET client_min_messages TO 'warning';
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ unnest
+------------------------
+ option_bool=false
+ option_int=5
+ option_real=3.1
+ option_enum=two
+ option_string_val=null
+ option_string_null=val
+(6 rows)
+
+-- ALTER INDEX .. SET
+ALTER INDEX dummy_test_idx SET (option_int = 10);
+ALTER INDEX dummy_test_idx SET (option_bool = true);
+ALTER INDEX dummy_test_idx SET (option_real = 3.2);
+ALTER INDEX dummy_test_idx SET (option_string_val = 'val2');
+ALTER INDEX dummy_test_idx SET (option_string_null = NULL);
+ALTER INDEX dummy_test_idx SET (option_enum = 'one');
+ALTER INDEX dummy_test_idx SET (option_enum = 'three');
+ERROR: invalid value for enum option "option_enum": three
+DETAIL: Valid values are "one" and "two".
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ unnest
+-------------------------
+ option_int=10
+ option_bool=true
+ option_real=3.2
+ option_string_val=val2
+ option_string_null=null
+ option_enum=one
+(6 rows)
+
+-- ALTER INDEX .. RESET
+ALTER INDEX dummy_test_idx RESET (option_int);
+ALTER INDEX dummy_test_idx RESET (option_bool);
+ALTER INDEX dummy_test_idx RESET (option_real);
+ALTER INDEX dummy_test_idx RESET (option_enum);
+ALTER INDEX dummy_test_idx RESET (option_string_val);
+ALTER INDEX dummy_test_idx RESET (option_string_null);
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ unnest
+--------
+(0 rows)
+
+-- Cross-type checks for reloption values
+-- Integer
+ALTER INDEX dummy_test_idx SET (option_int = 3.3); -- ok
+ALTER INDEX dummy_test_idx SET (option_int = true); -- error
+ERROR: invalid value for integer option "option_int": true
+ALTER INDEX dummy_test_idx SET (option_int = 'val3'); -- error
+ERROR: invalid value for integer option "option_int": val3
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ unnest
+----------------
+ option_int=3.3
+(1 row)
+
+ALTER INDEX dummy_test_idx RESET (option_int);
+-- Boolean
+ALTER INDEX dummy_test_idx SET (option_bool = 4); -- error
+ERROR: invalid value for boolean option "option_bool": 4
+ALTER INDEX dummy_test_idx SET (option_bool = 1); -- ok, as true
+ALTER INDEX dummy_test_idx SET (option_bool = 3.4); -- error
+ERROR: invalid value for boolean option "option_bool": 3.4
+ALTER INDEX dummy_test_idx SET (option_bool = 'val4'); -- error
+ERROR: invalid value for boolean option "option_bool": val4
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ unnest
+---------------
+ option_bool=1
+(1 row)
+
+ALTER INDEX dummy_test_idx RESET (option_bool);
+-- Float
+ALTER INDEX dummy_test_idx SET (option_real = 4); -- ok
+ALTER INDEX dummy_test_idx SET (option_real = true); -- error
+ERROR: invalid value for floating point option "option_real": true
+ALTER INDEX dummy_test_idx SET (option_real = 'val5'); -- error
+ERROR: invalid value for floating point option "option_real": val5
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ unnest
+---------------
+ option_real=4
+(1 row)
+
+ALTER INDEX dummy_test_idx RESET (option_real);
+-- Enum
+ALTER INDEX dummy_test_idx SET (option_enum = 'one'); -- ok
+ALTER INDEX dummy_test_idx SET (option_enum = 0); -- error
+ERROR: invalid value for enum option "option_enum": 0
+DETAIL: Valid values are "one" and "two".
+ALTER INDEX dummy_test_idx SET (option_enum = true); -- error
+ERROR: invalid value for enum option "option_enum": true
+DETAIL: Valid values are "one" and "two".
+ALTER INDEX dummy_test_idx SET (option_enum = 'three'); -- error
+ERROR: invalid value for enum option "option_enum": three
+DETAIL: Valid values are "one" and "two".
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ unnest
+-----------------
+ option_enum=one
+(1 row)
+
+ALTER INDEX dummy_test_idx RESET (option_enum);
+-- String
+ALTER INDEX dummy_test_idx SET (option_string_val = 4); -- ok
+ALTER INDEX dummy_test_idx SET (option_string_val = 3.5); -- ok
+ALTER INDEX dummy_test_idx SET (option_string_val = true); -- ok, as "true"
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ unnest
+------------------------
+ option_string_val=true
+(1 row)
+
+ALTER INDEX dummy_test_idx RESET (option_string_val);
+DROP INDEX dummy_test_idx;
diff --git a/src/test/modules/dummy_index_am/sql/reloptions.sql b/src/test/modules/dummy_index_am/sql/reloptions.sql
new file mode 100644
index 0000000..6749d76
--- /dev/null
+++ b/src/test/modules/dummy_index_am/sql/reloptions.sql
@@ -0,0 +1,83 @@
+-- Tests for relation options
+CREATE EXTENSION dummy_index_am;
+
+CREATE TABLE dummy_test_tab (i int4);
+
+-- Silence validation checks for strings
+SET client_min_messages TO 'warning';
+
+-- Test with default values.
+CREATE INDEX dummy_test_idx ON dummy_test_tab
+ USING dummy_index_am (i);
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+DROP INDEX dummy_test_idx;
+
+-- Test with full set of options.
+-- Allow validation checks for strings, just for the index creation
+SET client_min_messages TO 'notice';
+CREATE INDEX dummy_test_idx ON dummy_test_tab
+ USING dummy_index_am (i) WITH (
+ option_bool = false,
+ option_int = 5,
+ option_real = 3.1,
+ option_enum = 'two',
+ option_string_val = NULL,
+ option_string_null = 'val');
+-- Silence again validation checks for strings until the end of the test.
+SET client_min_messages TO 'warning';
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+
+-- ALTER INDEX .. SET
+ALTER INDEX dummy_test_idx SET (option_int = 10);
+ALTER INDEX dummy_test_idx SET (option_bool = true);
+ALTER INDEX dummy_test_idx SET (option_real = 3.2);
+ALTER INDEX dummy_test_idx SET (option_string_val = 'val2');
+ALTER INDEX dummy_test_idx SET (option_string_null = NULL);
+ALTER INDEX dummy_test_idx SET (option_enum = 'one');
+ALTER INDEX dummy_test_idx SET (option_enum = 'three');
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+
+-- ALTER INDEX .. RESET
+ALTER INDEX dummy_test_idx RESET (option_int);
+ALTER INDEX dummy_test_idx RESET (option_bool);
+ALTER INDEX dummy_test_idx RESET (option_real);
+ALTER INDEX dummy_test_idx RESET (option_enum);
+ALTER INDEX dummy_test_idx RESET (option_string_val);
+ALTER INDEX dummy_test_idx RESET (option_string_null);
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+
+-- Cross-type checks for reloption values
+-- Integer
+ALTER INDEX dummy_test_idx SET (option_int = 3.3); -- ok
+ALTER INDEX dummy_test_idx SET (option_int = true); -- error
+ALTER INDEX dummy_test_idx SET (option_int = 'val3'); -- error
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ALTER INDEX dummy_test_idx RESET (option_int);
+-- Boolean
+ALTER INDEX dummy_test_idx SET (option_bool = 4); -- error
+ALTER INDEX dummy_test_idx SET (option_bool = 1); -- ok, as true
+ALTER INDEX dummy_test_idx SET (option_bool = 3.4); -- error
+ALTER INDEX dummy_test_idx SET (option_bool = 'val4'); -- error
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ALTER INDEX dummy_test_idx RESET (option_bool);
+-- Float
+ALTER INDEX dummy_test_idx SET (option_real = 4); -- ok
+ALTER INDEX dummy_test_idx SET (option_real = true); -- error
+ALTER INDEX dummy_test_idx SET (option_real = 'val5'); -- error
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ALTER INDEX dummy_test_idx RESET (option_real);
+-- Enum
+ALTER INDEX dummy_test_idx SET (option_enum = 'one'); -- ok
+ALTER INDEX dummy_test_idx SET (option_enum = 0); -- error
+ALTER INDEX dummy_test_idx SET (option_enum = true); -- error
+ALTER INDEX dummy_test_idx SET (option_enum = 'three'); -- error
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ALTER INDEX dummy_test_idx RESET (option_enum);
+-- String
+ALTER INDEX dummy_test_idx SET (option_string_val = 4); -- ok
+ALTER INDEX dummy_test_idx SET (option_string_val = 3.5); -- ok
+ALTER INDEX dummy_test_idx SET (option_string_val = true); -- ok, as "true"
+SELECT unnest(reloptions) FROM pg_class WHERE relname = 'dummy_test_idx';
+ALTER INDEX dummy_test_idx RESET (option_string_val);
+
+DROP INDEX dummy_test_idx;
diff --git a/src/test/modules/dummy_seclabel/.gitignore b/src/test/modules/dummy_seclabel/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/dummy_seclabel/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/dummy_seclabel/Makefile b/src/test/modules/dummy_seclabel/Makefile
new file mode 100644
index 0000000..d93c964
--- /dev/null
+++ b/src/test/modules/dummy_seclabel/Makefile
@@ -0,0 +1,20 @@
+# src/test/modules/dummy_seclabel/Makefile
+
+MODULES = dummy_seclabel
+PGFILEDESC = "dummy_seclabel - regression testing of the SECURITY LABEL statement"
+
+EXTENSION = dummy_seclabel
+DATA = dummy_seclabel--1.0.sql
+
+REGRESS = dummy_seclabel
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/dummy_seclabel
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/dummy_seclabel/README b/src/test/modules/dummy_seclabel/README
new file mode 100644
index 0000000..a3fcbd7
--- /dev/null
+++ b/src/test/modules/dummy_seclabel/README
@@ -0,0 +1,41 @@
+The dummy_seclabel module exists only to support regression testing of
+the SECURITY LABEL statement. It is not intended to be used in production.
+
+Rationale
+=========
+
+The SECURITY LABEL statement allows the user to assign security labels to
+database objects; however, security labels can only be assigned when
+specifically allowed by a loadable module, so this module is provided to
+allow proper regression testing.
+
+Security label providers intended to be used in production will typically be
+dependent on a platform-specific feature such as SELinux. This module is
+platform-independent, and therefore better-suited to regression testing.
+
+Usage
+=====
+
+Here's a simple example of usage:
+
+# postgresql.conf
+shared_preload_libraries = 'dummy_seclabel'
+
+postgres=# CREATE TABLE t (a int, b text);
+CREATE TABLE
+postgres=# SECURITY LABEL ON TABLE t IS 'classified';
+SECURITY LABEL
+
+The dummy_seclabel module provides only four hardcoded
+labels: unclassified, classified,
+secret, and top secret.
+It does not allow any other strings as security labels.
+
+These labels are not used to enforce access controls. They are only used
+to check whether the SECURITY LABEL statement works as expected,
+or not.
+
+Author
+======
+
+KaiGai Kohei <kaigai@ak.jp.nec.com>
diff --git a/src/test/modules/dummy_seclabel/dummy_seclabel--1.0.sql b/src/test/modules/dummy_seclabel/dummy_seclabel--1.0.sql
new file mode 100644
index 0000000..5f3cb5b
--- /dev/null
+++ b/src/test/modules/dummy_seclabel/dummy_seclabel--1.0.sql
@@ -0,0 +1,8 @@
+/* src/test/modules/dummy_seclabel/dummy_seclabel--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION dummy_seclabel" to load this file. \quit
+
+CREATE FUNCTION dummy_seclabel_dummy()
+ RETURNS pg_catalog.void
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/dummy_seclabel/dummy_seclabel.c b/src/test/modules/dummy_seclabel/dummy_seclabel.c
new file mode 100644
index 0000000..695c7d6
--- /dev/null
+++ b/src/test/modules/dummy_seclabel/dummy_seclabel.c
@@ -0,0 +1,63 @@
+/*
+ * dummy_seclabel.c
+ *
+ * Dummy security label provider.
+ *
+ * This module does not provide anything worthwhile from a security
+ * perspective, but allows regression testing independent of platform-specific
+ * features like SELinux.
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ */
+#include "postgres.h"
+
+#include "commands/seclabel.h"
+#include "fmgr.h"
+#include "miscadmin.h"
+#include "utils/rel.h"
+
+PG_MODULE_MAGIC;
+
+/* Entrypoint of the module */
+void _PG_init(void);
+
+PG_FUNCTION_INFO_V1(dummy_seclabel_dummy);
+
+static void
+dummy_object_relabel(const ObjectAddress *object, const char *seclabel)
+{
+ if (seclabel == NULL ||
+ strcmp(seclabel, "unclassified") == 0 ||
+ strcmp(seclabel, "classified") == 0)
+ return;
+
+ if (strcmp(seclabel, "secret") == 0 ||
+ strcmp(seclabel, "top secret") == 0)
+ {
+ if (!superuser())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("only superuser can set '%s' label", seclabel)));
+ return;
+ }
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_NAME),
+ errmsg("'%s' is not a valid security label", seclabel)));
+}
+
+void
+_PG_init(void)
+{
+ register_label_provider("dummy", dummy_object_relabel);
+}
+
+/*
+ * This function is here just so that the extension is not completely empty
+ * and the dynamic library is loaded when CREATE EXTENSION runs.
+ */
+Datum
+dummy_seclabel_dummy(PG_FUNCTION_ARGS)
+{
+ PG_RETURN_VOID();
+}
diff --git a/src/test/modules/dummy_seclabel/dummy_seclabel.control b/src/test/modules/dummy_seclabel/dummy_seclabel.control
new file mode 100644
index 0000000..8c37272
--- /dev/null
+++ b/src/test/modules/dummy_seclabel/dummy_seclabel.control
@@ -0,0 +1,4 @@
+comment = 'Test code for SECURITY LABEL feature'
+default_version = '1.0'
+module_pathname = '$libdir/dummy_seclabel'
+relocatable = true
diff --git a/src/test/modules/dummy_seclabel/expected/dummy_seclabel.out b/src/test/modules/dummy_seclabel/expected/dummy_seclabel.out
new file mode 100644
index 0000000..b2d898a
--- /dev/null
+++ b/src/test/modules/dummy_seclabel/expected/dummy_seclabel.out
@@ -0,0 +1,117 @@
+--
+-- Test for facilities of security label
+--
+CREATE EXTENSION dummy_seclabel;
+-- initial setups
+SET client_min_messages TO 'warning';
+DROP ROLE IF EXISTS regress_dummy_seclabel_user1;
+DROP ROLE IF EXISTS regress_dummy_seclabel_user2;
+RESET client_min_messages;
+CREATE USER regress_dummy_seclabel_user1 WITH CREATEROLE;
+CREATE USER regress_dummy_seclabel_user2;
+CREATE TABLE dummy_seclabel_tbl1 (a int, b text);
+CREATE TABLE dummy_seclabel_tbl2 (x int, y text);
+CREATE VIEW dummy_seclabel_view1 AS SELECT * FROM dummy_seclabel_tbl2;
+CREATE FUNCTION dummy_seclabel_four() RETURNS integer AS $$SELECT 4$$ language sql;
+CREATE DOMAIN dummy_seclabel_domain AS text;
+ALTER TABLE dummy_seclabel_tbl1 OWNER TO regress_dummy_seclabel_user1;
+ALTER TABLE dummy_seclabel_tbl2 OWNER TO regress_dummy_seclabel_user2;
+--
+-- Test of SECURITY LABEL statement with a plugin
+--
+SET SESSION AUTHORIZATION regress_dummy_seclabel_user1;
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'classified'; -- OK
+SECURITY LABEL ON COLUMN dummy_seclabel_tbl1.a IS 'unclassified'; -- OK
+SECURITY LABEL ON COLUMN dummy_seclabel_tbl1 IS 'unclassified'; -- fail
+ERROR: column name must be qualified
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS '...invalid label...'; -- fail
+ERROR: '...invalid label...' is not a valid security label
+SECURITY LABEL FOR 'dummy' ON TABLE dummy_seclabel_tbl1 IS 'unclassified'; -- OK
+SECURITY LABEL FOR 'unknown_seclabel' ON TABLE dummy_seclabel_tbl1 IS 'classified'; -- fail
+ERROR: security label provider "unknown_seclabel" is not loaded
+SECURITY LABEL ON TABLE dummy_seclabel_tbl2 IS 'unclassified'; -- fail (not owner)
+ERROR: must be owner of table dummy_seclabel_tbl2
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'secret'; -- fail (not superuser)
+ERROR: only superuser can set 'secret' label
+SECURITY LABEL ON TABLE dummy_seclabel_tbl3 IS 'unclassified'; -- fail (not found)
+ERROR: relation "dummy_seclabel_tbl3" does not exist
+SET SESSION AUTHORIZATION regress_dummy_seclabel_user2;
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'unclassified'; -- fail
+ERROR: must be owner of table dummy_seclabel_tbl1
+SECURITY LABEL ON TABLE dummy_seclabel_tbl2 IS 'classified'; -- OK
+--
+-- Test for shared database object
+--
+SET SESSION AUTHORIZATION regress_dummy_seclabel_user1;
+SECURITY LABEL ON ROLE regress_dummy_seclabel_user1 IS 'classified'; -- OK
+SECURITY LABEL ON ROLE regress_dummy_seclabel_user1 IS '...invalid label...'; -- fail
+ERROR: '...invalid label...' is not a valid security label
+SECURITY LABEL FOR 'dummy' ON ROLE regress_dummy_seclabel_user2 IS 'unclassified'; -- OK
+SECURITY LABEL FOR 'unknown_seclabel' ON ROLE regress_dummy_seclabel_user1 IS 'unclassified'; -- fail
+ERROR: security label provider "unknown_seclabel" is not loaded
+SECURITY LABEL ON ROLE regress_dummy_seclabel_user1 IS 'secret'; -- fail (not superuser)
+ERROR: only superuser can set 'secret' label
+SECURITY LABEL ON ROLE regress_dummy_seclabel_user3 IS 'unclassified'; -- fail (not found)
+ERROR: role "regress_dummy_seclabel_user3" does not exist
+SET SESSION AUTHORIZATION regress_dummy_seclabel_user2;
+SECURITY LABEL ON ROLE regress_dummy_seclabel_user2 IS 'unclassified'; -- fail (not privileged)
+ERROR: must have CREATEROLE privilege
+RESET SESSION AUTHORIZATION;
+--
+-- Test for various types of object
+--
+RESET SESSION AUTHORIZATION;
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'top secret'; -- OK
+SECURITY LABEL ON VIEW dummy_seclabel_view1 IS 'classified'; -- OK
+SECURITY LABEL ON FUNCTION dummy_seclabel_four() IS 'classified'; -- OK
+SECURITY LABEL ON DOMAIN dummy_seclabel_domain IS 'classified'; -- OK
+CREATE SCHEMA dummy_seclabel_test;
+SECURITY LABEL ON SCHEMA dummy_seclabel_test IS 'unclassified'; -- OK
+SET client_min_messages = error;
+CREATE PUBLICATION dummy_pub;
+CREATE SUBSCRIPTION dummy_sub CONNECTION '' PUBLICATION foo WITH (connect = false, slot_name = NONE);
+RESET client_min_messages;
+SECURITY LABEL ON PUBLICATION dummy_pub IS 'classified';
+SECURITY LABEL ON SUBSCRIPTION dummy_sub IS 'classified';
+SELECT objtype, objname, provider, label FROM pg_seclabels
+ ORDER BY objtype, objname;
+ objtype | objname | provider | label
+--------------+------------------------------+----------+--------------
+ column | dummy_seclabel_tbl1.a | dummy | unclassified
+ domain | dummy_seclabel_domain | dummy | classified
+ function | dummy_seclabel_four() | dummy | classified
+ publication | dummy_pub | dummy | classified
+ role | regress_dummy_seclabel_user1 | dummy | classified
+ role | regress_dummy_seclabel_user2 | dummy | unclassified
+ schema | dummy_seclabel_test | dummy | unclassified
+ subscription | dummy_sub | dummy | classified
+ table | dummy_seclabel_tbl1 | dummy | top secret
+ table | dummy_seclabel_tbl2 | dummy | classified
+ view | dummy_seclabel_view1 | dummy | classified
+(11 rows)
+
+-- check for event trigger
+CREATE FUNCTION event_trigger_test()
+RETURNS event_trigger AS $$
+ BEGIN RAISE NOTICE 'event %: %', TG_EVENT, TG_TAG; END;
+$$ LANGUAGE plpgsql;
+CREATE EVENT TRIGGER always_start ON ddl_command_start
+EXECUTE PROCEDURE event_trigger_test();
+CREATE EVENT TRIGGER always_end ON ddl_command_end
+EXECUTE PROCEDURE event_trigger_test();
+CREATE EVENT TRIGGER always_drop ON sql_drop
+EXECUTE PROCEDURE event_trigger_test();
+CREATE EVENT TRIGGER always_rewrite ON table_rewrite
+EXECUTE PROCEDURE event_trigger_test();
+-- should trigger ddl_command_{start,end}
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'classified';
+NOTICE: event ddl_command_start: SECURITY LABEL
+NOTICE: event ddl_command_end: SECURITY LABEL
+-- clean up
+DROP EVENT TRIGGER always_start, always_end, always_drop, always_rewrite;
+DROP VIEW dummy_seclabel_view1;
+DROP TABLE dummy_seclabel_tbl1, dummy_seclabel_tbl2;
+DROP SUBSCRIPTION dummy_sub;
+DROP PUBLICATION dummy_pub;
+DROP ROLE regress_dummy_seclabel_user1;
+DROP ROLE regress_dummy_seclabel_user2;
diff --git a/src/test/modules/dummy_seclabel/sql/dummy_seclabel.sql b/src/test/modules/dummy_seclabel/sql/dummy_seclabel.sql
new file mode 100644
index 0000000..8c347b6
--- /dev/null
+++ b/src/test/modules/dummy_seclabel/sql/dummy_seclabel.sql
@@ -0,0 +1,115 @@
+--
+-- Test for facilities of security label
+--
+CREATE EXTENSION dummy_seclabel;
+
+-- initial setups
+SET client_min_messages TO 'warning';
+
+DROP ROLE IF EXISTS regress_dummy_seclabel_user1;
+DROP ROLE IF EXISTS regress_dummy_seclabel_user2;
+
+RESET client_min_messages;
+
+CREATE USER regress_dummy_seclabel_user1 WITH CREATEROLE;
+CREATE USER regress_dummy_seclabel_user2;
+
+CREATE TABLE dummy_seclabel_tbl1 (a int, b text);
+CREATE TABLE dummy_seclabel_tbl2 (x int, y text);
+CREATE VIEW dummy_seclabel_view1 AS SELECT * FROM dummy_seclabel_tbl2;
+CREATE FUNCTION dummy_seclabel_four() RETURNS integer AS $$SELECT 4$$ language sql;
+CREATE DOMAIN dummy_seclabel_domain AS text;
+
+ALTER TABLE dummy_seclabel_tbl1 OWNER TO regress_dummy_seclabel_user1;
+ALTER TABLE dummy_seclabel_tbl2 OWNER TO regress_dummy_seclabel_user2;
+
+--
+-- Test of SECURITY LABEL statement with a plugin
+--
+SET SESSION AUTHORIZATION regress_dummy_seclabel_user1;
+
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'classified'; -- OK
+SECURITY LABEL ON COLUMN dummy_seclabel_tbl1.a IS 'unclassified'; -- OK
+SECURITY LABEL ON COLUMN dummy_seclabel_tbl1 IS 'unclassified'; -- fail
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS '...invalid label...'; -- fail
+SECURITY LABEL FOR 'dummy' ON TABLE dummy_seclabel_tbl1 IS 'unclassified'; -- OK
+SECURITY LABEL FOR 'unknown_seclabel' ON TABLE dummy_seclabel_tbl1 IS 'classified'; -- fail
+SECURITY LABEL ON TABLE dummy_seclabel_tbl2 IS 'unclassified'; -- fail (not owner)
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'secret'; -- fail (not superuser)
+SECURITY LABEL ON TABLE dummy_seclabel_tbl3 IS 'unclassified'; -- fail (not found)
+
+SET SESSION AUTHORIZATION regress_dummy_seclabel_user2;
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'unclassified'; -- fail
+SECURITY LABEL ON TABLE dummy_seclabel_tbl2 IS 'classified'; -- OK
+
+--
+-- Test for shared database object
+--
+SET SESSION AUTHORIZATION regress_dummy_seclabel_user1;
+
+SECURITY LABEL ON ROLE regress_dummy_seclabel_user1 IS 'classified'; -- OK
+SECURITY LABEL ON ROLE regress_dummy_seclabel_user1 IS '...invalid label...'; -- fail
+SECURITY LABEL FOR 'dummy' ON ROLE regress_dummy_seclabel_user2 IS 'unclassified'; -- OK
+SECURITY LABEL FOR 'unknown_seclabel' ON ROLE regress_dummy_seclabel_user1 IS 'unclassified'; -- fail
+SECURITY LABEL ON ROLE regress_dummy_seclabel_user1 IS 'secret'; -- fail (not superuser)
+SECURITY LABEL ON ROLE regress_dummy_seclabel_user3 IS 'unclassified'; -- fail (not found)
+
+SET SESSION AUTHORIZATION regress_dummy_seclabel_user2;
+SECURITY LABEL ON ROLE regress_dummy_seclabel_user2 IS 'unclassified'; -- fail (not privileged)
+
+RESET SESSION AUTHORIZATION;
+
+--
+-- Test for various types of object
+--
+RESET SESSION AUTHORIZATION;
+
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'top secret'; -- OK
+SECURITY LABEL ON VIEW dummy_seclabel_view1 IS 'classified'; -- OK
+SECURITY LABEL ON FUNCTION dummy_seclabel_four() IS 'classified'; -- OK
+SECURITY LABEL ON DOMAIN dummy_seclabel_domain IS 'classified'; -- OK
+CREATE SCHEMA dummy_seclabel_test;
+SECURITY LABEL ON SCHEMA dummy_seclabel_test IS 'unclassified'; -- OK
+
+SET client_min_messages = error;
+CREATE PUBLICATION dummy_pub;
+CREATE SUBSCRIPTION dummy_sub CONNECTION '' PUBLICATION foo WITH (connect = false, slot_name = NONE);
+RESET client_min_messages;
+SECURITY LABEL ON PUBLICATION dummy_pub IS 'classified';
+SECURITY LABEL ON SUBSCRIPTION dummy_sub IS 'classified';
+
+SELECT objtype, objname, provider, label FROM pg_seclabels
+ ORDER BY objtype, objname;
+
+-- check for event trigger
+CREATE FUNCTION event_trigger_test()
+RETURNS event_trigger AS $$
+ BEGIN RAISE NOTICE 'event %: %', TG_EVENT, TG_TAG; END;
+$$ LANGUAGE plpgsql;
+
+CREATE EVENT TRIGGER always_start ON ddl_command_start
+EXECUTE PROCEDURE event_trigger_test();
+
+CREATE EVENT TRIGGER always_end ON ddl_command_end
+EXECUTE PROCEDURE event_trigger_test();
+
+CREATE EVENT TRIGGER always_drop ON sql_drop
+EXECUTE PROCEDURE event_trigger_test();
+
+CREATE EVENT TRIGGER always_rewrite ON table_rewrite
+EXECUTE PROCEDURE event_trigger_test();
+
+-- should trigger ddl_command_{start,end}
+SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'classified';
+
+-- clean up
+DROP EVENT TRIGGER always_start, always_end, always_drop, always_rewrite;
+
+DROP VIEW dummy_seclabel_view1;
+DROP TABLE dummy_seclabel_tbl1, dummy_seclabel_tbl2;
+
+DROP SUBSCRIPTION dummy_sub;
+DROP PUBLICATION dummy_pub;
+
+DROP ROLE regress_dummy_seclabel_user1;
+DROP ROLE regress_dummy_seclabel_user2;
diff --git a/src/test/modules/snapshot_too_old/.gitignore b/src/test/modules/snapshot_too_old/.gitignore
new file mode 100644
index 0000000..5cf29ed
--- /dev/null
+++ b/src/test/modules/snapshot_too_old/.gitignore
@@ -0,0 +1 @@
+/output_iso/
diff --git a/src/test/modules/snapshot_too_old/Makefile b/src/test/modules/snapshot_too_old/Makefile
new file mode 100644
index 0000000..dfb4537
--- /dev/null
+++ b/src/test/modules/snapshot_too_old/Makefile
@@ -0,0 +1,28 @@
+# src/test/modules/snapshot_too_old/Makefile
+
+# Note: because we don't tell the Makefile there are any regression tests,
+# we have to clean those result files explicitly
+EXTRA_CLEAN = $(pg_regress_clean_files)
+
+ISOLATION = sto_using_cursor sto_using_select sto_using_hash_index
+ISOLATION_OPTS = --temp-config $(top_srcdir)/src/test/modules/snapshot_too_old/sto.conf
+
+# Disabled because these tests require "old_snapshot_threshold" >= 0, which
+# typical installcheck users do not have (e.g. buildfarm clients).
+NO_INSTALLCHECK = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/snapshot_too_old
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
+
+# But it can nonetheless be very helpful to run tests on a preexisting
+# installation; allow that, but only if requested explicitly.
+installcheck-force:
+ $(pg_isolation_regress_installcheck) $(ISOLATION)
diff --git a/src/test/modules/snapshot_too_old/expected/sto_using_cursor.out b/src/test/modules/snapshot_too_old/expected/sto_using_cursor.out
new file mode 100644
index 0000000..4359bf2
--- /dev/null
+++ b/src/test/modules/snapshot_too_old/expected/sto_using_cursor.out
@@ -0,0 +1,95 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s1decl s1f1 s1sleep s1f2 s2u
+step s1decl: DECLARE cursor1 CURSOR FOR SELECT c FROM sto1;
+step s1f1: FETCH FIRST FROM cursor1;
+c
+-
+1
+(1 row)
+
+step s1sleep: SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold';
+setting|pg_sleep
+-------+--------
+ 0|
+(1 row)
+
+step s1f2: FETCH FIRST FROM cursor1;
+c
+-
+1
+(1 row)
+
+step s2u: UPDATE sto1 SET c = 1001 WHERE c = 1;
+
+starting permutation: s1decl s1f1 s1sleep s2u s1f2
+step s1decl: DECLARE cursor1 CURSOR FOR SELECT c FROM sto1;
+step s1f1: FETCH FIRST FROM cursor1;
+c
+-
+1
+(1 row)
+
+step s1sleep: SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold';
+setting|pg_sleep
+-------+--------
+ 0|
+(1 row)
+
+step s2u: UPDATE sto1 SET c = 1001 WHERE c = 1;
+step s1f2: FETCH FIRST FROM cursor1;
+ERROR: snapshot too old
+
+starting permutation: s1decl s1f1 s2u s1sleep s1f2
+step s1decl: DECLARE cursor1 CURSOR FOR SELECT c FROM sto1;
+step s1f1: FETCH FIRST FROM cursor1;
+c
+-
+1
+(1 row)
+
+step s2u: UPDATE sto1 SET c = 1001 WHERE c = 1;
+step s1sleep: SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold';
+setting|pg_sleep
+-------+--------
+ 0|
+(1 row)
+
+step s1f2: FETCH FIRST FROM cursor1;
+ERROR: snapshot too old
+
+starting permutation: s1decl s2u s1f1 s1sleep s1f2
+step s1decl: DECLARE cursor1 CURSOR FOR SELECT c FROM sto1;
+step s2u: UPDATE sto1 SET c = 1001 WHERE c = 1;
+step s1f1: FETCH FIRST FROM cursor1;
+c
+-
+1
+(1 row)
+
+step s1sleep: SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold';
+setting|pg_sleep
+-------+--------
+ 0|
+(1 row)
+
+step s1f2: FETCH FIRST FROM cursor1;
+ERROR: snapshot too old
+
+starting permutation: s2u s1decl s1f1 s1sleep s1f2
+step s2u: UPDATE sto1 SET c = 1001 WHERE c = 1;
+step s1decl: DECLARE cursor1 CURSOR FOR SELECT c FROM sto1;
+step s1f1: FETCH FIRST FROM cursor1;
+c
+-
+2
+(1 row)
+
+step s1sleep: SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold';
+setting|pg_sleep
+-------+--------
+ 0|
+(1 row)
+
+step s1f2: FETCH FIRST FROM cursor1;
+ERROR: snapshot too old
diff --git a/src/test/modules/snapshot_too_old/expected/sto_using_hash_index.out b/src/test/modules/snapshot_too_old/expected/sto_using_hash_index.out
new file mode 100644
index 0000000..11f827f
--- /dev/null
+++ b/src/test/modules/snapshot_too_old/expected/sto_using_hash_index.out
@@ -0,0 +1,19 @@
+Parsed test spec with 2 sessions
+
+starting permutation: noseq s1f1 s2sleep s2u s1f2
+step noseq: SET enable_seqscan = false;
+step s1f1: SELECT c FROM sto1 where c = 1000;
+ c
+----
+1000
+(1 row)
+
+step s2sleep: SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold';
+setting|pg_sleep
+-------+--------
+ 0|
+(1 row)
+
+step s2u: UPDATE sto1 SET c = 1001 WHERE c = 1000;
+step s1f2: SELECT c FROM sto1 where c = 1001;
+ERROR: snapshot too old
diff --git a/src/test/modules/snapshot_too_old/expected/sto_using_select.out b/src/test/modules/snapshot_too_old/expected/sto_using_select.out
new file mode 100644
index 0000000..3067e05
--- /dev/null
+++ b/src/test/modules/snapshot_too_old/expected/sto_using_select.out
@@ -0,0 +1,73 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s1f1 s1sleep s1f2 s2u
+step s1f1: SELECT c FROM sto1 ORDER BY c LIMIT 1;
+c
+-
+1
+(1 row)
+
+step s1sleep: SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold';
+setting|pg_sleep
+-------+--------
+ 0|
+(1 row)
+
+step s1f2: SELECT c FROM sto1 ORDER BY c LIMIT 1;
+c
+-
+1
+(1 row)
+
+step s2u: UPDATE sto1 SET c = 1001 WHERE c = 1;
+
+starting permutation: s1f1 s1sleep s2u s1f2
+step s1f1: SELECT c FROM sto1 ORDER BY c LIMIT 1;
+c
+-
+1
+(1 row)
+
+step s1sleep: SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold';
+setting|pg_sleep
+-------+--------
+ 0|
+(1 row)
+
+step s2u: UPDATE sto1 SET c = 1001 WHERE c = 1;
+step s1f2: SELECT c FROM sto1 ORDER BY c LIMIT 1;
+ERROR: snapshot too old
+
+starting permutation: s1f1 s2u s1sleep s1f2
+step s1f1: SELECT c FROM sto1 ORDER BY c LIMIT 1;
+c
+-
+1
+(1 row)
+
+step s2u: UPDATE sto1 SET c = 1001 WHERE c = 1;
+step s1sleep: SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold';
+setting|pg_sleep
+-------+--------
+ 0|
+(1 row)
+
+step s1f2: SELECT c FROM sto1 ORDER BY c LIMIT 1;
+ERROR: snapshot too old
+
+starting permutation: s2u s1f1 s1sleep s1f2
+step s2u: UPDATE sto1 SET c = 1001 WHERE c = 1;
+step s1f1: SELECT c FROM sto1 ORDER BY c LIMIT 1;
+c
+-
+2
+(1 row)
+
+step s1sleep: SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold';
+setting|pg_sleep
+-------+--------
+ 0|
+(1 row)
+
+step s1f2: SELECT c FROM sto1 ORDER BY c LIMIT 1;
+ERROR: snapshot too old
diff --git a/src/test/modules/snapshot_too_old/specs/sto_using_cursor.spec b/src/test/modules/snapshot_too_old/specs/sto_using_cursor.spec
new file mode 100644
index 0000000..eac18ca
--- /dev/null
+++ b/src/test/modules/snapshot_too_old/specs/sto_using_cursor.spec
@@ -0,0 +1,37 @@
+# This test provokes a "snapshot too old" error using a cursor.
+#
+# The sleep is needed because with a threshold of zero a statement could error
+# on changes it made. With more normal settings no external delay is needed,
+# but we don't want these tests to run long enough to see that, since
+# granularity is in minutes.
+#
+# Since results depend on the value of old_snapshot_threshold, sneak that into
+# the line generated by the sleep, so that a surprising value isn't so hard
+# to identify.
+
+setup
+{
+ CREATE TABLE sto1 (c int NOT NULL);
+ INSERT INTO sto1 SELECT generate_series(1, 1000);
+ CREATE TABLE sto2 (c int NOT NULL);
+}
+setup
+{
+ VACUUM ANALYZE sto1;
+}
+
+teardown
+{
+ DROP TABLE sto1, sto2;
+}
+
+session "s1"
+setup { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step "s1decl" { DECLARE cursor1 CURSOR FOR SELECT c FROM sto1; }
+step "s1f1" { FETCH FIRST FROM cursor1; }
+step "s1sleep" { SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold'; }
+step "s1f2" { FETCH FIRST FROM cursor1; }
+teardown { COMMIT; }
+
+session "s2"
+step "s2u" { UPDATE sto1 SET c = 1001 WHERE c = 1; }
diff --git a/src/test/modules/snapshot_too_old/specs/sto_using_hash_index.spec b/src/test/modules/snapshot_too_old/specs/sto_using_hash_index.spec
new file mode 100644
index 0000000..33d91ff
--- /dev/null
+++ b/src/test/modules/snapshot_too_old/specs/sto_using_hash_index.spec
@@ -0,0 +1,31 @@
+# This test is like sto_using_select, except that we test access via a
+# hash index.
+
+setup
+{
+ CREATE TABLE sto1 (c int NOT NULL);
+ INSERT INTO sto1 SELECT generate_series(1, 1000);
+ CREATE INDEX idx_sto1 ON sto1 USING HASH (c);
+}
+setup
+{
+ VACUUM ANALYZE sto1;
+}
+
+teardown
+{
+ DROP TABLE sto1;
+}
+
+session "s1"
+setup { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step "noseq" { SET enable_seqscan = false; }
+step "s1f1" { SELECT c FROM sto1 where c = 1000; }
+step "s1f2" { SELECT c FROM sto1 where c = 1001; }
+teardown { ROLLBACK; }
+
+session "s2"
+step "s2sleep" { SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold'; }
+step "s2u" { UPDATE sto1 SET c = 1001 WHERE c = 1000; }
+
+permutation "noseq" "s1f1" "s2sleep" "s2u" "s1f2"
diff --git a/src/test/modules/snapshot_too_old/specs/sto_using_select.spec b/src/test/modules/snapshot_too_old/specs/sto_using_select.spec
new file mode 100644
index 0000000..d7c34f3
--- /dev/null
+++ b/src/test/modules/snapshot_too_old/specs/sto_using_select.spec
@@ -0,0 +1,36 @@
+# This test provokes a "snapshot too old" error using SELECT statements.
+#
+# The sleep is needed because with a threshold of zero a statement could error
+# on changes it made. With more normal settings no external delay is needed,
+# but we don't want these tests to run long enough to see that, since
+# granularity is in minutes.
+#
+# Since results depend on the value of old_snapshot_threshold, sneak that into
+# the line generated by the sleep, so that a surprising value isn't so hard
+# to identify.
+
+setup
+{
+ CREATE TABLE sto1 (c int NOT NULL);
+ INSERT INTO sto1 SELECT generate_series(1, 1000);
+ CREATE TABLE sto2 (c int NOT NULL);
+}
+setup
+{
+ VACUUM ANALYZE sto1;
+}
+
+teardown
+{
+ DROP TABLE sto1, sto2;
+}
+
+session "s1"
+setup { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step "s1f1" { SELECT c FROM sto1 ORDER BY c LIMIT 1; }
+step "s1sleep" { SELECT setting, pg_sleep(6) FROM pg_settings WHERE name = 'old_snapshot_threshold'; }
+step "s1f2" { SELECT c FROM sto1 ORDER BY c LIMIT 1; }
+teardown { COMMIT; }
+
+session "s2"
+step "s2u" { UPDATE sto1 SET c = 1001 WHERE c = 1; }
diff --git a/src/test/modules/snapshot_too_old/sto.conf b/src/test/modules/snapshot_too_old/sto.conf
new file mode 100644
index 0000000..7eeaeeb
--- /dev/null
+++ b/src/test/modules/snapshot_too_old/sto.conf
@@ -0,0 +1,2 @@
+autovacuum = off
+old_snapshot_threshold = 0
diff --git a/src/test/modules/ssl_passphrase_callback/.gitignore b/src/test/modules/ssl_passphrase_callback/.gitignore
new file mode 100644
index 0000000..1dbadf7
--- /dev/null
+++ b/src/test/modules/ssl_passphrase_callback/.gitignore
@@ -0,0 +1 @@
+tmp_check
diff --git a/src/test/modules/ssl_passphrase_callback/Makefile b/src/test/modules/ssl_passphrase_callback/Makefile
new file mode 100644
index 0000000..f81265c
--- /dev/null
+++ b/src/test/modules/ssl_passphrase_callback/Makefile
@@ -0,0 +1,40 @@
+# ssl_passphrase_callback Makefile
+
+export with_openssl
+
+MODULE_big = ssl_passphrase_func
+OBJS = ssl_passphrase_func.o $(WIN32RES)
+PGFILEDESC = "callback function to provide a passphrase"
+
+TAP_TESTS = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/ssl_passphrase_callback
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
+
+SHLIB_LINK += $(filter -lssl -lcrypto -lssleay32 -leay32, $(LIBS))
+
+# Targets to generate or remove the ssl certificate and key
+# Normally not needed. Don't run these targets in a vpath build, the results
+# won't be in the right place if you do.
+
+# needs to agree with what's in the test script
+PASS = FooBaR1
+
+.PHONY: ssl-files ssl-files-clean
+
+ssl-files:
+ openssl req -new -x509 -days 10000 -nodes -out server.crt \
+ -keyout server.ckey -subj "/CN=localhost"
+ openssl rsa -aes256 -in server.ckey -out server.key -passout pass:$(PASS)
+ rm server.ckey
+
+ssl-files-clean:
+ rm -f server.crt server.key
diff --git a/src/test/modules/ssl_passphrase_callback/server.crt b/src/test/modules/ssl_passphrase_callback/server.crt
new file mode 100644
index 0000000..b3c4be4
--- /dev/null
+++ b/src/test/modules/ssl_passphrase_callback/server.crt
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDCTCCAfGgAwIBAgIUfHgPLNys4V0d0cWrzRHqfs91LFMwDQYJKoZIhvcNAQEL
+BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIwMDMyMTE0MDM1OVoXDTQ3MDgw
+NzE0MDM1OVowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEA2j0PZwmeahBC7QpG7i9/VUVJrLzy+b8oVaqZUO6nlPbY
+wuPISYTO/jqc0XDfs/Gb0kccDJ6bPfNfvSnRTG1omE6OO9YjR0u3296l4bWAmYVq
+q4SesgQmm1Wy8ODNpeGaoBUwR51OB/gFHFjUlqAjRwOmrTCbDiAsLt7e+cx+W26r
+2SrJIweiSJsqaQsMMaqlY2qpHnYgWfqRUTqwXqlno0dXuqBt+KKgqeHMY3w3XS51
+8roOI0+Q9KWsexL/aYnLwMRsHRMZcthhzTK6HD/OrLh9CxURImr4ed9TtsNiZltA
+KqLTeGbtS1D2AvFqJU8n5DvtU+26wDrHu6pEM3kSJQIDAQABo1MwUTAdBgNVHQ4E
+FgQUkkfa08hDnxYs1UjG2ydCBJs1b2AwHwYDVR0jBBgwFoAUkkfa08hDnxYs1UjG
+2ydCBJs1b2AwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAjsJh
+p4tCopCA/Pvxupv3VEwGJ+nbH7Zg/hp+o2IWuHBOK1qrkyXBv34h/69bRnWZ5UFV
+HxQwL7CjNtjZu9SbpKkaHbZXPWANC9fbPKdBz9fAEwunf33KbZe3dPv/7xbJirMz
+e+j5V0LE0Spkr/p89LipXfjGw0jLC8VRTx/vKPnmbiBsCKw5SQKh3w7CcBx84Y6q
+Nc27WQ8ReR4W4X1zHGN6kEV4H+yPN2Z9OlSixTiSNvr2mtJQsZa7gK7Wwfm79RN7
+5Kf3l8b6e2BToJwLorpK9mvu41NtwRzl4UoJ1BFJDyhMplFMd8RcwTW6yT2biOFC
+lYCajcBoms3IiyqBog==
+-----END CERTIFICATE-----
diff --git a/src/test/modules/ssl_passphrase_callback/server.key b/src/test/modules/ssl_passphrase_callback/server.key
new file mode 100644
index 0000000..1475007
--- /dev/null
+++ b/src/test/modules/ssl_passphrase_callback/server.key
@@ -0,0 +1,30 @@
+-----BEGIN RSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: AES-256-CBC,DB0E7068D4DCE79FFE63C95B8D8F7CEA
+
+Y4uvnlWX/kyulqsmt8aWI55vKFdfJL4wEZItL8ZKlQFuZuxC9w0OworyjTdqO38R
+v9hwnetZBDgK8kEv6U7wR58mTfwHHCGuxYgSiPZtiW7btS4zu16ePdh8oBEzCxjW
+ALrCFt7uvRu5h2AWy/4BgV4gLNVPNB+lJFABtUoiSnUDr7+bcx7UjZ4An6HriGxC
+Kg/N1pKjT/xiKOy+yHtrp1Jih5HYDE4i99jPtMuTROf8Uyz7ibyrdd/E7QNvANQN
+Cmw4I4Xk4hZ68F0iGU0C0wLND3pWbeYPKorpo3PkI4Du+Aqlg15ae5u8CtU3fXGJ
+mq4/qLGAi1sr/45f5P5a3Q8BQpKkCmGopXMaFYOOiaf3YYgD1eVOxLhsCWqUB+O8
+ygcTNRCoKhzY+ULComXp880J3fFk5b92g4Hm1PAO42uDKzhWSnrmCBJ7ynXvnEc+
+JqhiE8Obrp6FBIHvfN26JtHcXTd/bgUMXSh7AXjsotfvPPV0URve9JJG+RnwckeT
+K3AYDOQK/lbqDGliNqHg1WiMSA2oHSqDhUMB0Sm0jh6+jxCQlsmSDvPvJfWRo5wY
+zbZZZARQnFUaHa9CZVdFxbaPGhYU6vAwxDqi42osSJEdf68Gy2KVXcelqpU/2dKk
+aHfTgAWOsajbgt9p+0369TeZb39+zLODdDJnvZYiu1pTASHP5VrJ2xHhu5zOdjXm
+GafYiPwYBM280wkIVQ0HsTX7BViU2R/7W3FqflXgQvBiraVQVwHyaX4bOU1a3rzg
+emHNLTCpRamT0i/D0tkEPgS42bWSVi9ko5Mn9yb+qToBjAOLVUOAOs9Bv3qxawhI
+XFbBDZ7DS59l2yV6eQkrG7DUCLDf4dv4WZeBnhrPe/Jg8HKcsKcJYV3cejZh8sgu
+XHeCU50+jpJDfTZVPW3TjZWmrTqStGwF1UFpj+tTsTcX+OHAY/shFs3bBZulAsMy
+5UWZWzyWHMWr/wbxW7dbhTb1gNmOgpQQz9dunSgcZ8umzSGLa0ZGmnQj9P/kZkQA
+RenuswH5O7CK/MDmf3J6svwyLt/jULmH26MZTcNu7igT6dj3VMSwkoQQaaQdtmzb
+glzN3uqf8qM+CEjV8dxlt8fv6KJV7gvoYfPAz+1pp5DVJBmRo/+b4e/d4QTV9iWS
+ScBYdonc9WXcrjmExX9+Wf/K/IKfLnKLIi2MZ3pwr1n7yY+dMeF6iREYSjFVIpZd
+MH3G9/SxTrqR7X/eHjwdv1UupYYyaDag8wpVn1RMCb0xYqh2/QP1k0pQycckL0WQ
+lieXibEuQhV/heXcqt83G6pGqLImc6YPYU46jdGpPIMyOK+ZSqJTHUWHfRMQTIMz
+varR2M3uhHvwUFzmvjLh/o6I3r0a0Rl1MztpYfjBV6MS4BKYfraWZ0kxCyV+e6tz
+O7vD0P5W2qm6b89Md3nqjUcbOM8AojcfBl3xpQrpSdgJ25YJBoJ9L2I2pIMNCK/x
+yDNEJl7yP87fdHfXZm2VoUXclDUYHyNys9Rtv9NSr+VNkIMcqrCHEgpAxwQQ5NsO
+/vOZe3wjhXXLyRO7Nh5W8jojw3xcb9c9avFUWUvM2BaS4vEYcItUoF4QuHohrCwk
+-----END RSA PRIVATE KEY-----
diff --git a/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c b/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c
new file mode 100644
index 0000000..563ff14
--- /dev/null
+++ b/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c
@@ -0,0 +1,90 @@
+/*-------------------------------------------------------------------------
+ *
+ * ssl_passphrase_func.c
+ *
+ * Loadable PostgreSQL module to fetch an ssl passphrase for the server cert
+ * instead of calling an external program. This implementation just hands
+ * back the configured password rot13'd.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include <float.h>
+#include <stdio.h>
+
+#include "libpq/libpq.h"
+#include "libpq/libpq-be.h"
+#include "utils/guc.h"
+
+PG_MODULE_MAGIC;
+
+void _PG_init(void);
+void _PG_fini(void);
+
+static char *ssl_passphrase = NULL;
+
+/* callback function */
+static int rot13_passphrase(char *buf, int size, int rwflag, void *userdata);
+
+/* hook function to set the callback */
+static void set_rot13(SSL_CTX *context, bool isServerStart);
+
+/*
+ * Module load callback
+ */
+void
+_PG_init(void)
+{
+ /* Define custom GUC variable. */
+ DefineCustomStringVariable("ssl_passphrase.passphrase",
+ "passphrase before transformation",
+ NULL,
+ &ssl_passphrase,
+ NULL,
+ PGC_SIGHUP,
+ 0, /* no flags required */
+ NULL,
+ NULL,
+ NULL);
+ if (ssl_passphrase)
+ openssl_tls_init_hook = set_rot13;
+}
+
+void
+_PG_fini(void)
+{
+ /* do nothing yet */
+}
+
+static void
+set_rot13(SSL_CTX *context, bool isServerStart)
+{
+ /* warn if the user has set ssl_passphrase_command */
+ if (ssl_passphrase_command[0])
+ ereport(WARNING,
+ (errmsg("ssl_passphrase_command setting ignored by ssl_passphrase_func module")));
+
+ SSL_CTX_set_default_passwd_cb(context, rot13_passphrase);
+}
+
+static int
+rot13_passphrase(char *buf, int size, int rwflag, void *userdata)
+{
+
+ Assert(ssl_passphrase != NULL);
+ StrNCpy(buf, ssl_passphrase, size);
+ for (char *p = buf; *p; p++)
+ {
+ char c = *p;
+
+ if ((c >= 'a' && c <= 'm') || (c >= 'A' && c <= 'M'))
+ *p = c + 13;
+ else if ((c >= 'n' && c <= 'z') || (c >= 'N' && c <= 'Z'))
+ *p = c - 13;
+ }
+
+ return strlen(buf);
+
+}
diff --git a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
new file mode 100644
index 0000000..dbc084f
--- /dev/null
+++ b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
@@ -0,0 +1,74 @@
# Test the ssl_passphrase_callback module: the passphrase hook must decrypt
# the server key at startup, warn when it overrides ssl_passphrase_command,
# and a wrong passphrase must prevent the server from starting.
use strict;
use warnings;

use File::Copy;

use TestLib;
use Test::More;
use PostgresNode;

unless (($ENV{with_openssl} || 'no') eq 'yes')
{
	plan skip_all => 'SSL not supported by this build';
}

# The module rot13s the GUC value before use, so configure the rot13'd form
# of the real key passphrase.
my $clearpass = "FooBaR1";
my $rot13pass = "SbbOnE1";

# see the Makefile for how the certificate and key have been generated

my $node = get_new_node('main');
$node->init;
$node->append_conf('postgresql.conf',
	"ssl_passphrase.passphrase = '$rot13pass'");
$node->append_conf('postgresql.conf',
	"shared_preload_libraries = 'ssl_passphrase_func'");
$node->append_conf('postgresql.conf', "ssl = 'on'");

my $ddir = $node->data_dir;

# install certificate and protected key
copy("server.crt", $ddir);
copy("server.key", $ddir);
chmod 0600, "$ddir/server.key";

$node->start;

# if the server is running we must have successfully transformed the passphrase
ok(-e "$ddir/postmaster.pid", "postgres started");

$node->stop('fast');

# should get a warning if ssl_passphrase_command is set
my $log = $node->rotate_logfile();

$node->append_conf('postgresql.conf',
	"ssl_passphrase_command = 'echo spl0tz'");

$node->start;

$node->stop('fast');

my $log_contents = slurp_file($log);

like(
	$log_contents,
	qr/WARNING.*ssl_passphrase_command setting ignored by ssl_passphrase_func module/,
	"ssl_passphrase_command set warning");

# set the wrong passphrase
$node->append_conf('postgresql.conf', "ssl_passphrase.passphrase = 'blurfl'");

# try to start the server again
my $ret = TestLib::system_log('pg_ctl', '-D', $node->data_dir, '-l',
	$node->logfile, 'start');


# with a bad passphrase the server should not start
# (system_log returns the nonzero exit status of the failed pg_ctl)
ok($ret, "pg_ctl fails with bad passphrase");
ok(!-e "$ddir/postmaster.pid", "postgres not started with bad passphrase");

# just in case
$node->stop('fast');

done_testing();
diff --git a/src/test/modules/test_bloomfilter/.gitignore b/src/test/modules/test_bloomfilter/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_bloomfilter/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_bloomfilter/Makefile b/src/test/modules/test_bloomfilter/Makefile
new file mode 100644
index 0000000..c8b7890
--- /dev/null
+++ b/src/test/modules/test_bloomfilter/Makefile
@@ -0,0 +1,23 @@
+# src/test/modules/test_bloomfilter/Makefile
+
+MODULE_big = test_bloomfilter
+OBJS = \
+ $(WIN32RES) \
+ test_bloomfilter.o
+PGFILEDESC = "test_bloomfilter - test code for Bloom filter library"
+
+EXTENSION = test_bloomfilter
+DATA = test_bloomfilter--1.0.sql
+
+REGRESS = test_bloomfilter
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_bloomfilter
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_bloomfilter/README b/src/test/modules/test_bloomfilter/README
new file mode 100644
index 0000000..4c05efe
--- /dev/null
+++ b/src/test/modules/test_bloomfilter/README
@@ -0,0 +1,68 @@
+test_bloomfilter overview
+=========================
+
+test_bloomfilter is a test harness module for testing Bloom filter library set
+membership operations. It consists of a single SQL-callable function,
+test_bloomfilter(), plus a regression test that calls test_bloomfilter().
+Membership tests are performed against a dataset that the test harness module
+generates.
+
+The test_bloomfilter() function displays instrumentation at DEBUG1 elog level
+(WARNING when the false positive rate exceeds a 1% threshold). This can be
+used to get a sense of the performance characteristics of the Postgres Bloom
+filter implementation under varied conditions.
+
+Bitset size
+-----------
+
+The main bloomfilter.c criteria for sizing its bitset is that the false
+positive rate should not exceed 2% when sufficient bloom_work_mem is available
+(and the caller-supplied estimate of the number of elements turns out to have
+been accurate). A 1% - 2% rate is currently assumed to be suitable for all
+Bloom filter callers.
+
+With an optimal K (number of hash functions), Bloom filters should only have a
+1% false positive rate with just 9.6 bits of memory per element. The Postgres
+implementation's 2% worst case guarantee exists because there is a need for
+some slop due to implementation inflexibility in bitset sizing. Since the
+bitset size is always actually kept to a power of two number of bits, callers
+can have their bloom_work_mem argument truncated down by almost half.
+In practice, callers that make a point of passing a bloom_work_mem that is an
+exact power of two bitset size (such as test_bloomfilter.c) will actually get
+the "9.6 bits per element" 1% false positive rate.
+
+Testing strategy
+----------------
+
+Our approach to regression testing is to test that a Bloom filter has only a 1%
+false positive rate for a single bitset size (2 ^ 23, or 1MB). We test a
+dataset with 838,861 elements, which works out at 10 bits of memory per
+element. We round up from 9.6 bits to 10 bits to make sure that we reliably
+get under 1% for regression testing. Note that a random seed is used in the
+regression tests because the exact false positive rate is inconsistent across
+platforms. Inconsistent hash function behavior is something that the
+regression tests need to be tolerant of anyway.
+
+test_bloomfilter() SQL-callable function
+========================================
+
+The SQL-callable function test_bloomfilter() provides the following arguments:
+
+* "power" is the power of two used to size the Bloom filter's bitset.
+
+The minimum valid argument value is 23 (2^23 bits), or 1MB of memory. The
+maximum valid argument value is 32, or 512MB of memory.
+
+* "nelements" is the number of elements to generate for testing purposes.
+
+* "seed" is a seed value for hashing.
+
+A value < 0 is interpreted as "use random seed". Varying the seed value (or
+specifying -1) should result in small variations in the total number of false
+positives.
+
+* "tests" is the number of tests to run.
+
+This may be increased when it's useful to perform many tests in an interactive
+session. It only makes sense to perform multiple tests when a random seed is
+used.
diff --git a/src/test/modules/test_bloomfilter/expected/test_bloomfilter.out b/src/test/modules/test_bloomfilter/expected/test_bloomfilter.out
new file mode 100644
index 0000000..21c0688
--- /dev/null
+++ b/src/test/modules/test_bloomfilter/expected/test_bloomfilter.out
@@ -0,0 +1,22 @@
+CREATE EXTENSION test_bloomfilter;
+-- See README for explanation of arguments:
+SELECT test_bloomfilter(power => 23,
+ nelements => 838861,
+ seed => -1,
+ tests => 1);
+ test_bloomfilter
+------------------
+
+(1 row)
+
+-- Equivalent "10 bits per element" tests for all possible bitset sizes:
+--
+-- SELECT test_bloomfilter(24, 1677722)
+-- SELECT test_bloomfilter(25, 3355443)
+-- SELECT test_bloomfilter(26, 6710886)
+-- SELECT test_bloomfilter(27, 13421773)
+-- SELECT test_bloomfilter(28, 26843546)
+-- SELECT test_bloomfilter(29, 53687091)
+-- SELECT test_bloomfilter(30, 107374182)
+-- SELECT test_bloomfilter(31, 214748365)
+-- SELECT test_bloomfilter(32, 429496730)
diff --git a/src/test/modules/test_bloomfilter/sql/test_bloomfilter.sql b/src/test/modules/test_bloomfilter/sql/test_bloomfilter.sql
new file mode 100644
index 0000000..9ec159c
--- /dev/null
+++ b/src/test/modules/test_bloomfilter/sql/test_bloomfilter.sql
@@ -0,0 +1,19 @@
+CREATE EXTENSION test_bloomfilter;
+
+-- See README for explanation of arguments:
+SELECT test_bloomfilter(power => 23,
+ nelements => 838861,
+ seed => -1,
+ tests => 1);
+
+-- Equivalent "10 bits per element" tests for all possible bitset sizes:
+--
+-- SELECT test_bloomfilter(24, 1677722)
+-- SELECT test_bloomfilter(25, 3355443)
+-- SELECT test_bloomfilter(26, 6710886)
+-- SELECT test_bloomfilter(27, 13421773)
+-- SELECT test_bloomfilter(28, 26843546)
+-- SELECT test_bloomfilter(29, 53687091)
+-- SELECT test_bloomfilter(30, 107374182)
+-- SELECT test_bloomfilter(31, 214748365)
+-- SELECT test_bloomfilter(32, 429496730)
diff --git a/src/test/modules/test_bloomfilter/test_bloomfilter--1.0.sql b/src/test/modules/test_bloomfilter/test_bloomfilter--1.0.sql
new file mode 100644
index 0000000..7682318
--- /dev/null
+++ b/src/test/modules/test_bloomfilter/test_bloomfilter--1.0.sql
@@ -0,0 +1,11 @@
+/* src/test/modules/test_bloomfilter/test_bloomfilter--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_bloomfilter" to load this file. \quit
+
+CREATE FUNCTION test_bloomfilter(power integer,
+ nelements bigint,
+ seed integer DEFAULT -1,
+ tests integer DEFAULT 1)
+RETURNS pg_catalog.void STRICT
+AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_bloomfilter/test_bloomfilter.c b/src/test/modules/test_bloomfilter/test_bloomfilter.c
new file mode 100644
index 0000000..2a3fe33
--- /dev/null
+++ b/src/test/modules/test_bloomfilter/test_bloomfilter.c
@@ -0,0 +1,138 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_bloomfilter.c
+ * Test false positive rate of Bloom filter.
+ *
+ * Copyright (c) 2018-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_bloomfilter/test_bloomfilter.c
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "lib/bloomfilter.h"
+#include "miscadmin.h"
+
+PG_MODULE_MAGIC;
+
+/* Fits decimal representation of PG_INT64_MIN + 2 bytes: */
+#define MAX_ELEMENT_BYTES 21
+/* False positive rate WARNING threshold (1%): */
+#define FPOSITIVE_THRESHOLD 0.01
+
+
+/*
+ * Populate an empty Bloom filter with "nelements" dummy strings.
+ */
+static void
+populate_with_dummy_strings(bloom_filter *filter, int64 nelements)
+{
+ char element[MAX_ELEMENT_BYTES];
+ int64 i;
+
+ for (i = 0; i < nelements; i++)
+ {
+ CHECK_FOR_INTERRUPTS();
+
+ snprintf(element, sizeof(element), "i" INT64_FORMAT, i);
+ bloom_add_element(filter, (unsigned char *) element, strlen(element));
+ }
+}
+
+/*
+ * Returns number of strings that are indicated as probably appearing in Bloom
+ * filter that were in fact never added by populate_with_dummy_strings().
+ * These are false positives.
+ */
+static int64
+nfalsepos_for_missing_strings(bloom_filter *filter, int64 nelements)
+{
+ char element[MAX_ELEMENT_BYTES];
+ int64 nfalsepos = 0;
+ int64 i;
+
+ for (i = 0; i < nelements; i++)
+ {
+ CHECK_FOR_INTERRUPTS();
+
+ snprintf(element, sizeof(element), "M" INT64_FORMAT, i);
+ if (!bloom_lacks_element(filter, (unsigned char *) element,
+ strlen(element)))
+ nfalsepos++;
+ }
+
+ return nfalsepos;
+}
+
+static void
+create_and_test_bloom(int power, int64 nelements, int callerseed)
+{
+ int bloom_work_mem;
+ uint64 seed;
+ int64 nfalsepos;
+ bloom_filter *filter;
+
+ bloom_work_mem = (1L << power) / 8L / 1024L;
+
+ elog(DEBUG1, "bloom_work_mem (KB): %d", bloom_work_mem);
+
+ /*
+ * Generate random seed, or use caller's. Seed should always be a
+ * positive value less than or equal to PG_INT32_MAX, to ensure that any
+ * random seed can be recreated through callerseed if the need arises.
+ * (Don't assume that RAND_MAX cannot exceed PG_INT32_MAX.)
+ */
+ seed = callerseed < 0 ? random() % PG_INT32_MAX : callerseed;
+
+ /* Create Bloom filter, populate it, and report on false positive rate */
+ filter = bloom_create(nelements, bloom_work_mem, seed);
+ populate_with_dummy_strings(filter, nelements);
+ nfalsepos = nfalsepos_for_missing_strings(filter, nelements);
+
+ ereport((nfalsepos > nelements * FPOSITIVE_THRESHOLD) ? WARNING : DEBUG1,
+ (errmsg_internal("seed: " UINT64_FORMAT " false positives: " INT64_FORMAT " (%.6f%%) bitset %.2f%% set",
+ seed, nfalsepos, (double) nfalsepos / nelements,
+ 100.0 * bloom_prop_bits_set(filter))));
+
+ bloom_free(filter);
+}
+
+PG_FUNCTION_INFO_V1(test_bloomfilter);
+
+/*
+ * SQL-callable entry point to perform all tests.
+ *
+ * If a 1% false positive threshold is not met, emits WARNINGs.
+ *
+ * See README for details of arguments.
+ */
+Datum
+test_bloomfilter(PG_FUNCTION_ARGS)
+{
+ int power = PG_GETARG_INT32(0);
+ int64 nelements = PG_GETARG_INT64(1);
+ int seed = PG_GETARG_INT32(2);
+ int tests = PG_GETARG_INT32(3);
+ int i;
+
+ if (power < 23 || power > 32)
+ elog(ERROR, "power argument must be between 23 and 32 inclusive");
+
+ if (tests <= 0)
+ elog(ERROR, "invalid number of tests: %d", tests);
+
+ if (nelements < 0)
+ elog(ERROR, "invalid number of elements: %d", tests);
+
+ for (i = 0; i < tests; i++)
+ {
+ elog(DEBUG1, "beginning test #%d...", i + 1);
+
+ create_and_test_bloom(power, nelements, seed);
+ }
+
+ PG_RETURN_VOID();
+}
diff --git a/src/test/modules/test_bloomfilter/test_bloomfilter.control b/src/test/modules/test_bloomfilter/test_bloomfilter.control
new file mode 100644
index 0000000..99e56ee
--- /dev/null
+++ b/src/test/modules/test_bloomfilter/test_bloomfilter.control
@@ -0,0 +1,4 @@
+comment = 'Test code for Bloom filter library'
+default_version = '1.0'
+module_pathname = '$libdir/test_bloomfilter'
+relocatable = true
diff --git a/src/test/modules/test_ddl_deparse/.gitignore b/src/test/modules/test_ddl_deparse/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_ddl_deparse/Makefile b/src/test/modules/test_ddl_deparse/Makefile
new file mode 100644
index 0000000..3a57a95
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/Makefile
@@ -0,0 +1,43 @@
+# src/test/modules/test_ddl_deparse/Makefile
+
+MODULES = test_ddl_deparse
+PGFILEDESC = "test_ddl_deparse - regression testing for DDL deparsing"
+
+EXTENSION = test_ddl_deparse
+DATA = test_ddl_deparse--1.0.sql
+
+# test_ddl_deparse must be first
+REGRESS = test_ddl_deparse \
+ create_extension \
+ create_schema \
+ create_type \
+ create_conversion \
+ create_domain \
+ create_sequence_1 \
+ create_table \
+ create_transform \
+ alter_table \
+ create_view \
+ create_trigger \
+ create_rule \
+ comment_on \
+ alter_function \
+ alter_sequence \
+ alter_ts_config \
+ alter_type_enum \
+ opfamily \
+ defprivs \
+ matviews
+
+EXTRA_INSTALL = contrib/pg_stat_statements
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_ddl_deparse
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_ddl_deparse/README b/src/test/modules/test_ddl_deparse/README
new file mode 100644
index 0000000..b12a129
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/README
@@ -0,0 +1,8 @@
+test_ddl_deparse is an example of how to use the pg_ddl_command datatype.
+It is not intended to do anything useful on its own; rather, it is a
+demonstration of how to use the datatype, and to provide some unit tests for
+it.
+
+The functions in this extension are intended to be able to process some
+part of the struct and produce some readable output, preferably handling
+all possible cases so that SQL test code can be written.
diff --git a/src/test/modules/test_ddl_deparse/expected/alter_extension.out b/src/test/modules/test_ddl_deparse/expected/alter_extension.out
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/alter_extension.out
diff --git a/src/test/modules/test_ddl_deparse/expected/alter_function.out b/src/test/modules/test_ddl_deparse/expected/alter_function.out
new file mode 100644
index 0000000..69a3742
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/alter_function.out
@@ -0,0 +1,15 @@
+--
+-- ALTER_FUNCTION
+--
+ALTER FUNCTION plpgsql_function_trigger_1 ()
+ SET SCHEMA foo;
+NOTICE: DDL test: type simple, tag ALTER FUNCTION
+ALTER FUNCTION foo.plpgsql_function_trigger_1()
+ COST 10;
+NOTICE: DDL test: type simple, tag ALTER FUNCTION
+CREATE ROLE regress_alter_function_role;
+ALTER FUNCTION plpgsql_function_trigger_2()
+ OWNER TO regress_alter_function_role;
+ERROR: function plpgsql_function_trigger_2() does not exist
+DROP OWNED BY regress_alter_function_role;
+DROP ROLE regress_alter_function_role;
diff --git a/src/test/modules/test_ddl_deparse/expected/alter_sequence.out b/src/test/modules/test_ddl_deparse/expected/alter_sequence.out
new file mode 100644
index 0000000..319f36f
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/alter_sequence.out
@@ -0,0 +1,15 @@
+--
+-- ALTER_SEQUENCE
+--
+ALTER SEQUENCE fkey_table_seq
+ MINVALUE 10
+ START 20
+ CACHE 1
+ NO CYCLE;
+NOTICE: DDL test: type simple, tag ALTER SEQUENCE
+ALTER SEQUENCE fkey_table_seq
+ RENAME TO fkey_table_seq_renamed;
+NOTICE: DDL test: type simple, tag ALTER SEQUENCE
+ALTER SEQUENCE fkey_table_seq_renamed
+ SET SCHEMA foo;
+NOTICE: DDL test: type simple, tag ALTER SEQUENCE
diff --git a/src/test/modules/test_ddl_deparse/expected/alter_table.out b/src/test/modules/test_ddl_deparse/expected/alter_table.out
new file mode 100644
index 0000000..141060f
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/alter_table.out
@@ -0,0 +1,29 @@
+CREATE TABLE parent (
+ a int
+);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+CREATE TABLE child () INHERITS (parent);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+CREATE TABLE grandchild () INHERITS (child);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+ALTER TABLE parent ADD COLUMN b serial;
+NOTICE: DDL test: type simple, tag CREATE SEQUENCE
+NOTICE: DDL test: type alter table, tag ALTER TABLE
+NOTICE: subcommand: ADD COLUMN (and recurse)
+NOTICE: DDL test: type simple, tag ALTER SEQUENCE
+ALTER TABLE parent RENAME COLUMN b TO c;
+NOTICE: DDL test: type simple, tag ALTER TABLE
+ALTER TABLE parent ADD CONSTRAINT a_pos CHECK (a > 0);
+NOTICE: DDL test: type alter table, tag ALTER TABLE
+NOTICE: subcommand: ADD CONSTRAINT (and recurse)
+CREATE TABLE part (
+ a int
+) PARTITION BY RANGE (a);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+CREATE TABLE part1 PARTITION OF part FOR VALUES FROM (1) to (100);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+ALTER TABLE part ADD PRIMARY KEY (a);
+NOTICE: DDL test: type alter table, tag ALTER TABLE
+NOTICE: subcommand: SET NOT NULL
+NOTICE: subcommand: SET NOT NULL
+NOTICE: subcommand: ADD INDEX
diff --git a/src/test/modules/test_ddl_deparse/expected/alter_ts_config.out b/src/test/modules/test_ddl_deparse/expected/alter_ts_config.out
new file mode 100644
index 0000000..afc352f
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/alter_ts_config.out
@@ -0,0 +1,8 @@
+--
+-- ALTER TEXT SEARCH CONFIGURATION
+--
+CREATE TEXT SEARCH CONFIGURATION en (copy=english);
+NOTICE: DDL test: type simple, tag CREATE TEXT SEARCH CONFIGURATION
+ALTER TEXT SEARCH CONFIGURATION en
+ ALTER MAPPING FOR host, email, url, sfloat WITH simple;
+NOTICE: DDL test: type alter text search configuration, tag ALTER TEXT SEARCH CONFIGURATION
diff --git a/src/test/modules/test_ddl_deparse/expected/alter_type_enum.out b/src/test/modules/test_ddl_deparse/expected/alter_type_enum.out
new file mode 100644
index 0000000..74107c2
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/alter_type_enum.out
@@ -0,0 +1,7 @@
+---
+--- ALTER_TYPE_ENUM
+---
+ALTER TYPE enum_test ADD VALUE 'zzz' AFTER 'baz';
+NOTICE: DDL test: type simple, tag ALTER TYPE
+ALTER TYPE enum_test ADD VALUE 'aaa' BEFORE 'foo';
+NOTICE: DDL test: type simple, tag ALTER TYPE
diff --git a/src/test/modules/test_ddl_deparse/expected/comment_on.out b/src/test/modules/test_ddl_deparse/expected/comment_on.out
new file mode 100644
index 0000000..129eff9
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/comment_on.out
@@ -0,0 +1,23 @@
+--
+-- COMMENT_ON
+--
+COMMENT ON SCHEMA foo IS 'This is schema foo';
+NOTICE: DDL test: type simple, tag COMMENT
+COMMENT ON TYPE enum_test IS 'ENUM test';
+NOTICE: DDL test: type simple, tag COMMENT
+COMMENT ON TYPE int2range IS 'RANGE test';
+NOTICE: DDL test: type simple, tag COMMENT
+COMMENT ON DOMAIN japanese_postal_code IS 'DOMAIN test';
+NOTICE: DDL test: type simple, tag COMMENT
+COMMENT ON SEQUENCE fkey_table_seq IS 'SEQUENCE test';
+NOTICE: DDL test: type simple, tag COMMENT
+COMMENT ON TABLE datatype_table IS 'This table should contain all native datatypes';
+NOTICE: DDL test: type simple, tag COMMENT
+COMMENT ON VIEW datatype_view IS 'This is a view';
+NOTICE: DDL test: type simple, tag COMMENT
+COMMENT ON FUNCTION c_function_test() IS 'FUNCTION test';
+ERROR: function c_function_test() does not exist
+COMMENT ON TRIGGER trigger_1 ON datatype_table IS 'TRIGGER test';
+NOTICE: DDL test: type simple, tag COMMENT
+COMMENT ON RULE rule_1 ON datatype_table IS 'RULE test';
+NOTICE: DDL test: type simple, tag COMMENT
diff --git a/src/test/modules/test_ddl_deparse/expected/create_conversion.out b/src/test/modules/test_ddl_deparse/expected/create_conversion.out
new file mode 100644
index 0000000..e8697cf
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_conversion.out
@@ -0,0 +1,6 @@
+---
+--- CREATE_CONVERSION
+---
+-- Simple test should suffice for this
+CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
+NOTICE: DDL test: type simple, tag CREATE CONVERSION
diff --git a/src/test/modules/test_ddl_deparse/expected/create_domain.out b/src/test/modules/test_ddl_deparse/expected/create_domain.out
new file mode 100644
index 0000000..2e7f585
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_domain.out
@@ -0,0 +1,11 @@
+---
+--- CREATE_DOMAIN
+---
+CREATE DOMAIN domainvarchar VARCHAR(5);
+NOTICE: DDL test: type simple, tag CREATE DOMAIN
+CREATE DOMAIN japanese_postal_code AS TEXT
+CHECK(
+ VALUE ~ '^\d{3}$'
+OR VALUE ~ '^\d{3}-\d{4}$'
+);
+NOTICE: DDL test: type simple, tag CREATE DOMAIN
diff --git a/src/test/modules/test_ddl_deparse/expected/create_extension.out b/src/test/modules/test_ddl_deparse/expected/create_extension.out
new file mode 100644
index 0000000..4042e02
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_extension.out
@@ -0,0 +1,5 @@
+---
+--- CREATE_EXTENSION
+---
+CREATE EXTENSION pg_stat_statements;
+NOTICE: DDL test: type simple, tag CREATE EXTENSION
diff --git a/src/test/modules/test_ddl_deparse/expected/create_function.out b/src/test/modules/test_ddl_deparse/expected/create_function.out
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_function.out
diff --git a/src/test/modules/test_ddl_deparse/expected/create_operator.out b/src/test/modules/test_ddl_deparse/expected/create_operator.out
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_operator.out
diff --git a/src/test/modules/test_ddl_deparse/expected/create_rule.out b/src/test/modules/test_ddl_deparse/expected/create_rule.out
new file mode 100644
index 0000000..fe3d047
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_rule.out
@@ -0,0 +1,30 @@
+---
+--- CREATE_RULE
+---
+CREATE RULE rule_1 AS
+ ON INSERT
+ TO datatype_table
+ DO NOTHING;
+NOTICE: DDL test: type simple, tag CREATE RULE
+CREATE RULE rule_2 AS
+ ON UPDATE
+ TO datatype_table
+ DO INSERT INTO unlogged_table (id) VALUES(NEW.id);
+NOTICE: DDL test: type simple, tag CREATE RULE
+CREATE RULE rule_3 AS
+ ON DELETE
+ TO datatype_table
+ DO ALSO NOTHING;
+NOTICE: DDL test: type simple, tag CREATE RULE
+CREATE RULE "_RETURN" AS
+ ON SELECT
+ TO like_datatype_table
+ DO INSTEAD
+ SELECT * FROM datatype_view;
+NOTICE: DDL test: type simple, tag CREATE RULE
+CREATE RULE rule_3 AS
+ ON DELETE
+ TO like_datatype_table
+ WHERE id < 100
+ DO ALSO NOTHING;
+NOTICE: DDL test: type simple, tag CREATE RULE
diff --git a/src/test/modules/test_ddl_deparse/expected/create_schema.out b/src/test/modules/test_ddl_deparse/expected/create_schema.out
new file mode 100644
index 0000000..8ab4eb0
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_schema.out
@@ -0,0 +1,19 @@
+--
+-- CREATE_SCHEMA
+--
+CREATE SCHEMA foo;
+NOTICE: DDL test: type simple, tag CREATE SCHEMA
+CREATE SCHEMA IF NOT EXISTS bar;
+NOTICE: DDL test: type simple, tag CREATE SCHEMA
+CREATE SCHEMA baz;
+NOTICE: DDL test: type simple, tag CREATE SCHEMA
+-- Will not be created, and will not be handled by the
+-- event trigger
+CREATE SCHEMA IF NOT EXISTS baz;
+NOTICE: schema "baz" already exists, skipping
+CREATE SCHEMA element_test
+ CREATE TABLE foo (id int)
+ CREATE VIEW bar AS SELECT * FROM foo;
+NOTICE: DDL test: type simple, tag CREATE SCHEMA
+NOTICE: DDL test: type simple, tag CREATE TABLE
+NOTICE: DDL test: type simple, tag CREATE VIEW
diff --git a/src/test/modules/test_ddl_deparse/expected/create_sequence_1.out b/src/test/modules/test_ddl_deparse/expected/create_sequence_1.out
new file mode 100644
index 0000000..5837ea4
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_sequence_1.out
@@ -0,0 +1,11 @@
+--
+-- CREATE_SEQUENCE
+--
+CREATE SEQUENCE fkey_table_seq
+ INCREMENT BY 1
+ MINVALUE 0
+ MAXVALUE 1000000
+ START 10
+ CACHE 10
+ CYCLE;
+NOTICE: DDL test: type simple, tag CREATE SEQUENCE
diff --git a/src/test/modules/test_ddl_deparse/expected/create_table.out b/src/test/modules/test_ddl_deparse/expected/create_table.out
new file mode 100644
index 0000000..0f2a2c1
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_table.out
@@ -0,0 +1,164 @@
+--
+-- CREATE_TABLE
+--
+-- Datatypes
+CREATE TABLE datatype_table (
+ id SERIAL,
+ id_big BIGSERIAL,
+ is_small SMALLSERIAL,
+ v_bytea BYTEA,
+ v_smallint SMALLINT,
+ v_int INT,
+ v_bigint BIGINT,
+ v_char CHAR(1),
+ v_varchar VARCHAR(10),
+ v_text TEXT,
+ v_bool BOOLEAN,
+ v_inet INET,
+ v_cidr CIDR,
+ v_macaddr MACADDR,
+ v_numeric NUMERIC(1,0),
+ v_real REAL,
+ v_float FLOAT(1),
+ v_float8 FLOAT8,
+ v_money MONEY,
+ v_tsquery TSQUERY,
+ v_tsvector TSVECTOR,
+ v_date DATE,
+ v_time TIME,
+ v_time_tz TIME WITH TIME ZONE,
+ v_timestamp TIMESTAMP,
+ v_timestamp_tz TIMESTAMP WITH TIME ZONE,
+ v_interval INTERVAL,
+ v_bit BIT,
+ v_bit4 BIT(4),
+ v_varbit VARBIT,
+ v_varbit4 VARBIT(4),
+ v_box BOX,
+ v_circle CIRCLE,
+ v_lseg LSEG,
+ v_path PATH,
+ v_point POINT,
+ v_polygon POLYGON,
+ v_json JSON,
+ v_xml XML,
+ v_uuid UUID,
+ v_pg_snapshot pg_snapshot,
+ v_enum ENUM_TEST,
+ v_postal_code japanese_postal_code,
+ v_int2range int2range,
+ PRIMARY KEY (id),
+ UNIQUE (id_big)
+);
+NOTICE: DDL test: type simple, tag CREATE SEQUENCE
+NOTICE: DDL test: type simple, tag CREATE SEQUENCE
+NOTICE: DDL test: type simple, tag CREATE SEQUENCE
+NOTICE: DDL test: type simple, tag CREATE TABLE
+NOTICE: DDL test: type simple, tag CREATE INDEX
+NOTICE: DDL test: type simple, tag CREATE INDEX
+NOTICE: DDL test: type simple, tag ALTER SEQUENCE
+NOTICE: DDL test: type simple, tag ALTER SEQUENCE
+NOTICE: DDL test: type simple, tag ALTER SEQUENCE
+-- Constraint definitions
+CREATE TABLE IF NOT EXISTS fkey_table (
+ id INT NOT NULL DEFAULT nextval('fkey_table_seq'::REGCLASS),
+ datatype_id INT NOT NULL REFERENCES datatype_table(id),
+ big_id BIGINT NOT NULL,
+ sometext TEXT COLLATE "POSIX",
+ check_col_1 INT NOT NULL CHECK(check_col_1 < 10),
+ check_col_2 INT NOT NULL,
+ PRIMARY KEY (id),
+ CONSTRAINT fkey_big_id
+ FOREIGN KEY (big_id)
+ REFERENCES datatype_table(id_big),
+ EXCLUDE USING btree (check_col_2 WITH =)
+);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+NOTICE: DDL test: type simple, tag CREATE INDEX
+NOTICE: DDL test: type simple, tag CREATE INDEX
+NOTICE: DDL test: type alter table, tag ALTER TABLE
+NOTICE: subcommand: ADD CONSTRAINT (and recurse)
+NOTICE: subcommand: ADD CONSTRAINT (and recurse)
+-- Typed table
+CREATE TABLE employees OF employee_type (
+ PRIMARY KEY (name),
+ salary WITH OPTIONS DEFAULT 1000
+);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+NOTICE: DDL test: type alter table, tag ALTER TABLE
+NOTICE: subcommand: SET NOT NULL
+NOTICE: DDL test: type simple, tag CREATE INDEX
+-- Inheritance
+CREATE TABLE person (
+ id INT NOT NULL PRIMARY KEY,
+ name text,
+ age int4,
+ location point
+);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+NOTICE: DDL test: type simple, tag CREATE INDEX
+CREATE TABLE emp (
+ salary int4,
+ manager name
+) INHERITS (person);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+CREATE TABLE student (
+ gpa float8
+) INHERITS (person);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+CREATE TABLE stud_emp (
+ percent int4
+) INHERITS (emp, student);
+NOTICE: merging multiple inherited definitions of column "id"
+NOTICE: merging multiple inherited definitions of column "name"
+NOTICE: merging multiple inherited definitions of column "age"
+NOTICE: merging multiple inherited definitions of column "location"
+NOTICE: DDL test: type simple, tag CREATE TABLE
+-- Storage parameters
+CREATE TABLE storage (
+ id INT
+) WITH (
+ fillfactor = 10,
+ autovacuum_enabled = FALSE
+);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+-- LIKE
+CREATE TABLE like_datatype_table (
+ LIKE datatype_table
+ EXCLUDING ALL
+);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+CREATE TABLE like_fkey_table (
+ LIKE fkey_table
+ INCLUDING DEFAULTS
+ INCLUDING INDEXES
+ INCLUDING STORAGE
+);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+NOTICE: DDL test: type alter table, tag ALTER TABLE
+NOTICE: subcommand: ALTER COLUMN SET DEFAULT (precooked)
+NOTICE: DDL test: type simple, tag CREATE INDEX
+NOTICE: DDL test: type simple, tag CREATE INDEX
+-- Volatile table types
+CREATE UNLOGGED TABLE unlogged_table (
+ id INT PRIMARY KEY
+);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+NOTICE: DDL test: type simple, tag CREATE INDEX
+CREATE TEMP TABLE temp_table (
+ id INT PRIMARY KEY
+);
+NOTICE: DDL test: type simple, tag CREATE TABLE
+NOTICE: DDL test: type simple, tag CREATE INDEX
+CREATE TEMP TABLE temp_table_commit_delete (
+ id INT PRIMARY KEY
+)
+ON COMMIT DELETE ROWS;
+NOTICE: DDL test: type simple, tag CREATE TABLE
+NOTICE: DDL test: type simple, tag CREATE INDEX
+CREATE TEMP TABLE temp_table_commit_drop (
+ id INT PRIMARY KEY
+)
+ON COMMIT DROP;
+NOTICE: DDL test: type simple, tag CREATE TABLE
+NOTICE: DDL test: type simple, tag CREATE INDEX
diff --git a/src/test/modules/test_ddl_deparse/expected/create_transform.out b/src/test/modules/test_ddl_deparse/expected/create_transform.out
new file mode 100644
index 0000000..5066051
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_transform.out
@@ -0,0 +1,15 @@
+--
+-- CREATE_TRANSFORM
+--
+-- Create a dummy transform
+-- The function FROM SQL should have internal as single argument as well
+-- as return type. The function TO SQL should have as single argument
+-- internal and as return argument the datatype of the transform done.
+-- We choose some random built-in functions that have the right signature.
+-- This won't actually be used, because the SQL function language
+-- doesn't implement transforms (there would be no point).
+CREATE TRANSFORM FOR int LANGUAGE SQL (
+ FROM SQL WITH FUNCTION prsd_lextype(internal),
+ TO SQL WITH FUNCTION int4recv(internal));
+NOTICE: DDL test: type simple, tag CREATE TRANSFORM
+DROP TRANSFORM FOR int LANGUAGE SQL;
diff --git a/src/test/modules/test_ddl_deparse/expected/create_trigger.out b/src/test/modules/test_ddl_deparse/expected/create_trigger.out
new file mode 100644
index 0000000..c89c847
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_trigger.out
@@ -0,0 +1,18 @@
+---
+--- CREATE_TRIGGER
+---
+CREATE FUNCTION plpgsql_function_trigger_1()
+ RETURNS TRIGGER
+ LANGUAGE plpgsql
+AS $$
+BEGIN
+ RETURN NEW;
+END;
+$$;
+NOTICE: DDL test: type simple, tag CREATE FUNCTION
+CREATE TRIGGER trigger_1
+ BEFORE INSERT OR UPDATE
+ ON datatype_table
+ FOR EACH ROW
+ EXECUTE PROCEDURE plpgsql_function_trigger_1();
+NOTICE: DDL test: type simple, tag CREATE TRIGGER
diff --git a/src/test/modules/test_ddl_deparse/expected/create_type.out b/src/test/modules/test_ddl_deparse/expected/create_type.out
new file mode 100644
index 0000000..dadbc8f
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_type.out
@@ -0,0 +1,24 @@
+---
+--- CREATE_TYPE
+---
+CREATE FUNCTION text_w_default_in(cstring)
+ RETURNS text_w_default
+ AS 'textin'
+ LANGUAGE internal STABLE STRICT;
+NOTICE: type "text_w_default" is not yet defined
+DETAIL: Creating a shell type definition.
+NOTICE: DDL test: type simple, tag CREATE FUNCTION
+CREATE FUNCTION text_w_default_out(text_w_default)
+ RETURNS cstring
+ AS 'textout'
+ LANGUAGE internal STABLE STRICT ;
+NOTICE: argument type text_w_default is only a shell
+NOTICE: DDL test: type simple, tag CREATE FUNCTION
+CREATE TYPE employee_type AS (name TEXT, salary NUMERIC);
+NOTICE: DDL test: type simple, tag CREATE TYPE
+CREATE TYPE enum_test AS ENUM ('foo', 'bar', 'baz');
+NOTICE: DDL test: type simple, tag CREATE TYPE
+CREATE TYPE int2range AS RANGE (
+ SUBTYPE = int2
+);
+NOTICE: DDL test: type simple, tag CREATE TYPE
diff --git a/src/test/modules/test_ddl_deparse/expected/create_view.out b/src/test/modules/test_ddl_deparse/expected/create_view.out
new file mode 100644
index 0000000..2ae4e2d
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/create_view.out
@@ -0,0 +1,19 @@
+--
+-- CREATE_VIEW
+--
+CREATE VIEW static_view AS
+ SELECT 'foo'::TEXT AS col;
+NOTICE: DDL test: type simple, tag CREATE VIEW
+CREATE OR REPLACE VIEW static_view AS
+ SELECT 'bar'::TEXT AS col;
+NOTICE: DDL test: type simple, tag CREATE VIEW
+NOTICE: DDL test: type alter table, tag CREATE VIEW
+NOTICE: subcommand: REPLACE RELOPTIONS
+CREATE VIEW datatype_view AS
+ SELECT * FROM datatype_table;
+NOTICE: DDL test: type simple, tag CREATE VIEW
+CREATE RECURSIVE VIEW nums_1_100 (n) AS
+ VALUES (1)
+UNION ALL
+ SELECT n+1 FROM nums_1_100 WHERE n < 100;
+NOTICE: DDL test: type simple, tag CREATE VIEW
diff --git a/src/test/modules/test_ddl_deparse/expected/defprivs.out b/src/test/modules/test_ddl_deparse/expected/defprivs.out
new file mode 100644
index 0000000..66b2680
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/defprivs.out
@@ -0,0 +1,6 @@
+--
+-- ALTER DEFAULT PRIVILEGES
+--
+ALTER DEFAULT PRIVILEGES IN SCHEMA public
+ REVOKE ALL PRIVILEGES ON TABLES FROM public;
+NOTICE: DDL test: type alter default privileges, tag ALTER DEFAULT PRIVILEGES
diff --git a/src/test/modules/test_ddl_deparse/expected/matviews.out b/src/test/modules/test_ddl_deparse/expected/matviews.out
new file mode 100644
index 0000000..69a5627
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/matviews.out
@@ -0,0 +1,8 @@
+--
+-- Materialized views
+--
+CREATE MATERIALIZED VIEW ddl_deparse_mv AS
+ SELECT * FROM datatype_table LIMIT 1 WITH NO DATA;
+NOTICE: DDL test: type simple, tag CREATE MATERIALIZED VIEW
+REFRESH MATERIALIZED VIEW ddl_deparse_mv;
+NOTICE: DDL test: type simple, tag REFRESH MATERIALIZED VIEW
diff --git a/src/test/modules/test_ddl_deparse/expected/opfamily.out b/src/test/modules/test_ddl_deparse/expected/opfamily.out
new file mode 100644
index 0000000..14bd603
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/opfamily.out
@@ -0,0 +1,67 @@
+-- copied from equivclass.sql
+create type int8alias1;
+NOTICE: DDL test: type simple, tag CREATE TYPE
+create function int8alias1in(cstring) returns int8alias1
+ strict immutable language internal as 'int8in';
+NOTICE: return type int8alias1 is only a shell
+NOTICE: DDL test: type simple, tag CREATE FUNCTION
+create function int8alias1out(int8alias1) returns cstring
+ strict immutable language internal as 'int8out';
+NOTICE: argument type int8alias1 is only a shell
+NOTICE: DDL test: type simple, tag CREATE FUNCTION
+create type int8alias1 (
+ input = int8alias1in,
+ output = int8alias1out,
+ like = int8
+);
+NOTICE: DDL test: type simple, tag CREATE TYPE
+create type int8alias2;
+NOTICE: DDL test: type simple, tag CREATE TYPE
+create function int8alias2in(cstring) returns int8alias2
+ strict immutable language internal as 'int8in';
+NOTICE: return type int8alias2 is only a shell
+NOTICE: DDL test: type simple, tag CREATE FUNCTION
+create function int8alias2out(int8alias2) returns cstring
+ strict immutable language internal as 'int8out';
+NOTICE: argument type int8alias2 is only a shell
+NOTICE: DDL test: type simple, tag CREATE FUNCTION
+create type int8alias2 (
+ input = int8alias2in,
+ output = int8alias2out,
+ like = int8
+);
+NOTICE: DDL test: type simple, tag CREATE TYPE
+create cast (int8 as int8alias1) without function;
+NOTICE: DDL test: type simple, tag CREATE CAST
+create cast (int8 as int8alias2) without function;
+NOTICE: DDL test: type simple, tag CREATE CAST
+create cast (int8alias1 as int8) without function;
+NOTICE: DDL test: type simple, tag CREATE CAST
+create cast (int8alias2 as int8) without function;
+NOTICE: DDL test: type simple, tag CREATE CAST
+create function int8alias1eq(int8alias1, int8alias1) returns bool
+ strict immutable language internal as 'int8eq';
+NOTICE: DDL test: type simple, tag CREATE FUNCTION
+create operator = (
+ procedure = int8alias1eq,
+ leftarg = int8alias1, rightarg = int8alias1,
+ commutator = =,
+ restrict = eqsel, join = eqjoinsel,
+ merges
+);
+NOTICE: DDL test: type simple, tag CREATE OPERATOR
+alter operator family integer_ops using btree add
+ operator 3 = (int8alias1, int8alias1);
+NOTICE: DDL test: type alter operator family, tag ALTER OPERATOR FAMILY
+-- copied from alter_table.sql
+create type ctype as (f1 int, f2 text);
+NOTICE: DDL test: type simple, tag CREATE TYPE
+create function same(ctype, ctype) returns boolean language sql
+as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2';
+NOTICE: DDL test: type simple, tag CREATE FUNCTION
+create operator =(procedure = same, leftarg = ctype, rightarg = ctype);
+NOTICE: DDL test: type simple, tag CREATE OPERATOR
+create operator class ctype_hash_ops
+ default for type ctype using hash as
+ operator 1 =(ctype, ctype);
+NOTICE: DDL test: type create operator class, tag CREATE OPERATOR CLASS
diff --git a/src/test/modules/test_ddl_deparse/expected/test_ddl_deparse.out b/src/test/modules/test_ddl_deparse/expected/test_ddl_deparse.out
new file mode 100644
index 0000000..4a5ea9e
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/expected/test_ddl_deparse.out
@@ -0,0 +1,40 @@
+CREATE EXTENSION test_ddl_deparse;
+CREATE OR REPLACE FUNCTION test_ddl_deparse()
+ RETURNS event_trigger LANGUAGE plpgsql AS
+$$
+DECLARE
+ r record;
+ r2 record;
+ cmdtype text;
+ objtype text;
+ tag text;
+BEGIN
+ FOR r IN SELECT * FROM pg_event_trigger_ddl_commands()
+ LOOP
+ -- verify that tags match
+ tag = public.get_command_tag(r.command);
+ IF tag <> r.command_tag THEN
+ RAISE NOTICE 'tag % doesn''t match %', tag, r.command_tag;
+ END IF;
+
+ -- log the operation
+ cmdtype = public.get_command_type(r.command);
+ IF cmdtype <> 'grant' THEN
+ RAISE NOTICE 'DDL test: type %, tag %', cmdtype, tag;
+ ELSE
+ RAISE NOTICE 'DDL test: type %, object type %', cmdtype, r.object_type;
+ END IF;
+
+ -- if alter table, log more
+ IF cmdtype = 'alter table' THEN
+ FOR r2 IN SELECT *
+ FROM unnest(public.get_altertable_subcmdtypes(r.command))
+ LOOP
+ RAISE NOTICE ' subcommand: %', r2.unnest;
+ END LOOP;
+ END IF;
+ END LOOP;
+END;
+$$;
+CREATE EVENT TRIGGER test_ddl_deparse
+ON ddl_command_end EXECUTE PROCEDURE test_ddl_deparse();
diff --git a/src/test/modules/test_ddl_deparse/sql/alter_function.sql b/src/test/modules/test_ddl_deparse/sql/alter_function.sql
new file mode 100644
index 0000000..45c8d1e
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/alter_function.sql
@@ -0,0 +1,17 @@
+--
+-- ALTER_FUNCTION
+--
+
+ALTER FUNCTION plpgsql_function_trigger_1 ()
+ SET SCHEMA foo;
+
+ALTER FUNCTION foo.plpgsql_function_trigger_1()
+ COST 10;
+
+CREATE ROLE regress_alter_function_role;
+
+ALTER FUNCTION plpgsql_function_trigger_2()
+ OWNER TO regress_alter_function_role;
+
+DROP OWNED BY regress_alter_function_role;
+DROP ROLE regress_alter_function_role;
diff --git a/src/test/modules/test_ddl_deparse/sql/alter_sequence.sql b/src/test/modules/test_ddl_deparse/sql/alter_sequence.sql
new file mode 100644
index 0000000..9b2799f
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/alter_sequence.sql
@@ -0,0 +1,15 @@
+--
+-- ALTER_SEQUENCE
+--
+
+ALTER SEQUENCE fkey_table_seq
+ MINVALUE 10
+ START 20
+ CACHE 1
+ NO CYCLE;
+
+ALTER SEQUENCE fkey_table_seq
+ RENAME TO fkey_table_seq_renamed;
+
+ALTER SEQUENCE fkey_table_seq_renamed
+ SET SCHEMA foo;
diff --git a/src/test/modules/test_ddl_deparse/sql/alter_table.sql b/src/test/modules/test_ddl_deparse/sql/alter_table.sql
new file mode 100644
index 0000000..dec53a0
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/alter_table.sql
@@ -0,0 +1,21 @@
+CREATE TABLE parent (
+ a int
+);
+
+CREATE TABLE child () INHERITS (parent);
+
+CREATE TABLE grandchild () INHERITS (child);
+
+ALTER TABLE parent ADD COLUMN b serial;
+
+ALTER TABLE parent RENAME COLUMN b TO c;
+
+ALTER TABLE parent ADD CONSTRAINT a_pos CHECK (a > 0);
+
+CREATE TABLE part (
+ a int
+) PARTITION BY RANGE (a);
+
+CREATE TABLE part1 PARTITION OF part FOR VALUES FROM (1) to (100);
+
+ALTER TABLE part ADD PRIMARY KEY (a);
diff --git a/src/test/modules/test_ddl_deparse/sql/alter_ts_config.sql b/src/test/modules/test_ddl_deparse/sql/alter_ts_config.sql
new file mode 100644
index 0000000..ac13e21
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/alter_ts_config.sql
@@ -0,0 +1,8 @@
+--
+-- ALTER TEXT SEARCH CONFIGURATION
+--
+
+CREATE TEXT SEARCH CONFIGURATION en (copy=english);
+
+ALTER TEXT SEARCH CONFIGURATION en
+ ALTER MAPPING FOR host, email, url, sfloat WITH simple;
diff --git a/src/test/modules/test_ddl_deparse/sql/alter_type_enum.sql b/src/test/modules/test_ddl_deparse/sql/alter_type_enum.sql
new file mode 100644
index 0000000..8999b38
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/alter_type_enum.sql
@@ -0,0 +1,6 @@
+---
+--- ALTER_TYPE_ENUM
+---
+
+ALTER TYPE enum_test ADD VALUE 'zzz' AFTER 'baz';
+ALTER TYPE enum_test ADD VALUE 'aaa' BEFORE 'foo';
diff --git a/src/test/modules/test_ddl_deparse/sql/comment_on.sql b/src/test/modules/test_ddl_deparse/sql/comment_on.sql
new file mode 100644
index 0000000..fc29a73
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/comment_on.sql
@@ -0,0 +1,14 @@
+--
+-- COMMENT_ON
+--
+
+COMMENT ON SCHEMA foo IS 'This is schema foo';
+COMMENT ON TYPE enum_test IS 'ENUM test';
+COMMENT ON TYPE int2range IS 'RANGE test';
+COMMENT ON DOMAIN japanese_postal_code IS 'DOMAIN test';
+COMMENT ON SEQUENCE fkey_table_seq IS 'SEQUENCE test';
+COMMENT ON TABLE datatype_table IS 'This table should contain all native datatypes';
+COMMENT ON VIEW datatype_view IS 'This is a view';
+COMMENT ON FUNCTION c_function_test() IS 'FUNCTION test';
+COMMENT ON TRIGGER trigger_1 ON datatype_table IS 'TRIGGER test';
+COMMENT ON RULE rule_1 ON datatype_table IS 'RULE test';
diff --git a/src/test/modules/test_ddl_deparse/sql/create_conversion.sql b/src/test/modules/test_ddl_deparse/sql/create_conversion.sql
new file mode 100644
index 0000000..813c66d
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_conversion.sql
@@ -0,0 +1,6 @@
+---
+--- CREATE_CONVERSION
+---
+
+-- Simple test should suffice for this
+CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
diff --git a/src/test/modules/test_ddl_deparse/sql/create_domain.sql b/src/test/modules/test_ddl_deparse/sql/create_domain.sql
new file mode 100644
index 0000000..6ab5525
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_domain.sql
@@ -0,0 +1,10 @@
+---
+--- CREATE_DOMAIN
+---
+CREATE DOMAIN domainvarchar VARCHAR(5);
+
+CREATE DOMAIN japanese_postal_code AS TEXT
+CHECK(
+ VALUE ~ '^\d{3}$'
+OR VALUE ~ '^\d{3}-\d{4}$'
+);
diff --git a/src/test/modules/test_ddl_deparse/sql/create_extension.sql b/src/test/modules/test_ddl_deparse/sql/create_extension.sql
new file mode 100644
index 0000000..d23e7fd
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_extension.sql
@@ -0,0 +1,5 @@
+---
+--- CREATE_EXTENSION
+---
+
+CREATE EXTENSION pg_stat_statements;
diff --git a/src/test/modules/test_ddl_deparse/sql/create_rule.sql b/src/test/modules/test_ddl_deparse/sql/create_rule.sql
new file mode 100644
index 0000000..60ac151
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_rule.sql
@@ -0,0 +1,31 @@
+---
+--- CREATE_RULE
+---
+
+
+CREATE RULE rule_1 AS
+ ON INSERT
+ TO datatype_table
+ DO NOTHING;
+
+CREATE RULE rule_2 AS
+ ON UPDATE
+ TO datatype_table
+ DO INSERT INTO unlogged_table (id) VALUES(NEW.id);
+
+CREATE RULE rule_3 AS
+ ON DELETE
+ TO datatype_table
+ DO ALSO NOTHING;
+
+CREATE RULE "_RETURN" AS
+ ON SELECT
+ TO like_datatype_table
+ DO INSTEAD
+ SELECT * FROM datatype_view;
+
+CREATE RULE rule_3 AS
+ ON DELETE
+ TO like_datatype_table
+ WHERE id < 100
+ DO ALSO NOTHING;
diff --git a/src/test/modules/test_ddl_deparse/sql/create_schema.sql b/src/test/modules/test_ddl_deparse/sql/create_schema.sql
new file mode 100644
index 0000000..f314dc2
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_schema.sql
@@ -0,0 +1,17 @@
+--
+-- CREATE_SCHEMA
+--
+
+CREATE SCHEMA foo;
+
+CREATE SCHEMA IF NOT EXISTS bar;
+
+CREATE SCHEMA baz;
+
+-- Will not be created, and will not be handled by the
+-- event trigger
+CREATE SCHEMA IF NOT EXISTS baz;
+
+CREATE SCHEMA element_test
+ CREATE TABLE foo (id int)
+ CREATE VIEW bar AS SELECT * FROM foo;
diff --git a/src/test/modules/test_ddl_deparse/sql/create_sequence_1.sql b/src/test/modules/test_ddl_deparse/sql/create_sequence_1.sql
new file mode 100644
index 0000000..9e6743f
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_sequence_1.sql
@@ -0,0 +1,11 @@
+--
+-- CREATE_SEQUENCE
+--
+
+CREATE SEQUENCE fkey_table_seq
+ INCREMENT BY 1
+ MINVALUE 0
+ MAXVALUE 1000000
+ START 10
+ CACHE 10
+ CYCLE;
diff --git a/src/test/modules/test_ddl_deparse/sql/create_table.sql b/src/test/modules/test_ddl_deparse/sql/create_table.sql
new file mode 100644
index 0000000..39cdb9d
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_table.sql
@@ -0,0 +1,142 @@
+--
+-- CREATE_TABLE
+--
+
+-- Datatypes
+CREATE TABLE datatype_table (
+ id SERIAL,
+ id_big BIGSERIAL,
+ is_small SMALLSERIAL,
+ v_bytea BYTEA,
+ v_smallint SMALLINT,
+ v_int INT,
+ v_bigint BIGINT,
+ v_char CHAR(1),
+ v_varchar VARCHAR(10),
+ v_text TEXT,
+ v_bool BOOLEAN,
+ v_inet INET,
+ v_cidr CIDR,
+ v_macaddr MACADDR,
+ v_numeric NUMERIC(1,0),
+ v_real REAL,
+ v_float FLOAT(1),
+ v_float8 FLOAT8,
+ v_money MONEY,
+ v_tsquery TSQUERY,
+ v_tsvector TSVECTOR,
+ v_date DATE,
+ v_time TIME,
+ v_time_tz TIME WITH TIME ZONE,
+ v_timestamp TIMESTAMP,
+ v_timestamp_tz TIMESTAMP WITH TIME ZONE,
+ v_interval INTERVAL,
+ v_bit BIT,
+ v_bit4 BIT(4),
+ v_varbit VARBIT,
+ v_varbit4 VARBIT(4),
+ v_box BOX,
+ v_circle CIRCLE,
+ v_lseg LSEG,
+ v_path PATH,
+ v_point POINT,
+ v_polygon POLYGON,
+ v_json JSON,
+ v_xml XML,
+ v_uuid UUID,
+ v_pg_snapshot pg_snapshot,
+ v_enum ENUM_TEST,
+ v_postal_code japanese_postal_code,
+ v_int2range int2range,
+ PRIMARY KEY (id),
+ UNIQUE (id_big)
+);
+
+-- Constraint definitions
+
+CREATE TABLE IF NOT EXISTS fkey_table (
+ id INT NOT NULL DEFAULT nextval('fkey_table_seq'::REGCLASS),
+ datatype_id INT NOT NULL REFERENCES datatype_table(id),
+ big_id BIGINT NOT NULL,
+ sometext TEXT COLLATE "POSIX",
+ check_col_1 INT NOT NULL CHECK(check_col_1 < 10),
+ check_col_2 INT NOT NULL,
+ PRIMARY KEY (id),
+ CONSTRAINT fkey_big_id
+ FOREIGN KEY (big_id)
+ REFERENCES datatype_table(id_big),
+ EXCLUDE USING btree (check_col_2 WITH =)
+);
+
+-- Typed table
+
+CREATE TABLE employees OF employee_type (
+ PRIMARY KEY (name),
+ salary WITH OPTIONS DEFAULT 1000
+);
+
+-- Inheritance
+CREATE TABLE person (
+ id INT NOT NULL PRIMARY KEY,
+ name text,
+ age int4,
+ location point
+);
+
+CREATE TABLE emp (
+ salary int4,
+ manager name
+) INHERITS (person);
+
+
+CREATE TABLE student (
+ gpa float8
+) INHERITS (person);
+
+CREATE TABLE stud_emp (
+ percent int4
+) INHERITS (emp, student);
+
+
+-- Storage parameters
+
+CREATE TABLE storage (
+ id INT
+) WITH (
+ fillfactor = 10,
+ autovacuum_enabled = FALSE
+);
+
+-- LIKE
+
+CREATE TABLE like_datatype_table (
+ LIKE datatype_table
+ EXCLUDING ALL
+);
+
+CREATE TABLE like_fkey_table (
+ LIKE fkey_table
+ INCLUDING DEFAULTS
+ INCLUDING INDEXES
+ INCLUDING STORAGE
+);
+
+
+-- Volatile table types
+CREATE UNLOGGED TABLE unlogged_table (
+ id INT PRIMARY KEY
+);
+
+CREATE TEMP TABLE temp_table (
+ id INT PRIMARY KEY
+);
+
+CREATE TEMP TABLE temp_table_commit_delete (
+ id INT PRIMARY KEY
+)
+ON COMMIT DELETE ROWS;
+
+CREATE TEMP TABLE temp_table_commit_drop (
+ id INT PRIMARY KEY
+)
+ON COMMIT DROP;
diff --git a/src/test/modules/test_ddl_deparse/sql/create_transform.sql b/src/test/modules/test_ddl_deparse/sql/create_transform.sql
new file mode 100644
index 0000000..970d89e
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_transform.sql
@@ -0,0 +1,16 @@
+--
+-- CREATE_TRANSFORM
+--
+
+-- Create a dummy transform
+-- The function FROM SQL should have internal as single argument as well
+-- as return type. The function TO SQL should have as single argument
+-- internal and as return argument the datatype of the transform done.
+-- We choose some random built-in functions that have the right signature.
+-- This won't actually be used, because the SQL function language
+-- doesn't implement transforms (there would be no point).
+CREATE TRANSFORM FOR int LANGUAGE SQL (
+ FROM SQL WITH FUNCTION prsd_lextype(internal),
+ TO SQL WITH FUNCTION int4recv(internal));
+
+DROP TRANSFORM FOR int LANGUAGE SQL;
diff --git a/src/test/modules/test_ddl_deparse/sql/create_trigger.sql b/src/test/modules/test_ddl_deparse/sql/create_trigger.sql
new file mode 100644
index 0000000..fc0aef7
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_trigger.sql
@@ -0,0 +1,18 @@
+---
+--- CREATE_TRIGGER
+---
+
+CREATE FUNCTION plpgsql_function_trigger_1()
+ RETURNS TRIGGER
+ LANGUAGE plpgsql
+AS $$
+BEGIN
+ RETURN NEW;
+END;
+$$;
+
+CREATE TRIGGER trigger_1
+ BEFORE INSERT OR UPDATE
+ ON datatype_table
+ FOR EACH ROW
+ EXECUTE PROCEDURE plpgsql_function_trigger_1();
diff --git a/src/test/modules/test_ddl_deparse/sql/create_type.sql b/src/test/modules/test_ddl_deparse/sql/create_type.sql
new file mode 100644
index 0000000..a387cfd
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_type.sql
@@ -0,0 +1,21 @@
+---
+--- CREATE_TYPE
+---
+
+CREATE FUNCTION text_w_default_in(cstring)
+ RETURNS text_w_default
+ AS 'textin'
+ LANGUAGE internal STABLE STRICT;
+
+CREATE FUNCTION text_w_default_out(text_w_default)
+ RETURNS cstring
+ AS 'textout'
+ LANGUAGE internal STABLE STRICT ;
+
+CREATE TYPE employee_type AS (name TEXT, salary NUMERIC);
+
+CREATE TYPE enum_test AS ENUM ('foo', 'bar', 'baz');
+
+CREATE TYPE int2range AS RANGE (
+ SUBTYPE = int2
+);
diff --git a/src/test/modules/test_ddl_deparse/sql/create_view.sql b/src/test/modules/test_ddl_deparse/sql/create_view.sql
new file mode 100644
index 0000000..030b76f
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/create_view.sql
@@ -0,0 +1,17 @@
+--
+-- CREATE_VIEW
+--
+
+CREATE VIEW static_view AS
+ SELECT 'foo'::TEXT AS col;
+
+CREATE OR REPLACE VIEW static_view AS
+ SELECT 'bar'::TEXT AS col;
+
+CREATE VIEW datatype_view AS
+ SELECT * FROM datatype_table;
+
+CREATE RECURSIVE VIEW nums_1_100 (n) AS
+ VALUES (1)
+UNION ALL
+ SELECT n+1 FROM nums_1_100 WHERE n < 100;
diff --git a/src/test/modules/test_ddl_deparse/sql/defprivs.sql b/src/test/modules/test_ddl_deparse/sql/defprivs.sql
new file mode 100644
index 0000000..a0fb4c2
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/defprivs.sql
@@ -0,0 +1,6 @@
+--
+-- ALTER DEFAULT PRIVILEGES
+--
+
+ALTER DEFAULT PRIVILEGES IN SCHEMA public
+ REVOKE ALL PRIVILEGES ON TABLES FROM public;
diff --git a/src/test/modules/test_ddl_deparse/sql/matviews.sql b/src/test/modules/test_ddl_deparse/sql/matviews.sql
new file mode 100644
index 0000000..6e22c52
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/matviews.sql
@@ -0,0 +1,8 @@
+--
+-- Materialized views
+--
+
+CREATE MATERIALIZED VIEW ddl_deparse_mv AS
+ SELECT * FROM datatype_table LIMIT 1 WITH NO DATA;
+
+REFRESH MATERIALIZED VIEW ddl_deparse_mv;
diff --git a/src/test/modules/test_ddl_deparse/sql/opfamily.sql b/src/test/modules/test_ddl_deparse/sql/opfamily.sql
new file mode 100644
index 0000000..b2bacbb
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/opfamily.sql
@@ -0,0 +1,52 @@
+-- copied from equivclass.sql
+create type int8alias1;
+create function int8alias1in(cstring) returns int8alias1
+ strict immutable language internal as 'int8in';
+create function int8alias1out(int8alias1) returns cstring
+ strict immutable language internal as 'int8out';
+create type int8alias1 (
+ input = int8alias1in,
+ output = int8alias1out,
+ like = int8
+);
+
+create type int8alias2;
+create function int8alias2in(cstring) returns int8alias2
+ strict immutable language internal as 'int8in';
+create function int8alias2out(int8alias2) returns cstring
+ strict immutable language internal as 'int8out';
+create type int8alias2 (
+ input = int8alias2in,
+ output = int8alias2out,
+ like = int8
+);
+
+create cast (int8 as int8alias1) without function;
+create cast (int8 as int8alias2) without function;
+create cast (int8alias1 as int8) without function;
+create cast (int8alias2 as int8) without function;
+
+create function int8alias1eq(int8alias1, int8alias1) returns bool
+ strict immutable language internal as 'int8eq';
+create operator = (
+ procedure = int8alias1eq,
+ leftarg = int8alias1, rightarg = int8alias1,
+ commutator = =,
+ restrict = eqsel, join = eqjoinsel,
+ merges
+);
+alter operator family integer_ops using btree add
+ operator 3 = (int8alias1, int8alias1);
+
+
+-- copied from alter_table.sql
+create type ctype as (f1 int, f2 text);
+
+create function same(ctype, ctype) returns boolean language sql
+as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2';
+
+create operator =(procedure = same, leftarg = ctype, rightarg = ctype);
+
+create operator class ctype_hash_ops
+ default for type ctype using hash as
+ operator 1 =(ctype, ctype);
diff --git a/src/test/modules/test_ddl_deparse/sql/test_ddl_deparse.sql b/src/test/modules/test_ddl_deparse/sql/test_ddl_deparse.sql
new file mode 100644
index 0000000..e257a21
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/sql/test_ddl_deparse.sql
@@ -0,0 +1,42 @@
+CREATE EXTENSION test_ddl_deparse;
+
+CREATE OR REPLACE FUNCTION test_ddl_deparse()
+ RETURNS event_trigger LANGUAGE plpgsql AS
+$$
+DECLARE
+ r record;
+ r2 record;
+ cmdtype text;
+ objtype text;
+ tag text;
+BEGIN
+ FOR r IN SELECT * FROM pg_event_trigger_ddl_commands()
+ LOOP
+ -- verify that tags match
+ tag = public.get_command_tag(r.command);
+ IF tag <> r.command_tag THEN
+ RAISE NOTICE 'tag % doesn''t match %', tag, r.command_tag;
+ END IF;
+
+ -- log the operation
+ cmdtype = public.get_command_type(r.command);
+ IF cmdtype <> 'grant' THEN
+ RAISE NOTICE 'DDL test: type %, tag %', cmdtype, tag;
+ ELSE
+ RAISE NOTICE 'DDL test: type %, object type %', cmdtype, r.object_type;
+ END IF;
+
+ -- if alter table, log more
+ IF cmdtype = 'alter table' THEN
+ FOR r2 IN SELECT *
+ FROM unnest(public.get_altertable_subcmdtypes(r.command))
+ LOOP
+ RAISE NOTICE ' subcommand: %', r2.unnest;
+ END LOOP;
+ END IF;
+ END LOOP;
+END;
+$$;
+
+CREATE EVENT TRIGGER test_ddl_deparse
+ON ddl_command_end EXECUTE PROCEDURE test_ddl_deparse();
diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse--1.0.sql b/src/test/modules/test_ddl_deparse/test_ddl_deparse--1.0.sql
new file mode 100644
index 0000000..093005a
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse--1.0.sql
@@ -0,0 +1,16 @@
+/* src/test/modules/test_ddl_deparse/test_ddl_deparse--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ddl_deparse" to load this file. \quit
+
+CREATE FUNCTION get_command_type(pg_ddl_command)
+ RETURNS text IMMUTABLE STRICT
+ AS 'MODULE_PATHNAME' LANGUAGE C;
+
+CREATE FUNCTION get_command_tag(pg_ddl_command)
+ RETURNS text IMMUTABLE STRICT
+ AS 'MODULE_PATHNAME' LANGUAGE C;
+
+CREATE FUNCTION get_altertable_subcmdtypes(pg_ddl_command)
+ RETURNS text[] IMMUTABLE STRICT
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c
new file mode 100644
index 0000000..def4e39
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c
@@ -0,0 +1,296 @@
+/*----------------------------------------------------------------------
+ * test_ddl_deparse.c
+ * Support functions for the test_ddl_deparse module
+ *
+ * Copyright (c) 2014-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_ddl_deparse/test_ddl_deparse.c
+ *----------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "catalog/pg_type.h"
+#include "tcop/deparse_utility.h"
+#include "tcop/utility.h"
+#include "utils/builtins.h"
+
+PG_MODULE_MAGIC;
+
+PG_FUNCTION_INFO_V1(get_command_type);
+PG_FUNCTION_INFO_V1(get_command_tag);
+PG_FUNCTION_INFO_V1(get_altertable_subcmdtypes);
+
+/*
+ * Return the textual representation of the struct type used to represent a
+ * command in struct CollectedCommand format.
+ */
+Datum
+get_command_type(PG_FUNCTION_ARGS)
+{
+ CollectedCommand *cmd = (CollectedCommand *) PG_GETARG_POINTER(0);
+ const char *type;
+
+ switch (cmd->type)
+ {
+ case SCT_Simple:
+ type = "simple";
+ break;
+ case SCT_AlterTable:
+ type = "alter table";
+ break;
+ case SCT_Grant:
+ type = "grant";
+ break;
+ case SCT_AlterOpFamily:
+ type = "alter operator family";
+ break;
+ case SCT_AlterDefaultPrivileges:
+ type = "alter default privileges";
+ break;
+ case SCT_CreateOpClass:
+ type = "create operator class";
+ break;
+ case SCT_AlterTSConfig:
+ type = "alter text search configuration";
+ break;
+ default:
+ type = "unknown command type";
+ break;
+ }
+
+ PG_RETURN_TEXT_P(cstring_to_text(type));
+}
+
+/*
+ * Return the command tag corresponding to a parse node contained in a
+ * CollectedCommand struct.
+ */
+Datum
+get_command_tag(PG_FUNCTION_ARGS)
+{
+ CollectedCommand *cmd = (CollectedCommand *) PG_GETARG_POINTER(0);
+
+ if (!cmd->parsetree)
+ PG_RETURN_NULL();
+
+ PG_RETURN_TEXT_P(cstring_to_text(CreateCommandName(cmd->parsetree)));
+}
+
+/*
+ * Return a text array representation of the subcommands of an ALTER TABLE
+ * command.
+ */
+Datum
+get_altertable_subcmdtypes(PG_FUNCTION_ARGS)
+{
+ CollectedCommand *cmd = (CollectedCommand *) PG_GETARG_POINTER(0);
+ ArrayBuildState *astate = NULL;
+ ListCell *cell;
+
+ if (cmd->type != SCT_AlterTable)
+ elog(ERROR, "command is not ALTER TABLE");
+
+ foreach(cell, cmd->d.alterTable.subcmds)
+ {
+ CollectedATSubcmd *sub = lfirst(cell);
+ AlterTableCmd *subcmd = castNode(AlterTableCmd, sub->parsetree);
+ const char *strtype;
+
+ switch (subcmd->subtype)
+ {
+ case AT_AddColumn:
+ strtype = "ADD COLUMN";
+ break;
+ case AT_AddColumnRecurse:
+ strtype = "ADD COLUMN (and recurse)";
+ break;
+ case AT_AddColumnToView:
+ strtype = "ADD COLUMN TO VIEW";
+ break;
+ case AT_ColumnDefault:
+ strtype = "ALTER COLUMN SET DEFAULT";
+ break;
+ case AT_CookedColumnDefault:
+ strtype = "ALTER COLUMN SET DEFAULT (precooked)";
+ break;
+ case AT_DropNotNull:
+ strtype = "DROP NOT NULL";
+ break;
+ case AT_SetNotNull:
+ strtype = "SET NOT NULL";
+ break;
+ case AT_CheckNotNull:
+ strtype = "CHECK NOT NULL";
+ break;
+ case AT_SetStatistics:
+ strtype = "SET STATS";
+ break;
+ case AT_SetOptions:
+ strtype = "SET OPTIONS";
+ break;
+ case AT_ResetOptions:
+ strtype = "RESET OPTIONS";
+ break;
+ case AT_SetStorage:
+ strtype = "SET STORAGE";
+ break;
+ case AT_DropColumn:
+ strtype = "DROP COLUMN";
+ break;
+ case AT_DropColumnRecurse:
+ strtype = "DROP COLUMN (and recurse)";
+ break;
+ case AT_AddIndex:
+ strtype = "ADD INDEX";
+ break;
+ case AT_ReAddIndex:
+ strtype = "(re) ADD INDEX";
+ break;
+ case AT_AddConstraint:
+ strtype = "ADD CONSTRAINT";
+ break;
+ case AT_AddConstraintRecurse:
+ strtype = "ADD CONSTRAINT (and recurse)";
+ break;
+ case AT_ReAddConstraint:
+ strtype = "(re) ADD CONSTRAINT";
+ break;
+ case AT_AlterConstraint:
+ strtype = "ALTER CONSTRAINT";
+ break;
+ case AT_ValidateConstraint:
+ strtype = "VALIDATE CONSTRAINT";
+ break;
+ case AT_ValidateConstraintRecurse:
+ strtype = "VALIDATE CONSTRAINT (and recurse)";
+ break;
+ case AT_AddIndexConstraint:
+ strtype = "ADD CONSTRAINT (using index)";
+ break;
+ case AT_DropConstraint:
+ strtype = "DROP CONSTRAINT";
+ break;
+ case AT_DropConstraintRecurse:
+ strtype = "DROP CONSTRAINT (and recurse)";
+ break;
+ case AT_ReAddComment:
+ strtype = "(re) ADD COMMENT";
+ break;
+ case AT_AlterColumnType:
+ strtype = "ALTER COLUMN SET TYPE";
+ break;
+ case AT_AlterColumnGenericOptions:
+ strtype = "ALTER COLUMN SET OPTIONS";
+ break;
+ case AT_ChangeOwner:
+ strtype = "CHANGE OWNER";
+ break;
+ case AT_ClusterOn:
+ strtype = "CLUSTER";
+ break;
+ case AT_DropCluster:
+ strtype = "DROP CLUSTER";
+ break;
+ case AT_SetLogged:
+ strtype = "SET LOGGED";
+ break;
+ case AT_SetUnLogged:
+ strtype = "SET UNLOGGED";
+ break;
+ case AT_DropOids:
+ strtype = "DROP OIDS";
+ break;
+ case AT_SetTableSpace:
+ strtype = "SET TABLESPACE";
+ break;
+ case AT_SetRelOptions:
+ strtype = "SET RELOPTIONS";
+ break;
+ case AT_ResetRelOptions:
+ strtype = "RESET RELOPTIONS";
+ break;
+ case AT_ReplaceRelOptions:
+ strtype = "REPLACE RELOPTIONS";
+ break;
+ case AT_EnableTrig:
+ strtype = "ENABLE TRIGGER";
+ break;
+ case AT_EnableAlwaysTrig:
+ strtype = "ENABLE TRIGGER (always)";
+ break;
+ case AT_EnableReplicaTrig:
+ strtype = "ENABLE TRIGGER (replica)";
+ break;
+ case AT_DisableTrig:
+ strtype = "DISABLE TRIGGER";
+ break;
+ case AT_EnableTrigAll:
+ strtype = "ENABLE TRIGGER (all)";
+ break;
+ case AT_DisableTrigAll:
+ strtype = "DISABLE TRIGGER (all)";
+ break;
+ case AT_EnableTrigUser:
+ strtype = "ENABLE TRIGGER (user)";
+ break;
+ case AT_DisableTrigUser:
+ strtype = "DISABLE TRIGGER (user)";
+ break;
+ case AT_EnableRule:
+ strtype = "ENABLE RULE";
+ break;
+ case AT_EnableAlwaysRule:
+ strtype = "ENABLE RULE (always)";
+ break;
+ case AT_EnableReplicaRule:
+ strtype = "ENABLE RULE (replica)";
+ break;
+ case AT_DisableRule:
+ strtype = "DISABLE RULE";
+ break;
+ case AT_AddInherit:
+ strtype = "ADD INHERIT";
+ break;
+ case AT_DropInherit:
+ strtype = "DROP INHERIT";
+ break;
+ case AT_AddOf:
+ strtype = "OF";
+ break;
+ case AT_DropOf:
+ strtype = "NOT OF";
+ break;
+ case AT_ReplicaIdentity:
+ strtype = "REPLICA IDENTITY";
+ break;
+ case AT_EnableRowSecurity:
+ strtype = "ENABLE ROW SECURITY";
+ break;
+ case AT_DisableRowSecurity:
+ strtype = "DISABLE ROW SECURITY";
+ break;
+ case AT_ForceRowSecurity:
+ strtype = "FORCE ROW SECURITY";
+ break;
+ case AT_NoForceRowSecurity:
+ strtype = "NO FORCE ROW SECURITY";
+ break;
+ case AT_GenericOptions:
+ strtype = "SET OPTIONS";
+ break;
+ default:
+ strtype = "unrecognized";
+ break;
+ }
+
+ astate =
+ accumArrayResult(astate, CStringGetTextDatum(strtype),
+ false, TEXTOID, CurrentMemoryContext);
+ }
+
+ if (astate == NULL)
+ elog(ERROR, "empty alter table subcommand list");
+
+ PG_RETURN_ARRAYTYPE_P(makeArrayResult(astate, CurrentMemoryContext));
+}
diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.control b/src/test/modules/test_ddl_deparse/test_ddl_deparse.control
new file mode 100644
index 0000000..09112ee
--- /dev/null
+++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.control
@@ -0,0 +1,4 @@
+comment = 'Test code for DDL deparse feature'
+default_version = '1.0'
+module_pathname = '$libdir/test_ddl_deparse'
+relocatable = true
diff --git a/src/test/modules/test_extensions/.gitignore b/src/test/modules/test_extensions/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_extensions/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_extensions/Makefile b/src/test/modules/test_extensions/Makefile
new file mode 100644
index 0000000..77ee4d5
--- /dev/null
+++ b/src/test/modules/test_extensions/Makefile
@@ -0,0 +1,26 @@
+# src/test/modules/test_extensions/Makefile
+
+MODULE = test_extensions
+PGFILEDESC = "test_extensions - regression testing for EXTENSION support"
+
+EXTENSION = test_ext1 test_ext2 test_ext3 test_ext4 test_ext5 test_ext6 \
+ test_ext7 test_ext8 test_ext_cyclic1 test_ext_cyclic2 \
+ test_ext_evttrig
+DATA = test_ext1--1.0.sql test_ext2--1.0.sql test_ext3--1.0.sql \
+ test_ext4--1.0.sql test_ext5--1.0.sql test_ext6--1.0.sql \
+ test_ext7--1.0.sql test_ext7--1.0--2.0.sql test_ext8--1.0.sql \
+ test_ext_cyclic1--1.0.sql test_ext_cyclic2--1.0.sql \
+ test_ext_evttrig--1.0.sql test_ext_evttrig--1.0--2.0.sql
+
+REGRESS = test_extensions test_extdepend
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_extensions
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_extensions/expected/test_extdepend.out b/src/test/modules/test_extensions/expected/test_extdepend.out
new file mode 100644
index 0000000..0b62015
--- /dev/null
+++ b/src/test/modules/test_extensions/expected/test_extdepend.out
@@ -0,0 +1,188 @@
+--
+-- test ALTER THING name DEPENDS ON EXTENSION
+--
+-- Common setup for all tests
+CREATE TABLE test_extdep_commands (command text);
+COPY test_extdep_commands FROM stdin;
+SELECT * FROM test_extdep_commands;
+ command
+-------------------------------------------------------------------------
+ CREATE SCHEMA test_ext
+ CREATE EXTENSION test_ext5 SCHEMA test_ext
+ SET search_path TO test_ext
+ CREATE TABLE a (a1 int)
+
+ CREATE FUNCTION b() RETURNS TRIGGER LANGUAGE plpgsql AS +
+ $$ BEGIN NEW.a1 := NEW.a1 + 42; RETURN NEW; END; $$
+ ALTER FUNCTION b() DEPENDS ON EXTENSION test_ext5
+
+ CREATE TRIGGER c BEFORE INSERT ON a FOR EACH ROW EXECUTE PROCEDURE b()
+ ALTER TRIGGER c ON a DEPENDS ON EXTENSION test_ext5
+
+ CREATE MATERIALIZED VIEW d AS SELECT * FROM a
+ ALTER MATERIALIZED VIEW d DEPENDS ON EXTENSION test_ext5
+
+ CREATE INDEX e ON a (a1)
+ ALTER INDEX e DEPENDS ON EXTENSION test_ext5
+ RESET search_path
+(17 rows)
+
+-- First, test that dependent objects go away when the extension is dropped.
+SELECT * FROM test_extdep_commands \gexec
+ CREATE SCHEMA test_ext
+ CREATE EXTENSION test_ext5 SCHEMA test_ext
+ SET search_path TO test_ext
+ CREATE TABLE a (a1 int)
+
+ CREATE FUNCTION b() RETURNS TRIGGER LANGUAGE plpgsql AS
+ $$ BEGIN NEW.a1 := NEW.a1 + 42; RETURN NEW; END; $$
+ ALTER FUNCTION b() DEPENDS ON EXTENSION test_ext5
+
+ CREATE TRIGGER c BEFORE INSERT ON a FOR EACH ROW EXECUTE PROCEDURE b()
+ ALTER TRIGGER c ON a DEPENDS ON EXTENSION test_ext5
+
+ CREATE MATERIALIZED VIEW d AS SELECT * FROM a
+ ALTER MATERIALIZED VIEW d DEPENDS ON EXTENSION test_ext5
+
+ CREATE INDEX e ON a (a1)
+ ALTER INDEX e DEPENDS ON EXTENSION test_ext5
+ RESET search_path
+-- A dependent object made dependent again has no effect
+ALTER FUNCTION test_ext.b() DEPENDS ON EXTENSION test_ext5;
+-- make sure we have the right dependencies on the extension
+SELECT deptype, p.*
+ FROM pg_depend, pg_identify_object(classid, objid, objsubid) AS p
+ WHERE refclassid = 'pg_extension'::regclass AND
+ refobjid = (SELECT oid FROM pg_extension WHERE extname = 'test_ext5')
+ORDER BY type;
+ deptype | type | schema | name | identity
+---------+-------------------+----------+------+-----------------
+ x | function | test_ext | | test_ext.b()
+ x | index | test_ext | e | test_ext.e
+ x | materialized view | test_ext | d | test_ext.d
+ x | trigger | | | c on test_ext.a
+(4 rows)
+
+DROP EXTENSION test_ext5;
+-- anything still depending on the table?
+SELECT deptype, i.*
+ FROM pg_catalog.pg_depend, pg_identify_object(classid, objid, objsubid) i
+WHERE refclassid='pg_class'::regclass AND
+ refobjid='test_ext.a'::regclass AND NOT deptype IN ('i', 'a');
+ deptype | type | schema | name | identity
+---------+------+--------+------+----------
+(0 rows)
+
+DROP SCHEMA test_ext CASCADE;
+NOTICE: drop cascades to table test_ext.a
+-- Second test: If we drop the table, the objects are dropped too and no
+-- vestige remains in pg_depend.
+SELECT * FROM test_extdep_commands \gexec
+ CREATE SCHEMA test_ext
+ CREATE EXTENSION test_ext5 SCHEMA test_ext
+ SET search_path TO test_ext
+ CREATE TABLE a (a1 int)
+
+ CREATE FUNCTION b() RETURNS TRIGGER LANGUAGE plpgsql AS
+ $$ BEGIN NEW.a1 := NEW.a1 + 42; RETURN NEW; END; $$
+ ALTER FUNCTION b() DEPENDS ON EXTENSION test_ext5
+
+ CREATE TRIGGER c BEFORE INSERT ON a FOR EACH ROW EXECUTE PROCEDURE b()
+ ALTER TRIGGER c ON a DEPENDS ON EXTENSION test_ext5
+
+ CREATE MATERIALIZED VIEW d AS SELECT * FROM a
+ ALTER MATERIALIZED VIEW d DEPENDS ON EXTENSION test_ext5
+
+ CREATE INDEX e ON a (a1)
+ ALTER INDEX e DEPENDS ON EXTENSION test_ext5
+ RESET search_path
+DROP TABLE test_ext.a; -- should fail, require cascade
+ERROR: cannot drop table test_ext.a because other objects depend on it
+DETAIL: materialized view test_ext.d depends on table test_ext.a
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+DROP TABLE test_ext.a CASCADE;
+NOTICE: drop cascades to materialized view test_ext.d
+-- anything still depending on the extension? Should be only function b()
+SELECT deptype, i.*
+ FROM pg_catalog.pg_depend, pg_identify_object(classid, objid, objsubid) i
+ WHERE refclassid='pg_extension'::regclass AND
+ refobjid=(SELECT oid FROM pg_extension WHERE extname='test_ext5');
+ deptype | type | schema | name | identity
+---------+----------+----------+------+--------------
+ x | function | test_ext | | test_ext.b()
+(1 row)
+
+DROP EXTENSION test_ext5;
+DROP SCHEMA test_ext CASCADE;
+-- Third test: we can drop the objects individually
+SELECT * FROM test_extdep_commands \gexec
+ CREATE SCHEMA test_ext
+ CREATE EXTENSION test_ext5 SCHEMA test_ext
+ SET search_path TO test_ext
+ CREATE TABLE a (a1 int)
+
+ CREATE FUNCTION b() RETURNS TRIGGER LANGUAGE plpgsql AS
+ $$ BEGIN NEW.a1 := NEW.a1 + 42; RETURN NEW; END; $$
+ ALTER FUNCTION b() DEPENDS ON EXTENSION test_ext5
+
+ CREATE TRIGGER c BEFORE INSERT ON a FOR EACH ROW EXECUTE PROCEDURE b()
+ ALTER TRIGGER c ON a DEPENDS ON EXTENSION test_ext5
+
+ CREATE MATERIALIZED VIEW d AS SELECT * FROM a
+ ALTER MATERIALIZED VIEW d DEPENDS ON EXTENSION test_ext5
+
+ CREATE INDEX e ON a (a1)
+ ALTER INDEX e DEPENDS ON EXTENSION test_ext5
+ RESET search_path
+SET search_path TO test_ext;
+DROP TRIGGER c ON a;
+DROP FUNCTION b();
+DROP MATERIALIZED VIEW d;
+DROP INDEX e;
+SELECT deptype, i.*
+ FROM pg_catalog.pg_depend, pg_identify_object(classid, objid, objsubid) i
+ WHERE (refclassid='pg_extension'::regclass AND
+ refobjid=(SELECT oid FROM pg_extension WHERE extname='test_ext5'))
+ OR (refclassid='pg_class'::regclass AND refobjid='test_ext.a'::regclass)
+ AND NOT deptype IN ('i', 'a');
+ deptype | type | schema | name | identity
+---------+------+--------+------+----------
+(0 rows)
+
+DROP TABLE a;
+RESET search_path;
+DROP SCHEMA test_ext CASCADE;
+NOTICE: drop cascades to extension test_ext5
+-- Fourth test: we can mark the objects as dependent, then unmark; then the
+-- drop of the extension does nothing
+SELECT * FROM test_extdep_commands \gexec
+ CREATE SCHEMA test_ext
+ CREATE EXTENSION test_ext5 SCHEMA test_ext
+ SET search_path TO test_ext
+ CREATE TABLE a (a1 int)
+
+ CREATE FUNCTION b() RETURNS TRIGGER LANGUAGE plpgsql AS
+ $$ BEGIN NEW.a1 := NEW.a1 + 42; RETURN NEW; END; $$
+ ALTER FUNCTION b() DEPENDS ON EXTENSION test_ext5
+
+ CREATE TRIGGER c BEFORE INSERT ON a FOR EACH ROW EXECUTE PROCEDURE b()
+ ALTER TRIGGER c ON a DEPENDS ON EXTENSION test_ext5
+
+ CREATE MATERIALIZED VIEW d AS SELECT * FROM a
+ ALTER MATERIALIZED VIEW d DEPENDS ON EXTENSION test_ext5
+
+ CREATE INDEX e ON a (a1)
+ ALTER INDEX e DEPENDS ON EXTENSION test_ext5
+ RESET search_path
+SET search_path TO test_ext;
+ALTER FUNCTION b() NO DEPENDS ON EXTENSION test_ext5;
+ALTER TRIGGER c ON a NO DEPENDS ON EXTENSION test_ext5;
+ALTER MATERIALIZED VIEW d NO DEPENDS ON EXTENSION test_ext5;
+ALTER INDEX e NO DEPENDS ON EXTENSION test_ext5;
+DROP EXTENSION test_ext5;
+DROP TRIGGER c ON a;
+DROP FUNCTION b();
+DROP MATERIALIZED VIEW d;
+DROP INDEX e;
+DROP SCHEMA test_ext CASCADE;
+NOTICE: drop cascades to table a
diff --git a/src/test/modules/test_extensions/expected/test_extensions.out b/src/test/modules/test_extensions/expected/test_extensions.out
new file mode 100644
index 0000000..30ae621
--- /dev/null
+++ b/src/test/modules/test_extensions/expected/test_extensions.out
@@ -0,0 +1,161 @@
+-- test some errors
+CREATE EXTENSION test_ext1;
+ERROR: required extension "test_ext2" is not installed
+HINT: Use CREATE EXTENSION ... CASCADE to install required extensions too.
+CREATE EXTENSION test_ext1 SCHEMA test_ext1;
+ERROR: schema "test_ext1" does not exist
+CREATE EXTENSION test_ext1 SCHEMA test_ext;
+ERROR: schema "test_ext" does not exist
+CREATE SCHEMA test_ext;
+CREATE EXTENSION test_ext1 SCHEMA test_ext;
+ERROR: extension "test_ext1" must be installed in schema "test_ext1"
+-- finally success
+CREATE EXTENSION test_ext1 SCHEMA test_ext CASCADE;
+NOTICE: installing required extension "test_ext2"
+NOTICE: installing required extension "test_ext3"
+NOTICE: installing required extension "test_ext5"
+NOTICE: installing required extension "test_ext4"
+SELECT extname, nspname, extversion, extrelocatable FROM pg_extension e, pg_namespace n WHERE extname LIKE 'test_ext%' AND e.extnamespace = n.oid ORDER BY 1;
+ extname | nspname | extversion | extrelocatable
+-----------+-----------+------------+----------------
+ test_ext1 | test_ext1 | 1.0 | f
+ test_ext2 | test_ext | 1.0 | t
+ test_ext3 | test_ext | 1.0 | t
+ test_ext4 | test_ext | 1.0 | t
+ test_ext5 | test_ext | 1.0 | t
+(5 rows)
+
+CREATE EXTENSION test_ext_cyclic1 CASCADE;
+NOTICE: installing required extension "test_ext_cyclic2"
+ERROR: cyclic dependency detected between extensions "test_ext_cyclic1" and "test_ext_cyclic2"
+DROP SCHEMA test_ext CASCADE;
+NOTICE: drop cascades to 5 other objects
+DETAIL: drop cascades to extension test_ext3
+drop cascades to extension test_ext5
+drop cascades to extension test_ext2
+drop cascades to extension test_ext4
+drop cascades to extension test_ext1
+CREATE EXTENSION test_ext6;
+DROP EXTENSION test_ext6;
+CREATE EXTENSION test_ext6;
+-- test dropping of member tables that own extensions:
+-- this table will be absorbed into test_ext7
+create table old_table1 (col1 serial primary key);
+create extension test_ext7;
+\dx+ test_ext7
+Objects in extension "test_ext7"
+ Object description
+-------------------------------
+ sequence ext7_table1_col1_seq
+ sequence ext7_table2_col2_seq
+ sequence old_table1_col1_seq
+ table ext7_table1
+ table ext7_table2
+ table old_table1
+(6 rows)
+
+alter extension test_ext7 update to '2.0';
+\dx+ test_ext7
+Objects in extension "test_ext7"
+ Object description
+-------------------------------
+ sequence ext7_table2_col2_seq
+ table ext7_table2
+(2 rows)
+
+-- test handling of temp objects created by extensions
+create extension test_ext8;
+-- \dx+ would expose a variable pg_temp_nn schema name, so we can't use it here
+select regexp_replace(pg_describe_object(classid, objid, objsubid),
+ 'pg_temp_\d+', 'pg_temp', 'g') as "Object description"
+from pg_depend
+where refclassid = 'pg_extension'::regclass and deptype = 'e' and
+ refobjid = (select oid from pg_extension where extname = 'test_ext8')
+order by 1;
+ Object description
+-----------------------------------------
+ function ext8_even(posint)
+ function pg_temp.ext8_temp_even(posint)
+ table ext8_table1
+ table ext8_temp_table1
+ type posint
+(5 rows)
+
+-- Should be possible to drop and recreate this extension
+drop extension test_ext8;
+create extension test_ext8;
+select regexp_replace(pg_describe_object(classid, objid, objsubid),
+ 'pg_temp_\d+', 'pg_temp', 'g') as "Object description"
+from pg_depend
+where refclassid = 'pg_extension'::regclass and deptype = 'e' and
+ refobjid = (select oid from pg_extension where extname = 'test_ext8')
+order by 1;
+ Object description
+-----------------------------------------
+ function ext8_even(posint)
+ function pg_temp.ext8_temp_even(posint)
+ table ext8_table1
+ table ext8_temp_table1
+ type posint
+(5 rows)
+
+-- here we want to start a new session and wait till old one is gone
+select pg_backend_pid() as oldpid \gset
+\c -
+do 'declare c int = 0;
+begin
+ while (select count(*) from pg_stat_activity where pid = '
+ :'oldpid'
+ ') > 0 loop c := c + 1; perform pg_stat_clear_snapshot(); end loop;
+ raise log ''test_extensions looped % times'', c;
+end';
+-- extension should now contain no temp objects
+\dx+ test_ext8
+Objects in extension "test_ext8"
+ Object description
+----------------------------
+ function ext8_even(posint)
+ table ext8_table1
+ type posint
+(3 rows)
+
+-- dropping it should still work
+drop extension test_ext8;
+-- Test creation of extension in temporary schema with two-phase commit,
+-- which should not work. This function wrapper is useful for portability.
+-- Avoid noise caused by CONTEXT and NOTICE messages including the temporary
+-- schema name.
+\set SHOW_CONTEXT never
+SET client_min_messages TO 'warning';
+-- First enforce presence of temporary schema.
+CREATE TEMP TABLE test_ext4_tab ();
+CREATE OR REPLACE FUNCTION create_extension_with_temp_schema()
+ RETURNS VOID AS $$
+ DECLARE
+ tmpschema text;
+ query text;
+ BEGIN
+ SELECT INTO tmpschema pg_my_temp_schema()::regnamespace;
+ query := 'CREATE EXTENSION test_ext4 SCHEMA ' || tmpschema || ' CASCADE;';
+ RAISE NOTICE 'query %', query;
+ EXECUTE query;
+ END; $$ LANGUAGE plpgsql;
+BEGIN;
+SELECT create_extension_with_temp_schema();
+ create_extension_with_temp_schema
+-----------------------------------
+
+(1 row)
+
+PREPARE TRANSACTION 'twophase_extension';
+ERROR: cannot PREPARE a transaction that has operated on temporary objects
+-- Clean up
+DROP TABLE test_ext4_tab;
+DROP FUNCTION create_extension_with_temp_schema();
+RESET client_min_messages;
+\unset SHOW_CONTEXT
+-- Test case of an event trigger run in an extension upgrade script.
+-- See: https://postgr.es/m/20200902193715.6e0269d4@firost
+CREATE EXTENSION test_ext_evttrig;
+ALTER EXTENSION test_ext_evttrig UPDATE TO '2.0';
+DROP EXTENSION test_ext_evttrig;
diff --git a/src/test/modules/test_extensions/sql/test_extdepend.sql b/src/test/modules/test_extensions/sql/test_extdepend.sql
new file mode 100644
index 0000000..63240a1
--- /dev/null
+++ b/src/test/modules/test_extensions/sql/test_extdepend.sql
@@ -0,0 +1,90 @@
+--
+-- test ALTER THING name DEPENDS ON EXTENSION
+--
+
+-- Common setup for all tests
+CREATE TABLE test_extdep_commands (command text);
+COPY test_extdep_commands FROM stdin;
+ CREATE SCHEMA test_ext
+ CREATE EXTENSION test_ext5 SCHEMA test_ext
+ SET search_path TO test_ext
+ CREATE TABLE a (a1 int)
+
+ CREATE FUNCTION b() RETURNS TRIGGER LANGUAGE plpgsql AS\n $$ BEGIN NEW.a1 := NEW.a1 + 42; RETURN NEW; END; $$
+ ALTER FUNCTION b() DEPENDS ON EXTENSION test_ext5
+
+ CREATE TRIGGER c BEFORE INSERT ON a FOR EACH ROW EXECUTE PROCEDURE b()
+ ALTER TRIGGER c ON a DEPENDS ON EXTENSION test_ext5
+
+ CREATE MATERIALIZED VIEW d AS SELECT * FROM a
+ ALTER MATERIALIZED VIEW d DEPENDS ON EXTENSION test_ext5
+
+ CREATE INDEX e ON a (a1)
+ ALTER INDEX e DEPENDS ON EXTENSION test_ext5
+ RESET search_path
+\.
+
+SELECT * FROM test_extdep_commands;
+-- First, test that dependent objects go away when the extension is dropped.
+SELECT * FROM test_extdep_commands \gexec
+-- A dependent object made dependent again has no effect
+ALTER FUNCTION test_ext.b() DEPENDS ON EXTENSION test_ext5;
+-- make sure we have the right dependencies on the extension
+SELECT deptype, p.*
+ FROM pg_depend, pg_identify_object(classid, objid, objsubid) AS p
+ WHERE refclassid = 'pg_extension'::regclass AND
+ refobjid = (SELECT oid FROM pg_extension WHERE extname = 'test_ext5')
+ORDER BY type;
+DROP EXTENSION test_ext5;
+-- anything still depending on the table?
+SELECT deptype, i.*
+ FROM pg_catalog.pg_depend, pg_identify_object(classid, objid, objsubid) i
+WHERE refclassid='pg_class'::regclass AND
+ refobjid='test_ext.a'::regclass AND NOT deptype IN ('i', 'a');
+DROP SCHEMA test_ext CASCADE;
+
+-- Second test: If we drop the table, the objects are dropped too and no
+-- vestige remains in pg_depend.
+SELECT * FROM test_extdep_commands \gexec
+DROP TABLE test_ext.a; -- should fail, require cascade
+DROP TABLE test_ext.a CASCADE;
+-- anything still depending on the extension? Should be only function b()
+SELECT deptype, i.*
+ FROM pg_catalog.pg_depend, pg_identify_object(classid, objid, objsubid) i
+ WHERE refclassid='pg_extension'::regclass AND
+ refobjid=(SELECT oid FROM pg_extension WHERE extname='test_ext5');
+DROP EXTENSION test_ext5;
+DROP SCHEMA test_ext CASCADE;
+
+-- Third test: we can drop the objects individually
+SELECT * FROM test_extdep_commands \gexec
+SET search_path TO test_ext;
+DROP TRIGGER c ON a;
+DROP FUNCTION b();
+DROP MATERIALIZED VIEW d;
+DROP INDEX e;
+
+SELECT deptype, i.*
+ FROM pg_catalog.pg_depend, pg_identify_object(classid, objid, objsubid) i
+ WHERE (refclassid='pg_extension'::regclass AND
+ refobjid=(SELECT oid FROM pg_extension WHERE extname='test_ext5'))
+ OR (refclassid='pg_class'::regclass AND refobjid='test_ext.a'::regclass)
+ AND NOT deptype IN ('i', 'a');
+DROP TABLE a;
+RESET search_path;
+DROP SCHEMA test_ext CASCADE;
+
+-- Fourth test: we can mark the objects as dependent, then unmark; then the
+-- drop of the extension does nothing
+SELECT * FROM test_extdep_commands \gexec
+SET search_path TO test_ext;
+ALTER FUNCTION b() NO DEPENDS ON EXTENSION test_ext5;
+ALTER TRIGGER c ON a NO DEPENDS ON EXTENSION test_ext5;
+ALTER MATERIALIZED VIEW d NO DEPENDS ON EXTENSION test_ext5;
+ALTER INDEX e NO DEPENDS ON EXTENSION test_ext5;
+DROP EXTENSION test_ext5;
+DROP TRIGGER c ON a;
+DROP FUNCTION b();
+DROP MATERIALIZED VIEW d;
+DROP INDEX e;
+DROP SCHEMA test_ext CASCADE;
diff --git a/src/test/modules/test_extensions/sql/test_extensions.sql b/src/test/modules/test_extensions/sql/test_extensions.sql
new file mode 100644
index 0000000..c16fd36
--- /dev/null
+++ b/src/test/modules/test_extensions/sql/test_extensions.sql
@@ -0,0 +1,101 @@
+-- test some errors
+CREATE EXTENSION test_ext1;
+CREATE EXTENSION test_ext1 SCHEMA test_ext1;
+CREATE EXTENSION test_ext1 SCHEMA test_ext;
+CREATE SCHEMA test_ext;
+CREATE EXTENSION test_ext1 SCHEMA test_ext;
+
+-- finally success
+CREATE EXTENSION test_ext1 SCHEMA test_ext CASCADE;
+
+SELECT extname, nspname, extversion, extrelocatable FROM pg_extension e, pg_namespace n WHERE extname LIKE 'test_ext%' AND e.extnamespace = n.oid ORDER BY 1;
+
+CREATE EXTENSION test_ext_cyclic1 CASCADE;
+
+DROP SCHEMA test_ext CASCADE;
+
+CREATE EXTENSION test_ext6;
+DROP EXTENSION test_ext6;
+CREATE EXTENSION test_ext6;
+
+-- test dropping of member tables that own extensions:
+-- this table will be absorbed into test_ext7
+create table old_table1 (col1 serial primary key);
+create extension test_ext7;
+\dx+ test_ext7
+alter extension test_ext7 update to '2.0';
+\dx+ test_ext7
+
+-- test handling of temp objects created by extensions
+create extension test_ext8;
+
+-- \dx+ would expose a variable pg_temp_nn schema name, so we can't use it here
+select regexp_replace(pg_describe_object(classid, objid, objsubid),
+ 'pg_temp_\d+', 'pg_temp', 'g') as "Object description"
+from pg_depend
+where refclassid = 'pg_extension'::regclass and deptype = 'e' and
+ refobjid = (select oid from pg_extension where extname = 'test_ext8')
+order by 1;
+
+-- Should be possible to drop and recreate this extension
+drop extension test_ext8;
+create extension test_ext8;
+
+select regexp_replace(pg_describe_object(classid, objid, objsubid),
+ 'pg_temp_\d+', 'pg_temp', 'g') as "Object description"
+from pg_depend
+where refclassid = 'pg_extension'::regclass and deptype = 'e' and
+ refobjid = (select oid from pg_extension where extname = 'test_ext8')
+order by 1;
+
+-- here we want to start a new session and wait till old one is gone
+select pg_backend_pid() as oldpid \gset
+\c -
+do 'declare c int = 0;
+begin
+ while (select count(*) from pg_stat_activity where pid = '
+ :'oldpid'
+ ') > 0 loop c := c + 1; perform pg_stat_clear_snapshot(); end loop;
+ raise log ''test_extensions looped % times'', c;
+end';
+
+-- extension should now contain no temp objects
+\dx+ test_ext8
+
+-- dropping it should still work
+drop extension test_ext8;
+
+-- Test creation of extension in temporary schema with two-phase commit,
+-- which should not work. This function wrapper is useful for portability.
+
+-- Avoid noise caused by CONTEXT and NOTICE messages including the temporary
+-- schema name.
+\set SHOW_CONTEXT never
+SET client_min_messages TO 'warning';
+-- First enforce presence of temporary schema.
+CREATE TEMP TABLE test_ext4_tab ();
+CREATE OR REPLACE FUNCTION create_extension_with_temp_schema()
+ RETURNS VOID AS $$
+ DECLARE
+ tmpschema text;
+ query text;
+ BEGIN
+ SELECT INTO tmpschema pg_my_temp_schema()::regnamespace;
+ query := 'CREATE EXTENSION test_ext4 SCHEMA ' || tmpschema || ' CASCADE;';
+ RAISE NOTICE 'query %', query;
+ EXECUTE query;
+ END; $$ LANGUAGE plpgsql;
+BEGIN;
+SELECT create_extension_with_temp_schema();
+PREPARE TRANSACTION 'twophase_extension';
+-- Clean up
+DROP TABLE test_ext4_tab;
+DROP FUNCTION create_extension_with_temp_schema();
+RESET client_min_messages;
+\unset SHOW_CONTEXT
+
+-- Test case of an event trigger run in an extension upgrade script.
+-- See: https://postgr.es/m/20200902193715.6e0269d4@firost
+CREATE EXTENSION test_ext_evttrig;
+ALTER EXTENSION test_ext_evttrig UPDATE TO '2.0';
+DROP EXTENSION test_ext_evttrig;
diff --git a/src/test/modules/test_extensions/test_ext1--1.0.sql b/src/test/modules/test_extensions/test_ext1--1.0.sql
new file mode 100644
index 0000000..9a4bb1b
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext1--1.0.sql
@@ -0,0 +1,3 @@
+/* src/test/modules/test_extensions/test_ext1--1.0.sql */
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ext1" to load this file. \quit
diff --git a/src/test/modules/test_extensions/test_ext1.control b/src/test/modules/test_extensions/test_ext1.control
new file mode 100644
index 0000000..9c069df
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext1.control
@@ -0,0 +1,5 @@
+comment = 'Test extension 1'
+default_version = '1.0'
+schema = 'test_ext1'
+relocatable = false
+requires = 'test_ext2,test_ext4'
diff --git a/src/test/modules/test_extensions/test_ext2--1.0.sql b/src/test/modules/test_extensions/test_ext2--1.0.sql
new file mode 100644
index 0000000..0f6d4ec
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext2--1.0.sql
@@ -0,0 +1,3 @@
+/* src/test/modules/test_extensions/test_ext2--1.0.sql */
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ext2" to load this file. \quit
diff --git a/src/test/modules/test_extensions/test_ext2.control b/src/test/modules/test_extensions/test_ext2.control
new file mode 100644
index 0000000..946b7d5
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext2.control
@@ -0,0 +1,4 @@
+comment = 'Test extension 2'
+default_version = '1.0'
+relocatable = true
+requires = 'test_ext3,test_ext5'
diff --git a/src/test/modules/test_extensions/test_ext3--1.0.sql b/src/test/modules/test_extensions/test_ext3--1.0.sql
new file mode 100644
index 0000000..4fcb63d
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext3--1.0.sql
@@ -0,0 +1,9 @@
+/* src/test/modules/test_extensions/test_ext3--1.0.sql */
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ext3" to load this file. \quit
+
+CREATE TABLE test_ext3_table (col_old INT);
+
+ALTER TABLE test_ext3_table RENAME col_old TO col_new;
+
+UPDATE test_ext3_table SET col_new = 0;
diff --git a/src/test/modules/test_extensions/test_ext3.control b/src/test/modules/test_extensions/test_ext3.control
new file mode 100644
index 0000000..5f1afe7
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext3.control
@@ -0,0 +1,3 @@
+comment = 'Test extension 3'
+default_version = '1.0'
+relocatable = true
diff --git a/src/test/modules/test_extensions/test_ext4--1.0.sql b/src/test/modules/test_extensions/test_ext4--1.0.sql
new file mode 100644
index 0000000..19f051f
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext4--1.0.sql
@@ -0,0 +1,3 @@
+/* src/test/modules/test_extensions/test_ext4--1.0.sql */
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ext4" to load this file. \quit
diff --git a/src/test/modules/test_extensions/test_ext4.control b/src/test/modules/test_extensions/test_ext4.control
new file mode 100644
index 0000000..fc62591
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext4.control
@@ -0,0 +1,4 @@
+comment = 'Test extension 4'
+default_version = '1.0'
+relocatable = true
+requires = 'test_ext5'
diff --git a/src/test/modules/test_extensions/test_ext5--1.0.sql b/src/test/modules/test_extensions/test_ext5--1.0.sql
new file mode 100644
index 0000000..baf6ef8
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext5--1.0.sql
@@ -0,0 +1,3 @@
+/* src/test/modules/test_extensions/test_ext5--1.0.sql */
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ext5" to load this file. \quit
diff --git a/src/test/modules/test_extensions/test_ext5.control b/src/test/modules/test_extensions/test_ext5.control
new file mode 100644
index 0000000..51bc57e
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext5.control
@@ -0,0 +1,3 @@
+comment = 'Test extension 5'
+default_version = '1.0'
+relocatable = true
diff --git a/src/test/modules/test_extensions/test_ext6--1.0.sql b/src/test/modules/test_extensions/test_ext6--1.0.sql
new file mode 100644
index 0000000..65a4fc5
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext6--1.0.sql
@@ -0,0 +1 @@
+grant usage on schema @extschema@ to public;
diff --git a/src/test/modules/test_extensions/test_ext6.control b/src/test/modules/test_extensions/test_ext6.control
new file mode 100644
index 0000000..04b2146
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext6.control
@@ -0,0 +1,5 @@
+comment = 'test_ext6'
+default_version = '1.0'
+relocatable = false
+superuser = true
+schema = 'test_ext6'
diff --git a/src/test/modules/test_extensions/test_ext7--1.0--2.0.sql b/src/test/modules/test_extensions/test_ext7--1.0--2.0.sql
new file mode 100644
index 0000000..50e3dca
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext7--1.0--2.0.sql
@@ -0,0 +1,8 @@
+/* src/test/modules/test_extensions/test_ext7--1.0--2.0.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION test_ext7 UPDATE TO '2.0'" to load this file. \quit
+
+-- drop some tables with serial columns
+drop table ext7_table1;
+drop table old_table1;
diff --git a/src/test/modules/test_extensions/test_ext7--1.0.sql b/src/test/modules/test_extensions/test_ext7--1.0.sql
new file mode 100644
index 0000000..0c2d72a
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext7--1.0.sql
@@ -0,0 +1,13 @@
+/* src/test/modules/test_extensions/test_ext7--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ext7" to load this file. \quit
+
+-- link some existing serial-owning table to the extension
+alter extension test_ext7 add table old_table1;
+alter extension test_ext7 add sequence old_table1_col1_seq;
+
+-- ordinary member tables with serial columns
+create table ext7_table1 (col1 serial primary key);
+
+create table ext7_table2 (col2 serial primary key);
diff --git a/src/test/modules/test_extensions/test_ext7.control b/src/test/modules/test_extensions/test_ext7.control
new file mode 100644
index 0000000..b58df53
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext7.control
@@ -0,0 +1,4 @@
+comment = 'Test extension 7'
+default_version = '1.0'
+schema = 'public'
+relocatable = false
diff --git a/src/test/modules/test_extensions/test_ext8--1.0.sql b/src/test/modules/test_extensions/test_ext8--1.0.sql
new file mode 100644
index 0000000..1561ffe
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext8--1.0.sql
@@ -0,0 +1,21 @@
+/* src/test/modules/test_extensions/test_ext8--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ext8" to load this file. \quit
+
+-- create some random data type
+create domain posint as int check (value > 0);
+
+-- use it in regular and temporary tables and functions
+
+create table ext8_table1 (f1 posint);
+
+create temp table ext8_temp_table1 (f1 posint);
+
+create function ext8_even (posint) returns bool as
+ 'select ($1 % 2) = 0' language sql;
+
+create function pg_temp.ext8_temp_even (posint) returns bool as
+ 'select ($1 % 2) = 0' language sql;
+
+-- we intentionally don't drop the temp objects before exiting
diff --git a/src/test/modules/test_extensions/test_ext8.control b/src/test/modules/test_extensions/test_ext8.control
new file mode 100644
index 0000000..70f8caa
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext8.control
@@ -0,0 +1,4 @@
+comment = 'Test extension 8'
+default_version = '1.0'
+schema = 'public'
+relocatable = false
diff --git a/src/test/modules/test_extensions/test_ext_cyclic1--1.0.sql b/src/test/modules/test_extensions/test_ext_cyclic1--1.0.sql
new file mode 100644
index 0000000..81bdaf4
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext_cyclic1--1.0.sql
@@ -0,0 +1,3 @@
+/* src/test/modules/test_extensions/test_ext_cyclic1--1.0.sql */
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ext_cyclic1" to load this file. \quit
diff --git a/src/test/modules/test_extensions/test_ext_cyclic1.control b/src/test/modules/test_extensions/test_ext_cyclic1.control
new file mode 100644
index 0000000..aaab403
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext_cyclic1.control
@@ -0,0 +1,4 @@
+comment = 'Test extension cyclic 1'
+default_version = '1.0'
+relocatable = true
+requires = 'test_ext_cyclic2'
diff --git a/src/test/modules/test_extensions/test_ext_cyclic2--1.0.sql b/src/test/modules/test_extensions/test_ext_cyclic2--1.0.sql
new file mode 100644
index 0000000..ae2b3e9
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext_cyclic2--1.0.sql
@@ -0,0 +1,3 @@
+/* src/test/modules/test_extensions/test_ext_cyclic2--1.0.sql */
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ext_cyclic2" to load this file. \quit
diff --git a/src/test/modules/test_extensions/test_ext_cyclic2.control b/src/test/modules/test_extensions/test_ext_cyclic2.control
new file mode 100644
index 0000000..1e28f96
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext_cyclic2.control
@@ -0,0 +1,4 @@
+comment = 'Test extension cyclic 2'
+default_version = '1.0'
+relocatable = true
+requires = 'test_ext_cyclic1'
diff --git a/src/test/modules/test_extensions/test_ext_evttrig--1.0--2.0.sql b/src/test/modules/test_extensions/test_ext_evttrig--1.0--2.0.sql
new file mode 100644
index 0000000..fdd2f35
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext_evttrig--1.0--2.0.sql
@@ -0,0 +1,7 @@
+/* src/test/modules/test_extensions/test_ext_evttrig--1.0--2.0.sql */
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION test_ext_evttrig UPDATE TO '2.0'" to load this file. \quit
+
+-- Test extension upgrade with event trigger.
+ALTER EVENT TRIGGER table_rewrite_trg DISABLE;
+ALTER TABLE t DROP COLUMN id;
diff --git a/src/test/modules/test_extensions/test_ext_evttrig--1.0.sql b/src/test/modules/test_extensions/test_ext_evttrig--1.0.sql
new file mode 100644
index 0000000..0071712
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext_evttrig--1.0.sql
@@ -0,0 +1,16 @@
+/* src/test/modules/test_extensions/test_ext_evttrig--1.0.sql */
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ext_evttrig" to load this file. \quit
+
+-- Base table with event trigger, used in a regression test involving
+-- extension upgrades.
+CREATE TABLE t (id text);
+CREATE OR REPLACE FUNCTION _evt_table_rewrite_fnct()
+RETURNS EVENT_TRIGGER LANGUAGE plpgsql AS
+$$
+ BEGIN
+ END;
+$$;
+CREATE EVENT TRIGGER table_rewrite_trg
+ ON table_rewrite
+ EXECUTE PROCEDURE _evt_table_rewrite_fnct();
diff --git a/src/test/modules/test_extensions/test_ext_evttrig.control b/src/test/modules/test_extensions/test_ext_evttrig.control
new file mode 100644
index 0000000..915fae6
--- /dev/null
+++ b/src/test/modules/test_extensions/test_ext_evttrig.control
@@ -0,0 +1,3 @@
+comment = 'Test extension - event trigger'
+default_version = '1.0'
+relocatable = true
diff --git a/src/test/modules/test_ginpostinglist/.gitignore b/src/test/modules/test_ginpostinglist/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_ginpostinglist/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_ginpostinglist/Makefile b/src/test/modules/test_ginpostinglist/Makefile
new file mode 100644
index 0000000..51b941b
--- /dev/null
+++ b/src/test/modules/test_ginpostinglist/Makefile
@@ -0,0 +1,23 @@
+# src/test/modules/test_ginpostinglist/Makefile
+
+MODULE_big = test_ginpostinglist
+OBJS = \
+ $(WIN32RES) \
+ test_ginpostinglist.o
+PGFILEDESC = "test_ginpostinglist - test code for src/backend/access/gin/ginpostinglist.c"
+
+EXTENSION = test_ginpostinglist
+DATA = test_ginpostinglist--1.0.sql
+
+REGRESS = test_ginpostinglist
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_ginpostinglist
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_ginpostinglist/README b/src/test/modules/test_ginpostinglist/README
new file mode 100644
index 0000000..66684dd
--- /dev/null
+++ b/src/test/modules/test_ginpostinglist/README
@@ -0,0 +1,2 @@
+test_ginpostinglist contains unit tests for the GIN posting list code in
+src/backend/access/gin/ginpostinglist.c.
diff --git a/src/test/modules/test_ginpostinglist/expected/test_ginpostinglist.out b/src/test/modules/test_ginpostinglist/expected/test_ginpostinglist.out
new file mode 100644
index 0000000..4d0beae
--- /dev/null
+++ b/src/test/modules/test_ginpostinglist/expected/test_ginpostinglist.out
@@ -0,0 +1,19 @@
+CREATE EXTENSION test_ginpostinglist;
+--
+-- All the logic is in the test_ginpostinglist() function. It will throw
+-- a error if something fails.
+--
+SELECT test_ginpostinglist();
+NOTICE: testing with (0, 1), (0, 2), max 14 bytes
+NOTICE: encoded 2 item pointers to 10 bytes
+NOTICE: testing with (0, 1), (0, 291), max 14 bytes
+NOTICE: encoded 2 item pointers to 10 bytes
+NOTICE: testing with (0, 1), (4294967294, 291), max 14 bytes
+NOTICE: encoded 1 item pointers to 8 bytes
+NOTICE: testing with (0, 1), (4294967294, 291), max 16 bytes
+NOTICE: encoded 2 item pointers to 16 bytes
+ test_ginpostinglist
+---------------------
+
+(1 row)
+
diff --git a/src/test/modules/test_ginpostinglist/sql/test_ginpostinglist.sql b/src/test/modules/test_ginpostinglist/sql/test_ginpostinglist.sql
new file mode 100644
index 0000000..b8cab7a
--- /dev/null
+++ b/src/test/modules/test_ginpostinglist/sql/test_ginpostinglist.sql
@@ -0,0 +1,7 @@
+CREATE EXTENSION test_ginpostinglist;
+
+--
+-- All the logic is in the test_ginpostinglist() function. It will throw
+-- a error if something fails.
+--
+SELECT test_ginpostinglist();
diff --git a/src/test/modules/test_ginpostinglist/test_ginpostinglist--1.0.sql b/src/test/modules/test_ginpostinglist/test_ginpostinglist--1.0.sql
new file mode 100644
index 0000000..37396a4
--- /dev/null
+++ b/src/test/modules/test_ginpostinglist/test_ginpostinglist--1.0.sql
@@ -0,0 +1,8 @@
+/* src/test/modules/test_ginpostinglist/test_ginpostinglist--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_ginpostinglist" to load this file. \quit
+
+CREATE FUNCTION test_ginpostinglist()
+RETURNS pg_catalog.void STRICT
+AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_ginpostinglist/test_ginpostinglist.c b/src/test/modules/test_ginpostinglist/test_ginpostinglist.c
new file mode 100644
index 0000000..4a8451e
--- /dev/null
+++ b/src/test/modules/test_ginpostinglist/test_ginpostinglist.c
@@ -0,0 +1,96 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_ginpostinglist.c
+ * Test varbyte-encoding in ginpostinglist.c
+ *
+ * Copyright (c) 2019-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_ginpostinglist/test_ginpostinglist.c
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/gin_private.h"
+#include "access/ginblock.h"
+#include "access/htup_details.h"
+#include "fmgr.h"
+
+PG_MODULE_MAGIC;
+
+PG_FUNCTION_INFO_V1(test_ginpostinglist);
+
+/*
+ * Encodes a pair of TIDs, and decodes it back. The first TID is always
+ * (0, 1), the second one is formed from the blk/off arguments. The 'maxsize'
+ * argument is passed to ginCompressPostingList(); it can be used to test the
+ * overflow checks.
+ *
+ * The reason that we test a pair, instead of just a single TID, is that
+ * the GinPostingList stores the first TID as is, and the varbyte-encoding
+ * is only used for the deltas between TIDs. So testing a single TID would
+ * not exercise the varbyte encoding at all.
+ *
+ * This function prints NOTICEs to describe what is tested, and how large the
+ * resulting GinPostingList is. Any incorrect results, e.g. if the encode +
+ * decode round trip doesn't return the original input, are reported as
+ * ERRORs.
+ */
+static void
+test_itemptr_pair(BlockNumber blk, OffsetNumber off, int maxsize)
+{
+	ItemPointerData orig_itemptrs[2];
+	ItemPointer decoded_itemptrs;
+	GinPostingList *pl;
+	int			nwritten;
+	int			ndecoded;
+
+	elog(NOTICE, "testing with (%u, %d), (%u, %d), max %d bytes",
+		 0, 1, blk, off, maxsize);
+	ItemPointerSet(&orig_itemptrs[0], 0, 1);
+	ItemPointerSet(&orig_itemptrs[1], blk, off);
+
+	/* Encode, and decode it back */
+	pl = ginCompressPostingList(orig_itemptrs, 2, maxsize, &nwritten);
+	elog(NOTICE, "encoded %d item pointers to %zu bytes",
+		 nwritten, SizeOfGinPostingList(pl));
+
+	if (SizeOfGinPostingList(pl) > maxsize)
+		elog(ERROR, "overflow: result was %zu bytes, max %d",
+			 SizeOfGinPostingList(pl), maxsize);
+
+	decoded_itemptrs = ginPostingListDecode(pl, &ndecoded);
+	if (nwritten != ndecoded)
+		elog(NOTICE, "encoded %d itemptrs, %d came back", nwritten, ndecoded);
+
+	/* Check the result */
+	if (!ItemPointerEquals(&orig_itemptrs[0], &decoded_itemptrs[0]))
+		elog(ERROR, "mismatch on first itemptr: (%u, %d) vs (%u, %d)",
+			 0, 1,
+			 ItemPointerGetBlockNumber(&decoded_itemptrs[0]),
+			 ItemPointerGetOffsetNumber(&decoded_itemptrs[0]));
+
+	if (ndecoded == 2 &&
+		!ItemPointerEquals(&orig_itemptrs[1], &decoded_itemptrs[1]))
+	{
+		elog(ERROR, "mismatch on second itemptr: (%u, %d) vs (%u, %d)",
+			 blk, off,
+			 ItemPointerGetBlockNumber(&decoded_itemptrs[1]),
+			 ItemPointerGetOffsetNumber(&decoded_itemptrs[1]));
+	}
+}
+
+/*
+ * SQL-callable entry point to perform all tests.
+ */
+Datum
+test_ginpostinglist(PG_FUNCTION_ARGS)
+{
+ test_itemptr_pair(0, 2, 14);
+ test_itemptr_pair(0, MaxHeapTuplesPerPage, 14);
+ test_itemptr_pair(MaxBlockNumber, MaxHeapTuplesPerPage, 14);
+ test_itemptr_pair(MaxBlockNumber, MaxHeapTuplesPerPage, 16);
+
+ PG_RETURN_VOID();
+}
diff --git a/src/test/modules/test_ginpostinglist/test_ginpostinglist.control b/src/test/modules/test_ginpostinglist/test_ginpostinglist.control
new file mode 100644
index 0000000..e4f5a7c
--- /dev/null
+++ b/src/test/modules/test_ginpostinglist/test_ginpostinglist.control
@@ -0,0 +1,4 @@
+comment = 'Test code for ginpostinglist.c'
+default_version = '1.0'
+module_pathname = '$libdir/test_ginpostinglist'
+relocatable = true
diff --git a/src/test/modules/test_integerset/.gitignore b/src/test/modules/test_integerset/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_integerset/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_integerset/Makefile b/src/test/modules/test_integerset/Makefile
new file mode 100644
index 0000000..799c17c
--- /dev/null
+++ b/src/test/modules/test_integerset/Makefile
@@ -0,0 +1,23 @@
+# src/test/modules/test_integerset/Makefile
+
+MODULE_big = test_integerset
+OBJS = \
+ $(WIN32RES) \
+ test_integerset.o
+PGFILEDESC = "test_integerset - test code for src/backend/lib/integerset.c"
+
+EXTENSION = test_integerset
+DATA = test_integerset--1.0.sql
+
+REGRESS = test_integerset
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_integerset
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_integerset/README b/src/test/modules/test_integerset/README
new file mode 100644
index 0000000..a8b2718
--- /dev/null
+++ b/src/test/modules/test_integerset/README
@@ -0,0 +1,7 @@
+test_integerset contains unit tests for testing the integer set implementation
+in src/backend/lib/integerset.c.
+
+The tests verify the correctness of the implementation, but they can also be
+used as a micro-benchmark. If you set the 'intset_test_stats' flag in
+test_integerset.c, the tests will print extra information about execution time
+and memory usage.
diff --git a/src/test/modules/test_integerset/expected/test_integerset.out b/src/test/modules/test_integerset/expected/test_integerset.out
new file mode 100644
index 0000000..822dd03
--- /dev/null
+++ b/src/test/modules/test_integerset/expected/test_integerset.out
@@ -0,0 +1,31 @@
+CREATE EXTENSION test_integerset;
+--
+-- All the logic is in the test_integerset() function. It will throw
+-- an error if something fails.
+--
+SELECT test_integerset();
+NOTICE: testing intset with empty set
+NOTICE: testing intset with distances > 2^60 between values
+NOTICE: testing intset with single value 0
+NOTICE: testing intset with single value 1
+NOTICE: testing intset with single value 18446744073709551614
+NOTICE: testing intset with single value 18446744073709551615
+NOTICE: testing intset with value 0, and all between 1000 and 2000
+NOTICE: testing intset with value 1, and all between 1000 and 2000
+NOTICE: testing intset with value 1, and all between 1000 and 2000000
+NOTICE: testing intset with value 18446744073709551614, and all between 1000 and 2000
+NOTICE: testing intset with value 18446744073709551615, and all between 1000 and 2000
+NOTICE: testing intset with pattern "all ones"
+NOTICE: testing intset with pattern "alternating bits"
+NOTICE: testing intset with pattern "clusters of ten"
+NOTICE: testing intset with pattern "clusters of hundred"
+NOTICE: testing intset with pattern "one-every-64k"
+NOTICE: testing intset with pattern "sparse"
+NOTICE: testing intset with pattern "single values, distance > 2^32"
+NOTICE: testing intset with pattern "clusters, distance > 2^32"
+NOTICE: testing intset with pattern "clusters, distance > 2^60"
+ test_integerset
+-----------------
+
+(1 row)
+
diff --git a/src/test/modules/test_integerset/sql/test_integerset.sql b/src/test/modules/test_integerset/sql/test_integerset.sql
new file mode 100644
index 0000000..9d970dd
--- /dev/null
+++ b/src/test/modules/test_integerset/sql/test_integerset.sql
@@ -0,0 +1,7 @@
+CREATE EXTENSION test_integerset;
+
+--
+-- All the logic is in the test_integerset() function. It will throw
+-- an error if something fails.
+--
+SELECT test_integerset();
diff --git a/src/test/modules/test_integerset/test_integerset--1.0.sql b/src/test/modules/test_integerset/test_integerset--1.0.sql
new file mode 100644
index 0000000..d6d5a3f
--- /dev/null
+++ b/src/test/modules/test_integerset/test_integerset--1.0.sql
@@ -0,0 +1,8 @@
+/* src/test/modules/test_integerset/test_integerset--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_integerset" to load this file. \quit
+
+CREATE FUNCTION test_integerset()
+RETURNS pg_catalog.void STRICT
+AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_integerset/test_integerset.c b/src/test/modules/test_integerset/test_integerset.c
new file mode 100644
index 0000000..bb9c9a0
--- /dev/null
+++ b/src/test/modules/test_integerset/test_integerset.c
@@ -0,0 +1,623 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_integerset.c
+ * Test integer set data structure.
+ *
+ * Copyright (c) 2019-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_integerset/test_integerset.c
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "lib/integerset.h"
+#include "miscadmin.h"
+#include "nodes/bitmapset.h"
+#include "storage/block.h"
+#include "storage/itemptr.h"
+#include "utils/memutils.h"
+#include "utils/timestamp.h"
+
+/*
+ * If you enable this, the "pattern" tests will print information about
+ * how long populating, probing, and iterating the test set takes, and
+ * how much memory the test set consumed. That can be used as
+ * micro-benchmark of various operations and input patterns (you might
+ * want to increase the number of values used in each of the test, if
+ * you do that, to reduce noise).
+ *
+ * The information is printed to the server's stderr, mostly because
+ * that's where MemoryContextStats() output goes.
+ */
+static const bool intset_test_stats = false;
+
+PG_MODULE_MAGIC;
+
+PG_FUNCTION_INFO_V1(test_integerset);
+
+/*
+ * A struct to define a pattern of integers, for use with the test_pattern()
+ * function.
+ */
+typedef struct
+{
+ char *test_name; /* short name of the test, for humans */
+ char *pattern_str; /* a bit pattern */
+ uint64 spacing; /* pattern repeats at this interval */
+ uint64 num_values; /* number of integers to set in total */
+} test_spec;
+
+static const test_spec test_specs[] = {
+ {
+ "all ones", "1111111111",
+ 10, 10000000
+ },
+ {
+ "alternating bits", "0101010101",
+ 10, 10000000
+ },
+ {
+ "clusters of ten", "1111111111",
+ 10000, 10000000
+ },
+ {
+ "clusters of hundred",
+ "1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111",
+ 10000, 100000000
+ },
+ {
+ "one-every-64k", "1",
+ 65536, 10000000
+ },
+ {
+ "sparse", "100000000000000000000000000000001",
+ 10000000, 10000000
+ },
+ {
+ "single values, distance > 2^32", "1",
+ UINT64CONST(10000000000), 1000000
+ },
+ {
+ "clusters, distance > 2^32", "10101010",
+ UINT64CONST(10000000000), 10000000
+ },
+ {
+ "clusters, distance > 2^60", "10101010",
+ UINT64CONST(2000000000000000000),
+ 23 /* can't be much higher than this, or we
+ * overflow uint64 */
+ }
+};
+
+static void test_pattern(const test_spec *spec);
+static void test_empty(void);
+static void test_single_value(uint64 value);
+static void check_with_filler(IntegerSet *intset, uint64 x, uint64 value, uint64 filler_min, uint64 filler_max);
+static void test_single_value_and_filler(uint64 value, uint64 filler_min, uint64 filler_max);
+static void test_huge_distances(void);
+
+/*
+ * SQL-callable entry point to perform all tests.
+ */
+Datum
+test_integerset(PG_FUNCTION_ARGS)
+{
+ /* Tests for various corner cases */
+ test_empty();
+ test_huge_distances();
+ test_single_value(0);
+ test_single_value(1);
+ test_single_value(PG_UINT64_MAX - 1);
+ test_single_value(PG_UINT64_MAX);
+ test_single_value_and_filler(0, 1000, 2000);
+ test_single_value_and_filler(1, 1000, 2000);
+ test_single_value_and_filler(1, 1000, 2000000);
+ test_single_value_and_filler(PG_UINT64_MAX - 1, 1000, 2000);
+ test_single_value_and_filler(PG_UINT64_MAX, 1000, 2000);
+
+ /* Test different test patterns, with lots of entries */
+ for (int i = 0; i < lengthof(test_specs); i++)
+ {
+ test_pattern(&test_specs[i]);
+ }
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Test with a repeating pattern, defined by the 'spec'.
+ */
+static void
+test_pattern(const test_spec *spec)
+{
+ IntegerSet *intset;
+ MemoryContext intset_ctx;
+ MemoryContext old_ctx;
+ TimestampTz starttime;
+ TimestampTz endtime;
+ uint64 n;
+ uint64 last_int;
+ int patternlen;
+ uint64 *pattern_values;
+ uint64 pattern_num_values;
+
+ elog(NOTICE, "testing intset with pattern \"%s\"", spec->test_name);
+ if (intset_test_stats)
+ fprintf(stderr, "-----\ntesting intset with pattern \"%s\"\n", spec->test_name);
+
+ /* Pre-process the pattern, creating an array of integers from it. */
+ patternlen = strlen(spec->pattern_str);
+ pattern_values = palloc(patternlen * sizeof(uint64));
+ pattern_num_values = 0;
+ for (int i = 0; i < patternlen; i++)
+ {
+ if (spec->pattern_str[i] == '1')
+ pattern_values[pattern_num_values++] = i;
+ }
+
+ /*
+ * Allocate the integer set.
+ *
+ * Allocate it in a separate memory context, so that we can print its
+ * memory usage easily. (intset_create() creates a memory context of its
+ * own, too, but we don't have direct access to it, so we cannot call
+ * MemoryContextStats() on it directly).
+ */
+ intset_ctx = AllocSetContextCreate(CurrentMemoryContext,
+ "intset test",
+ ALLOCSET_SMALL_SIZES);
+ MemoryContextSetIdentifier(intset_ctx, spec->test_name);
+ old_ctx = MemoryContextSwitchTo(intset_ctx);
+ intset = intset_create();
+ MemoryContextSwitchTo(old_ctx);
+
+ /*
+ * Add values to the set.
+ */
+ starttime = GetCurrentTimestamp();
+
+ n = 0;
+ last_int = 0;
+ while (n < spec->num_values)
+ {
+ uint64 x = 0;
+
+ for (int i = 0; i < pattern_num_values && n < spec->num_values; i++)
+ {
+ x = last_int + pattern_values[i];
+
+ intset_add_member(intset, x);
+ n++;
+ }
+ last_int += spec->spacing;
+ }
+
+ endtime = GetCurrentTimestamp();
+
+ if (intset_test_stats)
+ fprintf(stderr, "added " UINT64_FORMAT " values in %d ms\n",
+ spec->num_values, (int) (endtime - starttime) / 1000);
+
+ /*
+ * Print stats on the amount of memory used.
+ *
+ * We print the usage reported by intset_memory_usage(), as well as the
+ * stats from the memory context. They should be in the same ballpark,
+ * but it's hard to automate testing that, so if you're making changes to
+ * the implementation, just observe that manually.
+ */
+ if (intset_test_stats)
+ {
+ uint64 mem_usage;
+
+ /*
+ * Also print memory usage as reported by intset_memory_usage(). It
+ * should be in the same ballpark as the usage reported by
+ * MemoryContextStats().
+ */
+ mem_usage = intset_memory_usage(intset);
+ fprintf(stderr, "intset_memory_usage() reported " UINT64_FORMAT " (%0.2f bytes / integer)\n",
+ mem_usage, (double) mem_usage / spec->num_values);
+
+ MemoryContextStats(intset_ctx);
+ }
+
+ /* Check that intset_get_num_entries works */
+ n = intset_num_entries(intset);
+ if (n != spec->num_values)
+ elog(ERROR, "intset_num_entries returned " UINT64_FORMAT ", expected " UINT64_FORMAT, n, spec->num_values);
+
+ /*
+ * Test random-access probes with intset_is_member()
+ */
+ starttime = GetCurrentTimestamp();
+
+ for (n = 0; n < 100000; n++)
+ {
+ bool b;
+ bool expected;
+ uint64 x;
+
+ /*
+ * Pick next value to probe at random. We limit the probes to the
+ * last integer that we added to the set, plus an arbitrary constant
+ * (1000). There's no point in probing the whole 0 - 2^64 range, if
+ * only a small part of the integer space is used. We would very
+ * rarely hit values that are actually in the set.
+ */
+ x = (pg_lrand48() << 31) | pg_lrand48();
+ x = x % (last_int + 1000);
+
+ /* Do we expect this value to be present in the set? */
+ if (x >= last_int)
+ expected = false;
+ else
+ {
+ uint64 idx = x % spec->spacing;
+
+ if (idx >= patternlen)
+ expected = false;
+ else if (spec->pattern_str[idx] == '1')
+ expected = true;
+ else
+ expected = false;
+ }
+
+ /* Is it present according to intset_is_member() ? */
+ b = intset_is_member(intset, x);
+
+ if (b != expected)
+ elog(ERROR, "mismatch at " UINT64_FORMAT ": %d vs %d", x, b, expected);
+ }
+ endtime = GetCurrentTimestamp();
+ if (intset_test_stats)
+ fprintf(stderr, "probed " UINT64_FORMAT " values in %d ms\n",
+ n, (int) (endtime - starttime) / 1000);
+
+ /*
+ * Test iterator
+ */
+ starttime = GetCurrentTimestamp();
+
+ intset_begin_iterate(intset);
+ n = 0;
+ last_int = 0;
+ while (n < spec->num_values)
+ {
+ for (int i = 0; i < pattern_num_values && n < spec->num_values; i++)
+ {
+ uint64 expected = last_int + pattern_values[i];
+ uint64 x;
+
+ if (!intset_iterate_next(intset, &x))
+ break;
+
+ if (x != expected)
+ elog(ERROR, "iterate returned wrong value; got " UINT64_FORMAT ", expected " UINT64_FORMAT, x, expected);
+ n++;
+ }
+ last_int += spec->spacing;
+ }
+ endtime = GetCurrentTimestamp();
+ if (intset_test_stats)
+ fprintf(stderr, "iterated " UINT64_FORMAT " values in %d ms\n",
+ n, (int) (endtime - starttime) / 1000);
+
+ if (n < spec->num_values)
+ elog(ERROR, "iterator stopped short after " UINT64_FORMAT " entries, expected " UINT64_FORMAT, n, spec->num_values);
+ if (n > spec->num_values)
+ elog(ERROR, "iterator returned " UINT64_FORMAT " entries, " UINT64_FORMAT " was expected", n, spec->num_values);
+
+ MemoryContextDelete(intset_ctx);
+}
+
+/*
+ * Test with a set containing a single integer.
+ */
+static void
+test_single_value(uint64 value)
+{
+ IntegerSet *intset;
+ uint64 x;
+ uint64 num_entries;
+ bool found;
+
+ elog(NOTICE, "testing intset with single value " UINT64_FORMAT, value);
+
+ /* Create the set. */
+ intset = intset_create();
+ intset_add_member(intset, value);
+
+ /* Test intset_get_num_entries() */
+ num_entries = intset_num_entries(intset);
+ if (num_entries != 1)
+ elog(ERROR, "intset_num_entries returned " UINT64_FORMAT ", expected 1", num_entries);
+
+ /*
+ * Test intset_is_member() at various special values, like 0 and maximum
+ * possible 64-bit integer, as well as the value itself.
+ */
+ if (intset_is_member(intset, 0) != (value == 0))
+ elog(ERROR, "intset_is_member failed for 0");
+ if (intset_is_member(intset, 1) != (value == 1))
+ elog(ERROR, "intset_is_member failed for 1");
+ if (intset_is_member(intset, PG_UINT64_MAX) != (value == PG_UINT64_MAX))
+ elog(ERROR, "intset_is_member failed for PG_UINT64_MAX");
+ if (intset_is_member(intset, value) != true)
+ elog(ERROR, "intset_is_member failed for the tested value");
+
+ /*
+ * Test iterator
+ */
+ intset_begin_iterate(intset);
+ found = intset_iterate_next(intset, &x);
+ if (!found || x != value)
+ elog(ERROR, "intset_iterate_next failed for " UINT64_FORMAT, x);
+
+ found = intset_iterate_next(intset, &x);
+ if (found)
+ elog(ERROR, "intset_iterate_next failed " UINT64_FORMAT, x);
+}
+
+/*
+ * Test with an integer set that contains:
+ *
+ * - a given single 'value', and
+ * - all integers between 'filler_min' and 'filler_max'.
+ *
+ * This exercises different codepaths than testing just with a single value,
+ * because the implementation buffers newly-added values. If we add just a
+ * single value to the set, we won't test the internal B-tree code at all,
+ * just the code that deals with the buffer.
+ */
+static void
+test_single_value_and_filler(uint64 value, uint64 filler_min, uint64 filler_max)
+{
+ IntegerSet *intset;
+ uint64 x;
+ bool found;
+ uint64 *iter_expected;
+ uint64 n = 0;
+ uint64 num_entries = 0;
+ uint64 mem_usage;
+
+ elog(NOTICE, "testing intset with value " UINT64_FORMAT ", and all between " UINT64_FORMAT " and " UINT64_FORMAT,
+ value, filler_min, filler_max);
+
+ intset = intset_create();
+
+ iter_expected = palloc(sizeof(uint64) * (filler_max - filler_min + 1));
+ if (value < filler_min)
+ {
+ intset_add_member(intset, value);
+ iter_expected[n++] = value;
+ }
+
+ for (x = filler_min; x < filler_max; x++)
+ {
+ intset_add_member(intset, x);
+ iter_expected[n++] = x;
+ }
+
+ if (value >= filler_max)
+ {
+ intset_add_member(intset, value);
+ iter_expected[n++] = value;
+ }
+
+ /* Test intset_get_num_entries() */
+ num_entries = intset_num_entries(intset);
+ if (num_entries != n)
+ elog(ERROR, "intset_num_entries returned " UINT64_FORMAT ", expected " UINT64_FORMAT, num_entries, n);
+
+ /*
+ * Test intset_is_member() at various spots, at and around the values that
+ * we expect to be set, as well as 0 and the maximum possible value.
+ */
+ check_with_filler(intset, 0,
+ value, filler_min, filler_max);
+ check_with_filler(intset, 1,
+ value, filler_min, filler_max);
+ check_with_filler(intset, filler_min - 1,
+ value, filler_min, filler_max);
+ check_with_filler(intset, filler_min,
+ value, filler_min, filler_max);
+ check_with_filler(intset, filler_min + 1,
+ value, filler_min, filler_max);
+ check_with_filler(intset, value - 1,
+ value, filler_min, filler_max);
+ check_with_filler(intset, value,
+ value, filler_min, filler_max);
+ check_with_filler(intset, value + 1,
+ value, filler_min, filler_max);
+ check_with_filler(intset, filler_max - 1,
+ value, filler_min, filler_max);
+ check_with_filler(intset, filler_max,
+ value, filler_min, filler_max);
+ check_with_filler(intset, filler_max + 1,
+ value, filler_min, filler_max);
+ check_with_filler(intset, PG_UINT64_MAX - 1,
+ value, filler_min, filler_max);
+ check_with_filler(intset, PG_UINT64_MAX,
+ value, filler_min, filler_max);
+
+ intset_begin_iterate(intset);
+ for (uint64 i = 0; i < n; i++)
+ {
+ found = intset_iterate_next(intset, &x);
+ if (!found || x != iter_expected[i])
+ elog(ERROR, "intset_iterate_next failed for " UINT64_FORMAT, x);
+ }
+ found = intset_iterate_next(intset, &x);
+ if (found)
+ elog(ERROR, "intset_iterate_next failed " UINT64_FORMAT, x);
+
+ mem_usage = intset_memory_usage(intset);
+ if (mem_usage < 5000 || mem_usage > 500000000)
+ elog(ERROR, "intset_memory_usage() reported suspicious value: " UINT64_FORMAT, mem_usage);
+}
+
+/*
+ * Helper function for test_single_value_and_filler.
+ *
+ * Calls intset_is_member() for value 'x', and checks that the result is what
+ * we expect.
+ */
+static void
+check_with_filler(IntegerSet *intset, uint64 x,
+ uint64 value, uint64 filler_min, uint64 filler_max)
+{
+ bool expected;
+ bool actual;
+
+ expected = (x == value || (filler_min <= x && x < filler_max));
+
+ actual = intset_is_member(intset, x);
+
+ if (actual != expected)
+ elog(ERROR, "intset_is_member failed for " UINT64_FORMAT, x);
+}
+
+/*
+ * Test empty set
+ */
+static void
+test_empty(void)
+{
+ IntegerSet *intset;
+ uint64 x;
+
+ elog(NOTICE, "testing intset with empty set");
+
+ intset = intset_create();
+
+ /* Test intset_is_member() */
+ if (intset_is_member(intset, 0) != false)
+ elog(ERROR, "intset_is_member on empty set returned true");
+ if (intset_is_member(intset, 1) != false)
+ elog(ERROR, "intset_is_member on empty set returned true");
+ if (intset_is_member(intset, PG_UINT64_MAX) != false)
+ elog(ERROR, "intset_is_member on empty set returned true");
+
+ /* Test iterator */
+ intset_begin_iterate(intset);
+ if (intset_iterate_next(intset, &x))
+ elog(ERROR, "intset_iterate_next on empty set returned a value (" UINT64_FORMAT ")", x);
+}
+
+/*
+ * Test with integers that are more than 2^60 apart.
+ *
+ * The Simple-8b encoding used by the set implementation can only encode
+ * values up to 2^60. That makes large differences like this interesting
+ * to test.
+ */
+static void
+test_huge_distances(void)
+{
+ IntegerSet *intset;
+ uint64 values[1000];
+ int num_values = 0;
+ uint64 val = 0;
+ bool found;
+ uint64 x;
+
+ elog(NOTICE, "testing intset with distances > 2^60 between values");
+
+ val = 0;
+ values[num_values++] = val;
+
+ /* Test differences on both sides of the 2^60 boundary. */
+ val += UINT64CONST(1152921504606846976) - 1; /* 2^60 - 1 */
+ values[num_values++] = val;
+
+ val += UINT64CONST(1152921504606846976) - 1; /* 2^60 - 1 */
+ values[num_values++] = val;
+
+ val += UINT64CONST(1152921504606846976); /* 2^60 */
+ values[num_values++] = val;
+
+ val += UINT64CONST(1152921504606846976); /* 2^60 */
+ values[num_values++] = val;
+
+ val += UINT64CONST(1152921504606846976); /* 2^60 */
+ values[num_values++] = val;
+
+ val += UINT64CONST(1152921504606846976) + 1; /* 2^60 + 1 */
+ values[num_values++] = val;
+
+ val += UINT64CONST(1152921504606846976) + 1; /* 2^60 + 1 */
+ values[num_values++] = val;
+
+ val += UINT64CONST(1152921504606846976) + 1; /* 2^60 + 1 */
+ values[num_values++] = val;
+
+ val += UINT64CONST(1152921504606846976) + 2; /* 2^60 + 2 */
+ values[num_values++] = val;
+
+ val += UINT64CONST(1152921504606846976) + 2; /* 2^60 + 2 */
+ values[num_values++] = val;
+
+ val += UINT64CONST(1152921504606846976); /* 2^60 */
+ values[num_values++] = val;
+
+ /*
+ * We're now very close to 2^64, so can't add large values anymore. But
+ * add more smaller values to the end, to make sure that all the above
+ * values get flushed and packed into the tree structure.
+ */
+ while (num_values < 1000)
+ {
+ val += pg_lrand48();
+ values[num_values++] = val;
+ }
+
+ /* Create an IntegerSet using these values */
+ intset = intset_create();
+ for (int i = 0; i < num_values; i++)
+ intset_add_member(intset, values[i]);
+
+ /*
+ * Test intset_is_member() around each of these values
+ */
+ for (int i = 0; i < num_values; i++)
+ {
+ uint64 x = values[i];
+ bool expected;
+ bool result;
+
+ if (x > 0)
+ {
+ expected = (values[i - 1] == x - 1);
+ result = intset_is_member(intset, x - 1);
+ if (result != expected)
+ elog(ERROR, "intset_is_member failed for " UINT64_FORMAT, x - 1);
+ }
+
+ result = intset_is_member(intset, x);
+ if (result != true)
+ elog(ERROR, "intset_is_member failed for " UINT64_FORMAT, x);
+
+ expected = (i != num_values - 1) ? (values[i + 1] == x + 1) : false;
+ result = intset_is_member(intset, x + 1);
+ if (result != expected)
+ elog(ERROR, "intset_is_member failed for " UINT64_FORMAT, x + 1);
+ }
+
+ /*
+ * Test iterator
+ */
+ intset_begin_iterate(intset);
+ for (int i = 0; i < num_values; i++)
+ {
+ found = intset_iterate_next(intset, &x);
+ if (!found || x != values[i])
+ elog(ERROR, "intset_iterate_next failed for " UINT64_FORMAT, x);
+ }
+ found = intset_iterate_next(intset, &x);
+ if (found)
+ elog(ERROR, "intset_iterate_next failed " UINT64_FORMAT, x);
+}
diff --git a/src/test/modules/test_integerset/test_integerset.control b/src/test/modules/test_integerset/test_integerset.control
new file mode 100644
index 0000000..7d20c2d
--- /dev/null
+++ b/src/test/modules/test_integerset/test_integerset.control
@@ -0,0 +1,4 @@
+comment = 'Test code for integerset'
+default_version = '1.0'
+module_pathname = '$libdir/test_integerset'
+relocatable = true
diff --git a/src/test/modules/test_misc/.gitignore b/src/test/modules/test_misc/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_misc/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_misc/Makefile b/src/test/modules/test_misc/Makefile
new file mode 100644
index 0000000..39c6c20
--- /dev/null
+++ b/src/test/modules/test_misc/Makefile
@@ -0,0 +1,14 @@
+# src/test/modules/test_misc/Makefile
+
+TAP_TESTS = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_misc
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_misc/README b/src/test/modules/test_misc/README
new file mode 100644
index 0000000..4876733
--- /dev/null
+++ b/src/test/modules/test_misc/README
@@ -0,0 +1,4 @@
+This directory doesn't actually contain any extension module.
+
+What it is is a home for otherwise-unclassified TAP tests that exercise core
+server features. We might equally well have called it, say, src/test/misc.
diff --git a/src/test/modules/test_misc/t/001_constraint_validation.pl b/src/test/modules/test_misc/t/001_constraint_validation.pl
new file mode 100644
index 0000000..0e27dae
--- /dev/null
+++ b/src/test/modules/test_misc/t/001_constraint_validation.pl
@@ -0,0 +1,310 @@
+# Verify that ALTER TABLE optimizes certain operations as expected
+
+use strict;
+use warnings;
+use PostgresNode;
+use TestLib;
+use Test::More tests => 42;
+
+# Initialize a test cluster
+my $node = get_new_node('master');
+$node->init();
+# Turn message level up to DEBUG1 so that we get the messages we want to see
+$node->append_conf('postgresql.conf', 'client_min_messages = DEBUG1');
+$node->start;
+
+# Run a SQL command and return psql's stderr (including debug messages)
+sub run_sql_command
+{
+ my $sql = shift;
+ my $stderr;
+
+ $node->psql(
+ 'postgres',
+ $sql,
+ stderr => \$stderr,
+ on_error_die => 1,
+ on_error_stop => 1);
+ return $stderr;
+}
+
+# Check whether result of run_sql_command shows that we did a verify pass
+sub is_table_verified
+{
+ my $output = shift;
+ return index($output, 'DEBUG: verifying table') != -1;
+}
+
+my $output;
+
+note "test alter table set not null";
+
+run_sql_command(
+ 'create table atacc1 (test_a int, test_b int);
+ insert into atacc1 values (1, 2);');
+
+$output = run_sql_command('alter table atacc1 alter test_a set not null;');
+ok(is_table_verified($output),
+ 'column test_a without constraint will scan table');
+
+run_sql_command(
+ 'alter table atacc1 alter test_a drop not null;
+ alter table atacc1 add constraint atacc1_constr_a_valid
+ check(test_a is not null);');
+
+# with the check constraint in place, this run need not verify the table data
+$output = run_sql_command('alter table atacc1 alter test_a set not null;');
+ok(!is_table_verified($output), 'with constraint will not scan table');
+ok( $output =~
+ m/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
+ 'test_a proved by constraints');
+
+run_sql_command('alter table atacc1 alter test_a drop not null;');
+
+# we have a check constraint only for column test_a, so the table must still be scanned to verify test_b
+$output = run_sql_command(
+ 'alter table atacc1 alter test_b set not null, alter test_a set not null;'
+);
+ok(is_table_verified($output), 'table was scanned');
+# the debug message for the test_a constraint may be missing, since the table must be scanned anyway on account of test_b
+ok( !( $output =~
+ m/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/
+ ),
+ 'test_b not proved by wrong constraints');
+run_sql_command(
+ 'alter table atacc1 alter test_a drop not null, alter test_b drop not null;'
+);
+
+# test with both columns having check constraints
+run_sql_command(
+ 'alter table atacc1 add constraint atacc1_constr_b_valid check(test_b is not null);'
+);
+$output = run_sql_command(
+ 'alter table atacc1 alter test_b set not null, alter test_a set not null;'
+);
+ok(!is_table_verified($output), 'table was not scanned for both columns');
+ok( $output =~
+ m/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
+ 'test_a proved by constraints');
+ok( $output =~
+ m/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/,
+ 'test_b proved by constraints');
+run_sql_command('drop table atacc1;');
+
+note "test alter table attach partition";
+
+run_sql_command(
+ 'CREATE TABLE list_parted2 (
+ a int,
+ b char
+ ) PARTITION BY LIST (a);
+ CREATE TABLE part_3_4 (
+ LIKE list_parted2,
+ CONSTRAINT check_a CHECK (a IN (3)));');
+
+# need NOT NULL to skip table scan
+$output = run_sql_command(
+ 'ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);'
+);
+ok(is_table_verified($output), 'table part_3_4 scanned');
+
+run_sql_command(
+ 'ALTER TABLE list_parted2 DETACH PARTITION part_3_4;
+ ALTER TABLE part_3_4 ALTER a SET NOT NULL;');
+
+$output = run_sql_command(
+ 'ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);'
+);
+ok(!is_table_verified($output), 'table part_3_4 not scanned');
+ok( $output =~
+ m/partition constraint for table "part_3_4" is implied by existing constraints/,
+ 'part_3_4 verified by existing constraints');
+
+# test attach default partition
+run_sql_command(
+ 'CREATE TABLE list_parted2_def (
+ LIKE list_parted2,
+ CONSTRAINT check_a CHECK (a IN (5, 6)));');
+$output = run_sql_command(
+ 'ALTER TABLE list_parted2 ATTACH PARTITION list_parted2_def default;');
+ok(!is_table_verified($output), 'table list_parted2_def not scanned');
+ok( $output =~
+ m/partition constraint for table "list_parted2_def" is implied by existing constraints/,
+ 'list_parted2_def verified by existing constraints');
+
+$output = run_sql_command(
+ 'CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66);'
+);
+ok(!is_table_verified($output), 'table list_parted2_def not scanned');
+ok( $output =~
+ m/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
+ 'updated partition constraint for default partition list_parted2_def');
+
+# test attach another partitioned table
+run_sql_command(
+ 'CREATE TABLE part_5 (
+ LIKE list_parted2
+ ) PARTITION BY LIST (b);
+ CREATE TABLE part_5_a PARTITION OF part_5 FOR VALUES IN (\'a\');
+ ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 5);'
+);
+$output = run_sql_command(
+ 'ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);');
+ok(!($output =~ m/verifying table "part_5"/), 'table part_5 not scanned');
+ok($output =~ m/verifying table "list_parted2_def"/,
+ 'list_parted2_def scanned');
+ok( $output =~
+ m/partition constraint for table "part_5" is implied by existing constraints/,
+ 'part_5 verified by existing constraints');
+
+run_sql_command(
+ 'ALTER TABLE list_parted2 DETACH PARTITION part_5;
+ ALTER TABLE part_5 DROP CONSTRAINT check_a;');
+
+# scan should again be skipped, even though NOT NULL is now a column property
+run_sql_command(
+ 'ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IN (5)),
+ ALTER a SET NOT NULL;'
+);
+$output = run_sql_command(
+ 'ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);');
+ok(!($output =~ m/verifying table "part_5"/), 'table part_5 not scanned');
+ok($output =~ m/verifying table "list_parted2_def"/,
+ 'list_parted2_def scanned');
+ok( $output =~
+ m/partition constraint for table "part_5" is implied by existing constraints/,
+ 'part_5 verified by existing constraints');
+
+# Check the case where the attnos of the partitioning columns in the table
+# being attached differ from the parent's.  That should not affect the
+# constraint-checking logic that allows the scan to be skipped.
+run_sql_command(
+ 'CREATE TABLE part_6 (
+ c int,
+ LIKE list_parted2,
+ CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 6)
+ );
+ ALTER TABLE part_6 DROP c;');
+$output = run_sql_command(
+ 'ALTER TABLE list_parted2 ATTACH PARTITION part_6 FOR VALUES IN (6);');
+ok(!($output =~ m/verifying table "part_6"/), 'table part_6 not scanned');
+ok($output =~ m/verifying table "list_parted2_def"/,
+ 'list_parted2_def scanned');
+ok( $output =~
+ m/partition constraint for table "part_6" is implied by existing constraints/,
+ 'part_6 verified by existing constraints');
+
+# Similar to above, but the table being attached is a partitioned table
+# whose partition has still different attnos for the root partitioning
+# columns.
+run_sql_command(
+ 'CREATE TABLE part_7 (
+ LIKE list_parted2,
+ CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7)
+ ) PARTITION BY LIST (b);
+ CREATE TABLE part_7_a_null (
+ c int,
+ d int,
+ e int,
+ LIKE list_parted2, -- a will have attnum = 4
+ CONSTRAINT check_b CHECK (b IS NULL OR b = \'a\'),
+ CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7)
+ );
+ ALTER TABLE part_7_a_null DROP c, DROP d, DROP e;');
+
+$output = run_sql_command(
+ 'ALTER TABLE part_7 ATTACH PARTITION part_7_a_null FOR VALUES IN (\'a\', null);'
+);
+ok(!is_table_verified($output), 'table not scanned');
+ok( $output =~
+ m/partition constraint for table "part_7_a_null" is implied by existing constraints/,
+ 'part_7_a_null verified by existing constraints');
+$output = run_sql_command(
+ 'ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7);');
+ok(!is_table_verified($output), 'tables not scanned');
+ok( $output =~
+ m/partition constraint for table "part_7" is implied by existing constraints/,
+ 'part_7 verified by existing constraints');
+ok( $output =~
+ m/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
+ 'updated partition constraint for default partition list_parted2_def');
+
+run_sql_command(
+ 'CREATE TABLE range_parted (
+ a int,
+ b int
+ ) PARTITION BY RANGE (a, b);
+ CREATE TABLE range_part1 (
+ a int NOT NULL CHECK (a = 1),
+ b int NOT NULL);');
+
+$output = run_sql_command(
+ 'ALTER TABLE range_parted ATTACH PARTITION range_part1 FOR VALUES FROM (1, 1) TO (1, 10);'
+);
+ok(is_table_verified($output), 'table range_part1 scanned');
+ok( !( $output =~
+ m/partition constraint for table "range_part1" is implied by existing constraints/
+ ),
+ 'range_part1 not verified by existing constraints');
+
+run_sql_command(
+ 'CREATE TABLE range_part2 (
+ a int NOT NULL CHECK (a = 1),
+ b int NOT NULL CHECK (b >= 10 and b < 18)
+);');
+$output = run_sql_command(
+ 'ALTER TABLE range_parted ATTACH PARTITION range_part2 FOR VALUES FROM (1, 10) TO (1, 20);'
+);
+ok(!is_table_verified($output), 'table range_part2 not scanned');
+ok( $output =~
+ m/partition constraint for table "range_part2" is implied by existing constraints/,
+ 'range_part2 verified by existing constraints');
+
+# If a partitioned table being created or an existing table being attached
+# as a partition does not have a constraint that would allow validation scan
+# to be skipped, but an individual partition does, then the partition's
+# validation scan is skipped.
+run_sql_command(
+ 'CREATE TABLE quuux (a int, b text) PARTITION BY LIST (a);
+ CREATE TABLE quuux_default PARTITION OF quuux DEFAULT PARTITION BY LIST (b);
+ CREATE TABLE quuux_default1 PARTITION OF quuux_default (
+ CONSTRAINT check_1 CHECK (a IS NOT NULL AND a = 1)
+ ) FOR VALUES IN (\'b\');
+ CREATE TABLE quuux1 (a int, b text);');
+
+$output = run_sql_command(
+ 'ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1);');
+ok(is_table_verified($output), 'quuux1 table scanned');
+ok( !( $output =~
+ m/partition constraint for table "quuux1" is implied by existing constraints/
+ ),
+ 'quuux1 verified by existing constraints');
+
+run_sql_command('CREATE TABLE quuux2 (a int, b text);');
+$output = run_sql_command(
+ 'ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2);');
+ok(!($output =~ m/verifying table "quuux_default1"/),
+ 'quuux_default1 not scanned');
+ok($output =~ m/verifying table "quuux2"/, 'quuux2 scanned');
+ok( $output =~
+ m/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
+ 'updated partition constraint for default partition quuux_default1');
+run_sql_command('DROP TABLE quuux1, quuux2;');
+
+# should validate for quuux1, but not for quuux2
+$output = run_sql_command(
+ 'CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1);');
+ok(!is_table_verified($output), 'tables not scanned');
+ok( !( $output =~
+ m/partition constraint for table "quuux1" is implied by existing constraints/
+ ),
+ 'quuux1 verified by existing constraints');
+$output = run_sql_command(
+ 'CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2);');
+ok(!is_table_verified($output), 'tables not scanned');
+ok( $output =~
+ m/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
+ 'updated partition constraint for default partition quuux_default1');
+run_sql_command('DROP TABLE quuux;');
+
+$node->stop('fast');
diff --git a/src/test/modules/test_parser/.gitignore b/src/test/modules/test_parser/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_parser/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_parser/Makefile b/src/test/modules/test_parser/Makefile
new file mode 100644
index 0000000..5327080
--- /dev/null
+++ b/src/test/modules/test_parser/Makefile
@@ -0,0 +1,23 @@
+# src/test/modules/test_parser/Makefile
+
+MODULE_big = test_parser
+OBJS = \
+ $(WIN32RES) \
+ test_parser.o
+PGFILEDESC = "test_parser - example of a custom parser for full-text search"
+
+EXTENSION = test_parser
+DATA = test_parser--1.0.sql
+
+REGRESS = test_parser
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_parser
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_parser/README b/src/test/modules/test_parser/README
new file mode 100644
index 0000000..0a11ec8
--- /dev/null
+++ b/src/test/modules/test_parser/README
@@ -0,0 +1,61 @@
+test_parser is an example of a custom parser for full-text
+search. It doesn't do anything especially useful, but can serve as
+a starting point for developing your own parser.
+
+test_parser recognizes words separated by white space,
+and returns just two token types:
+
+mydb=# SELECT * FROM ts_token_type('testparser');
+ tokid | alias | description
+-------+-------+---------------
+ 3 | word | Word
+ 12 | blank | Space symbols
+(2 rows)
+
+These token numbers have been chosen to be compatible with the default
+parser's numbering. This allows us to use its headline()
+function, thus keeping the example simple.
+
+Usage
+=====
+
+Installing the test_parser extension creates a text search
+parser testparser. It has no user-configurable parameters.
+
+You can test the parser with, for example,
+
+mydb=# SELECT * FROM ts_parse('testparser', 'That''s my first own parser');
+ tokid | token
+-------+--------
+ 3 | That's
+ 12 |
+ 3 | my
+ 12 |
+ 3 | first
+ 12 |
+ 3 | own
+ 12 |
+ 3 | parser
+
+Real-world use requires setting up a text search configuration
+that uses the parser. For example,
+
+mydb=# CREATE TEXT SEARCH CONFIGURATION testcfg ( PARSER = testparser );
+CREATE TEXT SEARCH CONFIGURATION
+
+mydb=# ALTER TEXT SEARCH CONFIGURATION testcfg
+mydb-# ADD MAPPING FOR word WITH english_stem;
+ALTER TEXT SEARCH CONFIGURATION
+
+mydb=# SELECT to_tsvector('testcfg', 'That''s my first own parser');
+ to_tsvector
+-------------------------------
+ 'that':1 'first':3 'parser':5
+(1 row)
+
+mydb=# SELECT ts_headline('testcfg', 'Supernovae stars are the brightest phenomena in galaxies',
+mydb(# to_tsquery('testcfg', 'star'));
+ ts_headline
+-----------------------------------------------------------------
+ Supernovae <b>stars</b> are the brightest phenomena in galaxies
+(1 row)
diff --git a/src/test/modules/test_parser/expected/test_parser.out b/src/test/modules/test_parser/expected/test_parser.out
new file mode 100644
index 0000000..8a49bc0
--- /dev/null
+++ b/src/test/modules/test_parser/expected/test_parser.out
@@ -0,0 +1,44 @@
+CREATE EXTENSION test_parser;
+-- make test configuration using parser
+CREATE TEXT SEARCH CONFIGURATION testcfg (PARSER = testparser);
+ALTER TEXT SEARCH CONFIGURATION testcfg ADD MAPPING FOR word WITH simple;
+-- ts_parse
+SELECT * FROM ts_parse('testparser', 'That''s simple parser can''t parse urls like http://some.url/here/');
+ tokid | token
+-------+-----------------------
+ 3 | That's
+ 12 |
+ 3 | simple
+ 12 |
+ 3 | parser
+ 12 |
+ 3 | can't
+ 12 |
+ 3 | parse
+ 12 |
+ 3 | urls
+ 12 |
+ 3 | like
+ 12 |
+ 3 | http://some.url/here/
+(15 rows)
+
+SELECT to_tsvector('testcfg','That''s my first own parser');
+ to_tsvector
+-------------------------------------------------
+ 'first':3 'my':2 'own':4 'parser':5 'that''s':1
+(1 row)
+
+SELECT to_tsquery('testcfg', 'star');
+ to_tsquery
+------------
+ 'star'
+(1 row)
+
+SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies',
+ to_tsquery('testcfg', 'stars'));
+ ts_headline
+-----------------------------------------------------------------
+ Supernovae <b>stars</b> are the brightest phenomena in galaxies
+(1 row)
+
diff --git a/src/test/modules/test_parser/sql/test_parser.sql b/src/test/modules/test_parser/sql/test_parser.sql
new file mode 100644
index 0000000..1f21504
--- /dev/null
+++ b/src/test/modules/test_parser/sql/test_parser.sql
@@ -0,0 +1,18 @@
+CREATE EXTENSION test_parser;
+
+-- make test configuration using parser
+
+CREATE TEXT SEARCH CONFIGURATION testcfg (PARSER = testparser);
+
+ALTER TEXT SEARCH CONFIGURATION testcfg ADD MAPPING FOR word WITH simple;
+
+-- ts_parse
+
+SELECT * FROM ts_parse('testparser', 'That''s simple parser can''t parse urls like http://some.url/here/');
+
+SELECT to_tsvector('testcfg','That''s my first own parser');
+
+SELECT to_tsquery('testcfg', 'star');
+
+SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies',
+ to_tsquery('testcfg', 'stars'));
diff --git a/src/test/modules/test_parser/test_parser--1.0.sql b/src/test/modules/test_parser/test_parser--1.0.sql
new file mode 100644
index 0000000..56bb244
--- /dev/null
+++ b/src/test/modules/test_parser/test_parser--1.0.sql
@@ -0,0 +1,32 @@
+/* src/test/modules/test_parser/test_parser--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_parser" to load this file. \quit
+
+CREATE FUNCTION testprs_start(internal, int4)
+RETURNS internal
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION testprs_getlexeme(internal, internal, internal)
+RETURNS internal
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION testprs_end(internal)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION testprs_lextype(internal)
+RETURNS internal
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE TEXT SEARCH PARSER testparser (
+ START = testprs_start,
+ GETTOKEN = testprs_getlexeme,
+ END = testprs_end,
+ HEADLINE = pg_catalog.prsd_headline,
+ LEXTYPES = testprs_lextype
+);
diff --git a/src/test/modules/test_parser/test_parser.c b/src/test/modules/test_parser/test_parser.c
new file mode 100644
index 0000000..1279e9a
--- /dev/null
+++ b/src/test/modules/test_parser/test_parser.c
@@ -0,0 +1,127 @@
+/*-------------------------------------------------------------------------
+ *
+ * test_parser.c
+ * Simple example of a text search parser
+ *
+ * Copyright (c) 2007-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_parser/test_parser.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "fmgr.h"
+
+PG_MODULE_MAGIC;
+
+/*
+ * types
+ */
+
+/* self-defined type */
+typedef struct
+{
+ char *buffer; /* text to parse */
+ int len; /* length of the text in buffer */
+ int pos; /* position of the parser */
+} ParserState;
+
+typedef struct
+{
+ int lexid;
+ char *alias;
+ char *descr;
+} LexDescr;
+
+/*
+ * functions
+ */
+PG_FUNCTION_INFO_V1(testprs_start);
+PG_FUNCTION_INFO_V1(testprs_getlexeme);
+PG_FUNCTION_INFO_V1(testprs_end);
+PG_FUNCTION_INFO_V1(testprs_lextype);
+
+Datum
+testprs_start(PG_FUNCTION_ARGS)
+{
+ ParserState *pst = (ParserState *) palloc0(sizeof(ParserState));
+
+ pst->buffer = (char *) PG_GETARG_POINTER(0);
+ pst->len = PG_GETARG_INT32(1);
+ pst->pos = 0;
+
+ PG_RETURN_POINTER(pst);
+}
+
+Datum
+testprs_getlexeme(PG_FUNCTION_ARGS)
+{
+ ParserState *pst = (ParserState *) PG_GETARG_POINTER(0);
+ char **t = (char **) PG_GETARG_POINTER(1);
+ int *tlen = (int *) PG_GETARG_POINTER(2);
+ int startpos = pst->pos;
+ int type;
+
+ *t = pst->buffer + pst->pos;
+
+ if (pst->pos < pst->len &&
+ (pst->buffer)[pst->pos] == ' ')
+ {
+ /* blank type */
+ type = 12;
+ /* go to the next non-space character */
+ while (pst->pos < pst->len &&
+ (pst->buffer)[pst->pos] == ' ')
+ (pst->pos)++;
+ }
+ else
+ {
+ /* word type */
+ type = 3;
+ /* go to the next space character */
+ while (pst->pos < pst->len &&
+ (pst->buffer)[pst->pos] != ' ')
+ (pst->pos)++;
+ }
+
+ *tlen = pst->pos - startpos;
+
+ /* we are finished if (*tlen == 0) */
+ if (*tlen == 0)
+ type = 0;
+
+ PG_RETURN_INT32(type);
+}
+
+Datum
+testprs_end(PG_FUNCTION_ARGS)
+{
+ ParserState *pst = (ParserState *) PG_GETARG_POINTER(0);
+
+ pfree(pst);
+ PG_RETURN_VOID();
+}
+
+Datum
+testprs_lextype(PG_FUNCTION_ARGS)
+{
+ /*
+	 * Remarks: we must also return the blank tokens, because the headline
+	 * function needs them.  We use the same lexids as the default word
+	 * parser, which lets us reuse its headline function.
+ */
+ LexDescr *descr = (LexDescr *) palloc(sizeof(LexDescr) * (2 + 1));
+
+ /* there are only two types in this parser */
+ descr[0].lexid = 3;
+ descr[0].alias = pstrdup("word");
+ descr[0].descr = pstrdup("Word");
+ descr[1].lexid = 12;
+ descr[1].alias = pstrdup("blank");
+ descr[1].descr = pstrdup("Space symbols");
+ descr[2].lexid = 0;
+
+ PG_RETURN_POINTER(descr);
+}
diff --git a/src/test/modules/test_parser/test_parser.control b/src/test/modules/test_parser/test_parser.control
new file mode 100644
index 0000000..36b26b2
--- /dev/null
+++ b/src/test/modules/test_parser/test_parser.control
@@ -0,0 +1,5 @@
+# test_parser extension
+comment = 'example of a custom parser for full-text search'
+default_version = '1.0'
+module_pathname = '$libdir/test_parser'
+relocatable = true
diff --git a/src/test/modules/test_pg_dump/.gitignore b/src/test/modules/test_pg_dump/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_pg_dump/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_pg_dump/Makefile b/src/test/modules/test_pg_dump/Makefile
new file mode 100644
index 0000000..6123b99
--- /dev/null
+++ b/src/test/modules/test_pg_dump/Makefile
@@ -0,0 +1,21 @@
+# src/test/modules/test_pg_dump/Makefile
+
+MODULE = test_pg_dump
+PGFILEDESC = "test_pg_dump - Test pg_dump with an extension"
+
+EXTENSION = test_pg_dump
+DATA = test_pg_dump--1.0.sql
+
+REGRESS = test_pg_dump
+TAP_TESTS = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_pg_dump
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_pg_dump/README b/src/test/modules/test_pg_dump/README
new file mode 100644
index 0000000..b7c2e33
--- /dev/null
+++ b/src/test/modules/test_pg_dump/README
@@ -0,0 +1,4 @@
+test_pg_dump is an extension explicitly to test pg_dump when
+extensions are present in the system.
+
+We also make use of this module to test ALTER EXTENSION ADD/DROP.
diff --git a/src/test/modules/test_pg_dump/expected/test_pg_dump.out b/src/test/modules/test_pg_dump/expected/test_pg_dump.out
new file mode 100644
index 0000000..a50eaf6
--- /dev/null
+++ b/src/test/modules/test_pg_dump/expected/test_pg_dump.out
@@ -0,0 +1,95 @@
+CREATE ROLE regress_dump_test_role;
+CREATE EXTENSION test_pg_dump;
+ALTER EXTENSION test_pg_dump ADD DATABASE postgres; -- error
+ERROR: syntax error at or near "DATABASE"
+LINE 1: ALTER EXTENSION test_pg_dump ADD DATABASE postgres;
+ ^
+CREATE TABLE test_pg_dump_t1 (c1 int, junk text);
+ALTER TABLE test_pg_dump_t1 DROP COLUMN junk; -- to exercise dropped-col cases
+CREATE VIEW test_pg_dump_v1 AS SELECT * FROM test_pg_dump_t1;
+CREATE MATERIALIZED VIEW test_pg_dump_mv1 AS SELECT * FROM test_pg_dump_t1;
+CREATE SCHEMA test_pg_dump_s1;
+CREATE TYPE test_pg_dump_e1 AS ENUM ('abc', 'def');
+CREATE AGGREGATE newavg (
+ sfunc = int4_avg_accum, basetype = int4, stype = _int8,
+ finalfunc = int8_avg,
+ initcond1 = '{0,0}'
+);
+CREATE FUNCTION test_pg_dump(int) RETURNS int AS $$
+BEGIN
+RETURN abs($1);
+END
+$$ LANGUAGE plpgsql IMMUTABLE;
+CREATE OPERATOR ==== (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ====
+);
+CREATE ACCESS METHOD gist2 TYPE INDEX HANDLER gisthandler;
+CREATE TYPE casttesttype;
+CREATE FUNCTION casttesttype_in(cstring)
+ RETURNS casttesttype
+ AS 'textin'
+ LANGUAGE internal STRICT IMMUTABLE;
+NOTICE: return type casttesttype is only a shell
+CREATE FUNCTION casttesttype_out(casttesttype)
+ RETURNS cstring
+ AS 'textout'
+ LANGUAGE internal STRICT IMMUTABLE;
+NOTICE: argument type casttesttype is only a shell
+CREATE TYPE casttesttype (
+ internallength = variable,
+ input = casttesttype_in,
+ output = casttesttype_out,
+ alignment = int4
+);
+CREATE CAST (text AS casttesttype) WITHOUT FUNCTION;
+CREATE FOREIGN DATA WRAPPER dummy;
+CREATE SERVER s0 FOREIGN DATA WRAPPER dummy;
+CREATE FOREIGN TABLE ft1 (
+ c1 integer OPTIONS ("param 1" 'val1') NOT NULL,
+ c2 text OPTIONS (param2 'val2', param3 'val3') CHECK (c2 <> ''),
+ c3 date,
+ CHECK (c3 BETWEEN '1994-01-01'::date AND '1994-01-31'::date)
+) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
+REVOKE EXECUTE ON FUNCTION test_pg_dump(int) FROM PUBLIC;
+GRANT EXECUTE ON FUNCTION test_pg_dump(int) TO regress_dump_test_role;
+GRANT SELECT (c1) ON test_pg_dump_t1 TO regress_dump_test_role;
+GRANT SELECT ON test_pg_dump_v1 TO regress_dump_test_role;
+GRANT USAGE ON FOREIGN DATA WRAPPER dummy TO regress_dump_test_role;
+GRANT USAGE ON FOREIGN SERVER s0 TO regress_dump_test_role;
+GRANT SELECT (c1) ON ft1 TO regress_dump_test_role;
+GRANT SELECT ON ft1 TO regress_dump_test_role;
+GRANT UPDATE ON test_pg_dump_mv1 TO regress_dump_test_role;
+GRANT USAGE ON SCHEMA test_pg_dump_s1 TO regress_dump_test_role;
+GRANT USAGE ON TYPE test_pg_dump_e1 TO regress_dump_test_role;
+ALTER EXTENSION test_pg_dump ADD ACCESS METHOD gist2;
+ALTER EXTENSION test_pg_dump ADD AGGREGATE newavg(int4);
+ALTER EXTENSION test_pg_dump ADD CAST (text AS casttesttype);
+ALTER EXTENSION test_pg_dump ADD FOREIGN DATA WRAPPER dummy;
+ALTER EXTENSION test_pg_dump ADD FOREIGN TABLE ft1;
+ALTER EXTENSION test_pg_dump ADD MATERIALIZED VIEW test_pg_dump_mv1;
+ALTER EXTENSION test_pg_dump ADD OPERATOR ==== (int, int);
+ALTER EXTENSION test_pg_dump ADD SCHEMA test_pg_dump_s1;
+ALTER EXTENSION test_pg_dump ADD SERVER s0;
+ALTER EXTENSION test_pg_dump ADD FUNCTION test_pg_dump(int);
+ALTER EXTENSION test_pg_dump ADD TABLE test_pg_dump_t1;
+ALTER EXTENSION test_pg_dump ADD TYPE test_pg_dump_e1;
+ALTER EXTENSION test_pg_dump ADD VIEW test_pg_dump_v1;
+REVOKE SELECT (c1) ON test_pg_dump_t1 FROM regress_dump_test_role;
+REVOKE SELECT ON test_pg_dump_v1 FROM regress_dump_test_role;
+REVOKE USAGE ON FOREIGN DATA WRAPPER dummy FROM regress_dump_test_role;
+ALTER EXTENSION test_pg_dump DROP ACCESS METHOD gist2;
+ALTER EXTENSION test_pg_dump DROP AGGREGATE newavg(int4);
+ALTER EXTENSION test_pg_dump DROP CAST (text AS casttesttype);
+ALTER EXTENSION test_pg_dump DROP FOREIGN DATA WRAPPER dummy;
+ALTER EXTENSION test_pg_dump DROP FOREIGN TABLE ft1;
+ALTER EXTENSION test_pg_dump DROP FUNCTION test_pg_dump(int);
+ALTER EXTENSION test_pg_dump DROP MATERIALIZED VIEW test_pg_dump_mv1;
+ALTER EXTENSION test_pg_dump DROP OPERATOR ==== (int, int);
+ALTER EXTENSION test_pg_dump DROP SCHEMA test_pg_dump_s1;
+ALTER EXTENSION test_pg_dump DROP SERVER s0;
+ALTER EXTENSION test_pg_dump DROP TABLE test_pg_dump_t1;
+ALTER EXTENSION test_pg_dump DROP TYPE test_pg_dump_e1;
+ALTER EXTENSION test_pg_dump DROP VIEW test_pg_dump_v1;
diff --git a/src/test/modules/test_pg_dump/sql/test_pg_dump.sql b/src/test/modules/test_pg_dump/sql/test_pg_dump.sql
new file mode 100644
index 0000000..a61a7c8
--- /dev/null
+++ b/src/test/modules/test_pg_dump/sql/test_pg_dump.sql
@@ -0,0 +1,108 @@
+CREATE ROLE regress_dump_test_role;
+CREATE EXTENSION test_pg_dump;
+
+ALTER EXTENSION test_pg_dump ADD DATABASE postgres; -- error
+
+CREATE TABLE test_pg_dump_t1 (c1 int, junk text);
+ALTER TABLE test_pg_dump_t1 DROP COLUMN junk; -- to exercise dropped-col cases
+CREATE VIEW test_pg_dump_v1 AS SELECT * FROM test_pg_dump_t1;
+CREATE MATERIALIZED VIEW test_pg_dump_mv1 AS SELECT * FROM test_pg_dump_t1;
+CREATE SCHEMA test_pg_dump_s1;
+CREATE TYPE test_pg_dump_e1 AS ENUM ('abc', 'def');
+
+CREATE AGGREGATE newavg (
+ sfunc = int4_avg_accum, basetype = int4, stype = _int8,
+ finalfunc = int8_avg,
+ initcond1 = '{0,0}'
+);
+
+CREATE FUNCTION test_pg_dump(int) RETURNS int AS $$
+BEGIN
+RETURN abs($1);
+END
+$$ LANGUAGE plpgsql IMMUTABLE;
+
+CREATE OPERATOR ==== (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ====
+);
+
+CREATE ACCESS METHOD gist2 TYPE INDEX HANDLER gisthandler;
+
+CREATE TYPE casttesttype;
+
+CREATE FUNCTION casttesttype_in(cstring)
+ RETURNS casttesttype
+ AS 'textin'
+ LANGUAGE internal STRICT IMMUTABLE;
+CREATE FUNCTION casttesttype_out(casttesttype)
+ RETURNS cstring
+ AS 'textout'
+ LANGUAGE internal STRICT IMMUTABLE;
+
+CREATE TYPE casttesttype (
+ internallength = variable,
+ input = casttesttype_in,
+ output = casttesttype_out,
+ alignment = int4
+);
+
+CREATE CAST (text AS casttesttype) WITHOUT FUNCTION;
+
+CREATE FOREIGN DATA WRAPPER dummy;
+
+CREATE SERVER s0 FOREIGN DATA WRAPPER dummy;
+
+CREATE FOREIGN TABLE ft1 (
+ c1 integer OPTIONS ("param 1" 'val1') NOT NULL,
+ c2 text OPTIONS (param2 'val2', param3 'val3') CHECK (c2 <> ''),
+ c3 date,
+ CHECK (c3 BETWEEN '1994-01-01'::date AND '1994-01-31'::date)
+) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
+
+REVOKE EXECUTE ON FUNCTION test_pg_dump(int) FROM PUBLIC;
+GRANT EXECUTE ON FUNCTION test_pg_dump(int) TO regress_dump_test_role;
+
+GRANT SELECT (c1) ON test_pg_dump_t1 TO regress_dump_test_role;
+GRANT SELECT ON test_pg_dump_v1 TO regress_dump_test_role;
+GRANT USAGE ON FOREIGN DATA WRAPPER dummy TO regress_dump_test_role;
+GRANT USAGE ON FOREIGN SERVER s0 TO regress_dump_test_role;
+GRANT SELECT (c1) ON ft1 TO regress_dump_test_role;
+GRANT SELECT ON ft1 TO regress_dump_test_role;
+GRANT UPDATE ON test_pg_dump_mv1 TO regress_dump_test_role;
+GRANT USAGE ON SCHEMA test_pg_dump_s1 TO regress_dump_test_role;
+GRANT USAGE ON TYPE test_pg_dump_e1 TO regress_dump_test_role;
+
+ALTER EXTENSION test_pg_dump ADD ACCESS METHOD gist2;
+ALTER EXTENSION test_pg_dump ADD AGGREGATE newavg(int4);
+ALTER EXTENSION test_pg_dump ADD CAST (text AS casttesttype);
+ALTER EXTENSION test_pg_dump ADD FOREIGN DATA WRAPPER dummy;
+ALTER EXTENSION test_pg_dump ADD FOREIGN TABLE ft1;
+ALTER EXTENSION test_pg_dump ADD MATERIALIZED VIEW test_pg_dump_mv1;
+ALTER EXTENSION test_pg_dump ADD OPERATOR ==== (int, int);
+ALTER EXTENSION test_pg_dump ADD SCHEMA test_pg_dump_s1;
+ALTER EXTENSION test_pg_dump ADD SERVER s0;
+ALTER EXTENSION test_pg_dump ADD FUNCTION test_pg_dump(int);
+ALTER EXTENSION test_pg_dump ADD TABLE test_pg_dump_t1;
+ALTER EXTENSION test_pg_dump ADD TYPE test_pg_dump_e1;
+ALTER EXTENSION test_pg_dump ADD VIEW test_pg_dump_v1;
+
+REVOKE SELECT (c1) ON test_pg_dump_t1 FROM regress_dump_test_role;
+REVOKE SELECT ON test_pg_dump_v1 FROM regress_dump_test_role;
+REVOKE USAGE ON FOREIGN DATA WRAPPER dummy FROM regress_dump_test_role;
+
+ALTER EXTENSION test_pg_dump DROP ACCESS METHOD gist2;
+ALTER EXTENSION test_pg_dump DROP AGGREGATE newavg(int4);
+ALTER EXTENSION test_pg_dump DROP CAST (text AS casttesttype);
+ALTER EXTENSION test_pg_dump DROP FOREIGN DATA WRAPPER dummy;
+ALTER EXTENSION test_pg_dump DROP FOREIGN TABLE ft1;
+ALTER EXTENSION test_pg_dump DROP FUNCTION test_pg_dump(int);
+ALTER EXTENSION test_pg_dump DROP MATERIALIZED VIEW test_pg_dump_mv1;
+ALTER EXTENSION test_pg_dump DROP OPERATOR ==== (int, int);
+ALTER EXTENSION test_pg_dump DROP SCHEMA test_pg_dump_s1;
+ALTER EXTENSION test_pg_dump DROP SERVER s0;
+ALTER EXTENSION test_pg_dump DROP TABLE test_pg_dump_t1;
+ALTER EXTENSION test_pg_dump DROP TYPE test_pg_dump_e1;
+ALTER EXTENSION test_pg_dump DROP VIEW test_pg_dump_v1;
diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl
new file mode 100644
index 0000000..501aff0
--- /dev/null
+++ b/src/test/modules/test_pg_dump/t/001_base.pl
@@ -0,0 +1,786 @@
+use strict;
+use warnings;
+
+use Config;
+use PostgresNode;
+use TestLib;
+use Test::More;
+
+my $tempdir = TestLib::tempdir;
+my $tempdir_short = TestLib::tempdir_short;
+
+###############################################################
+# This structure is based on the src/bin/pg_dump/t test
+# suite.
+###############################################################
+# Definition of the pg_dump runs to make.
+#
+# Each of these runs are named and those names are used below
+# to define how each test should (or shouldn't) treat a result
+# from a given run.
+#
+# test_key indicates that a given run should simply use the same
+# set of like/unlike tests as another run, and which run that is.
+#
+# dump_cmd is the pg_dump command to run, which is an array of
+# the full command and arguments to run. Note that this is run
+# using $node->command_ok(), so the port does not need to be
+# specified and is pulled from $PGPORT, which is set by the
+# PostgresNode system.
+#
+# restore_cmd is the pg_restore command to run, if any. Note
+# that this should generally be used when the pg_dump goes to
+# a non-text file and that the restore can then be used to
+# generate a text file to run through the tests from the
+# non-text file generated by pg_dump.
+#
+# TODO: Have pg_restore actually restore to an independent
+# database and then pg_dump *that* database (or something along
+# those lines) to validate that part of the process.
+
+my %pgdump_runs = (
+ # The only run that dumps extension member objects individually,
+ # thanks to --binary-upgrade.
+ binary_upgrade => {
+ dump_cmd => [
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/binary_upgrade.sql", '--schema-only',
+ '--binary-upgrade', '--dbname=postgres',
+ ],
+ },
+ clean => {
+ dump_cmd => [
+ 'pg_dump', "--file=$tempdir/clean.sql",
+ '-c', '--no-sync',
+ '--dbname=postgres',
+ ],
+ },
+ clean_if_exists => {
+ dump_cmd => [
+ 'pg_dump',
+ '--no-sync',
+ "--file=$tempdir/clean_if_exists.sql",
+ '-c',
+ '--if-exists',
+ '--encoding=UTF8', # no-op, just tests that option is accepted
+ 'postgres',
+ ],
+ },
+ createdb => {
+ dump_cmd => [
+ 'pg_dump',
+ '--no-sync',
+ "--file=$tempdir/createdb.sql",
+ '-C',
+ '-R', # no-op, just for testing
+ 'postgres',
+ ],
+ },
+ data_only => {
+ dump_cmd => [
+ 'pg_dump',
+ '--no-sync',
+ "--file=$tempdir/data_only.sql",
+ '-a',
+ '-v', # no-op, just make sure it works
+ 'postgres',
+ ],
+ },
+ defaults => {
+ dump_cmd => [ 'pg_dump', '-f', "$tempdir/defaults.sql", 'postgres', ],
+ },
+ # The next several runs dump to non-text formats and use pg_restore
+ # to convert back to SQL; they share the 'defaults' expectations
+ # via test_key.
+ defaults_custom_format => {
+ test_key => 'defaults',
+ dump_cmd => [
+ 'pg_dump', '--no-sync', '-Fc', '-Z6',
+ "--file=$tempdir/defaults_custom_format.dump", 'postgres',
+ ],
+ restore_cmd => [
+ 'pg_restore',
+ "--file=$tempdir/defaults_custom_format.sql",
+ "$tempdir/defaults_custom_format.dump",
+ ],
+ },
+ defaults_dir_format => {
+ test_key => 'defaults',
+ dump_cmd => [
+ 'pg_dump', '--no-sync', '-Fd',
+ "--file=$tempdir/defaults_dir_format", 'postgres',
+ ],
+ restore_cmd => [
+ 'pg_restore',
+ "--file=$tempdir/defaults_dir_format.sql",
+ "$tempdir/defaults_dir_format",
+ ],
+ },
+ defaults_parallel => {
+ test_key => 'defaults',
+ dump_cmd => [
+ 'pg_dump', '--no-sync', '-Fd', '-j2',
+ "--file=$tempdir/defaults_parallel", 'postgres',
+ ],
+ restore_cmd => [
+ 'pg_restore',
+ "--file=$tempdir/defaults_parallel.sql",
+ "$tempdir/defaults_parallel",
+ ],
+ },
+ defaults_tar_format => {
+ test_key => 'defaults',
+ dump_cmd => [
+ 'pg_dump', '--no-sync', '-Ft',
+ "--file=$tempdir/defaults_tar_format.tar", 'postgres',
+ ],
+ restore_cmd => [
+ 'pg_restore',
+ "--file=$tempdir/defaults_tar_format.sql",
+ "$tempdir/defaults_tar_format.tar",
+ ],
+ },
+ exclude_table => {
+ dump_cmd => [
+ 'pg_dump',
+ '--exclude-table=regress_table_dumpable',
+ "--file=$tempdir/exclude_table.sql",
+ 'postgres',
+ ],
+ },
+ extension_schema => {
+ dump_cmd => [
+ 'pg_dump', '--schema=public',
+ "--file=$tempdir/extension_schema.sql", 'postgres',
+ ],
+ },
+ # Dumps only cluster-wide objects (roles etc.), not database contents.
+ pg_dumpall_globals => {
+ dump_cmd => [
+ 'pg_dumpall', '--no-sync',
+ "--file=$tempdir/pg_dumpall_globals.sql", '-g',
+ ],
+ },
+ no_privs => {
+ dump_cmd => [
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/no_privs.sql", '-x',
+ 'postgres',
+ ],
+ },
+ no_owner => {
+ dump_cmd => [
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/no_owner.sql", '-O',
+ 'postgres',
+ ],
+ },
+ schema_only => {
+ dump_cmd => [
+ 'pg_dump', '--no-sync', "--file=$tempdir/schema_only.sql",
+ '-s', 'postgres',
+ ],
+ },
+ section_pre_data => {
+ dump_cmd => [
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/section_pre_data.sql", '--section=pre-data',
+ 'postgres',
+ ],
+ },
+ section_data => {
+ dump_cmd => [
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/section_data.sql", '--section=data',
+ 'postgres',
+ ],
+ },
+ section_post_data => {
+ dump_cmd => [
+ 'pg_dump', '--no-sync', "--file=$tempdir/section_post_data.sql",
+ '--section=post-data', 'postgres',
+ ],
+ },);
+
+###############################################################
+# Definition of the tests to run.
+#
+# Each test is defined using the log message that will be used.
+#
+# A regexp should be defined for each test which provides the
+# basis for the test. That regexp will be run against the output
+# file of each of the runs which the test is to be run against
+# and the success of the result will depend on if the regexp
+# result matches the expected 'like' or 'unlike' case.
+# The runs listed as 'like' will be checked if they match the
+# regexp and, if so, the test passes. All runs which are not
+# listed as 'like' will be checked to ensure they don't match
+# the regexp; if they do, the test will fail.
+#
+# The below hashes provide convenience sets of runs. Individual
+# runs can be excluded from a general hash by placing that run
+# into the 'unlike' section.
+#
+# There can then be a 'create_sql' and 'create_order' for a
+# given test. The 'create_sql' commands are collected up in
+# 'create_order' and then run against the database prior to any
+# of the pg_dump runs happening. This is what "seeds" the
+# system with objects to be dumped out.
+#
+# Building of this hash takes a bit of time as all of the regexps
+# included in it are compiled. This greatly improves performance
+# as the regexps are used for each run the test applies to.
+
+# Tests which are considered 'full' dumps by pg_dump, but there
+# are flags used to exclude specific items (ACLs, blobs, etc).
+# Splice this hash into a test's 'like' set to mark the test as
+# expected to match in every full-dump run at once.
+my %full_runs = (
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ exclude_table => 1,
+ no_privs => 1,
+ no_owner => 1,);
+
+my %tests = (
+ 'ALTER EXTENSION test_pg_dump' => {
+ create_order => 9,
+ create_sql =>
+ 'ALTER EXTENSION test_pg_dump ADD TABLE regress_pg_dump_table_added;',
+ regexp => qr/^
+ \QCREATE TABLE public.regress_pg_dump_table_added (\E
+ \n\s+\Qcol1 integer NOT NULL,\E
+ \n\s+\Qcol2 integer\E
+ \n\);\n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'CREATE EXTENSION test_pg_dump' => {
+ create_order => 2,
+ create_sql => 'CREATE EXTENSION test_pg_dump;',
+ regexp => qr/^
+ \QCREATE EXTENSION IF NOT EXISTS test_pg_dump WITH SCHEMA public;\E
+ \n/xm,
+ like => {
+ %full_runs,
+ schema_only => 1,
+ section_pre_data => 1,
+ },
+ unlike => { binary_upgrade => 1, },
+ },
+
+ 'CREATE ROLE regress_dump_test_role' => {
+ create_order => 1,
+ create_sql => 'CREATE ROLE regress_dump_test_role;',
+ regexp => qr/^CREATE ROLE regress_dump_test_role;\n/m,
+ like => { pg_dumpall_globals => 1, },
+ },
+
+ 'CREATE SEQUENCE regress_pg_dump_table_col1_seq' => {
+ regexp => qr/^
+ \QCREATE SEQUENCE public.regress_pg_dump_table_col1_seq\E
+ \n\s+\QAS integer\E
+ \n\s+\QSTART WITH 1\E
+ \n\s+\QINCREMENT BY 1\E
+ \n\s+\QNO MINVALUE\E
+ \n\s+\QNO MAXVALUE\E
+ \n\s+\QCACHE 1;\E
+ \n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'CREATE TABLE regress_pg_dump_table_added' => {
+ create_order => 7,
+ create_sql =>
+ 'CREATE TABLE regress_pg_dump_table_added (col1 int not null, col2 int);',
+ regexp => qr/^
+ \QCREATE TABLE public.regress_pg_dump_table_added (\E
+ \n\s+\Qcol1 integer NOT NULL,\E
+ \n\s+\Qcol2 integer\E
+ \n\);\n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'CREATE SEQUENCE regress_pg_dump_seq' => {
+ regexp => qr/^
+ \QCREATE SEQUENCE public.regress_pg_dump_seq\E
+ \n\s+\QSTART WITH 1\E
+ \n\s+\QINCREMENT BY 1\E
+ \n\s+\QNO MINVALUE\E
+ \n\s+\QNO MAXVALUE\E
+ \n\s+\QCACHE 1;\E
+ \n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'SETVAL SEQUENCE regress_seq_dumpable' => {
+ create_order => 6,
+ create_sql => qq{SELECT nextval('regress_seq_dumpable');},
+ regexp => qr/^
+ \QSELECT pg_catalog.setval('public.regress_seq_dumpable', 1, true);\E
+ \n/xm,
+ like => {
+ %full_runs,
+ data_only => 1,
+ section_data => 1,
+ extension_schema => 1,
+ },
+ },
+
+ 'CREATE TABLE regress_pg_dump_table' => {
+ regexp => qr/^
+ \QCREATE TABLE public.regress_pg_dump_table (\E
+ \n\s+\Qcol1 integer NOT NULL,\E
+ \n\s+\Qcol2 integer,\E
+ \n\s+\QCONSTRAINT regress_pg_dump_table_col2_check CHECK ((col2 > 0))\E
+ \n\);\n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'COPY public.regress_table_dumpable (col1)' => {
+ regexp => qr/^
+ \QCOPY public.regress_table_dumpable (col1) FROM stdin;\E
+ \n/xm,
+ like => {
+ %full_runs,
+ data_only => 1,
+ section_data => 1,
+ extension_schema => 1,
+ },
+ unlike => {
+ binary_upgrade => 1,
+ exclude_table => 1,
+ },
+ },
+
+ # "wgo" in the object names below stands for WITH GRANT OPTION.
+ 'REVOKE ALL ON FUNCTION wgo_then_no_access' => {
+ create_order => 3,
+ create_sql => q{
+ DO $$BEGIN EXECUTE format(
+ 'REVOKE ALL ON FUNCTION wgo_then_no_access()
+ FROM pg_signal_backend, public, %I',
+ (SELECT usename
+ FROM pg_user JOIN pg_proc ON proowner = usesysid
+ WHERE proname = 'wgo_then_no_access')); END$$;},
+ regexp => qr/^
+ \QREVOKE ALL ON FUNCTION public.wgo_then_no_access() FROM PUBLIC;\E
+ \n\QREVOKE ALL ON FUNCTION public.wgo_then_no_access() FROM \E.*;
+ \n\QREVOKE ALL ON FUNCTION public.wgo_then_no_access() FROM pg_signal_backend;\E
+ /xm,
+ like => {
+ %full_runs,
+ schema_only => 1,
+ section_pre_data => 1,
+ },
+ unlike => { no_privs => 1, },
+ },
+
+ 'REVOKE GRANT OPTION FOR UPDATE ON SEQUENCE wgo_then_regular' => {
+ create_order => 3,
+ create_sql => 'REVOKE GRANT OPTION FOR UPDATE ON SEQUENCE
+ wgo_then_regular FROM pg_signal_backend;',
+ regexp => qr/^
+ \QREVOKE ALL ON SEQUENCE public.wgo_then_regular FROM pg_signal_backend;\E
+ \n\QGRANT SELECT,UPDATE ON SEQUENCE public.wgo_then_regular TO pg_signal_backend;\E
+ \n\QGRANT USAGE ON SEQUENCE public.wgo_then_regular TO pg_signal_backend WITH GRANT OPTION;\E
+ /xm,
+ like => {
+ %full_runs,
+ schema_only => 1,
+ section_pre_data => 1,
+ },
+ unlike => { no_privs => 1, },
+ },
+
+ 'CREATE ACCESS METHOD regress_test_am' => {
+ regexp => qr/^
+ \QCREATE ACCESS METHOD regress_test_am TYPE INDEX HANDLER bthandler;\E
+ \n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'COMMENT ON EXTENSION test_pg_dump' => {
+ regexp => qr/^
+ \QCOMMENT ON EXTENSION test_pg_dump \E
+ \QIS 'Test pg_dump with an extension';\E
+ \n/xm,
+ like => {
+ %full_runs,
+ schema_only => 1,
+ section_pre_data => 1,
+ },
+ },
+
+ 'GRANT SELECT regress_pg_dump_table_added pre-ALTER EXTENSION' => {
+ create_order => 8,
+ create_sql =>
+ 'GRANT SELECT ON regress_pg_dump_table_added TO regress_dump_test_role;',
+ regexp => qr/^
+ \QGRANT SELECT ON TABLE public.regress_pg_dump_table_added TO regress_dump_test_role;\E
+ \n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'REVOKE SELECT regress_pg_dump_table_added post-ALTER EXTENSION' => {
+ create_order => 10,
+ create_sql =>
+ 'REVOKE SELECT ON regress_pg_dump_table_added FROM regress_dump_test_role;',
+ regexp => qr/^
+ \QREVOKE SELECT ON TABLE public.regress_pg_dump_table_added FROM regress_dump_test_role;\E
+ \n/xm,
+ like => {
+ %full_runs,
+ schema_only => 1,
+ section_pre_data => 1,
+ },
+ unlike => { no_privs => 1, },
+ },
+
+ 'GRANT SELECT ON TABLE regress_pg_dump_table' => {
+ regexp => qr/^
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n
+ \QGRANT SELECT ON TABLE public.regress_pg_dump_table TO regress_dump_test_role;\E\n
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
+ \n/xms,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'GRANT SELECT(col1) ON regress_pg_dump_table' => {
+ regexp => qr/^
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n
+ \QGRANT SELECT(col1) ON TABLE public.regress_pg_dump_table TO PUBLIC;\E\n
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
+ \n/xms,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role'
+ => {
+ create_order => 4,
+ create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table
+ TO regress_dump_test_role;',
+ regexp => qr/^
+ \QGRANT SELECT(col2) ON TABLE public.regress_pg_dump_table TO regress_dump_test_role;\E
+ \n/xm,
+ like => {
+ %full_runs,
+ schema_only => 1,
+ section_pre_data => 1,
+ },
+ unlike => { no_privs => 1, },
+ },
+
+ 'GRANT USAGE ON regress_pg_dump_table_col1_seq TO regress_dump_test_role'
+ => {
+ create_order => 5,
+ create_sql => 'GRANT USAGE ON SEQUENCE regress_pg_dump_table_col1_seq
+ TO regress_dump_test_role;',
+ regexp => qr/^
+ \QGRANT USAGE ON SEQUENCE public.regress_pg_dump_table_col1_seq TO regress_dump_test_role;\E
+ \n/xm,
+ like => {
+ %full_runs,
+ schema_only => 1,
+ section_pre_data => 1,
+ },
+ unlike => { no_privs => 1, },
+ },
+
+ 'GRANT USAGE ON regress_pg_dump_seq TO regress_dump_test_role' => {
+ regexp => qr/^
+ \QGRANT USAGE ON SEQUENCE public.regress_pg_dump_seq TO regress_dump_test_role;\E
+ \n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'REVOKE SELECT(col1) ON regress_pg_dump_table' => {
+ create_order => 3,
+ create_sql => 'REVOKE SELECT(col1) ON regress_pg_dump_table
+ FROM PUBLIC;',
+ regexp => qr/^
+ \QREVOKE SELECT(col1) ON TABLE public.regress_pg_dump_table FROM PUBLIC;\E
+ \n/xm,
+ like => {
+ %full_runs,
+ schema_only => 1,
+ section_pre_data => 1,
+ },
+ unlike => { no_privs => 1, },
+ },
+
+ # Objects included in the extension that live in a schema created by
+ # this extension.
+ 'CREATE TABLE regress_pg_dump_schema.test_table' => {
+ regexp => qr/^
+ \QCREATE TABLE regress_pg_dump_schema.test_table (\E
+ \n\s+\Qcol1 integer,\E
+ \n\s+\Qcol2 integer,\E
+ \n\s+\QCONSTRAINT test_table_col2_check CHECK ((col2 > 0))\E
+ \n\);\n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'GRANT SELECT ON regress_pg_dump_schema.test_table' => {
+ regexp => qr/^
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n
+ \QGRANT SELECT ON TABLE regress_pg_dump_schema.test_table TO regress_dump_test_role;\E\n
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
+ \n/xms,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'CREATE SEQUENCE regress_pg_dump_schema.test_seq' => {
+ regexp => qr/^
+ \QCREATE SEQUENCE regress_pg_dump_schema.test_seq\E
+ \n\s+\QSTART WITH 1\E
+ \n\s+\QINCREMENT BY 1\E
+ \n\s+\QNO MINVALUE\E
+ \n\s+\QNO MAXVALUE\E
+ \n\s+\QCACHE 1;\E
+ \n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'GRANT USAGE ON regress_pg_dump_schema.test_seq' => {
+ regexp => qr/^
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n
+ \QGRANT USAGE ON SEQUENCE regress_pg_dump_schema.test_seq TO regress_dump_test_role;\E\n
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
+ \n/xms,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'CREATE TYPE regress_pg_dump_schema.test_type' => {
+ regexp => qr/^
+ \QCREATE TYPE regress_pg_dump_schema.test_type AS (\E
+ \n\s+\Qcol1 integer\E
+ \n\);\n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'GRANT USAGE ON regress_pg_dump_schema.test_type' => {
+ regexp => qr/^
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n
+ \QGRANT ALL ON TYPE regress_pg_dump_schema.test_type TO regress_dump_test_role;\E\n
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
+ \n/xms,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'CREATE FUNCTION regress_pg_dump_schema.test_func' => {
+ regexp => qr/^
+ \QCREATE FUNCTION regress_pg_dump_schema.test_func() RETURNS integer\E
+ \n\s+\QLANGUAGE sql\E
+ \n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'GRANT ALL ON regress_pg_dump_schema.test_func' => {
+ regexp => qr/^
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n
+ \QGRANT ALL ON FUNCTION regress_pg_dump_schema.test_func() TO regress_dump_test_role;\E\n
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
+ \n/xms,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'CREATE AGGREGATE regress_pg_dump_schema.test_agg' => {
+ regexp => qr/^
+ \QCREATE AGGREGATE regress_pg_dump_schema.test_agg(smallint) (\E
+ \n\s+\QSFUNC = int2_sum,\E
+ \n\s+\QSTYPE = bigint\E
+ \n\);\n/xm,
+ like => { binary_upgrade => 1, },
+ },
+
+ 'GRANT ALL ON regress_pg_dump_schema.test_agg' => {
+ regexp => qr/^
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n
+ \QGRANT ALL ON FUNCTION regress_pg_dump_schema.test_agg(smallint) TO regress_dump_test_role;\E\n
+ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
+ \n/xms,
+ like => { binary_upgrade => 1, },
+ },
+
+ # like => {%pgdump_runs} marks every run as a 'like'; the runs that
+ # must not match are listed explicitly under 'unlike'.
+ 'ALTER INDEX pkey DEPENDS ON extension' => {
+ create_order => 11,
+ create_sql =>
+ 'CREATE TABLE regress_pg_dump_schema.extdependtab (col1 integer primary key, col2 int);
+ CREATE INDEX ON regress_pg_dump_schema.extdependtab (col2);
+ ALTER INDEX regress_pg_dump_schema.extdependtab_col2_idx DEPENDS ON EXTENSION test_pg_dump;
+ ALTER INDEX regress_pg_dump_schema.extdependtab_pkey DEPENDS ON EXTENSION test_pg_dump;',
+ regexp => qr/^
+ \QALTER INDEX regress_pg_dump_schema.extdependtab_pkey DEPENDS ON EXTENSION test_pg_dump;\E\n
+ /xms,
+ like => {%pgdump_runs},
+ unlike => {
+ data_only => 1,
+ extension_schema => 1,
+ pg_dumpall_globals => 1,
+ section_data => 1,
+ section_pre_data => 1,
+ },
+ },
+
+ 'ALTER INDEX idx DEPENDS ON extension' => {
+ regexp => qr/^
+ \QALTER INDEX regress_pg_dump_schema.extdependtab_col2_idx DEPENDS ON EXTENSION test_pg_dump;\E\n
+ /xms,
+ like => {%pgdump_runs},
+ unlike => {
+ data_only => 1,
+ extension_schema => 1,
+ pg_dumpall_globals => 1,
+ section_data => 1,
+ section_pre_data => 1,
+ },
+ },
+
+ # Objects not included in extension, part of schema created by extension
+ 'CREATE TABLE regress_pg_dump_schema.external_tab' => {
+ create_order => 4,
+ create_sql => 'CREATE TABLE regress_pg_dump_schema.external_tab
+ (col1 int);',
+ regexp => qr/^
+ \QCREATE TABLE regress_pg_dump_schema.external_tab (\E
+ \n\s+\Qcol1 integer\E
+ \n\);\n/xm,
+ like => {
+ %full_runs,
+ schema_only => 1,
+ section_pre_data => 1,
+ },
+ },);
+
+#########################################
+# Create a PG instance to test actually dumping from
+
+my $node = get_new_node('main');
+$node->init;
+$node->start;
+
+# NOTE(review): $port is not referenced below; pg_dump picks up the
+# port via $PGPORT, which PostgresNode sets for us.
+my $port = $node->port;
+
+my $num_tests = 0;
+
+# Pre-compute the number of tests so Test::More's plan can be declared
+# before any test runs.  This loop deliberately mirrors the execution
+# loop further down: one test per pg_dump invocation, one per optional
+# pg_restore invocation, and one per entry in %tests for every run
+# (each entry is checked either as a 'like' or as an 'unlike').
+foreach my $run (sort keys %pgdump_runs)
+{
+ my $test_key = $run;
+
+ # Each run of pg_dump is a test itself
+ $num_tests++;
+
+ # If there is a restore cmd, that's another test
+ if ($pgdump_runs{$run}->{restore_cmd})
+ {
+ $num_tests++;
+ }
+
+ # Runs with a test_key share another run's like/unlike sets.
+ if ($pgdump_runs{$run}->{test_key})
+ {
+ $test_key = $pgdump_runs{$run}->{test_key};
+ }
+
+ # Then count all the tests run against each run
+ foreach my $test (sort keys %tests)
+ {
+ # Both branches add exactly one test; the if/else is kept so the
+ # structure matches the like/unlike decision in the execution loop.
+ # If there is a like entry, but no unlike entry, then we will test the like case
+ if ($tests{$test}->{like}->{$test_key}
+ && !defined($tests{$test}->{unlike}->{$test_key}))
+ {
+ $num_tests++;
+ }
+ else
+ {
+ # We will test everything that isn't a 'like'
+ $num_tests++;
+ }
+ }
+}
+plan tests => $num_tests;
+
+#########################################
+# Set up schemas, tables, etc, to be dumped.
+
+# Build up the create statements
+my $create_sql = '';
+
+# Concatenate every test's create_sql in create_order; tests without a
+# create_order (and hence without create_sql) sort last, contributing
+# nothing.  Ordering matters because later statements depend on objects
+# created by earlier ones (e.g. the role created at order 1).
+foreach my $test (
+ sort {
+ if ($tests{$a}->{create_order} and $tests{$b}->{create_order})
+ {
+ $tests{$a}->{create_order} <=> $tests{$b}->{create_order};
+ }
+ elsif ($tests{$a}->{create_order})
+ {
+ -1;
+ }
+ elsif ($tests{$b}->{create_order})
+ {
+ 1;
+ }
+ else
+ {
+ 0;
+ }
+ } keys %tests)
+{
+ if ($tests{$test}->{create_sql})
+ {
+ $create_sql .= $tests{$test}->{create_sql};
+ }
+}
+
+# Send the combined set of commands to psql
+$node->safe_psql('postgres', $create_sql);
+
+#########################################
+# Run all runs
+
+foreach my $run (sort keys %pgdump_runs)
+{
+
+ my $test_key = $run;
+
+ # Run pg_dump itself; the port comes from $PGPORT via PostgresNode.
+ $node->command_ok(\@{ $pgdump_runs{$run}->{dump_cmd} },
+ "$run: pg_dump runs");
+
+ # For non-text formats, convert the dump back to SQL with pg_restore
+ # so the regexp checks below have text to work on.
+ if ($pgdump_runs{$run}->{restore_cmd})
+ {
+ $node->command_ok(\@{ $pgdump_runs{$run}->{restore_cmd} },
+ "$run: pg_restore runs");
+ }
+
+ if ($pgdump_runs{$run}->{test_key})
+ {
+ $test_key = $pgdump_runs{$run}->{test_key};
+ }
+
+ # Both dump_cmd and restore_cmd are defined to leave their text
+ # output at $tempdir/<run>.sql.
+ my $output_file = slurp_file("$tempdir/${run}.sql");
+
+ #########################################
+ # Run all tests where this run is included
+ # as either a 'like' or 'unlike' test.
+
+ foreach my $test (sort keys %tests)
+ {
+ # Run the test listed as a like, unless it is specifically noted
+ # as an unlike (generally due to an explicit exclusion or similar).
+ if ($tests{$test}->{like}->{$test_key}
+ && !defined($tests{$test}->{unlike}->{$test_key}))
+ {
+ if (!ok($output_file =~ $tests{$test}->{regexp},
+ "$run: should dump $test"))
+ {
+ diag("Review $run results in $tempdir");
+ }
+ }
+ else
+ {
+ if (!ok($output_file !~ $tests{$test}->{regexp},
+ "$run: should not dump $test"))
+ {
+ diag("Review $run results in $tempdir");
+ }
+ }
+ }
+}
+
+#########################################
+# Stop the database instance, which will be removed at the end of the tests.
+
+$node->stop('fast');
diff --git a/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql b/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql
new file mode 100644
index 0000000..110f7ee
--- /dev/null
+++ b/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql
@@ -0,0 +1,62 @@
+/* src/test/modules/test_pg_dump/test_pg_dump--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_pg_dump" to load this file. \quit
+
+-- Ordinary extension member objects: their definitions should only be
+-- dumped by --binary-upgrade, never by a normal pg_dump.
+CREATE TABLE regress_pg_dump_table (
+	col1 serial,
+	col2 int check (col2 > 0)
+);
+
+CREATE SEQUENCE regress_pg_dump_seq;
+
+-- pg_extension_config_dump() marks these as extension configuration
+-- relations, so their *data* is included in normal dumps.
+CREATE SEQUENCE regress_seq_dumpable;
+SELECT pg_catalog.pg_extension_config_dump('regress_seq_dumpable', '');
+
+CREATE TABLE regress_table_dumpable (
+	col1 int check (col1 > 0)
+);
+SELECT pg_catalog.pg_extension_config_dump('regress_table_dumpable', '');
+
+CREATE SCHEMA regress_pg_dump_schema;
+
+-- The GRANTs below reference regress_dump_test_role, which the TAP
+-- test creates before installing this extension (create_order 1).
+GRANT USAGE ON regress_pg_dump_seq TO regress_dump_test_role;
+
+GRANT SELECT ON regress_pg_dump_table TO regress_dump_test_role;
+GRANT SELECT(col1) ON regress_pg_dump_table TO public;
+
+-- Grant then revoke, so the net initial privileges are unchanged.
+GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role;
+REVOKE SELECT(col2) ON regress_pg_dump_table FROM regress_dump_test_role;
+
+-- "wgo" = WITH GRANT OPTION; the TAP test later strips these grants in
+-- different ways and checks how the resulting ACLs are dumped.
+CREATE FUNCTION wgo_then_no_access() RETURNS int LANGUAGE SQL AS 'SELECT 1';
+GRANT ALL ON FUNCTION wgo_then_no_access()
+  TO pg_signal_backend WITH GRANT OPTION;
+
+CREATE SEQUENCE wgo_then_regular;
+GRANT ALL ON SEQUENCE wgo_then_regular TO pg_signal_backend WITH GRANT OPTION;
+REVOKE GRANT OPTION FOR SELECT ON SEQUENCE wgo_then_regular
+  FROM pg_signal_backend;
+
+-- Reuses the btree handler; only the CREATE ACCESS METHOD syntax is
+-- under test, not the access method itself.
+CREATE ACCESS METHOD regress_test_am TYPE INDEX HANDLER bthandler;
+
+-- Create a set of objects that are part of the schema created by
+-- this extension.
+CREATE TABLE regress_pg_dump_schema.test_table (
+	col1 int,
+	col2 int check (col2 > 0)
+);
+GRANT SELECT ON regress_pg_dump_schema.test_table TO regress_dump_test_role;
+
+CREATE SEQUENCE regress_pg_dump_schema.test_seq;
+GRANT USAGE ON regress_pg_dump_schema.test_seq TO regress_dump_test_role;
+
+CREATE TYPE regress_pg_dump_schema.test_type AS (col1 int);
+GRANT USAGE ON TYPE regress_pg_dump_schema.test_type TO regress_dump_test_role;
+
+CREATE FUNCTION regress_pg_dump_schema.test_func () RETURNS int
+AS 'SELECT 1;' LANGUAGE SQL;
+GRANT EXECUTE ON FUNCTION regress_pg_dump_schema.test_func() TO regress_dump_test_role;
+
+CREATE AGGREGATE regress_pg_dump_schema.test_agg(int2)
+(SFUNC = int2_sum, STYPE = int8);
+GRANT EXECUTE ON FUNCTION regress_pg_dump_schema.test_agg(int2) TO regress_dump_test_role;
diff --git a/src/test/modules/test_pg_dump/test_pg_dump.control b/src/test/modules/test_pg_dump/test_pg_dump.control
new file mode 100644
index 0000000..fe3450d
--- /dev/null
+++ b/src/test/modules/test_pg_dump/test_pg_dump.control
@@ -0,0 +1,3 @@
+comment = 'Test pg_dump with an extension'
+default_version = '1.0'
+relocatable = true
diff --git a/src/test/modules/test_predtest/.gitignore b/src/test/modules/test_predtest/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_predtest/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_predtest/Makefile b/src/test/modules/test_predtest/Makefile
new file mode 100644
index 0000000..a235e2a
--- /dev/null
+++ b/src/test/modules/test_predtest/Makefile
@@ -0,0 +1,23 @@
+# src/test/modules/test_predtest/Makefile
+
+# Build a single shared library from the listed objects.
+MODULE_big = test_predtest
+OBJS = \
+ $(WIN32RES) \
+ test_predtest.o
+PGFILEDESC = "test_predtest - test code for optimizer/util/predtest.c"
+
+# SQL extension script installed alongside the module.
+EXTENSION = test_predtest
+DATA = test_predtest--1.0.sql
+
+REGRESS = test_predtest
+
+# Standard PostgreSQL dual-mode build: standalone via PGXS when
+# USE_PGXS is set, otherwise as part of the source tree.
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_predtest
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_predtest/README b/src/test/modules/test_predtest/README
new file mode 100644
index 0000000..2c9bec0
--- /dev/null
+++ b/src/test/modules/test_predtest/README
@@ -0,0 +1,28 @@
+test_predtest is a module for checking the correctness of the optimizer's
+predicate-proof logic, in src/backend/optimizer/util/predtest.c.
+
+The module provides a function that allows direct application of
+predtest.c's exposed functions, predicate_implied_by() and
+predicate_refuted_by(), to arbitrary boolean expressions, with direct
+inspection of the results. This could be done indirectly by checking
+planner results, but it can be difficult to construct end-to-end test
+cases that prove that the expected results were obtained.
+
+In general, the use of this function is like
+ select * from test_predtest('query string')
+where the query string must be a SELECT returning two boolean
+columns, for example
+
+ select * from test_predtest($$
+ select x, not x
+ from (values (false), (true), (null)) as v(x)
+ $$);
+
+The function parses and plans the given query, and then applies the
+predtest.c code to the two boolean expressions in the SELECT list, to see
+if the first expression can be proven or refuted by the second. It also
+executes the query, and checks the resulting rows to see whether any
+claimed implication or refutation relationship actually holds. If the
+query is designed to exercise the expressions on a full set of possible
+input values, as in the example above, then this provides a mechanical
+cross-check as to whether the proof code has given a correct answer.
diff --git a/src/test/modules/test_predtest/expected/test_predtest.out b/src/test/modules/test_predtest/expected/test_predtest.out
new file mode 100644
index 0000000..6d21bcd
--- /dev/null
+++ b/src/test/modules/test_predtest/expected/test_predtest.out
@@ -0,0 +1,1096 @@
+CREATE EXTENSION test_predtest;
+-- Make output more legible
+\pset expanded on
+-- Test data
+-- all combinations of four boolean values
+create table booleans as
+select
+ case i%3 when 0 then true when 1 then false else null end as x,
+ case (i/3)%3 when 0 then true when 1 then false else null end as y,
+ case (i/9)%3 when 0 then true when 1 then false else null end as z,
+ case (i/27)%3 when 0 then true when 1 then false else null end as w
+from generate_series(0, 3*3*3*3-1) i;
+-- all combinations of two integers 0..9, plus null
+create table integers as
+select
+ case i%11 when 10 then null else i%11 end as x,
+ case (i/11)%11 when 10 then null else (i/11)%11 end as y
+from generate_series(0, 11*11-1) i;
+-- and a simple strict function that's opaque to the optimizer
+create function strictf(bool, bool) returns bool
+language plpgsql as $$begin return $1 and not $2; end$$ strict;
+-- a simple function to make arrays opaque to the optimizer
+create function opaque_array(int[]) returns int[]
+language plpgsql as $$begin return $1; end$$ strict;
+-- Basic proof rules for single boolean variables
+select * from test_predtest($$
+select x, x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x, not x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select not x, x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select not x, not x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x is not null, x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x is not null, x is null
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x is null, x is not null
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x is not true, x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x, x is not true
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | t
+
+select * from test_predtest($$
+select x is false, x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x, x is false
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x is unknown, x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x, x is unknown
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | t
+s_r_holds | f
+w_r_holds | t
+
+-- Assorted not-so-trivial refutation rules
+select * from test_predtest($$
+select x is null, x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x, x is null
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | t
+s_r_holds | f
+w_r_holds | t
+
+select * from test_predtest($$
+select strictf(x,y), x is null
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | t
+s_r_holds | f
+w_r_holds | t
+
+select * from test_predtest($$
+select (x is not null) is not true, x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select strictf(x,y), (x is not null) is false
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | t
+s_r_holds | f
+w_r_holds | t
+
+select * from test_predtest($$
+select x > y, (y < x) is false
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+-- Tests involving AND/OR constructs
+select * from test_predtest($$
+select x, x and y
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select not x, x and y
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x, not x and y
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x or y, x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x and y, x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x and y, not x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x and y, y and x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select not y, y and x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x or y, y or x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x or y or z, x or z
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x and z, x and y and z
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select z or w, x or y
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select z and w, x or y
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x, (x and y) or (x and z)
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select (x and y) or z, y and x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select (not x or not y) and z, y and x
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select y or x, (x or y) and z
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select not x and not y, (x or y) and z
+from booleans
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+-- Tests using btree operator knowledge
+select * from test_predtest($$
+select x <= y, x < y
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x <= y, x > y
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x <= y, y >= x
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x <= y, y > x and y < x+2
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x <= 5, x <= 7
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x <= 5, x > 7
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x <= 5, 7 > x
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select 5 >= x, 7 > x
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select 5 >= x, x > 7
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select 5 = x, x = 7
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x is not null, x > 7
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x is not null, int4lt(x,8)
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x is null, x > 7
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x is null, int4lt(x,8)
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x is not null, x < 'foo'
+from (values
+ ('aaa'::varchar), ('zzz'::varchar), (null)) as v(x)
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+-- Cases using ScalarArrayOpExpr
+select * from test_predtest($$
+select x <= 5, x in (1,3,5)
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x <= 5, x in (1,3,5,7)
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x <= 5, x in (1,3,5,null)
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x in (null,1,3,5,7), x in (1,3,5)
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x <= 5, x < all(array[1,3,5])
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | t
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x <= y, x = any(array[1,3,y])
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+-- In these tests, we want to prevent predtest.c from breaking down the
+-- ScalarArrayOpExpr into an AND/OR tree, so as to exercise the logic
+-- that handles ScalarArrayOpExpr directly. We use opaque_array() if
+-- possible, otherwise an array longer than MAX_SAOP_ARRAY_SIZE.
+-- ScalarArrayOpExpr implies scalar IS NOT NULL
+select * from test_predtest($$
+select x is not null, x = any(opaque_array(array[1]))
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+-- but for ALL, we have to be able to prove the array nonempty
+select * from test_predtest($$
+select x is not null, x <> all(opaque_array(array[1]))
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x is not null, x <> all(array[
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,
+ 29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+ 54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
+ 79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101
+])
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+select * from test_predtest($$
+select x is not null, x <> all(array[
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,
+ 29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+ 54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
+ 79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,y
+])
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+-- check empty-array cases
+select * from test_predtest($$
+select x is not null, x = any(opaque_array(array[]::int[]))
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | t
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | t
+w_i_holds | t
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x is not null, x <> all(opaque_array(array[]::int[]))
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+-- same thing under a strict function doesn't prove it
+select * from test_predtest($$
+select x is not null, strictf(true, x = any(opaque_array(array[]::int[])))
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+-- ScalarArrayOpExpr refutes scalar IS NULL
+select * from test_predtest($$
+select x is null, x = any(opaque_array(array[1]))
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+-- but for ALL, we have to be able to prove the array nonempty
+select * from test_predtest($$
+select x is null, x <> all(opaque_array(array[1]))
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x is null, x <> all(array[
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,
+ 29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+ 54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
+ 79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101
+])
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | f
+s_r_holds | t
+w_r_holds | t
+
+-- check empty-array cases
+select * from test_predtest($$
+select x is null, x = any(opaque_array(array[]::int[]))
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | t
+weak_refuted_by | t
+s_i_holds | t
+w_i_holds | t
+s_r_holds | t
+w_r_holds | t
+
+select * from test_predtest($$
+select x is null, x <> all(opaque_array(array[]::int[]))
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+-- same thing under a strict function doesn't prove it
+select * from test_predtest($$
+select x is null, strictf(true, x = any(opaque_array(array[]::int[])))
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | f
+s_r_holds | f
+w_r_holds | f
+
+-- Also, nullness of the scalar weakly refutes a SAOP
+select * from test_predtest($$
+select x = any(opaque_array(array[1])), x is null
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | t
+s_i_holds | f
+w_i_holds | t
+s_r_holds | f
+w_r_holds | t
+
+-- as does nullness of the array
+select * from test_predtest($$
+select x = any(opaque_array(array[y])), array[y] is null
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | t
+s_i_holds | t
+w_i_holds | t
+s_r_holds | t
+w_r_holds | t
+
+-- ... unless we need to prove array empty
+select * from test_predtest($$
+select x = all(opaque_array(array[1])), x is null
+from integers
+$$);
+-[ RECORD 1 ]-----+--
+strong_implied_by | f
+weak_implied_by | f
+strong_refuted_by | f
+weak_refuted_by | f
+s_i_holds | f
+w_i_holds | t
+s_r_holds | f
+w_r_holds | t
+
diff --git a/src/test/modules/test_predtest/sql/test_predtest.sql b/src/test/modules/test_predtest/sql/test_predtest.sql
new file mode 100644
index 0000000..072eb5b
--- /dev/null
+++ b/src/test/modules/test_predtest/sql/test_predtest.sql
@@ -0,0 +1,442 @@
+CREATE EXTENSION test_predtest;
+
+-- Make output more legible
+\pset expanded on
+
+-- Test data
+
+-- all combinations of four boolean values
+create table booleans as
+select
+ case i%3 when 0 then true when 1 then false else null end as x,
+ case (i/3)%3 when 0 then true when 1 then false else null end as y,
+ case (i/9)%3 when 0 then true when 1 then false else null end as z,
+ case (i/27)%3 when 0 then true when 1 then false else null end as w
+from generate_series(0, 3*3*3*3-1) i;
+
+-- all combinations of two integers 0..9, plus null
+create table integers as
+select
+ case i%11 when 10 then null else i%11 end as x,
+ case (i/11)%11 when 10 then null else (i/11)%11 end as y
+from generate_series(0, 11*11-1) i;
+
+-- and a simple strict function that's opaque to the optimizer
+create function strictf(bool, bool) returns bool
+language plpgsql as $$begin return $1 and not $2; end$$ strict;
+
+-- a simple function to make arrays opaque to the optimizer
+create function opaque_array(int[]) returns int[]
+language plpgsql as $$begin return $1; end$$ strict;
+
+-- Basic proof rules for single boolean variables
+
+select * from test_predtest($$
+select x, x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x, not x
+from booleans
+$$);
+
+select * from test_predtest($$
+select not x, x
+from booleans
+$$);
+
+select * from test_predtest($$
+select not x, not x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x is not null, x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x is not null, x is null
+from integers
+$$);
+
+select * from test_predtest($$
+select x is null, x is not null
+from integers
+$$);
+
+select * from test_predtest($$
+select x is not true, x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x, x is not true
+from booleans
+$$);
+
+select * from test_predtest($$
+select x is false, x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x, x is false
+from booleans
+$$);
+
+select * from test_predtest($$
+select x is unknown, x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x, x is unknown
+from booleans
+$$);
+
+-- Assorted not-so-trivial refutation rules
+
+select * from test_predtest($$
+select x is null, x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x, x is null
+from booleans
+$$);
+
+select * from test_predtest($$
+select strictf(x,y), x is null
+from booleans
+$$);
+
+select * from test_predtest($$
+select (x is not null) is not true, x
+from booleans
+$$);
+
+select * from test_predtest($$
+select strictf(x,y), (x is not null) is false
+from booleans
+$$);
+
+select * from test_predtest($$
+select x > y, (y < x) is false
+from integers
+$$);
+
+-- Tests involving AND/OR constructs
+
+select * from test_predtest($$
+select x, x and y
+from booleans
+$$);
+
+select * from test_predtest($$
+select not x, x and y
+from booleans
+$$);
+
+select * from test_predtest($$
+select x, not x and y
+from booleans
+$$);
+
+select * from test_predtest($$
+select x or y, x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x and y, x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x and y, not x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x and y, y and x
+from booleans
+$$);
+
+select * from test_predtest($$
+select not y, y and x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x or y, y or x
+from booleans
+$$);
+
+select * from test_predtest($$
+select x or y or z, x or z
+from booleans
+$$);
+
+select * from test_predtest($$
+select x and z, x and y and z
+from booleans
+$$);
+
+select * from test_predtest($$
+select z or w, x or y
+from booleans
+$$);
+
+select * from test_predtest($$
+select z and w, x or y
+from booleans
+$$);
+
+select * from test_predtest($$
+select x, (x and y) or (x and z)
+from booleans
+$$);
+
+select * from test_predtest($$
+select (x and y) or z, y and x
+from booleans
+$$);
+
+select * from test_predtest($$
+select (not x or not y) and z, y and x
+from booleans
+$$);
+
+select * from test_predtest($$
+select y or x, (x or y) and z
+from booleans
+$$);
+
+select * from test_predtest($$
+select not x and not y, (x or y) and z
+from booleans
+$$);
+
+-- Tests using btree operator knowledge
+
+select * from test_predtest($$
+select x <= y, x < y
+from integers
+$$);
+
+select * from test_predtest($$
+select x <= y, x > y
+from integers
+$$);
+
+select * from test_predtest($$
+select x <= y, y >= x
+from integers
+$$);
+
+select * from test_predtest($$
+select x <= y, y > x and y < x+2
+from integers
+$$);
+
+select * from test_predtest($$
+select x <= 5, x <= 7
+from integers
+$$);
+
+select * from test_predtest($$
+select x <= 5, x > 7
+from integers
+$$);
+
+select * from test_predtest($$
+select x <= 5, 7 > x
+from integers
+$$);
+
+select * from test_predtest($$
+select 5 >= x, 7 > x
+from integers
+$$);
+
+select * from test_predtest($$
+select 5 >= x, x > 7
+from integers
+$$);
+
+select * from test_predtest($$
+select 5 = x, x = 7
+from integers
+$$);
+
+select * from test_predtest($$
+select x is not null, x > 7
+from integers
+$$);
+
+select * from test_predtest($$
+select x is not null, int4lt(x,8)
+from integers
+$$);
+
+select * from test_predtest($$
+select x is null, x > 7
+from integers
+$$);
+
+select * from test_predtest($$
+select x is null, int4lt(x,8)
+from integers
+$$);
+
+select * from test_predtest($$
+select x is not null, x < 'foo'
+from (values
+ ('aaa'::varchar), ('zzz'::varchar), (null)) as v(x)
+$$);
+
+-- Cases using ScalarArrayOpExpr
+
+select * from test_predtest($$
+select x <= 5, x in (1,3,5)
+from integers
+$$);
+
+select * from test_predtest($$
+select x <= 5, x in (1,3,5,7)
+from integers
+$$);
+
+select * from test_predtest($$
+select x <= 5, x in (1,3,5,null)
+from integers
+$$);
+
+select * from test_predtest($$
+select x in (null,1,3,5,7), x in (1,3,5)
+from integers
+$$);
+
+select * from test_predtest($$
+select x <= 5, x < all(array[1,3,5])
+from integers
+$$);
+
+select * from test_predtest($$
+select x <= y, x = any(array[1,3,y])
+from integers
+$$);
+
+-- In these tests, we want to prevent predtest.c from breaking down the
+-- ScalarArrayOpExpr into an AND/OR tree, so as to exercise the logic
+-- that handles ScalarArrayOpExpr directly. We use opaque_array() if
+-- possible, otherwise an array longer than MAX_SAOP_ARRAY_SIZE.
+
+-- ScalarArrayOpExpr implies scalar IS NOT NULL
+select * from test_predtest($$
+select x is not null, x = any(opaque_array(array[1]))
+from integers
+$$);
+
+-- but for ALL, we have to be able to prove the array nonempty
+select * from test_predtest($$
+select x is not null, x <> all(opaque_array(array[1]))
+from integers
+$$);
+
+select * from test_predtest($$
+select x is not null, x <> all(array[
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,
+ 29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+ 54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
+ 79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101
+])
+from integers
+$$);
+
+select * from test_predtest($$
+select x is not null, x <> all(array[
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,
+ 29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+ 54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
+ 79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,y
+])
+from integers
+$$);
+
+-- check empty-array cases
+select * from test_predtest($$
+select x is not null, x = any(opaque_array(array[]::int[]))
+from integers
+$$);
+
+select * from test_predtest($$
+select x is not null, x <> all(opaque_array(array[]::int[]))
+from integers
+$$);
+
+-- same thing under a strict function doesn't prove it
+select * from test_predtest($$
+select x is not null, strictf(true, x = any(opaque_array(array[]::int[])))
+from integers
+$$);
+
+-- ScalarArrayOpExpr refutes scalar IS NULL
+select * from test_predtest($$
+select x is null, x = any(opaque_array(array[1]))
+from integers
+$$);
+
+-- but for ALL, we have to be able to prove the array nonempty
+select * from test_predtest($$
+select x is null, x <> all(opaque_array(array[1]))
+from integers
+$$);
+
+select * from test_predtest($$
+select x is null, x <> all(array[
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,
+ 29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,
+ 54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,
+ 79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101
+])
+from integers
+$$);
+
+-- check empty-array cases
+select * from test_predtest($$
+select x is null, x = any(opaque_array(array[]::int[]))
+from integers
+$$);
+
+select * from test_predtest($$
+select x is null, x <> all(opaque_array(array[]::int[]))
+from integers
+$$);
+
+-- same thing under a strict function doesn't prove it
+select * from test_predtest($$
+select x is null, strictf(true, x = any(opaque_array(array[]::int[])))
+from integers
+$$);
+
+-- Also, nullness of the scalar weakly refutes a SAOP
+select * from test_predtest($$
+select x = any(opaque_array(array[1])), x is null
+from integers
+$$);
+
+-- as does nullness of the array
+select * from test_predtest($$
+select x = any(opaque_array(array[y])), array[y] is null
+from integers
+$$);
+
+-- ... unless we need to prove array empty
+select * from test_predtest($$
+select x = all(opaque_array(array[1])), x is null
+from integers
+$$);
diff --git a/src/test/modules/test_predtest/test_predtest--1.0.sql b/src/test/modules/test_predtest/test_predtest--1.0.sql
new file mode 100644
index 0000000..11e1444
--- /dev/null
+++ b/src/test/modules/test_predtest/test_predtest--1.0.sql
@@ -0,0 +1,16 @@
+/* src/test/modules/test_predtest/test_predtest--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_predtest" to load this file. \quit
+
+CREATE FUNCTION test_predtest(query text,
+ OUT strong_implied_by bool,
+ OUT weak_implied_by bool,
+ OUT strong_refuted_by bool,
+ OUT weak_refuted_by bool,
+ OUT s_i_holds bool,
+ OUT w_i_holds bool,
+ OUT s_r_holds bool,
+ OUT w_r_holds bool)
+STRICT
+AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_predtest/test_predtest.c b/src/test/modules/test_predtest/test_predtest.c
new file mode 100644
index 0000000..7cbb1bf
--- /dev/null
+++ b/src/test/modules/test_predtest/test_predtest.c
@@ -0,0 +1,218 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_predtest.c
+ * Test correctness of optimizer's predicate proof logic.
+ *
+ * Copyright (c) 2018-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_predtest/test_predtest.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "catalog/pg_type.h"
+#include "executor/spi.h"
+#include "funcapi.h"
+#include "nodes/makefuncs.h"
+#include "optimizer/optimizer.h"
+#include "utils/builtins.h"
+
+PG_MODULE_MAGIC;
+
+/*
+ * test_predtest(query text) returns record
+ */
+PG_FUNCTION_INFO_V1(test_predtest);
+
+Datum
+test_predtest(PG_FUNCTION_ARGS)
+{
+ text *txt = PG_GETARG_TEXT_PP(0);
+ char *query_string = text_to_cstring(txt);
+ SPIPlanPtr spiplan;
+ int spirc;
+ TupleDesc tupdesc;
+ bool s_i_holds,
+ w_i_holds,
+ s_r_holds,
+ w_r_holds;
+ CachedPlan *cplan;
+ PlannedStmt *stmt;
+ Plan *plan;
+ Expr *clause1;
+ Expr *clause2;
+ bool strong_implied_by,
+ weak_implied_by,
+ strong_refuted_by,
+ weak_refuted_by;
+ Datum values[8];
+ bool nulls[8];
+ int i;
+
+ /* We use SPI to parse, plan, and execute the test query */
+ if (SPI_connect() != SPI_OK_CONNECT)
+ elog(ERROR, "SPI_connect failed");
+
+ /*
+ * First, plan and execute the query, and inspect the results. To the
+ * extent that the query fully exercises the two expressions, this
+ * provides an experimental indication of whether implication or
+ * refutation holds.
+ */
+ spiplan = SPI_prepare(query_string, 0, NULL);
+ if (spiplan == NULL)
+ elog(ERROR, "SPI_prepare failed for \"%s\"", query_string);
+
+ spirc = SPI_execute_plan(spiplan, NULL, NULL, true, 0);
+ if (spirc != SPI_OK_SELECT)
+ elog(ERROR, "failed to execute \"%s\"", query_string);
+ tupdesc = SPI_tuptable->tupdesc;
+ if (tupdesc->natts != 2 ||
+ TupleDescAttr(tupdesc, 0)->atttypid != BOOLOID ||
+ TupleDescAttr(tupdesc, 1)->atttypid != BOOLOID)
+ elog(ERROR, "query must yield two boolean columns");
+
+ s_i_holds = w_i_holds = s_r_holds = w_r_holds = true;
+ for (i = 0; i < SPI_processed; i++)
+ {
+ HeapTuple tup = SPI_tuptable->vals[i];
+ Datum dat;
+ bool isnull;
+ char c1,
+ c2;
+
+ /* Extract column values in a 3-way representation */
+ dat = SPI_getbinval(tup, tupdesc, 1, &isnull);
+ if (isnull)
+ c1 = 'n';
+ else if (DatumGetBool(dat))
+ c1 = 't';
+ else
+ c1 = 'f';
+
+ dat = SPI_getbinval(tup, tupdesc, 2, &isnull);
+ if (isnull)
+ c2 = 'n';
+ else if (DatumGetBool(dat))
+ c2 = 't';
+ else
+ c2 = 'f';
+
+ /* Check for violations of various proof conditions */
+
+ /* strong implication: truth of c2 implies truth of c1 */
+ if (c2 == 't' && c1 != 't')
+ s_i_holds = false;
+ /* weak implication: non-falsity of c2 implies non-falsity of c1 */
+ if (c2 != 'f' && c1 == 'f')
+ w_i_holds = false;
+ /* strong refutation: truth of c2 implies falsity of c1 */
+ if (c2 == 't' && c1 != 'f')
+ s_r_holds = false;
+ /* weak refutation: truth of c2 implies non-truth of c1 */
+ if (c2 == 't' && c1 == 't')
+ w_r_holds = false;
+ }
+
+ /*
+ * Now, dig the clause querytrees out of the plan, and see what predtest.c
+ * does with them.
+ */
+ cplan = SPI_plan_get_cached_plan(spiplan);
+
+ if (list_length(cplan->stmt_list) != 1)
+ elog(ERROR, "failed to decipher query plan");
+ stmt = linitial_node(PlannedStmt, cplan->stmt_list);
+ if (stmt->commandType != CMD_SELECT)
+ elog(ERROR, "failed to decipher query plan");
+ plan = stmt->planTree;
+ Assert(list_length(plan->targetlist) >= 2);
+ clause1 = castNode(TargetEntry, linitial(plan->targetlist))->expr;
+ clause2 = castNode(TargetEntry, lsecond(plan->targetlist))->expr;
+
+ /*
+ * Because the clauses are in the SELECT list, preprocess_expression did
+ * not pass them through canonicalize_qual nor make_ands_implicit.
+ *
+ * We can't do canonicalize_qual here, since it's unclear whether the
+ * expressions ought to be treated as WHERE or CHECK clauses. Fortunately,
+ * useful test expressions wouldn't be affected by those transformations
+ * anyway. We should do make_ands_implicit, though.
+ *
+ * Another way in which this does not exactly duplicate the normal usage
+ * of the proof functions is that they are often given qual clauses
+ * containing RestrictInfo nodes. But since predtest.c just looks through
+ * those anyway, it seems OK to not worry about that point.
+ */
+ clause1 = (Expr *) make_ands_implicit(clause1);
+ clause2 = (Expr *) make_ands_implicit(clause2);
+
+ strong_implied_by = predicate_implied_by((List *) clause1,
+ (List *) clause2,
+ false);
+
+ weak_implied_by = predicate_implied_by((List *) clause1,
+ (List *) clause2,
+ true);
+
+ strong_refuted_by = predicate_refuted_by((List *) clause1,
+ (List *) clause2,
+ false);
+
+ weak_refuted_by = predicate_refuted_by((List *) clause1,
+ (List *) clause2,
+ true);
+
+ /*
+ * Issue warning if any proof is demonstrably incorrect.
+ */
+ if (strong_implied_by && !s_i_holds)
+ elog(WARNING, "strong_implied_by result is incorrect");
+ if (weak_implied_by && !w_i_holds)
+ elog(WARNING, "weak_implied_by result is incorrect");
+ if (strong_refuted_by && !s_r_holds)
+ elog(WARNING, "strong_refuted_by result is incorrect");
+ if (weak_refuted_by && !w_r_holds)
+ elog(WARNING, "weak_refuted_by result is incorrect");
+
+ /*
+ * Clean up and return a record of the results.
+ */
+ if (SPI_finish() != SPI_OK_FINISH)
+ elog(ERROR, "SPI_finish failed");
+
+ tupdesc = CreateTemplateTupleDesc(8);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1,
+ "strong_implied_by", BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2,
+ "weak_implied_by", BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3,
+ "strong_refuted_by", BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 4,
+ "weak_refuted_by", BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 5,
+ "s_i_holds", BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 6,
+ "w_i_holds", BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 7,
+ "s_r_holds", BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 8,
+ "w_r_holds", BOOLOID, -1, 0);
+ tupdesc = BlessTupleDesc(tupdesc);
+
+ MemSet(nulls, 0, sizeof(nulls));
+ values[0] = BoolGetDatum(strong_implied_by);
+ values[1] = BoolGetDatum(weak_implied_by);
+ values[2] = BoolGetDatum(strong_refuted_by);
+ values[3] = BoolGetDatum(weak_refuted_by);
+ values[4] = BoolGetDatum(s_i_holds);
+ values[5] = BoolGetDatum(w_i_holds);
+ values[6] = BoolGetDatum(s_r_holds);
+ values[7] = BoolGetDatum(w_r_holds);
+
+ PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
+}
diff --git a/src/test/modules/test_predtest/test_predtest.control b/src/test/modules/test_predtest/test_predtest.control
new file mode 100644
index 0000000..a899a9d
--- /dev/null
+++ b/src/test/modules/test_predtest/test_predtest.control
@@ -0,0 +1,4 @@
+comment = 'Test code for optimizer/util/predtest.c'
+default_version = '1.0'
+module_pathname = '$libdir/test_predtest'
+relocatable = true
diff --git a/src/test/modules/test_rbtree/.gitignore b/src/test/modules/test_rbtree/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_rbtree/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_rbtree/Makefile b/src/test/modules/test_rbtree/Makefile
new file mode 100644
index 0000000..faf376a
--- /dev/null
+++ b/src/test/modules/test_rbtree/Makefile
@@ -0,0 +1,23 @@
+# src/test/modules/test_rbtree/Makefile
+
+MODULE_big = test_rbtree
+OBJS = \
+ $(WIN32RES) \
+ test_rbtree.o
+PGFILEDESC = "test_rbtree - test code for red-black tree library"
+
+EXTENSION = test_rbtree
+DATA = test_rbtree--1.0.sql
+
+REGRESS = test_rbtree
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_rbtree
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_rbtree/README b/src/test/modules/test_rbtree/README
new file mode 100644
index 0000000..d69eb8d
--- /dev/null
+++ b/src/test/modules/test_rbtree/README
@@ -0,0 +1,13 @@
+test_rbtree is a test module for checking the correctness of red-black
+tree operations.
+
+These tests are performed on red-black trees that store integers.
+Since the rbtree logic treats the comparison function as a black
+box, it shouldn't be important exactly what the key type is.
+
+Checking the correctness of traversals is based on the fact that a red-black
+tree is a binary search tree, so the elements should be visited in increasing
+(for Left-Current-Right) or decreasing (for Right-Current-Left) order.
+
+Also, this module does some checks of the correctness of the find, delete
+and leftmost operations.
diff --git a/src/test/modules/test_rbtree/expected/test_rbtree.out b/src/test/modules/test_rbtree/expected/test_rbtree.out
new file mode 100644
index 0000000..3e32956
--- /dev/null
+++ b/src/test/modules/test_rbtree/expected/test_rbtree.out
@@ -0,0 +1,12 @@
+CREATE EXTENSION test_rbtree;
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_rb_tree(10000);
+ test_rb_tree
+--------------
+
+(1 row)
+
diff --git a/src/test/modules/test_rbtree/sql/test_rbtree.sql b/src/test/modules/test_rbtree/sql/test_rbtree.sql
new file mode 100644
index 0000000..d8dc88e
--- /dev/null
+++ b/src/test/modules/test_rbtree/sql/test_rbtree.sql
@@ -0,0 +1,8 @@
+CREATE EXTENSION test_rbtree;
+
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_rb_tree(10000);
diff --git a/src/test/modules/test_rbtree/test_rbtree--1.0.sql b/src/test/modules/test_rbtree/test_rbtree--1.0.sql
new file mode 100644
index 0000000..04f2a3a
--- /dev/null
+++ b/src/test/modules/test_rbtree/test_rbtree--1.0.sql
@@ -0,0 +1,8 @@
+/* src/test/modules/test_rbtree/test_rbtree--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_rbtree" to load this file. \quit
+
+CREATE FUNCTION test_rb_tree(size INTEGER)
+ RETURNS pg_catalog.void STRICT
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_rbtree/test_rbtree.c b/src/test/modules/test_rbtree/test_rbtree.c
new file mode 100644
index 0000000..1b4b8cf
--- /dev/null
+++ b/src/test/modules/test_rbtree/test_rbtree.c
@@ -0,0 +1,413 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_rbtree.c
+ * Test correctness of red-black tree operations.
+ *
+ * Copyright (c) 2009-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_rbtree/test_rbtree.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "lib/rbtree.h"
+#include "utils/memutils.h"
+
+PG_MODULE_MAGIC;
+
+
+/*
+ * Our test trees store an integer key, and nothing else.
+ */
+typedef struct IntRBTreeNode
+{
+	RBTNode		rbtnode;		/* embedded rbtree node; must be first */
+	int			key;			/* test payload: a non-negative integer key */
+} IntRBTreeNode;
+
+
+/*
+ * Node comparator. We don't worry about overflow in the subtraction,
+ * since none of our test keys are negative.
+ */
+static int
+irbt_cmp(const RBTNode *a, const RBTNode *b, void *arg)
+{
+	int			akey = ((const IntRBTreeNode *) a)->key;
+	int			bkey = ((const IntRBTreeNode *) b)->key;
+
+	/* plain subtraction is safe here: test keys are never negative */
+	return akey - bkey;
+}
+
+/*
+ * Node combiner. For testing purposes, just check that library doesn't
+ * try to combine unequal keys.
+ */
+static void
+irbt_combine(RBTNode *existing, const RBTNode *newdata, void *arg)
+{
+	const IntRBTreeNode *cur = (const IntRBTreeNode *) existing;
+	const IntRBTreeNode *added = (const IntRBTreeNode *) newdata;
+
+	/* The library must only ever combine nodes whose keys compare equal */
+	if (cur->key != added->key)
+		elog(ERROR, "red-black tree combines %d into %d",
+			 added->key, cur->key);
+}
+
+/* Node allocator */
+static RBTNode *
+irbt_alloc(void *arg)
+{
+	/* Allocate node storage in the current memory context; arg is unused */
+	return (RBTNode *) palloc(sizeof(IntRBTreeNode));
+}
+
+/* Node freer */
+static void
+irbt_free(RBTNode *node, void *arg)
+{
+	/* Release storage obtained by irbt_alloc; arg is unused */
+	pfree(node);
+}
+
+/*
+ * Create a red-black tree using our support functions
+ */
+static RBTree *
+create_int_rbtree(void)
+{
+	/* No extra state is needed by the support functions, hence NULL arg */
+	return rbt_create(sizeof(IntRBTreeNode),
+					  irbt_cmp,
+					  irbt_combine,
+					  irbt_alloc,
+					  irbt_free,
+					  NULL);
+}
+
+/*
+ * Generate a random permutation of the integers 0..size-1
+ */
+static int *
+GetPermutation(int size)
+{
+	int		   *permutation;
+	int			i;
+
+	permutation = (int *) palloc(size * sizeof(int));
+
+	/*
+	 * Guard the initial store: with size == 0 (which rbt_populate's
+	 * "if (size > 0)" branch shows is an anticipated case) writing
+	 * permutation[0] would scribble past a zero-length allocation.
+	 */
+	if (size > 0)
+		permutation[0] = 0;
+
+	/*
+	 * This is the "inside-out" variant of the Fisher-Yates shuffle algorithm.
+	 * Notionally, we append each new value to the array and then swap it with
+	 * a randomly-chosen array element (possibly including itself, else we
+	 * fail to generate permutations with the last integer last). The swap
+	 * step can be optimized by combining it with the insertion.
+	 */
+	for (i = 1; i < size; i++)
+	{
+		int			j = random() % (i + 1);
+
+		if (j < i)				/* avoid fetching undefined data if j=i */
+			permutation[i] = permutation[j];
+		permutation[j] = i;
+	}
+
+	return permutation;
+}
+
+/*
+ * Populate an empty RBTree with "size" integers having the values
+ * 0, step, 2*step, 3*step, ..., inserting them in random order
+ */
+static void
+rbt_populate(RBTree *tree, int size, int step)
+{
+	int		   *order = GetPermutation(size);
+	IntRBTreeNode probe;
+	bool		is_new;
+	int			idx;
+
+	/* Insert each key exactly once; no collisions are expected here. */
+	for (idx = 0; idx < size; idx++)
+	{
+		probe.key = step * order[idx];
+		rbt_insert(tree, (RBTNode *) &probe, &is_new);
+		if (!is_new)
+			elog(ERROR, "unexpected !isNew result from rbt_insert");
+	}
+
+	/*
+	 * Insert the first key a second time, to verify that a duplicate is
+	 * recognized as a collision rather than treated as a fresh insertion.
+	 * Checking one duplicate suffices; repeating it for every key would
+	 * add nothing.
+	 */
+	if (size > 0)
+	{
+		probe.key = step * order[0];
+		rbt_insert(tree, (RBTNode *) &probe, &is_new);
+		if (is_new)
+			elog(ERROR, "unexpected isNew result from rbt_insert");
+	}
+
+	pfree(order);
+}
+
+/*
+ * Check the correctness of left-right traversal.
+ * Left-right traversal is correct if all elements are
+ * visited in increasing order.
+ */
+static void
+testleftright(int size)
+{
+	RBTree	   *tree = create_int_rbtree();
+	IntRBTreeNode *cur;
+	RBTreeIterator iter;
+	int			prev = -1;		/* below any key we will insert */
+	int			seen = 0;
+
+	/* An iterator over an empty tree must yield nothing */
+	rbt_begin_iterate(tree, LeftRightWalk, &iter);
+	if (rbt_iterate(&iter) != NULL)
+		elog(ERROR, "left-right walk over empty tree produced an element");
+
+	/* load keys 0 .. size-1 in random order */
+	rbt_populate(tree, size, 1);
+
+	/* walk the whole tree, verifying strictly increasing key order */
+	rbt_begin_iterate(tree, LeftRightWalk, &iter);
+	for (;;)
+	{
+		cur = (IntRBTreeNode *) rbt_iterate(&iter);
+		if (cur == NULL)
+			break;
+		if (cur->key <= prev)
+			elog(ERROR, "left-right walk gives elements not in sorted order");
+		prev = cur->key;
+		seen++;
+	}
+
+	if (prev != size - 1)
+		elog(ERROR, "left-right walk did not reach end");
+	if (seen != size)
+		elog(ERROR, "left-right walk missed some elements");
+}
+
+/*
+ * Check the correctness of right-left traversal.
+ * Right-left traversal is correct if all elements are
+ * visited in decreasing order.
+ */
+static void
+testrightleft(int size)
+{
+	RBTree	   *tree = create_int_rbtree();
+	IntRBTreeNode *node;
+	RBTreeIterator iter;
+	int			lastKey = size; /* sentinel above any inserted key */
+	int			count = 0;
+
+	/* check iteration over empty tree: it must yield no elements */
+	rbt_begin_iterate(tree, RightLeftWalk, &iter);
+	if (rbt_iterate(&iter) != NULL)
+		elog(ERROR, "right-left walk over empty tree produced an element");
+
+	/* fill tree with consecutive natural numbers 0 .. size-1 */
+	rbt_populate(tree, size, 1);
+
+	/* iterate over the tree, expecting strictly decreasing key order */
+	rbt_begin_iterate(tree, RightLeftWalk, &iter);
+
+	while ((node = (IntRBTreeNode *) rbt_iterate(&iter)) != NULL)
+	{
+		/* check that order is decreasing */
+		if (node->key >= lastKey)
+			elog(ERROR, "right-left walk gives elements not in sorted order");
+		lastKey = node->key;
+		count++;
+	}
+
+	/* the final (smallest) key must be 0, and every key must be visited */
+	if (lastKey != 0)
+		elog(ERROR, "right-left walk did not reach end");
+	if (count != size)
+		elog(ERROR, "right-left walk missed some elements");
+}
+
+/*
+ * Check the correctness of the rbt_find operation by searching for
+ * both elements we inserted and elements we didn't.
+ */
+static void
+testfind(int size)
+{
+	RBTree	   *tree = create_int_rbtree();
+	int			i;
+
+	/* Insert even integers from 0 to 2 * (size-1); odd keys stay absent */
+	rbt_populate(tree, size, 2);
+
+	/* Check that all inserted elements can be found */
+	for (i = 0; i < size; i++)
+	{
+		IntRBTreeNode node;
+		IntRBTreeNode *resultNode;
+
+		node.key = 2 * i;
+		resultNode = (IntRBTreeNode *) rbt_find(tree, (RBTNode *) &node);
+		if (resultNode == NULL)
+			elog(ERROR, "inserted element was not found");
+		if (node.key != resultNode->key)
+			elog(ERROR, "find operation in rbtree gave wrong result");
+	}
+
+	/*
+	 * Check that not-inserted elements can not be found, being sure to try
+	 * values before the first and after the last element.  The loop probes
+	 * only odd keys (-1, 1, ..., 2*size-1), none of which were inserted.
+	 */
+	for (i = -1; i <= 2 * size; i += 2)
+	{
+		IntRBTreeNode node;
+		IntRBTreeNode *resultNode;
+
+		node.key = i;
+		resultNode = (IntRBTreeNode *) rbt_find(tree, (RBTNode *) &node);
+		if (resultNode != NULL)
+			elog(ERROR, "not-inserted element was found");
+	}
+}
+
+/*
+ * Check the correctness of the rbt_leftmost operation.
+ * This operation should always return the smallest element of the tree.
+ */
+static void
+testleftmost(int size)
+{
+	RBTree	   *tree = create_int_rbtree();
+	IntRBTreeNode *smallest;
+
+	/* An empty tree must report no leftmost node */
+	if (rbt_leftmost(tree) != NULL)
+		elog(ERROR, "leftmost node of empty tree is not NULL");
+
+	/* load keys 0 .. size-1 in random order */
+	rbt_populate(tree, size, 1);
+
+	/* after population, the minimum key is always 0 */
+	smallest = (IntRBTreeNode *) rbt_leftmost(tree);
+	if (smallest == NULL || smallest->key != 0)
+		elog(ERROR, "rbt_leftmost gave wrong result");
+}
+
+/*
+ * Check the correctness of the rbt_delete operation.
+ */
+static void
+testdelete(int size, int delsize)
+{
+	RBTree	   *tree = create_int_rbtree();
+	int		   *deleteIds;
+	bool	   *chosen;
+	int			i;
+
+	/* fill tree with consecutive natural numbers 0 .. size-1 */
+	rbt_populate(tree, size, 1);
+
+	/*
+	 * Choose delsize unique ids to delete.  On a collision we probe
+	 * forward (with wraparound) to the next unchosen slot, so the ids
+	 * are guaranteed distinct as long as delsize <= size.
+	 */
+	deleteIds = (int *) palloc(delsize * sizeof(int));
+	chosen = (bool *) palloc0(size * sizeof(bool));
+
+	for (i = 0; i < delsize; i++)
+	{
+		int			k = random() % size;
+
+		while (chosen[k])
+			k = (k + 1) % size;
+		deleteIds[i] = k;
+		chosen[k] = true;
+	}
+
+	/* Delete the chosen elements */
+	for (i = 0; i < delsize; i++)
+	{
+		IntRBTreeNode find;
+		IntRBTreeNode *node;
+
+		find.key = deleteIds[i];
+		/* Locate the node to be deleted */
+		node = (IntRBTreeNode *) rbt_find(tree, (RBTNode *) &find);
+		if (node == NULL || node->key != deleteIds[i])
+			elog(ERROR, "expected element was not found during deleting");
+		/* Delete it */
+		rbt_delete(tree, (RBTNode *) node);
+	}
+
+	/*
+	 * Check that deleted elements are gone and everything else is still
+	 * present, using the chosen[] map as the ground truth.
+	 */
+	for (i = 0; i < size; i++)
+	{
+		IntRBTreeNode node;
+		IntRBTreeNode *result;
+
+		node.key = i;
+		result = (IntRBTreeNode *) rbt_find(tree, (RBTNode *) &node);
+		if (chosen[i])
+		{
+			/* Deleted element should be absent */
+			if (result != NULL)
+				elog(ERROR, "deleted element still present in the rbtree");
+		}
+		else
+		{
+			/* Else it should be present */
+			if (result == NULL || result->key != i)
+				elog(ERROR, "delete operation removed wrong rbtree value");
+		}
+	}
+
+	/* Delete remaining elements, so as to exercise reducing tree to empty */
+	for (i = 0; i < size; i++)
+	{
+		IntRBTreeNode find;
+		IntRBTreeNode *node;
+
+		if (chosen[i])
+			continue;			/* already deleted above */
+		find.key = i;
+		/* Locate the node to be deleted */
+		node = (IntRBTreeNode *) rbt_find(tree, (RBTNode *) &find);
+		if (node == NULL || node->key != i)
+			elog(ERROR, "expected element was not found during deleting");
+		/* Delete it */
+		rbt_delete(tree, (RBTNode *) node);
+	}
+
+	/* Tree should now be empty */
+	if (rbt_leftmost(tree) != NULL)
+		elog(ERROR, "deleting all elements failed");
+
+	pfree(deleteIds);
+	pfree(chosen);
+}
+
+/*
+ * SQL-callable entry point to perform all tests
+ *
+ * Argument is the number of entries to put in the trees
+ */
+PG_FUNCTION_INFO_V1(test_rb_tree);
+
+Datum
+test_rb_tree(PG_FUNCTION_ARGS)
+{
+	int			size = PG_GETARG_INT32(0);
+
+	/* Reject sizes that are nonpositive or would overflow palloc requests */
+	if (size <= 0 || size > MaxAllocSize / sizeof(int))
+		elog(ERROR, "invalid size for test_rb_tree: %d", size);
+	testleftright(size);
+	testrightleft(size);
+	testfind(size);
+	testleftmost(size);
+	/* delete roughly 10% of the entries (at least one), then the rest */
+	testdelete(size, Max(size / 10, 1));
+	PG_RETURN_VOID();
+}
diff --git a/src/test/modules/test_rbtree/test_rbtree.control b/src/test/modules/test_rbtree/test_rbtree.control
new file mode 100644
index 0000000..17966a5
--- /dev/null
+++ b/src/test/modules/test_rbtree/test_rbtree.control
@@ -0,0 +1,4 @@
+comment = 'Test code for red-black tree library'
+default_version = '1.0'
+module_pathname = '$libdir/test_rbtree'
+relocatable = true
diff --git a/src/test/modules/test_rls_hooks/.gitignore b/src/test/modules/test_rls_hooks/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_rls_hooks/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_rls_hooks/Makefile b/src/test/modules/test_rls_hooks/Makefile
new file mode 100644
index 0000000..a4f7d85
--- /dev/null
+++ b/src/test/modules/test_rls_hooks/Makefile
@@ -0,0 +1,27 @@
+# src/test/modules/test_rls_hooks/Makefile
+
+MODULE_big = test_rls_hooks
+OBJS = \
+ $(WIN32RES) \
+ test_rls_hooks.o
+PGFILEDESC = "test_rls_hooks - example use of RLS hooks"
+
+EXTENSION = test_rls_hooks
+# DATA = test_rls_hooks--1.0.sql
+
+REGRESS = test_rls_hooks
+REGRESS_OPTS = --temp-config=$(top_srcdir)/src/test/modules/test_rls_hooks/rls_hooks.conf
+# Disabled because these tests require "shared_preload_libraries=test_rls_hooks",
+# which typical installcheck users do not have (e.g. buildfarm clients).
+NO_INSTALLCHECK = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_rls_hooks
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_rls_hooks/README b/src/test/modules/test_rls_hooks/README
new file mode 100644
index 0000000..c22e0d3
--- /dev/null
+++ b/src/test/modules/test_rls_hooks/README
@@ -0,0 +1,16 @@
+test_rls_hooks is an example of how to use the hooks provided for RLS to
+define additional policies to be used.
+
+Functions
+=========
+test_rls_hooks_permissive(CmdType cmdtype, Relation relation)
+ RETURNS List*
+
+Returns a list of policies which should be added to any existing
+policies on the relation, combined with OR.
+
+test_rls_hooks_restrictive(CmdType cmdtype, Relation relation)
+ RETURNS List*
+
+Returns a list of policies which should be added to any existing
+policies on the relation, combined with AND.
diff --git a/src/test/modules/test_rls_hooks/expected/test_rls_hooks.out b/src/test/modules/test_rls_hooks/expected/test_rls_hooks.out
new file mode 100644
index 0000000..b8c6d38
--- /dev/null
+++ b/src/test/modules/test_rls_hooks/expected/test_rls_hooks.out
@@ -0,0 +1,201 @@
+LOAD 'test_rls_hooks';
+CREATE TABLE rls_test_permissive (
+ username name,
+ supervisor name,
+ data integer
+);
+-- initial test data
+INSERT INTO rls_test_permissive VALUES ('regress_r1','regress_s1',4);
+INSERT INTO rls_test_permissive VALUES ('regress_r2','regress_s2',5);
+INSERT INTO rls_test_permissive VALUES ('regress_r3','regress_s3',6);
+CREATE TABLE rls_test_restrictive (
+ username name,
+ supervisor name,
+ data integer
+);
+-- At least one permissive policy must exist, otherwise
+-- the default deny policy will be applied. For
+-- testing the only-restrictive-policies from the hook,
+-- create a simple 'allow all' policy.
+CREATE POLICY p1 ON rls_test_restrictive USING (true);
+-- initial test data
+INSERT INTO rls_test_restrictive VALUES ('regress_r1','regress_s1',1);
+INSERT INTO rls_test_restrictive VALUES ('regress_r2','regress_s2',2);
+INSERT INTO rls_test_restrictive VALUES ('regress_r3','regress_s3',3);
+CREATE TABLE rls_test_both (
+ username name,
+ supervisor name,
+ data integer
+);
+-- initial test data
+INSERT INTO rls_test_both VALUES ('regress_r1','regress_s1',7);
+INSERT INTO rls_test_both VALUES ('regress_r2','regress_s2',8);
+INSERT INTO rls_test_both VALUES ('regress_r3','regress_s3',9);
+ALTER TABLE rls_test_permissive ENABLE ROW LEVEL SECURITY;
+ALTER TABLE rls_test_restrictive ENABLE ROW LEVEL SECURITY;
+ALTER TABLE rls_test_both ENABLE ROW LEVEL SECURITY;
+CREATE ROLE regress_r1;
+CREATE ROLE regress_s1;
+GRANT SELECT,INSERT ON rls_test_permissive TO regress_r1;
+GRANT SELECT,INSERT ON rls_test_restrictive TO regress_r1;
+GRANT SELECT,INSERT ON rls_test_both TO regress_r1;
+GRANT SELECT,INSERT ON rls_test_permissive TO regress_s1;
+GRANT SELECT,INSERT ON rls_test_restrictive TO regress_s1;
+GRANT SELECT,INSERT ON rls_test_both TO regress_s1;
+SET ROLE regress_r1;
+-- With only the hook's policies, permissive
+-- hook's policy is current_user = username
+EXPLAIN (costs off) SELECT * FROM rls_test_permissive;
+ QUERY PLAN
+-----------------------------------------
+ Seq Scan on rls_test_permissive
+ Filter: ("current_user"() = username)
+(2 rows)
+
+SELECT * FROM rls_test_permissive;
+ username | supervisor | data
+------------+------------+------
+ regress_r1 | regress_s1 | 4
+(1 row)
+
+-- success
+INSERT INTO rls_test_permissive VALUES ('regress_r1','regress_s1',10);
+-- failure
+INSERT INTO rls_test_permissive VALUES ('regress_r4','regress_s4',10);
+ERROR: new row violates row-level security policy for table "rls_test_permissive"
+SET ROLE regress_s1;
+-- With only the hook's policies, restrictive
+-- hook's policy is current_user = supervisor
+EXPLAIN (costs off) SELECT * FROM rls_test_restrictive;
+ QUERY PLAN
+-------------------------------------------
+ Seq Scan on rls_test_restrictive
+ Filter: ("current_user"() = supervisor)
+(2 rows)
+
+SELECT * FROM rls_test_restrictive;
+ username | supervisor | data
+------------+------------+------
+ regress_r1 | regress_s1 | 1
+(1 row)
+
+-- success
+INSERT INTO rls_test_restrictive VALUES ('regress_r1','regress_s1',10);
+-- failure
+INSERT INTO rls_test_restrictive VALUES ('regress_r4','regress_s4',10);
+ERROR: new row violates row-level security policy "extension policy" for table "rls_test_restrictive"
+SET ROLE regress_s1;
+-- With only the hook's policies, both
+-- permissive hook's policy is current_user = username
+-- restrictive hook's policy is current_user = supervisor
+-- combined with AND, results in nothing being allowed
+EXPLAIN (costs off) SELECT * FROM rls_test_both;
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Seq Scan on rls_test_both
+ Filter: ((supervisor = "current_user"()) AND (username = "current_user"()))
+(2 rows)
+
+SELECT * FROM rls_test_both;
+ username | supervisor | data
+----------+------------+------
+(0 rows)
+
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r1','regress_s1',10);
+ERROR: new row violates row-level security policy for table "rls_test_both"
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r4','regress_s1',10);
+ERROR: new row violates row-level security policy for table "rls_test_both"
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r4','regress_s4',10);
+ERROR: new row violates row-level security policy for table "rls_test_both"
+RESET ROLE;
+-- Create "internal" policies, to check that the policies from
+-- the hooks are combined correctly.
+CREATE POLICY p1 ON rls_test_permissive USING (data % 2 = 0);
+-- Remove the original allow-all policy
+DROP POLICY p1 ON rls_test_restrictive;
+CREATE POLICY p1 ON rls_test_restrictive USING (data % 2 = 0);
+CREATE POLICY p1 ON rls_test_both USING (data % 2 = 0);
+SET ROLE regress_r1;
+-- With both internal and hook policies, permissive
+EXPLAIN (costs off) SELECT * FROM rls_test_permissive;
+ QUERY PLAN
+---------------------------------------------------------------
+ Seq Scan on rls_test_permissive
+ Filter: (((data % 2) = 0) OR ("current_user"() = username))
+(2 rows)
+
+SELECT * FROM rls_test_permissive;
+ username | supervisor | data
+------------+------------+------
+ regress_r1 | regress_s1 | 4
+ regress_r3 | regress_s3 | 6
+ regress_r1 | regress_s1 | 10
+(3 rows)
+
+-- success
+INSERT INTO rls_test_permissive VALUES ('regress_r1','regress_s1',7);
+-- success
+INSERT INTO rls_test_permissive VALUES ('regress_r3','regress_s3',10);
+-- failure
+INSERT INTO rls_test_permissive VALUES ('regress_r4','regress_s4',7);
+ERROR: new row violates row-level security policy for table "rls_test_permissive"
+SET ROLE regress_s1;
+-- With both internal and hook policies, restrictive
+EXPLAIN (costs off) SELECT * FROM rls_test_restrictive;
+ QUERY PLAN
+------------------------------------------------------------------
+ Seq Scan on rls_test_restrictive
+ Filter: (("current_user"() = supervisor) AND ((data % 2) = 0))
+(2 rows)
+
+SELECT * FROM rls_test_restrictive;
+ username | supervisor | data
+------------+------------+------
+ regress_r1 | regress_s1 | 10
+(1 row)
+
+-- success
+INSERT INTO rls_test_restrictive VALUES ('regress_r1','regress_s1',8);
+-- failure
+INSERT INTO rls_test_restrictive VALUES ('regress_r3','regress_s3',10);
+ERROR: new row violates row-level security policy "extension policy" for table "rls_test_restrictive"
+-- failure
+INSERT INTO rls_test_restrictive VALUES ('regress_r1','regress_s1',7);
+ERROR: new row violates row-level security policy for table "rls_test_restrictive"
+-- failure
+INSERT INTO rls_test_restrictive VALUES ('regress_r4','regress_s4',7);
+ERROR: new row violates row-level security policy for table "rls_test_restrictive"
+-- With both internal and hook policies, both permissive
+-- and restrictive hook policies
+EXPLAIN (costs off) SELECT * FROM rls_test_both;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Seq Scan on rls_test_both
+ Filter: (("current_user"() = supervisor) AND (((data % 2) = 0) OR ("current_user"() = username)))
+(2 rows)
+
+SELECT * FROM rls_test_both;
+ username | supervisor | data
+----------+------------+------
+(0 rows)
+
+-- success
+INSERT INTO rls_test_both VALUES ('regress_r1','regress_s1',8);
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r3','regress_s3',10);
+ERROR: new row violates row-level security policy "extension policy" for table "rls_test_both"
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r1','regress_s1',7);
+ERROR: new row violates row-level security policy for table "rls_test_both"
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r4','regress_s4',7);
+ERROR: new row violates row-level security policy for table "rls_test_both"
+RESET ROLE;
+DROP TABLE rls_test_restrictive;
+DROP TABLE rls_test_permissive;
+DROP TABLE rls_test_both;
+DROP ROLE regress_r1;
+DROP ROLE regress_s1;
diff --git a/src/test/modules/test_rls_hooks/rls_hooks.conf b/src/test/modules/test_rls_hooks/rls_hooks.conf
new file mode 100644
index 0000000..a522c0e
--- /dev/null
+++ b/src/test/modules/test_rls_hooks/rls_hooks.conf
@@ -0,0 +1 @@
+shared_preload_libraries = test_rls_hooks
diff --git a/src/test/modules/test_rls_hooks/sql/test_rls_hooks.sql b/src/test/modules/test_rls_hooks/sql/test_rls_hooks.sql
new file mode 100644
index 0000000..746f6dd
--- /dev/null
+++ b/src/test/modules/test_rls_hooks/sql/test_rls_hooks.sql
@@ -0,0 +1,176 @@
+LOAD 'test_rls_hooks';
+
+CREATE TABLE rls_test_permissive (
+ username name,
+ supervisor name,
+ data integer
+);
+
+-- initial test data
+INSERT INTO rls_test_permissive VALUES ('regress_r1','regress_s1',4);
+INSERT INTO rls_test_permissive VALUES ('regress_r2','regress_s2',5);
+INSERT INTO rls_test_permissive VALUES ('regress_r3','regress_s3',6);
+
+CREATE TABLE rls_test_restrictive (
+ username name,
+ supervisor name,
+ data integer
+);
+
+-- At least one permissive policy must exist, otherwise
+-- the default deny policy will be applied. For
+-- testing the only-restrictive-policies from the hook,
+-- create a simple 'allow all' policy.
+CREATE POLICY p1 ON rls_test_restrictive USING (true);
+
+-- initial test data
+INSERT INTO rls_test_restrictive VALUES ('regress_r1','regress_s1',1);
+INSERT INTO rls_test_restrictive VALUES ('regress_r2','regress_s2',2);
+INSERT INTO rls_test_restrictive VALUES ('regress_r3','regress_s3',3);
+
+CREATE TABLE rls_test_both (
+ username name,
+ supervisor name,
+ data integer
+);
+
+-- initial test data
+INSERT INTO rls_test_both VALUES ('regress_r1','regress_s1',7);
+INSERT INTO rls_test_both VALUES ('regress_r2','regress_s2',8);
+INSERT INTO rls_test_both VALUES ('regress_r3','regress_s3',9);
+
+ALTER TABLE rls_test_permissive ENABLE ROW LEVEL SECURITY;
+ALTER TABLE rls_test_restrictive ENABLE ROW LEVEL SECURITY;
+ALTER TABLE rls_test_both ENABLE ROW LEVEL SECURITY;
+
+CREATE ROLE regress_r1;
+CREATE ROLE regress_s1;
+
+GRANT SELECT,INSERT ON rls_test_permissive TO regress_r1;
+GRANT SELECT,INSERT ON rls_test_restrictive TO regress_r1;
+GRANT SELECT,INSERT ON rls_test_both TO regress_r1;
+
+GRANT SELECT,INSERT ON rls_test_permissive TO regress_s1;
+GRANT SELECT,INSERT ON rls_test_restrictive TO regress_s1;
+GRANT SELECT,INSERT ON rls_test_both TO regress_s1;
+
+SET ROLE regress_r1;
+
+-- With only the hook's policies, permissive
+-- hook's policy is current_user = username
+EXPLAIN (costs off) SELECT * FROM rls_test_permissive;
+
+SELECT * FROM rls_test_permissive;
+
+-- success
+INSERT INTO rls_test_permissive VALUES ('regress_r1','regress_s1',10);
+
+-- failure
+INSERT INTO rls_test_permissive VALUES ('regress_r4','regress_s4',10);
+
+SET ROLE regress_s1;
+
+-- With only the hook's policies, restrictive
+-- hook's policy is current_user = supervisor
+EXPLAIN (costs off) SELECT * FROM rls_test_restrictive;
+
+SELECT * FROM rls_test_restrictive;
+
+-- success
+INSERT INTO rls_test_restrictive VALUES ('regress_r1','regress_s1',10);
+
+-- failure
+INSERT INTO rls_test_restrictive VALUES ('regress_r4','regress_s4',10);
+
+SET ROLE regress_s1;
+
+-- With only the hook's policies, both
+-- permissive hook's policy is current_user = username
+-- restrictive hook's policy is current_user = supervisor
+-- combined with AND, results in nothing being allowed
+EXPLAIN (costs off) SELECT * FROM rls_test_both;
+
+SELECT * FROM rls_test_both;
+
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r1','regress_s1',10);
+
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r4','regress_s1',10);
+
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r4','regress_s4',10);
+
+RESET ROLE;
+
+-- Create "internal" policies, to check that the policies from
+-- the hooks are combined correctly.
+CREATE POLICY p1 ON rls_test_permissive USING (data % 2 = 0);
+
+-- Remove the original allow-all policy
+DROP POLICY p1 ON rls_test_restrictive;
+CREATE POLICY p1 ON rls_test_restrictive USING (data % 2 = 0);
+
+CREATE POLICY p1 ON rls_test_both USING (data % 2 = 0);
+
+SET ROLE regress_r1;
+
+-- With both internal and hook policies, permissive
+EXPLAIN (costs off) SELECT * FROM rls_test_permissive;
+
+SELECT * FROM rls_test_permissive;
+
+-- success
+INSERT INTO rls_test_permissive VALUES ('regress_r1','regress_s1',7);
+
+-- success
+INSERT INTO rls_test_permissive VALUES ('regress_r3','regress_s3',10);
+
+-- failure
+INSERT INTO rls_test_permissive VALUES ('regress_r4','regress_s4',7);
+
+SET ROLE regress_s1;
+
+-- With both internal and hook policies, restrictive
+EXPLAIN (costs off) SELECT * FROM rls_test_restrictive;
+
+SELECT * FROM rls_test_restrictive;
+
+-- success
+INSERT INTO rls_test_restrictive VALUES ('regress_r1','regress_s1',8);
+
+-- failure
+INSERT INTO rls_test_restrictive VALUES ('regress_r3','regress_s3',10);
+
+-- failure
+INSERT INTO rls_test_restrictive VALUES ('regress_r1','regress_s1',7);
+
+-- failure
+INSERT INTO rls_test_restrictive VALUES ('regress_r4','regress_s4',7);
+
+-- With both internal and hook policies, both permissive
+-- and restrictive hook policies
+EXPLAIN (costs off) SELECT * FROM rls_test_both;
+
+SELECT * FROM rls_test_both;
+
+-- success
+INSERT INTO rls_test_both VALUES ('regress_r1','regress_s1',8);
+
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r3','regress_s3',10);
+
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r1','regress_s1',7);
+
+-- failure
+INSERT INTO rls_test_both VALUES ('regress_r4','regress_s4',7);
+
+RESET ROLE;
+
+DROP TABLE rls_test_restrictive;
+DROP TABLE rls_test_permissive;
+DROP TABLE rls_test_both;
+
+DROP ROLE regress_r1;
+DROP ROLE regress_s1;
diff --git a/src/test/modules/test_rls_hooks/test_rls_hooks.c b/src/test/modules/test_rls_hooks/test_rls_hooks.c
new file mode 100644
index 0000000..0bfa878
--- /dev/null
+++ b/src/test/modules/test_rls_hooks/test_rls_hooks.c
@@ -0,0 +1,178 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_rls_hooks.c
+ * Code for testing RLS hooks.
+ *
+ * Copyright (c) 2015-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_rls_hooks/test_rls_hooks.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "catalog/pg_type.h"
+#include "fmgr.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "parser/parse_clause.h"
+#include "parser/parse_collate.h"
+#include "parser/parse_node.h"
+#include "parser/parse_relation.h"
+#include "rewrite/rowsecurity.h"
+#include "test_rls_hooks.h"
+#include "utils/acl.h"
+#include "utils/rel.h"
+#include "utils/relcache.h"
+
+PG_MODULE_MAGIC;
+
+/* Saved hook values in case of unload */
+static row_security_policy_hook_type prev_row_security_policy_hook_permissive = NULL;
+static row_security_policy_hook_type prev_row_security_policy_hook_restrictive = NULL;
+
+void _PG_init(void);
+void _PG_fini(void);
+
+/* Install hooks */
+void
+_PG_init(void)
+{
+ /* Save values for unload */
+ prev_row_security_policy_hook_permissive = row_security_policy_hook_permissive;
+ prev_row_security_policy_hook_restrictive = row_security_policy_hook_restrictive;
+
+ /* Set our hooks */
+ row_security_policy_hook_permissive = test_rls_hooks_permissive;
+ row_security_policy_hook_restrictive = test_rls_hooks_restrictive;
+}
+
+/* Uninstall hooks */
+void
+_PG_fini(void)
+{
+ row_security_policy_hook_permissive = prev_row_security_policy_hook_permissive;
+ row_security_policy_hook_restrictive = prev_row_security_policy_hook_restrictive;
+}
+
+/*
+ * Return permissive policies to be added
+ */
+List *
+test_rls_hooks_permissive(CmdType cmdtype, Relation relation)
+{
+ List *policies = NIL;
+ RowSecurityPolicy *policy = palloc0(sizeof(RowSecurityPolicy));
+ Datum role;
+ FuncCall *n;
+ Node *e;
+ ColumnRef *c;
+ ParseState *qual_pstate;
+ ParseNamespaceItem *nsitem;
+
+ if (strcmp(RelationGetRelationName(relation), "rls_test_permissive") != 0 &&
+ strcmp(RelationGetRelationName(relation), "rls_test_both") != 0)
+ return NIL;
+
+ qual_pstate = make_parsestate(NULL);
+
+ nsitem = addRangeTableEntryForRelation(qual_pstate,
+ relation, AccessShareLock,
+ NULL, false, false);
+ addNSItemToQuery(qual_pstate, nsitem, false, true, true);
+
+ role = ObjectIdGetDatum(ACL_ID_PUBLIC);
+
+ policy->policy_name = pstrdup("extension policy");
+ policy->polcmd = '*';
+ policy->roles = construct_array(&role, 1, OIDOID, sizeof(Oid), true, TYPALIGN_INT);
+
+ /*
+ * policy->qual = (Expr *) makeConst(BOOLOID, -1, InvalidOid,
+ * sizeof(bool), BoolGetDatum(true), false, true);
+ */
+
+ n = makeFuncCall(list_make2(makeString("pg_catalog"),
+ makeString("current_user")), NIL, 0);
+
+ c = makeNode(ColumnRef);
+ c->fields = list_make1(makeString("username"));
+ c->location = 0;
+
+ e = (Node *) makeSimpleA_Expr(AEXPR_OP, "=", (Node *) n, (Node *) c, 0);
+
+ policy->qual = (Expr *) transformWhereClause(qual_pstate, copyObject(e),
+ EXPR_KIND_POLICY,
+ "POLICY");
+ /* Fix up collation information */
+ assign_expr_collations(qual_pstate, (Node *) policy->qual);
+
+ policy->with_check_qual = copyObject(policy->qual);
+ policy->hassublinks = false;
+
+ policies = list_make1(policy);
+
+ return policies;
+}
+
+/*
+ * Return restrictive policies to be added
+ *
+ * Note that a permissive policy must exist or the default-deny policy
+ * will be included and nothing will be visible. If no filtering should
+ * be done except for the restrictive policy, then a single "USING (true)"
+ * permissive policy can be used; see the regression tests.
+ */
+List *
+test_rls_hooks_restrictive(CmdType cmdtype, Relation relation)
+{
+ List *policies = NIL;
+ RowSecurityPolicy *policy = palloc0(sizeof(RowSecurityPolicy));
+ Datum role;
+ FuncCall *n;
+ Node *e;
+ ColumnRef *c;
+ ParseState *qual_pstate;
+ ParseNamespaceItem *nsitem;
+
+ if (strcmp(RelationGetRelationName(relation), "rls_test_restrictive") != 0 &&
+ strcmp(RelationGetRelationName(relation), "rls_test_both") != 0)
+ return NIL;
+
+ qual_pstate = make_parsestate(NULL);
+
+ nsitem = addRangeTableEntryForRelation(qual_pstate,
+ relation, AccessShareLock,
+ NULL, false, false);
+ addNSItemToQuery(qual_pstate, nsitem, false, true, true);
+
+ role = ObjectIdGetDatum(ACL_ID_PUBLIC);
+
+ policy->policy_name = pstrdup("extension policy");
+ policy->polcmd = '*';
+ policy->roles = construct_array(&role, 1, OIDOID, sizeof(Oid), true, TYPALIGN_INT);
+
+ n = makeFuncCall(list_make2(makeString("pg_catalog"),
+ makeString("current_user")), NIL, 0);
+
+ c = makeNode(ColumnRef);
+ c->fields = list_make1(makeString("supervisor"));
+ c->location = 0;
+
+ e = (Node *) makeSimpleA_Expr(AEXPR_OP, "=", (Node *) n, (Node *) c, 0);
+
+ policy->qual = (Expr *) transformWhereClause(qual_pstate, copyObject(e),
+ EXPR_KIND_POLICY,
+ "POLICY");
+ /* Fix up collation information */
+ assign_expr_collations(qual_pstate, (Node *) policy->qual);
+
+ policy->with_check_qual = copyObject(policy->qual);
+ policy->hassublinks = false;
+
+ policies = list_make1(policy);
+
+ return policies;
+}
diff --git a/src/test/modules/test_rls_hooks/test_rls_hooks.control b/src/test/modules/test_rls_hooks/test_rls_hooks.control
new file mode 100644
index 0000000..9f9f13f
--- /dev/null
+++ b/src/test/modules/test_rls_hooks/test_rls_hooks.control
@@ -0,0 +1,4 @@
+comment = 'Test code for RLS hooks'
+default_version = '1.0'
+module_pathname = '$libdir/test_rls_hooks'
+relocatable = true
diff --git a/src/test/modules/test_rls_hooks/test_rls_hooks.h b/src/test/modules/test_rls_hooks/test_rls_hooks.h
new file mode 100644
index 0000000..f4c94bc
--- /dev/null
+++ b/src/test/modules/test_rls_hooks/test_rls_hooks.h
@@ -0,0 +1,25 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_rls_hooks.h
+ * Definitions for RLS hooks
+ *
+ * Copyright (c) 2015-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_rls_hooks/test_rls_hooks.h
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#ifndef TEST_RLS_HOOKS_H
+#define TEST_RLS_HOOKS_H
+
+#include <rewrite/rowsecurity.h>
+
+/* Return set of permissive policies to add, based on CmdType and Relation */
+extern List *test_rls_hooks_permissive(CmdType cmdtype, Relation relation);
+
+/* Return set of restrictive policies to add, based on CmdType and Relation */
+extern List *test_rls_hooks_restrictive(CmdType cmdtype, Relation relation);
+
+#endif
diff --git a/src/test/modules/test_shm_mq/.gitignore b/src/test/modules/test_shm_mq/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/test_shm_mq/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_shm_mq/Makefile b/src/test/modules/test_shm_mq/Makefile
new file mode 100644
index 0000000..1171ced
--- /dev/null
+++ b/src/test/modules/test_shm_mq/Makefile
@@ -0,0 +1,25 @@
+# src/test/modules/test_shm_mq/Makefile
+
+MODULE_big = test_shm_mq
+OBJS = \
+ $(WIN32RES) \
+ setup.o \
+ test.o \
+ worker.o
+PGFILEDESC = "test_shm_mq - example use of shared memory message queue"
+
+EXTENSION = test_shm_mq
+DATA = test_shm_mq--1.0.sql
+
+REGRESS = test_shm_mq
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_shm_mq
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_shm_mq/README b/src/test/modules/test_shm_mq/README
new file mode 100644
index 0000000..641407b
--- /dev/null
+++ b/src/test/modules/test_shm_mq/README
@@ -0,0 +1,49 @@
+test_shm_mq is an example of how to use dynamic shared memory
+and the shared memory message queue facilities to coordinate a user backend
+with the efforts of one or more background workers. It is not intended to
+do anything useful on its own; rather, it is a demonstration of how these
+facilities can be used, and a unit test of those facilities.
+
+The functions in this extension send the same message repeatedly through
+a loop of processes. The message payload, the size of the message queue
+through which it is sent, and the number of processes in the loop are
+configurable. At the end, the message may be verified to ensure that it
+has not been corrupted in transmission.
+
+Functions
+=========
+
+
+test_shm_mq(queue_size int8, message text,
+ repeat_count int4 default 1, num_workers int4 default 1)
+ RETURNS void
+
+This function sends and receives messages synchronously. The user
+backend sends the provided message to the first background worker using
+a message queue of the given size. The first background worker sends
+the message to the second background worker, if the number of workers
+is greater than one, and so forth. Eventually, the last background
+worker sends the message back to the user backend. If the repeat count
+is greater than one, the user backend then sends the message back to
+the first worker. Once the message has been sent and received by all
+the coordinating processes a number of times equal to the repeat count,
+the user backend verifies that the message finally received matches the
+one originally sent and throws an error if not.
+
+
+test_shm_mq_pipelined(queue_size int8, message text,
+ repeat_count int4 default 1, num_workers int4 default 1,
+ verify bool default true)
+ RETURNS void
+
+This function sends the same message multiple times, as specified by the
+repeat count, to the first background worker using a queue of the given
+size. These messages are then forwarded to each background worker in
+turn, in each case using a queue of the given size. Finally, the last
+background worker sends the messages back to the user backend. The user
+backend uses non-blocking sends and receives, so that it may begin receiving
+copies of the message before it has finished sending all copies of the
+message. The 'verify' argument controls whether or not the
+received copies are checked against the message that was sent. (This
+takes nontrivial time so it may be useful to disable it for benchmarking
+purposes.)
diff --git a/src/test/modules/test_shm_mq/expected/test_shm_mq.out b/src/test/modules/test_shm_mq/expected/test_shm_mq.out
new file mode 100644
index 0000000..c4858b0
--- /dev/null
+++ b/src/test/modules/test_shm_mq/expected/test_shm_mq.out
@@ -0,0 +1,36 @@
+CREATE EXTENSION test_shm_mq;
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_shm_mq(1024, '', 2000, 1);
+ test_shm_mq
+-------------
+
+(1 row)
+
+SELECT test_shm_mq(1024, 'a', 2001, 1);
+ test_shm_mq
+-------------
+
+(1 row)
+
+SELECT test_shm_mq(32768, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+900*random())::int)), 10000, 1);
+ test_shm_mq
+-------------
+
+(1 row)
+
+SELECT test_shm_mq(100, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+200*random())::int)), 10000, 1);
+ test_shm_mq
+-------------
+
+(1 row)
+
+SELECT test_shm_mq_pipelined(16384, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,270000)), 200, 3);
+ test_shm_mq_pipelined
+-----------------------
+
+(1 row)
+
diff --git a/src/test/modules/test_shm_mq/setup.c b/src/test/modules/test_shm_mq/setup.c
new file mode 100644
index 0000000..509a90f
--- /dev/null
+++ b/src/test/modules/test_shm_mq/setup.c
@@ -0,0 +1,316 @@
+/*--------------------------------------------------------------------------
+ *
+ * setup.c
+ *		Code to set up a dynamic shared memory segment and a specified
+ * number of background workers for shared memory message queue
+ * testing.
+ *
+ * Copyright (c) 2013-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_shm_mq/setup.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "postmaster/bgworker.h"
+#include "storage/procsignal.h"
+#include "storage/shm_toc.h"
+#include "test_shm_mq.h"
+#include "utils/memutils.h"
+
+typedef struct
+{
+ int nworkers;
+ BackgroundWorkerHandle *handle[FLEXIBLE_ARRAY_MEMBER];
+} worker_state;
+
+static void setup_dynamic_shared_memory(int64 queue_size, int nworkers,
+ dsm_segment **segp,
+ test_shm_mq_header **hdrp,
+ shm_mq **outp, shm_mq **inp);
+static worker_state *setup_background_workers(int nworkers,
+ dsm_segment *seg);
+static void cleanup_background_workers(dsm_segment *seg, Datum arg);
+static void wait_for_workers_to_become_ready(worker_state *wstate,
+ volatile test_shm_mq_header *hdr);
+static bool check_worker_status(worker_state *wstate);
+
+/*
+ * Set up a dynamic shared memory segment and zero or more background workers
+ * for a test run.
+ */
+void
+test_shm_mq_setup(int64 queue_size, int32 nworkers, dsm_segment **segp,
+ shm_mq_handle **output, shm_mq_handle **input)
+{
+ dsm_segment *seg;
+ test_shm_mq_header *hdr;
+ shm_mq *outq = NULL; /* placate compiler */
+ shm_mq *inq = NULL; /* placate compiler */
+ worker_state *wstate;
+
+ /* Set up a dynamic shared memory segment. */
+ setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq);
+ *segp = seg;
+
+ /* Register background workers. */
+ wstate = setup_background_workers(nworkers, seg);
+
+ /* Attach the queues. */
+ *output = shm_mq_attach(outq, seg, wstate->handle[0]);
+ *input = shm_mq_attach(inq, seg, wstate->handle[nworkers - 1]);
+
+ /* Wait for workers to become ready. */
+ wait_for_workers_to_become_ready(wstate, hdr);
+
+ /*
+ * Once we reach this point, all workers are ready. We no longer need to
+ * kill them if we die; they'll die on their own as the message queues
+ * shut down.
+ */
+ cancel_on_dsm_detach(seg, cleanup_background_workers,
+ PointerGetDatum(wstate));
+ pfree(wstate);
+}
+
+/*
+ * Set up a dynamic shared memory segment.
+ *
+ * We set up a small control region that contains only a test_shm_mq_header,
+ * plus one region per message queue. There are as many message queues as
+ * the number of workers, plus one.
+ */
+static void
+setup_dynamic_shared_memory(int64 queue_size, int nworkers,
+ dsm_segment **segp, test_shm_mq_header **hdrp,
+ shm_mq **outp, shm_mq **inp)
+{
+ shm_toc_estimator e;
+ int i;
+ Size segsize;
+ dsm_segment *seg;
+ shm_toc *toc;
+ test_shm_mq_header *hdr;
+
+ /* Ensure a valid queue size. */
+ if (queue_size < 0 || ((uint64) queue_size) < shm_mq_minimum_size)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("queue size must be at least %zu bytes",
+ shm_mq_minimum_size)));
+ if (queue_size != ((Size) queue_size))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("queue size overflows size_t")));
+
+ /*
+ * Estimate how much shared memory we need.
+ *
+ * Because the TOC machinery may choose to insert padding of oddly-sized
+ * requests, we must estimate each chunk separately.
+ *
+ * We need one key to register the location of the header, and we need
+ * nworkers + 1 keys to track the locations of the message queues.
+ */
+ shm_toc_initialize_estimator(&e);
+ shm_toc_estimate_chunk(&e, sizeof(test_shm_mq_header));
+ for (i = 0; i <= nworkers; ++i)
+ shm_toc_estimate_chunk(&e, (Size) queue_size);
+ shm_toc_estimate_keys(&e, 2 + nworkers);
+ segsize = shm_toc_estimate(&e);
+
+ /* Create the shared memory segment and establish a table of contents. */
+ seg = dsm_create(shm_toc_estimate(&e), 0);
+ toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg),
+ segsize);
+
+ /* Set up the header region. */
+ hdr = shm_toc_allocate(toc, sizeof(test_shm_mq_header));
+ SpinLockInit(&hdr->mutex);
+ hdr->workers_total = nworkers;
+ hdr->workers_attached = 0;
+ hdr->workers_ready = 0;
+ shm_toc_insert(toc, 0, hdr);
+
+ /* Set up one message queue per worker, plus one. */
+ for (i = 0; i <= nworkers; ++i)
+ {
+ shm_mq *mq;
+
+ mq = shm_mq_create(shm_toc_allocate(toc, (Size) queue_size),
+ (Size) queue_size);
+ shm_toc_insert(toc, i + 1, mq);
+
+ if (i == 0)
+ {
+ /* We send messages to the first queue. */
+ shm_mq_set_sender(mq, MyProc);
+ *outp = mq;
+ }
+ if (i == nworkers)
+ {
+ /* We receive messages from the last queue. */
+ shm_mq_set_receiver(mq, MyProc);
+ *inp = mq;
+ }
+ }
+
+ /* Return results to caller. */
+ *segp = seg;
+ *hdrp = hdr;
+}
+
+/*
+ * Register background workers.
+ */
+static worker_state *
+setup_background_workers(int nworkers, dsm_segment *seg)
+{
+ MemoryContext oldcontext;
+ BackgroundWorker worker;
+ worker_state *wstate;
+ int i;
+
+ /*
+ * We need the worker_state object and the background worker handles to
+ * which it points to be allocated in CurTransactionContext rather than
+ * ExprContext; otherwise, they'll be destroyed before the on_dsm_detach
+ * hooks run.
+ */
+ oldcontext = MemoryContextSwitchTo(CurTransactionContext);
+
+ /* Create worker state object. */
+ wstate = MemoryContextAlloc(TopTransactionContext,
+ offsetof(worker_state, handle) +
+ sizeof(BackgroundWorkerHandle *) * nworkers);
+ wstate->nworkers = 0;
+
+ /*
+ * Arrange to kill all the workers if we abort before all workers are
+ * finished hooking themselves up to the dynamic shared memory segment.
+ *
+ * If we die after all the workers have finished hooking themselves up to
+ * the dynamic shared memory segment, we'll mark the two queues to which
+ * we're directly connected as detached, and the worker(s) connected to
+ * those queues will exit, marking any other queues to which they are
+ * connected as detached. This will cause any as-yet-unaware workers
+ * connected to those queues to exit in their turn, and so on, until
+ * everybody exits.
+ *
+ * But suppose the workers which are supposed to connect to the queues to
+ * which we're directly attached exit due to some error before they
+ * actually attach the queues. The remaining workers will have no way of
+ * knowing this. From their perspective, they're still waiting for those
+ * workers to start, when in fact they've already died.
+ */
+ on_dsm_detach(seg, cleanup_background_workers,
+ PointerGetDatum(wstate));
+
+ /* Configure a worker. */
+ memset(&worker, 0, sizeof(worker));
+ worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
+ worker.bgw_start_time = BgWorkerStart_ConsistentState;
+ worker.bgw_restart_time = BGW_NEVER_RESTART;
+ sprintf(worker.bgw_library_name, "test_shm_mq");
+ sprintf(worker.bgw_function_name, "test_shm_mq_main");
+ snprintf(worker.bgw_type, BGW_MAXLEN, "test_shm_mq");
+ worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));
+ /* set bgw_notify_pid, so we can detect if the worker stops */
+ worker.bgw_notify_pid = MyProcPid;
+
+ /* Register the workers. */
+ for (i = 0; i < nworkers; ++i)
+ {
+ if (!RegisterDynamicBackgroundWorker(&worker, &wstate->handle[i]))
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("could not register background process"),
+ errhint("You may need to increase max_worker_processes.")));
+ ++wstate->nworkers;
+ }
+
+ /* All done. */
+ MemoryContextSwitchTo(oldcontext);
+ return wstate;
+}
+
+static void
+cleanup_background_workers(dsm_segment *seg, Datum arg)
+{
+ worker_state *wstate = (worker_state *) DatumGetPointer(arg);
+
+ while (wstate->nworkers > 0)
+ {
+ --wstate->nworkers;
+ TerminateBackgroundWorker(wstate->handle[wstate->nworkers]);
+ }
+}
+
+static void
+wait_for_workers_to_become_ready(worker_state *wstate,
+ volatile test_shm_mq_header *hdr)
+{
+ bool result = false;
+
+ for (;;)
+ {
+ int workers_ready;
+
+ /* If all the workers are ready, we have succeeded. */
+ SpinLockAcquire(&hdr->mutex);
+ workers_ready = hdr->workers_ready;
+ SpinLockRelease(&hdr->mutex);
+ if (workers_ready >= wstate->nworkers)
+ {
+ result = true;
+ break;
+ }
+
+ /* If any workers (or the postmaster) have died, we have failed. */
+ if (!check_worker_status(wstate))
+ {
+ result = false;
+ break;
+ }
+
+ /* Wait to be signaled. */
+ (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
+ PG_WAIT_EXTENSION);
+
+ /* Reset the latch so we don't spin. */
+ ResetLatch(MyLatch);
+
+ /* An interrupt may have occurred while we were waiting. */
+ CHECK_FOR_INTERRUPTS();
+ }
+
+ if (!result)
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("one or more background workers failed to start")));
+}
+
+static bool
+check_worker_status(worker_state *wstate)
+{
+ int n;
+
+ /* If any workers (or the postmaster) have died, we have failed. */
+ for (n = 0; n < wstate->nworkers; ++n)
+ {
+ BgwHandleStatus status;
+ pid_t pid;
+
+ status = GetBackgroundWorkerPid(wstate->handle[n], &pid);
+ if (status == BGWH_STOPPED || status == BGWH_POSTMASTER_DIED)
+ return false;
+ }
+
+ /* Otherwise, things still look OK. */
+ return true;
+}
diff --git a/src/test/modules/test_shm_mq/sql/test_shm_mq.sql b/src/test/modules/test_shm_mq/sql/test_shm_mq.sql
new file mode 100644
index 0000000..9de19d3
--- /dev/null
+++ b/src/test/modules/test_shm_mq/sql/test_shm_mq.sql
@@ -0,0 +1,12 @@
+CREATE EXTENSION test_shm_mq;
+
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_shm_mq(1024, '', 2000, 1);
+SELECT test_shm_mq(1024, 'a', 2001, 1);
+SELECT test_shm_mq(32768, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+900*random())::int)), 10000, 1);
+SELECT test_shm_mq(100, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+200*random())::int)), 10000, 1);
+SELECT test_shm_mq_pipelined(16384, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,270000)), 200, 3);
diff --git a/src/test/modules/test_shm_mq/test.c b/src/test/modules/test_shm_mq/test.c
new file mode 100644
index 0000000..5eda8b9
--- /dev/null
+++ b/src/test/modules/test_shm_mq/test.c
@@ -0,0 +1,266 @@
+/*--------------------------------------------------------------------------
+ *
+ * test.c
+ * Test harness code for shared memory message queues.
+ *
+ * Copyright (c) 2013-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_shm_mq/test.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+
+#include "test_shm_mq.h"
+
+PG_MODULE_MAGIC;
+
+PG_FUNCTION_INFO_V1(test_shm_mq);
+PG_FUNCTION_INFO_V1(test_shm_mq_pipelined);
+
+void _PG_init(void);
+
+static void verify_message(Size origlen, char *origdata, Size newlen,
+ char *newdata);
+
+/*
+ * Simple test of the shared memory message queue infrastructure.
+ *
+ * We set up a ring of message queues passing through 1 or more background
+ * processes and eventually looping back to ourselves. We then send a message
+ * through the ring a number of times indicated by the loop count. At the end,
+ * we check whether the final message matches the one we started with.
+ */
+Datum
+test_shm_mq(PG_FUNCTION_ARGS)
+{
+ int64 queue_size = PG_GETARG_INT64(0);
+ text *message = PG_GETARG_TEXT_PP(1);
+ char *message_contents = VARDATA_ANY(message);
+ int message_size = VARSIZE_ANY_EXHDR(message);
+ int32 loop_count = PG_GETARG_INT32(2);
+ int32 nworkers = PG_GETARG_INT32(3);
+ dsm_segment *seg;
+ shm_mq_handle *outqh;
+ shm_mq_handle *inqh;
+ shm_mq_result res;
+ Size len;
+ void *data;
+
+ /* A negative loopcount is nonsensical. */
+ if (loop_count < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("repeat count size must be an integer value greater than or equal to zero")));
+
+ /*
+ * Since this test sends data using the blocking interfaces, it cannot
+ * send data to itself. Therefore, a minimum of 1 worker is required. Of
+ * course, a negative worker count is nonsensical.
+ */
+ if (nworkers <= 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("number of workers must be an integer value greater than zero")));
+
+ /* Set up dynamic shared memory segment and background workers. */
+ test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh);
+
+ /* Send the initial message. */
+ res = shm_mq_send(outqh, message_size, message_contents, false);
+ if (res != SHM_MQ_SUCCESS)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not send message")));
+
+ /*
+ * Receive a message and send it back out again. Do this a number of
+ * times equal to the loop count.
+ */
+ for (;;)
+ {
+ /* Receive a message. */
+ res = shm_mq_receive(inqh, &len, &data, false);
+ if (res != SHM_MQ_SUCCESS)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not receive message")));
+
+ /* If this is supposed to be the last iteration, stop here. */
+ if (--loop_count <= 0)
+ break;
+
+ /* Send it back out. */
+ res = shm_mq_send(outqh, len, data, false);
+ if (res != SHM_MQ_SUCCESS)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not send message")));
+ }
+
+ /*
+ * Finally, check that we got back the same message from the last
+ * iteration that we originally sent.
+ */
+ verify_message(message_size, message_contents, len, data);
+
+ /* Clean up. */
+ dsm_detach(seg);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Pipelined test of the shared memory message queue infrastructure.
+ *
+ * As in the basic test, we set up a ring of message queues passing through
+ * 1 or more background processes and eventually looping back to ourselves.
+ * Then, we send N copies of the user-specified message through the ring and
+ * receive them all back. Since this might fill up all message queues in the
+ * ring and then stall, we must be prepared to begin receiving the messages
+ * back before we've finished sending them.
+ */
+Datum
+test_shm_mq_pipelined(PG_FUNCTION_ARGS)
+{
+ int64 queue_size = PG_GETARG_INT64(0);
+ text *message = PG_GETARG_TEXT_PP(1);
+ char *message_contents = VARDATA_ANY(message);
+ int message_size = VARSIZE_ANY_EXHDR(message);
+ int32 loop_count = PG_GETARG_INT32(2);
+ int32 nworkers = PG_GETARG_INT32(3);
+ bool verify = PG_GETARG_BOOL(4);
+ int32 send_count = 0;
+ int32 receive_count = 0;
+ dsm_segment *seg;
+ shm_mq_handle *outqh;
+ shm_mq_handle *inqh;
+ shm_mq_result res;
+ Size len;
+ void *data;
+
+ /* A negative loopcount is nonsensical. */
+ if (loop_count < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("repeat count size must be an integer value greater than or equal to zero")));
+
+ /*
+ * Using the nonblocking interfaces, we can even send data to ourselves,
+ * so the minimum number of workers for this test is zero.
+ */
+ if (nworkers < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("number of workers must be an integer value greater than or equal to zero")));
+
+ /* Set up dynamic shared memory segment and background workers. */
+ test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh);
+
+ /* Main loop. */
+ for (;;)
+ {
+ bool wait = true;
+
+ /*
+ * If we haven't yet sent the message the requisite number of times,
+ * try again to send it now. Note that when shm_mq_send() returns
+ * SHM_MQ_WOULD_BLOCK, the next call to that function must pass the
+ * same message size and contents; that's not an issue here because
+ * we're sending the same message every time.
+ */
+ if (send_count < loop_count)
+ {
+ res = shm_mq_send(outqh, message_size, message_contents, true);
+ if (res == SHM_MQ_SUCCESS)
+ {
+ ++send_count;
+ wait = false;
+ }
+ else if (res == SHM_MQ_DETACHED)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not send message")));
+ }
+
+ /*
+ * If we haven't yet received the message the requisite number of
+ * times, try to receive it again now.
+ */
+ if (receive_count < loop_count)
+ {
+ res = shm_mq_receive(inqh, &len, &data, true);
+ if (res == SHM_MQ_SUCCESS)
+ {
+ ++receive_count;
+ /* Verifying every time is slow, so it's optional. */
+ if (verify)
+ verify_message(message_size, message_contents, len, data);
+ wait = false;
+ }
+ else if (res == SHM_MQ_DETACHED)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not receive message")));
+ }
+ else
+ {
+ /*
+ * Otherwise, we've received the message enough times. This
+ * shouldn't happen unless we've also sent it enough times.
+ */
+ if (send_count != receive_count)
+ ereport(ERROR,
+ (errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("message sent %d times, but received %d times",
+ send_count, receive_count)));
+ break;
+ }
+
+ if (wait)
+ {
+ /*
+ * If we made no progress, wait for one of the other processes to
+ * which we are connected to set our latch, indicating that they
+ * have read or written data and therefore there may now be work
+ * for us to do.
+ */
+ (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
+ PG_WAIT_EXTENSION);
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
+ }
+
+ /* Clean up. */
+ dsm_detach(seg);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Verify that two messages are the same.
+ */
+static void
+verify_message(Size origlen, char *origdata, Size newlen, char *newdata)
+{
+ Size i;
+
+ if (origlen != newlen)
+ ereport(ERROR,
+ (errmsg("message corrupted"),
+ errdetail("The original message was %zu bytes but the final message is %zu bytes.",
+ origlen, newlen)));
+
+ for (i = 0; i < origlen; ++i)
+ if (origdata[i] != newdata[i])
+ ereport(ERROR,
+ (errmsg("message corrupted"),
+ errdetail("The new and original messages differ at byte %zu of %zu.", i, origlen)));
+}
diff --git a/src/test/modules/test_shm_mq/test_shm_mq--1.0.sql b/src/test/modules/test_shm_mq/test_shm_mq--1.0.sql
new file mode 100644
index 0000000..56db05d
--- /dev/null
+++ b/src/test/modules/test_shm_mq/test_shm_mq--1.0.sql
@@ -0,0 +1,19 @@
+/* src/test/modules/test_shm_mq/test_shm_mq--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_shm_mq" to load this file. \quit
+
+CREATE FUNCTION test_shm_mq(queue_size pg_catalog.int8,
+ message pg_catalog.text,
+ repeat_count pg_catalog.int4 default 1,
+ num_workers pg_catalog.int4 default 1)
+ RETURNS pg_catalog.void STRICT
+ AS 'MODULE_PATHNAME' LANGUAGE C;
+
+CREATE FUNCTION test_shm_mq_pipelined(queue_size pg_catalog.int8,
+ message pg_catalog.text,
+ repeat_count pg_catalog.int4 default 1,
+ num_workers pg_catalog.int4 default 1,
+ verify pg_catalog.bool default true)
+ RETURNS pg_catalog.void STRICT
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_shm_mq/test_shm_mq.control b/src/test/modules/test_shm_mq/test_shm_mq.control
new file mode 100644
index 0000000..d9a74c7
--- /dev/null
+++ b/src/test/modules/test_shm_mq/test_shm_mq.control
@@ -0,0 +1,4 @@
+comment = 'Test code for shared memory message queues'
+default_version = '1.0'
+module_pathname = '$libdir/test_shm_mq'
+relocatable = true
diff --git a/src/test/modules/test_shm_mq/test_shm_mq.h b/src/test/modules/test_shm_mq/test_shm_mq.h
new file mode 100644
index 0000000..5764852
--- /dev/null
+++ b/src/test/modules/test_shm_mq/test_shm_mq.h
@@ -0,0 +1,45 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_shm_mq.h
+ * Definitions for shared memory message queues
+ *
+ * Copyright (c) 2013-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_shm_mq/test_shm_mq.h
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#ifndef TEST_SHM_MQ_H
+#define TEST_SHM_MQ_H
+
+#include "storage/dsm.h"
+#include "storage/shm_mq.h"
+#include "storage/spin.h"
+
+/* Identifier for shared memory segments used by this extension. */
+#define PG_TEST_SHM_MQ_MAGIC		0x79fb2447
+
+/*
+ * This structure is stored in the dynamic shared memory segment.  We use
+ * it to determine whether all workers started up OK and successfully
+ * attached to their respective shared message queues.
+ */
+typedef struct
+{
+	slock_t		mutex;			/* protects the counters below */
+	int			workers_total;	/* how many workers were registered */
+	int			workers_attached;	/* bumped as each worker maps the segment */
+	int			workers_ready;	/* bumped once a worker is fully initialized */
+} test_shm_mq_header;
+
+/* Set up dynamic shared memory and background workers for test run. */
+extern void test_shm_mq_setup(int64 queue_size, int32 nworkers,
+							  dsm_segment **seg, shm_mq_handle **output,
+							  shm_mq_handle **input);
+
+/* Main entrypoint for a worker. */
+extern void test_shm_mq_main(Datum) pg_attribute_noreturn();
+
+#endif
diff --git a/src/test/modules/test_shm_mq/worker.c b/src/test/modules/test_shm_mq/worker.c
new file mode 100644
index 0000000..cb63ade
--- /dev/null
+++ b/src/test/modules/test_shm_mq/worker.c
@@ -0,0 +1,219 @@
+/*--------------------------------------------------------------------------
+ *
+ * worker.c
+ * Code for sample worker making use of shared memory message queues.
+ * Our test worker simply reads messages from one message queue and
+ * writes them back out to another message queue. In a real
+ * application, you'd presumably want the worker to do some more
+ * complex calculation rather than simply returning the input,
+ * but it should be possible to use much of the control logic just
+ * as presented here.
+ *
+ * Copyright (c) 2013-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_shm_mq/worker.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "miscadmin.h"
+#include "storage/ipc.h"
+#include "storage/procarray.h"
+#include "storage/shm_mq.h"
+#include "storage/shm_toc.h"
+
+#include "test_shm_mq.h"
+
+static void handle_sigterm(SIGNAL_ARGS);
+static void attach_to_queues(dsm_segment *seg, shm_toc *toc,
+ int myworkernumber, shm_mq_handle **inqhp,
+ shm_mq_handle **outqhp);
+static void copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh);
+
+/*
+ * Background worker entrypoint.
+ *
+ * This is intended to demonstrate how a background worker can be used to
+ * facilitate a parallel computation. Most of the logic here is fairly
+ * boilerplate stuff, designed to attach to the shared memory segment,
+ * notify the user backend that we're alive, and so on. The
+ * application-specific bits of logic that you'd replace for your own worker
+ * are attach_to_queues() and copy_messages().
+ */
+void
+test_shm_mq_main(Datum main_arg)
+{
+	dsm_segment *seg;
+	shm_toc    *toc;
+	shm_mq_handle *inqh;
+	shm_mq_handle *outqh;
+	volatile test_shm_mq_header *hdr;
+	int			myworkernumber;
+	PGPROC	   *registrant;
+
+	/*
+	 * Establish signal handlers.
+	 *
+	 * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just as
+	 * it would a normal user backend.  To make that happen, we establish a
+	 * signal handler that is a stripped-down version of die().
+	 */
+	pqsignal(SIGTERM, handle_sigterm);
+	BackgroundWorkerUnblockSignals();
+
+	/*
+	 * Connect to the dynamic shared memory segment.
+	 *
+	 * The backend that registered this worker passed us the ID of a shared
+	 * memory segment to which we must attach for further instructions.  Once
+	 * we've mapped the segment in our address space, attach to the table of
+	 * contents so we can locate the various data structures we'll need to
+	 * find within the segment.
+	 *
+	 * Note: at this point, we have not created any ResourceOwner in this
+	 * process.  This will result in our DSM mapping surviving until process
+	 * exit, which is fine.  If there were a ResourceOwner, it would acquire
+	 * ownership of the mapping, but we have no need for that.
+	 */
+	/*
+	 * NOTE(review): main_arg presumably carries a dsm_handle (a uint32), so
+	 * DatumGetUInt32 looks like the better-matched conversion macro here —
+	 * confirm against how setup.c packs the Datum before changing.
+	 */
+	seg = dsm_attach(DatumGetInt32(main_arg));
+	if (seg == NULL)
+		ereport(ERROR,
+				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+				 errmsg("unable to map dynamic shared memory segment")));
+	toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg));
+	if (toc == NULL)
+		ereport(ERROR,
+				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+				 errmsg("bad magic number in dynamic shared memory segment")));
+
+	/*
+	 * Acquire a worker number.
+	 *
+	 * By convention, the process registering this background worker should
+	 * have stored the control structure at key 0.  We look up that key to
+	 * find it.  Our worker number gives our identity: there may be just one
+	 * worker involved in this parallel operation, or there may be many.
+	 */
+	hdr = shm_toc_lookup(toc, 0, false);
+	/* Only the increment needs the mutex; workers_total is set up-front. */
+	SpinLockAcquire(&hdr->mutex);
+	myworkernumber = ++hdr->workers_attached;
+	SpinLockRelease(&hdr->mutex);
+	if (myworkernumber > hdr->workers_total)
+		ereport(ERROR,
+				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+				 errmsg("too many message queue testing workers already")));
+
+	/*
+	 * Attach to the appropriate message queues.
+	 */
+	attach_to_queues(seg, toc, myworkernumber, &inqh, &outqh);
+
+	/*
+	 * Indicate that we're fully initialized and ready to begin the main part
+	 * of the parallel operation.
+	 *
+	 * Once we signal that we're ready, the user backend is entitled to assume
+	 * that our on_dsm_detach callbacks will fire before we disconnect from
+	 * the shared memory segment and exit.  Generally, that means we must have
+	 * attached to all relevant dynamic shared memory data structures by now.
+	 */
+	SpinLockAcquire(&hdr->mutex);
+	++hdr->workers_ready;
+	SpinLockRelease(&hdr->mutex);
+	registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
+	if (registrant == NULL)
+	{
+		elog(DEBUG1, "registrant backend has exited prematurely");
+		proc_exit(1);
+	}
+	/* Wake the registrant so it notices that workers_ready has advanced. */
+	SetLatch(&registrant->procLatch);
+
+	/* Do the work. */
+	copy_messages(inqh, outqh);
+
+	/*
+	 * We're done.  For cleanliness, explicitly detach from the shared memory
+	 * segment (that would happen anyway during process exit, though).
+	 */
+	dsm_detach(seg);
+	proc_exit(1);
+}
+
+/*
+ * Attach to shared memory message queues.
+ *
+ * We use our worker number to determine to which queue we should attach.
+ * The queues are registered at keys 1..<number-of-workers>. The user backend
+ * writes to queue #1 and reads from queue #<number-of-workers>; each worker
+ * reads from the queue whose number is equal to its worker number and writes
+ * to the next higher-numbered queue.
+ */
+static void
+attach_to_queues(dsm_segment *seg, shm_toc *toc, int myworkernumber,
+				 shm_mq_handle **inqhp, shm_mq_handle **outqhp)
+{
+	shm_mq	   *inq;
+	shm_mq	   *outq;
+
+	/* Queue #myworkernumber is ours to read; mark ourselves as receiver. */
+	inq = shm_toc_lookup(toc, myworkernumber, false);
+	shm_mq_set_receiver(inq, MyProc);
+	*inqhp = shm_mq_attach(inq, seg, NULL);
+	/* Queue #(myworkernumber + 1) is ours to write; mark ourselves as sender. */
+	outq = shm_toc_lookup(toc, myworkernumber + 1, false);
+	shm_mq_set_sender(outq, MyProc);
+	*outqhp = shm_mq_attach(outq, seg, NULL);
+}
+
+/*
+ * Loop, receiving and sending messages, until the connection is broken.
+ *
+ * This is the "real work" performed by this worker process. Everything that
+ * happens before this is initialization of one form or another, and everything
+ * after this point is cleanup.
+ */
+static void
+copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh)
+{
+	Size		len;
+	void	   *data;
+	shm_mq_result res;
+
+	for (;;)
+	{
+		/* Notice any interrupts that have occurred. */
+		CHECK_FOR_INTERRUPTS();
+
+		/* Receive a message.  (nowait = false, so this blocks as needed.) */
+		res = shm_mq_receive(inqh, &len, &data, false);
+		if (res != SHM_MQ_SUCCESS)
+			break;				/* presumably SHM_MQ_DETACHED: peer is gone */
+
+		/* Send it back out. */
+		res = shm_mq_send(outqh, len, data, false);
+		if (res != SHM_MQ_SUCCESS)
+			break;				/* likewise: outbound queue was detached */
+	}
+}
+
+/*
+ * When we receive a SIGTERM, we set InterruptPending and ProcDiePending just
+ * like a normal backend. The next CHECK_FOR_INTERRUPTS() will do the right
+ * thing.
+ */
+static void
+handle_sigterm(SIGNAL_ARGS)
+{
+	int			save_errno = errno;	/* handlers must preserve errno */
+
+	/* Wake up any latch wait the mainline code may be blocked in. */
+	SetLatch(MyLatch);
+
+	/* Don't re-arm interrupts if we're already on the way out. */
+	if (!proc_exit_inprogress)
+	{
+		InterruptPending = true;
+		ProcDiePending = true;
+	}
+
+	errno = save_errno;
+}
diff --git a/src/test/modules/unsafe_tests/.gitignore b/src/test/modules/unsafe_tests/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/unsafe_tests/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/unsafe_tests/Makefile b/src/test/modules/unsafe_tests/Makefile
new file mode 100644
index 0000000..3ecf5fc
--- /dev/null
+++ b/src/test/modules/unsafe_tests/Makefile
@@ -0,0 +1,14 @@
+# src/test/modules/unsafe_tests/Makefile
+
+# Plain pg_regress test schedules only; this module builds no C code.
+REGRESS = rolenames alter_system_table
+
+ifdef USE_PGXS
+# Out-of-tree build against an installed server, via pgxs.
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+# In-tree build: use the standard contrib makefile machinery.
+subdir = src/test/modules/unsafe_tests
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/unsafe_tests/README b/src/test/modules/unsafe_tests/README
new file mode 100644
index 0000000..a7e5b2a
--- /dev/null
+++ b/src/test/modules/unsafe_tests/README
@@ -0,0 +1,8 @@
+This directory doesn't actually contain any extension module.
+
+Instead, it is a home for regression tests that we don't want to run
+during "make installcheck" because they could have side effects that
+would be undesirable in a production installation.
+
+For example, rolenames.sql tests ALTER USER ALL and so could
+affect pre-existing roles.
diff --git a/src/test/modules/unsafe_tests/expected/alter_system_table.out b/src/test/modules/unsafe_tests/expected/alter_system_table.out
new file mode 100644
index 0000000..ecd1505
--- /dev/null
+++ b/src/test/modules/unsafe_tests/expected/alter_system_table.out
@@ -0,0 +1,179 @@
+--
+-- Tests for things affected by allow_system_table_mods
+--
+-- We run the same set of commands once with allow_system_table_mods
+-- off and then again with on.
+--
+-- The "on" tests should where possible be wrapped in BEGIN/ROLLBACK
+-- blocks so as to not leave a mess around.
+CREATE USER regress_user_ast;
+SET allow_system_table_mods = off;
+-- create new table in pg_catalog
+CREATE TABLE pg_catalog.test (a int);
+ERROR: permission denied to create "pg_catalog.test"
+DETAIL: System catalog modifications are currently disallowed.
+-- anyarray column
+CREATE TABLE t1x (a int, b anyarray);
+ERROR: column "b" has pseudo-type anyarray
+-- index on system catalog
+ALTER TABLE pg_namespace ADD UNIQUE USING INDEX pg_namespace_oid_index;
+ERROR: permission denied: "pg_namespace" is a system catalog
+-- write to system catalog table as superuser
+-- (allowed even without allow_system_table_mods)
+INSERT INTO pg_description (objoid, classoid, objsubid, description) VALUES (0, 0, 0, 'foo');
+-- write to system catalog table as normal user
+GRANT INSERT ON pg_description TO regress_user_ast;
+SET ROLE regress_user_ast;
+INSERT INTO pg_description (objoid, classoid, objsubid, description) VALUES (0, 0, 1, 'foo');
+ERROR: permission denied for table pg_description
+RESET ROLE;
+-- policy on system catalog
+CREATE POLICY foo ON pg_description FOR SELECT USING (description NOT LIKE 'secret%');
+ERROR: permission denied: "pg_description" is a system catalog
+-- reserved schema name
+CREATE SCHEMA pg_foo;
+ERROR: unacceptable schema name "pg_foo"
+DETAIL: The prefix "pg_" is reserved for system schemas.
+-- drop system table
+DROP TABLE pg_description;
+ERROR: permission denied: "pg_description" is a system catalog
+-- truncate of system table
+TRUNCATE pg_description;
+ERROR: permission denied: "pg_description" is a system catalog
+-- rename column of system table
+ALTER TABLE pg_description RENAME COLUMN description TO comment;
+ERROR: permission denied: "pg_description" is a system catalog
+-- ATSimplePermissions()
+ALTER TABLE pg_description ALTER COLUMN description SET NOT NULL;
+ERROR: permission denied: "pg_description" is a system catalog
+-- SET STATISTICS
+ALTER TABLE pg_description ALTER COLUMN description SET STATISTICS -1;
+ERROR: permission denied: "pg_description" is a system catalog
+-- foreign key referencing catalog
+CREATE TABLE foo (a oid, b oid, c int, FOREIGN KEY (a, b, c) REFERENCES pg_description);
+ERROR: permission denied: "pg_description" is a system catalog
+-- RangeVarCallbackOwnsRelation()
+CREATE INDEX pg_descripton_test_index ON pg_description (description);
+ERROR: permission denied: "pg_description" is a system catalog
+-- RangeVarCallbackForAlterRelation()
+ALTER TABLE pg_description RENAME TO pg_comment;
+ERROR: permission denied: "pg_description" is a system catalog
+ALTER TABLE pg_description SET SCHEMA public;
+ERROR: permission denied: "pg_description" is a system catalog
+-- reserved tablespace name
+CREATE TABLESPACE pg_foo LOCATION '/no/such/location';
+ERROR: unacceptable tablespace name "pg_foo"
+DETAIL: The prefix "pg_" is reserved for system tablespaces.
+-- triggers
+CREATE FUNCTION tf1() RETURNS trigger
+LANGUAGE plpgsql
+AS $$
+BEGIN
+ RETURN NULL;
+END $$;
+CREATE TRIGGER t1 BEFORE INSERT ON pg_description EXECUTE FUNCTION tf1();
+ERROR: permission denied: "pg_description" is a system catalog
+ALTER TRIGGER t1 ON pg_description RENAME TO t2;
+ERROR: permission denied: "pg_description" is a system catalog
+--DROP TRIGGER t2 ON pg_description;
+-- rules
+CREATE RULE r1 AS ON INSERT TO pg_description DO INSTEAD NOTHING;
+ERROR: permission denied: "pg_description" is a system catalog
+ALTER RULE r1 ON pg_description RENAME TO r2;
+ERROR: permission denied: "pg_description" is a system catalog
+-- now make one to test dropping:
+SET allow_system_table_mods TO on;
+CREATE RULE r2 AS ON INSERT TO pg_description DO INSTEAD NOTHING;
+RESET allow_system_table_mods;
+DROP RULE r2 ON pg_description;
+ERROR: permission denied: "pg_description" is a system catalog
+-- cleanup:
+SET allow_system_table_mods TO on;
+DROP RULE r2 ON pg_description;
+RESET allow_system_table_mods;
+SET allow_system_table_mods = on;
+-- create new table in pg_catalog
+BEGIN;
+CREATE TABLE pg_catalog.test (a int);
+ROLLBACK;
+-- anyarray column
+BEGIN;
+CREATE TABLE t1 (a int, b anyarray);
+ROLLBACK;
+-- index on system catalog
+BEGIN;
+ALTER TABLE pg_namespace ADD UNIQUE USING INDEX pg_namespace_oid_index;
+ROLLBACK;
+-- write to system catalog table as superuser
+BEGIN;
+INSERT INTO pg_description (objoid, classoid, objsubid, description) VALUES (0, 0, 2, 'foo');
+ROLLBACK;
+-- write to system catalog table as normal user
+-- (not allowed)
+SET ROLE regress_user_ast;
+INSERT INTO pg_description (objoid, classoid, objsubid, description) VALUES (0, 0, 3, 'foo');
+ERROR: permission denied for table pg_description
+RESET ROLE;
+-- policy on system catalog
+BEGIN;
+CREATE POLICY foo ON pg_description FOR SELECT USING (description NOT LIKE 'secret%');
+ROLLBACK;
+-- reserved schema name
+BEGIN;
+CREATE SCHEMA pg_foo;
+ROLLBACK;
+-- drop system table
+-- (This will fail anyway because it's pinned.)
+BEGIN;
+DROP TABLE pg_description;
+ERROR: cannot drop table pg_description because it is required by the database system
+ROLLBACK;
+-- truncate of system table
+BEGIN;
+TRUNCATE pg_description;
+ROLLBACK;
+-- rename column of system table
+BEGIN;
+ALTER TABLE pg_description RENAME COLUMN description TO comment;
+ROLLBACK;
+-- ATSimplePermissions()
+BEGIN;
+ALTER TABLE pg_description ALTER COLUMN description SET NOT NULL;
+ROLLBACK;
+-- SET STATISTICS
+BEGIN;
+ALTER TABLE pg_description ALTER COLUMN description SET STATISTICS -1;
+ROLLBACK;
+-- foreign key referencing catalog
+BEGIN;
+ALTER TABLE pg_description ADD PRIMARY KEY USING INDEX pg_description_o_c_o_index;
+CREATE TABLE foo (a oid, b oid, c int, FOREIGN KEY (a, b, c) REFERENCES pg_description);
+ROLLBACK;
+-- RangeVarCallbackOwnsRelation()
+BEGIN;
+CREATE INDEX pg_descripton_test_index ON pg_description (description);
+ROLLBACK;
+-- RangeVarCallbackForAlterRelation()
+BEGIN;
+ALTER TABLE pg_description RENAME TO pg_comment;
+ROLLBACK;
+BEGIN;
+ALTER TABLE pg_description SET SCHEMA public;
+ROLLBACK;
+-- reserved tablespace name
+SET client_min_messages = error; -- disable ENFORCE_REGRESSION_TEST_NAME_RESTRICTIONS warning
+CREATE TABLESPACE pg_foo LOCATION '/no/such/location';
+ERROR: directory "/no/such/location" does not exist
+RESET client_min_messages;
+-- triggers
+CREATE TRIGGER t1 BEFORE INSERT ON pg_description EXECUTE FUNCTION tf1();
+ALTER TRIGGER t1 ON pg_description RENAME TO t2;
+DROP TRIGGER t2 ON pg_description;
+-- rules
+CREATE RULE r1 AS ON INSERT TO pg_description DO INSTEAD NOTHING;
+ALTER RULE r1 ON pg_description RENAME TO r2;
+DROP RULE r2 ON pg_description;
+-- cleanup
+REVOKE ALL ON pg_description FROM regress_user_ast;
+DROP USER regress_user_ast;
+DROP FUNCTION tf1;
diff --git a/src/test/modules/unsafe_tests/expected/rolenames.out b/src/test/modules/unsafe_tests/expected/rolenames.out
new file mode 100644
index 0000000..03c1a25
--- /dev/null
+++ b/src/test/modules/unsafe_tests/expected/rolenames.out
@@ -0,0 +1,1010 @@
+CREATE OR REPLACE FUNCTION chkrolattr()
+ RETURNS TABLE ("role" name, rolekeyword text, canlogin bool, replication bool)
+ AS $$
+SELECT r.rolname, v.keyword, r.rolcanlogin, r.rolreplication
+ FROM pg_roles r
+ JOIN (VALUES(CURRENT_USER, 'current_user'),
+ (SESSION_USER, 'session_user'),
+ ('current_user', '-'),
+ ('session_user', '-'),
+ ('Public', '-'),
+ ('None', '-'))
+ AS v(uname, keyword)
+ ON (r.rolname = v.uname)
+ ORDER BY 1;
+$$ LANGUAGE SQL;
+CREATE OR REPLACE FUNCTION chksetconfig()
+ RETURNS TABLE (db name, "role" name, rolkeyword text, setconfig text[])
+ AS $$
+SELECT COALESCE(d.datname, 'ALL'), COALESCE(r.rolname, 'ALL'),
+ COALESCE(v.keyword, '-'), s.setconfig
+ FROM pg_db_role_setting s
+ LEFT JOIN pg_roles r ON (r.oid = s.setrole)
+ LEFT JOIN pg_database d ON (d.oid = s.setdatabase)
+ LEFT JOIN (VALUES(CURRENT_USER, 'current_user'),
+ (SESSION_USER, 'session_user'))
+ AS v(uname, keyword)
+ ON (r.rolname = v.uname)
+ WHERE (r.rolname) IN ('Public', 'current_user', 'regress_testrol1', 'regress_testrol2')
+ORDER BY 1, 2;
+$$ LANGUAGE SQL;
+CREATE OR REPLACE FUNCTION chkumapping()
+ RETURNS TABLE (umname name, umserver name, umoptions text[])
+ AS $$
+SELECT r.rolname, s.srvname, m.umoptions
+ FROM pg_user_mapping m
+ LEFT JOIN pg_roles r ON (r.oid = m.umuser)
+ JOIN pg_foreign_server s ON (s.oid = m.umserver)
+ ORDER BY 2;
+$$ LANGUAGE SQL;
+--
+-- We test creation and use of these role names to ensure that the server
+-- correctly distinguishes role keywords from quoted names that look like
+-- those keywords. In a test environment, creation of these roles may
+-- provoke warnings, so hide the warnings by raising client_min_messages.
+--
+SET client_min_messages = ERROR;
+CREATE ROLE "Public";
+CREATE ROLE "None";
+CREATE ROLE "current_user";
+CREATE ROLE "session_user";
+CREATE ROLE "user";
+RESET client_min_messages;
+CREATE ROLE current_user; -- error
+ERROR: CURRENT_USER cannot be used as a role name here
+LINE 1: CREATE ROLE current_user;
+ ^
+CREATE ROLE current_role; -- error
+ERROR: syntax error at or near "current_role"
+LINE 1: CREATE ROLE current_role;
+ ^
+CREATE ROLE session_user; -- error
+ERROR: SESSION_USER cannot be used as a role name here
+LINE 1: CREATE ROLE session_user;
+ ^
+CREATE ROLE user; -- error
+ERROR: syntax error at or near "user"
+LINE 1: CREATE ROLE user;
+ ^
+CREATE ROLE all; -- error
+ERROR: syntax error at or near "all"
+LINE 1: CREATE ROLE all;
+ ^
+CREATE ROLE public; -- error
+ERROR: role name "public" is reserved
+LINE 1: CREATE ROLE public;
+ ^
+CREATE ROLE "public"; -- error
+ERROR: role name "public" is reserved
+LINE 1: CREATE ROLE "public";
+ ^
+CREATE ROLE none; -- error
+ERROR: role name "none" is reserved
+LINE 1: CREATE ROLE none;
+ ^
+CREATE ROLE "none"; -- error
+ERROR: role name "none" is reserved
+LINE 1: CREATE ROLE "none";
+ ^
+CREATE ROLE pg_abc; -- error
+ERROR: role name "pg_abc" is reserved
+DETAIL: Role names starting with "pg_" are reserved.
+CREATE ROLE "pg_abc"; -- error
+ERROR: role name "pg_abc" is reserved
+DETAIL: Role names starting with "pg_" are reserved.
+CREATE ROLE pg_abcdef; -- error
+ERROR: role name "pg_abcdef" is reserved
+DETAIL: Role names starting with "pg_" are reserved.
+CREATE ROLE "pg_abcdef"; -- error
+ERROR: role name "pg_abcdef" is reserved
+DETAIL: Role names starting with "pg_" are reserved.
+CREATE ROLE regress_testrol0 SUPERUSER LOGIN;
+CREATE ROLE regress_testrolx SUPERUSER LOGIN;
+CREATE ROLE regress_testrol2 SUPERUSER;
+CREATE ROLE regress_testrol1 SUPERUSER LOGIN IN ROLE regress_testrol2;
+\c -
+SET SESSION AUTHORIZATION regress_testrol1;
+SET ROLE regress_testrol2;
+-- ALTER ROLE
+BEGIN;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | f
+ Public | - | f | f
+ current_user | - | f | f
+ regress_testrol1 | session_user | t | f
+ regress_testrol2 | current_user | f | f
+ session_user | - | f | f
+(6 rows)
+
+ALTER ROLE CURRENT_USER WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | f
+ Public | - | f | f
+ current_user | - | f | f
+ regress_testrol1 | session_user | t | f
+ regress_testrol2 | current_user | f | t
+ session_user | - | f | f
+(6 rows)
+
+ALTER ROLE "current_user" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | f
+ Public | - | f | f
+ current_user | - | f | t
+ regress_testrol1 | session_user | t | f
+ regress_testrol2 | current_user | f | t
+ session_user | - | f | f
+(6 rows)
+
+ALTER ROLE SESSION_USER WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | f
+ Public | - | f | f
+ current_user | - | f | t
+ regress_testrol1 | session_user | t | t
+ regress_testrol2 | current_user | f | t
+ session_user | - | f | f
+(6 rows)
+
+ALTER ROLE "session_user" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | f
+ Public | - | f | f
+ current_user | - | f | t
+ regress_testrol1 | session_user | t | t
+ regress_testrol2 | current_user | f | t
+ session_user | - | f | t
+(6 rows)
+
+ALTER USER "Public" WITH REPLICATION;
+ALTER USER "None" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | t
+ Public | - | f | t
+ current_user | - | f | t
+ regress_testrol1 | session_user | t | t
+ regress_testrol2 | current_user | f | t
+ session_user | - | f | t
+(6 rows)
+
+ALTER USER regress_testrol1 WITH NOREPLICATION;
+ALTER USER regress_testrol2 WITH NOREPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | t
+ Public | - | f | t
+ current_user | - | f | t
+ regress_testrol1 | session_user | t | f
+ regress_testrol2 | current_user | f | f
+ session_user | - | f | t
+(6 rows)
+
+ROLLBACK;
+ALTER ROLE USER WITH LOGIN; -- error
+ERROR: syntax error at or near "USER"
+LINE 1: ALTER ROLE USER WITH LOGIN;
+ ^
+ALTER ROLE CURRENT_ROLE WITH LOGIN; --error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: ALTER ROLE CURRENT_ROLE WITH LOGIN;
+ ^
+ALTER ROLE ALL WITH REPLICATION; -- error
+ERROR: syntax error at or near "WITH"
+LINE 1: ALTER ROLE ALL WITH REPLICATION;
+ ^
+ALTER ROLE SESSION_ROLE WITH NOREPLICATION; -- error
+ERROR: role "session_role" does not exist
+ALTER ROLE PUBLIC WITH NOREPLICATION; -- error
+ERROR: role "public" does not exist
+ALTER ROLE "public" WITH NOREPLICATION; -- error
+ERROR: role "public" does not exist
+ALTER ROLE NONE WITH NOREPLICATION; -- error
+ERROR: role name "none" is reserved
+LINE 1: ALTER ROLE NONE WITH NOREPLICATION;
+ ^
+ALTER ROLE "none" WITH NOREPLICATION; -- error
+ERROR: role name "none" is reserved
+LINE 1: ALTER ROLE "none" WITH NOREPLICATION;
+ ^
+ALTER ROLE nonexistent WITH NOREPLICATION; -- error
+ERROR: role "nonexistent" does not exist
+-- ALTER USER
+BEGIN;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | f
+ Public | - | f | f
+ current_user | - | f | f
+ regress_testrol1 | session_user | t | f
+ regress_testrol2 | current_user | f | f
+ session_user | - | f | f
+(6 rows)
+
+ALTER USER CURRENT_USER WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | f
+ Public | - | f | f
+ current_user | - | f | f
+ regress_testrol1 | session_user | t | f
+ regress_testrol2 | current_user | f | t
+ session_user | - | f | f
+(6 rows)
+
+ALTER USER "current_user" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | f
+ Public | - | f | f
+ current_user | - | f | t
+ regress_testrol1 | session_user | t | f
+ regress_testrol2 | current_user | f | t
+ session_user | - | f | f
+(6 rows)
+
+ALTER USER SESSION_USER WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | f
+ Public | - | f | f
+ current_user | - | f | t
+ regress_testrol1 | session_user | t | t
+ regress_testrol2 | current_user | f | t
+ session_user | - | f | f
+(6 rows)
+
+ALTER USER "session_user" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | f
+ Public | - | f | f
+ current_user | - | f | t
+ regress_testrol1 | session_user | t | t
+ regress_testrol2 | current_user | f | t
+ session_user | - | f | t
+(6 rows)
+
+ALTER USER "Public" WITH REPLICATION;
+ALTER USER "None" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | t
+ Public | - | f | t
+ current_user | - | f | t
+ regress_testrol1 | session_user | t | t
+ regress_testrol2 | current_user | f | t
+ session_user | - | f | t
+(6 rows)
+
+ALTER USER regress_testrol1 WITH NOREPLICATION;
+ALTER USER regress_testrol2 WITH NOREPLICATION;
+SELECT * FROM chkrolattr();
+ role | rolekeyword | canlogin | replication
+------------------+--------------+----------+-------------
+ None | - | f | t
+ Public | - | f | t
+ current_user | - | f | t
+ regress_testrol1 | session_user | t | f
+ regress_testrol2 | current_user | f | f
+ session_user | - | f | t
+(6 rows)
+
+ROLLBACK;
+ALTER USER USER WITH LOGIN; -- error
+ERROR: syntax error at or near "USER"
+LINE 1: ALTER USER USER WITH LOGIN;
+ ^
+ALTER USER CURRENT_ROLE WITH LOGIN; -- error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: ALTER USER CURRENT_ROLE WITH LOGIN;
+ ^
+ALTER USER ALL WITH REPLICATION; -- error
+ERROR: syntax error at or near "WITH"
+LINE 1: ALTER USER ALL WITH REPLICATION;
+ ^
+ALTER USER SESSION_ROLE WITH NOREPLICATION; -- error
+ERROR: role "session_role" does not exist
+ALTER USER PUBLIC WITH NOREPLICATION; -- error
+ERROR: role "public" does not exist
+ALTER USER "public" WITH NOREPLICATION; -- error
+ERROR: role "public" does not exist
+ALTER USER NONE WITH NOREPLICATION; -- error
+ERROR: role name "none" is reserved
+LINE 1: ALTER USER NONE WITH NOREPLICATION;
+ ^
+ALTER USER "none" WITH NOREPLICATION; -- error
+ERROR: role name "none" is reserved
+LINE 1: ALTER USER "none" WITH NOREPLICATION;
+ ^
+ALTER USER nonexistent WITH NOREPLICATION; -- error
+ERROR: role "nonexistent" does not exist
+-- ALTER ROLE SET/RESET
+SELECT * FROM chksetconfig();
+ db | role | rolkeyword | setconfig
+----+------+------------+-----------
+(0 rows)
+
+ALTER ROLE CURRENT_USER SET application_name to 'FOO';
+ALTER ROLE SESSION_USER SET application_name to 'BAR';
+ALTER ROLE "current_user" SET application_name to 'FOOFOO';
+ALTER ROLE "Public" SET application_name to 'BARBAR';
+ALTER ROLE ALL SET application_name to 'SLAP';
+SELECT * FROM chksetconfig();
+ db | role | rolkeyword | setconfig
+-----+------------------+--------------+---------------------------
+ ALL | Public | - | {application_name=BARBAR}
+ ALL | current_user | - | {application_name=FOOFOO}
+ ALL | regress_testrol1 | session_user | {application_name=BAR}
+ ALL | regress_testrol2 | current_user | {application_name=FOO}
+(4 rows)
+
+ALTER ROLE regress_testrol1 SET application_name to 'SLAM';
+SELECT * FROM chksetconfig();
+ db | role | rolkeyword | setconfig
+-----+------------------+--------------+---------------------------
+ ALL | Public | - | {application_name=BARBAR}
+ ALL | current_user | - | {application_name=FOOFOO}
+ ALL | regress_testrol1 | session_user | {application_name=SLAM}
+ ALL | regress_testrol2 | current_user | {application_name=FOO}
+(4 rows)
+
+ALTER ROLE CURRENT_USER RESET application_name;
+ALTER ROLE SESSION_USER RESET application_name;
+ALTER ROLE "current_user" RESET application_name;
+ALTER ROLE "Public" RESET application_name;
+ALTER ROLE ALL RESET application_name;
+SELECT * FROM chksetconfig();
+ db | role | rolkeyword | setconfig
+----+------+------------+-----------
+(0 rows)
+
+ALTER ROLE CURRENT_ROLE SET application_name to 'BAZ'; -- error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: ALTER ROLE CURRENT_ROLE SET application_name to 'BAZ';
+ ^
+ALTER ROLE USER SET application_name to 'BOOM'; -- error
+ERROR: syntax error at or near "USER"
+LINE 1: ALTER ROLE USER SET application_name to 'BOOM';
+ ^
+ALTER ROLE PUBLIC SET application_name to 'BOMB'; -- error
+ERROR: role "public" does not exist
+ALTER ROLE nonexistent SET application_name to 'BOMB'; -- error
+ERROR: role "nonexistent" does not exist
+-- ALTER USER SET/RESET
+SELECT * FROM chksetconfig();
+ db | role | rolkeyword | setconfig
+----+------+------------+-----------
+(0 rows)
+
+ALTER USER CURRENT_USER SET application_name to 'FOO';
+ALTER USER SESSION_USER SET application_name to 'BAR';
+ALTER USER "current_user" SET application_name to 'FOOFOO';
+ALTER USER "Public" SET application_name to 'BARBAR';
+ALTER USER ALL SET application_name to 'SLAP';
+SELECT * FROM chksetconfig();
+ db | role | rolkeyword | setconfig
+-----+------------------+--------------+---------------------------
+ ALL | Public | - | {application_name=BARBAR}
+ ALL | current_user | - | {application_name=FOOFOO}
+ ALL | regress_testrol1 | session_user | {application_name=BAR}
+ ALL | regress_testrol2 | current_user | {application_name=FOO}
+(4 rows)
+
+ALTER USER regress_testrol1 SET application_name to 'SLAM';
+SELECT * FROM chksetconfig();
+ db | role | rolkeyword | setconfig
+-----+------------------+--------------+---------------------------
+ ALL | Public | - | {application_name=BARBAR}
+ ALL | current_user | - | {application_name=FOOFOO}
+ ALL | regress_testrol1 | session_user | {application_name=SLAM}
+ ALL | regress_testrol2 | current_user | {application_name=FOO}
+(4 rows)
+
+ALTER USER CURRENT_USER RESET application_name;
+ALTER USER SESSION_USER RESET application_name;
+ALTER USER "current_user" RESET application_name;
+ALTER USER "Public" RESET application_name;
+ALTER USER ALL RESET application_name;
+SELECT * FROM chksetconfig();
+ db | role | rolkeyword | setconfig
+----+------+------------+-----------
+(0 rows)
+
+ALTER USER CURRENT_USER SET application_name to 'BAZ'; -- error
+ALTER USER USER SET application_name to 'BOOM'; -- error
+ERROR: syntax error at or near "USER"
+LINE 1: ALTER USER USER SET application_name to 'BOOM';
+ ^
+ALTER USER PUBLIC SET application_name to 'BOMB'; -- error
+ERROR: role "public" does not exist
+ALTER USER NONE SET application_name to 'BOMB'; -- error
+ERROR: role name "none" is reserved
+LINE 1: ALTER USER NONE SET application_name to 'BOMB';
+ ^
+ALTER USER nonexistent SET application_name to 'BOMB'; -- error
+ERROR: role "nonexistent" does not exist
+-- CREATE SCHEMA
+CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER;
+CREATE SCHEMA newschema2 AUTHORIZATION "current_user";
+CREATE SCHEMA newschema3 AUTHORIZATION SESSION_USER;
+CREATE SCHEMA newschema4 AUTHORIZATION regress_testrolx;
+CREATE SCHEMA newschema5 AUTHORIZATION "Public";
+CREATE SCHEMA newschema6 AUTHORIZATION USER; -- error
+ERROR: syntax error at or near "USER"
+LINE 1: CREATE SCHEMA newschema6 AUTHORIZATION USER;
+ ^
+CREATE SCHEMA newschema6 AUTHORIZATION CURRENT_ROLE; -- error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: CREATE SCHEMA newschema6 AUTHORIZATION CURRENT_ROLE;
+ ^
+CREATE SCHEMA newschema6 AUTHORIZATION PUBLIC; -- error
+ERROR: role "public" does not exist
+CREATE SCHEMA newschema6 AUTHORIZATION "public"; -- error
+ERROR: role "public" does not exist
+CREATE SCHEMA newschema6 AUTHORIZATION NONE; -- error
+ERROR: role name "none" is reserved
+LINE 1: CREATE SCHEMA newschema6 AUTHORIZATION NONE;
+ ^
+CREATE SCHEMA newschema6 AUTHORIZATION nonexistent; -- error
+ERROR: role "nonexistent" does not exist
+SELECT n.nspname, r.rolname FROM pg_namespace n
+ JOIN pg_roles r ON (r.oid = n.nspowner)
+ WHERE n.nspname LIKE 'newschema_' ORDER BY 1;
+ nspname | rolname
+------------+------------------
+ newschema1 | regress_testrol2
+ newschema2 | current_user
+ newschema3 | regress_testrol1
+ newschema4 | regress_testrolx
+ newschema5 | Public
+(5 rows)
+
+CREATE SCHEMA IF NOT EXISTS newschema1 AUTHORIZATION CURRENT_USER;
+NOTICE: schema "newschema1" already exists, skipping
+CREATE SCHEMA IF NOT EXISTS newschema2 AUTHORIZATION "current_user";
+NOTICE: schema "newschema2" already exists, skipping
+CREATE SCHEMA IF NOT EXISTS newschema3 AUTHORIZATION SESSION_USER;
+NOTICE: schema "newschema3" already exists, skipping
+CREATE SCHEMA IF NOT EXISTS newschema4 AUTHORIZATION regress_testrolx;
+NOTICE: schema "newschema4" already exists, skipping
+CREATE SCHEMA IF NOT EXISTS newschema5 AUTHORIZATION "Public";
+NOTICE: schema "newschema5" already exists, skipping
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION USER; -- error
+ERROR: syntax error at or near "USER"
+LINE 1: CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION USER;
+ ^
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION CURRENT_ROLE; -- error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: ...ATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION CURRENT_RO...
+ ^
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION PUBLIC; -- error
+ERROR: role "public" does not exist
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION "public"; -- error
+ERROR: role "public" does not exist
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION NONE; -- error
+ERROR: role name "none" is reserved
+LINE 1: CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION NONE;
+ ^
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION nonexistent; -- error
+ERROR: role "nonexistent" does not exist
+SELECT n.nspname, r.rolname FROM pg_namespace n
+ JOIN pg_roles r ON (r.oid = n.nspowner)
+ WHERE n.nspname LIKE 'newschema_' ORDER BY 1;
+ nspname | rolname
+------------+------------------
+ newschema1 | regress_testrol2
+ newschema2 | current_user
+ newschema3 | regress_testrol1
+ newschema4 | regress_testrolx
+ newschema5 | Public
+(5 rows)
+
+-- ALTER TABLE OWNER TO
+\c -
+SET SESSION AUTHORIZATION regress_testrol0;
+CREATE TABLE testtab1 (a int);
+CREATE TABLE testtab2 (a int);
+CREATE TABLE testtab3 (a int);
+CREATE TABLE testtab4 (a int);
+CREATE TABLE testtab5 (a int);
+CREATE TABLE testtab6 (a int);
+\c -
+SET SESSION AUTHORIZATION regress_testrol1;
+SET ROLE regress_testrol2;
+ALTER TABLE testtab1 OWNER TO CURRENT_USER;
+ALTER TABLE testtab2 OWNER TO "current_user";
+ALTER TABLE testtab3 OWNER TO SESSION_USER;
+ALTER TABLE testtab4 OWNER TO regress_testrolx;
+ALTER TABLE testtab5 OWNER TO "Public";
+ALTER TABLE testtab6 OWNER TO CURRENT_ROLE; -- error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: ALTER TABLE testtab6 OWNER TO CURRENT_ROLE;
+ ^
+ALTER TABLE testtab6 OWNER TO USER; --error
+ERROR: syntax error at or near "USER"
+LINE 1: ALTER TABLE testtab6 OWNER TO USER;
+ ^
+ALTER TABLE testtab6 OWNER TO PUBLIC; -- error
+ERROR: role "public" does not exist
+ALTER TABLE testtab6 OWNER TO "public"; -- error
+ERROR: role "public" does not exist
+ALTER TABLE testtab6 OWNER TO nonexistent; -- error
+ERROR: role "nonexistent" does not exist
+SELECT c.relname, r.rolname
+ FROM pg_class c JOIN pg_roles r ON (r.oid = c.relowner)
+ WHERE relname LIKE 'testtab_'
+ ORDER BY 1;
+ relname | rolname
+----------+------------------
+ testtab1 | regress_testrol2
+ testtab2 | current_user
+ testtab3 | regress_testrol1
+ testtab4 | regress_testrolx
+ testtab5 | Public
+ testtab6 | regress_testrol0
+(6 rows)
+
+-- ALTER TABLE, VIEW, MATERIALIZED VIEW, FOREIGN TABLE, SEQUENCE are
+-- changed their owner in the same way.
+-- ALTER AGGREGATE
+\c -
+SET SESSION AUTHORIZATION regress_testrol0;
+CREATE AGGREGATE testagg1(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg2(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg3(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg4(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg5(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg5(int2) (SFUNC = int2_sum, STYPE = int8);
+ERROR: function "testagg5" already exists with same argument types
+CREATE AGGREGATE testagg6(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg7(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg8(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg9(int2) (SFUNC = int2_sum, STYPE = int8);
+\c -
+SET SESSION AUTHORIZATION regress_testrol1;
+SET ROLE regress_testrol2;
+ALTER AGGREGATE testagg1(int2) OWNER TO CURRENT_USER;
+ALTER AGGREGATE testagg2(int2) OWNER TO "current_user";
+ALTER AGGREGATE testagg3(int2) OWNER TO SESSION_USER;
+ALTER AGGREGATE testagg4(int2) OWNER TO regress_testrolx;
+ALTER AGGREGATE testagg5(int2) OWNER TO "Public";
+ALTER AGGREGATE testagg5(int2) OWNER TO CURRENT_ROLE; -- error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: ALTER AGGREGATE testagg5(int2) OWNER TO CURRENT_ROLE;
+ ^
+ALTER AGGREGATE testagg5(int2) OWNER TO USER; -- error
+ERROR: syntax error at or near "USER"
+LINE 1: ALTER AGGREGATE testagg5(int2) OWNER TO USER;
+ ^
+ALTER AGGREGATE testagg5(int2) OWNER TO PUBLIC; -- error
+ERROR: role "public" does not exist
+ALTER AGGREGATE testagg5(int2) OWNER TO "public"; -- error
+ERROR: role "public" does not exist
+ALTER AGGREGATE testagg5(int2) OWNER TO nonexistent; -- error
+ERROR: role "nonexistent" does not exist
+SELECT p.proname, r.rolname
+ FROM pg_proc p JOIN pg_roles r ON (r.oid = p.proowner)
+ WHERE proname LIKE 'testagg_'
+ ORDER BY 1;
+ proname | rolname
+----------+------------------
+ testagg1 | regress_testrol2
+ testagg2 | current_user
+ testagg3 | regress_testrol1
+ testagg4 | regress_testrolx
+ testagg5 | Public
+ testagg6 | regress_testrol0
+ testagg7 | regress_testrol0
+ testagg8 | regress_testrol0
+ testagg9 | regress_testrol0
+(9 rows)
+
+-- CREATE USER MAPPING
+CREATE FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv1 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv2 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv3 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv4 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv5 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv6 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv7 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv8 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv9 FOREIGN DATA WRAPPER test_wrapper;
+CREATE USER MAPPING FOR CURRENT_USER SERVER sv1 OPTIONS (user 'CURRENT_USER');
+CREATE USER MAPPING FOR "current_user" SERVER sv2 OPTIONS (user '"current_user"');
+CREATE USER MAPPING FOR USER SERVER sv3 OPTIONS (user 'USER');
+CREATE USER MAPPING FOR "user" SERVER sv4 OPTIONS (user '"USER"');
+CREATE USER MAPPING FOR SESSION_USER SERVER sv5 OPTIONS (user 'SESSION_USER');
+CREATE USER MAPPING FOR PUBLIC SERVER sv6 OPTIONS (user 'PUBLIC');
+CREATE USER MAPPING FOR "Public" SERVER sv7 OPTIONS (user '"Public"');
+CREATE USER MAPPING FOR regress_testrolx SERVER sv8 OPTIONS (user 'regress_testrolx');
+CREATE USER MAPPING FOR CURRENT_ROLE SERVER sv9
+ OPTIONS (user 'CURRENT_ROLE'); -- error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: CREATE USER MAPPING FOR CURRENT_ROLE SERVER sv9
+ ^
+CREATE USER MAPPING FOR nonexistent SERVER sv9
+ OPTIONS (user 'nonexistent'); -- error;
+ERROR: role "nonexistent" does not exist
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+------------------+----------+---------------------------
+ regress_testrol2 | sv1 | {user=CURRENT_USER}
+ current_user | sv2 | {"user=\"current_user\""}
+ regress_testrol2 | sv3 | {user=USER}
+ user | sv4 | {"user=\"USER\""}
+ regress_testrol1 | sv5 | {user=SESSION_USER}
+ | sv6 | {user=PUBLIC}
+ Public | sv7 | {"user=\"Public\""}
+ regress_testrolx | sv8 | {user=regress_testrolx}
+(8 rows)
+
+-- ALTER USER MAPPING
+ALTER USER MAPPING FOR CURRENT_USER SERVER sv1
+ OPTIONS (SET user 'CURRENT_USER_alt');
+ALTER USER MAPPING FOR "current_user" SERVER sv2
+ OPTIONS (SET user '"current_user"_alt');
+ALTER USER MAPPING FOR USER SERVER sv3
+ OPTIONS (SET user 'USER_alt');
+ALTER USER MAPPING FOR "user" SERVER sv4
+ OPTIONS (SET user '"user"_alt');
+ALTER USER MAPPING FOR SESSION_USER SERVER sv5
+ OPTIONS (SET user 'SESSION_USER_alt');
+ALTER USER MAPPING FOR PUBLIC SERVER sv6
+ OPTIONS (SET user 'public_alt');
+ALTER USER MAPPING FOR "Public" SERVER sv7
+ OPTIONS (SET user '"Public"_alt');
+ALTER USER MAPPING FOR regress_testrolx SERVER sv8
+ OPTIONS (SET user 'regress_testrolx_alt');
+ALTER USER MAPPING FOR CURRENT_ROLE SERVER sv9
+ OPTIONS (SET user 'CURRENT_ROLE_alt');
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: ALTER USER MAPPING FOR CURRENT_ROLE SERVER sv9
+ ^
+ALTER USER MAPPING FOR nonexistent SERVER sv9
+ OPTIONS (SET user 'nonexistent_alt'); -- error
+ERROR: role "nonexistent" does not exist
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+------------------+----------+-------------------------------
+ regress_testrol2 | sv1 | {user=CURRENT_USER_alt}
+ current_user | sv2 | {"user=\"current_user\"_alt"}
+ regress_testrol2 | sv3 | {user=USER_alt}
+ user | sv4 | {"user=\"user\"_alt"}
+ regress_testrol1 | sv5 | {user=SESSION_USER_alt}
+ | sv6 | {user=public_alt}
+ Public | sv7 | {"user=\"Public\"_alt"}
+ regress_testrolx | sv8 | {user=regress_testrolx_alt}
+(8 rows)
+
+-- DROP USER MAPPING
+DROP USER MAPPING FOR CURRENT_USER SERVER sv1;
+DROP USER MAPPING FOR "current_user" SERVER sv2;
+DROP USER MAPPING FOR USER SERVER sv3;
+DROP USER MAPPING FOR "user" SERVER sv4;
+DROP USER MAPPING FOR SESSION_USER SERVER sv5;
+DROP USER MAPPING FOR PUBLIC SERVER sv6;
+DROP USER MAPPING FOR "Public" SERVER sv7;
+DROP USER MAPPING FOR regress_testrolx SERVER sv8;
+DROP USER MAPPING FOR CURRENT_ROLE SERVER sv9; -- error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: DROP USER MAPPING FOR CURRENT_ROLE SERVER sv9;
+ ^
+DROP USER MAPPING FOR nonexistent SERVER sv; -- error
+ERROR: role "nonexistent" does not exist
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+--------+----------+-----------
+(0 rows)
+
+CREATE USER MAPPING FOR CURRENT_USER SERVER sv1 OPTIONS (user 'CURRENT_USER');
+CREATE USER MAPPING FOR "current_user" SERVER sv2 OPTIONS (user '"current_user"');
+CREATE USER MAPPING FOR USER SERVER sv3 OPTIONS (user 'USER');
+CREATE USER MAPPING FOR "user" SERVER sv4 OPTIONS (user '"USER"');
+CREATE USER MAPPING FOR SESSION_USER SERVER sv5 OPTIONS (user 'SESSION_USER');
+CREATE USER MAPPING FOR PUBLIC SERVER sv6 OPTIONS (user 'PUBLIC');
+CREATE USER MAPPING FOR "Public" SERVER sv7 OPTIONS (user '"Public"');
+CREATE USER MAPPING FOR regress_testrolx SERVER sv8 OPTIONS (user 'regress_testrolx');
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+------------------+----------+---------------------------
+ regress_testrol2 | sv1 | {user=CURRENT_USER}
+ current_user | sv2 | {"user=\"current_user\""}
+ regress_testrol2 | sv3 | {user=USER}
+ user | sv4 | {"user=\"USER\""}
+ regress_testrol1 | sv5 | {user=SESSION_USER}
+ | sv6 | {user=PUBLIC}
+ Public | sv7 | {"user=\"Public\""}
+ regress_testrolx | sv8 | {user=regress_testrolx}
+(8 rows)
+
+-- DROP USER MAPPING IF EXISTS
+DROP USER MAPPING IF EXISTS FOR CURRENT_USER SERVER sv1;
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+------------------+----------+---------------------------
+ current_user | sv2 | {"user=\"current_user\""}
+ regress_testrol2 | sv3 | {user=USER}
+ user | sv4 | {"user=\"USER\""}
+ regress_testrol1 | sv5 | {user=SESSION_USER}
+ | sv6 | {user=PUBLIC}
+ Public | sv7 | {"user=\"Public\""}
+ regress_testrolx | sv8 | {user=regress_testrolx}
+(7 rows)
+
+DROP USER MAPPING IF EXISTS FOR "current_user" SERVER sv2;
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+------------------+----------+-------------------------
+ regress_testrol2 | sv3 | {user=USER}
+ user | sv4 | {"user=\"USER\""}
+ regress_testrol1 | sv5 | {user=SESSION_USER}
+ | sv6 | {user=PUBLIC}
+ Public | sv7 | {"user=\"Public\""}
+ regress_testrolx | sv8 | {user=regress_testrolx}
+(6 rows)
+
+DROP USER MAPPING IF EXISTS FOR USER SERVER sv3;
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+------------------+----------+-------------------------
+ user | sv4 | {"user=\"USER\""}
+ regress_testrol1 | sv5 | {user=SESSION_USER}
+ | sv6 | {user=PUBLIC}
+ Public | sv7 | {"user=\"Public\""}
+ regress_testrolx | sv8 | {user=regress_testrolx}
+(5 rows)
+
+DROP USER MAPPING IF EXISTS FOR "user" SERVER sv4;
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+------------------+----------+-------------------------
+ regress_testrol1 | sv5 | {user=SESSION_USER}
+ | sv6 | {user=PUBLIC}
+ Public | sv7 | {"user=\"Public\""}
+ regress_testrolx | sv8 | {user=regress_testrolx}
+(4 rows)
+
+DROP USER MAPPING IF EXISTS FOR SESSION_USER SERVER sv5;
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+------------------+----------+-------------------------
+ | sv6 | {user=PUBLIC}
+ Public | sv7 | {"user=\"Public\""}
+ regress_testrolx | sv8 | {user=regress_testrolx}
+(3 rows)
+
+DROP USER MAPPING IF EXISTS FOR PUBLIC SERVER sv6;
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+------------------+----------+-------------------------
+ Public | sv7 | {"user=\"Public\""}
+ regress_testrolx | sv8 | {user=regress_testrolx}
+(2 rows)
+
+DROP USER MAPPING IF EXISTS FOR "Public" SERVER sv7;
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+------------------+----------+-------------------------
+ regress_testrolx | sv8 | {user=regress_testrolx}
+(1 row)
+
+DROP USER MAPPING IF EXISTS FOR regress_testrolx SERVER sv8;
+SELECT * FROM chkumapping();
+ umname | umserver | umoptions
+--------+----------+-----------
+(0 rows)
+
+DROP USER MAPPING IF EXISTS FOR CURRENT_ROLE SERVER sv9; --error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: DROP USER MAPPING IF EXISTS FOR CURRENT_ROLE SERVER sv9;
+ ^
+DROP USER MAPPING IF EXISTS FOR nonexistent SERVER sv9; -- error
+NOTICE: role "nonexistent" does not exist, skipping
+-- GRANT/REVOKE
+GRANT regress_testrol0 TO pg_signal_backend; -- success
+SET ROLE pg_signal_backend; --success
+RESET ROLE;
+CREATE SCHEMA test_roles_schema AUTHORIZATION pg_signal_backend; --success
+SET ROLE regress_testrol2;
+UPDATE pg_proc SET proacl = null WHERE proname LIKE 'testagg_';
+SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_';
+ proname | proacl
+----------+--------
+ testagg1 |
+ testagg2 |
+ testagg3 |
+ testagg4 |
+ testagg5 |
+ testagg6 |
+ testagg7 |
+ testagg8 |
+ testagg9 |
+(9 rows)
+
+REVOKE ALL PRIVILEGES ON FUNCTION testagg1(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg2(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg3(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg4(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg5(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg6(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg7(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg8(int2) FROM PUBLIC;
+GRANT ALL PRIVILEGES ON FUNCTION testagg1(int2) TO PUBLIC;
+GRANT ALL PRIVILEGES ON FUNCTION testagg2(int2) TO CURRENT_USER;
+GRANT ALL PRIVILEGES ON FUNCTION testagg3(int2) TO "current_user";
+GRANT ALL PRIVILEGES ON FUNCTION testagg4(int2) TO SESSION_USER;
+GRANT ALL PRIVILEGES ON FUNCTION testagg5(int2) TO "Public";
+GRANT ALL PRIVILEGES ON FUNCTION testagg6(int2) TO regress_testrolx;
+GRANT ALL PRIVILEGES ON FUNCTION testagg7(int2) TO "public";
+GRANT ALL PRIVILEGES ON FUNCTION testagg8(int2)
+ TO current_user, public, regress_testrolx;
+SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_';
+ proname | proacl
+----------+-----------------------------------------------------------------------------------------------------------------------------------
+ testagg1 | {regress_testrol2=X/regress_testrol2,=X/regress_testrol2}
+ testagg2 | {current_user=X/current_user,regress_testrol2=X/current_user}
+ testagg3 | {regress_testrol1=X/regress_testrol1,current_user=X/regress_testrol1}
+ testagg4 | {regress_testrolx=X/regress_testrolx,regress_testrol1=X/regress_testrolx}
+ testagg5 | {Public=X/Public}
+ testagg6 | {regress_testrol0=X/regress_testrol0,regress_testrolx=X/regress_testrol0}
+ testagg7 | {regress_testrol0=X/regress_testrol0,=X/regress_testrol0}
+ testagg8 | {regress_testrol0=X/regress_testrol0,regress_testrol2=X/regress_testrol0,=X/regress_testrol0,regress_testrolx=X/regress_testrol0}
+ testagg9 |
+(9 rows)
+
+GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO CURRENT_ROLE; --error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: ...RANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO CURRENT_RO...
+ ^
+GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO USER; --error
+ERROR: syntax error at or near "USER"
+LINE 1: GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO USER;
+ ^
+GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO NONE; --error
+ERROR: role name "none" is reserved
+LINE 1: GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO NONE;
+ ^
+GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO "none"; --error
+ERROR: role name "none" is reserved
+LINE 1: GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO "none";
+ ^
+SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_';
+ proname | proacl
+----------+-----------------------------------------------------------------------------------------------------------------------------------
+ testagg1 | {regress_testrol2=X/regress_testrol2,=X/regress_testrol2}
+ testagg2 | {current_user=X/current_user,regress_testrol2=X/current_user}
+ testagg3 | {regress_testrol1=X/regress_testrol1,current_user=X/regress_testrol1}
+ testagg4 | {regress_testrolx=X/regress_testrolx,regress_testrol1=X/regress_testrolx}
+ testagg5 | {Public=X/Public}
+ testagg6 | {regress_testrol0=X/regress_testrol0,regress_testrolx=X/regress_testrol0}
+ testagg7 | {regress_testrol0=X/regress_testrol0,=X/regress_testrol0}
+ testagg8 | {regress_testrol0=X/regress_testrol0,regress_testrol2=X/regress_testrol0,=X/regress_testrol0,regress_testrolx=X/regress_testrol0}
+ testagg9 |
+(9 rows)
+
+REVOKE ALL PRIVILEGES ON FUNCTION testagg1(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg2(int2) FROM CURRENT_USER;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg3(int2) FROM "current_user";
+REVOKE ALL PRIVILEGES ON FUNCTION testagg4(int2) FROM SESSION_USER;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg5(int2) FROM "Public";
+REVOKE ALL PRIVILEGES ON FUNCTION testagg6(int2) FROM regress_testrolx;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg7(int2) FROM "public";
+REVOKE ALL PRIVILEGES ON FUNCTION testagg8(int2)
+ FROM current_user, public, regress_testrolx;
+SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_';
+ proname | proacl
+----------+---------------------------------------
+ testagg1 | {regress_testrol2=X/regress_testrol2}
+ testagg2 | {current_user=X/current_user}
+ testagg3 | {regress_testrol1=X/regress_testrol1}
+ testagg4 | {regress_testrolx=X/regress_testrolx}
+ testagg5 | {}
+ testagg6 | {regress_testrol0=X/regress_testrol0}
+ testagg7 | {regress_testrol0=X/regress_testrol0}
+ testagg8 | {regress_testrol0=X/regress_testrol0}
+ testagg9 |
+(9 rows)
+
+REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM CURRENT_ROLE; --error
+ERROR: syntax error at or near "CURRENT_ROLE"
+LINE 1: ...KE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM CURRENT_RO...
+ ^
+REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM USER; --error
+ERROR: syntax error at or near "USER"
+LINE 1: REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM USER;
+ ^
+REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM NONE; --error
+ERROR: role name "none" is reserved
+LINE 1: REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM NONE;
+ ^
+REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM "none"; --error
+ERROR: role name "none" is reserved
+LINE 1: ...EVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM "none";
+ ^
+SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_';
+ proname | proacl
+----------+---------------------------------------
+ testagg1 | {regress_testrol2=X/regress_testrol2}
+ testagg2 | {current_user=X/current_user}
+ testagg3 | {regress_testrol1=X/regress_testrol1}
+ testagg4 | {regress_testrolx=X/regress_testrolx}
+ testagg5 | {}
+ testagg6 | {regress_testrol0=X/regress_testrol0}
+ testagg7 | {regress_testrol0=X/regress_testrol0}
+ testagg8 | {regress_testrol0=X/regress_testrol0}
+ testagg9 |
+(9 rows)
+
+-- DEFAULT MONITORING ROLES
+CREATE ROLE regress_role_haspriv;
+CREATE ROLE regress_role_nopriv;
+-- pg_read_all_stats
+GRANT pg_read_all_stats TO regress_role_haspriv;
+SET SESSION AUTHORIZATION regress_role_haspriv;
+-- returns true with role member of pg_read_all_stats
+SELECT COUNT(*) = 0 AS haspriv FROM pg_stat_activity
+ WHERE query = '<insufficient privilege>';
+ haspriv
+---------
+ t
+(1 row)
+
+SET SESSION AUTHORIZATION regress_role_nopriv;
+-- returns false with role not member of pg_read_all_stats
+SELECT COUNT(*) = 0 AS haspriv FROM pg_stat_activity
+ WHERE query = '<insufficient privilege>';
+ haspriv
+---------
+ f
+(1 row)
+
+RESET SESSION AUTHORIZATION;
+REVOKE pg_read_all_stats FROM regress_role_haspriv;
+-- pg_read_all_settings
+GRANT pg_read_all_settings TO regress_role_haspriv;
+BEGIN;
+-- A GUC using GUC_SUPERUSER_ONLY is useful for negative tests.
+SET LOCAL session_preload_libraries TO 'path-to-preload-libraries';
+SET SESSION AUTHORIZATION regress_role_haspriv;
+-- passes with role member of pg_read_all_settings
+SHOW session_preload_libraries;
+ session_preload_libraries
+-----------------------------
+ "path-to-preload-libraries"
+(1 row)
+
+SET SESSION AUTHORIZATION regress_role_nopriv;
+-- fails with role not member of pg_read_all_settings
+SHOW session_preload_libraries;
+ERROR: must be superuser or a member of pg_read_all_settings to examine "session_preload_libraries"
+RESET SESSION AUTHORIZATION;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+ROLLBACK;
+REVOKE pg_read_all_settings FROM regress_role_haspriv;
+-- clean up
+\c
+DROP SCHEMA test_roles_schema;
+DROP OWNED BY regress_testrol0, "Public", "current_user", regress_testrol1, regress_testrol2, regress_testrolx CASCADE;
+DROP ROLE regress_testrol0, regress_testrol1, regress_testrol2, regress_testrolx;
+DROP ROLE "Public", "None", "current_user", "session_user", "user";
+DROP ROLE regress_role_haspriv, regress_role_nopriv;
diff --git a/src/test/modules/unsafe_tests/sql/alter_system_table.sql b/src/test/modules/unsafe_tests/sql/alter_system_table.sql
new file mode 100644
index 0000000..5663570
--- /dev/null
+++ b/src/test/modules/unsafe_tests/sql/alter_system_table.sql
@@ -0,0 +1,195 @@
+--
+-- Tests for things affected by allow_system_table_mods
+--
+-- We run the same set of commands once with allow_system_table_mods
+-- off and then again with on.
+--
+-- The "on" tests should where possible be wrapped in BEGIN/ROLLBACK
+-- blocks so as to not leave a mess around.
+
+CREATE USER regress_user_ast;
+
+SET allow_system_table_mods = off;
+
+-- create new table in pg_catalog
+CREATE TABLE pg_catalog.test (a int);
+
+-- anyarray column
+CREATE TABLE t1x (a int, b anyarray);
+
+-- index on system catalog
+ALTER TABLE pg_namespace ADD UNIQUE USING INDEX pg_namespace_oid_index;
+
+-- write to system catalog table as superuser
+-- (allowed even without allow_system_table_mods)
+INSERT INTO pg_description (objoid, classoid, objsubid, description) VALUES (0, 0, 0, 'foo');
+
+-- write to system catalog table as normal user
+GRANT INSERT ON pg_description TO regress_user_ast;
+SET ROLE regress_user_ast;
+INSERT INTO pg_description (objoid, classoid, objsubid, description) VALUES (0, 0, 1, 'foo');
+RESET ROLE;
+
+-- policy on system catalog
+CREATE POLICY foo ON pg_description FOR SELECT USING (description NOT LIKE 'secret%');
+
+-- reserved schema name
+CREATE SCHEMA pg_foo;
+
+-- drop system table
+DROP TABLE pg_description;
+
+-- truncate of system table
+TRUNCATE pg_description;
+
+-- rename column of system table
+ALTER TABLE pg_description RENAME COLUMN description TO comment;
+
+-- ATSimplePermissions()
+ALTER TABLE pg_description ALTER COLUMN description SET NOT NULL;
+
+-- SET STATISTICS
+ALTER TABLE pg_description ALTER COLUMN description SET STATISTICS -1;
+
+-- foreign key referencing catalog
+CREATE TABLE foo (a oid, b oid, c int, FOREIGN KEY (a, b, c) REFERENCES pg_description);
+
+-- RangeVarCallbackOwnsRelation()
+CREATE INDEX pg_descripton_test_index ON pg_description (description);
+
+-- RangeVarCallbackForAlterRelation()
+ALTER TABLE pg_description RENAME TO pg_comment;
+ALTER TABLE pg_description SET SCHEMA public;
+
+-- reserved tablespace name
+CREATE TABLESPACE pg_foo LOCATION '/no/such/location';
+
+-- triggers
+CREATE FUNCTION tf1() RETURNS trigger
+LANGUAGE plpgsql
+AS $$
+BEGIN
+ RETURN NULL;
+END $$;
+
+CREATE TRIGGER t1 BEFORE INSERT ON pg_description EXECUTE FUNCTION tf1();
+ALTER TRIGGER t1 ON pg_description RENAME TO t2;
+--DROP TRIGGER t2 ON pg_description;
+
+-- rules
+CREATE RULE r1 AS ON INSERT TO pg_description DO INSTEAD NOTHING;
+ALTER RULE r1 ON pg_description RENAME TO r2;
+-- now make one to test dropping:
+SET allow_system_table_mods TO on;
+CREATE RULE r2 AS ON INSERT TO pg_description DO INSTEAD NOTHING;
+RESET allow_system_table_mods;
+DROP RULE r2 ON pg_description;
+-- cleanup:
+SET allow_system_table_mods TO on;
+DROP RULE r2 ON pg_description;
+RESET allow_system_table_mods;
+
+
+SET allow_system_table_mods = on;
+
+-- create new table in pg_catalog
+BEGIN;
+CREATE TABLE pg_catalog.test (a int);
+ROLLBACK;
+
+-- anyarray column
+BEGIN;
+CREATE TABLE t1 (a int, b anyarray);
+ROLLBACK;
+
+-- index on system catalog
+BEGIN;
+ALTER TABLE pg_namespace ADD UNIQUE USING INDEX pg_namespace_oid_index;
+ROLLBACK;
+
+-- write to system catalog table as superuser
+BEGIN;
+INSERT INTO pg_description (objoid, classoid, objsubid, description) VALUES (0, 0, 2, 'foo');
+ROLLBACK;
+
+-- write to system catalog table as normal user
+-- (not allowed)
+SET ROLE regress_user_ast;
+INSERT INTO pg_description (objoid, classoid, objsubid, description) VALUES (0, 0, 3, 'foo');
+RESET ROLE;
+
+-- policy on system catalog
+BEGIN;
+CREATE POLICY foo ON pg_description FOR SELECT USING (description NOT LIKE 'secret%');
+ROLLBACK;
+
+-- reserved schema name
+BEGIN;
+CREATE SCHEMA pg_foo;
+ROLLBACK;
+
+-- drop system table
+-- (This will fail anyway because it's pinned.)
+BEGIN;
+DROP TABLE pg_description;
+ROLLBACK;
+
+-- truncate of system table
+BEGIN;
+TRUNCATE pg_description;
+ROLLBACK;
+
+-- rename column of system table
+BEGIN;
+ALTER TABLE pg_description RENAME COLUMN description TO comment;
+ROLLBACK;
+
+-- ATSimplePermissions()
+BEGIN;
+ALTER TABLE pg_description ALTER COLUMN description SET NOT NULL;
+ROLLBACK;
+
+-- SET STATISTICS
+BEGIN;
+ALTER TABLE pg_description ALTER COLUMN description SET STATISTICS -1;
+ROLLBACK;
+
+-- foreign key referencing catalog
+BEGIN;
+ALTER TABLE pg_description ADD PRIMARY KEY USING INDEX pg_description_o_c_o_index;
+CREATE TABLE foo (a oid, b oid, c int, FOREIGN KEY (a, b, c) REFERENCES pg_description);
+ROLLBACK;
+
+-- RangeVarCallbackOwnsRelation()
+BEGIN;
+CREATE INDEX pg_descripton_test_index ON pg_description (description);
+ROLLBACK;
+
+-- RangeVarCallbackForAlterRelation()
+BEGIN;
+ALTER TABLE pg_description RENAME TO pg_comment;
+ROLLBACK;
+BEGIN;
+ALTER TABLE pg_description SET SCHEMA public;
+ROLLBACK;
+
+-- reserved tablespace name
+SET client_min_messages = error; -- disable ENFORCE_REGRESSION_TEST_NAME_RESTRICTIONS warning
+CREATE TABLESPACE pg_foo LOCATION '/no/such/location';
+RESET client_min_messages;
+
+-- triggers
+CREATE TRIGGER t1 BEFORE INSERT ON pg_description EXECUTE FUNCTION tf1();
+ALTER TRIGGER t1 ON pg_description RENAME TO t2;
+DROP TRIGGER t2 ON pg_description;
+
+-- rules
+CREATE RULE r1 AS ON INSERT TO pg_description DO INSTEAD NOTHING;
+ALTER RULE r1 ON pg_description RENAME TO r2;
+DROP RULE r2 ON pg_description;
+
+
+-- cleanup
+REVOKE ALL ON pg_description FROM regress_user_ast;
+DROP USER regress_user_ast;
+DROP FUNCTION tf1;
diff --git a/src/test/modules/unsafe_tests/sql/rolenames.sql b/src/test/modules/unsafe_tests/sql/rolenames.sql
new file mode 100644
index 0000000..5a3cf44
--- /dev/null
+++ b/src/test/modules/unsafe_tests/sql/rolenames.sql
@@ -0,0 +1,488 @@
+CREATE OR REPLACE FUNCTION chkrolattr()
+ RETURNS TABLE ("role" name, rolekeyword text, canlogin bool, replication bool)
+ AS $$
+SELECT r.rolname, v.keyword, r.rolcanlogin, r.rolreplication
+ FROM pg_roles r
+ JOIN (VALUES(CURRENT_USER, 'current_user'),
+ (SESSION_USER, 'session_user'),
+ ('current_user', '-'),
+ ('session_user', '-'),
+ ('Public', '-'),
+ ('None', '-'))
+ AS v(uname, keyword)
+ ON (r.rolname = v.uname)
+ ORDER BY 1;
+$$ LANGUAGE SQL;
+
+CREATE OR REPLACE FUNCTION chksetconfig()
+ RETURNS TABLE (db name, "role" name, rolkeyword text, setconfig text[])
+ AS $$
+SELECT COALESCE(d.datname, 'ALL'), COALESCE(r.rolname, 'ALL'),
+ COALESCE(v.keyword, '-'), s.setconfig
+ FROM pg_db_role_setting s
+ LEFT JOIN pg_roles r ON (r.oid = s.setrole)
+ LEFT JOIN pg_database d ON (d.oid = s.setdatabase)
+ LEFT JOIN (VALUES(CURRENT_USER, 'current_user'),
+ (SESSION_USER, 'session_user'))
+ AS v(uname, keyword)
+ ON (r.rolname = v.uname)
+ WHERE (r.rolname) IN ('Public', 'current_user', 'regress_testrol1', 'regress_testrol2')
+ORDER BY 1, 2;
+$$ LANGUAGE SQL;
+
+CREATE OR REPLACE FUNCTION chkumapping()
+ RETURNS TABLE (umname name, umserver name, umoptions text[])
+ AS $$
+SELECT r.rolname, s.srvname, m.umoptions
+ FROM pg_user_mapping m
+ LEFT JOIN pg_roles r ON (r.oid = m.umuser)
+ JOIN pg_foreign_server s ON (s.oid = m.umserver)
+ ORDER BY 2;
+$$ LANGUAGE SQL;
+
+--
+-- We test creation and use of these role names to ensure that the server
+-- correctly distinguishes role keywords from quoted names that look like
+-- those keywords. In a test environment, creation of these roles may
+-- provoke warnings, so hide the warnings by raising client_min_messages.
+--
+SET client_min_messages = ERROR;
+
+CREATE ROLE "Public";
+CREATE ROLE "None";
+CREATE ROLE "current_user";
+CREATE ROLE "session_user";
+CREATE ROLE "user";
+
+RESET client_min_messages;
+
+CREATE ROLE current_user; -- error
+CREATE ROLE current_role; -- error
+CREATE ROLE session_user; -- error
+CREATE ROLE user; -- error
+CREATE ROLE all; -- error
+
+CREATE ROLE public; -- error
+CREATE ROLE "public"; -- error
+CREATE ROLE none; -- error
+CREATE ROLE "none"; -- error
+
+CREATE ROLE pg_abc; -- error
+CREATE ROLE "pg_abc"; -- error
+CREATE ROLE pg_abcdef; -- error
+CREATE ROLE "pg_abcdef"; -- error
+
+CREATE ROLE regress_testrol0 SUPERUSER LOGIN;
+CREATE ROLE regress_testrolx SUPERUSER LOGIN;
+CREATE ROLE regress_testrol2 SUPERUSER;
+CREATE ROLE regress_testrol1 SUPERUSER LOGIN IN ROLE regress_testrol2;
+
+\c -
+SET SESSION AUTHORIZATION regress_testrol1;
+SET ROLE regress_testrol2;
+
+-- ALTER ROLE
+BEGIN;
+SELECT * FROM chkrolattr();
+ALTER ROLE CURRENT_USER WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ALTER ROLE "current_user" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ALTER ROLE SESSION_USER WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ALTER ROLE "session_user" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ALTER USER "Public" WITH REPLICATION;
+ALTER USER "None" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ALTER USER regress_testrol1 WITH NOREPLICATION;
+ALTER USER regress_testrol2 WITH NOREPLICATION;
+SELECT * FROM chkrolattr();
+ROLLBACK;
+
+ALTER ROLE USER WITH LOGIN; -- error
+ALTER ROLE CURRENT_ROLE WITH LOGIN; --error
+ALTER ROLE ALL WITH REPLICATION; -- error
+ALTER ROLE SESSION_ROLE WITH NOREPLICATION; -- error
+ALTER ROLE PUBLIC WITH NOREPLICATION; -- error
+ALTER ROLE "public" WITH NOREPLICATION; -- error
+ALTER ROLE NONE WITH NOREPLICATION; -- error
+ALTER ROLE "none" WITH NOREPLICATION; -- error
+ALTER ROLE nonexistent WITH NOREPLICATION; -- error
+
+-- ALTER USER
+BEGIN;
+SELECT * FROM chkrolattr();
+ALTER USER CURRENT_USER WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ALTER USER "current_user" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ALTER USER SESSION_USER WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ALTER USER "session_user" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ALTER USER "Public" WITH REPLICATION;
+ALTER USER "None" WITH REPLICATION;
+SELECT * FROM chkrolattr();
+ALTER USER regress_testrol1 WITH NOREPLICATION;
+ALTER USER regress_testrol2 WITH NOREPLICATION;
+SELECT * FROM chkrolattr();
+ROLLBACK;
+
+ALTER USER USER WITH LOGIN; -- error
+ALTER USER CURRENT_ROLE WITH LOGIN; -- error
+ALTER USER ALL WITH REPLICATION; -- error
+ALTER USER SESSION_ROLE WITH NOREPLICATION; -- error
+ALTER USER PUBLIC WITH NOREPLICATION; -- error
+ALTER USER "public" WITH NOREPLICATION; -- error
+ALTER USER NONE WITH NOREPLICATION; -- error
+ALTER USER "none" WITH NOREPLICATION; -- error
+ALTER USER nonexistent WITH NOREPLICATION; -- error
+
+-- ALTER ROLE SET/RESET
+SELECT * FROM chksetconfig();
+ALTER ROLE CURRENT_USER SET application_name to 'FOO';
+ALTER ROLE SESSION_USER SET application_name to 'BAR';
+ALTER ROLE "current_user" SET application_name to 'FOOFOO';
+ALTER ROLE "Public" SET application_name to 'BARBAR';
+ALTER ROLE ALL SET application_name to 'SLAP';
+SELECT * FROM chksetconfig();
+ALTER ROLE regress_testrol1 SET application_name to 'SLAM';
+SELECT * FROM chksetconfig();
+ALTER ROLE CURRENT_USER RESET application_name;
+ALTER ROLE SESSION_USER RESET application_name;
+ALTER ROLE "current_user" RESET application_name;
+ALTER ROLE "Public" RESET application_name;
+ALTER ROLE ALL RESET application_name;
+SELECT * FROM chksetconfig();
+
+
+ALTER ROLE CURRENT_ROLE SET application_name to 'BAZ'; -- error
+ALTER ROLE USER SET application_name to 'BOOM'; -- error
+ALTER ROLE PUBLIC SET application_name to 'BOMB'; -- error
+ALTER ROLE nonexistent SET application_name to 'BOMB'; -- error
+
+-- ALTER USER SET/RESET
+SELECT * FROM chksetconfig();
+ALTER USER CURRENT_USER SET application_name to 'FOO';
+ALTER USER SESSION_USER SET application_name to 'BAR';
+ALTER USER "current_user" SET application_name to 'FOOFOO';
+ALTER USER "Public" SET application_name to 'BARBAR';
+ALTER USER ALL SET application_name to 'SLAP';
+SELECT * FROM chksetconfig();
+ALTER USER regress_testrol1 SET application_name to 'SLAM';
+SELECT * FROM chksetconfig();
+ALTER USER CURRENT_USER RESET application_name;
+ALTER USER SESSION_USER RESET application_name;
+ALTER USER "current_user" RESET application_name;
+ALTER USER "Public" RESET application_name;
+ALTER USER ALL RESET application_name;
+SELECT * FROM chksetconfig();
+
+
+ALTER USER CURRENT_USER SET application_name to 'BAZ'; -- error (NB: CURRENT_USER succeeded above at the same command; CURRENT_ROLE presumably intended, as in the ALTER ROLE section — verify against expected output)
+ALTER USER USER SET application_name to 'BOOM'; -- error
+ALTER USER PUBLIC SET application_name to 'BOMB'; -- error
+ALTER USER NONE SET application_name to 'BOMB'; -- error
+ALTER USER nonexistent SET application_name to 'BOMB'; -- error
+
+-- CREATE SCHEMA
+CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER;
+CREATE SCHEMA newschema2 AUTHORIZATION "current_user";
+CREATE SCHEMA newschema3 AUTHORIZATION SESSION_USER;
+CREATE SCHEMA newschema4 AUTHORIZATION regress_testrolx;
+CREATE SCHEMA newschema5 AUTHORIZATION "Public";
+
+CREATE SCHEMA newschema6 AUTHORIZATION USER; -- error
+CREATE SCHEMA newschema6 AUTHORIZATION CURRENT_ROLE; -- error
+CREATE SCHEMA newschema6 AUTHORIZATION PUBLIC; -- error
+CREATE SCHEMA newschema6 AUTHORIZATION "public"; -- error
+CREATE SCHEMA newschema6 AUTHORIZATION NONE; -- error
+CREATE SCHEMA newschema6 AUTHORIZATION nonexistent; -- error
+
+SELECT n.nspname, r.rolname FROM pg_namespace n
+ JOIN pg_roles r ON (r.oid = n.nspowner)
+ WHERE n.nspname LIKE 'newschema_' ORDER BY 1;
+
+CREATE SCHEMA IF NOT EXISTS newschema1 AUTHORIZATION CURRENT_USER;
+CREATE SCHEMA IF NOT EXISTS newschema2 AUTHORIZATION "current_user";
+CREATE SCHEMA IF NOT EXISTS newschema3 AUTHORIZATION SESSION_USER;
+CREATE SCHEMA IF NOT EXISTS newschema4 AUTHORIZATION regress_testrolx;
+CREATE SCHEMA IF NOT EXISTS newschema5 AUTHORIZATION "Public";
+
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION USER; -- error
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION CURRENT_ROLE; -- error
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION PUBLIC; -- error
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION "public"; -- error
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION NONE; -- error
+CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION nonexistent; -- error
+
+SELECT n.nspname, r.rolname FROM pg_namespace n
+ JOIN pg_roles r ON (r.oid = n.nspowner)
+ WHERE n.nspname LIKE 'newschema_' ORDER BY 1;
+
+-- ALTER TABLE OWNER TO
+\c -
+SET SESSION AUTHORIZATION regress_testrol0;
+CREATE TABLE testtab1 (a int);
+CREATE TABLE testtab2 (a int);
+CREATE TABLE testtab3 (a int);
+CREATE TABLE testtab4 (a int);
+CREATE TABLE testtab5 (a int);
+CREATE TABLE testtab6 (a int);
+
+\c -
+SET SESSION AUTHORIZATION regress_testrol1;
+SET ROLE regress_testrol2;
+
+ALTER TABLE testtab1 OWNER TO CURRENT_USER;
+ALTER TABLE testtab2 OWNER TO "current_user";
+ALTER TABLE testtab3 OWNER TO SESSION_USER;
+ALTER TABLE testtab4 OWNER TO regress_testrolx;
+ALTER TABLE testtab5 OWNER TO "Public";
+
+ALTER TABLE testtab6 OWNER TO CURRENT_ROLE; -- error
+ALTER TABLE testtab6 OWNER TO USER; --error
+ALTER TABLE testtab6 OWNER TO PUBLIC; -- error
+ALTER TABLE testtab6 OWNER TO "public"; -- error
+ALTER TABLE testtab6 OWNER TO nonexistent; -- error
+
+SELECT c.relname, r.rolname
+ FROM pg_class c JOIN pg_roles r ON (r.oid = c.relowner)
+ WHERE relname LIKE 'testtab_'
+ ORDER BY 1;
+
+-- ALTER TABLE, VIEW, MATERIALIZED VIEW, FOREIGN TABLE, and SEQUENCE all
+-- change ownership in the same way.
+
+-- ALTER AGGREGATE
+\c -
+SET SESSION AUTHORIZATION regress_testrol0;
+CREATE AGGREGATE testagg1(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg2(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg3(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg4(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg5(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg5(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg6(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg7(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg8(int2) (SFUNC = int2_sum, STYPE = int8);
+CREATE AGGREGATE testagg9(int2) (SFUNC = int2_sum, STYPE = int8);
+
+\c -
+SET SESSION AUTHORIZATION regress_testrol1;
+SET ROLE regress_testrol2;
+
+ALTER AGGREGATE testagg1(int2) OWNER TO CURRENT_USER;
+ALTER AGGREGATE testagg2(int2) OWNER TO "current_user";
+ALTER AGGREGATE testagg3(int2) OWNER TO SESSION_USER;
+ALTER AGGREGATE testagg4(int2) OWNER TO regress_testrolx;
+ALTER AGGREGATE testagg5(int2) OWNER TO "Public";
+
+ALTER AGGREGATE testagg5(int2) OWNER TO CURRENT_ROLE; -- error
+ALTER AGGREGATE testagg5(int2) OWNER TO USER; -- error
+ALTER AGGREGATE testagg5(int2) OWNER TO PUBLIC; -- error
+ALTER AGGREGATE testagg5(int2) OWNER TO "public"; -- error
+ALTER AGGREGATE testagg5(int2) OWNER TO nonexistent; -- error
+
+SELECT p.proname, r.rolname
+ FROM pg_proc p JOIN pg_roles r ON (r.oid = p.proowner)
+ WHERE proname LIKE 'testagg_'
+ ORDER BY 1;
+
+-- CREATE USER MAPPING
+CREATE FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv1 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv2 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv3 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv4 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv5 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv6 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv7 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv8 FOREIGN DATA WRAPPER test_wrapper;
+CREATE SERVER sv9 FOREIGN DATA WRAPPER test_wrapper;
+
+CREATE USER MAPPING FOR CURRENT_USER SERVER sv1 OPTIONS (user 'CURRENT_USER');
+CREATE USER MAPPING FOR "current_user" SERVER sv2 OPTIONS (user '"current_user"');
+CREATE USER MAPPING FOR USER SERVER sv3 OPTIONS (user 'USER');
+CREATE USER MAPPING FOR "user" SERVER sv4 OPTIONS (user '"USER"');
+CREATE USER MAPPING FOR SESSION_USER SERVER sv5 OPTIONS (user 'SESSION_USER');
+CREATE USER MAPPING FOR PUBLIC SERVER sv6 OPTIONS (user 'PUBLIC');
+CREATE USER MAPPING FOR "Public" SERVER sv7 OPTIONS (user '"Public"');
+CREATE USER MAPPING FOR regress_testrolx SERVER sv8 OPTIONS (user 'regress_testrolx');
+
+CREATE USER MAPPING FOR CURRENT_ROLE SERVER sv9
+ OPTIONS (user 'CURRENT_ROLE'); -- error
+CREATE USER MAPPING FOR nonexistent SERVER sv9
+ OPTIONS (user 'nonexistent'); -- error
+
+SELECT * FROM chkumapping();
+
+-- ALTER USER MAPPING
+ALTER USER MAPPING FOR CURRENT_USER SERVER sv1
+ OPTIONS (SET user 'CURRENT_USER_alt');
+ALTER USER MAPPING FOR "current_user" SERVER sv2
+ OPTIONS (SET user '"current_user"_alt');
+ALTER USER MAPPING FOR USER SERVER sv3
+ OPTIONS (SET user 'USER_alt');
+ALTER USER MAPPING FOR "user" SERVER sv4
+ OPTIONS (SET user '"user"_alt');
+ALTER USER MAPPING FOR SESSION_USER SERVER sv5
+ OPTIONS (SET user 'SESSION_USER_alt');
+ALTER USER MAPPING FOR PUBLIC SERVER sv6
+ OPTIONS (SET user 'public_alt');
+ALTER USER MAPPING FOR "Public" SERVER sv7
+ OPTIONS (SET user '"Public"_alt');
+ALTER USER MAPPING FOR regress_testrolx SERVER sv8
+ OPTIONS (SET user 'regress_testrolx_alt');
+
+ALTER USER MAPPING FOR CURRENT_ROLE SERVER sv9
+ OPTIONS (SET user 'CURRENT_ROLE_alt'); -- error
+ALTER USER MAPPING FOR nonexistent SERVER sv9
+ OPTIONS (SET user 'nonexistent_alt'); -- error
+
+SELECT * FROM chkumapping();
+
+-- DROP USER MAPPING
+DROP USER MAPPING FOR CURRENT_USER SERVER sv1;
+DROP USER MAPPING FOR "current_user" SERVER sv2;
+DROP USER MAPPING FOR USER SERVER sv3;
+DROP USER MAPPING FOR "user" SERVER sv4;
+DROP USER MAPPING FOR SESSION_USER SERVER sv5;
+DROP USER MAPPING FOR PUBLIC SERVER sv6;
+DROP USER MAPPING FOR "Public" SERVER sv7;
+DROP USER MAPPING FOR regress_testrolx SERVER sv8;
+
+DROP USER MAPPING FOR CURRENT_ROLE SERVER sv9; -- error
+DROP USER MAPPING FOR nonexistent SERVER sv; -- error
+SELECT * FROM chkumapping();
+
+CREATE USER MAPPING FOR CURRENT_USER SERVER sv1 OPTIONS (user 'CURRENT_USER');
+CREATE USER MAPPING FOR "current_user" SERVER sv2 OPTIONS (user '"current_user"');
+CREATE USER MAPPING FOR USER SERVER sv3 OPTIONS (user 'USER');
+CREATE USER MAPPING FOR "user" SERVER sv4 OPTIONS (user '"USER"');
+CREATE USER MAPPING FOR SESSION_USER SERVER sv5 OPTIONS (user 'SESSION_USER');
+CREATE USER MAPPING FOR PUBLIC SERVER sv6 OPTIONS (user 'PUBLIC');
+CREATE USER MAPPING FOR "Public" SERVER sv7 OPTIONS (user '"Public"');
+CREATE USER MAPPING FOR regress_testrolx SERVER sv8 OPTIONS (user 'regress_testrolx');
+SELECT * FROM chkumapping();
+
+-- DROP USER MAPPING IF EXISTS
+DROP USER MAPPING IF EXISTS FOR CURRENT_USER SERVER sv1;
+SELECT * FROM chkumapping();
+DROP USER MAPPING IF EXISTS FOR "current_user" SERVER sv2;
+SELECT * FROM chkumapping();
+DROP USER MAPPING IF EXISTS FOR USER SERVER sv3;
+SELECT * FROM chkumapping();
+DROP USER MAPPING IF EXISTS FOR "user" SERVER sv4;
+SELECT * FROM chkumapping();
+DROP USER MAPPING IF EXISTS FOR SESSION_USER SERVER sv5;
+SELECT * FROM chkumapping();
+DROP USER MAPPING IF EXISTS FOR PUBLIC SERVER sv6;
+SELECT * FROM chkumapping();
+DROP USER MAPPING IF EXISTS FOR "Public" SERVER sv7;
+SELECT * FROM chkumapping();
+DROP USER MAPPING IF EXISTS FOR regress_testrolx SERVER sv8;
+SELECT * FROM chkumapping();
+
+DROP USER MAPPING IF EXISTS FOR CURRENT_ROLE SERVER sv9; --error
+DROP USER MAPPING IF EXISTS FOR nonexistent SERVER sv9; -- error
+
+-- GRANT/REVOKE
+GRANT regress_testrol0 TO pg_signal_backend; -- success
+
+SET ROLE pg_signal_backend; --success
+RESET ROLE;
+CREATE SCHEMA test_roles_schema AUTHORIZATION pg_signal_backend; --success
+SET ROLE regress_testrol2;
+
+UPDATE pg_proc SET proacl = null WHERE proname LIKE 'testagg_';
+SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_';
+
+REVOKE ALL PRIVILEGES ON FUNCTION testagg1(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg2(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg3(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg4(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg5(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg6(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg7(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg8(int2) FROM PUBLIC;
+
+GRANT ALL PRIVILEGES ON FUNCTION testagg1(int2) TO PUBLIC;
+GRANT ALL PRIVILEGES ON FUNCTION testagg2(int2) TO CURRENT_USER;
+GRANT ALL PRIVILEGES ON FUNCTION testagg3(int2) TO "current_user";
+GRANT ALL PRIVILEGES ON FUNCTION testagg4(int2) TO SESSION_USER;
+GRANT ALL PRIVILEGES ON FUNCTION testagg5(int2) TO "Public";
+GRANT ALL PRIVILEGES ON FUNCTION testagg6(int2) TO regress_testrolx;
+GRANT ALL PRIVILEGES ON FUNCTION testagg7(int2) TO "public";
+GRANT ALL PRIVILEGES ON FUNCTION testagg8(int2)
+ TO current_user, public, regress_testrolx;
+
+SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_';
+
+GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO CURRENT_ROLE; --error
+GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO USER; --error
+GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO NONE; --error
+GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO "none"; --error
+
+SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_';
+
+REVOKE ALL PRIVILEGES ON FUNCTION testagg1(int2) FROM PUBLIC;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg2(int2) FROM CURRENT_USER;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg3(int2) FROM "current_user";
+REVOKE ALL PRIVILEGES ON FUNCTION testagg4(int2) FROM SESSION_USER;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg5(int2) FROM "Public";
+REVOKE ALL PRIVILEGES ON FUNCTION testagg6(int2) FROM regress_testrolx;
+REVOKE ALL PRIVILEGES ON FUNCTION testagg7(int2) FROM "public";
+REVOKE ALL PRIVILEGES ON FUNCTION testagg8(int2)
+ FROM current_user, public, regress_testrolx;
+
+SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_';
+
+REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM CURRENT_ROLE; --error
+REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM USER; --error
+REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM NONE; --error
+REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM "none"; --error
+
+SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_';
+
+-- DEFAULT MONITORING ROLES
+CREATE ROLE regress_role_haspriv;
+CREATE ROLE regress_role_nopriv;
+
+-- pg_read_all_stats
+GRANT pg_read_all_stats TO regress_role_haspriv;
+SET SESSION AUTHORIZATION regress_role_haspriv;
+-- returns true with role member of pg_read_all_stats
+SELECT COUNT(*) = 0 AS haspriv FROM pg_stat_activity
+ WHERE query = '<insufficient privilege>';
+SET SESSION AUTHORIZATION regress_role_nopriv;
+-- returns false with role not member of pg_read_all_stats
+SELECT COUNT(*) = 0 AS haspriv FROM pg_stat_activity
+ WHERE query = '<insufficient privilege>';
+RESET SESSION AUTHORIZATION;
+REVOKE pg_read_all_stats FROM regress_role_haspriv;
+
+-- pg_read_all_settings
+GRANT pg_read_all_settings TO regress_role_haspriv;
+BEGIN;
+-- A GUC using GUC_SUPERUSER_ONLY is useful for negative tests.
+SET LOCAL session_preload_libraries TO 'path-to-preload-libraries';
+SET SESSION AUTHORIZATION regress_role_haspriv;
+-- passes with role member of pg_read_all_settings
+SHOW session_preload_libraries;
+SET SESSION AUTHORIZATION regress_role_nopriv;
+-- fails with role not member of pg_read_all_settings
+SHOW session_preload_libraries;
+RESET SESSION AUTHORIZATION;
+ROLLBACK;
+REVOKE pg_read_all_settings FROM regress_role_haspriv;
+
+-- clean up
+\c
+
+DROP SCHEMA test_roles_schema;
+DROP OWNED BY regress_testrol0, "Public", "current_user", regress_testrol1, regress_testrol2, regress_testrolx CASCADE;
+DROP ROLE regress_testrol0, regress_testrol1, regress_testrol2, regress_testrolx;
+DROP ROLE "Public", "None", "current_user", "session_user", "user";
+DROP ROLE regress_role_haspriv, regress_role_nopriv;
diff --git a/src/test/modules/worker_spi/.gitignore b/src/test/modules/worker_spi/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/src/test/modules/worker_spi/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/worker_spi/Makefile b/src/test/modules/worker_spi/Makefile
new file mode 100644
index 0000000..cbf9b2e
--- /dev/null
+++ b/src/test/modules/worker_spi/Makefile
@@ -0,0 +1,26 @@
+# src/test/modules/worker_spi/Makefile
+
+MODULES = worker_spi
+
+EXTENSION = worker_spi
+DATA = worker_spi--1.0.sql
+PGFILEDESC = "worker_spi - background worker example"
+
+REGRESS = worker_spi
+
+# enable our module in shared_preload_libraries for dynamic bgworkers
+REGRESS_OPTS = --temp-config $(top_srcdir)/src/test/modules/worker_spi/dynamic.conf
+
+# Disable installcheck to ensure we cover dynamic bgworkers.
+NO_INSTALLCHECK = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/worker_spi
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/worker_spi/dynamic.conf b/src/test/modules/worker_spi/dynamic.conf
new file mode 100644
index 0000000..bfe015f
--- /dev/null
+++ b/src/test/modules/worker_spi/dynamic.conf
@@ -0,0 +1,2 @@
+shared_preload_libraries = worker_spi
+worker_spi.database = contrib_regression
diff --git a/src/test/modules/worker_spi/expected/worker_spi.out b/src/test/modules/worker_spi/expected/worker_spi.out
new file mode 100644
index 0000000..dc0a79b
--- /dev/null
+++ b/src/test/modules/worker_spi/expected/worker_spi.out
@@ -0,0 +1,50 @@
+CREATE EXTENSION worker_spi;
+SELECT worker_spi_launch(4) IS NOT NULL;
+ ?column?
+----------
+ t
+(1 row)
+
+-- wait until the worker completes its initialization
+DO $$
+DECLARE
+ visible bool;
+ loops int := 0;
+BEGIN
+ LOOP
+ visible := table_name IS NOT NULL
+ FROM information_schema.tables
+ WHERE table_schema = 'schema4' AND table_name = 'counted';
+ IF visible OR loops > 120 * 10 THEN EXIT; END IF;
+ PERFORM pg_sleep(0.1);
+ loops := loops + 1;
+ END LOOP;
+END
+$$;
+INSERT INTO schema4.counted VALUES ('total', 0), ('delta', 1);
+SELECT pg_reload_conf();
+ pg_reload_conf
+----------------
+ t
+(1 row)
+
+-- wait until the worker has processed the tuple we just inserted
+DO $$
+DECLARE
+ count int;
+ loops int := 0;
+BEGIN
+ LOOP
+ count := count(*) FROM schema4.counted WHERE type = 'delta';
+ IF count = 0 OR loops > 120 * 10 THEN EXIT; END IF;
+ PERFORM pg_sleep(0.1);
+ loops := loops + 1;
+ END LOOP;
+END
+$$;
+SELECT * FROM schema4.counted;
+ type | value
+-------+-------
+ total | 1
+(1 row)
+
diff --git a/src/test/modules/worker_spi/sql/worker_spi.sql b/src/test/modules/worker_spi/sql/worker_spi.sql
new file mode 100644
index 0000000..4683523
--- /dev/null
+++ b/src/test/modules/worker_spi/sql/worker_spi.sql
@@ -0,0 +1,35 @@
+CREATE EXTENSION worker_spi;
+SELECT worker_spi_launch(4) IS NOT NULL;
+-- wait until the worker completes its initialization
+DO $$
+DECLARE
+ visible bool;
+ loops int := 0;
+BEGIN
+ LOOP
+ visible := table_name IS NOT NULL
+ FROM information_schema.tables
+ WHERE table_schema = 'schema4' AND table_name = 'counted';
+ IF visible OR loops > 120 * 10 THEN EXIT; END IF;
+ PERFORM pg_sleep(0.1);
+ loops := loops + 1;
+ END LOOP;
+END
+$$;
+INSERT INTO schema4.counted VALUES ('total', 0), ('delta', 1);
+SELECT pg_reload_conf();
+-- wait until the worker has processed the tuple we just inserted
+DO $$
+DECLARE
+ count int;
+ loops int := 0;
+BEGIN
+ LOOP
+ count := count(*) FROM schema4.counted WHERE type = 'delta';
+ IF count = 0 OR loops > 120 * 10 THEN EXIT; END IF;
+ PERFORM pg_sleep(0.1);
+ loops := loops + 1;
+ END LOOP;
+END
+$$;
+SELECT * FROM schema4.counted;
diff --git a/src/test/modules/worker_spi/worker_spi--1.0.sql b/src/test/modules/worker_spi/worker_spi--1.0.sql
new file mode 100644
index 0000000..e9d5b07
--- /dev/null
+++ b/src/test/modules/worker_spi/worker_spi--1.0.sql
@@ -0,0 +1,9 @@
+/* src/test/modules/worker_spi/worker_spi--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION worker_spi" to load this file. \quit
+
+CREATE FUNCTION worker_spi_launch(pg_catalog.int4)
+RETURNS pg_catalog.int4 STRICT
+AS 'MODULE_PATHNAME'
+LANGUAGE C;
diff --git a/src/test/modules/worker_spi/worker_spi.c b/src/test/modules/worker_spi/worker_spi.c
new file mode 100644
index 0000000..1c7b17c
--- /dev/null
+++ b/src/test/modules/worker_spi/worker_spi.c
@@ -0,0 +1,418 @@
+/* -------------------------------------------------------------------------
+ *
+ * worker_spi.c
+ * Sample background worker code that demonstrates various coding
+ * patterns: establishing a database connection; starting and committing
+ * transactions; using GUC variables, and heeding SIGHUP to reread
+ * the configuration file; reporting to pg_stat_activity; using the
+ * process latch to sleep and exit in case of postmaster death.
+ *
+ * This code connects to a database, creates a schema and table, and summarizes
+ * the numbers contained therein. To see it working, insert an initial value
+ * with "total" type and some initial value; then insert some other rows with
+ * "delta" type. Delta rows will be deleted by this worker and their values
+ * aggregated into the total.
+ *
+ * Copyright (c) 2013-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/worker_spi/worker_spi.c
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+/* These are always necessary for a bgworker */
+#include "miscadmin.h"
+#include "postmaster/bgworker.h"
+#include "storage/ipc.h"
+#include "storage/latch.h"
+#include "storage/lwlock.h"
+#include "storage/proc.h"
+#include "storage/shmem.h"
+
+/* these headers are used by this particular worker's code */
+#include "access/xact.h"
+#include "executor/spi.h"
+#include "fmgr.h"
+#include "lib/stringinfo.h"
+#include "pgstat.h"
+#include "utils/builtins.h"
+#include "utils/snapmgr.h"
+#include "tcop/utility.h"
+
+PG_MODULE_MAGIC;
+
+PG_FUNCTION_INFO_V1(worker_spi_launch);
+
+void _PG_init(void);
+void worker_spi_main(Datum) pg_attribute_noreturn();
+
+/* flags set by signal handlers */
+static volatile sig_atomic_t got_sighup = false;
+static volatile sig_atomic_t got_sigterm = false;
+
+/* GUC variables */
+static int worker_spi_naptime = 10;
+static int worker_spi_total_workers = 2;
+static char *worker_spi_database = NULL;
+
+
+typedef struct worktable
+{
+ const char *schema;
+ const char *name;
+} worktable;
+
+/*
+ * Signal handler for SIGTERM
+ * Set a flag to let the main loop to terminate, and set our latch to wake
+ * it up.
+ */
+static void
+worker_spi_sigterm(SIGNAL_ARGS)
+{
+ int save_errno = errno;
+
+ got_sigterm = true;
+ SetLatch(MyLatch);
+
+ errno = save_errno;
+}
+
+/*
+ * Signal handler for SIGHUP
+ * Set a flag to tell the main loop to reread the config file, and set
+ * our latch to wake it up.
+ */
+static void
+worker_spi_sighup(SIGNAL_ARGS)
+{
+ int save_errno = errno;
+
+ got_sighup = true;
+ SetLatch(MyLatch);
+
+ errno = save_errno;
+}
+
+/*
+ * Initialize workspace for a worker process: create the schema if it doesn't
+ * already exist.
+ */
+static void
+initialize_worker_spi(worktable *table)
+{
+ int ret;
+ int ntup;
+ bool isnull;
+ StringInfoData buf;
+
+ SetCurrentStatementStartTimestamp();
+ StartTransactionCommand();
+ SPI_connect();
+ PushActiveSnapshot(GetTransactionSnapshot());
+ pgstat_report_activity(STATE_RUNNING, "initializing worker_spi schema");
+
+ /* XXX could we use CREATE SCHEMA IF NOT EXISTS? */
+ initStringInfo(&buf);
+ appendStringInfo(&buf, "select count(*) from pg_namespace where nspname = '%s'",
+ table->schema);
+
+ ret = SPI_execute(buf.data, true, 0);
+ if (ret != SPI_OK_SELECT)
+ elog(FATAL, "SPI_execute failed: error code %d", ret);
+
+ if (SPI_processed != 1)
+ elog(FATAL, "not a singleton result");
+
+ ntup = DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0],
+ SPI_tuptable->tupdesc,
+ 1, &isnull));
+ if (isnull)
+ elog(FATAL, "null result");
+
+ if (ntup == 0)
+ {
+ resetStringInfo(&buf);
+ appendStringInfo(&buf,
+ "CREATE SCHEMA \"%s\" "
+ "CREATE TABLE \"%s\" ("
+ " type text CHECK (type IN ('total', 'delta')), "
+ " value integer)"
+ "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) "
+ "WHERE type = 'total'",
+ table->schema, table->name, table->name, table->name);
+
+ /* set statement start time */
+ SetCurrentStatementStartTimestamp();
+
+ ret = SPI_execute(buf.data, false, 0);
+
+ if (ret != SPI_OK_UTILITY)
+ elog(FATAL, "failed to create my schema");
+ }
+
+ SPI_finish();
+ PopActiveSnapshot();
+ CommitTransactionCommand();
+ pgstat_report_activity(STATE_IDLE, NULL);
+}
+
+void
+worker_spi_main(Datum main_arg)
+{
+ int index = DatumGetInt32(main_arg);
+ worktable *table;
+ StringInfoData buf;
+ char name[20];
+
+ table = palloc(sizeof(worktable));
+ sprintf(name, "schema%d", index);
+ table->schema = pstrdup(name);
+ table->name = pstrdup("counted");
+
+ /* Establish signal handlers before unblocking signals. */
+ pqsignal(SIGHUP, worker_spi_sighup);
+ pqsignal(SIGTERM, worker_spi_sigterm);
+
+ /* We're now ready to receive signals */
+ BackgroundWorkerUnblockSignals();
+
+ /* Connect to our database */
+ BackgroundWorkerInitializeConnection(worker_spi_database, NULL, 0);
+
+ elog(LOG, "%s initialized with %s.%s",
+ MyBgworkerEntry->bgw_name, table->schema, table->name);
+ initialize_worker_spi(table);
+
+ /*
+ * Quote identifiers passed to us. Note that this must be done after
+ * initialize_worker_spi, because that routine assumes the names are not
+ * quoted.
+ *
+ * Note some memory might be leaked here.
+ */
+ table->schema = quote_identifier(table->schema);
+ table->name = quote_identifier(table->name);
+
+ initStringInfo(&buf);
+ appendStringInfo(&buf,
+ "WITH deleted AS (DELETE "
+ "FROM %s.%s "
+ "WHERE type = 'delta' RETURNING value), "
+ "total AS (SELECT coalesce(sum(value), 0) as sum "
+ "FROM deleted) "
+ "UPDATE %s.%s "
+ "SET value = %s.value + total.sum "
+ "FROM total WHERE type = 'total' "
+ "RETURNING %s.value",
+ table->schema, table->name,
+ table->schema, table->name,
+ table->name,
+ table->name);
+
+ /*
+ * Main loop: do this until the SIGTERM handler tells us to terminate
+ */
+ while (!got_sigterm)
+ {
+ int ret;
+
+ /*
+ * Background workers mustn't call usleep() or any direct equivalent:
+ * instead, they may wait on their process latch, which sleeps as
+ * necessary, but is awakened if postmaster dies. That way the
+ * background process goes away immediately in an emergency.
+ */
+ (void) WaitLatch(MyLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ worker_spi_naptime * 1000L,
+ PG_WAIT_EXTENSION);
+ ResetLatch(MyLatch);
+
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * In case of a SIGHUP, just reload the configuration.
+ */
+ if (got_sighup)
+ {
+ got_sighup = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ }
+
+ /*
+ * Start a transaction on which we can run queries. Note that each
+ * StartTransactionCommand() call should be preceded by a
+ * SetCurrentStatementStartTimestamp() call, which sets both the time
+ * for the statement we're about the run, and also the transaction
+ * start time. Also, each other query sent to SPI should probably be
+ * preceded by SetCurrentStatementStartTimestamp(), so that statement
+ * start time is always up to date.
+ *
+ * The SPI_connect() call lets us run queries through the SPI manager,
+ * and the PushActiveSnapshot() call creates an "active" snapshot
+ * which is necessary for queries to have MVCC data to work on.
+ *
+ * The pgstat_report_activity() call makes our activity visible
+ * through the pgstat views.
+ */
+ SetCurrentStatementStartTimestamp();
+ StartTransactionCommand();
+ SPI_connect();
+ PushActiveSnapshot(GetTransactionSnapshot());
+ pgstat_report_activity(STATE_RUNNING, buf.data);
+
+ /* We can now execute queries via SPI */
+ ret = SPI_execute(buf.data, false, 0);
+
+ if (ret != SPI_OK_UPDATE_RETURNING)
+ elog(FATAL, "cannot select from table %s.%s: error code %d",
+ table->schema, table->name, ret);
+
+ if (SPI_processed > 0)
+ {
+ bool isnull;
+ int32 val;
+
+ val = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0],
+ SPI_tuptable->tupdesc,
+ 1, &isnull));
+ if (!isnull)
+ elog(LOG, "%s: count in %s.%s is now %d",
+ MyBgworkerEntry->bgw_name,
+ table->schema, table->name, val);
+ }
+
+ /*
+ * And finish our transaction.
+ */
+ SPI_finish();
+ PopActiveSnapshot();
+ CommitTransactionCommand();
+ pgstat_report_stat(false);
+ pgstat_report_activity(STATE_IDLE, NULL);
+ }
+
+ proc_exit(1);
+}
+
+/*
+ * Entrypoint of this module.
+ *
+ * We register more than one worker process here, to demonstrate how that can
+ * be done.
+ */
+void
+_PG_init(void)
+{
+ BackgroundWorker worker;
+ unsigned int i;
+
+ /* get the configuration */
+ DefineCustomIntVariable("worker_spi.naptime",
+ "Duration between each check (in seconds).",
+ NULL,
+ &worker_spi_naptime,
+ 10,
+ 1,
+ INT_MAX,
+ PGC_SIGHUP,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ if (!process_shared_preload_libraries_in_progress)
+ return;
+
+ DefineCustomIntVariable("worker_spi.total_workers",
+ "Number of workers.",
+ NULL,
+ &worker_spi_total_workers,
+ 2,
+ 1,
+ 100,
+ PGC_POSTMASTER,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ DefineCustomStringVariable("worker_spi.database",
+ "Database to connect to.",
+ NULL,
+ &worker_spi_database,
+ "postgres",
+ PGC_POSTMASTER,
+ 0,
+ NULL, NULL, NULL);
+
+ /* set up common data for all our workers */
+ memset(&worker, 0, sizeof(worker));
+ worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
+ BGWORKER_BACKEND_DATABASE_CONNECTION;
+ worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
+ worker.bgw_restart_time = BGW_NEVER_RESTART;
+ sprintf(worker.bgw_library_name, "worker_spi");
+ sprintf(worker.bgw_function_name, "worker_spi_main");
+ worker.bgw_notify_pid = 0;
+
+ /*
+ * Now fill in worker-specific data, and do the actual registrations.
+ */
+ for (i = 1; i <= worker_spi_total_workers; i++)
+ {
+ snprintf(worker.bgw_name, BGW_MAXLEN, "worker_spi worker %d", i);
+ snprintf(worker.bgw_type, BGW_MAXLEN, "worker_spi");
+ worker.bgw_main_arg = Int32GetDatum(i);
+
+ RegisterBackgroundWorker(&worker);
+ }
+}
+
+/*
+ * Dynamically launch an SPI worker.
+ */
+Datum
+worker_spi_launch(PG_FUNCTION_ARGS)
+{
+ int32 i = PG_GETARG_INT32(0);
+ BackgroundWorker worker;
+ BackgroundWorkerHandle *handle;
+ BgwHandleStatus status;
+ pid_t pid;
+
+ memset(&worker, 0, sizeof(worker));
+ worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
+ BGWORKER_BACKEND_DATABASE_CONNECTION;
+ worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
+ worker.bgw_restart_time = BGW_NEVER_RESTART;
+ sprintf(worker.bgw_library_name, "worker_spi");
+ sprintf(worker.bgw_function_name, "worker_spi_main");
+ snprintf(worker.bgw_name, BGW_MAXLEN, "worker_spi worker %d", i);
+ snprintf(worker.bgw_type, BGW_MAXLEN, "worker_spi");
+ worker.bgw_main_arg = Int32GetDatum(i);
+ /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */
+ worker.bgw_notify_pid = MyProcPid;
+
+ if (!RegisterDynamicBackgroundWorker(&worker, &handle))
+ PG_RETURN_NULL();
+
+ status = WaitForBackgroundWorkerStartup(handle, &pid);
+
+ if (status == BGWH_STOPPED)
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("could not start background process"),
+ errhint("More details may be available in the server log.")));
+ if (status == BGWH_POSTMASTER_DIED)
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("cannot start background processes without postmaster"),
+ errhint("Kill all remaining database processes and restart the database.")));
+ Assert(status == BGWH_STARTED);
+
+ PG_RETURN_INT32(pid);
+}
diff --git a/src/test/modules/worker_spi/worker_spi.control b/src/test/modules/worker_spi/worker_spi.control
new file mode 100644
index 0000000..84d6294
--- /dev/null
+++ b/src/test/modules/worker_spi/worker_spi.control
@@ -0,0 +1,5 @@
+# worker_spi extension
+comment = 'Sample background worker'
+default_version = '1.0'
+module_pathname = '$libdir/worker_spi'
+relocatable = true