Diffstat (limited to 'src/test/subscription/t')
-rw-r--r--  src/test/subscription/t/001_rep_changes.pl  579
-rw-r--r--  src/test/subscription/t/002_types.pl  565
-rw-r--r--  src/test/subscription/t/003_constraints.pl  141
-rw-r--r--  src/test/subscription/t/004_sync.pl  178
-rw-r--r--  src/test/subscription/t/005_encoding.pl  52
-rw-r--r--  src/test/subscription/t/006_rewrite.pl  65
-rw-r--r--  src/test/subscription/t/007_ddl.pl  75
-rw-r--r--  src/test/subscription/t/008_diff_schema.pl  124
-rw-r--r--  src/test/subscription/t/009_matviews.pl  54
-rw-r--r--  src/test/subscription/t/010_truncate.pl  239
-rw-r--r--  src/test/subscription/t/011_generated.pl  99
-rw-r--r--  src/test/subscription/t/012_collation.pl  108
-rw-r--r--  src/test/subscription/t/013_partition.pl  889
-rw-r--r--  src/test/subscription/t/014_binary.pl  296
-rw-r--r--  src/test/subscription/t/015_stream.pl  328
-rw-r--r--  src/test/subscription/t/016_stream_subxact.pl  154
-rw-r--r--  src/test/subscription/t/017_stream_ddl.pl  129
-rw-r--r--  src/test/subscription/t/018_stream_subxact_abort.pl  264
-rw-r--r--  src/test/subscription/t/019_stream_subxact_ddl_abort.pl  87
-rw-r--r--  src/test/subscription/t/020_messages.pl  149
-rw-r--r--  src/test/subscription/t/021_twophase.pl  399
-rw-r--r--  src/test/subscription/t/022_twophase_cascade.pl  463
-rw-r--r--  src/test/subscription/t/023_twophase_stream.pl  458
-rw-r--r--  src/test/subscription/t/024_add_drop_pub.pl  87
-rw-r--r--  src/test/subscription/t/025_rep_changes_for_schema.pl  207
-rw-r--r--  src/test/subscription/t/026_stats.pl  302
-rw-r--r--  src/test/subscription/t/027_nosuperuser.pl  397
-rw-r--r--  src/test/subscription/t/028_row_filter.pl  769
-rw-r--r--  src/test/subscription/t/029_on_error.pl  180
-rw-r--r--  src/test/subscription/t/030_origin.pl  216
-rw-r--r--  src/test/subscription/t/031_column_list.pl  1293
-rw-r--r--  src/test/subscription/t/032_subscribe_use_index.pl  484
-rw-r--r--  src/test/subscription/t/033_run_as_table_owner.pl  259
-rw-r--r--  src/test/subscription/t/100_bugs.pl  494
34 files changed, 10583 insertions, 0 deletions
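
For orientation, every script in this directory follows the same TAP skeleton that the first file below shows in full: start a publisher and a subscriber initialized with allows_streaming => 'logical', create matching tables on both sides (logical replication does not replicate DDL), create a publication plus a subscription, wait for the initial sync and for apply catch-up, then compare query results. The following is a minimal sketch of that shared pattern, distilled from the scripts rather than taken from any one of them; it assumes the in-tree PostgreSQL::Test::Cluster and PostgreSQL::Test::Utils modules are available, for example when run as make -C src/test/subscription check PROVE_TESTS='t/001_rep_changes.pl' in a tree configured with --enable-tap-tests.

use strict;
use warnings;
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;

# Two local clusters: a publisher with wal_level=logical and a subscriber.
my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
$node_publisher->init(allows_streaming => 'logical');
$node_publisher->start;

my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
$node_subscriber->init(allows_streaming => 'logical');
$node_subscriber->start;

# Identical table on both sides; logical replication copies only data.
$node_publisher->safe_psql('postgres', "CREATE TABLE t (a int primary key)");
$node_subscriber->safe_psql('postgres', "CREATE TABLE t (a int primary key)");

# Publication on the publisher, subscription on the subscriber.
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE t");
$node_subscriber->safe_psql('postgres',
	"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub");

# Wait for the initial table sync, make a change, wait for apply to catch up,
# then check the result on the subscriber.
$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
$node_publisher->safe_psql('postgres', "INSERT INTO t VALUES (1)");
$node_publisher->wait_for_catchup('tap_sub');
is($node_subscriber->safe_psql('postgres', "SELECT count(*) FROM t"),
	qq(1), 'row replicated');

done_testing();
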
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
new file mode 100644
index 0000000..857bcfb
--- /dev/null
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -0,0 +1,579 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Basic logical replication test
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Initialize publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create some preexisting content on publisher
+$node_publisher->safe_psql(
+ 'postgres',
+ "CREATE FUNCTION public.pg_get_replica_identity_index(int)
+ RETURNS regclass LANGUAGE sql AS 'SELECT 1/0'"); # shall not call
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_notrep AS SELECT generate_series(1,10) AS a");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_ins AS SELECT generate_series(1,1002) AS a");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_full AS SELECT generate_series(1,10) AS a");
+$node_publisher->safe_psql('postgres', "CREATE TABLE tab_full2 (x text)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_full2 VALUES ('a'), ('b'), ('b')");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rep (a int primary key)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_mixed (a int primary key, b text, c numeric)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_mixed (a, b, c) VALUES (1, 'foo', 1.1)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_full_pk (a int primary key, b text)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_full_pk REPLICA IDENTITY FULL");
+# Leave this table with REPLICA IDENTITY NOTHING, allowing only INSERT changes.
+$node_publisher->safe_psql('postgres', "CREATE TABLE tab_nothing (a int)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_nothing REPLICA IDENTITY NOTHING");
+
+# Replicate the changes without replica identity index
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_no_replidentity_index(c1 int)");
+$node_publisher->safe_psql('postgres',
+ "CREATE INDEX idx_no_replidentity_index ON tab_no_replidentity_index(c1)"
+);
+
+# Replicate the changes without columns
+$node_publisher->safe_psql('postgres', "CREATE TABLE tab_no_col()");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_no_col default VALUES");
+
+# Setup structure on subscriber
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_notrep (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_ins (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_full (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_full2 (x text)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rep (a int primary key)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_full_pk (a int primary key, b text)");
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE tab_full_pk REPLICA IDENTITY FULL");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_nothing (a int)");
+
+# different column count and order than on publisher
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_mixed (d text default 'local', c numeric, b text, a int primary key)"
+);
+
+# replication of the table with included index
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))"
+);
+
+# replication of the table without replica identity index
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_no_replidentity_index(c1 int)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE INDEX idx_no_replidentity_index ON tab_no_replidentity_index(c1)"
+);
+
+# replication of the table without columns
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_no_col()");
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_ins_only WITH (publish = insert)");
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub ADD TABLE tab_rep, tab_full, tab_full2, tab_mixed, tab_include, tab_nothing, tab_full_pk, tab_no_replidentity_index, tab_no_col"
+);
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_ins_only ADD TABLE tab_ins");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub, tap_pub_ins_only"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
+
+my $result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_notrep");
+is($result, qq(0), 'check non-replicated table is empty on subscriber');
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_ins");
+is($result, qq(1002), 'check initial data was copied to subscriber');
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_ins SELECT generate_series(1,50)");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a > 20");
+$node_publisher->safe_psql('postgres', "UPDATE tab_ins SET a = -a");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rep SELECT generate_series(1,50)");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_rep WHERE a > 20");
+$node_publisher->safe_psql('postgres', "UPDATE tab_rep SET a = -a");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_mixed VALUES (2, 'bar', 2.2)");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_full_pk VALUES (1, 'foo'), (2, 'baz')");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_nothing VALUES (generate_series(1,20))");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_include SELECT generate_series(1,50)");
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM tab_include WHERE a > 20");
+$node_publisher->safe_psql('postgres', "UPDATE tab_include SET a = -a");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_no_replidentity_index VALUES(1)");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_no_col default VALUES");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
+is($result, qq(1052|1|1002), 'check replicated inserts on subscriber');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_rep");
+is($result, qq(20|-20|-1), 'check replicated changes on subscriber');
+
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_mixed");
+is( $result, qq(local|1.1|foo|1
+local|2.2|bar|2), 'check replicated changes with different column order');
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_nothing");
+is($result, qq(20), 'check replicated changes with REPLICA IDENTITY NOTHING');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_include");
+is($result, qq(20|-20|-1),
+ 'check replicated changes with primary key index with included columns');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', q(SELECT c1 FROM tab_no_replidentity_index)),
+ 1,
+ "value replicated to subscriber without replica identity index");
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_no_col");
+is($result, qq(2), 'check replicated changes for table having no columns');
+
+# insert some duplicate rows
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_full SELECT generate_series(1,10)");
+
+# Test behaviour of ALTER PUBLICATION ... DROP TABLE
+#
+# When a publisher drops a table from a publication, it should also stop
+# sending its changes to subscribers. We check on the subscriber whether it
+# receives the row that is inserted into the table on the publisher after the
+# table is dropped from the publication.
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
+is($result, qq(1052|1|1002),
+ 'check rows on subscriber before table drop from publication');
+
+# Drop the table from publication
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_ins_only DROP TABLE tab_ins");
+
+# Insert a row in publisher, but publisher will not send this row to subscriber
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_ins VALUES(8888)");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+# The subscriber will not receive the inserted row after the table is dropped
+# from the publication, so the row count should remain the same.
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
+is($result, qq(1052|1|1002),
+ 'check rows on subscriber after table drop from publication');
+
+# Delete the inserted row in publisher
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a = 8888");
+
+# Add the table to publication again
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_ins_only ADD TABLE tab_ins");
+
+# Refresh publication after table is added to publication
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION");
+
+# Test replication with multiple publications for a subscription such that the
+# operations are performed on the table from the first publication in the list.
+
+# Create tables on publisher
+$node_publisher->safe_psql('postgres', "CREATE TABLE temp1 (a int)");
+$node_publisher->safe_psql('postgres', "CREATE TABLE temp2 (a int)");
+
+# Create tables on subscriber
+$node_subscriber->safe_psql('postgres', "CREATE TABLE temp1 (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE temp2 (a int)");
+
+# Setup logical replication that will only be used for this test
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_temp1 FOR TABLE temp1 WITH (publish = insert)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_temp2 FOR TABLE temp2");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub_temp1 CONNECTION '$publisher_connstr' PUBLICATION tap_pub_temp1, tap_pub_temp2"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher,
+ 'tap_sub_temp1');
+
+# Subscriber table will have no rows initially
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM temp1");
+is($result, qq(0),
+ 'check initial rows on subscriber with multiple publications');
+
+# Insert a row into the table that is part of the first publication in the
+# subscriber's list of publications.
+$node_publisher->safe_psql('postgres', "INSERT INTO temp1 VALUES (1)");
+
+$node_publisher->wait_for_catchup('tap_sub_temp1');
+
+# Subscriber should receive the inserted row
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM temp1");
+is($result, qq(1), 'check rows on subscriber with multiple publications');
+
+# Drop subscription as we don't need it anymore
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_temp1");
+
+# Drop publications as we don't need them anymore
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_temp1");
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_temp2");
+
+# Clean up the tables on both publisher and subscriber as we don't need them
+$node_publisher->safe_psql('postgres', "DROP TABLE temp1");
+$node_publisher->safe_psql('postgres', "DROP TABLE temp2");
+$node_subscriber->safe_psql('postgres', "DROP TABLE temp1");
+$node_subscriber->safe_psql('postgres', "DROP TABLE temp2");
+
+# add REPLICA IDENTITY FULL so we can update
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_full REPLICA IDENTITY FULL");
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE tab_full REPLICA IDENTITY FULL");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_full2 REPLICA IDENTITY FULL");
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE tab_full2 REPLICA IDENTITY FULL");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_ins REPLICA IDENTITY FULL");
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE tab_ins REPLICA IDENTITY FULL");
+# tab_mixed can use DEFAULT, since it has a primary key
+
+# and do the updates
+$node_publisher->safe_psql('postgres', "UPDATE tab_full SET a = a * a");
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_full2 SET x = 'bb' WHERE x = 'b'");
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_mixed SET b = 'baz' WHERE a = 1");
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_full_pk SET b = 'bar' WHERE a = 1");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_full");
+is($result, qq(20|1|100),
+ 'update works with REPLICA IDENTITY FULL and duplicate tuples');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT x FROM tab_full2 ORDER BY 1");
+is( $result, qq(a
+bb
+bb),
+ 'update works with REPLICA IDENTITY FULL and text datums');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT * FROM tab_mixed ORDER BY a");
+is( $result, qq(local|1.1|baz|1
+local|2.2|bar|2),
+ 'update works with different column order and subscriber local values');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT * FROM tab_full_pk ORDER BY a");
+is( $result, qq(1|bar
+2|baz),
+ 'update works with REPLICA IDENTITY FULL and a primary key');
+
+# Check that subscriber handles cases where update/delete target tuple
+# is missing. We have to look for the DEBUG1 log messages about that,
+# so temporarily bump up the log verbosity.
+$node_subscriber->append_conf('postgresql.conf', "log_min_messages = debug1");
+$node_subscriber->reload;
+
+$node_subscriber->safe_psql('postgres', "DELETE FROM tab_full_pk");
+
+# Note that the current location of the log file is not grabbed immediately
+# after reloading the configuration, but only after sending one SQL command to
+# the node, so that we are sure the reload has taken effect.
+my $log_location = -s $node_subscriber->logfile;
+
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_full_pk SET b = 'quux' WHERE a = 1");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_full_pk WHERE a = 2");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+my $logfile = slurp_file($node_subscriber->logfile, $log_location);
+ok( $logfile =~
+ qr/logical replication did not find row to be updated in replication target relation "tab_full_pk"/,
+ 'update target row is missing');
+ok( $logfile =~
+ qr/logical replication did not find row to be deleted in replication target relation "tab_full_pk"/,
+ 'delete target row is missing');
+
+$node_subscriber->append_conf('postgresql.conf',
+ "log_min_messages = warning");
+$node_subscriber->reload;
+
+# check behavior with toasted values
+
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_mixed SET b = repeat('xyzzy', 100000) WHERE a = 2");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a, length(b), c, d FROM tab_mixed ORDER BY a");
+is( $result, qq(1|3|1.1|local
+2|500000|2.2|local),
+ 'update transmits large column value');
+
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_mixed SET c = 3.3 WHERE a = 2");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a, length(b), c, d FROM tab_mixed ORDER BY a");
+is( $result, qq(1|3|1.1|local
+2|500000|3.3|local),
+ 'update with non-transmitted large column value');
+
+# check behavior with dropped columns
+
+# this update should get transmitted before the column goes away
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_mixed SET b = 'bar', c = 2.2 WHERE a = 2");
+
+$node_publisher->safe_psql('postgres', "ALTER TABLE tab_mixed DROP COLUMN b");
+
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_mixed SET c = 11.11 WHERE a = 1");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT * FROM tab_mixed ORDER BY a");
+is( $result, qq(local|11.11|baz|1
+local|2.2|bar|2),
+ 'update works with dropped publisher column');
+
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE tab_mixed DROP COLUMN d");
+
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_mixed SET c = 22.22 WHERE a = 2");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT * FROM tab_mixed ORDER BY a");
+is( $result, qq(11.11|baz|1
+22.22|bar|2),
+ 'update works with dropped subscriber column');
+
+# check that change of connection string and/or publication list causes
+# restart of subscription workers. We check the state along with
+# application_name to ensure that the walsender is (re)started.
+#
+# Not all of these are registered as tests, since we need to poll for a change,
+# but the test suite will still fail when something goes wrong.
+my $oldpid = $node_publisher->safe_psql('postgres',
+ "SELECT pid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
+);
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr sslmode=disable'"
+);
+$node_publisher->poll_query_until('postgres',
+ "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
+ )
+ or die
+ "Timed out while waiting for apply to restart after changing CONNECTION";
+
+$oldpid = $node_publisher->safe_psql('postgres',
+ "SELECT pid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
+);
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only WITH (copy_data = false)"
+);
+$node_publisher->poll_query_until('postgres',
+ "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
+ )
+ or die
+ "Timed out while waiting for apply to restart after changing PUBLICATION";
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_ins SELECT generate_series(1001,1100)");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_rep");
+
+# Restart the publisher and check the state of the subscriber which
+# should be in a streaming state after catching up.
+$node_publisher->stop('fast');
+$node_publisher->start;
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
+is($result, qq(1152|1|1100),
+ 'check replicated inserts after subscription publication change');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_rep");
+is($result, qq(20|-20|-1),
+ 'check changes skipped after subscription publication change');
+
+# check alter publication (relcache invalidation etc)
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_ins_only SET (publish = 'insert, delete')");
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_ins_only ADD TABLE tab_full");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a > 0");
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = false)"
+);
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_full VALUES(0)");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+# Check that we don't send BEGIN and COMMIT because of the empty-transaction
+# optimization. We have to look for the DEBUG1 log messages about that, so
+# temporarily bump up the log verbosity.
+$node_publisher->append_conf('postgresql.conf', "log_min_messages = debug1");
+$node_publisher->reload;
+
+# Note that the current location of the log file is not grabbed immediately
+# after reloading the configuration, but after sending one SQL command to
+# the node so that we are sure that the reloading has taken effect.
+$log_location = -s $node_publisher->logfile;
+
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_notrep VALUES (11)");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$logfile = slurp_file($node_publisher->logfile, $log_location);
+ok($logfile =~ qr/skipped replication of an empty transaction with XID/,
+ 'empty transaction is skipped');
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_notrep");
+is($result, qq(0), 'check non-replicated table is empty on subscriber');
+
+$node_publisher->append_conf('postgresql.conf', "log_min_messages = warning");
+$node_publisher->reload;
+
+# Note that the data differ between the publisher and the subscriber.
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
+is($result, qq(1052|1|1002),
+ 'check replicated deletes after alter publication');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_full");
+is($result, qq(21|0|100), 'check replicated insert after alter publication');
+
+# check restart on rename
+$oldpid = $node_publisher->safe_psql('postgres',
+ "SELECT pid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
+);
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub RENAME TO tap_sub_renamed");
+$node_publisher->poll_query_until('postgres',
+ "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub_renamed' AND state = 'streaming';"
+ )
+ or die
+ "Timed out while waiting for apply to restart after renaming SUBSCRIPTION";
+
+# check all the cleanup
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_renamed");
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
+is($result, qq(0), 'check subscription was dropped on subscriber');
+
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
+is($result, qq(0), 'check replication slot was dropped on publisher');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber');
+
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
+is($result, qq(0), 'check replication slot was dropped on publisher');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0), 'check replication origin was dropped on subscriber');
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+# CREATE PUBLICATION while wal_level=minimal should succeed, with a WARNING
+$node_publisher->append_conf(
+ 'postgresql.conf', qq(
+wal_level=minimal
+max_wal_senders=0
+));
+$node_publisher->start;
+($result, my $retout, my $reterr) = $node_publisher->psql(
+ 'postgres', qq{
+BEGIN;
+CREATE TABLE skip_wal();
+CREATE PUBLICATION tap_pub2 FOR TABLE skip_wal;
+ROLLBACK;
+});
+ok( $reterr =~
+ m/WARNING: wal_level is insufficient to publish logical changes/,
+ 'CREATE PUBLICATION while wal_level=minimal');
+
+done_testing();
diff --git a/src/test/subscription/t/002_types.pl b/src/test/subscription/t/002_types.pl
new file mode 100644
index 0000000..6b5853b
--- /dev/null
+++ b/src/test/subscription/t/002_types.pl
@@ -0,0 +1,565 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# This tests that more complex datatypes are replicated correctly
+# by logical replication
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Initialize publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create some preexisting content on publisher
+my $ddl = qq(
+ CREATE EXTENSION hstore WITH SCHEMA public;
+ CREATE TABLE public.tst_one_array (
+ a INTEGER PRIMARY KEY,
+ b INTEGER[]
+ );
+ CREATE TABLE public.tst_arrays (
+ a INTEGER[] PRIMARY KEY,
+ b TEXT[],
+ c FLOAT[],
+ d INTERVAL[]
+ );
+
+ CREATE TYPE public.tst_enum_t AS ENUM ('a', 'b', 'c', 'd', 'e');
+ CREATE TABLE public.tst_one_enum (
+ a INTEGER PRIMARY KEY,
+ b public.tst_enum_t
+ );
+ CREATE TABLE public.tst_enums (
+ a public.tst_enum_t PRIMARY KEY,
+ b public.tst_enum_t[]
+ );
+
+ CREATE TYPE public.tst_comp_basic_t AS (a FLOAT, b TEXT, c INTEGER);
+ CREATE TYPE public.tst_comp_enum_t AS (a FLOAT, b public.tst_enum_t, c INTEGER);
+ CREATE TYPE public.tst_comp_enum_array_t AS (a FLOAT, b public.tst_enum_t[], c INTEGER);
+ CREATE TABLE public.tst_one_comp (
+ a INTEGER PRIMARY KEY,
+ b public.tst_comp_basic_t
+ );
+ CREATE TABLE public.tst_comps (
+ a public.tst_comp_basic_t PRIMARY KEY,
+ b public.tst_comp_basic_t[]
+ );
+ CREATE TABLE public.tst_comp_enum (
+ a INTEGER PRIMARY KEY,
+ b public.tst_comp_enum_t
+ );
+ CREATE TABLE public.tst_comp_enum_array (
+ a public.tst_comp_enum_t PRIMARY KEY,
+ b public.tst_comp_enum_t[]
+ );
+ CREATE TABLE public.tst_comp_one_enum_array (
+ a INTEGER PRIMARY KEY,
+ b public.tst_comp_enum_array_t
+ );
+ CREATE TABLE public.tst_comp_enum_what (
+ a public.tst_comp_enum_array_t PRIMARY KEY,
+ b public.tst_comp_enum_array_t[]
+ );
+
+ CREATE TYPE public.tst_comp_mix_t AS (
+ a public.tst_comp_basic_t,
+ b public.tst_comp_basic_t[],
+ c public.tst_enum_t,
+ d public.tst_enum_t[]
+ );
+ CREATE TABLE public.tst_comp_mix_array (
+ a public.tst_comp_mix_t PRIMARY KEY,
+ b public.tst_comp_mix_t[]
+ );
+ CREATE TABLE public.tst_range (
+ a INTEGER PRIMARY KEY,
+ b int4range
+ );
+ CREATE TABLE public.tst_range_array (
+ a INTEGER PRIMARY KEY,
+ b TSTZRANGE,
+ c int8range[]
+ );
+ CREATE TABLE public.tst_hstore (
+ a INTEGER PRIMARY KEY,
+ b public.hstore
+ );
+
+ SET check_function_bodies=off;
+ CREATE FUNCTION public.monot_incr(int) RETURNS bool LANGUAGE sql
+ AS ' select \$1 > max(a) from public.tst_dom_constr; ';
+ CREATE DOMAIN monot_int AS int CHECK (monot_incr(VALUE));
+ CREATE TABLE public.tst_dom_constr (a monot_int););
+
+# Setup structure on both nodes
+$node_publisher->safe_psql('postgres', $ddl);
+$node_subscriber->safe_psql('postgres', $ddl);
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR ALL TABLES");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)"
+);
+
+# Wait for initial sync to finish as well
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
+
+# Insert initial test data
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ -- test_tbl_one_array_col
+ INSERT INTO tst_one_array (a, b) VALUES
+ (1, '{1, 2, 3}'),
+ (2, '{2, 3, 1}'),
+ (3, '{3, 2, 1}'),
+ (4, '{4, 3, 2}'),
+ (5, '{5, NULL, 3}');
+
+ -- test_tbl_arrays
+ INSERT INTO tst_arrays (a, b, c, d) VALUES
+ ('{1, 2, 3}', '{"a", "b", "c"}', '{1.1, 2.2, 3.3}', '{"1 day", "2 days", "3 days"}'),
+ ('{2, 3, 1}', '{"b", "c", "a"}', '{2.2, 3.3, 1.1}', '{"2 minutes", "3 minutes", "1 minute"}'),
+ ('{3, 1, 2}', '{"c", "a", "b"}', '{3.3, 1.1, 2.2}', '{"3 years", "1 year", "2 years"}'),
+ ('{4, 1, 2}', '{"d", "a", "b"}', '{4.4, 1.1, 2.2}', '{"4 years", "1 year", "2 years"}'),
+ ('{5, NULL, NULL}', '{"e", NULL, "b"}', '{5.5, 1.1, NULL}', '{"5 years", NULL, NULL}');
+
+ -- test_tbl_single_enum
+ INSERT INTO tst_one_enum (a, b) VALUES
+ (1, 'a'),
+ (2, 'b'),
+ (3, 'c'),
+ (4, 'd'),
+ (5, NULL);
+
+ -- test_tbl_enums
+ INSERT INTO tst_enums (a, b) VALUES
+ ('a', '{b, c}'),
+ ('b', '{c, a}'),
+ ('c', '{b, a}'),
+ ('d', '{c, b}'),
+ ('e', '{d, NULL}');
+
+ -- test_tbl_single_composites
+ INSERT INTO tst_one_comp (a, b) VALUES
+ (1, ROW(1.0, 'a', 1)),
+ (2, ROW(2.0, 'b', 2)),
+ (3, ROW(3.0, 'c', 3)),
+ (4, ROW(4.0, 'd', 4)),
+ (5, ROW(NULL, NULL, 5));
+
+ -- test_tbl_composites
+ INSERT INTO tst_comps (a, b) VALUES
+ (ROW(1.0, 'a', 1), ARRAY[ROW(1, 'a', 1)::tst_comp_basic_t]),
+ (ROW(2.0, 'b', 2), ARRAY[ROW(2, 'b', 2)::tst_comp_basic_t]),
+ (ROW(3.0, 'c', 3), ARRAY[ROW(3, 'c', 3)::tst_comp_basic_t]),
+ (ROW(4.0, 'd', 4), ARRAY[ROW(4, 'd', 3)::tst_comp_basic_t]),
+ (ROW(5.0, 'e', NULL), ARRAY[NULL, ROW(5, NULL, 5)::tst_comp_basic_t]);
+
+ -- test_tbl_composite_with_enums
+ INSERT INTO tst_comp_enum (a, b) VALUES
+ (1, ROW(1.0, 'a', 1)),
+ (2, ROW(2.0, 'b', 2)),
+ (3, ROW(3.0, 'c', 3)),
+ (4, ROW(4.0, 'd', 4)),
+ (5, ROW(NULL, 'e', NULL));
+
+ -- test_tbl_composite_with_enums_array
+ INSERT INTO tst_comp_enum_array (a, b) VALUES
+ (ROW(1.0, 'a', 1), ARRAY[ROW(1, 'a', 1)::tst_comp_enum_t]),
+ (ROW(2.0, 'b', 2), ARRAY[ROW(2, 'b', 2)::tst_comp_enum_t]),
+ (ROW(3.0, 'c', 3), ARRAY[ROW(3, 'c', 3)::tst_comp_enum_t]),
+ (ROW(4.0, 'd', 3), ARRAY[ROW(3, 'd', 3)::tst_comp_enum_t]),
+ (ROW(5.0, 'e', 3), ARRAY[ROW(3, 'e', 3)::tst_comp_enum_t, NULL]);
+
+ -- test_tbl_composite_with_single_enums_array_in_composite
+ INSERT INTO tst_comp_one_enum_array (a, b) VALUES
+ (1, ROW(1.0, '{a, b, c}', 1)),
+ (2, ROW(2.0, '{a, b, c}', 2)),
+ (3, ROW(3.0, '{a, b, c}', 3)),
+ (4, ROW(4.0, '{c, b, d}', 4)),
+ (5, ROW(5.0, '{NULL, e, NULL}', 5));
+
+ -- test_tbl_composite_with_enums_array_in_composite
+ INSERT INTO tst_comp_enum_what (a, b) VALUES
+ (ROW(1.0, '{a, b, c}', 1), ARRAY[ROW(1, '{a, b, c}', 1)::tst_comp_enum_array_t]),
+ (ROW(2.0, '{b, c, a}', 2), ARRAY[ROW(2, '{b, c, a}', 1)::tst_comp_enum_array_t]),
+ (ROW(3.0, '{c, a, b}', 1), ARRAY[ROW(3, '{c, a, b}', 1)::tst_comp_enum_array_t]),
+ (ROW(4.0, '{c, b, d}', 4), ARRAY[ROW(4, '{c, b, d}', 4)::tst_comp_enum_array_t]),
+ (ROW(5.0, '{c, NULL, b}', NULL), ARRAY[ROW(5, '{c, e, b}', 1)::tst_comp_enum_array_t]);
+
+ -- test_tbl_mixed_composites
+ INSERT INTO tst_comp_mix_array (a, b) VALUES
+ (ROW(
+ ROW(1,'a',1),
+ ARRAY[ROW(1,'a',1)::tst_comp_basic_t, ROW(2,'b',2)::tst_comp_basic_t],
+ 'a',
+ '{a,b,NULL,c}'),
+ ARRAY[
+ ROW(
+ ROW(1,'a',1),
+ ARRAY[
+ ROW(1,'a',1)::tst_comp_basic_t,
+ ROW(2,'b',2)::tst_comp_basic_t,
+ NULL
+ ],
+ 'a',
+ '{a,b,c}'
+ )::tst_comp_mix_t
+ ]
+ );
+
+ -- test_tbl_range
+ INSERT INTO tst_range (a, b) VALUES
+ (1, '[1, 10]'),
+ (2, '[2, 20]'),
+ (3, '[3, 30]'),
+ (4, '[4, 40]'),
+ (5, '[5, 50]');
+
+ -- test_tbl_range_array
+ INSERT INTO tst_range_array (a, b, c) VALUES
+ (1, tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'infinity'), '{"[1,2]", "[10,20]"}'),
+ (2, tstzrange('Sat Aug 02 00:00:00 2014 CEST'::timestamptz, 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz), '{"[2,3]", "[20,30]"}'),
+ (3, tstzrange('Fri Aug 01 00:00:00 2014 CEST'::timestamptz, 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz), '{"[3,4]"}'),
+ (4, tstzrange('Thu Jul 31 00:00:00 2014 CEST'::timestamptz, 'Mon Aug 04 00:00:00 2014 CEST'::timestamptz), '{"[4,5]", NULL, "[40,50]"}'),
+ (5, NULL, NULL);
+
+ -- tst_hstore
+ INSERT INTO tst_hstore (a, b) VALUES
+ (1, '"a"=>"1"'),
+ (2, '"zzz"=>"foo"'),
+ (3, '"123"=>"321"'),
+ (4, '"yellow horse"=>"moaned"');
+
+ -- tst_dom_constr
+ INSERT INTO tst_dom_constr VALUES (10);
+));
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+# Check the data on subscriber
+my $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
+ SET timezone = '+2';
+ SELECT a, b FROM tst_one_array ORDER BY a;
+ SELECT a, b, c, d FROM tst_arrays ORDER BY a;
+ SELECT a, b FROM tst_one_enum ORDER BY a;
+ SELECT a, b FROM tst_enums ORDER BY a;
+ SELECT a, b FROM tst_one_comp ORDER BY a;
+ SELECT a, b FROM tst_comps ORDER BY a;
+ SELECT a, b FROM tst_comp_enum ORDER BY a;
+ SELECT a, b FROM tst_comp_enum_array ORDER BY a;
+ SELECT a, b FROM tst_comp_one_enum_array ORDER BY a;
+ SELECT a, b FROM tst_comp_enum_what ORDER BY a;
+ SELECT a, b FROM tst_comp_mix_array ORDER BY a;
+ SELECT a, b FROM tst_range ORDER BY a;
+ SELECT a, b, c FROM tst_range_array ORDER BY a;
+ SELECT a, b FROM tst_hstore ORDER BY a;
+));
+
+is( $result, '1|{1,2,3}
+2|{2,3,1}
+3|{3,2,1}
+4|{4,3,2}
+5|{5,NULL,3}
+{1,2,3}|{a,b,c}|{1.1,2.2,3.3}|{"1 day","2 days","3 days"}
+{2,3,1}|{b,c,a}|{2.2,3.3,1.1}|{00:02:00,00:03:00,00:01:00}
+{3,1,2}|{c,a,b}|{3.3,1.1,2.2}|{"3 years","1 year","2 years"}
+{4,1,2}|{d,a,b}|{4.4,1.1,2.2}|{"4 years","1 year","2 years"}
+{5,NULL,NULL}|{e,NULL,b}|{5.5,1.1,NULL}|{"5 years",NULL,NULL}
+1|a
+2|b
+3|c
+4|d
+5|
+a|{b,c}
+b|{c,a}
+c|{b,a}
+d|{c,b}
+e|{d,NULL}
+1|(1,a,1)
+2|(2,b,2)
+3|(3,c,3)
+4|(4,d,4)
+5|(,,5)
+(1,a,1)|{"(1,a,1)"}
+(2,b,2)|{"(2,b,2)"}
+(3,c,3)|{"(3,c,3)"}
+(4,d,4)|{"(4,d,3)"}
+(5,e,)|{NULL,"(5,,5)"}
+1|(1,a,1)
+2|(2,b,2)
+3|(3,c,3)
+4|(4,d,4)
+5|(,e,)
+(1,a,1)|{"(1,a,1)"}
+(2,b,2)|{"(2,b,2)"}
+(3,c,3)|{"(3,c,3)"}
+(4,d,3)|{"(3,d,3)"}
+(5,e,3)|{"(3,e,3)",NULL}
+1|(1,"{a,b,c}",1)
+2|(2,"{a,b,c}",2)
+3|(3,"{a,b,c}",3)
+4|(4,"{c,b,d}",4)
+5|(5,"{NULL,e,NULL}",5)
+(1,"{a,b,c}",1)|{"(1,\"{a,b,c}\",1)"}
+(2,"{b,c,a}",2)|{"(2,\"{b,c,a}\",1)"}
+(3,"{c,a,b}",1)|{"(3,\"{c,a,b}\",1)"}
+(4,"{c,b,d}",4)|{"(4,\"{c,b,d}\",4)"}
+(5,"{c,NULL,b}",)|{"(5,\"{c,e,b}\",1)"}
+("(1,a,1)","{""(1,a,1)"",""(2,b,2)""}",a,"{a,b,NULL,c}")|{"(\"(1,a,1)\",\"{\"\"(1,a,1)\"\",\"\"(2,b,2)\"\",NULL}\",a,\"{a,b,c}\")"}
+1|[1,11)
+2|[2,21)
+3|[3,31)
+4|[4,41)
+5|[5,51)
+1|["2014-08-04 00:00:00+02",infinity)|{"[1,3)","[10,21)"}
+2|["2014-08-02 00:00:00+02","2014-08-04 00:00:00+02")|{"[2,4)","[20,31)"}
+3|["2014-08-01 00:00:00+02","2014-08-04 00:00:00+02")|{"[3,5)"}
+4|["2014-07-31 00:00:00+02","2014-08-04 00:00:00+02")|{"[4,6)",NULL,"[40,51)"}
+5||
+1|"a"=>"1"
+2|"zzz"=>"foo"
+3|"123"=>"321"
+4|"yellow horse"=>"moaned"',
+ 'check replicated inserts on subscriber');
+
+# Run batch of updates
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ UPDATE tst_one_array SET b = '{4, 5, 6}' WHERE a = 1;
+ UPDATE tst_one_array SET b = '{4, 5, 6, 1}' WHERE a > 3;
+ UPDATE tst_arrays SET b = '{"1a", "2b", "3c"}', c = '{1.0, 2.0, 3.0}', d = '{"1 day 1 second", "2 days 2 seconds", "3 days 3 second"}' WHERE a = '{1, 2, 3}';
+ UPDATE tst_arrays SET b = '{"c", "d", "e"}', c = '{3.0, 4.0, 5.0}', d = '{"3 day 1 second", "4 days 2 seconds", "5 days 3 second"}' WHERE a[1] > 3;
+ UPDATE tst_one_enum SET b = 'c' WHERE a = 1;
+ UPDATE tst_one_enum SET b = NULL WHERE a > 3;
+ UPDATE tst_enums SET b = '{e, NULL}' WHERE a = 'a';
+ UPDATE tst_enums SET b = '{e, d}' WHERE a > 'c';
+ UPDATE tst_one_comp SET b = ROW(1.0, 'A', 1) WHERE a = 1;
+ UPDATE tst_one_comp SET b = ROW(NULL, 'x', -1) WHERE a > 3;
+ UPDATE tst_comps SET b = ARRAY[ROW(9, 'x', -1)::tst_comp_basic_t] WHERE (a).a = 1.0;
+ UPDATE tst_comps SET b = ARRAY[NULL, ROW(9, 'x', NULL)::tst_comp_basic_t] WHERE (a).a > 3.9;
+ UPDATE tst_comp_enum SET b = ROW(1.0, NULL, NULL) WHERE a = 1;
+ UPDATE tst_comp_enum SET b = ROW(4.0, 'd', 44) WHERE a > 3;
+ UPDATE tst_comp_enum_array SET b = ARRAY[NULL, ROW(3, 'd', 3)::tst_comp_enum_t] WHERE a = ROW(1.0, 'a', 1)::tst_comp_enum_t;
+ UPDATE tst_comp_enum_array SET b = ARRAY[ROW(1, 'a', 1)::tst_comp_enum_t, ROW(2, 'b', 2)::tst_comp_enum_t] WHERE (a).a > 3;
+ UPDATE tst_comp_one_enum_array SET b = ROW(1.0, '{a, e, c}', NULL) WHERE a = 1;
+ UPDATE tst_comp_one_enum_array SET b = ROW(4.0, '{c, b, d}', 4) WHERE a > 3;
+ UPDATE tst_comp_enum_what SET b = ARRAY[NULL, ROW(1, '{a, b, c}', 1)::tst_comp_enum_array_t, ROW(NULL, '{a, e, c}', 2)::tst_comp_enum_array_t] WHERE (a).a = 1;
+ UPDATE tst_comp_enum_what SET b = ARRAY[ROW(5, '{a, b, c}', 5)::tst_comp_enum_array_t] WHERE (a).a > 3;
+ UPDATE tst_comp_mix_array SET b[2] = NULL WHERE ((a).a).a = 1;
+ UPDATE tst_range SET b = '[100, 1000]' WHERE a = 1;
+ UPDATE tst_range SET b = '(1, 90)' WHERE a > 3;
+ UPDATE tst_range_array SET c = '{"[100, 1000]"}' WHERE a = 1;
+ UPDATE tst_range_array SET b = tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'infinity'), c = '{NULL, "[11,9999999]"}' WHERE a > 3;
+ UPDATE tst_hstore SET b = '"updated"=>"value"' WHERE a < 3;
+ UPDATE tst_hstore SET b = '"also"=>"updated"' WHERE a = 3;
+));
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+# Check the data on subscriber
+$result = $node_subscriber->safe_psql(
+ 'postgres', qq(
+ SET timezone = '+2';
+ SELECT a, b FROM tst_one_array ORDER BY a;
+ SELECT a, b, c, d FROM tst_arrays ORDER BY a;
+ SELECT a, b FROM tst_one_enum ORDER BY a;
+ SELECT a, b FROM tst_enums ORDER BY a;
+ SELECT a, b FROM tst_one_comp ORDER BY a;
+ SELECT a, b FROM tst_comps ORDER BY a;
+ SELECT a, b FROM tst_comp_enum ORDER BY a;
+ SELECT a, b FROM tst_comp_enum_array ORDER BY a;
+ SELECT a, b FROM tst_comp_one_enum_array ORDER BY a;
+ SELECT a, b FROM tst_comp_enum_what ORDER BY a;
+ SELECT a, b FROM tst_comp_mix_array ORDER BY a;
+ SELECT a, b FROM tst_range ORDER BY a;
+ SELECT a, b, c FROM tst_range_array ORDER BY a;
+ SELECT a, b FROM tst_hstore ORDER BY a;
+));
+
+is( $result, '1|{4,5,6}
+2|{2,3,1}
+3|{3,2,1}
+4|{4,5,6,1}
+5|{4,5,6,1}
+{1,2,3}|{1a,2b,3c}|{1,2,3}|{"1 day 00:00:01","2 days 00:00:02","3 days 00:00:03"}
+{2,3,1}|{b,c,a}|{2.2,3.3,1.1}|{00:02:00,00:03:00,00:01:00}
+{3,1,2}|{c,a,b}|{3.3,1.1,2.2}|{"3 years","1 year","2 years"}
+{4,1,2}|{c,d,e}|{3,4,5}|{"3 days 00:00:01","4 days 00:00:02","5 days 00:00:03"}
+{5,NULL,NULL}|{c,d,e}|{3,4,5}|{"3 days 00:00:01","4 days 00:00:02","5 days 00:00:03"}
+1|c
+2|b
+3|c
+4|
+5|
+a|{e,NULL}
+b|{c,a}
+c|{b,a}
+d|{e,d}
+e|{e,d}
+1|(1,A,1)
+2|(2,b,2)
+3|(3,c,3)
+4|(,x,-1)
+5|(,x,-1)
+(1,a,1)|{"(9,x,-1)"}
+(2,b,2)|{"(2,b,2)"}
+(3,c,3)|{"(3,c,3)"}
+(4,d,4)|{NULL,"(9,x,)"}
+(5,e,)|{NULL,"(9,x,)"}
+1|(1,,)
+2|(2,b,2)
+3|(3,c,3)
+4|(4,d,44)
+5|(4,d,44)
+(1,a,1)|{NULL,"(3,d,3)"}
+(2,b,2)|{"(2,b,2)"}
+(3,c,3)|{"(3,c,3)"}
+(4,d,3)|{"(1,a,1)","(2,b,2)"}
+(5,e,3)|{"(1,a,1)","(2,b,2)"}
+1|(1,"{a,e,c}",)
+2|(2,"{a,b,c}",2)
+3|(3,"{a,b,c}",3)
+4|(4,"{c,b,d}",4)
+5|(4,"{c,b,d}",4)
+(1,"{a,b,c}",1)|{NULL,"(1,\"{a,b,c}\",1)","(,\"{a,e,c}\",2)"}
+(2,"{b,c,a}",2)|{"(2,\"{b,c,a}\",1)"}
+(3,"{c,a,b}",1)|{"(3,\"{c,a,b}\",1)"}
+(4,"{c,b,d}",4)|{"(5,\"{a,b,c}\",5)"}
+(5,"{c,NULL,b}",)|{"(5,\"{a,b,c}\",5)"}
+("(1,a,1)","{""(1,a,1)"",""(2,b,2)""}",a,"{a,b,NULL,c}")|{"(\"(1,a,1)\",\"{\"\"(1,a,1)\"\",\"\"(2,b,2)\"\",NULL}\",a,\"{a,b,c}\")",NULL}
+1|[100,1001)
+2|[2,21)
+3|[3,31)
+4|[2,90)
+5|[2,90)
+1|["2014-08-04 00:00:00+02",infinity)|{"[100,1001)"}
+2|["2014-08-02 00:00:00+02","2014-08-04 00:00:00+02")|{"[2,4)","[20,31)"}
+3|["2014-08-01 00:00:00+02","2014-08-04 00:00:00+02")|{"[3,5)"}
+4|["2014-08-04 00:00:00+02",infinity)|{NULL,"[11,10000000)"}
+5|["2014-08-04 00:00:00+02",infinity)|{NULL,"[11,10000000)"}
+1|"updated"=>"value"
+2|"updated"=>"value"
+3|"also"=>"updated"
+4|"yellow horse"=>"moaned"',
+ 'check replicated updates on subscriber');
+
+# Run batch of deletes
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ DELETE FROM tst_one_array WHERE a = 1;
+ DELETE FROM tst_one_array WHERE b = '{2, 3, 1}';
+ DELETE FROM tst_arrays WHERE a = '{1, 2, 3}';
+ DELETE FROM tst_arrays WHERE a[1] = 2;
+ DELETE FROM tst_one_enum WHERE a = 1;
+ DELETE FROM tst_one_enum WHERE b = 'b';
+ DELETE FROM tst_enums WHERE a = 'a';
+ DELETE FROM tst_enums WHERE b[1] = 'b';
+ DELETE FROM tst_one_comp WHERE a = 1;
+ DELETE FROM tst_one_comp WHERE (b).a = 2.0;
+ DELETE FROM tst_comps WHERE (a).b = 'a';
+ DELETE FROM tst_comps WHERE ROW(3, 'c', 3)::tst_comp_basic_t = ANY(b);
+ DELETE FROM tst_comp_enum WHERE a = 1;
+ DELETE FROM tst_comp_enum WHERE (b).a = 2.0;
+ DELETE FROM tst_comp_enum_array WHERE a = ROW(1.0, 'a', 1)::tst_comp_enum_t;
+ DELETE FROM tst_comp_enum_array WHERE ROW(3, 'c', 3)::tst_comp_enum_t = ANY(b);
+ DELETE FROM tst_comp_one_enum_array WHERE a = 1;
+ DELETE FROM tst_comp_one_enum_array WHERE 'a' = ANY((b).b);
+ DELETE FROM tst_comp_enum_what WHERE (a).a = 1;
+ DELETE FROM tst_comp_enum_what WHERE (b[1]).b = '{c, a, b}';
+ DELETE FROM tst_comp_mix_array WHERE ((a).a).a = 1;
+ DELETE FROM tst_range WHERE a = 1;
+ DELETE FROM tst_range WHERE '[10,20]' && b;
+ DELETE FROM tst_range_array WHERE a = 1;
+ DELETE FROM tst_range_array WHERE tstzrange('Mon Aug 04 00:00:00 2014 CEST'::timestamptz, 'Mon Aug 05 00:00:00 2014 CEST'::timestamptz) && b;
+ DELETE FROM tst_hstore WHERE a = 1;
+));
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+# Check the data on subscriber
+$result = $node_subscriber->safe_psql(
+ 'postgres', qq(
+ SET timezone = '+2';
+ SELECT a, b FROM tst_one_array ORDER BY a;
+ SELECT a, b, c, d FROM tst_arrays ORDER BY a;
+ SELECT a, b FROM tst_one_enum ORDER BY a;
+ SELECT a, b FROM tst_enums ORDER BY a;
+ SELECT a, b FROM tst_one_comp ORDER BY a;
+ SELECT a, b FROM tst_comps ORDER BY a;
+ SELECT a, b FROM tst_comp_enum ORDER BY a;
+ SELECT a, b FROM tst_comp_enum_array ORDER BY a;
+ SELECT a, b FROM tst_comp_one_enum_array ORDER BY a;
+ SELECT a, b FROM tst_comp_enum_what ORDER BY a;
+ SELECT a, b FROM tst_comp_mix_array ORDER BY a;
+ SELECT a, b FROM tst_range ORDER BY a;
+ SELECT a, b, c FROM tst_range_array ORDER BY a;
+ SELECT a, b FROM tst_hstore ORDER BY a;
+));
+
+is( $result, '3|{3,2,1}
+4|{4,5,6,1}
+5|{4,5,6,1}
+{3,1,2}|{c,a,b}|{3.3,1.1,2.2}|{"3 years","1 year","2 years"}
+{4,1,2}|{c,d,e}|{3,4,5}|{"3 days 00:00:01","4 days 00:00:02","5 days 00:00:03"}
+{5,NULL,NULL}|{c,d,e}|{3,4,5}|{"3 days 00:00:01","4 days 00:00:02","5 days 00:00:03"}
+3|c
+4|
+5|
+b|{c,a}
+d|{e,d}
+e|{e,d}
+3|(3,c,3)
+4|(,x,-1)
+5|(,x,-1)
+(2,b,2)|{"(2,b,2)"}
+(4,d,4)|{NULL,"(9,x,)"}
+(5,e,)|{NULL,"(9,x,)"}
+3|(3,c,3)
+4|(4,d,44)
+5|(4,d,44)
+(2,b,2)|{"(2,b,2)"}
+(4,d,3)|{"(1,a,1)","(2,b,2)"}
+(5,e,3)|{"(1,a,1)","(2,b,2)"}
+4|(4,"{c,b,d}",4)
+5|(4,"{c,b,d}",4)
+(2,"{b,c,a}",2)|{"(2,\"{b,c,a}\",1)"}
+(4,"{c,b,d}",4)|{"(5,\"{a,b,c}\",5)"}
+(5,"{c,NULL,b}",)|{"(5,\"{a,b,c}\",5)"}
+2|["2014-08-02 00:00:00+02","2014-08-04 00:00:00+02")|{"[2,4)","[20,31)"}
+3|["2014-08-01 00:00:00+02","2014-08-04 00:00:00+02")|{"[3,5)"}
+2|"updated"=>"value"
+3|"also"=>"updated"
+4|"yellow horse"=>"moaned"',
+ 'check replicated deletes on subscriber');
+
+# Test a domain with a constraint backed by a SQL-language function,
+# which needs an active snapshot in order to operate.
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tst_dom_constr VALUES (11)");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT sum(a) FROM tst_dom_constr");
+is($result, '21', 'sql-function constraint on domain');
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/003_constraints.pl b/src/test/subscription/t/003_constraints.pl
new file mode 100644
index 0000000..6e90236
--- /dev/null
+++ b/src/test/subscription/t/003_constraints.pl
@@ -0,0 +1,141 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# This test checks that constraints work on subscriber
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Initialize publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Setup structure on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_fk (bid int PRIMARY KEY);");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_fk_ref (id int PRIMARY KEY, junk text, bid int REFERENCES tab_fk (bid));"
+);
+
+# Setup structure on subscriber; column order intentionally different
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_fk (bid int PRIMARY KEY);");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid), junk text);"
+);
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR ALL TABLES;");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub WITH (copy_data = false)"
+);
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_fk (bid) VALUES (1);");
+# "junk" value is meant to be large enough to force out-of-line storage
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_fk_ref (id, bid, junk) VALUES (1, 1, repeat(pi()::text,20000));"
+);
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+# Check data on subscriber
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk;");
+is($result, qq(1|1|1), 'check replicated tab_fk inserts on subscriber');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
+is($result, qq(1|1|1), 'check replicated tab_fk_ref inserts on subscriber');
+
+# Drop the fk on publisher
+$node_publisher->safe_psql('postgres', "DROP TABLE tab_fk CASCADE;");
+
+# Insert data
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_fk_ref (id, bid) VALUES (2, 2);");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+# FK is not enforced on subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
+is($result, qq(2|1|2), 'check FK ignored on subscriber');
+
+# Add replica trigger
+$node_subscriber->safe_psql(
+ 'postgres', qq{
+CREATE FUNCTION filter_basic_dml_fn() RETURNS TRIGGER AS \$\$
+BEGIN
+ IF (TG_OP = 'INSERT') THEN
+ IF (NEW.id < 10) THEN
+ RETURN NEW;
+ ELSE
+ RETURN NULL;
+ END IF;
+ ELSIF (TG_OP = 'UPDATE') THEN
+ RETURN NULL;
+ ELSE
+ RAISE WARNING 'Unknown action';
+ RETURN NULL;
+ END IF;
+END;
+\$\$ LANGUAGE plpgsql;
+CREATE TRIGGER filter_basic_dml_trg
+ BEFORE INSERT OR UPDATE OF bid ON tab_fk_ref
+ FOR EACH ROW EXECUTE PROCEDURE filter_basic_dml_fn();
+ALTER TABLE tab_fk_ref ENABLE REPLICA TRIGGER filter_basic_dml_trg;
+});
+
+# Insert data
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_fk_ref (id, bid) VALUES (10, 10);");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+# The trigger should cause the insert to be skipped on subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
+is($result, qq(2|1|2), 'check replica insert trigger applied on subscriber');
+
+# Update data
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_fk_ref SET bid = 2 WHERE bid = 1;");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+# The trigger should cause the update to be skipped on subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
+is($result, qq(2|1|2),
+ 'check replica update column trigger applied on subscriber');
+
+# Update a column not listed in the trigger's column list; the trigger fires
+# anyway because logical replication ships all columns in an update.
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_fk_ref SET id = 6 WHERE id = 1;");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(id), max(id) FROM tab_fk_ref;");
+is($result, qq(2|1|2),
+ 'check column trigger applied even on update for other column');
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
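
A note on the ENABLE REPLICA TRIGGER step above: the logical replication apply worker runs with session_replication_role set to 'replica', so ordinary (ORIGIN-enabled) triggers, including the internal foreign-key enforcement triggers, do not fire during apply; only triggers enabled as REPLICA or ALWAYS do. That is also why the earlier 'check FK ignored on subscriber' test passes. The sketch below is illustrative only, not part of the patch, and assumes it would run before the nodes are stopped; it reproduces the same filtering in a plain session by switching session_replication_role.

# In replica role the BEFORE INSERT trigger fires (it is ENABLE REPLICA) and
# returns NULL for id >= 10, so the row is filtered out just like a row
# arriving through logical replication would be.
$node_subscriber->safe_psql(
	'postgres', qq{
	SET session_replication_role = 'replica';
	INSERT INTO tab_fk_ref (id, bid) VALUES (100, 100);
});
is( $node_subscriber->safe_psql(
		'postgres', "SELECT count(*) FROM tab_fk_ref WHERE id = 100"),
	qq(0),
	'replica-role insert filtered by the REPLICA trigger');
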
diff --git a/src/test/subscription/t/004_sync.pl b/src/test/subscription/t/004_sync.pl
new file mode 100644
index 0000000..aa7714c
--- /dev/null
+++ b/src/test/subscription/t/004_sync.pl
@@ -0,0 +1,178 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Tests for logical replication table syncing
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Initialize publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->append_conf('postgresql.conf',
+ "wal_retrieve_retry_interval = 1ms");
+$node_subscriber->start;
+
+# Create some preexisting content on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rep (a int primary key)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rep SELECT generate_series(1,10)");
+
+# Setup structure on subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rep (a int primary key)");
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR ALL TABLES");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
+
+my $result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep");
+is($result, qq(10), 'initial data synced for first sub');
+
+# drop subscription so that there is unreplicated data
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rep SELECT generate_series(11,20)");
+
+# recreate the subscription, it will try to do initial copy
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub"
+);
+
+# but it will get stuck in the data copy phase, as the copy fails on the
+# primary key constraint
+my $started_query = "SELECT srsubstate = 'd' FROM pg_subscription_rel;";
+$node_subscriber->poll_query_until('postgres', $started_query)
+ or die "Timed out while waiting for subscriber to start sync";
+
+# remove the conflicting data
+$node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep;");
+
+# wait for sync to finish this time
+$node_subscriber->wait_for_subscription_sync;
+
+# check that all data is synced
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep");
+is($result, qq(20), 'initial data synced for second sub');
+
+# now check another subscription for the same node pair
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr' PUBLICATION tap_pub WITH (copy_data = false)"
+);
+
+# wait for it to start
+$node_subscriber->poll_query_until('postgres',
+ "SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL"
+) or die "Timed out while waiting for subscriber to start";
+
+# and drop both subscriptions
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub2");
+
+# check subscriptions are removed
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
+is($result, qq(0), 'second and third sub are dropped');
+
+# remove the conflicting data
+$node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep;");
+
+# recreate the subscription again
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub"
+);
+
+# and wait for data sync to finish again
+$node_subscriber->wait_for_subscription_sync;
+
+# check that all data is synced
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep");
+is($result, qq(20), 'initial data synced for fourth sub');
+
+# add new table on subscriber
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_rep_next (a int)");
+
+# setup structure with existing data on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rep_next (a) AS SELECT generate_series(1,10)");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_rep_next");
+is($result, qq(0), 'no data for table added after subscription initialized');
+
+# ask for data sync
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION");
+
+# wait for sync to finish
+$node_subscriber->wait_for_subscription_sync;
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_rep_next");
+is($result, qq(10),
+ 'data for table added after subscription initialized are now synced');
+
+# Add some data
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rep_next SELECT generate_series(1,10)");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_rep_next");
+is($result, qq(20),
+ 'changes for table added after subscription initialized replicated');
+
+# clean up
+$node_publisher->safe_psql('postgres', "DROP TABLE tab_rep_next");
+$node_subscriber->safe_psql('postgres', "DROP TABLE tab_rep_next");
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
+
+# Table tab_rep already has the same records on both publisher and subscriber
+# at this time. Recreate the subscription; it will do the initial copy of the
+# table again and fail due to a unique constraint violation.
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub"
+);
+
+$result = $node_subscriber->poll_query_until('postgres', $started_query)
+ or die "Timed out while waiting for subscriber to start sync";
+
+# DROP SUBSCRIPTION must clean up slots on the publisher side when the
+# subscriber is stuck on data copy for constraint violation.
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
+
+# When DROP SUBSCRIPTION tries to drop the tablesync slot, the slot may not
+# have been created yet, in which case it gets created after the DROP
+# SUBSCRIPTION finishes. Such slots eventually get dropped at walsender exit
+# time. So, to avoid being affected by such ephemeral tablesync slots, we
+# wait until all the slots have been cleaned up.
+ok( $node_publisher->poll_query_until(
+ 'postgres', 'SELECT count(*) = 0 FROM pg_replication_slots'),
+ 'DROP SUBSCRIPTION during error can clean up the slots on the publisher');
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/005_encoding.pl b/src/test/subscription/t/005_encoding.pl
new file mode 100644
index 0000000..2f0bf77
--- /dev/null
+++ b/src/test/subscription/t/005_encoding.pl
@@ -0,0 +1,52 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test replication between databases with different encodings
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(
+ allows_streaming => 'logical',
+ extra => [ '--locale=C', '--encoding=UTF8' ]);
+$node_publisher->start;
+
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(
+ allows_streaming => 'logical',
+ extra => [ '--locale=C', '--encoding=LATIN1' ]);
+$node_subscriber->start;
+
+my $ddl = "CREATE TABLE test1 (a int, b text);";
+$node_publisher->safe_psql('postgres', $ddl);
+$node_subscriber->safe_psql('postgres', $ddl);
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION mypub FOR ALL TABLES;");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr' PUBLICATION mypub;"
+);
+
+# Wait for initial sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'mysub');
+
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO test1 VALUES (1, E'Mot\xc3\xb6rhead')}); # hand-rolled UTF-8
+
+$node_publisher->wait_for_catchup('mysub');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'}
+ ), # LATIN1
+ qq(1),
+ 'data replicated to subscriber');
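+# The publisher converts outgoing data to the apply worker's client encoding
+# (the subscriber's database encoding), which is why the UTF-8 bytes \xc3\xb6
+# inserted above arrive as the single LATIN1 byte \xf6 checked here.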
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/006_rewrite.pl b/src/test/subscription/t/006_rewrite.pl
new file mode 100644
index 0000000..8bc7e87
--- /dev/null
+++ b/src/test/subscription/t/006_rewrite.pl
@@ -0,0 +1,65 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test logical replication behavior with heap rewrites
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+my $ddl = "CREATE TABLE test1 (a int, b text);";
+$node_publisher->safe_psql('postgres', $ddl);
+$node_subscriber->safe_psql('postgres', $ddl);
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION mypub FOR ALL TABLES;");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr' PUBLICATION mypub;"
+);
+
+# Wait for initial sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'mysub');
+
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');});
+
+$node_publisher->wait_for_catchup('mysub');
+
+is( $node_subscriber->safe_psql('postgres', q{SELECT a, b FROM test1}),
+ qq(1|one
+2|two),
+ 'initial data replicated to subscriber');
+
+# DDL that causes a heap rewrite
+my $ddl2 = "ALTER TABLE test1 ADD c int NOT NULL DEFAULT 0;";
+$node_subscriber->safe_psql('postgres', $ddl2);
+$node_publisher->safe_psql('postgres', $ddl2);
+
+$node_publisher->wait_for_catchup('mysub');
+
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO test1 (a, b, c) VALUES (3, 'three', 33);});
+
+$node_publisher->wait_for_catchup('mysub');
+
+is( $node_subscriber->safe_psql('postgres', q{SELECT a, b, c FROM test1}),
+ qq(1|one|0
+2|two|0
+3|three|33),
+ 'data replicated to subscriber');
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl
new file mode 100644
index 0000000..cbdb5b6
--- /dev/null
+++ b/src/test/subscription/t/007_ddl.pl
@@ -0,0 +1,75 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test some logical replication DDL behavior
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+my $ddl = "CREATE TABLE test1 (a int, b text);";
+$node_publisher->safe_psql('postgres', $ddl);
+$node_subscriber->safe_psql('postgres', $ddl);
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION mypub FOR ALL TABLES;");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr' PUBLICATION mypub;"
+);
+
+$node_publisher->wait_for_catchup('mysub');
+
+$node_subscriber->safe_psql(
+ 'postgres', q{
+BEGIN;
+ALTER SUBSCRIPTION mysub DISABLE;
+ALTER SUBSCRIPTION mysub SET (slot_name = NONE);
+DROP SUBSCRIPTION mysub;
+COMMIT;
+});
+
+pass "subscription disable and drop in same transaction did not hang";
+
+# Only one of the specified publications exists.
+my ($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
+ "CREATE SUBSCRIPTION mysub1 CONNECTION '$publisher_connstr' PUBLICATION mypub, non_existent_pub"
+);
+ok( $stderr =~
+ m/WARNING: publication "non_existent_pub" does not exist on the publisher/,
+ "Create subscription throws warning for non-existent publication");
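+# The subscription is still created despite the warning; if the missing
+# publication is created on the publisher later, an
+# ALTER SUBSCRIPTION mysub1 REFRESH PUBLICATION on the subscriber should be
+# enough to start replicating its tables.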
+
+# Wait for initial table sync to finish.
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'mysub1');
+
+# Specifying non-existent publication along with add publication.
+($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
+ "ALTER SUBSCRIPTION mysub1 ADD PUBLICATION non_existent_pub1, non_existent_pub2"
+);
+ok( $stderr =~
+ m/WARNING: publications "non_existent_pub1", "non_existent_pub2" do not exist on the publisher/,
+ "Alter subscription add publication throws warning for non-existent publications"
+);
+
+# Specifying non-existent publication along with set publication.
+($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
+ "ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub");
+ok( $stderr =~
+ m/WARNING: publication "non_existent_pub" does not exist on the publisher/,
+ "Alter subscription set publication throws warning for non-existent publication"
+);
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/008_diff_schema.pl b/src/test/subscription/t/008_diff_schema.pl
new file mode 100644
index 0000000..67db1eb
--- /dev/null
+++ b/src/test/subscription/t/008_diff_schema.pl
@@ -0,0 +1,124 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test behavior with different schema on subscriber
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create some preexisting content on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+
+# Setup structure on subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999, e int GENERATED BY DEFAULT AS IDENTITY)"
+);
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR ALL TABLES");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
+
+my $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(2|2|2), 'check initial data was copied to subscriber');
+
+# Update the rows on the publisher and check the additional columns on
+# subscriber didn't change
+$node_publisher->safe_psql('postgres', "UPDATE test_tab SET b = md5(b)");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab");
+is($result, qq(2|2|2|2),
+ 'check extra columns contain local defaults after copy');
+
+# Change the local values of the extra columns on the subscriber,
+# update publisher, and check that subscriber retains the expected
+# values
+$node_subscriber->safe_psql('postgres',
+ "UPDATE test_tab SET c = 'epoch'::timestamptz + 987654321 * interval '1s'"
+);
+$node_publisher->safe_psql('postgres',
+ "UPDATE test_tab SET b = md5(a::text)");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(extract(epoch from c) = 987654321), count(d = 999) FROM test_tab"
+);
+is($result, qq(2|2|2), 'check extra columns contain locally changed data');
+
+# Another insert
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (3, 'baz')");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab");
+is($result, qq(3|3|3|3),
+ 'check extra columns contain local defaults after apply');
+
+
+# Check a bug about adding a replica identity column on the subscriber
+# that was not yet mapped to a column on the publisher. This would
+# result in errors on the subscriber and replication thus not
+# progressing.
+# (https://www.postgresql.org/message-id/flat/a9139c29-7ddd-973b-aa7f-71fed9c38d75%40minerva.info)
+
+$node_publisher->safe_psql('postgres', "CREATE TABLE test_tab2 (a int)");
+
+$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab2 (a int)");
+
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION");
+
+$node_subscriber->wait_for_subscription_sync;
+
+# Add replica identity column. (The serial is not necessary, but it's
+# a convenient way to get a default on the new column so that rows
+# from the publisher that don't have the column yet can be inserted.)
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE test_tab2 ADD COLUMN b serial PRIMARY KEY");
+
+$node_publisher->safe_psql('postgres', "INSERT INTO test_tab2 VALUES (1)");
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT count(*), min(a), max(a) FROM test_tab2"),
+ qq(1|1|1),
+ 'check replicated inserts on subscriber');
+
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/009_matviews.pl b/src/test/subscription/t/009_matviews.pl
new file mode 100644
index 0000000..38080b4
--- /dev/null
+++ b/src/test/subscription/t/009_matviews.pl
@@ -0,0 +1,54 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test materialized views behavior
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION mypub FOR ALL TABLES;");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr' PUBLICATION mypub;"
+);
+
+$node_publisher->safe_psql('postgres',
+ q{CREATE TABLE test1 (a int PRIMARY KEY, b text)});
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');});
+
+$node_subscriber->safe_psql('postgres',
+ q{CREATE TABLE test1 (a int PRIMARY KEY, b text);});
+
+$node_publisher->wait_for_catchup('mysub');
+
+# Materialized views are not supported by logical replication, but
+# logical decoding does produce change information for them, so we
+# need to make sure they are properly ignored. (bug #15044)
+
+# create a MV with some data
+$node_publisher->safe_psql('postgres',
+ q{CREATE MATERIALIZED VIEW testmv1 AS SELECT * FROM test1;});
+$node_publisher->wait_for_catchup('mysub');
+
+# There is no equivalent relation on the subscriber, but MV data is
+# not replicated, so this does not hang.
+
+pass "materialized view data not replicated";
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/010_truncate.pl b/src/test/subscription/t/010_truncate.pl
new file mode 100644
index 0000000..a5b6445
--- /dev/null
+++ b/src/test/subscription/t/010_truncate.pl
@@ -0,0 +1,239 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test TRUNCATE
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# setup
+
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->append_conf('postgresql.conf',
+ qq(max_logical_replication_workers = 6));
+$node_subscriber->start;
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab1 (a int PRIMARY KEY)");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab1 (a int PRIMARY KEY)");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab2 (a int PRIMARY KEY)");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab2 (a int PRIMARY KEY)");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab3 (a int PRIMARY KEY)");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab3 (a int PRIMARY KEY)");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab4 (x int PRIMARY KEY, y int REFERENCES tab3)");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab4 (x int PRIMARY KEY, y int REFERENCES tab3)");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SEQUENCE seq1 OWNED BY tab1.a");
+$node_subscriber->safe_psql('postgres', "ALTER SEQUENCE seq1 START 101");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub1 FOR TABLE tab1");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub2 FOR TABLE tab2 WITH (publish = insert)");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub3 FOR TABLE tab3, tab4");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr' PUBLICATION pub2"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub3 CONNECTION '$publisher_connstr' PUBLICATION pub3"
+);
+
+# Wait for initial sync of all subscriptions
+$node_subscriber->wait_for_subscription_sync;
+
+# insert data to truncate
+
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab1 VALUES (1), (2), (3)");
+
+$node_publisher->wait_for_catchup('sub1');
+
+# truncate and check
+
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1");
+
+$node_publisher->wait_for_catchup('sub1');
+
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab1");
+is($result, qq(0||), 'truncate replicated');
+
+$result = $node_subscriber->safe_psql('postgres', "SELECT nextval('seq1')");
+is($result, qq(1), 'sequence not restarted');
+
+# truncate with restart identity
+
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1 RESTART IDENTITY");
+
+$node_publisher->wait_for_catchup('sub1');
+
+$result = $node_subscriber->safe_psql('postgres', "SELECT nextval('seq1')");
+is($result, qq(101), 'truncate restarted identities');
+
+# test publication that does not replicate truncate
+
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab2 VALUES (1), (2), (3)");
+
+$node_publisher->safe_psql('postgres', "TRUNCATE tab2");
+
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab2");
+is($result, qq(3|1|3), 'truncate not replicated');
+
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION pub2 SET (publish = 'insert, truncate')");
+
+$node_publisher->safe_psql('postgres', "TRUNCATE tab2");
+
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab2");
+is($result, qq(0||), 'truncate replicated after publication change');
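+# The per-operation flags of a publication live in pg_publication; a quick
+# sanity check of the change above could look something like this (not
+# asserted on here):
+#
+#   $node_publisher->safe_psql('postgres',
+#       "SELECT pubinsert, pubtruncate FROM pg_publication WHERE pubname = 'pub2'");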
+
+# test multiple tables connected by foreign keys
+
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab3 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab4 VALUES (11, 1), (111, 1), (22, 2)");
+
+$node_publisher->safe_psql('postgres', "TRUNCATE tab3, tab4");
+
+$node_publisher->wait_for_catchup('sub3');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab3");
+is($result, qq(0||), 'truncate of multiple tables replicated');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(x), max(x) FROM tab4");
+is($result, qq(0||), 'truncate of multiple tables replicated');
+
+# test truncate of multiple tables, some of which are not published
+
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION sub2");
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION pub2");
+
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab1 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab2 VALUES (1), (2), (3)");
+
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1, tab2");
+
+$node_publisher->wait_for_catchup('sub1');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab1");
+is($result, qq(0||), 'truncate of multiple tables some not published');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab2");
+is($result, qq(3|1|3), 'truncate of multiple tables some not published');
+
+# Test that truncate works for synchronous logical replication
+
+$node_publisher->safe_psql('postgres',
+ "ALTER SYSTEM SET synchronous_standby_names TO 'sub1'");
+$node_publisher->safe_psql('postgres', "SELECT pg_reload_conf()");
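+# The apply worker connects with application_name defaulting to the
+# subscription name, so 'sub1' can be matched by synchronous_standby_names
+# above.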
+
+# insert data to truncate
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab1 VALUES (1), (2), (3)");
+
+$node_publisher->wait_for_catchup('sub1');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab1");
+is($result, qq(3|1|3), 'check synchronous logical replication');
+
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1");
+
+$node_publisher->wait_for_catchup('sub1');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab1");
+is($result, qq(0||),
+ 'truncate replicated in synchronous logical replication');
+
+$node_publisher->safe_psql('postgres',
+ "ALTER SYSTEM RESET synchronous_standby_names");
+$node_publisher->safe_psql('postgres', "SELECT pg_reload_conf()");
+
+# test that truncate works for logical replication when there are multiple
+# subscriptions for a single table
+
+$node_publisher->safe_psql('postgres', "CREATE TABLE tab5 (a int)");
+
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab5 (a int)");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub5 FOR TABLE tab5");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub5_1 CONNECTION '$publisher_connstr' PUBLICATION pub5"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub5_2 CONNECTION '$publisher_connstr' PUBLICATION pub5"
+);
+
+# wait for initial data sync
+$node_subscriber->wait_for_subscription_sync;
+
+# insert data to truncate
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab5 VALUES (1), (2), (3)");
+
+$node_publisher->wait_for_catchup('sub5_1');
+$node_publisher->wait_for_catchup('sub5_2');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab5");
+is($result, qq(6|1|3), 'insert replicated for multiple subscriptions');
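+# tab5 has no primary key and both subscriptions apply the three inserted
+# rows independently, hence six rows on the subscriber.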
+
+$node_publisher->safe_psql('postgres', "TRUNCATE tab5");
+
+$node_publisher->wait_for_catchup('sub5_1');
+$node_publisher->wait_for_catchup('sub5_2');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab5");
+is($result, qq(0||), 'truncate replicated for multiple subscriptions');
+
+# check deadlocks
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT deadlocks FROM pg_stat_database WHERE datname='postgres'");
+is($result, qq(0), 'no deadlocks detected');
+
+done_testing();
diff --git a/src/test/subscription/t/011_generated.pl b/src/test/subscription/t/011_generated.pl
new file mode 100644
index 0000000..7711be2
--- /dev/null
+++ b/src/test/subscription/t/011_generated.pl
@@ -0,0 +1,99 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test generated columns
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# setup
+
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab1 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED)"
+);
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab1 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 22) STORED, c int)"
+);
+
+# data for initial sync
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab1 (a) VALUES (1), (2), (3)");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub1 FOR ALL TABLES");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1"
+);
+
+# Wait for initial sync of all subscriptions
+$node_subscriber->wait_for_subscription_sync;
+
+my $result = $node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab1");
+is( $result, qq(1|22
+2|44
+3|66), 'generated columns initial sync');
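+# Generated columns are not replicated; the subscriber computes b with its own
+# expression (a * 22), hence 22/44/66 instead of the publisher's 2/4/6.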
+
+# data to replicate
+
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (4), (5)");
+
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 5");
+
+$node_publisher->wait_for_catchup('sub1');
+
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab1");
+is( $result, qq(1|22|
+2|44|
+3|66|
+4|88|
+6|132|), 'generated columns replicated');
+
+# try it with a subscriber-side trigger
+
+$node_subscriber->safe_psql(
+ 'postgres', q{
+CREATE FUNCTION tab1_trigger_func() RETURNS trigger
+LANGUAGE plpgsql AS $$
+BEGIN
+ NEW.c := NEW.a + 10;
+ RETURN NEW;
+END $$;
+
+CREATE TRIGGER test1 BEFORE INSERT OR UPDATE ON tab1
+ FOR EACH ROW
+ EXECUTE PROCEDURE tab1_trigger_func();
+
+ALTER TABLE tab1 ENABLE REPLICA TRIGGER test1;
+});
+
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (7), (8)");
+
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 9 WHERE a = 7");
+
+$node_publisher->wait_for_catchup('sub1');
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab1 ORDER BY 1");
+is( $result, qq(1|22|
+2|44|
+3|66|
+4|88|
+6|132|
+8|176|18
+9|198|19), 'generated columns replicated with trigger');
+
+done_testing();
diff --git a/src/test/subscription/t/012_collation.pl b/src/test/subscription/t/012_collation.pl
new file mode 100644
index 0000000..823550a
--- /dev/null
+++ b/src/test/subscription/t/012_collation.pl
@@ -0,0 +1,108 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test collations, in particular nondeterministic ones
+# (only works with ICU)
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+if ($ENV{with_icu} ne 'yes')
+{
+ plan skip_all => 'ICU not supported by this build';
+}
+
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(
+ allows_streaming => 'logical',
+ extra => [ '--locale=C', '--encoding=UTF8' ]);
+$node_publisher->start;
+
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(
+ allows_streaming => 'logical',
+ extra => [ '--locale=C', '--encoding=UTF8' ]);
+$node_subscriber->start;
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+
+# Test plan: Create a table with a nondeterministic collation in the
+# primary key column. Pre-insert rows on the publisher and subscriber
+# that are collation-wise equal but byte-wise different. (We use a
+# string in different normal forms for that.) Set up publisher and
+# subscriber. Update the row on the publisher, but don't change the
+# primary key column. The subscriber needs to find the row to be
+# updated using the nondeterministic collation semantics. We need to
+# test for both a replica identity index and for replica identity
+# full, since those have different code paths internally.
+
+$node_subscriber->safe_psql('postgres',
+ q{CREATE COLLATION ctest_nondet (provider = icu, locale = 'und', deterministic = false)}
+);
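+# Under ctest_nondet the NFC and NFD spellings used below compare equal even
+# though their bytes differ; for example, something like
+#   SELECT U&'\00E4bc' = (U&'\0061\0308bc' COLLATE ctest_nondet);
+# is expected to return true.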
+
+# table with replica identity index
+
+$node_publisher->safe_psql('postgres',
+ q{CREATE TABLE tab1 (a text PRIMARY KEY, b text)});
+
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO tab1 VALUES (U&'\00E4bc', 'foo')});
+
+$node_subscriber->safe_psql('postgres',
+ q{CREATE TABLE tab1 (a text COLLATE ctest_nondet PRIMARY KEY, b text)});
+
+$node_subscriber->safe_psql('postgres',
+ q{INSERT INTO tab1 VALUES (U&'\0061\0308bc', 'foo')});
+
+# table with replica identity full
+
+$node_publisher->safe_psql('postgres', q{CREATE TABLE tab2 (a text, b text)});
+$node_publisher->safe_psql('postgres',
+ q{ALTER TABLE tab2 REPLICA IDENTITY FULL});
+
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO tab2 VALUES (U&'\00E4bc', 'foo')});
+
+$node_subscriber->safe_psql('postgres',
+ q{CREATE TABLE tab2 (a text COLLATE ctest_nondet, b text)});
+$node_subscriber->safe_psql('postgres',
+ q{ALTER TABLE tab2 REPLICA IDENTITY FULL});
+
+$node_subscriber->safe_psql('postgres',
+ q{INSERT INTO tab2 VALUES (U&'\0061\0308bc', 'foo')});
+
+# set up publication, subscription
+
+$node_publisher->safe_psql('postgres',
+ q{CREATE PUBLICATION pub1 FOR ALL TABLES});
+
+$node_subscriber->safe_psql('postgres',
+ qq{CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1 WITH (copy_data = false)}
+);
+
+$node_publisher->wait_for_catchup('sub1');
+
+# test with replica identity index
+
+$node_publisher->safe_psql('postgres',
+ q{UPDATE tab1 SET b = 'bar' WHERE b = 'foo'});
+
+$node_publisher->wait_for_catchup('sub1');
+
+is($node_subscriber->safe_psql('postgres', q{SELECT b FROM tab1}),
+ qq(bar), 'update with primary key with nondeterministic collation');
+
+# test with replica identity full
+
+$node_publisher->safe_psql('postgres',
+ q{UPDATE tab2 SET b = 'bar' WHERE b = 'foo'});
+
+$node_publisher->wait_for_catchup('sub1');
+
+is($node_subscriber->safe_psql('postgres', q{SELECT b FROM tab2}),
+ qq(bar),
+ 'update with replica identity full with nondeterministic collation');
+
+done_testing();
diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl
new file mode 100644
index 0000000..275fb3b
--- /dev/null
+++ b/src/test/subscription/t/013_partition.pl
@@ -0,0 +1,889 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test logical replication with partitioned tables
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# setup
+
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+my $node_subscriber1 = PostgreSQL::Test::Cluster->new('subscriber1');
+$node_subscriber1->init(allows_streaming => 'logical');
+$node_subscriber1->start;
+
+my $node_subscriber2 = PostgreSQL::Test::Cluster->new('subscriber2');
+$node_subscriber2->init(allows_streaming => 'logical');
+$node_subscriber2->start;
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+
+# publisher
+$node_publisher->safe_psql('postgres', "CREATE PUBLICATION pub1");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub_all FOR ALL TABLES");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab1 (a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab1_1 (b text, a int NOT NULL)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab1 ATTACH PARTITION tab1_1 FOR VALUES IN (1, 2, 3)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab1_2 PARTITION OF tab1 FOR VALUES IN (4, 5, 6)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab1_def PARTITION OF tab1 DEFAULT");
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION pub1 ADD TABLE tab1, tab1_1");
+
+# subscriber1
+#
+# This is partitioned differently from the publisher. tab1_2 is
+# subpartitioned. This tests the tuple routing code on the
+# subscriber.
+$node_subscriber1->safe_psql('postgres',
+ "CREATE TABLE tab1 (c text, a int PRIMARY KEY, b text) PARTITION BY LIST (a)"
+);
+$node_subscriber1->safe_psql('postgres',
+ "CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)"
+);
+$node_subscriber1->safe_psql('postgres',
+ "ALTER TABLE tab1 ATTACH PARTITION tab1_1 FOR VALUES IN (1, 2, 3)");
+$node_subscriber1->safe_psql('postgres',
+ "CREATE TABLE tab1_2 PARTITION OF tab1 (c DEFAULT 'sub1_tab1') FOR VALUES IN (4, 5, 6) PARTITION BY LIST (a)"
+);
+$node_subscriber1->safe_psql('postgres',
+ "CREATE TABLE tab1_2_1 (c text, b text, a int NOT NULL)");
+$node_subscriber1->safe_psql('postgres',
+ "ALTER TABLE tab1_2 ATTACH PARTITION tab1_2_1 FOR VALUES IN (5)");
+$node_subscriber1->safe_psql('postgres',
+ "CREATE TABLE tab1_2_2 PARTITION OF tab1_2 FOR VALUES IN (4, 6)");
+$node_subscriber1->safe_psql('postgres',
+ "CREATE TABLE tab1_def PARTITION OF tab1 (c DEFAULT 'sub1_tab1') DEFAULT"
+);
+$node_subscriber1->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1"
+);
+
+# Add set of AFTER replica triggers for testing that they are fired
+# correctly. This uses a table that records details of all trigger
+# activities. Triggers are marked as enabled for a subset of the
+# partition tree.
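+# (The apply worker runs with session_replication_role = replica, so only
+# triggers marked ENABLE REPLICA or ENABLE ALWAYS fire during apply; hence
+# the ALTER TABLE ... ENABLE REPLICA TRIGGER statements below.)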
+$node_subscriber1->safe_psql(
+ 'postgres', qq{
+CREATE TABLE sub1_trigger_activity (tgtab text, tgop text,
+ tgwhen text, tglevel text, olda int, newa int);
+CREATE FUNCTION sub1_trigger_activity_func() RETURNS TRIGGER AS \$\$
+BEGIN
+ IF (TG_OP = 'INSERT') THEN
+ INSERT INTO public.sub1_trigger_activity
+ SELECT TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL, NULL, NEW.a;
+ ELSIF (TG_OP = 'UPDATE') THEN
+ INSERT INTO public.sub1_trigger_activity
+ SELECT TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL, OLD.a, NEW.a;
+ END IF;
+ RETURN NULL;
+END;
+\$\$ LANGUAGE plpgsql;
+CREATE TRIGGER sub1_tab1_log_op_trigger
+ AFTER INSERT OR UPDATE ON tab1
+ FOR EACH ROW EXECUTE PROCEDURE sub1_trigger_activity_func();
+ALTER TABLE ONLY tab1 ENABLE REPLICA TRIGGER sub1_tab1_log_op_trigger;
+CREATE TRIGGER sub1_tab1_2_log_op_trigger
+ AFTER INSERT OR UPDATE ON tab1_2
+ FOR EACH ROW EXECUTE PROCEDURE sub1_trigger_activity_func();
+ALTER TABLE ONLY tab1_2 ENABLE REPLICA TRIGGER sub1_tab1_2_log_op_trigger;
+CREATE TRIGGER sub1_tab1_2_2_log_op_trigger
+ AFTER INSERT OR UPDATE ON tab1_2_2
+ FOR EACH ROW EXECUTE PROCEDURE sub1_trigger_activity_func();
+ALTER TABLE ONLY tab1_2_2 ENABLE REPLICA TRIGGER sub1_tab1_2_2_log_op_trigger;
+});
+
+# subscriber 2
+#
+# This does not use partitioning. The tables match the leaf tables on
+# the publisher.
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1', b text)"
+);
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab1_1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1_1', b text)"
+);
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab1_2 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1_2', b text)"
+);
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab1_def (a int PRIMARY KEY, b text, c text DEFAULT 'sub2_tab1_def')"
+);
+$node_subscriber2->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr' PUBLICATION pub_all"
+);
+
+# Add set of AFTER replica triggers for testing that they are fired
+# correctly, using the same method as the first subscriber.
+$node_subscriber2->safe_psql(
+ 'postgres', qq{
+CREATE TABLE sub2_trigger_activity (tgtab text,
+ tgop text, tgwhen text, tglevel text, olda int, newa int);
+CREATE FUNCTION sub2_trigger_activity_func() RETURNS TRIGGER AS \$\$
+BEGIN
+ IF (TG_OP = 'INSERT') THEN
+ INSERT INTO public.sub2_trigger_activity
+ SELECT TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL, NULL, NEW.a;
+ ELSIF (TG_OP = 'UPDATE') THEN
+ INSERT INTO public.sub2_trigger_activity
+ SELECT TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL, OLD.a, NEW.a;
+ END IF;
+ RETURN NULL;
+END;
+\$\$ LANGUAGE plpgsql;
+CREATE TRIGGER sub2_tab1_log_op_trigger
+ AFTER INSERT OR UPDATE ON tab1
+ FOR EACH ROW EXECUTE PROCEDURE sub2_trigger_activity_func();
+ALTER TABLE ONLY tab1 ENABLE REPLICA TRIGGER sub2_tab1_log_op_trigger;
+CREATE TRIGGER sub2_tab1_2_log_op_trigger
+ AFTER INSERT OR UPDATE ON tab1_2
+ FOR EACH ROW EXECUTE PROCEDURE sub2_trigger_activity_func();
+ALTER TABLE ONLY tab1_2 ENABLE REPLICA TRIGGER sub2_tab1_2_log_op_trigger;
+});
+
+# Wait for initial sync of all subscriptions
+$node_subscriber1->wait_for_subscription_sync;
+$node_subscriber2->wait_for_subscription_sync;
+
+# Tests for replication using leaf partition identity and schema
+
+# insert
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1_1 (a) VALUES (3)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1_2 VALUES (5)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (0)");
+
+$node_publisher->wait_for_catchup('sub1');
+$node_publisher->wait_for_catchup('sub2');
+
+my $result = $node_subscriber1->safe_psql('postgres',
+ "SELECT c, a FROM tab1 ORDER BY 1, 2");
+is( $result, qq(sub1_tab1|0
+sub1_tab1|1
+sub1_tab1|3
+sub1_tab1|5), 'inserts into tab1 and its partitions replicated');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT a FROM tab1_2_1 ORDER BY 1");
+is($result, qq(5), 'inserts into tab1_2 replicated into tab1_2_1 correctly');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT a FROM tab1_2_2 ORDER BY 1");
+is($result, qq(), 'inserts into tab1_2 replicated into tab1_2_2 correctly');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1_1 ORDER BY 1, 2");
+is( $result, qq(sub2_tab1_1|1
+sub2_tab1_1|3), 'inserts into tab1_1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1_2 ORDER BY 1, 2");
+is($result, qq(sub2_tab1_2|5), 'inserts into tab1_2 replicated');
+
+# The AFTER trigger of tab1_2 should have recorded one INSERT.
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT * FROM sub2_trigger_activity ORDER BY tgtab, tgop, tgwhen, olda, newa;"
+);
+is( $result,
+ qq(tab1_2|INSERT|AFTER|ROW||5),
+ 'check replica insert after trigger applied on subscriber');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1_def ORDER BY 1, 2");
+is($result, qq(sub2_tab1_def|0), 'inserts into tab1_def replicated');
+
+# update (replicated as update)
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 2 WHERE a = 1");
+# All of the following cause an update to be applied to a partitioned
+# table on subscriber1: tab1_2 is a leaf partition on the publisher, whereas
+# it is sub-partitioned on subscriber1.
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 5");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 4 WHERE a = 6");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 4");
+
+$node_publisher->wait_for_catchup('sub1');
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT c, a FROM tab1 ORDER BY 1, 2");
+is( $result, qq(sub1_tab1|0
+sub1_tab1|2
+sub1_tab1|3
+sub1_tab1|6), 'update of tab1_1, tab1_2 replicated');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT a FROM tab1_2_1 ORDER BY 1");
+is($result, qq(), 'updates of tab1_2 replicated into tab1_2_1 correctly');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT a FROM tab1_2_2 ORDER BY 1");
+is($result, qq(6), 'updates of tab1_2 replicated into tab1_2_2 correctly');
+
+# The AFTER trigger should have recorded the UPDATEs of tab1_2_2.
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT * FROM sub1_trigger_activity ORDER BY tgtab, tgop, tgwhen, olda, newa;"
+);
+is( $result, qq(tab1_2_2|INSERT|AFTER|ROW||6
+tab1_2_2|UPDATE|AFTER|ROW|4|6
+tab1_2_2|UPDATE|AFTER|ROW|6|4),
+ 'check replica update after trigger applied on subscriber');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1_1 ORDER BY 1, 2");
+is( $result, qq(sub2_tab1_1|2
+sub2_tab1_1|3), 'update of tab1_1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1_2 ORDER BY 1, 2");
+is($result, qq(sub2_tab1_2|6), 'tab1_2 updated');
+
+# The AFTER trigger should have recorded the updates of tab1_2.
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT * FROM sub2_trigger_activity ORDER BY tgtab, tgop, tgwhen, olda, newa;"
+);
+is( $result, qq(tab1_2|INSERT|AFTER|ROW||5
+tab1_2|UPDATE|AFTER|ROW|4|6
+tab1_2|UPDATE|AFTER|ROW|5|6
+tab1_2|UPDATE|AFTER|ROW|6|4),
+ 'check replica update after trigger applied on subscriber');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1_def ORDER BY 1");
+is($result, qq(sub2_tab1_def|0), 'tab1_def unchanged');
+
+# update (replicated as delete+insert)
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 1 WHERE a = 0");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 4 WHERE a = 1");
+
+$node_publisher->wait_for_catchup('sub1');
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT c, a FROM tab1 ORDER BY 1, 2");
+is( $result, qq(sub1_tab1|2
+sub1_tab1|3
+sub1_tab1|4
+sub1_tab1|6),
+ 'update of tab1 (delete from tab1_def + insert into tab1_1) replicated');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT a FROM tab1_2_2 ORDER BY 1");
+is( $result, qq(4
+6), 'updates of tab1 (delete + insert) replicated into tab1_2_2 correctly');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1_1 ORDER BY 1, 2");
+is( $result, qq(sub2_tab1_1|2
+sub2_tab1_1|3), 'tab1_1 unchanged');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1_2 ORDER BY 1, 2");
+is( $result, qq(sub2_tab1_2|4
+sub2_tab1_2|6), 'insert into tab1_2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT a FROM tab1_def ORDER BY 1");
+is($result, qq(), 'delete from tab1_def replicated');
+
+# delete
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM tab1 WHERE a IN (2, 3, 5)");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab1_2");
+
+$node_publisher->wait_for_catchup('sub1');
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab1");
+is($result, qq(), 'delete from tab1_1, tab1_2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1_1");
+is($result, qq(), 'delete from tab1_1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1_2");
+is($result, qq(), 'delete from tab1_2 replicated');
+
+# truncate
+$node_subscriber1->safe_psql('postgres',
+ "INSERT INTO tab1 (a) VALUES (1), (2), (5)");
+$node_subscriber2->safe_psql('postgres', "INSERT INTO tab1_2 (a) VALUES (2)");
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1_2");
+
+$node_publisher->wait_for_catchup('sub1');
+$node_publisher->wait_for_catchup('sub2');
+
+$result =
+ $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1");
+is( $result, qq(1
+2), 'truncate of tab1_2 replicated');
+
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1_2 ORDER BY 1");
+is($result, qq(), 'truncate of tab1_2 replicated');
+
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1");
+
+$node_publisher->wait_for_catchup('sub1');
+$node_publisher->wait_for_catchup('sub2');
+
+$result =
+ $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1");
+is($result, qq(), 'truncate of tab1_1 replicated');
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1");
+is($result, qq(), 'truncate of tab1 replicated');
+
+# Check that subscriber handles cases where update/delete target tuple
+# is missing. We have to look for the DEBUG1 log messages about that,
+# so temporarily bump up the log verbosity.
+$node_subscriber1->append_conf('postgresql.conf',
+ "log_min_messages = debug1");
+$node_subscriber1->reload;
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab1 VALUES (1, 'foo'), (4, 'bar'), (10, 'baz')");
+
+$node_publisher->wait_for_catchup('sub1');
+$node_publisher->wait_for_catchup('sub2');
+
+$node_subscriber1->safe_psql('postgres', "DELETE FROM tab1");
+
+# Note that the current location of the log file is not grabbed immediately
+# after reloading the configuration, but only after sending one SQL command to
+# the node, so that we are sure the reload has taken effect.
+my $log_location = -s $node_subscriber1->logfile;
+
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab1 SET b = 'quux' WHERE a = 4");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab1");
+
+$node_publisher->wait_for_catchup('sub1');
+$node_publisher->wait_for_catchup('sub2');
+
+my $logfile = slurp_file($node_subscriber1->logfile(), $log_location);
+ok( $logfile =~
+ qr/logical replication did not find row to be updated in replication target relation's partition "tab1_2_2"/,
+ 'update target row is missing in tab1_2_2');
+ok( $logfile =~
+ qr/logical replication did not find row to be deleted in replication target relation "tab1_1"/,
+ 'delete target row is missing in tab1_1');
+ok( $logfile =~
+ qr/logical replication did not find row to be deleted in replication target relation "tab1_2_2"/,
+ 'delete target row is missing in tab1_2_2');
+ok( $logfile =~
+ qr/logical replication did not find row to be deleted in replication target relation "tab1_def"/,
+ 'delete target row is missing in tab1_def');
+
+$node_subscriber1->append_conf('postgresql.conf',
+ "log_min_messages = warning");
+$node_subscriber1->reload;
+
+# Tests for replication using root table identity and schema
+
+# publisher
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION pub1");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab2 (a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab2_1 (b text, a int NOT NULL)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab2 ATTACH PARTITION tab2_1 FOR VALUES IN (0, 1, 2, 3)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab2_2 PARTITION OF tab2 FOR VALUES IN (5, 6)");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab3 (a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab3_1 PARTITION OF tab3 FOR VALUES IN (0, 1, 2, 3, 5, 6)");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab4 (a int PRIMARY KEY) PARTITION BY LIST (a)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab4_1 PARTITION OF tab4 FOR VALUES IN (-1, 0, 1) PARTITION BY LIST (a)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab4_1_1 PARTITION OF tab4_1 FOR VALUES IN (-1, 0, 1)");
+
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION pub_all SET (publish_via_partition_root = true)");
+# Note: tab3_1's parent is not in the publication, so its changes are
+# published using its own identity. For tab2, even though both the parent and
+# the child table are present, changes are replicated via the parent's
+# identity and only once.
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub_viaroot FOR TABLE tab2, tab2_1, tab3_1 WITH (publish_via_partition_root = true)"
+);
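+# Which relation a publication actually exposes can be checked with the
+# pg_publication_tables view; with publish_via_partition_root enabled it
+# lists the topmost published ancestor rather than the leaf, e.g. something
+# like
+#   SELECT pubname, tablename FROM pg_publication_tables
+#     WHERE pubname = 'pub_viaroot' ORDER BY 1, 2;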
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub_lower_level FOR TABLE tab4_1 WITH (publish_via_partition_root = true)"
+);
+
+# prepare data for the initial sync
+$node_publisher->safe_psql('postgres', "INSERT INTO tab2 VALUES (1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab4 VALUES (-1)");
+
+# subscriber 1
+$node_subscriber1->safe_psql('postgres', "DROP SUBSCRIPTION sub1");
+$node_subscriber1->safe_psql('postgres',
+ "CREATE TABLE tab2 (a int PRIMARY KEY, c text DEFAULT 'sub1_tab2', b text) PARTITION BY RANGE (a)"
+);
+$node_subscriber1->safe_psql('postgres',
+ "CREATE TABLE tab2_1 (c text DEFAULT 'sub1_tab2', b text, a int NOT NULL)"
+);
+$node_subscriber1->safe_psql('postgres',
+ "ALTER TABLE tab2 ATTACH PARTITION tab2_1 FOR VALUES FROM (0) TO (10)");
+$node_subscriber1->safe_psql('postgres',
+ "CREATE TABLE tab3_1 (c text DEFAULT 'sub1_tab3_1', b text, a int NOT NULL PRIMARY KEY)"
+);
+$node_subscriber1->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub_viaroot CONNECTION '$publisher_connstr' PUBLICATION pub_viaroot"
+);
+
+# subscriber 2
+$node_subscriber2->safe_psql('postgres', "DROP TABLE tab1");
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1', b text) PARTITION BY HASH (a)"
+);
+# Note: tab1's partitions are named tab1_1 and tab1_2 on the publisher.
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab1_part1 (b text, c text, a int NOT NULL)");
+$node_subscriber2->safe_psql('postgres',
+ "ALTER TABLE tab1 ATTACH PARTITION tab1_part1 FOR VALUES WITH (MODULUS 2, REMAINDER 0)"
+);
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab1_part2 PARTITION OF tab1 FOR VALUES WITH (MODULUS 2, REMAINDER 1)"
+);
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab2 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab2', b text)"
+);
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab3 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab3', b text)"
+);
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab3_1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab3_1', b text)"
+);
+
+# Note: We create two separate tables, not a partitioned one, so that we can
+# easily identify through which relation the changes were replicated.
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab4 (a int PRIMARY KEY)");
+$node_subscriber2->safe_psql('postgres',
+ "CREATE TABLE tab4_1 (a int PRIMARY KEY)");
+# Since we specified publish_via_partition_root in pub_all and
+# pub_lower_level, all partition tables use their root tables' identity and
+# schema. We set the list of publications so that the FOR ALL TABLES
+# publication is second (the list order matters).
+$node_subscriber2->safe_psql('postgres',
+ "ALTER SUBSCRIPTION sub2 SET PUBLICATION pub_lower_level, pub_all");
+
+# Wait for initial sync of all subscriptions
+$node_subscriber1->wait_for_subscription_sync;
+$node_subscriber2->wait_for_subscription_sync;
+
+# check that data is synced correctly
+$result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab2");
+is($result, qq(sub1_tab2|1), 'initial data synced for pub_viaroot');
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4 ORDER BY 1");
+is($result, qq(-1), 'initial data synced for pub_lower_level and pub_all');
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4_1 ORDER BY 1");
+is($result, qq(), 'initial data synced for pub_lower_level and pub_all');
+
+# insert
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1), (0)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1_1 (a) VALUES (3)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1_2 VALUES (5)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab2 VALUES (0), (3), (5)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab3 VALUES (1), (0), (3), (5)");
+
+# Insert a row into the leaf partition; it should be replicated through the
+# partition root (thanks to the FOR ALL TABLES publication).
+$node_publisher->safe_psql('postgres', "INSERT INTO tab4 VALUES (0)");
+
+$node_publisher->wait_for_catchup('sub_viaroot');
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT c, a FROM tab2 ORDER BY 1, 2");
+is( $result, qq(sub1_tab2|0
+sub1_tab2|1
+sub1_tab2|3
+sub1_tab2|5), 'inserts into tab2 replicated');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT c, a FROM tab3_1 ORDER BY 1, 2");
+is( $result, qq(sub1_tab3_1|0
+sub1_tab3_1|1
+sub1_tab3_1|3
+sub1_tab3_1|5), 'inserts into tab3_1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1 ORDER BY 1, 2");
+is( $result, qq(sub2_tab1|0
+sub2_tab1|1
+sub2_tab1|3
+sub2_tab1|5), 'inserts into tab1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab2 ORDER BY 1, 2");
+is( $result, qq(sub2_tab2|0
+sub2_tab2|1
+sub2_tab2|3
+sub2_tab2|5), 'inserts into tab2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab3 ORDER BY 1, 2");
+is( $result, qq(sub2_tab3|0
+sub2_tab3|1
+sub2_tab3|3
+sub2_tab3|5), 'inserts into tab3 replicated');
+
+# The tab4 change should be replicated through the root table, which
+# maps to the tab4 relation on the subscriber.
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4 ORDER BY 1");
+is( $result, qq(-1
+0), 'inserts into tab4 replicated');
+
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4_1 ORDER BY 1");
+is($result, qq(), 'inserts into tab4_1 replicated');
+
+
+# now switch the order of publications in the list and try again; the result
+# should be the same (no dependence on the order of publications)
+$node_subscriber2->safe_psql('postgres',
+ "ALTER SUBSCRIPTION sub2 SET PUBLICATION pub_all, pub_lower_level");
+
+# make sure the subscription on the second subscriber is synced, before
+# continuing
+$node_subscriber2->wait_for_subscription_sync;
+
+# Insert a change into the leaf partition; it should be replicated through
+# the partition root (thanks to the FOR ALL TABLES publication).
+$node_publisher->safe_psql('postgres', "INSERT INTO tab4 VALUES (1)");
+
+$node_publisher->wait_for_catchup('sub2');
+
+# The tab4 change should be replicated through the root table, which
+# maps to the tab4 relation on the subscriber.
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4 ORDER BY 1");
+is( $result, qq(-1
+0
+1), 'inserts into tab4 replicated');
+
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4_1 ORDER BY 1");
+is($result, qq(), 'inserts into tab4_1 replicated');
+
+
+# update (replicated as update)
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 5");
+$node_publisher->safe_psql('postgres', "UPDATE tab2 SET a = 6 WHERE a = 5");
+$node_publisher->safe_psql('postgres', "UPDATE tab3 SET a = 6 WHERE a = 5");
+
+$node_publisher->wait_for_catchup('sub_viaroot');
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT c, a FROM tab2 ORDER BY 1, 2");
+is( $result, qq(sub1_tab2|0
+sub1_tab2|1
+sub1_tab2|3
+sub1_tab2|6), 'update of tab2 replicated');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT c, a FROM tab3_1 ORDER BY 1, 2");
+is( $result, qq(sub1_tab3_1|0
+sub1_tab3_1|1
+sub1_tab3_1|3
+sub1_tab3_1|6), 'update of tab3_1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1 ORDER BY 1, 2");
+is( $result, qq(sub2_tab1|0
+sub2_tab1|1
+sub2_tab1|3
+sub2_tab1|6), 'inserts into tab1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab2 ORDER BY 1, 2");
+is( $result, qq(sub2_tab2|0
+sub2_tab2|1
+sub2_tab2|3
+sub2_tab2|6), 'inserts into tab2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab3 ORDER BY 1, 2");
+is( $result, qq(sub2_tab3|0
+sub2_tab3|1
+sub2_tab3|3
+sub2_tab3|6), 'inserts into tab3 replicated');
+
+# update (replicated as delete+insert)
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 2 WHERE a = 6");
+$node_publisher->safe_psql('postgres', "UPDATE tab2 SET a = 2 WHERE a = 6");
+$node_publisher->safe_psql('postgres', "UPDATE tab3 SET a = 2 WHERE a = 6");
+
+$node_publisher->wait_for_catchup('sub_viaroot');
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT c, a FROM tab2 ORDER BY 1, 2");
+is( $result, qq(sub1_tab2|0
+sub1_tab2|1
+sub1_tab2|2
+sub1_tab2|3), 'update of tab2 replicated');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT c, a FROM tab3_1 ORDER BY 1, 2");
+is( $result, qq(sub1_tab3_1|0
+sub1_tab3_1|1
+sub1_tab3_1|2
+sub1_tab3_1|3), 'update of tab3_1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab1 ORDER BY 1, 2");
+is( $result, qq(sub2_tab1|0
+sub2_tab1|1
+sub2_tab1|2
+sub2_tab1|3), 'update of tab1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab2 ORDER BY 1, 2");
+is( $result, qq(sub2_tab2|0
+sub2_tab2|1
+sub2_tab2|2
+sub2_tab2|3), 'update of tab2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a FROM tab3 ORDER BY 1, 2");
+is( $result, qq(sub2_tab3|0
+sub2_tab3|1
+sub2_tab3|2
+sub2_tab3|3), 'update of tab3 replicated');
+
+# delete
+$node_publisher->safe_psql('postgres', "DELETE FROM tab1");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab2");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab3");
+
+$node_publisher->wait_for_catchup('sub_viaroot');
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab2");
+is($result, qq(), 'delete tab2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1");
+is($result, qq(), 'delete from tab1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab2");
+is($result, qq(), 'delete from tab2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3");
+is($result, qq(), 'delete from tab3 replicated');
+
+# truncate
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab1 VALUES (1), (2), (5)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab2 VALUES (1), (2), (5)");
+# these will NOT be replicated
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1_2, tab2_1, tab3_1");
+
+$node_publisher->wait_for_catchup('sub_viaroot');
+$node_publisher->wait_for_catchup('sub2');
+
+$result =
+ $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab2 ORDER BY 1");
+is( $result, qq(1
+2
+5), 'truncate of tab2_1 NOT replicated');
+
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1");
+is( $result, qq(1
+2
+5), 'truncate of tab1_2 NOT replicated');
+
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab2 ORDER BY 1");
+is( $result, qq(1
+2
+5), 'truncate of tab2_1 NOT replicated');
+
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1, tab2, tab3");
+
+$node_publisher->wait_for_catchup('sub_viaroot');
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab2");
+is($result, qq(), 'truncate of tab2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1");
+is($result, qq(), 'truncate of tab1 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab2");
+is($result, qq(), 'truncate of tab2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3");
+is($result, qq(), 'truncate of tab3 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3_1");
+is($result, qq(), 'truncate of tab3_1 replicated');
+
+# check that the map to convert tuples from leaf partition to the root
+# table is correctly rebuilt when a new column is added
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab2 DROP b, ADD COLUMN c text DEFAULT 'pub_tab2', ADD b text"
+);
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab2 (a, b) VALUES (1, 'xxx'), (3, 'yyy'), (5, 'zzz')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab2 (a, b, c) VALUES (6, 'aaa', 'xxx_c')");
+
+$node_publisher->wait_for_catchup('sub_viaroot');
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber1->safe_psql('postgres',
+ "SELECT c, a, b FROM tab2 ORDER BY 1, 2");
+is( $result, qq(pub_tab2|1|xxx
+pub_tab2|3|yyy
+pub_tab2|5|zzz
+xxx_c|6|aaa), 'inserts into tab2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT c, a, b FROM tab2 ORDER BY 1, 2");
+is( $result, qq(pub_tab2|1|xxx
+pub_tab2|3|yyy
+pub_tab2|5|zzz
+xxx_c|6|aaa), 'inserts into tab2 replicated');
+
+# Check that subscriber handles cases where update/delete target tuple
+# is missing. We have to look for the DEBUG1 log messages about that,
+# so temporarily bump up the log verbosity.
+$node_subscriber1->append_conf('postgresql.conf',
+ "log_min_messages = debug1");
+$node_subscriber1->reload;
+
+$node_subscriber1->safe_psql('postgres', "DELETE FROM tab2");
+
+# Note that the current location of the log file is grabbed not immediately
+# after reloading the configuration, but only after sending one SQL command
+# to the node, so that we can be sure the reload has taken effect.
+$log_location = -s $node_subscriber1->logfile;
+
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab2 SET b = 'quux' WHERE a = 5");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab2 WHERE a = 1");
+
+$node_publisher->wait_for_catchup('sub_viaroot');
+$node_publisher->wait_for_catchup('sub2');
+
+$logfile = slurp_file($node_subscriber1->logfile(), $log_location);
+ok( $logfile =~
+ qr/logical replication did not find row to be updated in replication target relation's partition "tab2_1"/,
+ 'update target row is missing in tab2_1');
+ok( $logfile =~
+ qr/logical replication did not find row to be deleted in replication target relation "tab2_1"/,
+ 'delete target row is missing in tab2_1');
+
+$node_subscriber1->append_conf('postgresql.conf',
+ "log_min_messages = warning");
+$node_subscriber1->reload;
+
+# Test that replication continues to work correctly after altering the
+# partition of a partitioned target table.
+
+$node_publisher->safe_psql(
+ 'postgres', q{
+ CREATE TABLE tab5 (a int NOT NULL, b int);
+ CREATE UNIQUE INDEX tab5_a_idx ON tab5 (a);
+ ALTER TABLE tab5 REPLICA IDENTITY USING INDEX tab5_a_idx;});
+
+$node_subscriber2->safe_psql(
+ 'postgres', q{
+ CREATE TABLE tab5 (a int NOT NULL, b int, c int) PARTITION BY LIST (a);
+ CREATE TABLE tab5_1 PARTITION OF tab5 DEFAULT;
+ CREATE UNIQUE INDEX tab5_a_idx ON tab5 (a);
+ ALTER TABLE tab5 REPLICA IDENTITY USING INDEX tab5_a_idx;
+ ALTER TABLE tab5_1 REPLICA IDENTITY USING INDEX tab5_1_a_idx;});
+
+$node_subscriber2->safe_psql('postgres',
+ "ALTER SUBSCRIPTION sub2 REFRESH PUBLICATION");
+
+$node_subscriber2->wait_for_subscription_sync;
+
+# Insert and update a row so that the apply worker builds and caches the
+# partition map for tab5
+$node_publisher->safe_psql('postgres', "INSERT INTO tab5 VALUES (1, 1)");
+$node_publisher->safe_psql('postgres', "UPDATE tab5 SET a = 2 WHERE a = 1");
+
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT a, b FROM tab5 ORDER BY 1");
+is($result, qq(2|1), 'updates of tab5 replicated correctly');
+
+# Change the column order of partition on subscriber
+$node_subscriber2->safe_psql(
+ 'postgres', q{
+ ALTER TABLE tab5 DETACH PARTITION tab5_1;
+ ALTER TABLE tab5_1 DROP COLUMN b;
+ ALTER TABLE tab5_1 ADD COLUMN b int;
+ ALTER TABLE tab5 ATTACH PARTITION tab5_1 DEFAULT});
+
+$node_publisher->safe_psql('postgres', "UPDATE tab5 SET a = 3 WHERE a = 2");
+
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT a, b, c FROM tab5 ORDER BY 1");
+is($result, qq(3|1|),
+ 'updates of tab5 replicated correctly after altering table on subscriber'
+);
+
+# Test that replication into the partitioned target table continues to
+# work correctly when the published table is altered.
+$node_publisher->safe_psql(
+ 'postgres', q{
+ ALTER TABLE tab5 DROP COLUMN b, ADD COLUMN c INT;
+ ALTER TABLE tab5 ADD COLUMN b INT;});
+
+$node_publisher->safe_psql('postgres', "UPDATE tab5 SET c = 1 WHERE a = 3");
+
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT a, b, c FROM tab5 ORDER BY 1");
+is($result, qq(3||1),
+ 'updates of tab5 replicated correctly after altering table on publisher');
+
+# Test that replication works correctly as long as the leaf partition
+# has the necessary REPLICA IDENTITY, even though the actual target
+# partitioned table does not.
+$node_subscriber2->safe_psql('postgres',
+ "ALTER TABLE tab5 REPLICA IDENTITY NOTHING");
+
+$node_publisher->safe_psql('postgres', "UPDATE tab5 SET a = 4 WHERE a = 3");
+
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber2->safe_psql('postgres',
+ "SELECT a, b, c FROM tab5_1 ORDER BY 1");
+is($result, qq(4||1), 'updates of tab5 replicated correctly');
+
+done_testing();
diff --git a/src/test/subscription/t/014_binary.pl b/src/test/subscription/t/014_binary.pl
new file mode 100644
index 0000000..e5ce849
--- /dev/null
+++ b/src/test/subscription/t/014_binary.pl
@@ -0,0 +1,296 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Binary mode logical replication test
+
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Create and initialize a publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# Create and initialize subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create tables on both sides of the replication
+my $ddl = qq(
+ CREATE TABLE public.test_numerical (
+ a INTEGER PRIMARY KEY,
+ b NUMERIC,
+ c FLOAT,
+ d BIGINT
+ );
+ CREATE TABLE public.test_arrays (
+ a INTEGER[] PRIMARY KEY,
+ b NUMERIC[],
+ c TEXT[]
+ ););
+
+$node_publisher->safe_psql('postgres', $ddl);
+$node_subscriber->safe_psql('postgres', $ddl);
+
+# Configure logical replication
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tpub FOR ALL TABLES");
+
+# ------------------------------------------------------
+# Ensure binary mode also executes COPY in binary format
+# ------------------------------------------------------
+
+# Insert some content before creating a subscription
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO public.test_numerical (a, b, c, d) VALUES
+ (1, 1.2, 1.3, 10),
+ (2, 2.2, 2.3, 20);
+ INSERT INTO public.test_arrays (a, b, c) VALUES
+ ('{1,2,3}', '{1.1, 1.2, 1.3}', '{"one", "two", "three"}'),
+ ('{3,1,2}', '{1.3, 1.1, 1.2}', '{"three", "one", "two"}');
+ ));
+
+my $publisher_connstring = $node_publisher->connstr . ' dbname=postgres';
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tsub CONNECTION '$publisher_connstring' "
+ . "PUBLICATION tpub WITH (slot_name = tpub_slot, binary = true)");
+
+# Ensure the COPY command is executed in binary format on the publisher
+$node_publisher->wait_for_log(
+ qr/LOG: ( [A-Z0-9]+:)? statement: COPY (.+)? TO STDOUT WITH \(FORMAT binary\)/
+);
+
+# Ensure nodes are in sync with each other
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tsub');
+
+my $sync_check = qq(
+ SELECT a, b, c, d FROM test_numerical ORDER BY a;
+ SELECT a, b, c FROM test_arrays ORDER BY a;
+);
+
+# Check the synced data on the subscriber
+my $result = $node_subscriber->safe_psql('postgres', $sync_check);
+
+is( $result, '1|1.2|1.3|10
+2|2.2|2.3|20
+{1,2,3}|{1.1,1.2,1.3}|{one,two,three}
+{3,1,2}|{1.3,1.1,1.2}|{three,one,two}', 'check synced data on subscriber');
+
+# ----------------------------------
+# Ensure apply works in binary mode
+# ----------------------------------
+
+# Insert some content and make sure it's replicated across
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO public.test_arrays (a, b, c) VALUES
+ ('{2,1,3}', '{1.2, 1.1, 1.3}', '{"two", "one", "three"}'),
+ ('{1,3,2}', '{1.1, 1.3, 1.2}', '{"one", "three", "two"}');
+
+ INSERT INTO public.test_numerical (a, b, c, d) VALUES
+ (3, 3.2, 3.3, 30),
+ (4, 4.2, 4.3, 40);
+ ));
+
+$node_publisher->wait_for_catchup('tsub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a, b, c, d FROM test_numerical ORDER BY a");
+
+is( $result, '1|1.2|1.3|10
+2|2.2|2.3|20
+3|3.2|3.3|30
+4|4.2|4.3|40', 'check replicated data on subscriber');
+
+# Test updates as well
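+# Modify an array element and set some columns to NULL to exercise binary
+# transfer of updated and NULL values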
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ UPDATE public.test_arrays SET b[1] = 42, c = NULL;
+ UPDATE public.test_numerical SET b = 42, c = NULL;
+ ));
+
+$node_publisher->wait_for_catchup('tsub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a, b, c FROM test_arrays ORDER BY a");
+
+is( $result, '{1,2,3}|{42,1.2,1.3}|
+{1,3,2}|{42,1.3,1.2}|
+{2,1,3}|{42,1.1,1.3}|
+{3,1,2}|{42,1.1,1.2}|', 'check updated replicated data on subscriber');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a, b, c, d FROM test_numerical ORDER BY a");
+
+is( $result, '1|42||10
+2|42||20
+3|42||30
+4|42||40', 'check updated replicated data on subscriber');
+
+# ------------------------------------------------------------------------------
+# Use ALTER SUBSCRIPTION to change to text format and then back to binary format
+# ------------------------------------------------------------------------------
+
+# Reset back to text format, and then to binary again
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tsub SET (binary = false);");
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO public.test_numerical (a, b, c, d) VALUES
+ (5, 5.2, 5.3, 50);
+ ));
+
+$node_publisher->wait_for_catchup('tsub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a, b, c, d FROM test_numerical ORDER BY a");
+
+is( $result, '1|42||10
+2|42||20
+3|42||30
+4|42||40
+5|5.2|5.3|50', 'check replicated data on subscriber');
+
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tsub SET (binary = true);");
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO public.test_arrays (a, b, c) VALUES
+ ('{2,3,1}', '{1.2, 1.3, 1.1}', '{"two", "three", "one"}');
+ ));
+
+$node_publisher->wait_for_catchup('tsub');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a, b, c FROM test_arrays ORDER BY a");
+
+is( $result, '{1,2,3}|{42,1.2,1.3}|
+{1,3,2}|{42,1.3,1.2}|
+{2,1,3}|{42,1.1,1.3}|
+{2,3,1}|{1.2,1.3,1.1}|{two,three,one}
+{3,1,2}|{42,1.1,1.2}|', 'check replicated data on subscriber');
+
+# ---------------------------------------------------------------
+# Test binary replication without and with send/receive functions
+# ---------------------------------------------------------------
+
+# Create a custom type without send/rcv functions
+$ddl = qq(
+ CREATE TYPE myvarchar;
+ CREATE FUNCTION myvarcharin(cstring, oid, integer) RETURNS myvarchar
+ LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharin';
+ CREATE FUNCTION myvarcharout(myvarchar) RETURNS cstring
+ LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharout';
+ CREATE TYPE myvarchar (
+ input = myvarcharin,
+ output = myvarcharout);
+ CREATE TABLE public.test_myvarchar (
+ a myvarchar
+ ););
+
+$node_publisher->safe_psql('postgres', $ddl);
+$node_subscriber->safe_psql('postgres', $ddl);
+
+# Insert some initial data
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO public.test_myvarchar (a) VALUES
+ ('a');
+ ));
+
+# Check the subscriber log from now on.
+my $offset = -s $node_subscriber->logfile;
+
+# Refresh the publication to trigger the tablesync
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tsub REFRESH PUBLICATION");
+
+# The tablesync should fail: with binary = true the subscriber needs a binary
+# input (receive) function for every replicated type, and myvarchar does not
+# have one yet
+$node_subscriber->wait_for_log(
+ qr/ERROR: ( [A-Z0-9]+:)? no binary input function available for type/,
+ $offset);
+
+# Create and set send/rcv functions for the custom type
+$ddl = qq(
+ CREATE FUNCTION myvarcharsend(myvarchar) RETURNS bytea
+ LANGUAGE internal STABLE PARALLEL SAFE STRICT AS 'varcharsend';
+ CREATE FUNCTION myvarcharrecv(internal, oid, integer) RETURNS myvarchar
+ LANGUAGE internal STABLE PARALLEL SAFE STRICT AS 'varcharrecv';
+ ALTER TYPE myvarchar SET (
+ send = myvarcharsend,
+ receive = myvarcharrecv
+ ););
+
+$node_publisher->safe_psql('postgres', $ddl);
+$node_subscriber->safe_psql('postgres', $ddl);
+
+# Now tablesync should succeed
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tsub');
+
+# Check the synced data on the subscriber
+$result =
+ $node_subscriber->safe_psql('postgres', 'SELECT a FROM test_myvarchar;');
+
+is($result, 'a', 'check synced data on subscriber with custom type');
+
+# -----------------------------------------------------
+# Test mismatched column types with/without binary mode
+# -----------------------------------------------------
+
+# Test syncing tables with mismatching column types
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE public.test_mismatching_types (
+ a bigint PRIMARY KEY
+ );
+ INSERT INTO public.test_mismatching_types (a)
+ VALUES (1), (2);
+ ));
+
+# Check the subscriber log from now on.
+$offset = -s $node_subscriber->logfile;
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE public.test_mismatching_types (
+ a int PRIMARY KEY
+ );
+ ALTER SUBSCRIPTION tsub REFRESH PUBLICATION;
+ ));
+
+# Cannot sync due to the type mismatch: in binary mode the subscriber's int4
+# receive function rejects the 8-byte bigint values sent by the publisher
+$node_subscriber->wait_for_log(
+ qr/ERROR: ( [A-Z0-9]+:)? incorrect binary data format/, $offset);
+
+# Check the publisher log from now on.
+$offset = -s $node_publisher->logfile;
+
+# Setting binary to false should allow syncing
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER SUBSCRIPTION tsub SET (binary = false);));
+
+# Ensure the COPY command is executed in text format on the publisher
+$node_publisher->wait_for_log(
+ qr/LOG: ( [A-Z0-9]+:)? statement: COPY (.+)? TO STDOUT\n/, $offset);
+
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tsub');
+
+# Check the synced data on the subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ 'SELECT a FROM test_mismatching_types ORDER BY a;');
+
+is( $result, '1
+2', 'check synced data on subscriber with binary = false');
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/015_stream.pl b/src/test/subscription/t/015_stream.pl
new file mode 100644
index 0000000..be70b86
--- /dev/null
+++ b/src/test/subscription/t/015_stream.pl
@@ -0,0 +1,328 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test streaming of simple large transaction
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Check that the parallel apply worker has finished applying the streaming
+# transaction.
+sub check_parallel_log
+{
+ my ($node_subscriber, $offset, $is_parallel, $type) = @_;
+
+ if ($is_parallel)
+ {
+ $node_subscriber->wait_for_log(
+ qr/DEBUG: ( [A-Z0-9]+:)? finished processing the STREAM $type command/,
+ $offset);
+ }
+}
+
+# Common test steps for both the streaming=on and streaming=parallel cases.
+sub test_streaming
+{
+ my ($node_publisher, $node_subscriber, $appname, $is_parallel) = @_;
+
+ # Interleave a pair of transactions, each exceeding the 64kB limit.
+ my $offset = 0;
+
+ my $h = $node_publisher->background_psql('postgres', on_error_stop => 0);
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ $h->query_safe(
+ q{
+ BEGIN;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ });
+
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(5001, 9999) s(i);
+ DELETE FROM test_tab WHERE a > 5000;
+ COMMIT;
+ });
+
+ $h->query_safe('COMMIT');
+ # errors make the next test fail, so ignore them here
+ $h->quit;
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'COMMIT');
+
+ my $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ is($result, qq(3334|3334|3334),
+ 'check extra columns contain local defaults');
+
+ # Test the streaming in binary mode
+ $node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub SET (binary = on)");
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ # Insert, update and delete enough rows to exceed the 64kB limit.
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(5001, 10000) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ COMMIT;
+ });
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'COMMIT');
+
+ $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ is($result, qq(6667|6667|6667),
+ 'check extra columns contain local defaults');
+
+ # Change the local values of the extra columns on the subscriber,
+ # update publisher, and check that subscriber retains the expected
+ # values. This is to ensure that non-streaming transactions behave
+ # properly after a streaming transaction.
+ $node_subscriber->safe_psql('postgres',
+ "UPDATE test_tab SET c = 'epoch'::timestamptz + 987654321 * interval '1s'"
+ );
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ $node_publisher->safe_psql('postgres',
+ "UPDATE test_tab SET b = md5(a::text)");
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'COMMIT');
+
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(extract(epoch from c) = 987654321), count(d = 999) FROM test_tab"
+ );
+ is($result, qq(6667|6667|6667),
+ 'check extra columns contain locally changed data');
+
+ # Cleanup the test data
+ $node_publisher->safe_psql('postgres',
+ "DELETE FROM test_tab WHERE (a > 2)");
+ $node_publisher->wait_for_catchup($appname);
+}
+
+# Create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
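+# Use a small logical_decoding_work_mem so that the transactions below exceed
+# it and are streamed to the subscriber while still in progress.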
+$node_publisher->append_conf('postgresql.conf',
+ 'logical_decoding_work_mem = 64kB');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create some preexisting content on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+
+$node_publisher->safe_psql('postgres', "CREATE TABLE test_tab_2 (a int)");
+
+# Setup structure on subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
+
+$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab_2 (a int)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE UNIQUE INDEX idx_tab on test_tab_2(a)");
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab, test_tab_2");
+
+my $appname = 'tap_sub';
+
+################################
+# Test using streaming mode 'on'
+################################
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+my $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(2|2|2), 'check initial data was copied to subscriber');
+
+test_streaming($node_publisher, $node_subscriber, $appname, 0);
+
+######################################
+# Test using streaming mode 'parallel'
+######################################
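+# Capture the current walsender PID so that, after the ALTER SUBSCRIPTION
+# below, we can wait for the apply worker to reconnect with a new walsender.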
+my $oldpid = $node_publisher->safe_psql('postgres',
+ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname' AND state = 'streaming';"
+);
+
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub SET(streaming = parallel, binary = off)");
+
+$node_publisher->poll_query_until('postgres',
+ "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname' AND state = 'streaming';"
+ )
+ or die
+ "Timed out while waiting for apply to restart after changing SUBSCRIPTION";
+
+# We need to check DEBUG logs to ensure that the parallel apply worker has
+# applied the transaction. So, bump up the log verbosity.
+$node_subscriber->append_conf('postgresql.conf', "log_min_messages = debug1");
+$node_subscriber->reload;
+
+# Run a query to make sure that the reload has taken effect.
+$node_subscriber->safe_psql('postgres', q{SELECT 1});
+
+test_streaming($node_publisher, $node_subscriber, $appname, 1);
+
+# Test that the deadlock is detected among the leader and parallel apply
+# workers.
+
+$node_subscriber->append_conf('postgresql.conf', "deadlock_timeout = 10ms");
+$node_subscriber->reload;
+
+# Run a query to make sure that the reload has taken effect.
+$node_subscriber->safe_psql('postgres', q{SELECT 1});
+
+# Interleave a pair of transactions, each exceeding the 64kB limit.
+my $h = $node_publisher->background_psql('postgres', on_error_stop => 0);
+
+# Confirm that a deadlock between the leader apply worker and the parallel
+# apply worker is detected.
+
+my $offset = -s $node_subscriber->logfile;
+
+$h->query_safe(
+ q{
+BEGIN;
+INSERT INTO test_tab_2 SELECT i FROM generate_series(1, 5000) s(i);
+});
+
+# Ensure that the parallel apply worker executes the insert command before the
+# leader worker.
+$node_subscriber->wait_for_log(
+ qr/DEBUG: ( [A-Z0-9]+:)? applied [0-9]+ changes in the streaming chunk/,
+ $offset);
+
+$node_publisher->safe_psql('postgres', "INSERT INTO test_tab_2 values(1)");
+
+$h->query_safe('COMMIT');
+$h->quit;
+
+$node_subscriber->wait_for_log(qr/ERROR: ( [A-Z0-9]+:)? deadlock detected/,
+ $offset);
+
+# Temporarily drop the unique index so that the two transactions can complete
+# without conflicting on it.
+$node_subscriber->safe_psql('postgres', "DROP INDEX idx_tab");
+
+# Wait for this streaming transaction to be applied in the apply worker.
+$node_publisher->wait_for_catchup($appname);
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_tab_2");
+is($result, qq(5001), 'data replicated to subscriber after dropping index');
+
+# Clean up test data from the environment.
+$node_publisher->safe_psql('postgres', "TRUNCATE TABLE test_tab_2");
+$node_publisher->wait_for_catchup($appname);
+$node_subscriber->safe_psql('postgres',
+ "CREATE UNIQUE INDEX idx_tab on test_tab_2(a)");
+
+# Confirm that a deadlock between two parallel apply workers is detected.
+
+# Check the subscriber log from now on.
+$offset = -s $node_subscriber->logfile;
+
+$h->query_safe(
+ q{
+BEGIN;
+INSERT INTO test_tab_2 SELECT i FROM generate_series(1, 5000) s(i);
+});
+
+# Ensure that the first parallel apply worker executes the insert command
+# before the second one.
+$node_subscriber->wait_for_log(
+ qr/DEBUG: ( [A-Z0-9]+:)? applied [0-9]+ changes in the streaming chunk/,
+ $offset);
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab_2 SELECT i FROM generate_series(1, 5000) s(i)");
+
+$h->query_safe('COMMIT');
+$h->quit;
+
+$node_subscriber->wait_for_log(qr/ERROR: ( [A-Z0-9]+:)? deadlock detected/,
+ $offset);
+
+# Temporarily drop the unique index so that the two transactions can complete
+# without conflicting on it.
+$node_subscriber->safe_psql('postgres', "DROP INDEX idx_tab");
+
+# Wait for this streaming transaction to be applied in the apply worker.
+$node_publisher->wait_for_catchup($appname);
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_tab_2");
+is($result, qq(10000), 'data replicated to subscriber after dropping index');
+
+# Test serializing changes to files and notifying the parallel apply worker to
+# apply them at the end of the transaction.
+$node_subscriber->append_conf('postgresql.conf',
+ 'debug_logical_replication_streaming = immediate');
+# Reset the log_min_messages to default.
+$node_subscriber->append_conf('postgresql.conf',
+ "log_min_messages = warning");
+$node_subscriber->reload;
+
+# Run a query to make sure that the reload has taken effect.
+$node_subscriber->safe_psql('postgres', q{SELECT 1});
+
+$offset = -s $node_subscriber->logfile;
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab_2 SELECT i FROM generate_series(1, 5000) s(i)");
+
+# Ensure that the changes are serialized.
+$node_subscriber->wait_for_log(
+ qr/LOG: ( [A-Z0-9]+:)? logical replication apply worker will serialize the remaining changes of remote transaction \d+ to a file/,
+ $offset);
+
+$node_publisher->wait_for_catchup($appname);
+
+# Check that transaction is committed on subscriber
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_tab_2");
+is($result, qq(15000),
+ 'parallel apply worker replayed all changes from file');
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/016_stream_subxact.pl b/src/test/subscription/t/016_stream_subxact.pl
new file mode 100644
index 0000000..a962cd8
--- /dev/null
+++ b/src/test/subscription/t/016_stream_subxact.pl
@@ -0,0 +1,154 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test streaming of transaction containing subtransactions
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Check that the parallel apply worker has finished applying the streaming
+# transaction.
+sub check_parallel_log
+{
+ my ($node_subscriber, $offset, $is_parallel, $type) = @_;
+
+ if ($is_parallel)
+ {
+ $node_subscriber->wait_for_log(
+ qr/DEBUG: ( [A-Z0-9]+:)? finished processing the STREAM $type command/,
+ $offset);
+ }
+}
+
+# Common test steps for both the streaming=on and streaming=parallel cases.
+sub test_streaming
+{
+ my ($node_publisher, $node_subscriber, $appname, $is_parallel) = @_;
+
+ my $offset = 0;
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ # Insert, update and delete some rows.
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ SAVEPOINT s1;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(6, 8) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ SAVEPOINT s2;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(9, 11) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ SAVEPOINT s3;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(12, 14) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ SAVEPOINT s4;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(15, 17) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ COMMIT;
+ });
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'COMMIT');
+
+ my $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ is($result, qq(12|12|12),
+ 'check data was copied to subscriber in streaming mode and extra columns contain local defaults'
+ );
+
+ # Cleanup the test data
+ $node_publisher->safe_psql('postgres',
+ "DELETE FROM test_tab WHERE (a > 2)");
+ $node_publisher->wait_for_catchup($appname);
+}
+
+# Create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
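+# With debug_logical_replication_streaming = immediate the publisher streams
+# each transaction as soon as its changes are decoded, so even the small
+# transactions below exercise the streaming code paths.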
+$node_publisher->append_conf('postgresql.conf',
+ 'debug_logical_replication_streaming = immediate');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create some preexisting content on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+
+# Setup structure on subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+
+my $appname = 'tap_sub';
+
+################################
+# Test using streaming mode 'on'
+################################
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+my $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(2|2|2), 'check initial data was copied to subscriber');
+
+test_streaming($node_publisher, $node_subscriber, $appname, 0);
+
+######################################
+# Test using streaming mode 'parallel'
+######################################
+my $oldpid = $node_publisher->safe_psql('postgres',
+ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname' AND state = 'streaming';"
+);
+
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub SET(streaming = parallel)");
+
+$node_publisher->poll_query_until('postgres',
+ "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname' AND state = 'streaming';"
+ )
+ or die
+ "Timed out while waiting for apply to restart after changing SUBSCRIPTION";
+
+# We need to check DEBUG logs to ensure that the parallel apply worker has
+# applied the transaction. So, bump up the log verbosity.
+$node_subscriber->append_conf('postgresql.conf', "log_min_messages = debug1");
+$node_subscriber->reload;
+
+# Run a query to make sure that the reload has taken effect.
+$node_subscriber->safe_psql('postgres', q{SELECT 1});
+
+test_streaming($node_publisher, $node_subscriber, $appname, 1);
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/017_stream_ddl.pl b/src/test/subscription/t/017_stream_ddl.pl
new file mode 100644
index 0000000..626676a
--- /dev/null
+++ b/src/test/subscription/t/017_stream_ddl.pl
@@ -0,0 +1,129 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test streaming of large transaction with DDL and subtransactions
+#
+# This file mainly tests the DDL/DML interaction on the publisher side, so
+# there is no parallel apply version of the tests in this file.
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->append_conf('postgresql.conf',
+ 'logical_decoding_work_mem = 64kB');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create some preexisting content on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+
+# Setup structure on subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c INT, d INT, e INT, f INT)"
+);
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+
+my $appname = 'tap_sub';
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+my $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(2|0|0), 'check initial data was copied to subscriber');
+
+# a small (non-streamed) transaction with DDL and DML
+$node_publisher->safe_psql(
+ 'postgres', q{
+BEGIN;
+INSERT INTO test_tab VALUES (3, md5(3::text));
+ALTER TABLE test_tab ADD COLUMN c INT;
+SAVEPOINT s1;
+INSERT INTO test_tab VALUES (4, md5(4::text), -4);
+COMMIT;
+});
+
+# large (streamed) transaction with DDL and DML
+$node_publisher->safe_psql(
+ 'postgres', q{
+BEGIN;
+INSERT INTO test_tab SELECT i, md5(i::text), -i FROM generate_series(5, 1000) s(i);
+ALTER TABLE test_tab ADD COLUMN d INT;
+SAVEPOINT s1;
+INSERT INTO test_tab SELECT i, md5(i::text), -i, 2*i FROM generate_series(1001, 2000) s(i);
+COMMIT;
+});
+
+# a small (non-streamed) transaction with DDL and DML
+$node_publisher->safe_psql(
+ 'postgres', q{
+BEGIN;
+INSERT INTO test_tab VALUES (2001, md5(2001::text), -2001, 2*2001);
+ALTER TABLE test_tab ADD COLUMN e INT;
+SAVEPOINT s1;
+INSERT INTO test_tab VALUES (2002, md5(2002::text), -2002, 2*2002, -3*2002);
+COMMIT;
+});
+
+$node_publisher->wait_for_catchup($appname);
+
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d), count(e) FROM test_tab");
+is($result, qq(2002|1999|1002|1),
+ 'check data was copied to subscriber in streaming mode and extra columns contain local defaults'
+);
+
+# A large (streamed) transaction with DDL and DML. One of the DDL is performed
+# after DML to ensure that we invalidate the schema sent for test_tab so that
+# the next transaction has to send the schema again.
+$node_publisher->safe_psql(
+ 'postgres', q{
+BEGIN;
+INSERT INTO test_tab SELECT i, md5(i::text), -i, 2*i, -3*i FROM generate_series(2003,5000) s(i);
+ALTER TABLE test_tab ADD COLUMN f INT;
+COMMIT;
+});
+
+# A small transaction that won't get streamed. This is just to ensure that we
+# send the schema again to reflect the last column added in the previous test.
+$node_publisher->safe_psql(
+ 'postgres', q{
+BEGIN;
+INSERT INTO test_tab SELECT i, md5(i::text), -i, 2*i, -3*i, 4*i FROM generate_series(5001,5005) s(i);
+COMMIT;
+});
+
+$node_publisher->wait_for_catchup($appname);
+
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d), count(e), count(f) FROM test_tab");
+is($result, qq(5005|5002|4005|3004|5),
+ 'check data was copied to subscriber for both streaming and non-streaming transactions'
+);
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/018_stream_subxact_abort.pl b/src/test/subscription/t/018_stream_subxact_abort.pl
new file mode 100644
index 0000000..91730d9
--- /dev/null
+++ b/src/test/subscription/t/018_stream_subxact_abort.pl
@@ -0,0 +1,264 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test streaming of transaction containing multiple subtransactions and rollbacks
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Check that the parallel apply worker has finished applying the streaming
+# transaction.
+sub check_parallel_log
+{
+ my ($node_subscriber, $offset, $is_parallel, $type) = @_;
+
+ if ($is_parallel)
+ {
+ $node_subscriber->wait_for_log(
+ qr/DEBUG: ( [A-Z0-9]+:)? finished processing the STREAM $type command/,
+ $offset);
+ }
+}
+
+# Common test steps for both the streaming=on and streaming=parallel cases.
+sub test_streaming
+{
+ my ($node_publisher, $node_subscriber, $appname, $is_parallel) = @_;
+
+ my $offset = 0;
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ # streamed transaction with DDL, DML and ROLLBACKs
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab VALUES (3, md5(3::text));
+ SAVEPOINT s1;
+ INSERT INTO test_tab VALUES (4, md5(4::text));
+ SAVEPOINT s2;
+ INSERT INTO test_tab VALUES (5, md5(5::text));
+ SAVEPOINT s3;
+ INSERT INTO test_tab VALUES (6, md5(6::text));
+ ROLLBACK TO s2;
+ INSERT INTO test_tab VALUES (7, md5(7::text));
+ ROLLBACK TO s1;
+ INSERT INTO test_tab VALUES (8, md5(8::text));
+ SAVEPOINT s4;
+ INSERT INTO test_tab VALUES (9, md5(9::text));
+ SAVEPOINT s5;
+ INSERT INTO test_tab VALUES (10, md5(10::text));
+ COMMIT;
+ });
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'COMMIT');
+
+ my $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
+ is($result, qq(6|0),
+ 'check rollback to savepoint was reflected on subscriber and extra columns contain local defaults'
+ );
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ # streamed transaction with subscriber receiving out of order
+ # subtransaction ROLLBACKs
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab VALUES (11, md5(11::text));
+ SAVEPOINT s1;
+ INSERT INTO test_tab VALUES (12, md5(12::text));
+ SAVEPOINT s2;
+ INSERT INTO test_tab VALUES (13, md5(13::text));
+ SAVEPOINT s3;
+ INSERT INTO test_tab VALUES (14, md5(14::text));
+ RELEASE s2;
+ INSERT INTO test_tab VALUES (15, md5(15::text));
+ ROLLBACK TO s1;
+ COMMIT;
+ });
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'COMMIT');
+
+ $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
+ is($result, qq(7|0),
+ 'check rollback to savepoint was reflected on subscriber');
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ # streamed transaction with subscriber receiving rollback
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab VALUES (16, md5(16::text));
+ SAVEPOINT s1;
+ INSERT INTO test_tab VALUES (17, md5(17::text));
+ SAVEPOINT s2;
+ INSERT INTO test_tab VALUES (18, md5(18::text));
+ ROLLBACK;
+ });
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'ABORT');
+
+ $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
+ is($result, qq(7|0), 'check rollback was reflected on subscriber');
+
+ # Cleanup the test data
+ $node_publisher->safe_psql('postgres',
+ "DELETE FROM test_tab WHERE (a > 2)");
+ $node_publisher->wait_for_catchup($appname);
+}
+
+# Create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->append_conf('postgresql.conf',
+ 'debug_logical_replication_streaming = immediate');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create some preexisting content on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+$node_publisher->safe_psql('postgres', "CREATE TABLE test_tab_2 (a int)");
+
+# Setup structure on subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c INT, d INT, e INT)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab_2 (a int)");
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab, test_tab_2");
+
+my $appname = 'tap_sub';
+
+################################
+# Test using streaming mode 'on'
+################################
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+my $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
+is($result, qq(2|0), 'check initial data was copied to subscriber');
+
+test_streaming($node_publisher, $node_subscriber, $appname, 0);
+
+######################################
+# Test using streaming mode 'parallel'
+######################################
+my $oldpid = $node_publisher->safe_psql('postgres',
+ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname' AND state = 'streaming';"
+);
+
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub SET(streaming = parallel)");
+
+$node_publisher->poll_query_until('postgres',
+ "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname' AND state = 'streaming';"
+ )
+ or die
+ "Timed out while waiting for apply to restart after changing SUBSCRIPTION";
+
+# We need to check DEBUG logs to ensure that the parallel apply worker has
+# applied the transaction. So, bump up the log verbosity.
+$node_subscriber->append_conf('postgresql.conf', "log_min_messages = debug1");
+$node_subscriber->reload;
+
+# Run a query to make sure that the reload has taken effect.
+$node_subscriber->safe_psql('postgres', q{SELECT 1});
+
+test_streaming($node_publisher, $node_subscriber, $appname, 1);
+
+# Test serializing changes to files and notifying the parallel apply worker to
+# apply them at the end of the transaction.
+$node_subscriber->append_conf('postgresql.conf',
+ 'debug_logical_replication_streaming = immediate');
+# Reset the log_min_messages to default.
+$node_subscriber->append_conf('postgresql.conf',
+ "log_min_messages = warning");
+$node_subscriber->reload;
+
+# Run a query to make sure that the reload has taken effect.
+$node_subscriber->safe_psql('postgres', q{SELECT 1});
+
+my $offset = -s $node_subscriber->logfile;
+
+$node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab_2 values(1);
+ ROLLBACK;
+ });
+
+# Ensure that the changes are serialized.
+$node_subscriber->wait_for_log(
+ qr/LOG: ( [A-Z0-9]+:)? logical replication apply worker will serialize the remaining changes of remote transaction \d+ to a file/,
+ $offset);
+
+$node_publisher->wait_for_catchup($appname);
+
+# Check that transaction is aborted on subscriber
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_tab_2");
+is($result, qq(0), 'check rollback was reflected on subscriber');
+
+# Serialize a transaction containing a sub-transaction ROLLBACK, and check
+# that only the sub-transaction's changes are discarded.
+$offset = -s $node_subscriber->logfile;
+
+$node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab_2 values(1);
+ SAVEPOINT sp;
+ INSERT INTO test_tab_2 values(1);
+ ROLLBACK TO sp;
+ COMMIT;
+ });
+
+# Ensure that the changes are serialized.
+$node_subscriber->wait_for_log(
+ qr/LOG: ( [A-Z0-9]+:)? logical replication apply worker will serialize the remaining changes of remote transaction \d+ to a file/,
+ $offset);
+
+$node_publisher->wait_for_catchup($appname);
+
+# Check that only the sub-transaction is aborted on the subscriber.
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_tab_2");
+is($result, qq(1), 'check rollback to savepoint was reflected on subscriber');
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/019_stream_subxact_ddl_abort.pl b/src/test/subscription/t/019_stream_subxact_ddl_abort.pl
new file mode 100644
index 0000000..420b9a7
--- /dev/null
+++ b/src/test/subscription/t/019_stream_subxact_ddl_abort.pl
@@ -0,0 +1,87 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test streaming of transaction with subtransactions, DDLs, DMLs, and
+# rollbacks
+#
+# This file mainly tests the DDL/DML interaction on the publisher side, so
+# there is no parallel apply version of the tests in this file.
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->append_conf('postgresql.conf',
+ 'debug_logical_replication_streaming = immediate');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create some preexisting content on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+
+# Setup structure on subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c INT, d INT, e INT)");
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+
+my $appname = 'tap_sub';
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+my $result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
+is($result, qq(2|0), 'check initial data was copied to subscriber');
+
+# streamed transaction with DDL, DML and ROLLBACKs
+$node_publisher->safe_psql(
+ 'postgres', q{
+BEGIN;
+INSERT INTO test_tab VALUES (3, md5(3::text));
+ALTER TABLE test_tab ADD COLUMN c INT;
+SAVEPOINT s1;
+INSERT INTO test_tab VALUES (4, md5(4::text), -4);
+ALTER TABLE test_tab ADD COLUMN d INT;
+SAVEPOINT s2;
+INSERT INTO test_tab VALUES (5, md5(5::text), -5, 5*2);
+ALTER TABLE test_tab ADD COLUMN e INT;
+SAVEPOINT s3;
+INSERT INTO test_tab VALUES (6, md5(6::text), -6, 6*2, -6*3);
+ALTER TABLE test_tab DROP COLUMN c;
+ROLLBACK TO s1;
+INSERT INTO test_tab VALUES (4, md5(4::text), 4);
+COMMIT;
+});
+
+$node_publisher->wait_for_catchup($appname);
+
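+# After ROLLBACK TO s1 only column c (added before the savepoint) survives,
+# and of the four remaining rows only the re-inserted row 4 has a non-NULL c.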
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
+is($result, qq(4|1),
+ 'check rollback to savepoint was reflected on subscriber and extra columns contain local defaults'
+);
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/020_messages.pl b/src/test/subscription/t/020_messages.pl
new file mode 100644
index 0000000..826d39c
--- /dev/null
+++ b/src/test/subscription/t/020_messages.pl
@@ -0,0 +1,149 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Tests for logical decoding messages
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
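+# Disable autovacuum to keep unrelated transactions out of the decoded
+# change stream examined below.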
+$node_publisher->append_conf('postgresql.conf', 'autovacuum = off');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create some preexisting content on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_test (a int primary key)");
+
+# Setup structure on subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_test (a int primary key)");
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE tab_test");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub"
+);
+
+$node_publisher->wait_for_catchup('tap_sub');
+
+# Ensure a transactional logical decoding message shows up on the slot
+$node_subscriber->safe_psql('postgres', "ALTER SUBSCRIPTION tap_sub DISABLE");
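+# With the subscription disabled the apply worker no longer consumes the slot,
+# so its contents can be inspected directly with the peek/get functions below.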
+
+# wait for the replication slot to become inactive on the publisher
+$node_publisher->poll_query_until(
+ 'postgres',
+ "SELECT COUNT(*) FROM pg_catalog.pg_replication_slots WHERE slot_name = 'tap_sub' AND active='f'",
+ 1);
+
+$node_publisher->safe_psql('postgres',
+ "SELECT pg_logical_emit_message(true, 'pgoutput', 'a transactional message')"
+);
+
+my $result = $node_publisher->safe_psql(
+ 'postgres', qq(
+ SELECT get_byte(data, 0)
+ FROM pg_logical_slot_peek_binary_changes('tap_sub', NULL, NULL,
+ 'proto_version', '1',
+ 'publication_names', 'tap_pub',
+ 'messages', 'true')
+));
+
+# 66 77 67 == B M C == BEGIN MESSAGE COMMIT
+is( $result, qq(66
+77
+67),
+ 'messages on slot are B M C with message option');
+
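+# In a logical decoding message ('M'), byte 1 is the transactional flag and
+# the prefix string begins at byte offset 10 (after the 1-byte type, 1-byte
+# flag and 8-byte LSN), so substr(data, 11, 8) extracts the 8-character
+# prefix 'pgoutput'.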
+$result = $node_publisher->safe_psql(
+ 'postgres', qq(
+ SELECT get_byte(data, 1), encode(substr(data, 11, 8), 'escape')
+ FROM pg_logical_slot_peek_binary_changes('tap_sub', NULL, NULL,
+ 'proto_version', '1',
+ 'publication_names', 'tap_pub',
+ 'messages', 'true')
+ OFFSET 1 LIMIT 1
+));
+
+is($result, qq(1|pgoutput),
+ "flag transactional is set to 1 and prefix is pgoutput");
+
+$result = $node_publisher->safe_psql(
+ 'postgres', qq(
+ SELECT get_byte(data, 0)
+ FROM pg_logical_slot_get_binary_changes('tap_sub', NULL, NULL,
+ 'proto_version', '1',
+ 'publication_names', 'tap_pub')
+));
+
+# No message, and no BEGIN/COMMIT either: without the 'messages' option the
+# transactional message is skipped, leaving an empty transaction that the
+is($result, qq(),
+ 'option messages defaults to false so message (M) is not available on slot'
+);
+
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_test VALUES (1)");
+
+my $message_lsn = $node_publisher->safe_psql('postgres',
+ "SELECT pg_logical_emit_message(false, 'pgoutput', 'a non-transactional message')"
+);
+
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_test VALUES (2)");
+
+$result = $node_publisher->safe_psql(
+ 'postgres', qq(
+ SELECT get_byte(data, 0), get_byte(data, 1)
+ FROM pg_logical_slot_get_binary_changes('tap_sub', NULL, NULL,
+ 'proto_version', '1',
+ 'publication_names', 'tap_pub',
+ 'messages', 'true')
+ WHERE lsn = '$message_lsn' AND xid = 0
+));
+
+is($result, qq(77|0), 'non-transactional message on slot is M');
+
+# Ensure a non-transactional logical decoding message shows up on the slot when
+# it is emitted within an aborted transaction. The message is not decoded until
+# something advances the LSN, so we intentionally force the server to switch to
+# a new WAL file.
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ BEGIN;
+ SELECT pg_logical_emit_message(false, 'pgoutput',
+ 'a non-transactional message is available even if the transaction is aborted 1');
+ INSERT INTO tab_test VALUES (3);
+ SELECT pg_logical_emit_message(true, 'pgoutput',
+ 'a transactional message is not available if the transaction is aborted');
+ SELECT pg_logical_emit_message(false, 'pgoutput',
+ 'a non-transactional message is available even if the transaction is aborted 2');
+ ROLLBACK;
+ SELECT pg_switch_wal();
+));
+
+$result = $node_publisher->safe_psql(
+ 'postgres', qq(
+ SELECT get_byte(data, 0), get_byte(data, 1)
+ FROM pg_logical_slot_peek_binary_changes('tap_sub', NULL, NULL,
+ 'proto_version', '1',
+ 'publication_names', 'tap_pub',
+ 'messages', 'true')
+));
+
+is( $result, qq(77|0
+77|0),
+ 'non-transactional message on slot from aborted transaction is M');
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/021_twophase.pl b/src/test/subscription/t/021_twophase.pl
new file mode 100644
index 0000000..8ce4cfc
--- /dev/null
+++ b/src/test/subscription/t/021_twophase.pl
@@ -0,0 +1,399 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# logical replication of 2PC test
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+###############################
+# Setup
+###############################
+
+# Initialize publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->append_conf('postgresql.conf',
+ qq(max_prepared_transactions = 10));
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->append_conf('postgresql.conf',
+ qq(max_prepared_transactions = 10));
+$node_subscriber->start;
+
+# Create some pre-existing content on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_publisher->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full SELECT generate_series(1,10);
+ PREPARE TRANSACTION 'some_initial_data';
+ COMMIT PREPARED 'some_initial_data';");
+
+# Setup structure on subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_full (a int PRIMARY KEY)");
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE tab_full");
+
+my $appname = 'tap_sub';
+$node_subscriber->safe_psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION tap_sub
+ CONNECTION '$publisher_connstr application_name=$appname'
+ PUBLICATION tap_pub
+ WITH (two_phase = on)");
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# Also wait for two-phase to be enabled
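+# (subtwophasestate is 'p' while enabling two-phase is still pending and
+# becomes 'e' once the apply worker has enabled it after the initial sync)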
+my $twophase_query =
+ "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+$node_subscriber->poll_query_until('postgres', $twophase_query)
+ or die "Timed out while waiting for subscriber to enable twophase";
+
+###############################
+# check that 2PC gets replicated to subscriber
+# then COMMIT PREPARED
+###############################
+
+$node_publisher->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (11);
+ PREPARE TRANSACTION 'test_prepared_tab_full';");
+
+$node_publisher->wait_for_catchup($appname);
+
+# check that transaction is in prepared state on subscriber
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber');
+
+# check that 2PC gets committed on subscriber
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab_full';");
+
+$node_publisher->wait_for_catchup($appname);
+
+# check that transaction is committed on subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 11;");
+is($result, qq(1), 'Row inserted via 2PC has committed on subscriber');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is committed on subscriber');
+
+###############################
+# check that 2PC gets replicated to subscriber
+# then ROLLBACK PREPARED
+###############################
+
+$node_publisher->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (12);
+ PREPARE TRANSACTION 'test_prepared_tab_full';");
+
+$node_publisher->wait_for_catchup($appname);
+
+# check that transaction is in prepared state on subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber');
+
+# check that 2PC gets aborted on subscriber
+$node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab_full';");
+
+$node_publisher->wait_for_catchup($appname);
+
+# check that transaction is aborted on subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 12;");
+is($result, qq(0), 'Row inserted via 2PC is not present on subscriber');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is aborted on subscriber');
+
+###############################
+# Check that ROLLBACK PREPARED is decoded properly on crash restart
+# (publisher and subscriber crash)
+###############################
+
+$node_publisher->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (12);
+ INSERT INTO tab_full VALUES (13);
+ PREPARE TRANSACTION 'test_prepared_tab';");
+
+$node_subscriber->stop('immediate');
+$node_publisher->stop('immediate');
+
+$node_publisher->start;
+$node_subscriber->start;
+
+# roll back after the restart
+$node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab';");
+$node_publisher->wait_for_catchup($appname);
+
+# check inserts are rolled back
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (12,13);");
+is($result, qq(0), 'Rows rolled back are not on the subscriber');
+
+###############################
+# Check that COMMIT PREPARED is decoded properly on crash restart
+# (publisher and subscriber crash)
+###############################
+
+$node_publisher->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (12);
+ INSERT INTO tab_full VALUES (13);
+ PREPARE TRANSACTION 'test_prepared_tab';");
+
+$node_subscriber->stop('immediate');
+$node_publisher->stop('immediate');
+
+$node_publisher->start;
+$node_subscriber->start;
+
+# commit after the restart
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->wait_for_catchup($appname);
+
+# check inserts are visible
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (12,13);");
+is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
+
+###############################
+# Check that COMMIT PREPARED is decoded properly on crash restart
+# (subscriber only crash)
+###############################
+
+$node_publisher->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (14);
+ INSERT INTO tab_full VALUES (15);
+ PREPARE TRANSACTION 'test_prepared_tab';");
+
+$node_subscriber->stop('immediate');
+$node_subscriber->start;
+
+# commit after the restart
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->wait_for_catchup($appname);
+
+# check inserts are visible
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (14,15);");
+is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
+
+###############################
+# Check that COMMIT PREPARED is decoded properly on crash restart
+# (publisher only crash)
+###############################
+
+$node_publisher->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (16);
+ INSERT INTO tab_full VALUES (17);
+ PREPARE TRANSACTION 'test_prepared_tab';");
+
+$node_publisher->stop('immediate');
+$node_publisher->start;
+
+# commit after the restart
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->wait_for_catchup($appname);
+
+# check inserts are visible
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (16,17);");
+is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
+
+###############################
+# Test nested transaction with 2PC
+###############################
+
+# check that 2PC gets replicated to subscriber
+$node_publisher->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (21);
+ SAVEPOINT sp_inner;
+ INSERT INTO tab_full VALUES (22);
+ ROLLBACK TO SAVEPOINT sp_inner;
+ PREPARE TRANSACTION 'outer';
+ ");
+$node_publisher->wait_for_catchup($appname);
+
+# check that transaction is in prepared state on subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber');
+
+# COMMIT
+$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'outer';");
+
+$node_publisher->wait_for_catchup($appname);
+
+# check the transaction state on subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is ended on subscriber');
+
+# check inserts are visible. 22 should be rolled back. 21 should be committed.
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM tab_full where a IN (21,22);");
+is($result, qq(21), 'Rows committed are on the subscriber');
+
+###############################
+# Test using empty GID
+###############################
+
+# check that 2PC gets replicated to subscriber
+$node_publisher->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (51);
+ PREPARE TRANSACTION '';");
+$node_publisher->wait_for_catchup($appname);
+
+# check that transaction is in prepared state on subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber');
+
+# ROLLBACK
+$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED '';");
+
+# check that 2PC gets aborted on subscriber
+$node_publisher->wait_for_catchup($appname);
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is aborted on subscriber');
+
+###############################
+# copy_data=false and two_phase
+###############################
+
+#create some test tables for copy tests
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_copy (a int PRIMARY KEY)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_copy SELECT generate_series(1,5);");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_copy (a int PRIMARY KEY)");
+$node_subscriber->safe_psql('postgres', "INSERT INTO tab_copy VALUES (88);");
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+is($result, qq(1), 'initial data in subscriber table');
+
+# Setup logical replication
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_copy FOR TABLE tab_copy;");
+
+my $appname_copy = 'appname_copy';
+$node_subscriber->safe_psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION tap_sub_copy
+ CONNECTION '$publisher_connstr application_name=$appname_copy'
+ PUBLICATION tap_pub_copy
+ WITH (two_phase=on, copy_data=false);");
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname_copy);
+
+# Also wait for two-phase to be enabled
+$node_subscriber->poll_query_until('postgres', $twophase_query)
+ or die "Timed out while waiting for subscriber to enable twophase";
+
+# Check that the initial table data was NOT replicated (because we said copy_data=false)
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+is($result, qq(1), 'initial data in subscriber table');
+
+# Now do a prepare on publisher and check that it IS replicated
+$node_publisher->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_copy VALUES (99);
+ PREPARE TRANSACTION 'mygid';");
+
+# Wait for both subscriptions to catch up
+$node_publisher->wait_for_catchup($appname_copy);
+$node_publisher->wait_for_catchup($appname);
+
+# Check that the transaction has been prepared on the subscriber; there will
+# be 2 prepared transactions, one for each of the 2 subscriptions.
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(2), 'transaction is prepared on subscriber');
+
+# Now commit the insert and verify that it IS replicated
+$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'mygid';");
+
+$result =
+ $node_publisher->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+is($result, qq(6), 'publisher inserted data');
+
+$node_publisher->wait_for_catchup($appname_copy);
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+is($result, qq(2), 'replicated data in subscriber table');
+
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_copy;");
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_copy;");
+
+###############################
+# check all the cleanup
+###############################
+
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
+is($result, qq(0), 'check subscription was dropped on subscriber');
+
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
+is($result, qq(0), 'check replication slot was dropped on publisher');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0), 'check replication origin was dropped on subscriber');
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/022_twophase_cascade.pl b/src/test/subscription/t/022_twophase_cascade.pl
new file mode 100644
index 0000000..e624ebe
--- /dev/null
+++ b/src/test/subscription/t/022_twophase_cascade.pl
@@ -0,0 +1,463 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test cascading logical replication of 2PC.
+#
+# Includes tests for 2PC without streaming and for 2PC with streaming.
+#
+# Two-phase with parallel apply is tested in 023_twophase_stream, so we do not
+# add a parallel apply version of the tests in this file.
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+###############################
+# Setup a cascade of pub/sub nodes.
+# node_A -> node_B -> node_C
+###############################
+
+# Initialize nodes
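+# Each node allows prepared transactions, and logical_decoding_work_mem is
+# kept at its 64kB minimum so the streaming tests further down can exceed it
+# with a fairly small transaction.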
+# node_A
+my $node_A = PostgreSQL::Test::Cluster->new('node_A');
+$node_A->init(allows_streaming => 'logical');
+$node_A->append_conf(
+ 'postgresql.conf', qq(
+max_prepared_transactions = 10
+logical_decoding_work_mem = 64kB
+));
+$node_A->start;
+# node_B
+my $node_B = PostgreSQL::Test::Cluster->new('node_B');
+$node_B->init(allows_streaming => 'logical');
+$node_B->append_conf(
+ 'postgresql.conf', qq(
+max_prepared_transactions = 10
+logical_decoding_work_mem = 64kB
+));
+$node_B->start;
+# node_C
+my $node_C = PostgreSQL::Test::Cluster->new('node_C');
+$node_C->init(allows_streaming => 'logical');
+$node_C->append_conf(
+ 'postgresql.conf', qq(
+max_prepared_transactions = 10
+logical_decoding_work_mem = 64kB
+));
+$node_C->start;
+
+# Create some pre-existing content on node_A
+$node_A->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_A->safe_psql(
+ 'postgres', "
+ INSERT INTO tab_full SELECT generate_series(1,10);");
+
+# Create the same tables on node_B and node_C
+$node_B->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_C->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
+
+# Create some pre-existing content on node_A (for streaming tests)
+$node_A->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_A->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+
+# Create the same tables on node_B and node_C
+# columns a and b are compatible with same table name on node_A
+$node_B->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
+$node_C->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
+
+# Setup logical replication
+
+# -----------------------
+# 2PC NON-STREAMING TESTS
+# -----------------------
+
+# node_A (pub) -> node_B (sub)
+my $node_A_connstr = $node_A->connstr . ' dbname=postgres';
+$node_A->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_A FOR TABLE tab_full, test_tab");
+my $appname_B = 'tap_sub_B';
+$node_B->safe_psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION tap_sub_B
+ CONNECTION '$node_A_connstr application_name=$appname_B'
+ PUBLICATION tap_pub_A
+ WITH (two_phase = on)");
+
+# node_B (pub) -> node_C (sub)
+my $node_B_connstr = $node_B->connstr . ' dbname=postgres';
+$node_B->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_B FOR TABLE tab_full, test_tab");
+my $appname_C = 'tap_sub_C';
+$node_C->safe_psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION tap_sub_C
+ CONNECTION '$node_B_connstr application_name=$appname_C'
+ PUBLICATION tap_pub_B
+ WITH (two_phase = on)");
+
+# Wait for subscribers to finish initialization
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# Also wait for two-phase to be enabled
+my $twophase_query =
+ "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+$node_B->poll_query_until('postgres', $twophase_query)
+ or die "Timed out while waiting for subscriber to enable twophase";
+$node_C->poll_query_until('postgres', $twophase_query)
+ or die "Timed out while waiting for subscriber to enable twophase";
+
+is(1, 1, "Cascade setup is complete");
+
+my $result;
+
+###############################
+# check that 2PC gets replicated to subscriber(s)
+# then COMMIT PREPARED
+###############################
+
+# 2PC PREPARE
+$node_A->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (11);
+ PREPARE TRANSACTION 'test_prepared_tab_full';");
+
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# check the transaction state is prepared on subscriber(s)
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber B');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber C');
+
+# 2PC COMMIT
+$node_A->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab_full';");
+
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# check that transaction was committed on subscriber(s)
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 11;");
+is($result, qq(1), 'Row inserted via 2PC has committed on subscriber B');
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 11;");
+is($result, qq(1), 'Row inserted via 2PC has committed on subscriber C');
+
+# check the transaction state is ended on subscriber(s)
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is committed on subscriber B');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is committed on subscriber C');
+
+###############################
+# check that 2PC gets replicated to subscriber(s)
+# then ROLLBACK PREPARED
+###############################
+
+# 2PC PREPARE
+$node_A->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (12);
+ PREPARE TRANSACTION 'test_prepared_tab_full';");
+
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# check the transaction state is prepared on subscriber(s)
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber B');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber C');
+
+# 2PC ROLLBACK
+$node_A->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab_full';");
+
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# check that transaction is aborted on subscriber(s)
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 12;");
+is($result, qq(0), 'Row inserted via 2PC is not present on subscriber B');
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 12;");
+is($result, qq(0), 'Row inserted via 2PC is not present on subscriber C');
+
+# check the transaction state is ended on subscriber(s)
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is ended on subscriber B');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is ended on subscriber C');
+
+###############################
+# Test nested transactions with 2PC
+###############################
+
+# 2PC PREPARE with a nested ROLLBACK TO SAVEPOINT
+$node_A->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO tab_full VALUES (21);
+ SAVEPOINT sp_inner;
+ INSERT INTO tab_full VALUES (22);
+ ROLLBACK TO SAVEPOINT sp_inner;
+ PREPARE TRANSACTION 'outer';
+ ");
+
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# check the transaction state prepared on subscriber(s)
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber B');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber C');
+
+# 2PC COMMIT
+$node_A->safe_psql('postgres', "COMMIT PREPARED 'outer';");
+
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# check the transaction state is ended on subscriber
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is ended on subscriber B');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is ended on subscriber C');
+
+# check inserts are visible at subscriber(s).
+# 22 should be rolled back.
+# 21 should be committed.
+$result = $node_B->safe_psql('postgres',
+ "SELECT a FROM tab_full where a IN (21,22);");
+is($result, qq(21), 'Rows committed are present on subscriber B');
+$result = $node_C->safe_psql('postgres',
+ "SELECT a FROM tab_full where a IN (21,22);");
+is($result, qq(21), 'Rows committed are present on subscriber C');
+
+# ---------------------
+# 2PC + STREAMING TESTS
+# ---------------------
+
+my $oldpid_B = $node_A->safe_psql(
+ 'postgres', "
+ SELECT pid FROM pg_stat_replication
+ WHERE application_name = '$appname_B' AND state = 'streaming';");
+my $oldpid_C = $node_B->safe_psql(
+ 'postgres', "
+ SELECT pid FROM pg_stat_replication
+ WHERE application_name = '$appname_C' AND state = 'streaming';");
+
+# Setup logical replication (streaming = on)
+
+$node_B->safe_psql(
+ 'postgres', "
+ ALTER SUBSCRIPTION tap_sub_B
+ SET (streaming = on);");
+$node_C->safe_psql(
+ 'postgres', "
+ ALTER SUBSCRIPTION tap_sub_C
+ SET (streaming = on)");
+
+# Wait for subscribers to finish initialization
+
+$node_A->poll_query_until(
+ 'postgres', "
+ SELECT pid != $oldpid_B FROM pg_stat_replication
+ WHERE application_name = '$appname_B' AND state = 'streaming';"
+) or die "Timed out while waiting for apply to restart";
+$node_B->poll_query_until(
+ 'postgres', "
+ SELECT pid != $oldpid_C FROM pg_stat_replication
+ WHERE application_name = '$appname_C' AND state = 'streaming';"
+) or die "Timed out while waiting for apply to restart";
+
+###############################
+# Test 2PC PREPARE / COMMIT PREPARED.
+# 1. Data is streamed as a 2PC transaction.
+# 2. Then do commit prepared.
+#
+# Expect all data is replicated on subscriber(s) after the commit.
+###############################
+
+# Insert, update and delete enough rows to exceed the 64kB limit.
+# Then 2PC PREPARE
+$node_A->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ PREPARE TRANSACTION 'test_prepared_tab';});
+
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# check the transaction state is prepared on subscriber(s)
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber B');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber C');
+
+# 2PC COMMIT
+$node_A->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# check that transaction was committed on subscriber(s)
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+ 'Rows inserted by 2PC have committed on subscriber B, and extra columns have local defaults'
+);
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+ 'Rows inserted by 2PC have committed on subscriber C, and extra columns have local defaults'
+);
+
+# check the transaction state is ended on subscriber(s)
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is committed on subscriber B');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is committed on subscriber C');
+
+###############################
+# Test 2PC PREPARE with a nested ROLLBACK TO SAVEPOINT.
+# 0. Cleanup from previous test leaving only 2 rows.
+# 1. Insert one more row.
+# 2. Record a SAVEPOINT.
+# 3. Data is streamed using 2PC.
+# 4. Do rollback to SAVEPOINT prior to the streamed inserts.
+# 5. Then COMMIT PREPARED.
+#
+# Expect data after the SAVEPOINT is aborted leaving only 3 rows (= 2 original + 1 from step 1).
+###############################
+
+# First, delete the data except for 2 rows (delete will be replicated)
+$node_A->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
+
+# 2PC PREPARE with a nested ROLLBACK TO SAVEPOINT
+$node_A->safe_psql(
+ 'postgres', "
+ BEGIN;
+ INSERT INTO test_tab VALUES (9999, 'foobar');
+ SAVEPOINT sp_inner;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ ROLLBACK TO SAVEPOINT sp_inner;
+ PREPARE TRANSACTION 'outer';
+ ");
+
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# check the transaction state prepared on subscriber(s)
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber B');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber C');
+
+# 2PC COMMIT
+$node_A->safe_psql('postgres', "COMMIT PREPARED 'outer';");
+
+$node_A->wait_for_catchup($appname_B);
+$node_B->wait_for_catchup($appname_C);
+
+# check the transaction state is ended on subscriber
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is ended on subscriber B');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'transaction is ended on subscriber C');
+
+# check inserts are visible at subscriber(s).
+# All the streamed data (prior to the SAVEPOINT) should be rolled back.
+# (9999, 'foobar') should be committed.
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM test_tab where b = 'foobar';");
+is($result, qq(1), 'Rows committed are present on subscriber B');
+$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM test_tab;");
+is($result, qq(3), 'Rows committed are present on subscriber B');
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM test_tab where b = 'foobar';");
+is($result, qq(1), 'Rows committed are present on subscriber C');
+$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM test_tab;");
+is($result, qq(3), 'Rows committed are present on subscriber C');
+
+###############################
+# check all the cleanup
+###############################
+
+# cleanup the node_B => node_C pub/sub
+$node_C->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_C");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+is($result, qq(0), 'check subscription was dropped on subscriber node C');
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber node C');
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0),
+ 'check replication origin was dropped on subscriber node C');
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+is($result, qq(0), 'check replication slot was dropped on publisher node B');
+
+# cleanup the node_A => node_B pub/sub
+$node_B->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_B");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+is($result, qq(0), 'check subscription was dropped on subscriber node B');
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber node B');
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0),
+ 'check replication origin was dropped on subscriber node B');
+$result =
+ $node_A->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+is($result, qq(0), 'check replication slot was dropped on publisher node A');
+
+# shutdown
+$node_C->stop('fast');
+$node_B->stop('fast');
+$node_A->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/023_twophase_stream.pl b/src/test/subscription/t/023_twophase_stream.pl
new file mode 100644
index 0000000..0303807
--- /dev/null
+++ b/src/test/subscription/t/023_twophase_stream.pl
@@ -0,0 +1,458 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test logical replication of 2PC with streaming.
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Check that the parallel apply worker has finished applying the streaming
+# transaction.
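+# When $is_parallel is false (streaming = on) the apply worker handles the
+# changes itself and no such DEBUG message is emitted, so nothing is checked.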
+sub check_parallel_log
+{
+ my ($node_subscriber, $offset, $is_parallel, $type) = @_;
+
+ if ($is_parallel)
+ {
+ $node_subscriber->wait_for_log(
+ qr/DEBUG: ( [A-Z0-9]+:)? finished processing the STREAM $type command/,
+ $offset);
+ }
+}
+
+# Common test steps for both the streaming=on and streaming=parallel cases.
+sub test_streaming
+{
+ my ($node_publisher, $node_subscriber, $appname, $is_parallel) = @_;
+
+ my $offset = 0;
+
+ ###############################
+ # Test 2PC PREPARE / COMMIT PREPARED.
+ # 1. Data is streamed as a 2PC transaction.
+ # 2. Then do commit prepared.
+ #
+ # Expect all data is replicated on subscriber side after the commit.
+ ###############################
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
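+	# (-s returns the current size of the log file, so wait_for_log only
+	# looks at messages emitted after this point.)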
+
+ # check that 2PC gets replicated to subscriber
+ # Insert, update and delete some rows.
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ PREPARE TRANSACTION 'test_prepared_tab';});
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'PREPARE');
+
+ # check that transaction is in prepared state on subscriber
+ my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+ is($result, qq(1), 'transaction is prepared on subscriber');
+
+ # 2PC transaction gets committed
+ $node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
+
+ $node_publisher->wait_for_catchup($appname);
+
+ # check that transaction is committed on subscriber
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ is($result, qq(4|4|4),
+ 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults'
+ );
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+ is($result, qq(0), 'transaction is committed on subscriber');
+
+ ###############################
+ # Test 2PC PREPARE / ROLLBACK PREPARED.
+ # 1. Table is deleted back to 2 rows which are replicated on subscriber.
+ # 2. Data is streamed using 2PC.
+ # 3. Do rollback prepared.
+ #
+ # Expect data rolls back leaving only the original 2 rows.
+ ###############################
+
+ # First, delete the data except for 2 rows (will be replicated)
+ $node_publisher->safe_psql('postgres',
+ "DELETE FROM test_tab WHERE a > 2;");
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ # Then insert, update and delete some rows.
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ PREPARE TRANSACTION 'test_prepared_tab';});
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'PREPARE');
+
+ # check that transaction is in prepared state on subscriber
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+ is($result, qq(1), 'transaction is prepared on subscriber');
+
+ # 2PC transaction gets aborted
+ $node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab';");
+
+ $node_publisher->wait_for_catchup($appname);
+
+ # check that transaction is aborted on subscriber
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ is($result, qq(2|2|2),
+ 'Rows inserted by 2PC are rolled back, leaving only the original 2 rows'
+ );
+
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+ is($result, qq(0), 'transaction is aborted on subscriber');
+
+ ###############################
+ # Check that 2PC COMMIT PREPARED is decoded properly on crash restart.
+ # 1. insert, update and delete some rows.
+ # 2. Then server crashes before the 2PC transaction is committed.
+ # 3. After servers are restarted the pending transaction is committed.
+ #
+ # Expect all data is replicated on subscriber side after the commit.
+ # Note: both publisher and subscriber do crash/restart.
+ ###############################
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ PREPARE TRANSACTION 'test_prepared_tab';});
+
+ $node_subscriber->stop('immediate');
+ $node_publisher->stop('immediate');
+
+ $node_publisher->start;
+ $node_subscriber->start;
+
+ # We don't try to check the log for parallel option here as the subscriber
+ # may have stopped after finishing the prepare and before logging the
+ # appropriate message.
+
+	# commit after the restart
+ $node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
+ $node_publisher->wait_for_catchup($appname);
+
+ # check inserts are visible
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ is($result, qq(4|4|4),
+ 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults'
+ );
+
+ ###############################
+ # Do INSERT after the PREPARE but before ROLLBACK PREPARED.
+ # 1. Table is deleted back to 2 rows which are replicated on subscriber.
+ # 2. Data is streamed using 2PC.
+ # 3. A single row INSERT is done which is after the PREPARE.
+ # 4. Then do a ROLLBACK PREPARED.
+ #
+ # Expect the 2PC data rolls back leaving only 3 rows on the subscriber
+ # (the original 2 + inserted 1).
+ ###############################
+
+ # First, delete the data except for 2 rows (will be replicated)
+ $node_publisher->safe_psql('postgres',
+ "DELETE FROM test_tab WHERE a > 2;");
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ # Then insert, update and delete some rows.
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ PREPARE TRANSACTION 'test_prepared_tab';});
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'PREPARE');
+
+ # check that transaction is in prepared state on subscriber
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+ is($result, qq(1), 'transaction is prepared on subscriber');
+
+ # Insert a different record (now we are outside of the 2PC transaction)
+ # Note: the 2PC transaction still holds row locks so make sure this insert is for a separate primary key
+ $node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (99999, 'foobar')");
+
+ # 2PC transaction gets aborted
+ $node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab';");
+
+ $node_publisher->wait_for_catchup($appname);
+
+ # check that transaction is aborted on subscriber,
+ # but the extra INSERT outside of the 2PC still was replicated
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ is($result, qq(3|3|3),
+ 'check the outside insert was copied to subscriber');
+
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+ is($result, qq(0), 'transaction is aborted on subscriber');
+
+ ###############################
+ # Do INSERT after the PREPARE but before COMMIT PREPARED.
+ # 1. Table is deleted back to 2 rows which are replicated on subscriber.
+ # 2. Data is streamed using 2PC.
+ # 3. A single row INSERT is done which is after the PREPARE.
+ # 4. Then do a COMMIT PREPARED.
+ #
+	# Expect the 2PC data plus the extra row to be on the subscriber
+	# (2 original rows + 2 rows surviving the 2PC + 1 row inserted outside = 5).
+ ###############################
+
+ # First, delete the data except for 2 rows (will be replicated)
+ $node_publisher->safe_psql('postgres',
+ "DELETE FROM test_tab WHERE a > 2;");
+
+ # Check the subscriber log from now on.
+ $offset = -s $node_subscriber->logfile;
+
+ # Then insert, update and delete some rows.
+ $node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5) s(i);
+ UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
+ DELETE FROM test_tab WHERE mod(a,3) = 0;
+ PREPARE TRANSACTION 'test_prepared_tab';});
+
+ $node_publisher->wait_for_catchup($appname);
+
+ check_parallel_log($node_subscriber, $offset, $is_parallel, 'PREPARE');
+
+ # check that transaction is in prepared state on subscriber
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+ is($result, qq(1), 'transaction is prepared on subscriber');
+
+ # Insert a different record (now we are outside of the 2PC transaction)
+ # Note: the 2PC transaction still holds row locks so make sure this insert is for a separate primary key
+ $node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (99999, 'foobar')");
+
+ # 2PC transaction gets committed
+ $node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
+
+ $node_publisher->wait_for_catchup($appname);
+
+ # check that transaction is committed on subscriber
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ is($result, qq(5|5|5),
+ 'Rows inserted by 2PC (as well as outside insert) have committed on subscriber, and extra columns contain local defaults'
+ );
+
+ $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+ is($result, qq(0), 'transaction is committed on subscriber');
+
+ # Cleanup the test data
+ $node_publisher->safe_psql('postgres',
+ "DELETE FROM test_tab WHERE a > 2;");
+ $node_publisher->wait_for_catchup($appname);
+}
+
+###############################
+# Setup
+###############################
+
+# Initialize publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->append_conf(
+ 'postgresql.conf', qq(
+max_prepared_transactions = 10
+debug_logical_replication_streaming = immediate
+));
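+# With debug_logical_replication_streaming = immediate the publisher streams
+# each change as soon as it is decoded instead of waiting for
+# logical_decoding_work_mem to be exceeded, so even the small transactions in
+# this test exercise the streaming path.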
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->append_conf(
+ 'postgresql.conf', qq(
+max_prepared_transactions = 10
+));
+$node_subscriber->start;
+
+# Create some pre-existing content on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+$node_publisher->safe_psql('postgres', "CREATE TABLE test_tab_2 (a int)");
+
+# Setup structure on subscriber (columns a and b are compatible with same table name on publisher)
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
+$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab_2 (a int)");
+
+# Setup logical replication (streaming = on)
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab, test_tab_2");
+
+my $appname = 'tap_sub';
+
+################################
+# Test using streaming mode 'on'
+################################
+$node_subscriber->safe_psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION tap_sub
+ CONNECTION '$publisher_connstr application_name=$appname'
+ PUBLICATION tap_pub
+ WITH (streaming = on, two_phase = on)");
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# Also wait for two-phase to be enabled
+my $twophase_query =
+ "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+$node_subscriber->poll_query_until('postgres', $twophase_query)
+ or die "Timed out while waiting for subscriber to enable twophase";
+
+# Check initial data was copied to subscriber
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(2|2|2), 'check initial data was copied to subscriber');
+
+test_streaming($node_publisher, $node_subscriber, $appname, 0);
+
+######################################
+# Test using streaming mode 'parallel'
+######################################
+my $oldpid = $node_publisher->safe_psql('postgres',
+ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname' AND state = 'streaming';"
+);
+
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub SET(streaming = parallel)");
+
+$node_publisher->poll_query_until('postgres',
+ "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname' AND state = 'streaming';"
+ )
+ or die
+ "Timed out while waiting for apply to restart after changing SUBSCRIPTION";
+
+# We need to check DEBUG logs to ensure that the parallel apply worker has
+# applied the transaction. So, bump up the log verbosity.
+$node_subscriber->append_conf('postgresql.conf', "log_min_messages = debug1");
+$node_subscriber->reload;
+
+# Run a query to make sure that the reload has taken effect.
+$node_subscriber->safe_psql('postgres', q{SELECT 1});
+
+test_streaming($node_publisher, $node_subscriber, $appname, 1);
+
+# Test serializing changes to files and notify the parallel apply worker to
+# apply them at the end of the transaction.
+$node_subscriber->append_conf('postgresql.conf',
+ 'debug_logical_replication_streaming = immediate');
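+# With this setting the leader apply worker serializes the streamed changes to
+# files rather than sending them directly to the parallel apply worker; the
+# wait_for_log check below confirms that this path is taken.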
+# Reset the log_min_messages to default.
+$node_subscriber->append_conf('postgresql.conf',
+ "log_min_messages = warning");
+$node_subscriber->reload;
+
+# Run a query to make sure that the reload has taken effect.
+$node_subscriber->safe_psql('postgres', q{SELECT 1});
+
+my $offset = -s $node_subscriber->logfile;
+
+$node_publisher->safe_psql(
+ 'postgres', q{
+ BEGIN;
+ INSERT INTO test_tab_2 values(1);
+ PREPARE TRANSACTION 'xact';
+ });
+
+# Ensure that the changes are serialized.
+$node_subscriber->wait_for_log(
+ qr/LOG: ( [A-Z0-9]+:)? logical replication apply worker will serialize the remaining changes of remote transaction \d+ to a file/,
+ $offset);
+
+$node_publisher->wait_for_catchup($appname);
+
+# Check that transaction is in prepared state on subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(1), 'transaction is prepared on subscriber');
+
+# Check that 2PC gets committed on subscriber
+$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'xact';");
+
+$node_publisher->wait_for_catchup($appname);
+
+# Check that transaction is committed on subscriber
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_tab_2");
+is($result, qq(1), 'transaction is committed on subscriber');
+
+###############################
+# check all the cleanup
+###############################
+
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
+is($result, qq(0), 'check subscription was dropped on subscriber');
+
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
+is($result, qq(0), 'check replication slot was dropped on publisher');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0), 'check replication origin was dropped on subscriber');
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/024_add_drop_pub.pl b/src/test/subscription/t/024_add_drop_pub.pl
new file mode 100644
index 0000000..8614b1b
--- /dev/null
+++ b/src/test/subscription/t/024_add_drop_pub.pl
@@ -0,0 +1,87 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# This test checks behaviour of ALTER SUBSCRIPTION ... ADD/DROP PUBLICATION
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Initialize publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Create table on publisher
+$node_publisher->safe_psql('postgres', "CREATE TABLE tab_1 (a int)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_1 SELECT generate_series(1,10)");
+
+# Create table on subscriber
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_1 (a int)");
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_1 FOR TABLE tab_1");
+$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub_2");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub_1, tap_pub_2"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
+
+# Check the initial data of tab_1 is copied to subscriber
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_1");
+is($result, qq(10|1|10), 'check initial data is copied to subscriber');
+
+# Create a new table on publisher
+$node_publisher->safe_psql('postgres', "CREATE TABLE tab_2 (a int)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_2 SELECT generate_series(1,10)");
+
+# Create a new table on subscriber
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_2 (a int)");
+
+# Add the table to publication
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_2 ADD TABLE tab_2");
+
+# Dropping tap_pub_1 will refresh the entire publication list
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub DROP PUBLICATION tap_pub_1");
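+# (ALTER SUBSCRIPTION ... ADD/DROP PUBLICATION refreshes the subscription by
+# default, so the newly published tab_2 starts its initial sync here.)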
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
+
+# Check the initial data of tab_2 was copied to subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_2");
+is($result, qq(10|1|10), 'check initial data is copied to subscriber');
+
+# Re-adding tap_pub_1 will refresh the entire publication list
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub ADD PUBLICATION tap_pub_1");
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
+
+# Check the initial data of tab_1 was copied to subscriber again
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_1");
+is($result, qq(20|1|10), 'check initial data is copied to subscriber');
+
+# shutdown
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/025_rep_changes_for_schema.pl b/src/test/subscription/t/025_rep_changes_for_schema.pl
new file mode 100644
index 0000000..8543f52
--- /dev/null
+++ b/src/test/subscription/t/025_rep_changes_for_schema.pl
@@ -0,0 +1,207 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Logical replication tests for schema publications
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Initialize publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+# Test replication with publications created using FOR TABLES IN SCHEMA
+# option.
+# Create schemas and tables on publisher
+$node_publisher->safe_psql('postgres', "CREATE SCHEMA sch1");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE sch1.tab1 AS SELECT generate_series(1,10) AS a");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE sch1.tab2 AS SELECT generate_series(1,10) AS a");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)"
+);
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO sch1.tab1_parent values (1),(4)");
+
+# Create schemas and tables on subscriber
+$node_subscriber->safe_psql('postgres', "CREATE SCHEMA sch1");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE sch1.tab1 (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE sch1.tab2 (a int)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)"
+);
+
+# Setup logical replication
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_schema FOR TABLES IN SCHEMA sch1");
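+# A FOR TABLES IN SCHEMA publication also covers tables created in the schema
+# later, but the subscriber only starts syncing them after
+# ALTER SUBSCRIPTION ... REFRESH PUBLICATION, which is exercised further down.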
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub_schema CONNECTION '$publisher_connstr' PUBLICATION tap_pub_schema"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher,
+ 'tap_sub_schema');
+
+# Check the schema table data is synced up
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM sch1.tab1");
+is($result, qq(10|1|10), 'check rows on subscriber catchup');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM sch1.tab2");
+is($result, qq(10|1|10), 'check rows on subscriber catchup');
+
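+# Only column a was inserted for the partitioned table, so column b is NULL
+# and each row is printed as 'a|'.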
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT * FROM sch1.tab1_parent order by 1");
+is( $result, qq(1|
+4|), 'check rows on subscriber catchup');
+
+# Insert some data into a few tables and verify that it is replicated
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO sch1.tab1 VALUES(generate_series(11,20))");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO sch1.tab1_parent values (2),(5)");
+
+$node_publisher->wait_for_catchup('tap_sub_schema');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM sch1.tab1");
+is($result, qq(20|1|20), 'check replicated inserts on subscriber');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT * FROM sch1.tab1_parent order by 1");
+is( $result, qq(1|
+2|
+4|
+5|), 'check replicated inserts on subscriber');
+
+# Create a new table in the published schema and verify that the subscriber
+# does not get the new table's data before a refresh.
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE sch1.tab3 AS SELECT generate_series(1,10) AS a");
+
+$node_subscriber->safe_psql('postgres', "CREATE TABLE sch1.tab3(a int)");
+
+$node_publisher->wait_for_catchup('tap_sub_schema');
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM sch1.tab3");
+is($result, qq(0), 'check replicated inserts on subscriber');
+
+# Table data should be replicated after refreshing the publication on the
+# subscriber.
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub_schema REFRESH PUBLICATION");
+
+# Wait for sync to finish
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql('postgres', "INSERT INTO sch1.tab3 VALUES(11)");
+
+$node_publisher->wait_for_catchup('tap_sub_schema');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM sch1.tab3");
+is($result, qq(11|1|11), 'check rows on subscriber catchup');
+
+# Move a table from the published schema to a non-published schema and verify
+# that data inserted afterwards is not replicated to the subscriber.
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE sch1.tab3 SET SCHEMA public");
+$node_publisher->safe_psql('postgres', "INSERT INTO public.tab3 VALUES(12)");
+
+$node_publisher->wait_for_catchup('tap_sub_schema');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM sch1.tab3");
+is($result, qq(11|1|11), 'check replicated inserts on subscriber');
+
+# Verify that the subscription relation list is updated after refresh
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel WHERE srsubid IN (SELECT oid FROM pg_subscription WHERE subname = 'tap_sub_schema')"
+);
+is($result, qq(5),
+ 'check subscription relation status is not yet dropped on subscriber');
+
+# Ask for data sync
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub_schema REFRESH PUBLICATION");
+
+# Wait for sync to finish
+$node_subscriber->wait_for_subscription_sync;
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel WHERE srsubid IN (SELECT oid FROM pg_subscription WHERE subname = 'tap_sub_schema')"
+);
+is($result, qq(4),
+ 'check subscription relation status was dropped on subscriber');
+
+# Drop table from the publication schema, verify that subscriber removes the
+# table entry after refresh.
+$node_publisher->safe_psql('postgres', "DROP TABLE sch1.tab2");
+$node_publisher->wait_for_catchup('tap_sub_schema');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel WHERE srsubid IN (SELECT oid FROM pg_subscription WHERE subname = 'tap_sub_schema')"
+);
+is($result, qq(4),
+ 'check subscription relation status is not yet dropped on subscriber');
+
+# The table should be removed from pg_subscription_rel after refreshing the
+# publication on the subscriber.
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub_schema REFRESH PUBLICATION");
+
+# Wait for sync to finish
+$node_subscriber->wait_for_subscription_sync;
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel WHERE srsubid IN (SELECT oid FROM pg_subscription WHERE subname = 'tap_sub_schema')"
+);
+is($result, qq(3),
+ 'check subscription relation status was dropped on subscriber');
+
+# Drop the schema from the publication and verify that inserts made after the
+# schema is dropped are not published. Here the 2nd insert should not be
+# published.
+$node_publisher->safe_psql(
+ 'postgres', "
+ INSERT INTO sch1.tab1 VALUES(21);
+ ALTER PUBLICATION tap_pub_schema DROP TABLES IN SCHEMA sch1;
+ INSERT INTO sch1.tab1 values(22);"
+);
+
+$node_publisher->wait_for_catchup('tap_sub_schema');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM sch1.tab1");
+is($result, qq(21|1|21), 'check replicated inserts on subscriber');
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/026_stats.pl b/src/test/subscription/t/026_stats.pl
new file mode 100644
index 0000000..a033588
--- /dev/null
+++ b/src/test/subscription/t/026_stats.pl
@@ -0,0 +1,302 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Tests for subscription stats.
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Create publisher node.
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# Create subscriber node.
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+
+sub create_sub_pub_w_errors
+{
+ my ($node_publisher, $node_subscriber, $db, $table_name) = @_;
+	# Initial table setup on both publisher and subscriber. On the subscriber
+	# we create the same table but with a primary key. Also, insert some data
+	# that will conflict with the data replicated from the publisher later.
+ $node_publisher->safe_psql(
+ $db,
+ qq[
+ BEGIN;
+ CREATE TABLE $table_name(a int);
+ INSERT INTO $table_name VALUES (1);
+ COMMIT;
+ ]);
+ $node_subscriber->safe_psql(
+ $db,
+ qq[
+ BEGIN;
+ CREATE TABLE $table_name(a int primary key);
+ INSERT INTO $table_name VALUES (1);
+ COMMIT;
+ ]);
+
+ # Set up publication.
+ my $pub_name = $table_name . '_pub';
+ my $publisher_connstr = $node_publisher->connstr . qq( dbname=$db);
+
+ $node_publisher->safe_psql($db,
+ qq(CREATE PUBLICATION $pub_name FOR TABLE $table_name));
+
+	# Create subscription. The tablesync worker for the table will enter an
+	# infinite error loop because the copied data violates the unique
+	# constraint on the subscriber.
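+	# (The initial COPY keeps hitting the pre-existing conflicting row on the
+	# subscriber, so the sync worker fails repeatedly until that row is
+	# removed below.)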
+ my $sub_name = $table_name . '_sub';
+ $node_subscriber->safe_psql($db,
+ qq(CREATE SUBSCRIPTION $sub_name CONNECTION '$publisher_connstr' PUBLICATION $pub_name)
+ );
+
+ $node_publisher->wait_for_catchup($sub_name);
+
+ # Wait for the tablesync error to be reported.
+ $node_subscriber->poll_query_until(
+ $db,
+ qq[
+ SELECT sync_error_count > 0
+ FROM pg_stat_subscription_stats
+ WHERE subname = '$sub_name'
+ ])
+ or die
+ qq(Timed out while waiting for tablesync errors for subscription '$sub_name');
+
+	# Truncate the test table so that the tablesync worker can continue.
+ $node_subscriber->safe_psql($db, qq(TRUNCATE $table_name));
+
+ # Wait for initial tablesync to finish.
+ $node_subscriber->poll_query_until(
+ $db,
+ qq[
+ SELECT count(1) = 1 FROM pg_subscription_rel
+ WHERE srrelid = '$table_name'::regclass AND srsubstate in ('r', 's')
+ ])
+ or die
+ qq(Timed out while waiting for subscriber to synchronize data for table '$table_name'.);
+
+ # Check test table on the subscriber has one row.
+ my $result =
+ $node_subscriber->safe_psql($db, qq(SELECT a FROM $table_name));
+ is($result, qq(1), qq(Check that table '$table_name' now has 1 row.));
+
+	# Insert data into the test table on the publisher, raising an error on the
+	# subscriber due to a violation of the unique constraint on the test table.
+ $node_publisher->safe_psql($db, qq(INSERT INTO $table_name VALUES (1)));
+
+ # Wait for the apply error to be reported.
+ $node_subscriber->poll_query_until(
+ $db,
+ qq[
+ SELECT apply_error_count > 0
+ FROM pg_stat_subscription_stats
+ WHERE subname = '$sub_name'
+ ])
+ or die
+ qq(Timed out while waiting for apply error for subscription '$sub_name');
+
+	# Truncate the test table so that the apply worker can continue.
+ $node_subscriber->safe_psql($db, qq(TRUNCATE $table_name));
+
+ return ($pub_name, $sub_name);
+}
+
+my $db = 'postgres';
+
+# There shouldn't be any subscription errors before starting logical replication.
+my $result = $node_subscriber->safe_psql($db,
+ qq(SELECT count(1) FROM pg_stat_subscription_stats));
+is($result, qq(0),
+ 'Check that there are no subscription errors before starting logical replication.'
+);
+
+# Create the publication and subscription with sync and apply errors
+my $table1_name = 'test_tab1';
+my ($pub1_name, $sub1_name) =
+ create_sub_pub_w_errors($node_publisher, $node_subscriber, $db,
+ $table1_name);
+
+# Apply and Sync errors are > 0 and reset timestamp is NULL
+is( $node_subscriber->safe_psql(
+ $db,
+ qq(SELECT apply_error_count > 0,
+ sync_error_count > 0,
+ stats_reset IS NULL
+ FROM pg_stat_subscription_stats
+ WHERE subname = '$sub1_name')
+ ),
+ qq(t|t|t),
+ qq(Check that apply errors and sync errors are both > 0 and stats_reset is NULL for subscription '$sub1_name'.)
+);
+
+# Reset a single subscription
+$node_subscriber->safe_psql($db,
+ qq(SELECT pg_stat_reset_subscription_stats((SELECT subid FROM pg_stat_subscription_stats WHERE subname = '$sub1_name')))
+);
+
+# Apply and Sync errors are 0 and stats reset is not NULL
+is( $node_subscriber->safe_psql(
+ $db,
+ qq(SELECT apply_error_count = 0,
+ sync_error_count = 0,
+ stats_reset IS NOT NULL
+ FROM pg_stat_subscription_stats
+ WHERE subname = '$sub1_name')
+ ),
+ qq(t|t|t),
+ qq(Confirm that apply errors and sync errors are both 0 and stats_reset is not NULL after reset for subscription '$sub1_name'.)
+);
+
+# Get reset timestamp
+my $reset_time1 = $node_subscriber->safe_psql($db,
+ qq(SELECT stats_reset FROM pg_stat_subscription_stats WHERE subname = '$sub1_name')
+);
+
+# Reset single sub again
+$node_subscriber->safe_psql(
+ $db,
+ qq(SELECT pg_stat_reset_subscription_stats((SELECT subid FROM
+ pg_stat_subscription_stats WHERE subname = '$sub1_name')))
+);
+
+# check reset timestamp is newer after reset
+is( $node_subscriber->safe_psql(
+ $db,
+ qq(SELECT stats_reset > '$reset_time1'::timestamptz FROM
+ pg_stat_subscription_stats WHERE subname = '$sub1_name')
+ ),
+ qq(t),
+ qq(Check reset timestamp for '$sub1_name' is newer after second reset.));
+
+# Make second subscription and publication
+my $table2_name = 'test_tab2';
+my ($pub2_name, $sub2_name) =
+ create_sub_pub_w_errors($node_publisher, $node_subscriber, $db,
+ $table2_name);
+
+# Apply and Sync errors are > 0 and reset timestamp is NULL
+is( $node_subscriber->safe_psql(
+ $db,
+ qq(SELECT apply_error_count > 0,
+ sync_error_count > 0,
+ stats_reset IS NULL
+ FROM pg_stat_subscription_stats
+ WHERE subname = '$sub2_name')
+ ),
+ qq(t|t|t),
+ qq(Confirm that apply errors and sync errors are both > 0 and stats_reset is NULL for sub '$sub2_name'.)
+);
+
+# Reset all subscriptions
+$node_subscriber->safe_psql($db,
+ qq(SELECT pg_stat_reset_subscription_stats(NULL)));
+
+# Apply and Sync errors are 0 and stats reset is not NULL
+is( $node_subscriber->safe_psql(
+ $db,
+ qq(SELECT apply_error_count = 0,
+ sync_error_count = 0,
+ stats_reset IS NOT NULL
+ FROM pg_stat_subscription_stats
+ WHERE subname = '$sub1_name')
+ ),
+ qq(t|t|t),
+ qq(Confirm that apply errors and sync errors are both 0 and stats_reset is not NULL for sub '$sub1_name' after reset.)
+);
+
+is( $node_subscriber->safe_psql(
+ $db,
+ qq(SELECT apply_error_count = 0,
+ sync_error_count = 0,
+ stats_reset IS NOT NULL
+ FROM pg_stat_subscription_stats
+ WHERE subname = '$sub2_name')
+ ),
+ qq(t|t|t),
+ qq(Confirm that apply errors and sync errors are both 0 and stats_reset is not NULL for sub '$sub2_name' after reset.)
+);
+
+$reset_time1 = $node_subscriber->safe_psql($db,
+ qq(SELECT stats_reset FROM pg_stat_subscription_stats WHERE subname = '$sub1_name')
+);
+my $reset_time2 = $node_subscriber->safe_psql($db,
+ qq(SELECT stats_reset FROM pg_stat_subscription_stats WHERE subname = '$sub2_name')
+);
+
+# Reset all subscriptions
+$node_subscriber->safe_psql($db,
+ qq(SELECT pg_stat_reset_subscription_stats(NULL)));
+
+# check reset timestamp for sub1 is newer after reset
+is( $node_subscriber->safe_psql(
+ $db,
+ qq(SELECT stats_reset > '$reset_time1'::timestamptz FROM
+ pg_stat_subscription_stats WHERE subname = '$sub1_name')
+ ),
+ qq(t),
+ qq(Confirm that reset timestamp for '$sub1_name' is newer after second reset.)
+);
+
+# check reset timestamp for sub2 is newer after reset
+is( $node_subscriber->safe_psql(
+ $db,
+ qq(SELECT stats_reset > '$reset_time2'::timestamptz FROM
+ pg_stat_subscription_stats WHERE subname = '$sub2_name')
+ ),
+ qq(t),
+ qq(Confirm that reset timestamp for '$sub2_name' is newer after second reset.)
+);
+
+# Get subscription 1 oid
+my $sub1_oid = $node_subscriber->safe_psql($db,
+ qq(SELECT oid FROM pg_subscription WHERE subname = '$sub1_name'));
+
+# Drop subscription 1
+$node_subscriber->safe_psql($db, qq(DROP SUBSCRIPTION $sub1_name));
+
+# Subscription stats for sub1 should be gone
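+# (The second argument of pg_stat_have_stats() is a database OID; subscription
+# statistics are not tied to a particular database, hence the 0.)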
+is( $node_subscriber->safe_psql(
+ $db, qq(SELECT pg_stat_have_stats('subscription', 0, $sub1_oid))),
+ qq(f),
+ qq(Subscription stats for subscription '$sub1_name' should be removed.));
+
+# Get subscription 2 oid
+my $sub2_oid = $node_subscriber->safe_psql($db,
+ qq(SELECT oid FROM pg_subscription WHERE subname = '$sub2_name'));
+
+# Dissociate subscription 2 from its replication slot and drop it
+$node_subscriber->safe_psql(
+ $db,
+ qq(
+ALTER SUBSCRIPTION $sub2_name DISABLE;
+ALTER SUBSCRIPTION $sub2_name SET (slot_name = NONE);
+DROP SUBSCRIPTION $sub2_name;
+ ));
+
+# Subscription stats for sub2 should be gone
+is( $node_subscriber->safe_psql(
+ $db, qq(SELECT pg_stat_have_stats('subscription', 0, $sub2_oid))),
+ qq(f),
+ qq(Subscription stats for subscription '$sub2_name' should be removed.));
+
+# Since disabling a subscription doesn't wait for the walsender to release the
+# replication slot and exit, wait for the slot to become inactive.
+$node_publisher->poll_query_until(
+ $db,
+ qq(SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = '$sub2_name' AND active_pid IS NULL))
+) or die "slot never became inactive";
+
+$node_publisher->safe_psql($db,
+ qq(SELECT pg_drop_replication_slot('$sub2_name')));
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/027_nosuperuser.pl b/src/test/subscription/t/027_nosuperuser.pl
new file mode 100644
index 0000000..1436cf7
--- /dev/null
+++ b/src/test/subscription/t/027_nosuperuser.pl
@@ -0,0 +1,397 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test that logical replication respects permissions
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+my ($node_publisher, $node_subscriber, $publisher_connstr, $result, $offset);
+$offset = 0;
+
+sub publish_insert
+{
+ my ($tbl, $new_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
+ SET SESSION AUTHORIZATION regress_alice;
+ INSERT INTO $tbl (i) VALUES ($new_i);
+ ));
+}
+
+sub publish_update
+{
+ my ($tbl, $old_i, $new_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
+ SET SESSION AUTHORIZATION regress_alice;
+ UPDATE $tbl SET i = $new_i WHERE i = $old_i;
+ ));
+}
+
+sub publish_delete
+{
+ my ($tbl, $old_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
+ SET SESSION AUTHORIZATION regress_alice;
+ DELETE FROM $tbl WHERE i = $old_i;
+ ));
+}
+
+sub expect_replication
+{
+ my ($tbl, $cnt, $min, $max, $testname) = @_;
+ $node_publisher->wait_for_catchup('admin_sub');
+ $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
+ SELECT COUNT(i), MIN(i), MAX(i) FROM $tbl));
+ is($result, "$cnt|$min|$max", $testname);
+}
+
+sub expect_failure
+{
+ my ($tbl, $cnt, $min, $max, $re, $testname) = @_;
+ $offset = $node_subscriber->wait_for_log($re, $offset);
+ $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
+ SELECT COUNT(i), MIN(i), MAX(i) FROM $tbl));
+ is($result, "$cnt|$min|$max", $testname);
+}
+
+sub revoke_superuser
+{
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER ROLE $role NOSUPERUSER));
+}
+
+sub grant_superuser
+{
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER ROLE $role SUPERUSER));
+}
+
+# Create publisher and subscriber nodes with schemas owned and published by
+# "regress_alice" but subscribed and replicated by a different role,
+# "regress_admin". For partitioned tables, lay out the partitions differently
+# on the publisher than on the subscriber.
+#
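+# (With the hash remainders swapped between the nodes, a row that falls into
+# hashpart_a on the publisher belongs in hashpart_b on the subscriber, so
+# changes are published via the partition root and re-routed on apply.)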
+$node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_publisher->init(allows_streaming => 'logical');
+$node_subscriber->init;
+$node_publisher->start;
+$node_subscriber->start;
+$publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+my %remainder_a = (
+ publisher => 0,
+ subscriber => 1);
+my %remainder_b = (
+ publisher => 1,
+ subscriber => 0);
+
+for my $node ($node_publisher, $node_subscriber)
+{
+ my $remainder_a = $remainder_a{ $node->name };
+ my $remainder_b = $remainder_b{ $node->name };
+ $node->safe_psql(
+ 'postgres', qq(
+ CREATE ROLE regress_admin SUPERUSER LOGIN;
+ CREATE ROLE regress_alice NOSUPERUSER LOGIN;
+ GRANT CREATE ON DATABASE postgres TO regress_alice;
+ SET SESSION AUTHORIZATION regress_alice;
+ CREATE SCHEMA alice;
+ GRANT USAGE ON SCHEMA alice TO regress_admin;
+
+ CREATE TABLE alice.unpartitioned (i INTEGER);
+ ALTER TABLE alice.unpartitioned REPLICA IDENTITY FULL;
+ GRANT SELECT ON TABLE alice.unpartitioned TO regress_admin;
+
+ CREATE TABLE alice.hashpart (i INTEGER) PARTITION BY HASH (i);
+ ALTER TABLE alice.hashpart REPLICA IDENTITY FULL;
+ GRANT SELECT ON TABLE alice.hashpart TO regress_admin;
+ CREATE TABLE alice.hashpart_a PARTITION OF alice.hashpart
+ FOR VALUES WITH (MODULUS 2, REMAINDER $remainder_a);
+ ALTER TABLE alice.hashpart_a REPLICA IDENTITY FULL;
+ CREATE TABLE alice.hashpart_b PARTITION OF alice.hashpart
+ FOR VALUES WITH (MODULUS 2, REMAINDER $remainder_b);
+ ALTER TABLE alice.hashpart_b REPLICA IDENTITY FULL;
+ ));
+}
+$node_publisher->safe_psql(
+ 'postgres', qq(
+SET SESSION AUTHORIZATION regress_alice;
+
+CREATE PUBLICATION alice
+ FOR TABLE alice.unpartitioned, alice.hashpart
+ WITH (publish_via_partition_root = true);
+));
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+SET SESSION AUTHORIZATION regress_admin;
+CREATE SUBSCRIPTION admin_sub CONNECTION '$publisher_connstr' PUBLICATION alice WITH (password_required=false);
+));
+
+# Wait for initial sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'admin_sub');
+
+# Verify that "regress_admin" can replicate into the tables
+#
+publish_insert("alice.unpartitioned", 1);
+publish_insert("alice.unpartitioned", 3);
+publish_insert("alice.unpartitioned", 5);
+publish_update("alice.unpartitioned", 1 => 7);
+publish_delete("alice.unpartitioned", 3);
+expect_replication("alice.unpartitioned", 2, 5, 7,
+ "superuser admin replicates into unpartitioned");
+
+# Revoke and restore superuser privilege for "regress_admin",
+# verifying that replication fails while superuser privilege is
+# missing, but works again and catches up once superuser is restored.
+#
+revoke_superuser("regress_admin");
+publish_update("alice.unpartitioned", 5 => 9);
+expect_failure(
+ "alice.unpartitioned",
+ 2,
+ 5,
+ 7,
+ qr/ERROR: ( [A-Z0-9]+:)? role "regress_admin" cannot SET ROLE to "regress_alice"/msi,
+ "non-superuser admin fails to replicate update");
+grant_superuser("regress_admin");
+expect_replication("alice.unpartitioned", 2, 7, 9,
+ "admin with restored superuser privilege replicates update");
+
+# Privileges on the target role suffice for non-superuser replication.
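+# (By default the apply worker switches to the role owning the target table
+# before applying changes, so membership in regress_alice is enough for
+# regress_admin to SET ROLE to it.)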
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ALTER ROLE regress_admin NOSUPERUSER;
+GRANT regress_alice TO regress_admin;
+));
+
+publish_insert("alice.unpartitioned", 11);
+expect_replication("alice.unpartitioned", 3, 7, 11,
+ "nosuperuser admin with privileges on role can replicate INSERT into unpartitioned"
+);
+
+publish_update("alice.unpartitioned", 7 => 13);
+expect_replication("alice.unpartitioned", 3, 9, 13,
+ "nosuperuser admin with privileges on role can replicate UPDATE into unpartitioned"
+);
+
+publish_delete("alice.unpartitioned", 9);
+expect_replication("alice.unpartitioned", 2, 11, 13,
+ "nosuperuser admin with privileges on role can replicate DELETE into unpartitioned"
+);
+
+# Test partitioning
+#
+publish_insert("alice.hashpart", 101);
+publish_insert("alice.hashpart", 102);
+publish_insert("alice.hashpart", 103);
+publish_update("alice.hashpart", 102 => 120);
+publish_delete("alice.hashpart", 101);
+expect_replication("alice.hashpart", 2, 103, 120,
+ "nosuperuser admin with privileges on role can replicate into hashpart");
+
+# Force RLS on the target table and check that replication fails.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+SET SESSION AUTHORIZATION regress_alice;
+ALTER TABLE alice.unpartitioned ENABLE ROW LEVEL SECURITY;
+ALTER TABLE alice.unpartitioned FORCE ROW LEVEL SECURITY;
+));
+
+publish_insert("alice.unpartitioned", 15);
+expect_failure(
+ "alice.unpartitioned",
+ 2,
+ 11,
+ 13,
+ qr/ERROR: ( [A-Z0-9]+:)? user "regress_alice" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+ "replication of insert into table with forced rls fails");
+
+# Since replication acts as the table owner, replication will succeed if RLS is not forced.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ALTER TABLE alice.unpartitioned NO FORCE ROW LEVEL SECURITY;
+));
+expect_replication("alice.unpartitioned", 3, 11, 15,
+ "non-superuser admin can replicate insert if rls is not forced");
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ALTER TABLE alice.unpartitioned FORCE ROW LEVEL SECURITY;
+));
+publish_update("alice.unpartitioned", 11 => 17);
+expect_failure(
+ "alice.unpartitioned",
+ 3,
+ 11,
+ 15,
+ qr/ERROR: ( [A-Z0-9]+:)? user "regress_alice" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+ "replication of update into table with forced rls fails");
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ALTER TABLE alice.unpartitioned NO FORCE ROW LEVEL SECURITY;
+));
+expect_replication("alice.unpartitioned", 3, 13, 17,
+ "non-superuser admin can replicate update if rls is not forced");
+
+# Remove some of alice's privileges on her own table. Then replication should fail.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+REVOKE SELECT, INSERT ON alice.unpartitioned FROM regress_alice;
+));
+publish_insert("alice.unpartitioned", 19);
+expect_failure(
+ "alice.unpartitioned",
+ 3,
+ 13,
+ 17,
+ qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
+ "replication of insert fails if table owner lacks insert permission");
+
+# alice needs INSERT but not SELECT to replicate an INSERT.
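+# (Applying an INSERT only writes a new row; the UPDATE and DELETE cases below
+# additionally need SELECT to locate the existing row.)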
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+GRANT INSERT ON alice.unpartitioned TO regress_alice;
+));
+expect_replication("alice.unpartitioned", 4, 13, 19,
+ "restoring insert permission permits replication to continue");
+
+# Now let's try an UPDATE and a DELETE.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+REVOKE UPDATE, DELETE ON alice.unpartitioned FROM regress_alice;
+));
+publish_update("alice.unpartitioned", 13 => 21);
+publish_delete("alice.unpartitioned", 15);
+expect_failure(
+ "alice.unpartitioned",
+ 4,
+ 13,
+ 19,
+ qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
+ "replication of update/delete fails if table owner lacks corresponding permission"
+);
+
+# Restoring UPDATE and DELETE is insufficient.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+GRANT UPDATE, DELETE ON alice.unpartitioned TO regress_alice;
+));
+expect_failure(
+ "alice.unpartitioned",
+ 4,
+ 13,
+ 19,
+ qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
+ "replication of update/delete fails if table owner lacks SELECT permission"
+);
+
+# Restoring SELECT as well allows the pending UPDATE and DELETE to be
+# replicated, since modifying existing rows also requires read access.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+GRANT SELECT ON alice.unpartitioned TO regress_alice;
+));
+expect_replication("alice.unpartitioned", 3, 17, 21,
+ "restoring SELECT permission permits replication to continue");
+
+# If the subscription connection requires a password ('password_required'
+# is true) then a non-superuser must specify that password in the connection
+# string.
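+# (password_required defaults to true; the admin_sub subscription above was
+# created with password_required = false, which is why its non-superuser owner
+# did not need one.)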
+SKIP:
+{
+ skip
+ "subscription password_required test cannot run without Unix-domain sockets",
+ 3
+ unless $use_unix_sockets;
+
+ my $node_publisher1 = PostgreSQL::Test::Cluster->new('publisher1');
+ my $node_subscriber1 = PostgreSQL::Test::Cluster->new('subscriber1');
+ $node_publisher1->init(allows_streaming => 'logical');
+ $node_subscriber1->init;
+ $node_publisher1->start;
+ $node_subscriber1->start;
+ my $publisher_connstr1 =
+ $node_publisher1->connstr . ' user=regress_test_user dbname=postgres';
+ my $publisher_connstr2 =
+ $node_publisher1->connstr
+ . ' user=regress_test_user dbname=postgres password=secret';
+
+ for my $node ($node_publisher1, $node_subscriber1)
+ {
+ $node->safe_psql(
+ 'postgres', qq(
+ CREATE ROLE regress_test_user PASSWORD 'secret' LOGIN REPLICATION;
+ GRANT CREATE ON DATABASE postgres TO regress_test_user;
+ GRANT PG_CREATE_SUBSCRIPTION TO regress_test_user;
+ ));
+ }
+
+ $node_publisher1->safe_psql(
+ 'postgres', qq(
+ SET SESSION AUTHORIZATION regress_test_user;
+ CREATE PUBLICATION regress_test_pub;
+ ));
+ $node_subscriber1->safe_psql(
+ 'postgres', qq(
+ CREATE SUBSCRIPTION regress_test_sub CONNECTION '$publisher_connstr1' PUBLICATION regress_test_pub;
+ ));
+
+ # Wait for initial sync to finish
+ $node_subscriber1->wait_for_subscription_sync($node_publisher1,
+ 'regress_test_sub');
+
+ my $save_pgpassword = $ENV{"PGPASSWORD"};
+ $ENV{"PGPASSWORD"} = 'secret';
+
+	# Set up the pg_hba configuration so that logical replication connections
+	# without a password are not allowed.
+ unlink($node_publisher1->data_dir . '/pg_hba.conf');
+ $node_publisher1->append_conf('pg_hba.conf',
+ qq{local all regress_test_user md5});
+ $node_publisher1->reload;
+
+ # Change the subscription owner to a non-superuser
+ $node_subscriber1->safe_psql(
+ 'postgres', qq(
+ ALTER SUBSCRIPTION regress_test_sub OWNER TO regress_test_user;
+ ));
+
+ # Non-superuser must specify password in the connection string
+ my ($ret, $stdout, $stderr) = $node_subscriber1->psql(
+ 'postgres', qq(
+ SET SESSION AUTHORIZATION regress_test_user;
+ ALTER SUBSCRIPTION regress_test_sub REFRESH PUBLICATION;
+ ));
+	isnt($ret, 0,
+		"non-zero exit when a non-superuser subscription owner refreshes without specifying a password in the connection string"
+	);
+ ok( $stderr =~
+ m/DETAIL: Non-superusers must provide a password in the connection string./,
+ 'subscription whose owner is a non-superuser must specify password parameter of the connection string'
+ );
+
+ $ENV{"PGPASSWORD"} = $save_pgpassword;
+
+ # It should succeed after including the password parameter of the connection
+ # string.
+ ($ret, $stdout, $stderr) = $node_subscriber1->psql(
+ 'postgres', qq(
+ SET SESSION AUTHORIZATION regress_test_user;
+ ALTER SUBSCRIPTION regress_test_sub CONNECTION '$publisher_connstr2';
+ ALTER SUBSCRIPTION regress_test_sub REFRESH PUBLICATION;
+ ));
+ is($ret, 0,
+ "Non-superuser will be able to refresh the publication after specifying the password parameter of the connection string"
+ );
+}
+done_testing();
diff --git a/src/test/subscription/t/028_row_filter.pl b/src/test/subscription/t/028_row_filter.pl
new file mode 100644
index 0000000..aec483f
--- /dev/null
+++ b/src/test/subscription/t/028_row_filter.pl
@@ -0,0 +1,769 @@
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test logical replication behavior with row filtering
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+my $appname = 'tap_sub';
+
+# ====================================================================
+# Testcase start: FOR ALL TABLES
+#
+# The FOR ALL TABLES test must come first so that it is not affected by
+# all the other test tables that are later created.
+
+# create tables pub and sub
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rf_x (x int primary key)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rf_x (x int primary key)");
+
+# insert some initial data
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rf_x (x) VALUES (0), (5), (10), (15), (20)");
+
+# create pub/sub
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_x FOR TABLE tab_rf_x WHERE (x > 10)");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_forall FOR ALL TABLES");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_x, tap_pub_forall"
+);
+
+# wait for initial table synchronization to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# The subscription of the FOR ALL TABLES publication means there should be no
+# filtering on the tablesync COPY, so expect all 5 rows to be present.
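+# (Because tap_pub_forall publishes tab_rf_x without any row filter and filters
+# from different publications are OR'ed together, the WHERE (x > 10) filter of
+# tap_pub_x has no effect here.)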
+my $result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(x) FROM tab_rf_x");
+is($result, qq(5),
+ 'check initial data copy from table tab_rf_x should not be filtered');
+
+# Similarly, the row filter for tab_rf_x (after the initial phase) has no
+# effect when combined with the FOR ALL TABLES publication.
+# Expected: 5 initial rows + 2 new rows = 7 rows
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rf_x (x) VALUES (-99), (99)");
+$node_publisher->wait_for_catchup($appname);
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(x) FROM tab_rf_x");
+is($result, qq(7), 'check table tab_rf_x should not be filtered');
+
+# cleanup pub
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_forall");
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_x");
+$node_publisher->safe_psql('postgres', "DROP TABLE tab_rf_x");
+# cleanup sub
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
+$node_subscriber->safe_psql('postgres', "DROP TABLE tab_rf_x");
+
+# Testcase end: FOR ALL TABLES
+# ====================================================================
+
+# ====================================================================
+# Testcase start: TABLES IN SCHEMA
+#
+# The TABLES IN SCHEMA test is independent of all other test cases so it
+# cleans up after itself.
+
+# create tables pub and sub
+$node_publisher->safe_psql('postgres', "CREATE SCHEMA schema_rf_x");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE schema_rf_x.tab_rf_x (x int primary key)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE schema_rf_x.tab_rf_partitioned (x int primary key) PARTITION BY RANGE(x)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE public.tab_rf_partition (LIKE schema_rf_x.tab_rf_partitioned)"
+);
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE schema_rf_x.tab_rf_partitioned ATTACH PARTITION public.tab_rf_partition DEFAULT"
+);
+$node_subscriber->safe_psql('postgres', "CREATE SCHEMA schema_rf_x");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE schema_rf_x.tab_rf_x (x int primary key)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE schema_rf_x.tab_rf_partitioned (x int primary key) PARTITION BY RANGE(x)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE public.tab_rf_partition (LIKE schema_rf_x.tab_rf_partitioned)"
+);
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE schema_rf_x.tab_rf_partitioned ATTACH PARTITION public.tab_rf_partition DEFAULT"
+);
+
+# insert some initial data
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO schema_rf_x.tab_rf_x (x) VALUES (0), (5), (10), (15), (20)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO schema_rf_x.tab_rf_partitioned (x) VALUES (1), (20)");
+
+# create pub/sub
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_x FOR TABLE schema_rf_x.tab_rf_x WHERE (x > 10)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_allinschema FOR TABLES IN SCHEMA schema_rf_x, TABLE schema_rf_x.tab_rf_x WHERE (x > 10)"
+);
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_allinschema ADD TABLE public.tab_rf_partition WHERE (x > 10)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_x, tap_pub_allinschema"
+);
+
+# wait for initial table synchronization to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# The subscription of the TABLES IN SCHEMA publication means there should be
+# no filtering on the tablesync COPY, so expect all 5 rows to be present.
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(x) FROM schema_rf_x.tab_rf_x");
+is($result, qq(5),
+ 'check initial data copy from table tab_rf_x should not be filtered');
+
+# Similarly, the row filter for tab_rf_x (after the initial phase) has no
+# effect when combined with the TABLES IN SCHEMA publication. Meanwhile, the
+# filter for tab_rf_partition does apply because that partition belongs to a
+# different schema (and publish_via_partition_root = false).
+# Expected:
+# tab_rf_x : 5 initial rows + 2 new rows = 7 rows
+# tab_rf_partition : 1 initial row + 1 new row = 2 rows
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO schema_rf_x.tab_rf_x (x) VALUES (-99), (99)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO schema_rf_x.tab_rf_partitioned (x) VALUES (5), (25)");
+$node_publisher->wait_for_catchup($appname);
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(x) FROM schema_rf_x.tab_rf_x");
+is($result, qq(7), 'check table tab_rf_x should not be filtered');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT * FROM public.tab_rf_partition");
+is( $result, qq(20
+25), 'check table tab_rf_partition should be filtered');
+
+# cleanup pub
+$node_publisher->safe_psql('postgres',
+ "DROP PUBLICATION tap_pub_allinschema");
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_x");
+$node_publisher->safe_psql('postgres', "DROP TABLE public.tab_rf_partition");
+$node_publisher->safe_psql('postgres',
+ "DROP TABLE schema_rf_x.tab_rf_partitioned");
+$node_publisher->safe_psql('postgres', "DROP TABLE schema_rf_x.tab_rf_x");
+$node_publisher->safe_psql('postgres', "DROP SCHEMA schema_rf_x");
+# cleanup sub
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
+$node_subscriber->safe_psql('postgres', "DROP TABLE public.tab_rf_partition");
+$node_subscriber->safe_psql('postgres',
+ "DROP TABLE schema_rf_x.tab_rf_partitioned");
+$node_subscriber->safe_psql('postgres', "DROP TABLE schema_rf_x.tab_rf_x");
+$node_subscriber->safe_psql('postgres', "DROP SCHEMA schema_rf_x");
+
+# Testcase end: TABLES IN SCHEMA
+# ====================================================================
+
+# ======================================================
+# Testcase start: FOR TABLE with row filter publications
+
+# setup structure on publisher
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_1 (a int primary key, b text)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_rowfilter_1 REPLICA IDENTITY FULL;");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_2 (c int primary key)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_3 (a int primary key, b boolean)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_4 (c int primary key)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_partitioned (a int primary key, b integer) PARTITION BY RANGE(a)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_less_10k (LIKE tab_rowfilter_partitioned)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_rowfilter_partitioned ATTACH PARTITION tab_rowfilter_less_10k FOR VALUES FROM (MINVALUE) TO (10000)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_greater_10k (LIKE tab_rowfilter_partitioned)"
+);
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_rowfilter_partitioned ATTACH PARTITION tab_rowfilter_greater_10k FOR VALUES FROM (10000) TO (MAXVALUE)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_partitioned_2 (a int primary key, b integer) PARTITION BY RANGE(a)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_partition (LIKE tab_rowfilter_partitioned_2)"
+);
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_rowfilter_partitioned_2 ATTACH PARTITION tab_rowfilter_partition DEFAULT"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_toast (a text NOT NULL, b text NOT NULL)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_rowfilter_toast ALTER COLUMN a SET STORAGE EXTERNAL");
+$node_publisher->safe_psql('postgres',
+ "CREATE UNIQUE INDEX tab_rowfilter_toast_ri_index on tab_rowfilter_toast (a, b)"
+);
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_rowfilter_toast REPLICA IDENTITY USING INDEX tab_rowfilter_toast_ri_index"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_inherited (a int)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_child (b text) INHERITS (tab_rowfilter_inherited)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_viaroot_part (a int) PARTITION BY RANGE (a)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_viaroot_part_1 PARTITION OF tab_rowfilter_viaroot_part FOR VALUES FROM (1) TO (20)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_parent_sync (a int) PARTITION BY RANGE (a)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_child_sync PARTITION OF tab_rowfilter_parent_sync FOR VALUES FROM (1) TO (20)"
+);
+
+# setup structure on subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_1 (a int primary key, b text)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_2 (c int primary key)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_3 (a int primary key, b boolean)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_4 (c int primary key)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_partitioned (a int primary key, b integer) PARTITION BY RANGE(a)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_less_10k (LIKE tab_rowfilter_partitioned)");
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE tab_rowfilter_partitioned ATTACH PARTITION tab_rowfilter_less_10k FOR VALUES FROM (MINVALUE) TO (10000)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_greater_10k (LIKE tab_rowfilter_partitioned)"
+);
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE tab_rowfilter_partitioned ATTACH PARTITION tab_rowfilter_greater_10k FOR VALUES FROM (10000) TO (MAXVALUE)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_partitioned_2 (a int primary key, b integer) PARTITION BY RANGE(a)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_partition (LIKE tab_rowfilter_partitioned_2)"
+);
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE tab_rowfilter_partitioned_2 ATTACH PARTITION tab_rowfilter_partition DEFAULT"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_toast (a text NOT NULL, b text NOT NULL)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE UNIQUE INDEX tab_rowfilter_toast_ri_index on tab_rowfilter_toast (a, b)"
+);
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE tab_rowfilter_toast REPLICA IDENTITY USING INDEX tab_rowfilter_toast_ri_index"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_inherited (a int)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_child (b text) INHERITS (tab_rowfilter_inherited)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_viaroot_part (a int)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_viaroot_part_1 (a int)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_parent_sync (a int)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_rowfilter_child_sync (a int)");
+
+# setup logical replication
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_1 FOR TABLE tab_rowfilter_1 WHERE (a > 1000 AND b <> 'filtered')"
+);
+
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_1 ADD TABLE tab_rowfilter_2 WHERE (c % 7 = 0)"
+);
+
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_1 SET TABLE tab_rowfilter_1 WHERE (a > 1000 AND b <> 'filtered'), tab_rowfilter_2 WHERE (c % 2 = 0), tab_rowfilter_3"
+);
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_2 FOR TABLE tab_rowfilter_2 WHERE (c % 3 = 0)"
+);
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_3 FOR TABLE tab_rowfilter_partitioned");
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_3 ADD TABLE tab_rowfilter_less_10k WHERE (a < 6000)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_not_used FOR TABLE tab_rowfilter_1 WHERE (a < 0)"
+);
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_4a FOR TABLE tab_rowfilter_4 WHERE (c % 2 = 0)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_4b FOR TABLE tab_rowfilter_4");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_5a FOR TABLE tab_rowfilter_partitioned_2");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_5b FOR TABLE tab_rowfilter_partition WHERE (a > 10)"
+);
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_toast FOR TABLE tab_rowfilter_toast WHERE (a = repeat('1234567890', 200) AND b < '10')"
+);
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_inherits FOR TABLE tab_rowfilter_inherited WHERE (a > 15)"
+);
+
+# two publications, each publishing the partition through a different ancestor, with
+# different row filters
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_viaroot_1 FOR TABLE tab_rowfilter_viaroot_part WHERE (a > 15) WITH (publish_via_partition_root)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_viaroot_2 FOR TABLE tab_rowfilter_viaroot_part_1 WHERE (a < 15) WITH (publish_via_partition_root)"
+);
+
+# two publications, one publishing through ancestor and another one directly
+# publishing the partition, with different row filters
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_parent_sync FOR TABLE tab_rowfilter_parent_sync WHERE (a > 15) WITH (publish_via_partition_root)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_child_sync FOR TABLE tab_rowfilter_child_sync WHERE (a < 15)"
+);
+
+#
+# The following INSERTs are executed before the CREATE SUBSCRIPTION, so these
+# SQL commands are for testing the initial data copy using logical replication.
+#
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_1 (a, b) VALUES (1, 'not replicated')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_1 (a, b) VALUES (1500, 'filtered')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_1 (a, b) VALUES (1980, 'not filtered')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_1 (a, b) SELECT x, 'test ' || x FROM generate_series(990,1002) x"
+);
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_2 (c) SELECT generate_series(1, 20)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_3 (a, b) SELECT x, (x % 3 = 0) FROM generate_series(1, 10) x"
+);
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_4 (c) SELECT generate_series(1, 10)");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_parent_sync(a) VALUES(14), (16)");
+
+# insert data into partitioned table and directly on the partition
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_partitioned (a, b) VALUES(1, 100),(7000, 101),(15000, 102),(5500, 300)"
+);
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_less_10k (a, b) VALUES(2, 200),(6005, 201)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_greater_10k (a, b) VALUES(16000, 103)");
+
+# insert data into partitioned table.
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_partitioned_2 (a, b) VALUES(1, 1),(20, 20)");
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_toast(a, b) VALUES(repeat('1234567890', 200), '1234567890')"
+);
+
+# insert data into parent and child table.
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_inherited(a) VALUES(10),(20)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_child(a, b) VALUES(0,'0'),(30,'30'),(40,'40')"
+);
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_1, tap_pub_2, tap_pub_3, tap_pub_4a, tap_pub_4b, tap_pub_5a, tap_pub_5b, tap_pub_toast, tap_pub_inherits, tap_pub_viaroot_2, tap_pub_viaroot_1, tap_pub_parent_sync, tap_pub_child_sync"
+);
+
+# wait for initial table synchronization to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# Check expected replicated rows for tab_rowfilter_1
+# tap_pub_1 filter is: (a > 1000 AND b <> 'filtered')
+# - INSERT (1, 'not replicated') NO, because a is not > 1000
+# - INSERT (1500, 'filtered') NO, because b == 'filtered'
+# - INSERT (1980, 'not filtered') YES
+# - generate_series(990,1002) YES, only for 1001,1002 because a > 1000
+#
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a, b FROM tab_rowfilter_1 ORDER BY 1, 2");
+is( $result, qq(1001|test 1001
+1002|test 1002
+1980|not filtered), 'check initial data copy from table tab_rowfilter_1');
+
+# Check expected replicated rows for tab_rowfilter_2
+# tap_pub_1 filter is: (c % 2 = 0)
+# tap_pub_2 filter is: (c % 3 = 0)
+# When there are multiple publications for the same table, the filter
+# expressions are OR'ed together. In this case, rows are replicated if
+# c is divisible by 2 OR 3 (2, 3, 4, 6, 8, 9, 10, 12, 14, 15, 16, 18, 20)
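+# Conceptually, a row is replicated when (c % 2 = 0) OR (c % 3 = 0) holds.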
+#
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(c), min(c), max(c) FROM tab_rowfilter_2");
+is($result, qq(13|2|20),
+ 'check initial data copy from table tab_rowfilter_2');
+
+# Check expected replicated rows for tab_rowfilter_4
+# (same table in two publications but only one has a filter).
+# tap_pub_4a filter is: (c % 2 = 0)
+# tap_pub_4b filter is: <no filter>
+# Expressions are OR'ed together, but when one publication has no filter it
+# just means OR everything - i.e. the same as no filter at all.
+# Expect all rows: (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
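+# (The combined qualification behaves like (c % 2 = 0) OR TRUE, which every
+# row satisfies.)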
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(c), min(c), max(c) FROM tab_rowfilter_4");
+is($result, qq(10|1|10),
+ 'check initial data copy from table tab_rowfilter_4');
+
+# Check expected replicated rows for tab_rowfilter_3
+# There is no filter. 10 rows are inserted, so 10 rows are replicated.
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(a) FROM tab_rowfilter_3");
+is($result, qq(10), 'check initial data copy from table tab_rowfilter_3');
+
+# Check expected replicated rows for partitions
+# publication option publish_via_partition_root is false so use the row filter
+# from a partition
+# tab_rowfilter_partitioned filter: (a < 5000)
+# tab_rowfilter_less_10k filter: (a < 6000)
+# tab_rowfilter_greater_10k filter: no filter
+#
+# INSERT into tab_rowfilter_partitioned:
+# - INSERT (1,100) YES, because 1 < 6000
+# - INSERT (7000, 101) NO, because 7000 is not < 6000
+# - INSERT (15000, 102) YES, because tab_rowfilter_greater_10k has no filter
+# - INSERT (5500, 300) YES, because 5500 < 6000
+#
+# INSERT directly into tab_rowfilter_less_10k:
+# - INSERT (2, 200) YES, because 2 < 6000
+# - INSERT (6005, 201) NO, because 6005 is not < 6000
+#
+# INSERT directly into tab_rowfilter_greater_10k:
+# - INSERT (16000, 103) YES, because tab_rowfilter_greater_10k has no filter
+#
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a, b FROM tab_rowfilter_less_10k ORDER BY 1, 2");
+is( $result, qq(1|100
+2|200
+5500|300), 'check initial data copy from partition tab_rowfilter_less_10k');
+
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a, b FROM tab_rowfilter_greater_10k ORDER BY 1, 2");
+is( $result, qq(15000|102
+16000|103), 'check initial data copy from partition tab_rowfilter_greater_10k'
+);
+
+# Check expected replicated rows for partitions
+# publication option publish_via_partition_root is false so use the row filter
+# from a partition
+# tap_pub_5a filter: <no filter>
+# tap_pub_5b filter: (a > 10)
+# The parent table for this partition is published via tap_pub_5a, so there is
+# no filter for the partition. Since expressions are OR'ed together, this
+# means OR everything - i.e. the same as no filter at all.
+# Expect all rows: (1, 1) and (20, 20)
+#
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a, b FROM tab_rowfilter_partition ORDER BY 1, 2");
+is( $result, qq(1|1
+20|20), 'check initial data copy from partition tab_rowfilter_partition');
+
+# Check expected replicated rows for tab_rowfilter_toast
+# tab_rowfilter_toast filter: (a = repeat('1234567890', 200) AND b < '10')
+# INSERT (repeat('1234567890', 200) ,'1234567890') NO
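+# ('1234567890' sorts after '10' as text, so the b < '10' condition is false.)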
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_rowfilter_toast");
+is($result, qq(0), 'check initial data copy from table tab_rowfilter_toast');
+
+# Check expected replicated rows for tab_rowfilter_inherited
+# tab_rowfilter_inherited filter is: (a > 15)
+# - INSERT (10) NO, 10 < 15
+# - INSERT (20) YES, 20 > 15
+# - INSERT (0, '0') NO, 0 < 15
+# - INSERT (30, '30') YES, 30 > 15
+# - INSERT (40, '40') YES, 40 > 15
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM tab_rowfilter_inherited ORDER BY a");
+is( $result, qq(20
+30
+40), 'check initial data copy from table tab_rowfilter_inherited');
+
+# Check expected replicated rows for tap_pub_parent_sync and
+# tap_pub_child_sync.
+# Since the option publish_via_partition_root of tap_pub_parent_sync is true,
+# the row filter of tap_pub_parent_sync will be used:
+# tap_pub_parent_sync filter is: (a > 15)
+# tap_pub_child_sync filter is: (a < 15)
+# - INSERT (14) NO, 14 < 15
+# - INSERT (16) YES, 16 > 15
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM tab_rowfilter_parent_sync ORDER BY 1");
+is($result, qq(16), 'check initial data copy from tab_rowfilter_parent_sync');
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM tab_rowfilter_child_sync ORDER BY 1");
+is($result, qq(), 'check initial data copy from tab_rowfilter_child_sync');
+
+# The following commands are executed after CREATE SUBSCRIPTION, so these SQL
+# commands are for testing normal logical replication behavior.
+#
+# test row filter (INSERT, UPDATE, DELETE)
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_1 (a, b) VALUES (800, 'test 800')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_1 (a, b) VALUES (1600, 'test 1600')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_1 (a, b) VALUES (1601, 'test 1601')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_1 (a, b) VALUES (1602, 'filtered')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_1 (a, b) VALUES (1700, 'test 1700')");
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_rowfilter_1 SET b = NULL WHERE a = 1600");
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_rowfilter_1 SET b = 'test 1601 updated' WHERE a = 1601");
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_rowfilter_1 SET b = 'test 1602 updated' WHERE a = 1602");
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM tab_rowfilter_1 WHERE a = 1700");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_2 (c) VALUES (21), (22), (23), (24), (25)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_4 (c) VALUES (0), (11), (12)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_inherited (a) VALUES (14), (16)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_child (a, b) VALUES (13, '13'), (17, '17')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_viaroot_part (a) VALUES (14), (15), (16)");
+
+$node_publisher->wait_for_catchup($appname);
+
+# Check expected replicated rows for tab_rowfilter_2
+# tap_pub_1 filter is: (c % 2 = 0)
+# tap_pub_2 filter is: (c % 3 = 0)
+# When there are multiple publications for the same table, the filter
+# expressions are OR'ed together. In this case, rows are replicated if
+# c is divisible by 2 OR 3.
+#
+# Expect original rows (2, 3, 4, 6, 8, 9, 10, 12, 14, 15, 16, 18, 20)
+# Plus (21, 22, 24)
+#
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(c), min(c), max(c) FROM tab_rowfilter_2");
+is($result, qq(16|2|24), 'check replicated rows to tab_rowfilter_2');
+
+# Check expected replicated rows for tab_rowfilter_4
+# (same table in two publications but only one has a filter).
+# tap_pub_4a filter is: (c % 2 = 0)
+# tap_pub_4b filter is: <no filter>
+# Expressions are OR'ed together, but when one publication has no filter it
+# just means OR everything - i.e. the same as no filter at all.
+# Expect all rows from initial copy: (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+# And also (0, 11, 12)
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(c), min(c), max(c) FROM tab_rowfilter_4");
+is($result, qq(13|0|12), 'check replicated rows to tab_rowfilter_4');
+
+# Check expected replicated rows for tab_rowfilter_1
+# tap_pub_1 filter is: (a > 1000 AND b <> 'filtered')
+#
+# - 1001, 1002, 1980 already exist from initial data copy
+# - INSERT (800, 'test 800') NO, because 800 is not > 1000
+# - INSERT (1600, 'test 1600') YES, because 1600 > 1000 and 'test 1600' <> 'filtered',
+# but row deleted after the update below.
+# - INSERT (1601, 'test 1601') YES, because 1601 > 1000 and 'test 1601' <> 'filtered'
+# - INSERT (1602, 'filtered') NO, because b == 'filtered'
+# - INSERT (1700, 'test 1700') YES, because 1700 > 1000 and 'test 1700' <> 'filtered'
+# - UPDATE (1600, NULL) NO, row filter evaluates to false because NULL is not <> 'filtered'
+# - UPDATE (1601, 'test 1601 updated') YES, because 1601 > 1000 and 'test 1601 updated' <> 'filtered'
+# - UPDATE (1602, 'test 1602 updated') YES, because 1602 > 1000 and 'test 1602 updated' <> 'filtered'
+# - DELETE (1700) YES, because 1700 > 1000 and 'test 1700' <> 'filtered'
+#
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a, b FROM tab_rowfilter_1 ORDER BY 1, 2");
+is( $result, qq(1001|test 1001
+1002|test 1002
+1601|test 1601 updated
+1602|test 1602 updated
+1980|not filtered), 'check replicated rows to table tab_rowfilter_1');
+
+# Publish using root partitioned table
+# Use a different partitioned table layout (exercise publish_via_partition_root)
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_3 SET (publish_via_partition_root = true)");
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_3 SET TABLE tab_rowfilter_partitioned WHERE (a < 5000), tab_rowfilter_less_10k WHERE (a < 6000)"
+);
+$node_subscriber->safe_psql('postgres',
+ "TRUNCATE TABLE tab_rowfilter_partitioned");
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = true)");
+
+# wait for table synchronization to finish
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_partitioned (a, b) VALUES(4000, 400),(4001, 401),(4002, 402)"
+);
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_less_10k (a, b) VALUES(4500, 450)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_less_10k (a, b) VALUES(5600, 123)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_rowfilter_greater_10k (a, b) VALUES(14000, 1950)");
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_rowfilter_less_10k SET b = 30 WHERE a = 4001");
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM tab_rowfilter_less_10k WHERE a = 4002");
+
+$node_publisher->wait_for_catchup($appname);
+
+# Check expected replicated rows for partitions
+# publication option publish_via_partition_root is true so use the row filter
+# from the root partitioned table
+# tab_rowfilter_partitioned filter: (a < 5000)
+# tab_rowfilter_less_10k filter: (a < 6000)
+# tab_rowfilter_greater_10k filter: no filter
+#
+# After TRUNCATE, REFRESH PUBLICATION, the initial data copy will apply the
+# partitioned table row filter.
+# - INSERT (1, 100) YES, 1 < 5000
+# - INSERT (7000, 101) NO, 7000 is not < 5000
+# - INSERT (15000, 102) NO, 15000 is not < 5000
+# - INSERT (5500, 300) NO, 5500 is not < 5000
+# - INSERT (2, 200) YES, 2 < 5000
+# - INSERT (6005, 201) NO, 6005 is not < 5000
+# - INSERT (16000, 103) NO, 16000 is not < 5000
+#
+# Execute SQL commands after initial data copy for testing the logical
+# replication behavior.
+# - INSERT (4000, 400) YES, 4000 < 5000
+# - INSERT (4001, 401) YES, 4001 < 5000
+# - INSERT (4002, 402) YES, 4002 < 5000
+# - INSERT (4500, 450) YES, 4500 < 5000
+# - INSERT (5600, 123) NO, 5600 is not < 5000
+# - INSERT (14000, 1950)  NO, 14000 is not < 5000
+# - UPDATE (4001) YES, 4001 < 5000
+# - DELETE (4002) YES, 4002 < 5000
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a, b FROM tab_rowfilter_partitioned ORDER BY 1, 2");
+is( $result, qq(1|100
+2|200
+4000|400
+4001|30
+4500|450), 'check publish_via_partition_root behavior');
+
+# Check expected replicated rows for tab_rowfilter_inherited and
+# tab_rowfilter_child.
+# tab_rowfilter_inherited filter is: (a > 15)
+# - INSERT (14) NO, 14 < 15
+# - INSERT (16) YES, 16 > 15
+#
+# tab_rowfilter_child filter is: (a > 15)
+# - INSERT (13, '13') NO, 13 < 15
+# - INSERT (17, '17') YES, 17 > 15
+
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM tab_rowfilter_inherited ORDER BY a");
+is( $result, qq(16
+17
+20
+30
+40),
+ 'check replicated rows to tab_rowfilter_inherited and tab_rowfilter_child'
+);
+
+# UPDATE the non-toasted column for table tab_rowfilter_toast
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_rowfilter_toast SET b = '1'");
+
+$node_publisher->wait_for_catchup($appname);
+
+# Check expected replicated rows for tab_rowfilter_toast
+# tab_rowfilter_toast filter: (a = repeat('1234567890', 200) AND b < '10')
+# UPDATE old (repeat('1234567890', 200) ,'1234567890') NO
+# new: (repeat('1234567890', 200) ,'1') YES
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a = repeat('1234567890', 200), b FROM tab_rowfilter_toast");
+is($result, qq(t|1), 'check replicated rows to tab_rowfilter_toast');
+
+# Check expected replicated rows for tab_rowfilter_viaroot_part and
+# tab_rowfilter_viaroot_part_1. We should replicate only rows matching
+# the row filter for the top-level ancestor:
+#
+# tab_rowfilter_viaroot_part filter is: (a > 15)
+# - INSERT (14) NO, 14 < 15
+# - INSERT (15) NO, 15 = 15
+# - INSERT (16) YES, 16 > 15
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM tab_rowfilter_viaroot_part");
+is($result, qq(16), 'check replicated rows to tab_rowfilter_viaroot_part');
+
+# Check there is no data in tab_rowfilter_viaroot_part_1 because rows are
+# replicated via the topmost parent table tab_rowfilter_viaroot_part
+$result =
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM tab_rowfilter_viaroot_part_1");
+is($result, qq(), 'check replicated rows to tab_rowfilter_viaroot_part_1');
+
+# Testcase end: FOR TABLE with row filter publications
+# ======================================================
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/029_on_error.pl b/src/test/subscription/t/029_on_error.pl
new file mode 100644
index 0000000..7d6fb66
--- /dev/null
+++ b/src/test/subscription/t/029_on_error.pl
@@ -0,0 +1,180 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Tests for disable_on_error and SKIP transaction features.
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
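+# Offset into the subscriber's log file; each test advances it past the log
+# lines it has already consumed.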
+my $offset = 0;
+
+# Test skipping a transaction. This function must be called after the caller
+# has inserted data that conflicts with the subscriber. The finish LSN of the
+# failed transaction is fetched from the subscriber's log and passed to
+# ALTER SUBSCRIPTION ... SKIP. After the skip, we check that logical
+# replication keeps working by inserting $nonconflict_data on the publisher.
+sub test_skip_lsn
+{
+ my ($node_publisher, $node_subscriber, $nonconflict_data, $expected, $msg)
+ = @_;
+
+	# Wait until the conflict disables the subscription on the subscriber.
+ $node_subscriber->poll_query_until('postgres',
+ "SELECT subenabled = FALSE FROM pg_subscription WHERE subname = 'sub'"
+ );
+
+ # Get the finish LSN of the error transaction.
+ my $contents = slurp_file($node_subscriber->logfile, $offset);
+ $contents =~
+ qr/processing remote data for replication origin \"pg_\d+\" during message type "INSERT" for replication target relation "public.tbl" in transaction \d+, finished at ([[:xdigit:]]+\/[[:xdigit:]]+)/
+ or die "could not get error-LSN";
+ my $lsn = $1;
+
+ # Set skip lsn.
+ $node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION sub SKIP (lsn = '$lsn')");
+
+ # Re-enable the subscription.
+ $node_subscriber->safe_psql('postgres', "ALTER SUBSCRIPTION sub ENABLE");
+
+ # Wait for the failed transaction to be skipped
+ $node_subscriber->poll_query_until('postgres',
+ "SELECT subskiplsn = '0/0' FROM pg_subscription WHERE subname = 'sub'"
+ );
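+	# (the apply worker resets subskiplsn to 0/0 once the transaction at that
+	# LSN has been skipped)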
+
+ # Check the log to ensure that the transaction is skipped, and advance the
+ # offset of the log file for the next test.
+ $offset = $node_subscriber->wait_for_log(
+ qr/LOG: ( [A-Z0-9]+:)? logical replication completed skipping transaction at LSN $lsn/,
+ $offset);
+
+ # Insert non-conflict data
+ $node_publisher->safe_psql('postgres',
+ "INSERT INTO tbl VALUES $nonconflict_data");
+
+ $node_publisher->wait_for_catchup('sub');
+
+ # Check replicated data
+ my $res =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tbl");
+ is($res, $expected, $msg);
+}
+
+# Create publisher node. Set a low value of logical_decoding_work_mem to test
+# streaming cases, and allow prepared transactions for the two-phase cases.
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->append_conf(
+ 'postgresql.conf',
+ qq[
+logical_decoding_work_mem = 64kB
+max_prepared_transactions = 10
+]);
+$node_publisher->start;
+
+# Create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init;
+$node_subscriber->append_conf(
+ 'postgresql.conf',
+ qq[
+max_prepared_transactions = 10
+]);
+$node_subscriber->start;
+
+# Initial table setup on both publisher and subscriber. On the subscriber, we
+# create the same table but with a primary key. Also, insert some data that
+# will conflict with data replicated from the publisher later.
+$node_publisher->safe_psql(
+ 'postgres',
+ qq[
+CREATE TABLE tbl (i INT, t TEXT);
+INSERT INTO tbl VALUES (1, NULL);
+]);
+$node_subscriber->safe_psql(
+ 'postgres',
+ qq[
+CREATE TABLE tbl (i INT PRIMARY KEY, t TEXT);
+INSERT INTO tbl VALUES (1, NULL);
+]);
+
+# Create a pub/sub pair to set up logical replication. This tests that a
+# uniqueness violation causes the subscription to fail during initial
+# synchronization and become disabled.
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub FOR TABLE tbl");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub CONNECTION '$publisher_connstr' PUBLICATION pub WITH (disable_on_error = true, streaming = on, two_phase = on)"
+);
+
+# Initial synchronization failure causes the subscription to be disabled.
+$node_subscriber->poll_query_until('postgres',
+ "SELECT subenabled = false FROM pg_catalog.pg_subscription WHERE subname = 'sub'"
+) or die "Timed out while waiting for subscriber to be disabled";
+
+# Truncate the table on the subscriber to remove the conflicting row that
+# caused the subscription to be disabled.
+$node_subscriber->safe_psql('postgres', "TRUNCATE tbl");
+
+# Re-enable the subscription "sub".
+$node_subscriber->safe_psql('postgres', "ALTER SUBSCRIPTION sub ENABLE");
+
+# Wait for the data to replicate.
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'sub');
+
+# Confirm that we have finished the table sync.
+my $result =
+ $node_subscriber->safe_psql('postgres', "SELECT COUNT(*) FROM tbl");
+is($result, qq(1), "subscription sub replicated data");
+
+# Insert data into tbl, raising an error on the subscriber due to violation
+# of the unique constraint on tbl. Then skip the transaction.
+$node_publisher->safe_psql(
+ 'postgres',
+ qq[
+BEGIN;
+INSERT INTO tbl VALUES (1, NULL);
+COMMIT;
+]);
+test_skip_lsn($node_publisher, $node_subscriber,
+ "(2, NULL)", "2", "test skipping transaction");
+
+# Test for PREPARE and COMMIT PREPARED. Insert the same data into tbl and
+# PREPARE the transaction, raising an error. Then skip the transaction.
+$node_publisher->safe_psql(
+ 'postgres',
+ qq[
+BEGIN;
+INSERT INTO tbl VALUES (1, NULL);
+PREPARE TRANSACTION 'gtx';
+COMMIT PREPARED 'gtx';
+]);
+test_skip_lsn($node_publisher, $node_subscriber,
+	"(3, NULL)", "3", "test skipping prepare and commit prepared");
+
+# Test for STREAM COMMIT. Insert enough rows into tbl to exceed the 64kB
+# limit, again raising an error on the subscriber while applying the spooled
+# changes, for the same reason. Then skip the transaction.
+$node_publisher->safe_psql(
+ 'postgres',
+ qq[
+BEGIN;
+INSERT INTO tbl SELECT i, md5(i::text) FROM generate_series(1, 10000) s(i);
+COMMIT;
+]);
+test_skip_lsn($node_publisher, $node_subscriber, "(4, md5(4::text))",
+ "4", "test skipping stream-commit");
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT COUNT(*) FROM pg_prepared_xacts");
+is($result, "0",
+ "check all prepared transactions are resolved on the subscriber");
+
+$node_subscriber->stop;
+$node_publisher->stop;
+
+done_testing();
diff --git a/src/test/subscription/t/030_origin.pl b/src/test/subscription/t/030_origin.pl
new file mode 100644
index 0000000..9ca1fa2
--- /dev/null
+++ b/src/test/subscription/t/030_origin.pl
@@ -0,0 +1,216 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test the CREATE SUBSCRIPTION 'origin' parameter and its interaction with
+# the 'copy_data' parameter.
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+my $subname_AB = 'tap_sub_A_B';
+my $subname_AB2 = 'tap_sub_A_B_2';
+my $subname_BA = 'tap_sub_B_A';
+my $subname_BC = 'tap_sub_B_C';
+
+my $result;
+my $stdout;
+my $stderr;
+
+###############################################################################
+# Setup a bidirectional logical replication between node_A & node_B
+###############################################################################
+
+# Initialize nodes
+# node_A
+my $node_A = PostgreSQL::Test::Cluster->new('node_A');
+$node_A->init(allows_streaming => 'logical');
+$node_A->start;
+# node_B
+my $node_B = PostgreSQL::Test::Cluster->new('node_B');
+$node_B->init(allows_streaming => 'logical');
+$node_B->start;
+
+# Create table on node_A
+$node_A->safe_psql('postgres', "CREATE TABLE tab (a int PRIMARY KEY)");
+
+# Create the same table on node_B
+$node_B->safe_psql('postgres', "CREATE TABLE tab (a int PRIMARY KEY)");
+
+# Setup logical replication
+# node_A (pub) -> node_B (sub)
+my $node_A_connstr = $node_A->connstr . ' dbname=postgres';
+$node_A->safe_psql('postgres', "CREATE PUBLICATION tap_pub_A FOR TABLE tab");
+$node_B->safe_psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION $subname_BA
+ CONNECTION '$node_A_connstr application_name=$subname_BA'
+ PUBLICATION tap_pub_A
+ WITH (origin = none)");
+
+# node_B (pub) -> node_A (sub)
+my $node_B_connstr = $node_B->connstr . ' dbname=postgres';
+$node_B->safe_psql('postgres', "CREATE PUBLICATION tap_pub_B FOR TABLE tab");
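+# copy_data is disabled for this direction: node_B already subscribes to
+# node_A, so with origin = none an initial copy could otherwise include rows
+# of non-local origin (a case exercised explicitly later in this test).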
+$node_A->safe_psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION $subname_AB
+ CONNECTION '$node_B_connstr application_name=$subname_AB'
+ PUBLICATION tap_pub_B
+ WITH (origin = none, copy_data = off)");
+
+# Wait for initial table sync to finish
+$node_A->wait_for_subscription_sync($node_B, $subname_AB);
+$node_B->wait_for_subscription_sync($node_A, $subname_BA);
+
+is(1, 1, 'Bidirectional replication setup is complete');
+
+###############################################################################
+# Check that bidirectional logical replication setup does not cause infinite
+# recursive insertion.
+###############################################################################
+
+# insert a record
+$node_A->safe_psql('postgres', "INSERT INTO tab VALUES (11);");
+$node_B->safe_psql('postgres', "INSERT INTO tab VALUES (21);");
+
+$node_A->wait_for_catchup($subname_BA);
+$node_B->wait_for_catchup($subname_AB);
+
+# check that transaction was committed on subscriber(s)
+$result = $node_A->safe_psql('postgres', "SELECT * FROM tab ORDER BY 1;");
+is( $result, qq(11
+21),
+ 'Inserted successfully without leading to infinite recursion in bidirectional replication setup'
+);
+$result = $node_B->safe_psql('postgres', "SELECT * FROM tab ORDER BY 1;");
+is( $result, qq(11
+21),
+ 'Inserted successfully without leading to infinite recursion in bidirectional replication setup'
+);
+
+$node_A->safe_psql('postgres', "DELETE FROM tab;");
+
+$node_A->wait_for_catchup($subname_BA);
+$node_B->wait_for_catchup($subname_AB);
+
+###############################################################################
+# Check that remote data of node_B (that originated from node_C) is not
+# published to node_A.
+###############################################################################
+$result = $node_A->safe_psql('postgres', "SELECT * FROM tab ORDER BY 1;");
+is($result, qq(), 'Check existing data');
+
+$result = $node_B->safe_psql('postgres', "SELECT * FROM tab ORDER BY 1;");
+is($result, qq(), 'Check existing data');
+
+# Initialize node node_C
+my $node_C = PostgreSQL::Test::Cluster->new('node_C');
+$node_C->init(allows_streaming => 'logical');
+$node_C->start;
+
+$node_C->safe_psql('postgres', "CREATE TABLE tab (a int PRIMARY KEY)");
+
+# Setup logical replication
+# node_C (pub) -> node_B (sub)
+my $node_C_connstr = $node_C->connstr . ' dbname=postgres';
+$node_C->safe_psql('postgres', "CREATE PUBLICATION tap_pub_C FOR TABLE tab");
+$node_B->safe_psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION $subname_BC
+ CONNECTION '$node_C_connstr application_name=$subname_BC'
+ PUBLICATION tap_pub_C
+ WITH (origin = none)");
+$node_B->wait_for_subscription_sync($node_C, $subname_BC);
+
+# insert a record
+$node_C->safe_psql('postgres', "INSERT INTO tab VALUES (32);");
+
+$node_C->wait_for_catchup($subname_BC);
+$node_B->wait_for_catchup($subname_AB);
+$node_A->wait_for_catchup($subname_BA);
+
+$result = $node_B->safe_psql('postgres', "SELECT * FROM tab ORDER BY 1;");
+is($result, qq(32), 'The node_C data replicated to node_B');
+
+# check that the data published from node_C to node_B is not sent to node_A
+$result = $node_A->safe_psql('postgres', "SELECT * FROM tab ORDER BY 1;");
+is($result, qq(),
+ 'Remote data originating from another node (not the publisher) is not replicated when origin parameter is none'
+);
+
+###############################################################################
+# Specifying origin = NONE means the publisher should replicate only changes
+# generated locally on node_B. But since node_B also subscribes to data from
+# node_A, node_B can contain remotely originated data from node_A. A warning
+# is logged in this case to draw attention to the possible remote data.
+###############################################################################
+($result, $stdout, $stderr) = $node_A->psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION $subname_AB2
+ CONNECTION '$node_B_connstr application_name=$subname_AB2'
+ PUBLICATION tap_pub_B
+ WITH (origin = none, copy_data = on)");
+like(
+ $stderr,
+ qr/WARNING: ( [A-Z0-9]+:)? subscription "tap_sub_a_b_2" requested copy_data with origin = NONE but might copy data that had a different origin/,
+ "Create subscription with origin = none and copy_data when the publisher has subscribed same table"
+);
+
+$node_A->wait_for_subscription_sync($node_B, $subname_AB2);
+
+# Alter subscription ... refresh publication should be successful when no new
+# table is added
+$node_A->safe_psql(
+ 'postgres', "
+ ALTER SUBSCRIPTION $subname_AB2 REFRESH PUBLICATION");
+
+# Check Alter subscription ... refresh publication when a new table is added
+# that, on the publisher side, is subscribing data from a different publication
+$node_A->safe_psql('postgres', "CREATE TABLE tab_new (a int PRIMARY KEY)");
+$node_B->safe_psql('postgres', "CREATE TABLE tab_new (a int PRIMARY KEY)");
+
+# add a new table to the publication
+$node_A->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_A ADD TABLE tab_new");
+$node_B->safe_psql(
+ 'postgres', "
+ ALTER SUBSCRIPTION $subname_BA REFRESH PUBLICATION");
+
+$node_B->wait_for_subscription_sync($node_A, $subname_BA);
+
+# add a new table to the publication
+$node_B->safe_psql('postgres',
+ "ALTER PUBLICATION tap_pub_B ADD TABLE tab_new");
+
+# Alter subscription ... refresh publication should log a warning when a new
+# table on the publisher is subscribing data from a different publication
+($result, $stdout, $stderr) = $node_A->psql(
+ 'postgres', "
+ ALTER SUBSCRIPTION $subname_AB2 REFRESH PUBLICATION");
+like(
+ $stderr,
+ qr/WARNING: ( [A-Z0-9]+:)? subscription "tap_sub_a_b_2" requested copy_data with origin = NONE but might copy data that had a different origin/,
+ "Refresh publication when the publisher has subscribed for the new table, but the subscriber-side wants origin = none"
+);
+
+# Ensure that relation has reached 'ready' state before we try to drop it
+my $synced_query =
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r');";
+$node_A->poll_query_until('postgres', $synced_query)
+ or die "Timed out while waiting for subscriber to synchronize data";
+
+$node_B->wait_for_catchup($subname_AB2);
+
+# clear the operations done by this test
+$node_A->safe_psql('postgres', "DROP TABLE tab_new");
+$node_B->safe_psql('postgres', "DROP TABLE tab_new");
+$node_A->safe_psql('postgres', "DROP SUBSCRIPTION $subname_AB2");
+
+# shutdown
+$node_B->stop('fast');
+$node_A->stop('fast');
+$node_C->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/031_column_list.pl b/src/test/subscription/t/031_column_list.pl
new file mode 100644
index 0000000..dbff806
--- /dev/null
+++ b/src/test/subscription/t/031_column_list.pl
@@ -0,0 +1,1293 @@
+# Copyright (c) 2022-2023, PostgreSQL Global Development Group
+
+# Test partial-column publication of tables
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
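+# Allow the apply worker and several tablesync workers to run concurrently;
+# this test creates and (re)synchronizes many tables.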
+$node_subscriber->append_conf('postgresql.conf',
+ qq(max_logical_replication_workers = 6));
+$node_subscriber->start;
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
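+# Log-file offset on the publisher, used by wait_for_log() at the end of the
+# test when checking for the column-list mismatch error.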
+my $offset = 0;
+
+# setup tables on both nodes
+
+# tab1: simple 1:1 replication
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab1 (a int PRIMARY KEY, "B" int, c int)
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab1 (a int PRIMARY KEY, "B" int, c int)
+));
+
+# tab2: replication from a regular table to a table with fewer columns
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab2 (a int PRIMARY KEY, b varchar, c int);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab2 (a int PRIMARY KEY, b varchar)
+));
+
+# tab3: simple 1:1 replication with weird column names
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab3 ("a'" int PRIMARY KEY, "B" varchar, "c'" int)
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab3 ("a'" int PRIMARY KEY, "c'" int)
+));
+
+# test_part: partitioned tables (including multi-level partitioning, and
+# fewer columns on the subscriber)
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_part (a int PRIMARY KEY, b text, c timestamptz) PARTITION BY LIST (a);
+ CREATE TABLE test_part_1_1 PARTITION OF test_part FOR VALUES IN (1,2,3,4,5,6);
+ CREATE TABLE test_part_2_1 PARTITION OF test_part FOR VALUES IN (7,8,9,10,11,12) PARTITION BY LIST (a);
+ CREATE TABLE test_part_2_2 PARTITION OF test_part_2_1 FOR VALUES IN (7,8,9,10);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_part (a int PRIMARY KEY, b text) PARTITION BY LIST (a);
+ CREATE TABLE test_part_1_1 PARTITION OF test_part FOR VALUES IN (1,2,3,4,5,6);
+ CREATE TABLE test_part_2_1 PARTITION OF test_part FOR VALUES IN (7,8,9,10,11,12) PARTITION BY LIST (a);
+ CREATE TABLE test_part_2_2 PARTITION OF test_part_2_1 FOR VALUES IN (7,8,9,10);
+));
+
+# tab4: table with user-defined enum types
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TYPE test_typ AS ENUM ('blue', 'red');
+ CREATE TABLE tab4 (a INT PRIMARY KEY, b test_typ, c int, d text);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TYPE test_typ AS ENUM ('blue', 'red');
+ CREATE TABLE tab4 (a INT PRIMARY KEY, b test_typ, d text);
+));
+
+
+# TEST: create publication and subscription for some of the tables with
+# column lists
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE PUBLICATION pub1
+ FOR TABLE tab1 (a, "B"), tab3 ("a'", "c'"), test_part (a, b), tab4 (a, b, d)
+ WITH (publish_via_partition_root = 'true');
+));
+
+# check that we got the right prattrs values for the publication in the
+# pg_publication_rel catalog (order by relname, to get stable ordering)
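+# (prattrs is an int2vector of attribute numbers, so e.g. "1 2" for tab1
+# corresponds to columns a and "B")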
+my $result = $node_publisher->safe_psql(
+ 'postgres', qq(
+ SELECT relname, prattrs
+ FROM pg_publication_rel pb JOIN pg_class pc ON(pb.prrelid = pc.oid)
+ ORDER BY relname
+));
+
+is( $result, qq(tab1|1 2
+tab3|1 3
+tab4|1 2 4
+test_part|1 2), 'publication relation updated');
+
+# TEST: insert data into the tables, create subscription and see if sync
+# replicates the right columns
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab1 VALUES (1, 2, 3);
+ INSERT INTO tab1 VALUES (4, 5, 6);
+));
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab3 VALUES (1, 2, 3);
+ INSERT INTO tab3 VALUES (4, 5, 6);
+));
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab4 VALUES (1, 'red', 3, 'oh my');
+ INSERT INTO tab4 VALUES (2, 'blue', 4, 'hello');
+));
+
+# replication of partitioned table
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO test_part VALUES (1, 'abc', '2021-07-04 12:00:00');
+ INSERT INTO test_part VALUES (2, 'bcd', '2021-07-03 11:12:13');
+ INSERT INTO test_part VALUES (7, 'abc', '2021-07-04 12:00:00');
+ INSERT INTO test_part VALUES (8, 'bcd', '2021-07-03 11:12:13');
+));
+
+# create subscription for the publication, wait for sync to complete,
+# then check the sync results
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+# tab1: only (a,b) is replicated
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab1 ORDER BY a");
+is( $result, qq(1|2|
+4|5|), 'insert on column tab1.c is not replicated');
+
+# tab3: only (a,c) is replicated
+$result = $node_subscriber->safe_psql('postgres',
+ qq(SELECT * FROM tab3 ORDER BY "a'"));
+is( $result, qq(1|3
+4|6), 'insert on column tab3.b is not replicated');
+
+# tab4: only (a,b,d) is replicated
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab4 ORDER BY a");
+is( $result, qq(1|red|oh my
+2|blue|hello), 'insert on column tab4.c is not replicated');
+
+# test_part: (a,b) is replicated
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT * FROM test_part ORDER BY a");
+is( $result, qq(1|abc
+2|bcd
+7|abc
+8|bcd), 'insert on column test_part.c is not replicated');
+
+
+# TEST: now insert more data into the tables, and wait until we replicate
+# them (not by tablesync, but regular decoding and replication)
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab1 VALUES (2, 3, 4);
+ INSERT INTO tab1 VALUES (5, 6, 7);
+));
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab3 VALUES (2, 3, 4);
+ INSERT INTO tab3 VALUES (5, 6, 7);
+));
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab4 VALUES (3, 'red', 5, 'foo');
+ INSERT INTO tab4 VALUES (4, 'blue', 6, 'bar');
+));
+
+# replication of partitioned table
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO test_part VALUES (3, 'xxx', '2022-02-01 10:00:00');
+ INSERT INTO test_part VALUES (4, 'yyy', '2022-03-02 15:12:13');
+ INSERT INTO test_part VALUES (9, 'zzz', '2022-04-03 21:00:00');
+ INSERT INTO test_part VALUES (10, 'qqq', '2022-05-04 22:12:13');
+));
+
+# wait for catchup before checking the subscriber
+$node_publisher->wait_for_catchup('sub1');
+
+# tab1: only (a,b) is replicated
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab1 ORDER BY a");
+is( $result, qq(1|2|
+2|3|
+4|5|
+5|6|), 'insert on column tab1.c is not replicated');
+
+# tab3: only (a,c) is replicated
+$result = $node_subscriber->safe_psql('postgres',
+ qq(SELECT * FROM tab3 ORDER BY "a'"));
+is( $result, qq(1|3
+2|4
+4|6
+5|7), 'insert on column tab3.b is not replicated');
+
+# tab4: only (a,b,d) is replicated
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab4 ORDER BY a");
+is( $result, qq(1|red|oh my
+2|blue|hello
+3|red|foo
+4|blue|bar), 'insert on column tab4.c is not replicated');
+
+# test_part: (a,b) is replicated
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT * FROM test_part ORDER BY a");
+is( $result, qq(1|abc
+2|bcd
+3|xxx
+4|yyy
+7|abc
+8|bcd
+9|zzz
+10|qqq), 'insert on column test_part.c is not replicated');
+
+
+# TEST: do some updates on some of the tables, both on columns included
+# in the column list and on other columns
+
+# tab1: update of replicated column
+$node_publisher->safe_psql('postgres',
+ qq(UPDATE tab1 SET "B" = 2 * "B" where a = 1));
+
+# tab1: update of non-replicated column
+$node_publisher->safe_psql('postgres',
+ qq(UPDATE tab1 SET c = 2*c where a = 4));
+
+# tab3: update of non-replicated column
+$node_publisher->safe_psql('postgres',
+ qq(UPDATE tab3 SET "B" = "B" || ' updated' where "a'" = 4));
+
+# tab3: update of replicated column
+$node_publisher->safe_psql('postgres',
+ qq(UPDATE tab3 SET "c'" = 2 * "c'" where "a'" = 1));
+
+# tab4
+$node_publisher->safe_psql('postgres',
+ qq(UPDATE tab4 SET b = 'blue', c = c * 2, d = d || ' updated' where a = 1)
+);
+
+# tab4
+$node_publisher->safe_psql('postgres',
+ qq(UPDATE tab4 SET b = 'red', c = c * 2, d = d || ' updated' where a = 2)
+);
+
+# wait for the replication to catch up, and check the UPDATE results got
+# replicated correctly, with the right column list
+$node_publisher->wait_for_catchup('sub1');
+
+$result =
+ $node_subscriber->safe_psql('postgres', qq(SELECT * FROM tab1 ORDER BY a));
+is( $result,
+ qq(1|4|
+2|3|
+4|5|
+5|6|), 'only update on column tab1.b is replicated');
+
+$result = $node_subscriber->safe_psql('postgres',
+ qq(SELECT * FROM tab3 ORDER BY "a'"));
+is( $result,
+ qq(1|6
+2|4
+4|6
+5|7), 'only update on column tab3.c is replicated');
+
+$result =
+ $node_subscriber->safe_psql('postgres', qq(SELECT * FROM tab4 ORDER BY a));
+
+is( $result, qq(1|blue|oh my updated
+2|red|hello updated
+3|red|foo
+4|blue|bar), 'update on column tab4.c is not replicated');
+
+
+# TEST: add table with a column list, insert data, replicate
+
+# insert some data before adding it to the publication
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab2 VALUES (1, 'abc', 3);
+));
+
+$node_publisher->safe_psql('postgres',
+ "ALTER PUBLICATION pub1 ADD TABLE tab2 (a, b)");
+
+$node_subscriber->safe_psql('postgres',
+ "ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION");
+
+# wait for the tablesync to complete, add a bit more data and then check
+# the results of the replication
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab2 VALUES (2, 'def', 6);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab2 ORDER BY a");
+is( $result, qq(1|abc
+2|def), 'insert on column tab2.c is not replicated');
+
+# do a couple updates, check the correct stuff gets replicated
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ UPDATE tab2 SET c = 5 where a = 1;
+ UPDATE tab2 SET b = 'xyz' where a = 2;
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab2 ORDER BY a");
+is( $result, qq(1|abc
+2|xyz), 'update on column tab2.c is not replicated');
+
+
+# TEST: add a table to two publications with the same column lists, and
+# create a single subscription replicating both publications
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab5 (a int PRIMARY KEY, b int, c int, d int);
+ CREATE PUBLICATION pub2 FOR TABLE tab5 (a, b);
+ CREATE PUBLICATION pub3 FOR TABLE tab5 (a, b);
+
+ -- insert a couple initial records
+ INSERT INTO tab5 VALUES (1, 11, 111, 1111);
+ INSERT INTO tab5 VALUES (2, 22, 222, 2222);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab5 (a int PRIMARY KEY, b int, d int);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub2, pub3
+));
+
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'sub1');
+
+# insert data and make sure the columns in column list get fully replicated
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab5 VALUES (3, 33, 333, 3333);
+ INSERT INTO tab5 VALUES (4, 44, 444, 4444);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab5 ORDER BY a"),
+ qq(1|11|
+2|22|
+3|33|
+4|44|),
+ 'overlapping publications with overlapping column lists');
+
+
+# TEST: create a table with a column list, then change the replica
+# identity by replacing a primary key (but use a different column in
+# the column list)
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab6 (a int PRIMARY KEY, b int, c int, d int);
+ CREATE PUBLICATION pub4 FOR TABLE tab6 (a, b);
+
+ -- initial data
+ INSERT INTO tab6 VALUES (1, 22, 333, 4444);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab6 (a int PRIMARY KEY, b int, c int, d int);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub4
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab6 VALUES (2, 33, 444, 5555);
+ UPDATE tab6 SET b = b * 2, c = c * 3, d = d * 4;
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab6 ORDER BY a"),
+ qq(1|44||
+2|66||), 'replication with the original primary key');
+
+# now redefine the constraint - move the primary key to a different column
+# (which is still covered by the column list, though)
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ ALTER TABLE tab6 DROP CONSTRAINT tab6_pkey;
+ ALTER TABLE tab6 ADD PRIMARY KEY (b);
+));
+
+# we need to do the same thing on the subscriber
+# XXX What would happen if this happens before the publisher ALTER? Or
+# interleaved, somehow? But that seems unrelated to column lists.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER TABLE tab6 DROP CONSTRAINT tab6_pkey;
+ ALTER TABLE tab6 ADD PRIMARY KEY (b);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab6 VALUES (3, 55, 666, 8888);
+ UPDATE tab6 SET b = b * 2, c = c * 3, d = d * 4;
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab6 ORDER BY a"),
+ qq(1|88||
+2|132||
+3|110||),
+ 'replication with the modified primary key');
+
+
+# TEST: create a table with a column list, then change the replica
+# identity by replacing a primary key with a key on multiple columns
+# (all of them covered by the column list)
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab7 (a int PRIMARY KEY, b int, c int, d int);
+ CREATE PUBLICATION pub5 FOR TABLE tab7 (a, b);
+
+ -- some initial data
+ INSERT INTO tab7 VALUES (1, 22, 333, 4444);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab7 (a int PRIMARY KEY, b int, c int, d int);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub5
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab7 VALUES (2, 33, 444, 5555);
+ UPDATE tab7 SET b = b * 2, c = c * 3, d = d * 4;
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+ qq(1|44||
+2|66||), 'replication with the original primary key');
+
+# now redefine the constraint - replace the primary key with one on multiple
+# columns (all of them still covered by the column list)
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ ALTER TABLE tab7 DROP CONSTRAINT tab7_pkey;
+ ALTER TABLE tab7 ADD PRIMARY KEY (a, b);
+));
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO tab7 VALUES (3, 55, 666, 7777);
+ UPDATE tab7 SET b = b * 2, c = c * 3, d = d * 4;
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+ qq(1|88||
+2|132||
+3|110||),
+ 'replication with the modified primary key');
+
+# now switch the primary key again (its columns are still covered by the
+# column list), but this time also generate writes between the drop and the
+# creation of the new constraint
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ ALTER TABLE tab7 DROP CONSTRAINT tab7_pkey;
+ INSERT INTO tab7 VALUES (4, 77, 888, 9999);
+ -- update/delete is not allowed for tables without RI
+ ALTER TABLE tab7 ADD PRIMARY KEY (b, a);
+ UPDATE tab7 SET b = b * 2, c = c * 3, d = d * 4;
+ DELETE FROM tab7 WHERE a = 1;
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+ qq(2|264||
+3|220||
+4|154||),
+ 'replication with the modified primary key');
+
+
+# TEST: partitioned tables (with publish_via_partition_root = true)
+# and replica identity. The (leaf) partitions may have different RIs, so
+# we need to check the partition RI (with respect to the column list)
+# while attaching the partition.
+
+# First, let's create a partitioned table with two partitions, each with
+# a different RI, but a column list not covering all those RI.
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_part_a (a int, b int, c int) PARTITION BY LIST (a);
+
+ CREATE TABLE test_part_a_1 PARTITION OF test_part_a FOR VALUES IN (1,2,3,4,5);
+ ALTER TABLE test_part_a_1 ADD PRIMARY KEY (a);
+ ALTER TABLE test_part_a_1 REPLICA IDENTITY USING INDEX test_part_a_1_pkey;
+
+ CREATE TABLE test_part_a_2 PARTITION OF test_part_a FOR VALUES IN (6,7,8,9,10);
+ ALTER TABLE test_part_a_2 ADD PRIMARY KEY (b);
+ ALTER TABLE test_part_a_2 REPLICA IDENTITY USING INDEX test_part_a_2_pkey;
+
+ -- initial data, one row in each partition
+ INSERT INTO test_part_a VALUES (1, 3);
+ INSERT INTO test_part_a VALUES (6, 4);
+));
+
+# do the same thing on the subscriber (with the opposite column order)
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_part_a (b int, a int) PARTITION BY LIST (a);
+
+ CREATE TABLE test_part_a_1 PARTITION OF test_part_a FOR VALUES IN (1,2,3,4,5);
+ ALTER TABLE test_part_a_1 ADD PRIMARY KEY (a);
+ ALTER TABLE test_part_a_1 REPLICA IDENTITY USING INDEX test_part_a_1_pkey;
+
+ CREATE TABLE test_part_a_2 PARTITION OF test_part_a FOR VALUES IN (6,7,8,9,10);
+ ALTER TABLE test_part_a_2 ADD PRIMARY KEY (b);
+ ALTER TABLE test_part_a_2 REPLICA IDENTITY USING INDEX test_part_a_2_pkey;
+));
+
+# create a publication with a column list (b, a) on the partitioned table and
+# narrower per-partition column lists; with publish_via_partition_root = true
+# only the root's column list is applied
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE PUBLICATION pub6 FOR TABLE test_part_a (b, a) WITH (publish_via_partition_root = true);
+ ALTER PUBLICATION pub6 ADD TABLE test_part_a_1 (a);
+ ALTER PUBLICATION pub6 ADD TABLE test_part_a_2 (b);
+));
+
+# add the publication to our subscription, wait for sync to complete
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub6
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO test_part_a VALUES (2, 5);
+ INSERT INTO test_part_a VALUES (7, 6);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT a, b FROM test_part_a ORDER BY a, b"),
+ qq(1|3
+2|5
+6|4
+7|6),
+	'partitions with different replica identities replicated correctly');
+
+# This time use a column list covering the replica identity columns of all
+# partitions
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_part_b (a int, b int) PARTITION BY LIST (a);
+
+ CREATE TABLE test_part_b_1 PARTITION OF test_part_b FOR VALUES IN (1,2,3,4,5);
+ ALTER TABLE test_part_b_1 ADD PRIMARY KEY (a);
+ ALTER TABLE test_part_b_1 REPLICA IDENTITY USING INDEX test_part_b_1_pkey;
+
+ CREATE TABLE test_part_b_2 PARTITION OF test_part_b FOR VALUES IN (6,7,8,9,10);
+ ALTER TABLE test_part_b_2 ADD PRIMARY KEY (b);
+ ALTER TABLE test_part_b_2 REPLICA IDENTITY USING INDEX test_part_b_2_pkey;
+
+	-- initial data, one row in each partition
+ INSERT INTO test_part_b VALUES (1, 1);
+ INSERT INTO test_part_b VALUES (6, 2);
+));
+
+# do the same thing on the subscriber
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_part_b (a int, b int) PARTITION BY LIST (a);
+
+ CREATE TABLE test_part_b_1 PARTITION OF test_part_b FOR VALUES IN (1,2,3,4,5);
+ ALTER TABLE test_part_b_1 ADD PRIMARY KEY (a);
+ ALTER TABLE test_part_b_1 REPLICA IDENTITY USING INDEX test_part_b_1_pkey;
+
+ CREATE TABLE test_part_b_2 PARTITION OF test_part_b FOR VALUES IN (6,7,8,9,10);
+ ALTER TABLE test_part_b_2 ADD PRIMARY KEY (b);
+ ALTER TABLE test_part_b_2 REPLICA IDENTITY USING INDEX test_part_b_2_pkey;
+));
+
+# create a publication replicating both columns, which is sufficient for
+# both partitions
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE PUBLICATION pub7 FOR TABLE test_part_b (a, b) WITH (publish_via_partition_root = true);
+));
+
+# add the publication to our subscription, wait for sync to complete
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub7
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO test_part_b VALUES (2, 3);
+ INSERT INTO test_part_b VALUES (7, 4);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_b ORDER BY a, b"),
+ qq(1|1
+2|3
+6|2
+7|4),
+	'partitions with different replica identities replicated correctly');
+
+
+# TEST: This time start with a column list covering RI for all partitions,
+# but then update RI for one of the partitions to not be covered by the
+# column list anymore.
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_part_c (a int, b int, c int) PARTITION BY LIST (a);
+
+ CREATE TABLE test_part_c_1 PARTITION OF test_part_c FOR VALUES IN (1,3);
+ ALTER TABLE test_part_c_1 ADD PRIMARY KEY (a);
+ ALTER TABLE test_part_c_1 REPLICA IDENTITY USING INDEX test_part_c_1_pkey;
+
+ CREATE TABLE test_part_c_2 PARTITION OF test_part_c FOR VALUES IN (2,4);
+ ALTER TABLE test_part_c_2 ADD PRIMARY KEY (b);
+ ALTER TABLE test_part_c_2 REPLICA IDENTITY USING INDEX test_part_c_2_pkey;
+
+ -- initial data, one row for each partition
+ INSERT INTO test_part_c VALUES (1, 3, 5);
+ INSERT INTO test_part_c VALUES (2, 4, 6);
+));
+
+# do the same thing on the subscriber
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_part_c (a int, b int, c int) PARTITION BY LIST (a);
+
+ CREATE TABLE test_part_c_1 PARTITION OF test_part_c FOR VALUES IN (1,3);
+ ALTER TABLE test_part_c_1 ADD PRIMARY KEY (a);
+ ALTER TABLE test_part_c_1 REPLICA IDENTITY USING INDEX test_part_c_1_pkey;
+
+ CREATE TABLE test_part_c_2 PARTITION OF test_part_c FOR VALUES IN (2,4);
+ ALTER TABLE test_part_c_2 ADD PRIMARY KEY (b);
+ ALTER TABLE test_part_c_2 REPLICA IDENTITY USING INDEX test_part_c_2_pkey;
+));
+
+# create a publication that does not replicate data through the partition
+# root (publish_via_partition_root = false) and has no column list on the
+# root, then add the partitions one by one with separate column lists
+# (which are applied, since changes are published per partition)
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE PUBLICATION pub8 FOR TABLE test_part_c WITH (publish_via_partition_root = false);
+ ALTER PUBLICATION pub8 ADD TABLE test_part_c_1 (a,c);
+ ALTER PUBLICATION pub8 ADD TABLE test_part_c_2 (a,b);
+));
+
+# add the publication to our subscription, wait for sync to complete
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ DROP SUBSCRIPTION sub1;
+ CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub8;
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO test_part_c VALUES (3, 7, 8);
+ INSERT INTO test_part_c VALUES (4, 9, 10);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_c ORDER BY a, b"),
+ qq(1||5
+2|4|
+3||8
+4|9|),
+	'partitions with different replica identities replicated correctly');
+
+
+# create a publication not replicating data through partition root, without
+# a column list on the root, and then add the partitions one by one with
+# separate column lists
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ DROP PUBLICATION pub8;
+ CREATE PUBLICATION pub8 FOR TABLE test_part_c WITH (publish_via_partition_root = false);
+ ALTER PUBLICATION pub8 ADD TABLE test_part_c_1 (a);
+ ALTER PUBLICATION pub8 ADD TABLE test_part_c_2 (a,b);
+));
+
+# add the publication to our subscription, wait for sync to complete
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
+ TRUNCATE test_part_c;
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ TRUNCATE test_part_c;
+ INSERT INTO test_part_c VALUES (1, 3, 5);
+ INSERT INTO test_part_c VALUES (2, 4, 6);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_c ORDER BY a, b"),
+ qq(1||
+2|4|),
+	'partitions with different replica identities replicated correctly');
+
+
+# TEST: Start with a single partition, with RI compatible with the column
+# list, and then attach a partition with incompatible RI.
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_part_d (a int, b int) PARTITION BY LIST (a);
+
+ CREATE TABLE test_part_d_1 PARTITION OF test_part_d FOR VALUES IN (1,3);
+ ALTER TABLE test_part_d_1 ADD PRIMARY KEY (a);
+ ALTER TABLE test_part_d_1 REPLICA IDENTITY USING INDEX test_part_d_1_pkey;
+
+ INSERT INTO test_part_d VALUES (1, 2);
+));
+
+# do the same thing on the subscriber (in fact, create both partitions right
+# away, no need to delay that)
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_part_d (a int, b int) PARTITION BY LIST (a);
+
+ CREATE TABLE test_part_d_1 PARTITION OF test_part_d FOR VALUES IN (1,3);
+ ALTER TABLE test_part_d_1 ADD PRIMARY KEY (a);
+ ALTER TABLE test_part_d_1 REPLICA IDENTITY USING INDEX test_part_d_1_pkey;
+
+ CREATE TABLE test_part_d_2 PARTITION OF test_part_d FOR VALUES IN (2,4);
+ ALTER TABLE test_part_d_2 ADD PRIMARY KEY (a);
+ ALTER TABLE test_part_d_2 REPLICA IDENTITY USING INDEX test_part_d_2_pkey;
+));
+
+# create a publication replicating just column "a", which is compatible with
+# the replica identity of the existing partition
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE PUBLICATION pub9 FOR TABLE test_part_d (a) WITH (publish_via_partition_root = true);
+));
+
+# add the publication to our subscription, wait for sync to complete
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub9
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO test_part_d VALUES (3, 4);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_d ORDER BY a, b"),
+ qq(1|
+3|),
+	'partitions with different replica identities replicated correctly');
+
+
+# TEST: If a table is included in a publication that is FOR ALL TABLES, all
+# of its columns are replicated.
+
+# drop unnecessary tables, so they do not interfere with the FOR ALL TABLES
+# publication
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ DROP TABLE tab1, tab2, tab3, tab4, tab5, tab6, tab7,
+ test_part, test_part_a, test_part_b, test_part_c, test_part_d;
+));
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_mix_2 (a int PRIMARY KEY, b int, c int);
+ CREATE PUBLICATION pub_mix_3 FOR TABLE test_mix_2 (a, b, c);
+ CREATE PUBLICATION pub_mix_4 FOR ALL TABLES;
+
+ -- initial data
+ INSERT INTO test_mix_2 VALUES (1, 2, 3);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_mix_2 (a int PRIMARY KEY, b int, c int);
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub_mix_3, pub_mix_4;
+ ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO test_mix_2 VALUES (4, 5, 6);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM test_mix_2"),
+ qq(1|2|3
+4|5|6),
+ 'all columns should be replicated');
+
+
+# TEST: If a table is included in a publication that is FOR TABLES IN
+# SCHEMA, all of its columns are replicated.
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ DROP SUBSCRIPTION sub1;
+ CREATE TABLE test_mix_3 (a int PRIMARY KEY, b int, c int);
+));
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ DROP TABLE test_mix_2;
+ CREATE TABLE test_mix_3 (a int PRIMARY KEY, b int, c int);
+ CREATE PUBLICATION pub_mix_5 FOR TABLE test_mix_3 (a, b, c);
+ CREATE PUBLICATION pub_mix_6 FOR TABLES IN SCHEMA public;
+
+ -- initial data
+ INSERT INTO test_mix_3 VALUES (1, 2, 3);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_mix_5, pub_mix_6;
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO test_mix_3 VALUES (4, 5, 6);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM test_mix_3"),
+ qq(1|2|3
+4|5|6),
+ 'all columns should be replicated');
+
+
+# TEST: Check handling of publish_via_partition_root - if a partition is
+# published through partition root, we should only apply the column list
+# defined for the whole table (not the partitions) - both during the initial
+# sync and when replicating changes. This is what we do for row filters.
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ DROP SUBSCRIPTION sub1;
+
+ CREATE TABLE test_root (a int PRIMARY KEY, b int, c int) PARTITION BY RANGE (a);
+ CREATE TABLE test_root_1 PARTITION OF test_root FOR VALUES FROM (1) TO (10);
+ CREATE TABLE test_root_2 PARTITION OF test_root FOR VALUES FROM (10) TO (20);
+));
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_root (a int PRIMARY KEY, b int, c int) PARTITION BY RANGE (a);
+ CREATE TABLE test_root_1 PARTITION OF test_root FOR VALUES FROM (1) TO (10);
+ CREATE TABLE test_root_2 PARTITION OF test_root FOR VALUES FROM (10) TO (20);
+
+ CREATE PUBLICATION pub_test_root FOR TABLE test_root (a) WITH (publish_via_partition_root = true);
+ CREATE PUBLICATION pub_test_root_1 FOR TABLE test_root_1 (a, b);
+
+ -- initial data
+ INSERT INTO test_root VALUES (1, 2, 3);
+ INSERT INTO test_root VALUES (10, 20, 30);
+));
+
+# Subscribe to pub_test_root and pub_test_root_1 at the same time, which means
+# that the initial data will be synced once, and only the column list of the
+# parent table (test_root) in the publication pub_test_root will be used for
+# both table sync and data replication.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_test_root, pub_test_root_1;
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO test_root VALUES (2, 3, 4);
+ INSERT INTO test_root VALUES (11, 21, 31);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_root ORDER BY a, b, c"),
+ qq(1||
+2||
+10||
+11||),
+ 'publication via partition root applies column list');
+
+
+# TEST: Multiple publications, one publishing the parent table via its schema
+# and one publishing the partition directly. The partition is therefore
+# published through two publications: once through a schema (so no column
+# list) containing the parent, and once directly (with all columns). The
+# expected outcome is that no column list is used.
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ DROP PUBLICATION pub1, pub2, pub3, pub4, pub5, pub6, pub7, pub8;
+
+ CREATE SCHEMA s1;
+ CREATE TABLE s1.t (a int, b int, c int) PARTITION BY RANGE (a);
+ CREATE TABLE t_1 PARTITION OF s1.t FOR VALUES FROM (1) TO (10);
+
+ CREATE PUBLICATION pub1 FOR TABLES IN SCHEMA s1;
+ CREATE PUBLICATION pub2 FOR TABLE t_1(a, b, c);
+
+ -- initial data
+ INSERT INTO s1.t VALUES (1, 2, 3);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE SCHEMA s1;
+ CREATE TABLE s1.t (a int, b int, c int) PARTITION BY RANGE (a);
+ CREATE TABLE t_1 PARTITION OF s1.t FOR VALUES FROM (1) TO (10);
+
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub1, pub2;
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO s1.t VALUES (4, 5, 6);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM s1.t ORDER BY a"),
+ qq(1|2|3
+4|5|6),
+ 'two publications, publishing the same relation');
+
+# Now resync the subscription, but with publications in the opposite order.
+# The result should be the same.
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ TRUNCATE s1.t;
+
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub2, pub1;
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO s1.t VALUES (7, 8, 9);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM s1.t ORDER BY a"),
+ qq(7|8|9),
+ 'two publications, publishing the same relation');
+
+
+# TEST: One publication, containing both the parent and child relations.
+# The expected outcome is list "a", because that's the column list defined
+# for the top-most ancestor added to the publication.
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ DROP SCHEMA s1 CASCADE;
+ CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
+ CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
+ PARTITION BY RANGE (a);
+ CREATE TABLE t_2 PARTITION OF t_1 FOR VALUES FROM (1) TO (10);
+
+ CREATE PUBLICATION pub3 FOR TABLE t_1 (a), t_2
+ WITH (PUBLISH_VIA_PARTITION_ROOT);
+
+ -- initial data
+ INSERT INTO t VALUES (1, 2, 3);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ DROP SCHEMA s1 CASCADE;
+ CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
+ CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
+ PARTITION BY RANGE (a);
+ CREATE TABLE t_2 PARTITION OF t_1 FOR VALUES FROM (1) TO (10);
+
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub3;
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO t VALUES (4, 5, 6);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM t ORDER BY a, b, c"),
+ qq(1||
+4||),
+ 'publication containing both parent and child relation');
+
+
+# TEST: One publication, containing both the parent and child relations.
+# The expected outcome is list "a", because that's the column list defined
+# for the top-most ancestor added to the publication.
+# Note: The difference from the preceding test is that in this case both
+# relations have a column list defined.
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ DROP TABLE t;
+ CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
+ CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
+ PARTITION BY RANGE (a);
+ CREATE TABLE t_2 PARTITION OF t_1 FOR VALUES FROM (1) TO (10);
+
+ CREATE PUBLICATION pub4 FOR TABLE t_1 (a), t_2 (b)
+ WITH (PUBLISH_VIA_PARTITION_ROOT);
+
+ -- initial data
+ INSERT INTO t VALUES (1, 2, 3);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ DROP TABLE t;
+ CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
+ CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
+ PARTITION BY RANGE (a);
+ CREATE TABLE t_2 PARTITION OF t_1 FOR VALUES FROM (1) TO (10);
+
+ ALTER SUBSCRIPTION sub1 SET PUBLICATION pub4;
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO t VALUES (4, 5, 6);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM t ORDER BY a, b, c"),
+ qq(1||
+4||),
+ 'publication containing both parent and child relation');
+
+# TEST: Only columns in the column list should exist in the old tuple of UPDATE
+# and DELETE.
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_oldtuple_col (a int PRIMARY KEY, b int, c int);
+ CREATE PUBLICATION pub_check_oldtuple FOR TABLE test_oldtuple_col (a, b);
+ INSERT INTO test_oldtuple_col VALUES(1, 2, 3);
+ SELECT * FROM pg_create_logical_replication_slot('test_slot', 'pgoutput');
+ UPDATE test_oldtuple_col SET a = 2;
+ DELETE FROM test_oldtuple_col;
+));
+
+
+# Check the 7th byte of the binary data for the number of columns in the old
+# tuple.
+#
+# 7 = 1 (count from 1) + 1 byte (message type) + 4 bytes (relid) + 1 byte
+# (flag for old key).
+#
+# The message type of UPDATE is 85 ('U').
+# The message type of DELETE is 68 ('D').
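+#
+# In other words, these messages start with
+#   message type (1 byte) | relid (4 bytes) | 'O' or 'K' (1 byte) | int16 column count | ...
+# so substr(data, 7, 2) holds the column count of the old tuple/key, which
+# must be 2 here because only (a, b) are in the column list.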
+$result = $node_publisher->safe_psql(
+ 'postgres', qq(
+ SELECT substr(data, 7, 2) = int2send(2::smallint)
+ FROM pg_logical_slot_peek_binary_changes('test_slot', NULL, NULL,
+ 'proto_version', '1',
+ 'publication_names', 'pub_check_oldtuple')
+ WHERE get_byte(data, 0) = 85 OR get_byte(data, 0) = 68
+));
+
+is( $result, qq(t
+t), 'check the number of columns in the old tuple');
+
+# TEST: Generated and dropped columns are not considered for the column list.
+# So a publication whose column list contains all columns except those, and a
+# publication with no column list (i.e. all columns), are considered to have
+# the same column list.
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_mix_4 (a int PRIMARY KEY, b int, c int, d int GENERATED ALWAYS AS (a + 1) STORED);
+ ALTER TABLE test_mix_4 DROP COLUMN c;
+
+ CREATE PUBLICATION pub_mix_7 FOR TABLE test_mix_4 (a, b);
+ CREATE PUBLICATION pub_mix_8 FOR TABLE test_mix_4;
+
+ -- initial data
+ INSERT INTO test_mix_4 VALUES (1, 2);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ DROP SUBSCRIPTION sub1;
+ CREATE TABLE test_mix_4 (a int PRIMARY KEY, b int, c int, d int);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_mix_7, pub_mix_8;
+));
+
+$node_subscriber->wait_for_subscription_sync;
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_mix_4 ORDER BY a"),
+ qq(1|2||),
+ 'initial synchronization with multiple publications with the same column list'
+);
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ INSERT INTO test_mix_4 VALUES (3, 4);
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_mix_4 ORDER BY a"),
+ qq(1|2||
+3|4||),
+ 'replication with multiple publications with the same column list');
+
+# TEST: With a table included in multiple publications with different column
+# lists, we should catch the error when creating the subscription.
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE test_mix_1 (a int PRIMARY KEY, b int, c int);
+ CREATE PUBLICATION pub_mix_1 FOR TABLE test_mix_1 (a, b);
+ CREATE PUBLICATION pub_mix_2 FOR TABLE test_mix_1 (a, c);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ DROP SUBSCRIPTION sub1;
+ CREATE TABLE test_mix_1 (a int PRIMARY KEY, b int, c int);
+));
+
+my ($cmdret, $stdout, $stderr) = $node_subscriber->psql(
+ 'postgres', qq(
+ CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_mix_1, pub_mix_2;
+));
+
+ok( $stderr =~
+ qr/cannot use different column lists for table "public.test_mix_1" in different publications/,
+ 'different column lists detected');
+
+# TEST: If the column list is changed after creating the subscription, we
+# should catch the error reported by walsender.
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ ALTER PUBLICATION pub_mix_1 SET TABLE test_mix_1 (a, c);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_mix_1, pub_mix_2;
+));
+
+$node_publisher->wait_for_catchup('sub1');
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ ALTER PUBLICATION pub_mix_1 SET TABLE test_mix_1 (a, b);
+ INSERT INTO test_mix_1 VALUES(1, 1, 1);
+));
+
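+# The walsender now sees test_mix_1 with different column lists in the two
+# publications used by the subscription and reports the error in the
+# publisher's log.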
+$offset = $node_publisher->wait_for_log(
+ qr/cannot use different column lists for table "public.test_mix_1" in different publications/,
+ $offset);
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/032_subscribe_use_index.pl b/src/test/subscription/t/032_subscribe_use_index.pl
new file mode 100644
index 0000000..576eec6
--- /dev/null
+++ b/src/test/subscription/t/032_subscribe_use_index.pl
@@ -0,0 +1,484 @@
+# Copyright (c) 2022-2023, PostgreSQL Global Development Group
+
+# Test logical replication behavior with subscriber using available index
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# create publisher node
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+# create subscriber node
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+my $appname = 'tap_sub';
+my $result = '';
+
+# =============================================================================
+# Testcase start: Subscription can use index with multiple rows and columns
+#
+
+# create tables pub and sub
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_replica_id_full (x int, y text)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE test_replica_id_full REPLICA IDENTITY FULL");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_replica_id_full (x int, y text)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE INDEX test_replica_id_full_idx ON test_replica_id_full(x,y)");
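+# (test_replica_id_full has REPLICA IDENTITY FULL on the publisher, so the
+# subscriber is free to use this non-unique index to locate rows when
+# applying UPDATE and DELETE)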
+
+# insert some initial data within the range 0-9 for x and y
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_replica_id_full SELECT (i%10), (i%10)::text FROM generate_series(0,10) i"
+);
+
+# create pub/sub
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_rep_full FOR TABLE test_replica_id_full");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub_rep_full CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_rep_full"
+);
+
+# wait for initial table synchronization to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# delete 2 rows
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM test_replica_id_full WHERE x IN (5, 6)");
+
+# update 2 rows
+$node_publisher->safe_psql('postgres',
+ "UPDATE test_replica_id_full SET x = 100, y = '200' WHERE x IN (1, 2)");
+
+# wait until the index is used on the subscriber
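+# (the two DELETEs and two UPDATEs above are each expected to perform one
+# index scan, hence idx_scan = 4)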
+$node_publisher->wait_for_catchup($appname);
+$node_subscriber->poll_query_until('postgres',
+ q{select (idx_scan = 4) from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idx';}
+ )
+ or die
+ "Timed out while waiting for check subscriber tap_sub_rep_full updates 4 rows via index";
+
+# make sure that the subscriber has the correct data after the UPDATE
+$result = $node_subscriber->safe_psql('postgres',
+ "select count(*) from test_replica_id_full WHERE (x = 100 and y = '200')"
+);
+is($result, qq(2),
+ 'ensure subscriber has the correct data at the end of the test');
+
+# make sure that the subscriber has the correct data after the first DELETE
+$result = $node_subscriber->safe_psql('postgres',
+ "select count(*) from test_replica_id_full where x in (5, 6)");
+is($result, qq(0),
+ 'ensure subscriber has the correct data at the end of the test');
+
+# cleanup pub
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
+$node_publisher->safe_psql('postgres', "DROP TABLE test_replica_id_full");
+# cleanup sub
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_rep_full");
+$node_subscriber->safe_psql('postgres', "DROP TABLE test_replica_id_full");
+
+# Testcase end: Subscription can use index with multiple rows and columns
+# =============================================================================
+
+# =============================================================================
+# Testcase start: Subscription can use index on partitioned tables
+
+# create tables pub and sub
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE users_table_part(user_id bigint, value_1 int, value_2 int) PARTITION BY RANGE (value_1)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE users_table_part_0 PARTITION OF users_table_part FOR VALUES FROM (0) TO (10)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE users_table_part_1 PARTITION OF users_table_part FOR VALUES FROM (10) TO (20)"
+);
+
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE users_table_part REPLICA IDENTITY FULL");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE users_table_part_0 REPLICA IDENTITY FULL");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE users_table_part_1 REPLICA IDENTITY FULL");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE users_table_part(user_id bigint, value_1 int, value_2 int) PARTITION BY RANGE (value_1)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE users_table_part_0 PARTITION OF users_table_part FOR VALUES FROM (0) TO (10)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE users_table_part_1 PARTITION OF users_table_part FOR VALUES FROM (10) TO (20)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE INDEX users_table_part_idx ON users_table_part(user_id, value_1)"
+);
+
+# insert some initial data
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO users_table_part SELECT (i%100), (i%20), i FROM generate_series(0,100) i"
+);
+
+# create pub/sub
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_rep_full FOR TABLE users_table_part");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub_rep_full CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_rep_full"
+);
+
+# wait for initial table synchronization to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# update rows, moving them to other partitions
+$node_publisher->safe_psql('postgres',
+ "UPDATE users_table_part SET value_1 = 0 WHERE user_id = 4");
+
+# delete rows from different partitions
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM users_table_part WHERE user_id = 1 and value_1 = 1");
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM users_table_part WHERE user_id = 12 and value_1 = 12");
+
+# wait until the index is used on the subscriber
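+# (one UPDATE and two DELETEs, so the partition indexes are expected to be
+# scanned three times in total)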
+$node_publisher->wait_for_catchup($appname);
+$node_subscriber->poll_query_until('postgres',
+ q{select sum(idx_scan)=3 from pg_stat_all_indexes where indexrelname ilike 'users_table_part_%';}
+ )
+ or die
+ "Timed out while waiting for check subscriber tap_sub_rep_full updates partitioned table";
+
+# make sure that the subscriber has the correct data
+$result = $node_subscriber->safe_psql('postgres',
+ "select sum(user_id+value_1+value_2) from users_table_part");
+is($result, qq(10907),
+ 'ensure subscriber has the correct data at the end of the test');
+$result = $node_subscriber->safe_psql('postgres',
+	"select count(DISTINCT(user_id, value_1, value_2)) from users_table_part");
+is($result, qq(99),
+ 'ensure subscriber has the correct data at the end of the test');
+
+# cleanup pub
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
+$node_publisher->safe_psql('postgres', "DROP TABLE users_table_part");
+
+# cleanup sub
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_rep_full");
+$node_subscriber->safe_psql('postgres', "DROP TABLE users_table_part");
+
+# Testcase end: Subscription can use index on partitioned tables
+# =============================================================================
+
+# =============================================================================
+# Testcase start: Subscription will not use indexes with only expressions or
+# partial index
+
+# create tables pub and sub
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE people (firstname text, lastname text)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE people REPLICA IDENTITY FULL");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE people (firstname text, lastname text)");
+
+# index with only an expression
+$node_subscriber->safe_psql('postgres',
+ "CREATE INDEX people_names_expr_only ON people ((firstname || ' ' || lastname))"
+);
+
+# partial index
+$node_subscriber->safe_psql('postgres',
+ "CREATE INDEX people_names_partial ON people(firstname) WHERE (firstname = 'first_name_1')"
+);
+
+# insert some initial data
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO people SELECT 'first_name_' || i::text, 'last_name_' || i::text FROM generate_series(0,200) i"
+);
+
+# create pub/sub
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_rep_full FOR TABLE people");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub_rep_full CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_rep_full"
+);
+
+# wait for initial table synchronization to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# update 2 rows
+$node_publisher->safe_psql('postgres',
+ "UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_1'"
+);
+$node_publisher->safe_psql('postgres',
+ "UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_2' AND lastname = 'last_name_2'"
+);
+
+# make sure none of the indexes is used on the subscriber
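+# Neither an expression-only index nor a partial index is considered usable
+# for locating rows on the subscriber, so the UPDATEs above should have been
+# applied via sequential scans and idx_scan should remain 0 for both indexes.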
+$node_publisher->wait_for_catchup($appname);
+$result = $node_subscriber->safe_psql('postgres',
+ "select sum(idx_scan) from pg_stat_all_indexes where indexrelname IN ('people_names_expr_only', 'people_names_partial')"
+);
+is($result, qq(0),
+ 'ensure subscriber tap_sub_rep_full updates two rows via seq. scan with index on expressions'
+);
+
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM people WHERE firstname = 'first_name_3'");
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM people WHERE firstname = 'first_name_4' AND lastname = 'last_name_4'"
+);
+
+# make sure the index is not used on the subscriber
+$node_publisher->wait_for_catchup($appname);
+$result = $node_subscriber->safe_psql('postgres',
+ "select sum(idx_scan) from pg_stat_all_indexes where indexrelname IN ('people_names_expr_only', 'people_names_partial')"
+);
+is($result, qq(0),
+	'ensure subscriber tap_sub_rep_full deletes two rows via seq. scan with index on expressions'
+);
+
+# make sure that the subscriber has the correct data
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM people");
+is($result, qq(199),
+ 'ensure subscriber has the correct data at the end of the test');
+
+# cleanup pub
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
+$node_publisher->safe_psql('postgres', "DROP TABLE people");
+# cleanup sub
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_rep_full");
+$node_subscriber->safe_psql('postgres', "DROP TABLE people");
+
+# Testcase end: Subscription will not use indexes with only expressions or
+# partial index
+# =============================================================================
+
+# =============================================================================
+# Testcase start: Subscription can use index having expressions and columns
+
+# create tables pub and sub
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE people (firstname text, lastname text)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE people REPLICA IDENTITY FULL");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE people (firstname text, lastname text)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE INDEX people_names ON people (firstname, lastname, (firstname || ' ' || lastname))"
+);
+
+# insert some initial data
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO people SELECT 'first_name_' || i::text, 'last_name_' || i::text FROM generate_series(0, 20) i"
+);
+
+# create pub/sub
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_rep_full FOR TABLE people");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub_rep_full CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_rep_full"
+);
+
+# wait for initial table synchronization to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# update 1 row
+$node_publisher->safe_psql('postgres',
+ "UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_1'"
+);
+
+# delete the updated row
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM people WHERE firstname = 'no-name'");
+
+# wait until the index is used on the subscriber
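+# (one UPDATE followed by one DELETE, so idx_scan is expected to reach 2)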
+$node_publisher->wait_for_catchup($appname);
+$node_subscriber->poll_query_until('postgres',
+ q{select idx_scan=2 from pg_stat_all_indexes where indexrelname = 'people_names';}
+ )
+ or die
+  "Timed out while waiting for check subscriber tap_sub_rep_full updates one row and deletes one row via index scan with index on expressions and columns";
+
+# make sure that the subscriber has the correct data
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM people");
+is($result, qq(20),
+ 'ensure subscriber has the correct data at the end of the test');
+
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM people WHERE firstname = 'no-name'");
+is($result, qq(0),
+ 'ensure subscriber has the correct data at the end of the test');
+
+# now, drop the index with the expression, we'll use sequential scan
+$node_subscriber->safe_psql('postgres', "DROP INDEX people_names");
+
+# delete 1 row
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM people WHERE lastname = 'last_name_18'");
+
+# make sure that the subscriber has the correct data
+$node_publisher->wait_for_catchup($appname);
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM people WHERE lastname = 'last_name_18'");
+is($result, qq(0),
+ 'ensure subscriber has the correct data at the end of the test');
+
+# cleanup pub
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
+$node_publisher->safe_psql('postgres', "DROP TABLE people");
+# cleanup sub
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_rep_full");
+$node_subscriber->safe_psql('postgres', "DROP TABLE people");
+
+# Testcase end: Subscription can use index having expressions and columns
+# =============================================================================
+
+# =============================================================================
+# Testcase start: Null values and missing column
+
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_replica_id_full (x int)");
+
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE test_replica_id_full REPLICA IDENTITY FULL");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_replica_id_full (x int, y int)");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE INDEX test_replica_id_full_idx ON test_replica_id_full(x,y)");
+
+# create pub/sub
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_rep_full FOR TABLE test_replica_id_full");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub_rep_full CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_rep_full"
+);
+
+# wait for initial table synchronization to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# load some data, and update one tuple
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_replica_id_full VALUES (1), (2), (3)");
+$node_publisher->safe_psql('postgres',
+ "UPDATE test_replica_id_full SET x = x + 1 WHERE x = 1");
+
+# check if the index is used even when the index has NULL values
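+# Only column x is replicated (the publisher's table has no y), so y stays
+# NULL on the subscriber and the index contains NULL entries; the single
+# UPDATE above should still be applied via one index scan.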
+$node_publisher->wait_for_catchup($appname);
+$node_subscriber->poll_query_until('postgres',
+ q{select idx_scan=1 from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idx';}
+ )
+ or die
+ "Timed out while waiting for check subscriber tap_sub_rep_full updates test_replica_id_full table";
+
+# make sure that the subscriber has the correct data
+$result = $node_subscriber->safe_psql('postgres',
+ "select sum(x) from test_replica_id_full WHERE y IS NULL");
+is($result, qq(7),
+ 'ensure subscriber has the correct data at the end of the test');
+
+# make sure that the subscriber has the correct data
+$result = $node_subscriber->safe_psql('postgres',
+ "select count(*) from test_replica_id_full WHERE y IS NULL");
+is($result, qq(3),
+ 'ensure subscriber has the correct data at the end of the test');
+
+# cleanup pub
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
+$node_publisher->safe_psql('postgres', "DROP TABLE test_replica_id_full");
+
+# cleanup sub
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_rep_full");
+$node_subscriber->safe_psql('postgres', "DROP TABLE test_replica_id_full");
+
+# Testcase end: Null values and missing column
+# =============================================================================
+
+# =============================================================================
+# Testcase start: Subscription using a unique index when Pub/Sub has different
+# data
+#
+# The subscriber has duplicate tuples that the publisher does not have. When
+# the publisher updates/deletes 1 row, the subscriber uses the unique index and
+# updates/deletes exactly 1 row.
+#
+
+# create tables pub and sub
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_replica_id_full (x int, y int)");
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE test_replica_id_full REPLICA IDENTITY FULL");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_replica_id_full (x int, y int)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE UNIQUE INDEX test_replica_id_full_idxy ON test_replica_id_full(x,y)"
+);
+
+# insert some initial data
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_replica_id_full SELECT i, i FROM generate_series(0,21) i"
+);
+
+# create pub/sub
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_rep_full FOR TABLE test_replica_id_full");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub_rep_full CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub_rep_full"
+);
+
+# wait for initial table synchronization to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
+
+# duplicate the data in subscriber for y column
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO test_replica_id_full SELECT i+100, i FROM generate_series(0,21) i"
+);
+
+# now, we update only 1 row on the publisher and expect the subscriber to only
+# update 1 row although there are two tuples with y = 15 on the subscriber
+$node_publisher->safe_psql('postgres',
+ "UPDATE test_replica_id_full SET x = 2000 WHERE y = 15");
+
+# wait until the index is used on the subscriber
+$node_publisher->wait_for_catchup($appname);
+$node_subscriber->poll_query_until('postgres',
+ q{select (idx_scan = 1) from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idxy';}
+ )
+ or die
+ "Timed out while waiting for check subscriber tap_sub_rep_full updates one row via index";
+
+# make sure that the subscriber has the correct data
+# we only updated 1 row
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM test_replica_id_full WHERE x = 2000");
+is($result, qq(1),
+ 'ensure subscriber has the correct data at the end of the test');
+
+# cleanup pub
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
+$node_publisher->safe_psql('postgres', "DROP TABLE test_replica_id_full");
+# cleanup sub
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_rep_full");
+$node_subscriber->safe_psql('postgres', "DROP TABLE test_replica_id_full");
+
+# Testcase end: Subscription using a unique index when Pub/Sub has different
+# data
+# =============================================================================
+
+$node_subscriber->stop('fast');
+$node_publisher->stop('fast');
+
+done_testing();
diff --git a/src/test/subscription/t/033_run_as_table_owner.pl b/src/test/subscription/t/033_run_as_table_owner.pl
new file mode 100644
index 0000000..f408320
--- /dev/null
+++ b/src/test/subscription/t/033_run_as_table_owner.pl
@@ -0,0 +1,259 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Test that logical replication respects permissions
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+my ($node_publisher, $node_subscriber, $publisher_connstr, $result, $offset);
+$offset = 0;
+
+sub publish_insert
+{
+ my ($tbl, $new_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
+ SET SESSION AUTHORIZATION regress_alice;
+ INSERT INTO $tbl (i) VALUES ($new_i);
+ ));
+}
+
+sub publish_update
+{
+ my ($tbl, $old_i, $new_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
+ SET SESSION AUTHORIZATION regress_alice;
+ UPDATE $tbl SET i = $new_i WHERE i = $old_i;
+ ));
+}
+
+sub publish_delete
+{
+ my ($tbl, $old_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
+ SET SESSION AUTHORIZATION regress_alice;
+ DELETE FROM $tbl WHERE i = $old_i;
+ ));
+}
+
+sub expect_replication
+{
+ my ($tbl, $cnt, $min, $max, $testname) = @_;
+ $node_publisher->wait_for_catchup('admin_sub');
+ $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
+ SELECT COUNT(i), MIN(i), MAX(i) FROM $tbl));
+ is($result, "$cnt|$min|$max", $testname);
+}
+
+sub expect_failure
+{
+ my ($tbl, $cnt, $min, $max, $re, $testname) = @_;
+ $offset = $node_subscriber->wait_for_log($re, $offset);
+ $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
+ SELECT COUNT(i), MIN(i), MAX(i) FROM $tbl));
+ is($result, "$cnt|$min|$max", $testname);
+}
+
+sub revoke_superuser
+{
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER ROLE $role NOSUPERUSER));
+}
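+
+# The helpers above are used throughout this test; for example (illustrative
+# values only):
+#   publish_insert("alice.unpartitioned", 1);
+#   expect_replication("alice.unpartitioned", 1, 1, 1, "row replicated");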
+
+# Create publisher and subscriber nodes with a schema owned and published by
+# "regress_alice" but subscribed and replicated by the different roles
+# "regress_admin" and "regress_admin2".
+#
+$node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_publisher->init(allows_streaming => 'logical');
+$node_subscriber->init;
+$node_publisher->start;
+$node_subscriber->start;
+$publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+
+for my $node ($node_publisher, $node_subscriber)
+{
+ $node->safe_psql(
+ 'postgres', qq(
+ CREATE ROLE regress_admin SUPERUSER LOGIN;
+ CREATE ROLE regress_admin2 SUPERUSER LOGIN;
+ CREATE ROLE regress_alice NOSUPERUSER LOGIN;
+ GRANT CREATE ON DATABASE postgres TO regress_alice;
+ SET SESSION AUTHORIZATION regress_alice;
+ CREATE SCHEMA alice;
+ GRANT USAGE ON SCHEMA alice TO regress_admin;
+
+ CREATE TABLE alice.unpartitioned (i INTEGER);
+ ALTER TABLE alice.unpartitioned REPLICA IDENTITY FULL;
+ GRANT SELECT ON TABLE alice.unpartitioned TO regress_admin;
+ ));
+}
+$node_publisher->safe_psql(
+ 'postgres', qq(
+SET SESSION AUTHORIZATION regress_alice;
+
+CREATE PUBLICATION alice FOR TABLE alice.unpartitioned
+ WITH (publish_via_partition_root = true);
+));
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+SET SESSION AUTHORIZATION regress_admin;
+CREATE SUBSCRIPTION admin_sub CONNECTION '$publisher_connstr' PUBLICATION alice
+ WITH (run_as_owner = true, password_required = false);
+));
+
+# Wait for initial sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'admin_sub');
+
+# Verify that "regress_admin" can replicate into the tables
+publish_insert("alice.unpartitioned", 1);
+publish_insert("alice.unpartitioned", 3);
+publish_insert("alice.unpartitioned", 5);
+publish_update("alice.unpartitioned", 1 => 7);
+publish_delete("alice.unpartitioned", 3);
+expect_replication("alice.unpartitioned", 2, 5, 7, "superuser can replicate");
+
+# Revoke superuser privilege for "regress_admin", and verify that we now
+# fail to replicate an insert.
+revoke_superuser("regress_admin");
+publish_insert("alice.unpartitioned", 9);
+expect_failure(
+ "alice.unpartitioned", 2, 5, 7,
+ qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
+ "with no privileges cannot replicate");
+
+# Now grant DML privileges and verify that we can replicate an INSERT.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ALTER ROLE regress_admin NOSUPERUSER;
+SET SESSION AUTHORIZATION regress_alice;
+GRANT INSERT,UPDATE,DELETE ON alice.unpartitioned TO regress_admin;
+REVOKE SELECT ON alice.unpartitioned FROM regress_admin;
+));
+expect_replication("alice.unpartitioned", 3, 5, 9,
+ "with INSERT privilege can replicate INSERT");
+
+# We can't yet replicate an UPDATE because we don't have SELECT.
+publish_update("alice.unpartitioned", 5 => 11);
+publish_delete("alice.unpartitioned", 9);
+expect_failure(
+ "alice.unpartitioned",
+ 3,
+ 5,
+ 9,
+ qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
+ "without SELECT privilege cannot replicate UPDATE or DELETE");
+
+# After granting SELECT, replication resumes.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+SET SESSION AUTHORIZATION regress_alice;
+GRANT SELECT ON alice.unpartitioned TO regress_admin;
+));
+expect_replication("alice.unpartitioned", 2, 7, 11,
+ "with all privileges can replicate");
+
+# Remove all privileges again. Instead, give regress_admin the ability to
+# SET ROLE to regress_alice.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+SET SESSION AUTHORIZATION regress_alice;
+REVOKE ALL PRIVILEGES ON alice.unpartitioned FROM regress_admin;
+RESET SESSION AUTHORIZATION;
+GRANT regress_alice TO regress_admin WITH INHERIT FALSE, SET TRUE;
+));
+
+# Because replication is running as the subscription owner in this test,
+# the above grant doesn't help: it gives the ability to SET ROLE, but not
+# privileges on the table.
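+# (With run_as_owner = true, the apply worker acts with regress_admin's own
+# privileges and never does SET ROLE to the table owner, so a grant that only
+# permits SET ROLE does not confer access to the table.)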
+publish_insert("alice.unpartitioned", 13);
+expect_failure(
+ "alice.unpartitioned",
+ 2,
+ 7,
+ 11,
+ qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
+ "with SET ROLE but not INHERIT cannot replicate");
+
+# Now remove SET ROLE and add INHERIT and check that things start working.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+GRANT regress_alice TO regress_admin WITH INHERIT TRUE, SET FALSE;
+));
+expect_replication("alice.unpartitioned", 3, 7, 13,
+ "with INHERIT but not SET ROLE can replicate");
+
+# Similar to the previous test, remove all privileges again and instead
+# give regress_admin the ability to SET ROLE to regress_alice.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+SET SESSION AUTHORIZATION regress_alice;
+REVOKE ALL PRIVILEGES ON alice.unpartitioned FROM regress_admin;
+RESET SESSION AUTHORIZATION;
+GRANT regress_alice TO regress_admin WITH INHERIT FALSE, SET TRUE;
+));
+
+# Because replication is running as the subscription owner in this test,
+# the above grant doesn't help.
+publish_insert("alice.unpartitioned", 14);
+expect_failure(
+ "alice.unpartitioned",
+ 3,
+ 7,
+ 13,
+ qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
+ "with no privileges cannot replicate");
+
+# Allow the replication to run as table owner and check that things start
+# working.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ALTER SUBSCRIPTION admin_sub SET (run_as_owner = false);
+));
+
+expect_replication("alice.unpartitioned", 4, 7, 14,
+ "can replicate after setting run_as_owner to false");
+
+# Remove the subscription and truncate the table for the initial data sync
+# tests.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+DROP SUBSCRIPTION admin_sub;
+TRUNCATE alice.unpartitioned;
+));
+
+# Create a new subscription "admin_sub" owned by regress_admin2. It's created
+# disabled so that we can revoke regress_admin2's superuser privilege before
+# enabling it.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+SET SESSION AUTHORIZATION regress_admin2;
+CREATE SUBSCRIPTION admin_sub CONNECTION '$publisher_connstr' PUBLICATION alice
+WITH (run_as_owner = false, password_required = false, copy_data = true, enabled = false);
+));
+
+# Revoke superuser privilege for "regress_admin2", and give it the
+# ability to SET ROLE. Then enable the subscription "admin_sub".
+revoke_superuser("regress_admin2");
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+GRANT regress_alice TO regress_admin2 WITH INHERIT FALSE, SET TRUE;
+ALTER SUBSCRIPTION admin_sub ENABLE;
+));
+
+# Because the initial data sync runs as the table owner, all the
+# data should be copied.
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'admin_sub');
+expect_replication("alice.unpartitioned", 4, 7, 14,
+ "table owner can do the initial data copy");
+
+done_testing();
diff --git a/src/test/subscription/t/100_bugs.pl b/src/test/subscription/t/100_bugs.pl
new file mode 100644
index 0000000..091da5a
--- /dev/null
+++ b/src/test/subscription/t/100_bugs.pl
@@ -0,0 +1,494 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# Tests for various bugs found over time
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Bug #15114
+
+# The bug was that determining which columns are part of the replica
+# identity index using RelationGetIndexAttrBitmap() would run
+# eval_const_expressions() on index expressions and predicates across
+# all indexes of the table, which in turn might require a snapshot,
+# but there wasn't one set, so it crashed. There were actually two
+# separate bugs, one on the publisher and one on the subscriber. The
+# fix was to avoid the constant expressions simplification in
+# RelationGetIndexAttrBitmap(), so it's safe to call in more contexts.
+
+my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher->init(allows_streaming => 'logical');
+$node_publisher->start;
+
+my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
+$node_subscriber->init(allows_streaming => 'logical');
+$node_subscriber->start;
+
+my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab1 (a int PRIMARY KEY, b int)");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE FUNCTION double(x int) RETURNS int IMMUTABLE LANGUAGE SQL AS 'select x * 2'"
+);
+
+# an index with a predicate that lends itself to constant expressions
+# evaluation
+$node_publisher->safe_psql('postgres',
+ "CREATE INDEX ON tab1 (b) WHERE a > double(1)");
+
+# and the same setup on the subscriber
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab1 (a int PRIMARY KEY, b int)");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE FUNCTION double(x int) RETURNS int IMMUTABLE LANGUAGE SQL AS 'select x * 2'"
+);
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE INDEX ON tab1 (b) WHERE a > double(1)");
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub1 FOR ALL TABLES");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1"
+);
+
+$node_publisher->wait_for_catchup('sub1');
+
+# This would crash, first on the publisher, and then (if the publisher
+# is fixed) on the subscriber.
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1, 2)");
+
+$node_publisher->wait_for_catchup('sub1');
+
+pass('index predicates do not cause crash');
+
+# We'll re-use these nodes below, so drop their replication state.
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION sub1");
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION pub1");
+# Drop the tables too.
+$node_publisher->safe_psql('postgres', "DROP TABLE tab1");
+$node_subscriber->safe_psql('postgres', "DROP TABLE tab1");
+
+$node_publisher->stop('fast');
+$node_subscriber->stop('fast');
+
+
+# Handling of temporary and unlogged tables with FOR ALL TABLES publications
+
+# If a FOR ALL TABLES publication exists, temporary and unlogged
+# tables are ignored for publishing changes. The bug was that we
+# would still check in that case that such a table has a replica
+# identity set before accepting updates. If it did not, it would cause
+# an error when an update was attempted.
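+# psql() returns psql's exit code, so the is(..., 0, ...) checks below merely
+# verify that the UPDATEs complete without error.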
+
+$node_publisher->rotate_logfile();
+$node_publisher->start();
+
+# Although we don't use node_subscriber in this test, keep its logfile
+# name in step with node_publisher for later tests.
+$node_subscriber->rotate_logfile();
+
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION pub FOR ALL TABLES");
+
+is( $node_publisher->psql(
+ 'postgres',
+ "CREATE TEMPORARY TABLE tt1 AS SELECT 1 AS a; UPDATE tt1 SET a = 2;"),
+ 0,
+ 'update to temporary table without replica identity with FOR ALL TABLES publication'
+);
+
+is( $node_publisher->psql(
+ 'postgres',
+ "CREATE UNLOGGED TABLE tu1 AS SELECT 1 AS a; UPDATE tu1 SET a = 2;"),
+ 0,
+ 'update to unlogged table without replica identity with FOR ALL TABLES publication'
+);
+
+# Again, drop replication state but not tables.
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION pub");
+
+$node_publisher->stop('fast');
+
+
+# Bug #16643 - https://postgr.es/m/16643-eaadeb2a1a58d28c@postgresql.org
+#
+# Initial sync doesn't complete; the protocol was not being followed per
+# expectations after commit 07082b08cc5d.
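+# The two databases d1 and d2 on the single node below act as publisher and
+# subscriber respectively, connected through a manually created replication
+# slot ('testslot').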
+my $node_twoways = PostgreSQL::Test::Cluster->new('twoways');
+$node_twoways->init(allows_streaming => 'logical');
+$node_twoways->start;
+for my $db (qw(d1 d2))
+{
+ $node_twoways->safe_psql('postgres', "CREATE DATABASE $db");
+ $node_twoways->safe_psql($db, "CREATE TABLE t (f int)");
+ $node_twoways->safe_psql($db, "CREATE TABLE t2 (f int)");
+}
+
+my $rows = 3000;
+$node_twoways->safe_psql(
+ 'd1', qq{
+ INSERT INTO t SELECT * FROM generate_series(1, $rows);
+ INSERT INTO t2 SELECT * FROM generate_series(1, $rows);
+ CREATE PUBLICATION testpub FOR TABLE t;
+ SELECT pg_create_logical_replication_slot('testslot', 'pgoutput');
+ });
+
+$node_twoways->safe_psql('d2',
+ "CREATE SUBSCRIPTION testsub CONNECTION \$\$"
+ . $node_twoways->connstr('d1')
+ . "\$\$ PUBLICATION testpub WITH (create_slot=false, "
+ . "slot_name='testslot')");
+$node_twoways->safe_psql(
+ 'd1', qq{
+ INSERT INTO t SELECT * FROM generate_series(1, $rows);
+ INSERT INTO t2 SELECT * FROM generate_series(1, $rows);
+ });
+$node_twoways->safe_psql('d1', 'ALTER PUBLICATION testpub ADD TABLE t2');
+$node_twoways->safe_psql('d2',
+ 'ALTER SUBSCRIPTION testsub REFRESH PUBLICATION');
+
+# We cannot rely solely on wait_for_catchup() here; it isn't sufficient
+# when tablesync workers might still be running. So in addition to that,
+# verify that tables are synced.
+$node_twoways->wait_for_subscription_sync($node_twoways, 'testsub', 'd2');
+
+is($node_twoways->safe_psql('d2', "SELECT count(f) FROM t"),
+ $rows * 2, "2x$rows rows in t");
+is($node_twoways->safe_psql('d2', "SELECT count(f) FROM t2"),
+ $rows * 2, "2x$rows rows in t2");
+
+# Verify table data is synced with cascaded replication setup. This is mainly
+# to test whether the data written by tablesync worker gets replicated.
+my $node_pub = PostgreSQL::Test::Cluster->new('testpublisher1');
+$node_pub->init(allows_streaming => 'logical');
+$node_pub->start;
+
+my $node_pub_sub = PostgreSQL::Test::Cluster->new('testpublisher_subscriber');
+$node_pub_sub->init(allows_streaming => 'logical');
+$node_pub_sub->start;
+
+my $node_sub = PostgreSQL::Test::Cluster->new('testsubscriber1');
+$node_sub->init(allows_streaming => 'logical');
+$node_sub->start;
+
+# Create the tables in all nodes.
+$node_pub->safe_psql('postgres', "CREATE TABLE tab1 (a int)");
+$node_pub_sub->safe_psql('postgres', "CREATE TABLE tab1 (a int)");
+$node_sub->safe_psql('postgres', "CREATE TABLE tab1 (a int)");
+
+# Create a cascaded replication setup like:
+# N1 - Create publication testpub1.
+# N2 - Create publication testpub2 and also include subscriber which subscribes
+# to testpub1.
+# N3 - Create subscription testsub2 subscribes to testpub2.
+#
+# Note that subscription on N3 needs to be created before subscription on N2 to
+# test whether the data written by tablesync worker of N2 gets replicated.
+$node_pub->safe_psql('postgres',
+ "CREATE PUBLICATION testpub1 FOR TABLE tab1");
+
+$node_pub_sub->safe_psql('postgres',
+ "CREATE PUBLICATION testpub2 FOR TABLE tab1");
+
+my $publisher1_connstr = $node_pub->connstr . ' dbname=postgres';
+my $publisher2_connstr = $node_pub_sub->connstr . ' dbname=postgres';
+
+$node_sub->safe_psql('postgres',
+ "CREATE SUBSCRIPTION testsub2 CONNECTION '$publisher2_connstr' PUBLICATION testpub2"
+);
+
+$node_pub_sub->safe_psql('postgres',
+ "CREATE SUBSCRIPTION testsub1 CONNECTION '$publisher1_connstr' PUBLICATION testpub1"
+);
+
+$node_pub->safe_psql('postgres',
+ "INSERT INTO tab1 values(generate_series(1,10))");
+
+# Verify that the data is cascaded from testpub1 to testsub1 and further from
+# testpub2 (which had testsub1) to testsub2.
+$node_pub->wait_for_catchup('testsub1');
+$node_pub_sub->wait_for_catchup('testsub2');
+
+# Drop subscriptions as we don't need them anymore
+$node_pub_sub->safe_psql('postgres', "DROP SUBSCRIPTION testsub1");
+$node_sub->safe_psql('postgres', "DROP SUBSCRIPTION testsub2");
+
+# Drop publications as we don't need them anymore
+$node_pub->safe_psql('postgres', "DROP PUBLICATION testpub1");
+$node_pub_sub->safe_psql('postgres', "DROP PUBLICATION testpub2");
+
+# Clean up the tables on both publisher and subscriber as we don't need them
+$node_pub->safe_psql('postgres', "DROP TABLE tab1");
+$node_pub_sub->safe_psql('postgres', "DROP TABLE tab1");
+$node_sub->safe_psql('postgres', "DROP TABLE tab1");
+
+$node_pub->stop('fast');
+$node_pub_sub->stop('fast');
+$node_sub->stop('fast');
+
+# https://postgr.es/m/OS0PR01MB61133CA11630DAE45BC6AD95FB939%40OS0PR01MB6113.jpnprd01.prod.outlook.com
+
+# The bug was that when changing the REPLICA IDENTITY INDEX to another one, the
+# target table's relcache was not being invalidated. This led to UPDATE/DELETE
+# operations being skipped during apply on the subscriber side, because the
+# columns required to find the corresponding rows were not logged.
+
+$node_publisher->rotate_logfile();
+$node_publisher->start();
+
+$node_subscriber->rotate_logfile();
+$node_subscriber->start();
+
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_replidentity_index(a int not null, b int not null)");
+$node_publisher->safe_psql('postgres',
+ "CREATE UNIQUE INDEX idx_replidentity_index_a ON tab_replidentity_index(a)"
+);
+$node_publisher->safe_psql('postgres',
+ "CREATE UNIQUE INDEX idx_replidentity_index_b ON tab_replidentity_index(b)"
+);
+
+# use index idx_replidentity_index_a as REPLICA IDENTITY on publisher.
+$node_publisher->safe_psql('postgres',
+ "ALTER TABLE tab_replidentity_index REPLICA IDENTITY USING INDEX idx_replidentity_index_a"
+);
+
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_replidentity_index VALUES(1, 1),(2, 2)");
+
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_replidentity_index(a int not null, b int not null)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE UNIQUE INDEX idx_replidentity_index_a ON tab_replidentity_index(a)"
+);
+$node_subscriber->safe_psql('postgres',
+ "CREATE UNIQUE INDEX idx_replidentity_index_b ON tab_replidentity_index(b)"
+);
+# use index idx_replidentity_index_b as REPLICA IDENTITY on subscriber because
+# it reflects the future scenario we are testing: changing REPLICA IDENTITY
+# INDEX.
+$node_subscriber->safe_psql('postgres',
+ "ALTER TABLE tab_replidentity_index REPLICA IDENTITY USING INDEX idx_replidentity_index_b"
+);
+
+$publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE tab_replidentity_index");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM tab_replidentity_index"),
+ qq(1|1
+2|2),
+ "check initial data on subscriber");
+
+# Set REPLICA IDENTITY to idx_replidentity_index_b on publisher, then run UPDATE and DELETE.
+$node_publisher->safe_psql(
+ 'postgres', qq[
+ ALTER TABLE tab_replidentity_index REPLICA IDENTITY USING INDEX idx_replidentity_index_b;
+ UPDATE tab_replidentity_index SET a = -a WHERE a = 1;
+ DELETE FROM tab_replidentity_index WHERE a = 2;
+]);
+
+$node_publisher->wait_for_catchup('tap_sub');
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM tab_replidentity_index"),
+ qq(-1|1),
+ "update works with REPLICA IDENTITY");
+
+# Clean up
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub");
+$node_publisher->safe_psql('postgres', "DROP TABLE tab_replidentity_index");
+$node_subscriber->safe_psql('postgres', "DROP TABLE tab_replidentity_index");
+
+# Test schema invalidation by renaming the schema
+
+# Create tables on publisher
+$node_publisher->safe_psql('postgres', "CREATE SCHEMA sch1");
+$node_publisher->safe_psql('postgres', "CREATE TABLE sch1.t1 (c1 int)");
+
+# Create tables on subscriber
+$node_subscriber->safe_psql('postgres', "CREATE SCHEMA sch1");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE sch1.t1 (c1 int)");
+$node_subscriber->safe_psql('postgres', "CREATE SCHEMA sch2");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE sch2.t1 (c1 int)");
+
+# Set up logical replication that will cover t1 under both schema names
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub_sch FOR ALL TABLES");
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION tap_sub_sch CONNECTION '$publisher_connstr' PUBLICATION tap_pub_sch"
+);
+
+# Wait for initial table sync to finish
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub_sch');
+
+# Check what happens to data inserted before and after schema rename
+$node_publisher->safe_psql(
+ 'postgres',
+ "begin;
+insert into sch1.t1 values(1);
+alter schema sch1 rename to sch2;
+create schema sch1;
+create table sch1.t1(c1 int);
+insert into sch1.t1 values(2);
+insert into sch2.t1 values(3);
+commit;");
+
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub_sch');
+
+# Subscriber's sch1.t1 should receive the row inserted into the old sch1.t1
+# before the rename (1) and the row inserted into the new sch1.t1 (2), but not
+# the row inserted post-rename into the old table under its new name, sch2.t1 (3).
+my $result = $node_subscriber->safe_psql('postgres', "SELECT * FROM sch1.t1");
+is( $result, qq(1
+2), 'check data in subscriber sch1.t1 after schema rename');
+
+# Subscriber's sch2.t1 won't have gotten anything yet ...
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM sch2.t1");
+is($result, '', 'no data yet in subscriber sch2.t1 after schema rename');
+
+# ... but it should show up after REFRESH.
+$node_subscriber->safe_psql('postgres',
+ 'ALTER SUBSCRIPTION tap_sub_sch REFRESH PUBLICATION');
+
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub_sch');
+
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM sch2.t1");
+is( $result, qq(1
+3), 'check data in subscriber sch2.t1 after schema rename');
+
+# Again, drop replication state but not tables.
+$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_sch");
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_sch");
+
+$node_publisher->stop('fast');
+$node_subscriber->stop('fast');
+
+# The bug was that when REPLICA IDENTITY FULL is used with dropped or
+# generated columns, we failed to apply updates and deletes.
+$node_publisher->rotate_logfile();
+$node_publisher->start();
+
+$node_subscriber->rotate_logfile();
+$node_subscriber->start();
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE dropped_cols (a int, b_drop int, c int);
+ ALTER TABLE dropped_cols REPLICA IDENTITY FULL;
+ CREATE TABLE generated_cols (a int, b_gen int GENERATED ALWAYS AS (5 * a) STORED, c int);
+ ALTER TABLE generated_cols REPLICA IDENTITY FULL;
+ CREATE PUBLICATION pub_dropped_cols FOR TABLE dropped_cols, generated_cols;
+ -- some initial data
+ INSERT INTO dropped_cols VALUES (1, 1, 1);
+ INSERT INTO generated_cols (a, c) VALUES (1, 1);
+));
+
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE dropped_cols (a int, b_drop int, c int);
+ CREATE TABLE generated_cols (a int, b_gen int GENERATED ALWAYS AS (5 * a) STORED, c int);
+));
+
+$publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
+$node_subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION sub_dropped_cols CONNECTION '$publisher_connstr' PUBLICATION pub_dropped_cols"
+);
+$node_subscriber->wait_for_subscription_sync;
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ ALTER TABLE dropped_cols DROP COLUMN b_drop;
+));
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ ALTER TABLE dropped_cols DROP COLUMN b_drop;
+));
+
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ UPDATE dropped_cols SET a = 100;
+ UPDATE generated_cols SET a = 100;
+));
+$node_publisher->wait_for_catchup('sub_dropped_cols');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT count(*) FROM dropped_cols WHERE a = 100"),
+ qq(1),
+ 'replication with RI FULL and dropped columns');
+
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT count(*) FROM generated_cols WHERE a = 100"),
+ qq(1),
+ 'replication with RI FULL and generated columns');
+
+$node_publisher->stop('fast');
+$node_subscriber->stop('fast');
+
+# The bug was that pgoutput was incorrectly replacing missing attributes in
+# tuples with NULL. This could result in incorrect replication with
+# `REPLICA IDENTITY FULL`.
+
+$node_publisher->rotate_logfile();
+$node_publisher->start();
+
+$node_subscriber->rotate_logfile();
+$node_subscriber->start();
+
+# Set up a table with schema `(a int, b bool)` where the `b` attribute is
+# missing for one row due to the `ALTER TABLE ... ADD COLUMN ... DEFAULT`
+# fast path.
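+# (With that fast path the pre-existing row (1) is stored without the b
+# attribute; its value is supplied from the default recorded at ALTER TABLE
+# time rather than from the tuple itself.)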
+$node_publisher->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab_default (a int);
+ ALTER TABLE tab_default REPLICA IDENTITY FULL;
+ INSERT INTO tab_default VALUES (1);
+ ALTER TABLE tab_default ADD COLUMN b bool DEFAULT false NOT NULL;
+ INSERT INTO tab_default VALUES (2, true);
+ CREATE PUBLICATION pub1 FOR TABLE tab_default;
+));
+
+# Replicate to the subscriber.
+$node_subscriber->safe_psql(
+ 'postgres', qq(
+ CREATE TABLE tab_default (a int, b bool);
+ CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1;
+));
+
+$node_subscriber->wait_for_subscription_sync($node_publisher, 'sub1');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a, b FROM tab_default");
+is($result, qq(1|f
+2|t), 'check snapshot on subscriber');
+
+# Update all rows in the table and ensure the rows with the missing `b`
+# attribute replicate correctly.
+$node_publisher->safe_psql('postgres',
+ "UPDATE tab_default SET a = a + 1");
+$node_publisher->wait_for_catchup('sub1');
+
+# When the bug is present, the `1|f` row will not be updated to `2|f` because
+# the publisher incorrectly fills in `NULL` for `b` and publishes an update
+# for `1|NULL`, which doesn't exist in the subscriber.
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a, b FROM tab_default");
+is($result, qq(2|f
+3|t), 'check replicated update on subscriber');
+
+$node_publisher->stop('fast');
+$node_subscriber->stop('fast');
+
+done_testing();