author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 13:44:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 13:44:03 +0000
commit     293913568e6a7a86fd1479e1cff8e2ecb58d6568 (patch)
tree       fc3b469a3ec5ab71b36ea97cc7aaddb838423a0c /src/test/perl/PostgreSQL
parent     Initial commit. (diff)
Adding upstream version 16.2. (upstream/16.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/test/perl/PostgreSQL')
-rw-r--r--  src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm     667
-rw-r--r--  src/test/perl/PostgreSQL/Test/BackgroundPsql.pm    308
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm          3188
-rw-r--r--  src/test/perl/PostgreSQL/Test/RecursiveCopy.pm     157
-rw-r--r--  src/test/perl/PostgreSQL/Test/SimpleTee.pm          63
-rw-r--r--  src/test/perl/PostgreSQL/Test/Utils.pm            1074
-rw-r--r--  src/test/perl/PostgreSQL/Version.pm                167
7 files changed, 5624 insertions, 0 deletions
diff --git a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
new file mode 100644
index 0000000..e34dfb9
--- /dev/null
+++ b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
@@ -0,0 +1,667 @@
+
+# Copyright (c) 2023, PostgreSQL Global Development Group
+
+=pod
+
+=head1 NAME
+
+PostgreSQL::Test::AdjustUpgrade - helper module for cross-version upgrade tests
+
+=head1 SYNOPSIS
+
+ use PostgreSQL::Test::AdjustUpgrade;
+
+ # Build commands to adjust contents of old-version database before dumping
+ $statements = adjust_database_contents($old_version, %dbnames);
+
+ # Adjust contents of old pg_dumpall output file to match newer version
+ $dump = adjust_old_dumpfile($old_version, $dump);
+
+ # Adjust contents of new pg_dumpall output file to match older version
+ $dump = adjust_new_dumpfile($old_version, $dump);
+
+=head1 DESCRIPTION
+
+C<PostgreSQL::Test::AdjustUpgrade> encapsulates various hacks needed to
+compare the results of cross-version upgrade tests.
+
+=cut
+
+package PostgreSQL::Test::AdjustUpgrade;
+
+use strict;
+use warnings;
+
+use Exporter 'import';
+use PostgreSQL::Version;
+
+our @EXPORT = qw(
+ adjust_database_contents
+ adjust_old_dumpfile
+ adjust_new_dumpfile
+);
+
+=pod
+
+=head1 ROUTINES
+
+=over
+
+=item $statements = adjust_database_contents($old_version, %dbnames)
+
+Generate SQL commands to perform any changes to an old-version installation
+that are needed before we can pg_upgrade it into the current PostgreSQL
+version.
+
+Typically this involves dropping or adjusting no-longer-supported objects.
+
+Arguments:
+
+=over
+
+=item C<old_version>: Branch we are upgrading from, represented as a
+PostgreSQL::Version object.
+
+=item C<dbnames>: Hash of database names present in the old installation.
+
+=back
+
+Returns a reference to a hash, wherein the keys are database names and the
+values are arrayrefs to lists of statements to be run in those databases.
+
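+For example, a caller might run the generated statements against the old
+node like this (an illustrative sketch only; C<$old_node> and the database
+names are hypothetical):
+
+ my $stmts = adjust_database_contents($old_version,
+     'regression' => 1, 'contrib_regression_dblink' => 1);
+ foreach my $db (sort keys %$stmts)
+ {
+     $old_node->safe_psql($db, $_) for @{ $stmts->{$db} };
+ }
+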
+=cut
+
+sub adjust_database_contents
+{
+ my ($old_version, %dbnames) = @_;
+ my $result = {};
+
+ die "wrong type for \$old_version\n"
+ unless $old_version->isa("PostgreSQL::Version");
+
+ # The version tests can be sensitive if fixups have been applied in a
+ # recent version and pg_upgrade is run with a beta version, or such.
+ # Therefore, use a modified version object that only contains the major.
+ $old_version = PostgreSQL::Version->new($old_version->major);
+
+ # remove dbs of modules known to cause pg_upgrade to fail
+ # anything not builtin and incompatible should clean up its own db
+ foreach my $bad_module ('test_ddl_deparse', 'tsearch2')
+ {
+ if ($dbnames{"contrib_regression_$bad_module"})
+ {
+ _add_st($result, 'postgres',
+ "drop database contrib_regression_$bad_module");
+ delete($dbnames{"contrib_regression_$bad_module"});
+ }
+ }
+
+ # avoid no-path-to-downgrade-extension-version issues
+ if ($dbnames{contrib_regression_test_extensions})
+ {
+ _add_st(
+ $result,
+ 'contrib_regression_test_extensions',
+ 'drop extension if exists test_ext_cine',
+ 'drop extension if exists test_ext7');
+ }
+
+ # stuff not supported from release 16
+ if ($old_version >= 12 && $old_version < 16)
+ {
+ # Can't upgrade aclitem in user tables from pre 16 to 16+.
+ _add_st($result, 'regression',
+ 'alter table public.tab_core_types drop column aclitem');
+ # Can't handle child tables with locally-generated columns.
+ _add_st(
+ $result, 'regression',
+ 'drop table public.gtest_normal_child',
+ 'drop table public.gtest_normal_child2');
+ }
+
+ # stuff not supported from release 14
+ if ($old_version < 14)
+ {
+ # postfix operators (some don't exist in very old versions)
+ _add_st(
+ $result,
+ 'regression',
+ 'drop operator #@# (bigint,NONE)',
+ 'drop operator #%# (bigint,NONE)',
+ 'drop operator if exists !=- (bigint,NONE)',
+ 'drop operator if exists #@%# (bigint,NONE)');
+
+ # get rid of dblink's dependencies on regress.so
+ my $regrdb =
+ $old_version le '9.4'
+ ? 'contrib_regression'
+ : 'contrib_regression_dblink';
+
+ if ($dbnames{$regrdb})
+ {
+ _add_st(
+ $result, $regrdb,
+ 'drop function if exists public.putenv(text)',
+ 'drop function if exists public.wait_pid(integer)');
+ }
+ }
+
+ # user table OIDs are gone from release 12 on
+ if ($old_version < 12)
+ {
+ my $nooid_stmt = q{
+ DO $stmt$
+ DECLARE
+ rec text;
+ BEGIN
+ FOR rec in
+ select oid::regclass::text
+ from pg_class
+ where relname !~ '^pg_'
+ and relhasoids
+ and relkind in ('r','m')
+ order by 1
+ LOOP
+ execute 'ALTER TABLE ' || rec || ' SET WITHOUT OIDS';
+ RAISE NOTICE 'removing oids from table %', rec;
+ END LOOP;
+ END; $stmt$;
+ };
+
+ foreach my $oiddb ('regression', 'contrib_regression_btree_gist')
+ {
+ next unless $dbnames{$oiddb};
+ _add_st($result, $oiddb, $nooid_stmt);
+ }
+
+ # this table had OIDs too, but we'll just drop it
+ if ($old_version >= 10 && $dbnames{'contrib_regression_postgres_fdw'})
+ {
+ _add_st(
+ $result,
+ 'contrib_regression_postgres_fdw',
+ 'drop foreign table ft_pg_type');
+ }
+ }
+
+ # abstime+friends are gone from release 12 on; but these tables
+ # might or might not be present depending on regression test vintage
+ if ($old_version < 12)
+ {
+ _add_st($result, 'regression',
+ 'drop table if exists abstime_tbl, reltime_tbl, tinterval_tbl');
+ }
+
+ # some regression functions gone from release 11 on
+ if ($old_version < 11)
+ {
+ _add_st(
+ $result, 'regression',
+ 'drop function if exists public.boxarea(box)',
+ 'drop function if exists public.funny_dup17()');
+ }
+
+ # version-0 C functions are no longer supported
+ if ($old_version < 10)
+ {
+ _add_st($result, 'regression',
+ 'drop function oldstyle_length(integer, text)');
+ }
+
+ if ($old_version lt '9.5')
+ {
+ # cope with changes of underlying functions
+ _add_st(
+ $result,
+ 'regression',
+ 'drop operator @#@ (NONE, bigint)',
+ 'CREATE OPERATOR @#@ ('
+ . 'PROCEDURE = factorial, RIGHTARG = bigint )',
+ 'drop aggregate public.array_cat_accum(anyarray)',
+ 'CREATE AGGREGATE array_larger_accum (anyarray) ' . ' ( '
+ . ' sfunc = array_larger, '
+ . ' stype = anyarray, '
+ . ' initcond = $${}$$ ' . ' ) ');
+
+ # "=>" is no longer valid as an operator name
+ _add_st($result, 'regression',
+ 'drop operator if exists public.=> (bigint, NONE)');
+ }
+
+ return $result;
+}
+
+# Internal subroutine to add statement(s) to the list for the given db.
+sub _add_st
+{
+ my ($result, $db, @st) = @_;
+
+ $result->{$db} ||= [];
+ push(@{ $result->{$db} }, @st);
+}
+
+=pod
+
+=item adjust_old_dumpfile($old_version, $dump)
+
+Edit a dump output file, taken from the adjusted old-version installation
+by current-version C<pg_dumpall -s>, so that it will match the results of
+C<pg_dumpall -s> on the pg_upgrade'd installation.
+
+Typically this involves coping with cosmetic differences in the output
+of backend subroutines used by pg_dump.
+
+Arguments:
+
+=over
+
+=item C<old_version>: Branch we are upgrading from, represented as a
+PostgreSQL::Version object.
+
+=item C<dump>: Contents of dump file
+
+=back
+
+Returns the modified dump text.
+
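+For example (a sketch; C<$dumpfile> is a hypothetical path to a
+C<pg_dumpall -s> output file):
+
+ my $dump = PostgreSQL::Test::Utils::slurp_file($dumpfile);
+ $dump = adjust_old_dumpfile($old_version, $dump);
+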
+=cut
+
+sub adjust_old_dumpfile
+{
+ my ($old_version, $dump) = @_;
+
+ die "wrong type for \$old_version\n"
+ unless $old_version->isa("PostgreSQL::Version");
+ # See adjust_database_contents about this
+ $old_version = PostgreSQL::Version->new($old_version->major);
+
+ # use Unix newlines
+ $dump =~ s/\r\n/\n/g;
+
+ # Version comments will certainly not match.
+ $dump =~ s/^-- Dumped from database version.*\n//mg;
+
+ if ($old_version < 16)
+ {
+ # Fix up some view queries that no longer require table-qualification.
+ $dump = _mash_view_qualifiers($dump);
+ }
+
+ if ($old_version < 14)
+ {
+ # Remove mentions of extended hash functions.
+ $dump =~ s {^(\s+OPERATOR\s1\s=\(integer,integer\))\s,\n
+ \s+FUNCTION\s2\s\(integer,\sinteger\)\spublic\.part_hashint4_noop\(integer,bigint\);}
+ {$1;}mxg;
+ $dump =~ s {^(\s+OPERATOR\s1\s=\(text,text\))\s,\n
+ \s+FUNCTION\s2\s\(text,\stext\)\spublic\.part_hashtext_length\(text,bigint\);}
+ {$1;}mxg;
+ }
+
+ # Change trigger definitions to say ... EXECUTE FUNCTION ...
+ if ($old_version < 12)
+ {
+ # would like to use lookbehind here but perl complains
+ # so do it this way
+ $dump =~ s/
+ (^CREATE\sTRIGGER\s.*?)
+ \sEXECUTE\sPROCEDURE
+ /$1 EXECUTE FUNCTION/mgx;
+ }
+
+ if ($old_version lt '9.6')
+ {
+ # adjust some places where we don't print so many parens anymore
+
+ my $prefix =
+ "'New York'\tnew & york | big & apple | nyc\t'new' & 'york'\t";
+ my $orig = "( 'new' & 'york' | 'big' & 'appl' ) | 'nyc'";
+ my $repl = "'new' & 'york' | 'big' & 'appl' | 'nyc'";
+ $dump =~ s/(?<=^\Q$prefix\E)\Q$orig\E/$repl/mg;
+
+ $prefix =
+ "'Sanct Peter'\tPeterburg | peter | 'Sanct Peterburg'\t'sanct' & 'peter'\t";
+ $orig = "( 'peterburg' | 'peter' ) | 'sanct' & 'peterburg'";
+ $repl = "'peterburg' | 'peter' | 'sanct' & 'peterburg'";
+ $dump =~ s/(?<=^\Q$prefix\E)\Q$orig\E/$repl/mg;
+ }
+
+ if ($old_version lt '9.5')
+ {
+ # adjust some places where we don't print so many parens anymore
+
+ my $prefix = "CONSTRAINT (?:sequence|copy)_con CHECK [(][(]";
+ my $orig = "((x > 3) AND (y <> 'check failed'::text))";
+ my $repl = "(x > 3) AND (y <> 'check failed'::text)";
+ $dump =~ s/($prefix)\Q$orig\E/$1$repl/mg;
+
+ $prefix = "CONSTRAINT insert_con CHECK [(][(]";
+ $orig = "((x >= 3) AND (y <> 'check failed'::text))";
+ $repl = "(x >= 3) AND (y <> 'check failed'::text)";
+ $dump =~ s/($prefix)\Q$orig\E/$1$repl/mg;
+
+ $orig = "DEFAULT ((-1) * currval('public.insert_seq'::regclass))";
+ $repl =
+ "DEFAULT ('-1'::integer * currval('public.insert_seq'::regclass))";
+ $dump =~ s/\Q$orig\E/$repl/mg;
+
+ my $expr =
+ "(rsl.sl_color = rsh.slcolor) AND (rsl.sl_len_cm >= rsh.slminlen_cm)";
+ $dump =~ s/WHERE \(\(\Q$expr\E\)/WHERE ($expr/g;
+
+ $expr =
+ "(rule_and_refint_t3.id3a = new.id3a) AND (rule_and_refint_t3.id3b = new.id3b)";
+ $dump =~ s/WHERE \(\(\Q$expr\E\)/WHERE ($expr/g;
+
+ $expr =
+ "(rule_and_refint_t3_1.id3a = new.id3a) AND (rule_and_refint_t3_1.id3b = new.id3b)";
+ $dump =~ s/WHERE \(\(\Q$expr\E\)/WHERE ($expr/g;
+ }
+
+ if ($old_version lt '9.3')
+ {
+ # CREATE VIEW/RULE statements were not pretty-printed before 9.3.
+ # To cope, reduce all whitespace sequences within them to one space.
+ # This must be done on both old and new dumps.
+ $dump = _mash_view_whitespace($dump);
+
+ # _mash_view_whitespace doesn't handle multi-command rules;
+ # rather than trying to fix that, just hack the exceptions manually.
+
+ my $prefix =
+ "CREATE RULE rtest_sys_del AS ON DELETE TO public.rtest_system DO (DELETE FROM public.rtest_interface WHERE (rtest_interface.sysname = old.sysname);";
+ my $line2 = " DELETE FROM public.rtest_admin";
+ my $line3 = " WHERE (rtest_admin.sysname = old.sysname);";
+ $dump =~
+ s/(?<=\Q$prefix\E)\Q$line2$line3\E \);/\n$line2\n $line3\n);/mg;
+
+ $prefix =
+ "CREATE RULE rtest_sys_upd AS ON UPDATE TO public.rtest_system DO (UPDATE public.rtest_interface SET sysname = new.sysname WHERE (rtest_interface.sysname = old.sysname);";
+ $line2 = " UPDATE public.rtest_admin SET sysname = new.sysname";
+ $line3 = " WHERE (rtest_admin.sysname = old.sysname);";
+ $dump =~
+ s/(?<=\Q$prefix\E)\Q$line2$line3\E \);/\n$line2\n $line3\n);/mg;
+
+ # and there's one place where pre-9.3 uses a different table alias
+ $dump =~ s {^(CREATE\sRULE\srule_and_refint_t3_ins\sAS\s
+ ON\sINSERT\sTO\spublic\.rule_and_refint_t3\s
+ WHERE\s\(EXISTS\s\(SELECT\s1\sFROM\spublic\.rule_and_refint_t3)\s
+ (WHERE\s\(\(rule_and_refint_t3)
+ (\.id3a\s=\snew\.id3a\)\sAND\s\(rule_and_refint_t3)
+ (\.id3b\s=\snew\.id3b\)\sAND\s\(rule_and_refint_t3)}
+ {$1 rule_and_refint_t3_1 $2_1$3_1$4_1}mx;
+
+ # Also fix old use of NATURAL JOIN syntax
+ $dump =~ s {NATURAL JOIN public\.credit_card r}
+ {JOIN public.credit_card r USING (cid)}mg;
+ $dump =~ s {NATURAL JOIN public\.credit_usage r}
+ {JOIN public.credit_usage r USING (cid)}mg;
+ }
+
+ # Suppress blank lines, as some places in pg_dump emit more or fewer.
+ $dump =~ s/\n\n+/\n/g;
+
+ return $dump;
+}
+
+
+# Data for _mash_view_qualifiers
+my @_unused_view_qualifiers = (
+ # Present at least since 9.2
+ { obj => 'VIEW public.trigger_test_view', qual => 'trigger_test' },
+ { obj => 'VIEW public.domview', qual => 'domtab' },
+ { obj => 'VIEW public.my_property_normal', qual => 'customer' },
+ { obj => 'VIEW public.my_property_secure', qual => 'customer' },
+ { obj => 'VIEW public.pfield_v1', qual => 'pf' },
+ { obj => 'VIEW public.rtest_v1', qual => 'rtest_t1' },
+ { obj => 'VIEW public.rtest_vview1', qual => 'x' },
+ { obj => 'VIEW public.rtest_vview2', qual => 'rtest_view1' },
+ { obj => 'VIEW public.rtest_vview3', qual => 'x' },
+ { obj => 'VIEW public.rtest_vview5', qual => 'rtest_view1' },
+ { obj => 'VIEW public.shoelace_obsolete', qual => 'shoelace' },
+ { obj => 'VIEW public.shoelace_candelete', qual => 'shoelace_obsolete' },
+ { obj => 'VIEW public.toyemp', qual => 'emp' },
+ { obj => 'VIEW public.xmlview4', qual => 'emp' },
+ # Since 9.3 (some of these were removed in 9.6)
+ { obj => 'VIEW public.tv', qual => 't' },
+ { obj => 'MATERIALIZED VIEW mvschema.tvm', qual => 'tv' },
+ { obj => 'VIEW public.tvv', qual => 'tv' },
+ { obj => 'MATERIALIZED VIEW public.tvvm', qual => 'tvv' },
+ { obj => 'VIEW public.tvvmv', qual => 'tvvm' },
+ { obj => 'MATERIALIZED VIEW public.bb', qual => 'tvvmv' },
+ { obj => 'VIEW public.nums', qual => 'nums' },
+ { obj => 'VIEW public.sums_1_100', qual => 't' },
+ { obj => 'MATERIALIZED VIEW public.tm', qual => 't' },
+ { obj => 'MATERIALIZED VIEW public.tmm', qual => 'tm' },
+ { obj => 'MATERIALIZED VIEW public.tvmm', qual => 'tvm' },
+ # Since 9.4
+ {
+ obj => 'MATERIALIZED VIEW public.citext_matview',
+ qual => 'citext_table'
+ },
+ {
+ obj => 'OR REPLACE VIEW public.key_dependent_view',
+ qual => 'view_base_table'
+ },
+ {
+ obj => 'OR REPLACE VIEW public.key_dependent_view_no_cols',
+ qual => 'view_base_table'
+ },
+ # Since 9.5
+ {
+ obj => 'VIEW public.dummy_seclabel_view1',
+ qual => 'dummy_seclabel_tbl2'
+ },
+ { obj => 'VIEW public.vv', qual => 'test_tablesample' },
+ { obj => 'VIEW public.test_tablesample_v1', qual => 'test_tablesample' },
+ { obj => 'VIEW public.test_tablesample_v2', qual => 'test_tablesample' },
+ # Since 9.6
+ {
+ obj => 'MATERIALIZED VIEW public.test_pg_dump_mv1',
+ qual => 'test_pg_dump_t1'
+ },
+ { obj => 'VIEW public.test_pg_dump_v1', qual => 'test_pg_dump_t1' },
+ { obj => 'VIEW public.mvtest_tv', qual => 'mvtest_t' },
+ {
+ obj => 'MATERIALIZED VIEW mvtest_mvschema.mvtest_tvm',
+ qual => 'mvtest_tv'
+ },
+ { obj => 'VIEW public.mvtest_tvv', qual => 'mvtest_tv' },
+ { obj => 'MATERIALIZED VIEW public.mvtest_tvvm', qual => 'mvtest_tvv' },
+ { obj => 'VIEW public.mvtest_tvvmv', qual => 'mvtest_tvvm' },
+ { obj => 'MATERIALIZED VIEW public.mvtest_bb', qual => 'mvtest_tvvmv' },
+ { obj => 'MATERIALIZED VIEW public.mvtest_tm', qual => 'mvtest_t' },
+ { obj => 'MATERIALIZED VIEW public.mvtest_tmm', qual => 'mvtest_tm' },
+ { obj => 'MATERIALIZED VIEW public.mvtest_tvmm', qual => 'mvtest_tvm' },
+ # Since 10 (some removed in 12)
+ { obj => 'VIEW public.itestv10', qual => 'itest10' },
+ { obj => 'VIEW public.itestv11', qual => 'itest11' },
+ { obj => 'VIEW public.xmltableview2', qual => '"xmltable"' },
+ # Since 12
+ {
+ obj => 'MATERIALIZED VIEW public.tableam_tblmv_heap2',
+ qual => 'tableam_tbl_heap2'
+ },
+ # Since 13
+ { obj => 'VIEW public.limit_thousand_v_1', qual => 'onek' },
+ { obj => 'VIEW public.limit_thousand_v_2', qual => 'onek' },
+ { obj => 'VIEW public.limit_thousand_v_3', qual => 'onek' },
+ { obj => 'VIEW public.limit_thousand_v_4', qual => 'onek' },
+ # Since 14
+ { obj => 'MATERIALIZED VIEW public.compressmv', qual => 'cmdata1' });
+
+# Internal subroutine to remove no-longer-used table qualifiers from
+# CREATE [MATERIALIZED] VIEW commands. See list of targeted views above.
+sub _mash_view_qualifiers
+{
+ my ($dump) = @_;
+
+ for my $uvq (@_unused_view_qualifiers)
+ {
+ my $leader = "CREATE $uvq->{obj} ";
+ my $qualifier = $uvq->{qual};
+ # Note: we loop because there are presently some cases where the same
+ # view name appears in multiple databases. Fortunately, the same
+ # qualifier removal applies or is harmless for each instance ... but
+ # we might want to rename some things to avoid assuming that.
+ my @splitchunks = split $leader, $dump;
+ $dump = shift(@splitchunks);
+ foreach my $chunk (@splitchunks)
+ {
+ my @thischunks = split /;/, $chunk, 2;
+ my $stmt = shift(@thischunks);
+ my $ostmt = $stmt;
+
+ # now $stmt is just the body of the CREATE [MATERIALIZED] VIEW
+ $stmt =~ s/$qualifier\.//g;
+
+ $dump .= $leader . $stmt . ';' . $thischunks[0];
+ }
+ }
+
+ # Further hack a few cases where not all occurrences of the qualifier
+ # should be removed.
+ $dump =~ s {^(CREATE VIEW public\.rtest_vview1 .*?)(a\)\)\);)}
+ {$1x.$2}ms;
+ $dump =~ s {^(CREATE VIEW public\.rtest_vview3 .*?)(a\)\)\);)}
+ {$1x.$2}ms;
+ $dump =~
+ s {^(CREATE VIEW public\.shoelace_obsolete .*?)(sl_color\)\)\)\);)}
+ {$1shoelace.$2}ms;
+
+ return $dump;
+}
+
+
+# Internal subroutine to mangle whitespace within view/rule commands.
+# Any consecutive sequence of whitespace is reduced to one space.
+sub _mash_view_whitespace
+{
+ my ($dump) = @_;
+
+ foreach my $leader ('CREATE VIEW', 'CREATE RULE')
+ {
+ my @splitchunks = split $leader, $dump;
+
+ $dump = shift(@splitchunks);
+ foreach my $chunk (@splitchunks)
+ {
+ my @thischunks = split /;/, $chunk, 2;
+ my $stmt = shift(@thischunks);
+
+ # now $stmt is just the body of the CREATE VIEW/RULE
+ $stmt =~ s/\s+/ /sg;
+ # we also need to smash these forms for sub-selects and rules
+ $stmt =~ s/\( SELECT/(SELECT/g;
+ $stmt =~ s/\( INSERT/(INSERT/g;
+ $stmt =~ s/\( UPDATE/(UPDATE/g;
+ $stmt =~ s/\( DELETE/(DELETE/g;
+
+ $dump .= $leader . $stmt . ';' . $thischunks[0];
+ }
+ }
+ return $dump;
+}
+
+=pod
+
+=item adjust_new_dumpfile($old_version, $dump)
+
+Edit a dump output file, taken from the pg_upgrade'd installation
+by current-version C<pg_dumpall -s>, so that it will match the old
+dump output file as adjusted by C<adjust_old_dumpfile>.
+
+Typically this involves deleting data not present in the old installation.
+
+Arguments:
+
+=over
+
+=item C<old_version>: Branch we are upgrading from, represented as a
+PostgreSQL::Version object.
+
+=item C<dump>: Contents of dump file
+
+=back
+
+Returns the modified dump text.
+
+=cut
+
+sub adjust_new_dumpfile
+{
+ my ($old_version, $dump) = @_;
+
+ die "wrong type for \$old_version\n"
+ unless $old_version->isa("PostgreSQL::Version");
+ # See adjust_database_contents about this
+ $old_version = PostgreSQL::Version->new($old_version->major);
+
+ # use Unix newlines
+ $dump =~ s/\r\n/\n/g;
+
+ # Version comments will certainly not match.
+ $dump =~ s/^-- Dumped from database version.*\n//mg;
+
+ if ($old_version < 14)
+ {
+ # Suppress noise-word uses of IN in CREATE/ALTER PROCEDURE.
+ $dump =~ s/^(CREATE PROCEDURE .*?)\(IN /$1(/mg;
+ $dump =~ s/^(ALTER PROCEDURE .*?)\(IN /$1(/mg;
+ $dump =~ s/^(CREATE PROCEDURE .*?), IN /$1, /mg;
+ $dump =~ s/^(ALTER PROCEDURE .*?), IN /$1, /mg;
+ $dump =~ s/^(CREATE PROCEDURE .*?), IN /$1, /mg;
+ $dump =~ s/^(ALTER PROCEDURE .*?), IN /$1, /mg;
+
+ # Remove SUBSCRIPT clauses in CREATE TYPE.
+ $dump =~ s/^\s+SUBSCRIPT = raw_array_subscript_handler,\n//mg;
+
+ # Remove multirange_type_name clauses in CREATE TYPE AS RANGE.
+ $dump =~ s {,\n\s+multirange_type_name = .*?(,?)$} {$1}mg;
+
+ # Remove mentions of extended hash functions.
+ $dump =~
+ s {^ALTER\sOPERATOR\sFAMILY\spublic\.part_test_int4_ops\sUSING\shash\sADD\n
+ \s+FUNCTION\s2\s\(integer,\sinteger\)\spublic\.part_hashint4_noop\(integer,bigint\);} {}mxg;
+ $dump =~
+ s {^ALTER\sOPERATOR\sFAMILY\spublic\.part_test_text_ops\sUSING\shash\sADD\n
+ \s+FUNCTION\s2\s\(text,\stext\)\spublic\.part_hashtext_length\(text,bigint\);} {}mxg;
+ }
+
+ # pre-v12 dumps will not say anything about default_table_access_method.
+ if ($old_version < 12)
+ {
+ $dump =~ s/^SET default_table_access_method = heap;\n//mg;
+ }
+
+ # dumps from pre-9.6 dblink may include redundant ACL settings
+ if ($old_version lt '9.6')
+ {
+ my $comment =
+ "-- Name: FUNCTION dblink_connect_u\(.*?\); Type: ACL; Schema: public; Owner: .*";
+ my $sql =
+ "REVOKE ALL ON FUNCTION public\.dblink_connect_u\(.*?\) FROM PUBLIC;";
+ $dump =~ s/^--\n$comment\n--\n+$sql\n+//mg;
+ }
+
+ if ($old_version lt '9.3')
+ {
+ # CREATE VIEW/RULE statements were not pretty-printed before 9.3.
+ # To cope, reduce all whitespace sequences within them to one space.
+ # This must be done on both old and new dumps.
+ $dump = _mash_view_whitespace($dump);
+ }
+
+ # Suppress blank lines, as some places in pg_dump emit more or fewer.
+ $dump =~ s/\n\n+/\n/g;
+
+ return $dump;
+}
+
+=pod
+
+=back
+
+=cut
+
+1;
diff --git a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
new file mode 100644
index 0000000..764f0a5
--- /dev/null
+++ b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
@@ -0,0 +1,308 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+=pod
+
+=head1 NAME
+
+PostgreSQL::Test::BackgroundPsql - class for controlling background psql processes
+
+=head1 SYNOPSIS
+
+ use PostgreSQL::Test::Cluster;
+
+ my $node = PostgreSQL::Test::Cluster->new('mynode');
+
+ # Create a data directory with initdb
+ $node->init();
+
+ # Start the PostgreSQL server
+ $node->start();
+
+ # Create and start an interactive psql session
+ my $isession = $node->interactive_psql('postgres');
+ # Apply timeout per query rather than per session
+ $isession->set_query_timer_restart();
+ # Run a query and get the output as seen by psql
+ my $ret = $isession->query("SELECT 1");
+ # Run a backslash command and wait until the prompt returns
+ $isession->query_until(qr/postgres #/, "\\d foo\n");
+ # Close the session and exit psql
+ $isession->quit;
+
+ # Create and start a background psql session
+ my $bsession = $node->background_psql('postgres');
+
+ # Run a query which is guaranteed to not return in case it fails
+ $bsession->query_safe("SELECT 1");
+ # Initiate a command which can be expected to terminate at a later stage
+ $bsession->query_until(qr/start/, q(
+ \echo start
+ CREATE INDEX CONCURRENTLY idx ON t(a);
+ ));
+ # Close the session and exit psql
+ $bsession->quit;
+
+=head1 DESCRIPTION
+
+PostgreSQL::Test::BackgroundPsql contains functionality for controlling
+a background or interactive psql session operating on a PostgreSQL node
+initiated by PostgreSQL::Test::Cluster.
+
+=cut
+
+package PostgreSQL::Test::BackgroundPsql;
+
+use strict;
+use warnings;
+
+use Carp;
+use Config;
+use IPC::Run;
+use PostgreSQL::Test::Utils qw(pump_until);
+use Test::More;
+
+=pod
+
+=head1 METHODS
+
+=over
+
+=item PostgreSQL::Test::BackgroundPsql->new(interactive, @psql_params)
+
+Builds a new object of class C<PostgreSQL::Test::BackgroundPsql> for either
+an interactive or background session and starts it. If C<interactive> is
+true then a PTY will be attached. C<psql_params> should contain the full
+command to run psql with all desired parameters and a complete connection
+string. For C<interactive> sessions, IO::Pty is required.
+
+=cut
+
+sub new
+{
+ my $class = shift;
+ my ($interactive, $psql_params) = @_;
+ my $psql = {
+ 'stdin' => '',
+ 'stdout' => '',
+ 'stderr' => '',
+ 'query_timer_restart' => undef
+ };
+ my $run;
+
+ # This constructor should only be called from PostgreSQL::Test::Cluster
+ my ($package, $file, $line) = caller;
+ die
+ "Forbidden caller of constructor: package: $package, file: $file:$line"
+ unless $package->isa('PostgreSQL::Test::Cluster');
+
+ $psql->{timeout} =
+ IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default);
+
+ if ($interactive)
+ {
+ $run = IPC::Run::start $psql_params,
+ '<pty<', \$psql->{stdin}, '>pty>', \$psql->{stdout}, '2>',
+ \$psql->{stderr},
+ $psql->{timeout};
+ }
+ else
+ {
+ $run = IPC::Run::start $psql_params,
+ '<', \$psql->{stdin}, '>', \$psql->{stdout}, '2>', \$psql->{stderr},
+ $psql->{timeout};
+ }
+
+ $psql->{run} = $run;
+
+ my $self = bless $psql, $class;
+
+ $self->_wait_connect();
+
+ return $self;
+}
+
+# Internal routine for awaiting psql starting up and being ready to consume
+# input.
+sub _wait_connect
+{
+ my ($self) = @_;
+
+ # Request some output, and pump until we see it. This means that psql
+ # connection failures are caught here, relieving callers of the need to
+ # handle those. (Right now, we have no particularly good handling for
+ # errors anyway, but that might be added later.)
+ my $banner = "background_psql: ready";
+ $self->{stdin} .= "\\echo $banner\n";
+ $self->{run}->pump()
+ until $self->{stdout} =~ /$banner/ || $self->{timeout}->is_expired;
+ $self->{stdout} = ''; # clear out banner
+
+ die "psql startup timed out" if $self->{timeout}->is_expired;
+}
+
+=pod
+
+=item $session->quit
+
+Close the session and clean up resources. Each test run must be closed with
+C<quit>.
+
+=cut
+
+sub quit
+{
+ my ($self) = @_;
+
+ $self->{stdin} .= "\\q\n";
+
+ return $self->{run}->finish;
+}
+
+=pod
+
+=item $session->reconnect_and_clear
+
+Terminate the current session and connect again.
+
+=cut
+
+sub reconnect_and_clear
+{
+ my ($self) = @_;
+
+ # If psql isn't dead already, tell it to quit; sending \q once it is
+ # already dead causes IPC::Run to unhelpfully error out with "ack Broken pipe:".
+ $self->{run}->pump_nb();
+ if ($self->{run}->pumpable())
+ {
+ $self->{stdin} .= "\\q\n";
+ }
+ $self->{run}->finish;
+
+ # restart
+ $self->{run}->run();
+ $self->{stdin} = '';
+ $self->{stdout} = '';
+
+ $self->_wait_connect();
+}
+
+=pod
+
+=item $session->query()
+
+Executes a query in the current session and returns the output in scalar
+context and (output, error) in list context where error is 1 in case there
+was output generated on stderr when executing the query.
+
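+For example, with C<$session> obtained from C<background_psql>:
+
+ my ($output, $err) = $session->query("SELECT count(*) FROM pg_class");
+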
+=cut
+
+sub query
+{
+ my ($self, $query) = @_;
+ my $ret;
+ my $output;
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ note "issuing query via background psql: $query";
+
+ $self->{timeout}->start() if (defined($self->{query_timer_restart}));
+
+ # Feed the query to psql's stdin, followed by a \n (so psql processes the
+ # line), a ; (so psql issues the query, if it doesn't include one itself),
+ # and a separator echoed with \echo that we can wait on.
+ my $banner = "background_psql: QUERY_SEPARATOR";
+ $self->{stdin} .= "$query\n;\n\\echo $banner\n";
+
+ pump_until($self->{run}, $self->{timeout}, \$self->{stdout}, qr/$banner/);
+
+ die "psql query timed out" if $self->{timeout}->is_expired;
+ $output = $self->{stdout};
+
+ # remove banner again, our caller doesn't care
+ $output =~ s/\n$banner$//s;
+
+ # clear out output for the next query
+ $self->{stdout} = '';
+
+ $ret = $self->{stderr} eq "" ? 0 : 1;
+
+ return wantarray ? ($output, $ret) : $output;
+}
+
+=pod
+
+=item $session->query_safe()
+
+Wrapper around C<query> which errors out if the query failed to execute.
+Query failure is determined by it producing output on stderr.
+
+=cut
+
+sub query_safe
+{
+ my ($self, $query) = @_;
+
+ my $ret = $self->query($query);
+
+ if ($self->{stderr} ne "")
+ {
+ die "query failed: $self->{stderr}";
+ }
+
+ return $ret;
+}
+
+=pod
+
+=item $session->query_until(until, query)
+
+Issue C<query> and wait for C<until> to appear in the query output rather than
+waiting for query completion. C<query> needs to end with a newline and semicolon
+(where applicable; interactive psql input may not require it) for psql to
+process the input.
+
+=cut
+
+sub query_until
+{
+ my ($self, $until, $query) = @_;
+ my $ret;
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ $self->{timeout}->start() if (defined($self->{query_timer_restart}));
+ $self->{stdin} .= $query;
+
+ pump_until($self->{run}, $self->{timeout}, \$self->{stdout}, $until);
+
+ die "psql query timed out" if $self->{timeout}->is_expired;
+
+ $ret = $self->{stdout};
+
+ # clear out output for the next query
+ $self->{stdout} = '';
+
+ return $ret;
+}
+
+=pod
+
+=item $session->set_query_timer_restart()
+
+Configures the timer to be restarted before each query such that the defined
+timeout is valid per query rather than per test run.
+
+=back
+
+=cut
+
+sub set_query_timer_restart
+{
+ my $self = shift;
+
+ $self->{query_timer_restart} = 1;
+ return $self->{query_timer_restart};
+}
+
+1;
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
new file mode 100644
index 0000000..5f0f04b
--- /dev/null
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -0,0 +1,3188 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+=pod
+
+=head1 NAME
+
+PostgreSQL::Test::Cluster - class representing PostgreSQL server instance
+
+=head1 SYNOPSIS
+
+ use PostgreSQL::Test::Cluster;
+
+ my $node = PostgreSQL::Test::Cluster->new('mynode');
+
+ # Create a data directory with initdb
+ $node->init();
+
+ # Start the PostgreSQL server
+ $node->start();
+
+ # Add a setting and restart
+ $node->append_conf('postgresql.conf', 'hot_standby = on');
+ $node->restart();
+
+ # Modify or delete an existing setting
+ $node->adjust_conf('postgresql.conf', 'max_wal_senders', '10');
+
+ # get pg_config settings
+ # all the settings in one string
+ $pgconfig = $node->config_data;
+ # all the settings as a map
+ %config_map = ($node->config_data);
+ # specified settings
+ ($incdir, $sharedir) = $node->config_data(qw(--includedir --sharedir));
+
+ # run a query with psql, like:
+ # echo 'SELECT 1' | psql -qAXt postgres -v ON_ERROR_STOP=1
+ $psql_stdout = $node->safe_psql('postgres', 'SELECT 1');
+
+ # Run psql with a timeout, capturing stdout and stderr
+ # as well as the psql exit code. Pass some extra psql
+ # options. If there's an error from psql raise an exception.
+ my ($stdout, $stderr, $timed_out);
+ my $cmdret = $node->psql('postgres', 'SELECT pg_sleep(600)',
+ stdout => \$stdout, stderr => \$stderr,
+ timeout => $PostgreSQL::Test::Utils::timeout_default,
+ timed_out => \$timed_out,
+ extra_params => ['--single-transaction'],
+ on_error_die => 1);
+ print "Sleep timed out" if $timed_out;
+
+ # Similar thing, more convenient in common cases
+ my ($cmdret, $stdout, $stderr) =
+ $node->psql('postgres', 'SELECT 1');
+
+ # run query every second until it returns 't'
+ # or times out
+ $node->poll_query_until('postgres', q|SELECT random() < 0.1;|)
+ or die "timed out";
+
+ # Do an online pg_basebackup
+ my $ret = $node->backup('testbackup1');
+
+ # Take a backup of a running server
+ my $ret = $node->backup_fs_hot('testbackup2');
+
+ # Take a backup of a stopped server
+ $node->stop;
+ my $ret = $node->backup_fs_cold('testbackup3');
+
+ # Restore it to create a new independent node (not a replica)
+ my $other_node = PostgreSQL::Test::Cluster->new('mycopy');
+ $other_node->init_from_backup($node, 'testbackup');
+ $other_node->start;
+
+ # Stop the server
+ $node->stop('fast');
+
+ # Find a free, unprivileged TCP port to bind some other service to
+ my $port = PostgreSQL::Test::Cluster::get_free_port();
+
+=head1 DESCRIPTION
+
+PostgreSQL::Test::Cluster contains a set of routines able to work on a PostgreSQL node,
+allowing one to start, stop, back up and initialize it with various options.
+The set of nodes managed by a given test is also managed by this module.
+
+In addition to node management, PostgreSQL::Test::Cluster instances have some wrappers
+around Test::More functions to run commands with an environment set up to
+point to the instance.
+
+The IPC::Run module is required.
+
+=cut
+
+package PostgreSQL::Test::Cluster;
+
+use strict;
+use warnings;
+
+use Carp;
+use Config;
+use Fcntl qw(:mode :flock :seek :DEFAULT);
+use File::Basename;
+use File::Path qw(rmtree mkpath);
+use File::Spec;
+use File::stat qw(stat);
+use File::Temp ();
+use IPC::Run;
+use PostgreSQL::Version;
+use PostgreSQL::Test::RecursiveCopy;
+use Socket;
+use Test::More;
+use PostgreSQL::Test::Utils ();
+use PostgreSQL::Test::BackgroundPsql ();
+use Time::HiRes qw(usleep);
+use Scalar::Util qw(blessed);
+
+our ($use_tcp, $test_localhost, $test_pghost, $last_host_assigned,
+ $last_port_assigned, @all_nodes, $died, $portdir);
+
+# the minimum version we believe to be compatible with this package without
+# subclassing.
+our $min_compat = 12;
+
+# list of file reservations made by get_free_port
+my @port_reservation_files;
+
+INIT
+{
+
+ # Set PGHOST for backward compatibility. This doesn't work for own_host
+ # nodes, so prefer to not rely on this when writing new tests.
+ $use_tcp = !$PostgreSQL::Test::Utils::use_unix_sockets;
+ $test_localhost = "127.0.0.1";
+ $last_host_assigned = 1;
+ if ($use_tcp)
+ {
+ $test_pghost = $test_localhost;
+ }
+ else
+ {
+ # On Windows, replace Windows-style \ path separators with / when
+ # putting socket directories either in postgresql.conf or libpq
+ # connection strings, otherwise they are interpreted as escapes.
+ $test_pghost = PostgreSQL::Test::Utils::tempdir_short;
+ $test_pghost =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os;
+ }
+ $ENV{PGHOST} = $test_pghost;
+ $ENV{PGDATABASE} = 'postgres';
+
+ # Tracking of last port value assigned to accelerate free port lookup.
+ $last_port_assigned = int(rand() * 16384) + 49152;
+
+ # Set the port lock directory
+
+ # If we're told to use a directory (e.g. from a buildfarm client)
+ # explicitly, use that
+ $portdir = $ENV{PG_TEST_PORT_DIR};
+ # Otherwise, try to use a directory at the top of the build tree
+ # or as a last resort use the tmp_check directory
+ my $build_dir =
+ $ENV{MESON_BUILD_ROOT}
+ || $ENV{top_builddir}
+ || $PostgreSQL::Test::Utils::tmp_check;
+ $portdir ||= "$build_dir/portlock";
+ $portdir =~ s!\\!/!g;
+ # Make sure the directory exists
+ mkpath($portdir) unless -d $portdir;
+}
+
+=pod
+
+=head1 METHODS
+
+=over
+
+=item $node->port()
+
+Get the port number assigned to the host. This won't necessarily be a TCP port
+open on the local host since we prefer to use unix sockets if possible.
+
+Use $node->connstr() if you want a connection string.
+
+=cut
+
+sub port
+{
+ my ($self) = @_;
+ return $self->{_port};
+}
+
+=pod
+
+=item $node->host()
+
+Return the host (like PGHOST) for this instance. May be a UNIX socket path.
+
+Use $node->connstr() if you want a connection string.
+
+=cut
+
+sub host
+{
+ my ($self) = @_;
+ return $self->{_host};
+}
+
+=pod
+
+=item $node->basedir()
+
+The directory all the node's files will be within - datadir, archive directory,
+backups, etc.
+
+=cut
+
+sub basedir
+{
+ my ($self) = @_;
+ return $self->{_basedir};
+}
+
+=pod
+
+=item $node->name()
+
+The name assigned to the node at creation time.
+
+=cut
+
+sub name
+{
+ my ($self) = @_;
+ return $self->{_name};
+}
+
+=pod
+
+=item $node->logfile()
+
+Path to the PostgreSQL log file for this instance.
+
+=cut
+
+sub logfile
+{
+ my ($self) = @_;
+ return $self->{_logfile};
+}
+
+=pod
+
+=item $node->connstr()
+
+Get a libpq connection string that will establish a connection to
+this node. Suitable for passing to psql, DBD::Pg, etc.
+
+=cut
+
+sub connstr
+{
+ my ($self, $dbname) = @_;
+ my $pgport = $self->port;
+ my $pghost = $self->host;
+ if (!defined($dbname))
+ {
+ return "port=$pgport host=$pghost";
+ }
+
+ # Escape properly the database string before using it, only
+ # single quotes and backslashes need to be treated this way.
+ $dbname =~ s#\\#\\\\#g;
+ $dbname =~ s#\'#\\\'#g;
+
+ return "port=$pgport host=$pghost dbname='$dbname'";
+}
+
+=pod
+
+=item $node->group_access()
+
+Does the data dir allow group access?
+
+=cut
+
+sub group_access
+{
+ my ($self) = @_;
+
+ my $dir_stat = stat($self->data_dir);
+
+ defined($dir_stat)
+ or die('unable to stat ' . $self->data_dir);
+
+ return (S_IMODE($dir_stat->mode) == 0750);
+}
+
+=pod
+
+=item $node->data_dir()
+
+Returns the path to the data directory. postgresql.conf and pg_hba.conf are
+always here.
+
+=cut
+
+sub data_dir
+{
+ my ($self) = @_;
+ my $res = $self->basedir;
+ return "$res/pgdata";
+}
+
+=pod
+
+=item $node->archive_dir()
+
+If archiving is enabled, WAL files go here.
+
+=cut
+
+sub archive_dir
+{
+ my ($self) = @_;
+ my $basedir = $self->basedir;
+ return "$basedir/archives";
+}
+
+=pod
+
+=item $node->backup_dir()
+
+The output path for backups taken with $node->backup()
+
+=cut
+
+sub backup_dir
+{
+ my ($self) = @_;
+ my $basedir = $self->basedir;
+ return "$basedir/backup";
+}
+
+=pod
+
+=item $node->install_path()
+
+The configured install path (if any) for the node.
+
+=cut
+
+sub install_path
+{
+ my ($self) = @_;
+ return $self->{_install_path};
+}
+
+=pod
+
+=item $node->pg_version()
+
+The version number for the node, from PostgreSQL::Version.
+
+=cut
+
+sub pg_version
+{
+ my ($self) = @_;
+ return $self->{_pg_version};
+}
+
+=pod
+
+=item $node->config_data( option ...)
+
+Return configuration data from pg_config, using options (if supplied).
+The options will be things like '--sharedir'.
+
+If no options are supplied, return a string in scalar context or a map in
+array context.
+
+If options are supplied, return the list of values.
+
+=cut
+
+sub config_data
+{
+ my ($self, @options) = @_;
+ local %ENV = $self->_get_env();
+
+ my ($stdout, $stderr);
+ my $result =
+ IPC::Run::run [ $self->installed_command('pg_config'), @options ],
+ '>', \$stdout, '2>', \$stderr
+ or die "could not execute pg_config";
+ # standardize line endings
+ $stdout =~ s/\r(?=\n)//g;
+ # no options, scalar context: just hand back the output
+ return $stdout unless (wantarray || @options);
+ chomp($stdout);
+ # exactly one option: hand back the output (minus LF)
+ return $stdout if (@options == 1);
+ my @lines = split(/\n/, $stdout);
+ # more than one option: hand back the list of values;
+ return @lines if (@options);
+ # no options, array context: return a map
+ my @map;
+ foreach my $line (@lines)
+ {
+ my ($k, $v) = split(/ = /, $line, 2);
+ push(@map, $k, $v);
+ }
+ return @map;
+}
+
+=pod
+
+=item $node->info()
+
+Return a string containing human-readable diagnostic information (paths, etc)
+about this node.
+
+=cut
+
+sub info
+{
+ my ($self) = @_;
+ my $_info = '';
+ open my $fh, '>', \$_info or die;
+ print $fh "Name: " . $self->name . "\n";
+ print $fh "Version: " . $self->{_pg_version} . "\n"
+ if $self->{_pg_version};
+ print $fh "Data directory: " . $self->data_dir . "\n";
+ print $fh "Backup directory: " . $self->backup_dir . "\n";
+ print $fh "Archive directory: " . $self->archive_dir . "\n";
+ print $fh "Connection string: " . $self->connstr . "\n";
+ print $fh "Log file: " . $self->logfile . "\n";
+ print $fh "Install Path: ", $self->{_install_path} . "\n"
+ if $self->{_install_path};
+ close $fh or die;
+ return $_info;
+}
+
+=pod
+
+=item $node->dump_info()
+
+Print $node->info()
+
+=cut
+
+sub dump_info
+{
+ my ($self) = @_;
+ print $self->info;
+ return;
+}
+
+
+# Internal method to set up trusted pg_hba.conf for replication. Not
+# documented because you shouldn't use it, it's called automatically if needed.
+sub set_replication_conf
+{
+ my ($self) = @_;
+ my $pgdata = $self->data_dir;
+
+ $self->host eq $test_pghost
+ or croak "set_replication_conf only works with the default host";
+
+ open my $hba, '>>', "$pgdata/pg_hba.conf";
+ print $hba
+ "\n# Allow replication (set up by PostgreSQL::Test::Cluster.pm)\n";
+ if ($PostgreSQL::Test::Utils::windows_os
+ && !$PostgreSQL::Test::Utils::use_unix_sockets)
+ {
+ print $hba
+ "host replication all $test_localhost/32 sspi include_realm=1 map=regress\n";
+ }
+ close $hba;
+ return;
+}
+
+=pod
+
+=item $node->init(...)
+
+Initialize a new cluster for testing.
+
+Authentication is set up so that only the current OS user can access the
+cluster. On Unix, we use Unix domain socket connections, with the socket in
+a directory that's only accessible to the current user to ensure that.
+On Windows, we use SSPI authentication to ensure the same (by pg_regress
+--config-auth).
+
+WAL archiving can be enabled on this node by passing the keyword parameter
+has_archiving => 1. This is disabled by default.
+
+postgresql.conf can be set up for replication by passing the keyword
+parameter allows_streaming => 'logical' or 'physical' (passing 1 will also
+suffice for physical replication) depending on the type of replication that
+should be enabled. This is disabled by default.
+
+The new node is set up in a fast but unsafe configuration where fsync is
+disabled.
+
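+For example, to initialize a node that can act as a physical replication
+primary with WAL archiving enabled:
+
+ $node->init(allows_streaming => 1, has_archiving => 1);
+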
+=cut
+
+sub init
+{
+ my ($self, %params) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $host = $self->host;
+
+ local %ENV = $self->_get_env();
+
+ $params{allows_streaming} = 0 unless defined $params{allows_streaming};
+ $params{has_archiving} = 0 unless defined $params{has_archiving};
+
+ mkdir $self->backup_dir;
+ mkdir $self->archive_dir;
+
+ PostgreSQL::Test::Utils::system_or_bail('initdb', '-D', $pgdata, '-A',
+ 'trust', '-N', @{ $params{extra} });
+ PostgreSQL::Test::Utils::system_or_bail($ENV{PG_REGRESS},
+ '--config-auth', $pgdata, @{ $params{auth_extra} });
+
+ open my $conf, '>>', "$pgdata/postgresql.conf";
+ print $conf "\n# Added by PostgreSQL::Test::Cluster.pm\n";
+ print $conf "fsync = off\n";
+ print $conf "restart_after_crash = off\n";
+ print $conf "log_line_prefix = '%m [%p] %q%a '\n";
+ print $conf "log_statement = all\n";
+ print $conf "log_replication_commands = on\n";
+ print $conf "wal_retrieve_retry_interval = '500ms'\n";
+
+ # If a setting tends to affect whether tests pass or fail, print it after
+ # TEMP_CONFIG. Otherwise, print it before TEMP_CONFIG, thereby permitting
+ # overrides. Settings that merely improve performance or ease debugging
+ # belong before TEMP_CONFIG.
+ print $conf PostgreSQL::Test::Utils::slurp_file($ENV{TEMP_CONFIG})
+ if defined $ENV{TEMP_CONFIG};
+
+ if ($params{allows_streaming})
+ {
+ if ($params{allows_streaming} eq "logical")
+ {
+ print $conf "wal_level = logical\n";
+ }
+ else
+ {
+ print $conf "wal_level = replica\n";
+ }
+ print $conf "max_wal_senders = 10\n";
+ print $conf "max_replication_slots = 10\n";
+ print $conf "wal_log_hints = on\n";
+ print $conf "hot_standby = on\n";
+ # conservative settings to ensure we can run multiple postmasters:
+ print $conf "shared_buffers = 1MB\n";
+ print $conf "max_connections = 10\n";
+ # limit disk space consumption, too:
+ print $conf "max_wal_size = 128MB\n";
+ }
+ else
+ {
+ print $conf "wal_level = minimal\n";
+ print $conf "max_wal_senders = 0\n";
+ }
+
+ print $conf "port = $port\n";
+ if ($use_tcp)
+ {
+ print $conf "unix_socket_directories = ''\n";
+ print $conf "listen_addresses = '$host'\n";
+ }
+ else
+ {
+ print $conf "unix_socket_directories = '$host'\n";
+ print $conf "listen_addresses = ''\n";
+ }
+ close $conf;
+
+ chmod($self->group_access ? 0640 : 0600, "$pgdata/postgresql.conf")
+ or die("unable to set permissions for $pgdata/postgresql.conf");
+
+ $self->set_replication_conf if $params{allows_streaming};
+ $self->enable_archiving if $params{has_archiving};
+ return;
+}
+
+=pod
+
+=item $node->append_conf(filename, str)
+
+A shortcut method to append to files like pg_hba.conf and postgresql.conf.
+
+Does no validation or sanity checking. Does not reload the configuration
+after writing.
+
+A newline is automatically appended to the string.
+
+=cut
+
+sub append_conf
+{
+ my ($self, $filename, $str) = @_;
+
+ my $conffile = $self->data_dir . '/' . $filename;
+
+ PostgreSQL::Test::Utils::append_to_file($conffile, $str . "\n");
+
+ chmod($self->group_access() ? 0640 : 0600, $conffile)
+ or die("unable to set permissions for $conffile");
+
+ return;
+}
+
+=pod
+
+=item $node->adjust_conf(filename, setting, value, skip_equals)
+
+Modify the named config file setting with the value. If the value is undefined,
+instead delete the setting. If the setting is not present no action is taken.
+
+This will write "$setting = $value\n" in place of the existing line,
+unless skip_equals is true, in which case it will write
+"$setting $value\n". If the value needs to be quoted it is the caller's
+responsibility to do that.
+
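+For example:
+
+ # Replace the existing max_wal_senders line
+ $node->adjust_conf('postgresql.conf', 'max_wal_senders', '5');
+ # Remove the setting entirely
+ $node->adjust_conf('postgresql.conf', 'max_wal_senders', undef);
+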
+=cut
+
+sub adjust_conf
+{
+ my ($self, $filename, $setting, $value, $skip_equals) = @_;
+
+ my $conffile = $self->data_dir . '/' . $filename;
+
+ my $contents = PostgreSQL::Test::Utils::slurp_file($conffile);
+ my @lines = split(/\n/, $contents);
+ my @result;
+ my $eq = $skip_equals ? '' : '= ';
+ foreach my $line (@lines)
+ {
+ if ($line !~ /^$setting\W/)
+ {
+ push(@result, "$line\n");
+ }
+ elsif (defined $value)
+ {
+ push(@result, "$setting $eq$value\n");
+ }
+ }
+ open my $fh, ">", $conffile
+ or croak "could not write \"$conffile\": $!";
+ print $fh @result;
+ close $fh;
+
+ chmod($self->group_access() ? 0640 : 0600, $conffile)
+ or die("unable to set permissions for $conffile");
+}
+
+=pod
+
+=item $node->backup(backup_name)
+
+Create a hot backup with B<pg_basebackup> in subdirectory B<backup_name> of
+B<< $node->backup_dir >>, including the WAL.
+
+By default, WAL files are fetched at the end of the backup, not streamed.
+You can adjust that and other things by passing an array of additional
+B<pg_basebackup> command line options in the keyword parameter backup_options.
+
+You'll have to configure a suitable B<max_wal_senders> on the
+target server since it isn't done by default.
+
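+For example, additional B<pg_basebackup> options can be passed like this
+(the backup name shown is arbitrary):
+
+ $node->backup('stream_backup', backup_options => ['--wal-method=stream']);
+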
+=cut
+
+sub backup
+{
+ my ($self, $backup_name, %params) = @_;
+ my $backup_path = $self->backup_dir . '/' . $backup_name;
+ my $name = $self->name;
+
+ local %ENV = $self->_get_env();
+
+ print "# Taking pg_basebackup $backup_name from node \"$name\"\n";
+ PostgreSQL::Test::Utils::system_or_bail(
+ 'pg_basebackup', '-D',
+ $backup_path, '-h',
+ $self->host, '-p',
+ $self->port, '--checkpoint',
+ 'fast', '--no-sync',
+ @{ $params{backup_options} });
+ print "# Backup finished\n";
+ return;
+}
+
+=item $node->backup_fs_cold(backup_name)
+
+Create a backup with a filesystem level copy in subdirectory B<backup_name> of
+B<< $node->backup_dir >>, including WAL. The server must be
+stopped as no attempt to handle concurrent writes is made.
+
+Use B<backup> or B<backup_fs_hot> if you want to back up a running server.
+
+=cut
+
+sub backup_fs_cold
+{
+ my ($self, $backup_name) = @_;
+
+ PostgreSQL::Test::RecursiveCopy::copypath(
+ $self->data_dir,
+ $self->backup_dir . '/' . $backup_name,
+ filterfn => sub {
+ my $src = shift;
+ return ($src ne 'log' and $src ne 'postmaster.pid');
+ });
+
+ return;
+}
+
+=pod
+
+=item $node->init_from_backup(root_node, backup_name)
+
+Initialize a node from a backup, which may come from this node or a different
+node. root_node must be a PostgreSQL::Test::Cluster reference, backup_name the string name
+of a backup previously created on that node with $node->backup.
+
+Does not start the node after initializing it.
+
+By default, the backup is assumed to be plain format. To restore from
+a tar-format backup, pass the name of the tar program to use in the
+keyword parameter tar_program. Note that tablespace tar files aren't
+handled here.
+
+Streaming replication can be enabled on this node by passing the keyword
+parameter has_streaming => 1. This is disabled by default.
+
+Restoring WAL segments from archives using restore_command can be enabled
+by passing the keyword parameter has_restoring => 1. This is disabled by
+default.
+
+If has_restoring is used, standby mode is used by default. To use
+recovery mode instead, pass the keyword parameter standby => 0.
+
+The backup is copied, leaving the original unmodified. pg_hba.conf is
+unconditionally set to enable replication connections.
+
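+For example, to create a streaming standby from a backup taken on another
+node (a sketch; C<$primary> and the backup name come from the caller's
+context):
+
+ my $standby = PostgreSQL::Test::Cluster->new('standby');
+ $standby->init_from_backup($primary, 'testbackup1', has_streaming => 1);
+ $standby->start;
+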
+=cut
+
+sub init_from_backup
+{
+ my ($self, $root_node, $backup_name, %params) = @_;
+ my $backup_path = $root_node->backup_dir . '/' . $backup_name;
+ my $host = $self->host;
+ my $port = $self->port;
+ my $node_name = $self->name;
+ my $root_name = $root_node->name;
+
+ $params{has_streaming} = 0 unless defined $params{has_streaming};
+ $params{has_restoring} = 0 unless defined $params{has_restoring};
+ $params{standby} = 1 unless defined $params{standby};
+
+ print
+ "# Initializing node \"$node_name\" from backup \"$backup_name\" of node \"$root_name\"\n";
+ croak "Backup \"$backup_name\" does not exist at $backup_path"
+ unless -d $backup_path;
+
+ mkdir $self->backup_dir;
+ mkdir $self->archive_dir;
+
+ my $data_path = $self->data_dir;
+ if (defined $params{tar_program})
+ {
+ mkdir($data_path);
+ PostgreSQL::Test::Utils::system_or_bail($params{tar_program}, 'xf',
+ $backup_path . '/base.tar',
+ '-C', $data_path);
+ PostgreSQL::Test::Utils::system_or_bail(
+ $params{tar_program}, 'xf',
+ $backup_path . '/pg_wal.tar', '-C',
+ $data_path . '/pg_wal');
+ }
+ else
+ {
+ rmdir($data_path);
+ PostgreSQL::Test::RecursiveCopy::copypath($backup_path, $data_path);
+ }
+ chmod(0700, $data_path);
+
+ # Base configuration for this node
+ $self->append_conf(
+ 'postgresql.conf',
+ qq(
+port = $port
+));
+ if ($use_tcp)
+ {
+ $self->append_conf('postgresql.conf', "listen_addresses = '$host'");
+ }
+ else
+ {
+ $self->append_conf('postgresql.conf',
+ "unix_socket_directories = '$host'");
+ }
+ $self->enable_streaming($root_node) if $params{has_streaming};
+ $self->enable_restoring($root_node, $params{standby})
+ if $params{has_restoring};
+ return;
+}
+
+=pod
+
+=item $node->rotate_logfile()
+
+Switch to a new PostgreSQL log file. This does not alter any running
+PostgreSQL process. Subsequent method calls, including pg_ctl invocations,
+will use the new name. Return the new name.
+
+=cut
+
+sub rotate_logfile
+{
+ my ($self) = @_;
+ $self->{_logfile} = sprintf('%s_%d.log',
+ $self->{_logfile_base},
+ ++$self->{_logfile_generation});
+ return $self->{_logfile};
+}
+
+=pod
+
+=item $node->start(%params) => success_or_failure
+
+Wrapper for pg_ctl start
+
+Start the node and wait until it is ready to accept connections.
+
+=over
+
+=item fail_ok => 1
+
+By default, failure terminates the entire F<prove> invocation. If given,
+instead return a true or false value to indicate success or failure.
+
+=back
+
+=cut
+
+sub start
+{
+ my ($self, %params) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $name = $self->name;
+ my $ret;
+
+ BAIL_OUT("node \"$name\" is already running") if defined $self->{_pid};
+
+ print("### Starting node \"$name\"\n");
+
+ # Temporarily unset PGAPPNAME so that the server doesn't
+ # inherit it. Otherwise this could affect libpqwalreceiver
+ # connections in confusing ways.
+ local %ENV = $self->_get_env(PGAPPNAME => undef);
+
+ # Note: We set the cluster_name here, not in postgresql.conf (in
+ # sub init) so that it does not get copied to standbys.
+ # -w is now the default but having it here does no harm and helps
+ # compatibility with older versions.
+ $ret = PostgreSQL::Test::Utils::system_log(
+ 'pg_ctl', '-w', '-D', $self->data_dir,
+ '-l', $self->logfile, '-o', "--cluster-name=$name",
+ 'start');
+
+ if ($ret != 0)
+ {
+ print "# pg_ctl start failed; logfile:\n";
+ print PostgreSQL::Test::Utils::slurp_file($self->logfile);
+
+ # pg_ctl could have timed out, so check to see if there's a pid file;
+ # otherwise our END block will fail to shut down the new postmaster.
+ $self->_update_pid(-1);
+
+ BAIL_OUT("pg_ctl start failed") unless $params{fail_ok};
+ return 0;
+ }
+
+ $self->_update_pid(1);
+ return 1;
+}
+
+=pod
+
+=item $node->kill9()
+
+Send SIGKILL (signal 9) to the postmaster.
+
+Note: if the node is already known stopped, this does nothing.
+However, if we think it's running and it's not, it's important for
+this to fail. Otherwise, tests might fail to detect server crashes.
+
+=cut
+
+sub kill9
+{
+ my ($self) = @_;
+ my $name = $self->name;
+ return unless defined $self->{_pid};
+
+ local %ENV = $self->_get_env();
+
+ print "### Killing node \"$name\" using signal 9\n";
+ kill(9, $self->{_pid});
+ $self->{_pid} = undef;
+ return;
+}
+
+=pod
+
+=item $node->stop(mode)
+
+Stop the node using pg_ctl -m $mode and wait for it to stop.
+
+Note: if the node is already known stopped, this does nothing.
+However, if we think it's running and it's not, it's important for
+this to fail. Otherwise, tests might fail to detect server crashes.
+
+With optional extra param fail_ok => 1, returns 0 for failure
+instead of bailing out.
+
+=cut
+
+sub stop
+{
+ my ($self, $mode, %params) = @_;
+ my $pgdata = $self->data_dir;
+ my $name = $self->name;
+ my $ret;
+
+ local %ENV = $self->_get_env();
+
+ $mode = 'fast' unless defined $mode;
+ return 1 unless defined $self->{_pid};
+
+ print "### Stopping node \"$name\" using mode $mode\n";
+ $ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-D', $pgdata,
+ '-m', $mode, 'stop');
+
+ if ($ret != 0)
+ {
+ print "# pg_ctl stop failed: $ret\n";
+
+ # Check to see if we still have a postmaster or not.
+ $self->_update_pid(-1);
+
+ BAIL_OUT("pg_ctl stop failed") unless $params{fail_ok};
+ return 0;
+ }
+
+ $self->_update_pid(0);
+ return 1;
+}
+
+=pod
+
+=item $node->reload()
+
+Reload configuration parameters on the node.
+
+=cut
+
+sub reload
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $name = $self->name;
+
+ local %ENV = $self->_get_env();
+
+ print "### Reloading node \"$name\"\n";
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata,
+ 'reload');
+ return;
+}
+
+=pod
+
+=item $node->restart()
+
+Wrapper for pg_ctl restart
+
+=cut
+
+sub restart
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $logfile = $self->logfile;
+ my $name = $self->name;
+
+ local %ENV = $self->_get_env(PGAPPNAME => undef);
+
+ print "### Restarting node \"$name\"\n";
+
+ # -w is now the default but having it here does no harm and helps
+ # compatibility with older versions.
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-w', '-D', $pgdata,
+ '-l', $logfile, 'restart');
+
+ $self->_update_pid(1);
+ return;
+}
+
+=pod
+
+=item $node->promote()
+
+Wrapper for pg_ctl promote
+
+=cut
+
+sub promote
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $logfile = $self->logfile;
+ my $name = $self->name;
+
+ local %ENV = $self->_get_env();
+
+ print "### Promoting node \"$name\"\n";
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l',
+ $logfile, 'promote');
+ return;
+}
+
+=pod
+
+=item $node->logrotate()
+
+Wrapper for pg_ctl logrotate
+
+=cut
+
+sub logrotate
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $logfile = $self->logfile;
+ my $name = $self->name;
+
+ local %ENV = $self->_get_env();
+
+ print "### Rotating log in node \"$name\"\n";
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l',
+ $logfile, 'logrotate');
+ return;
+}
+
+# Internal routine to enable streaming replication on a standby node.
+sub enable_streaming
+{
+ my ($self, $root_node) = @_;
+ my $root_connstr = $root_node->connstr;
+ my $name = $self->name;
+
+ print "### Enabling streaming replication for node \"$name\"\n";
+ $self->append_conf(
+ $self->_recovery_file, qq(
+primary_conninfo='$root_connstr'
+));
+ $self->set_standby_mode();
+ return;
+}
+
+# Internal routine to enable archive recovery command on a standby node
+sub enable_restoring
+{
+ my ($self, $root_node, $standby) = @_;
+ my $path = $root_node->archive_dir;
+ my $name = $self->name;
+
+ print "### Enabling WAL restore for node \"$name\"\n";
+
+ # On Windows, the path specified in the restore command needs to use
+ # double backslashes so that the file targeted by the copy command is
+ # detected properly, so the directory value used in this routine, which
+ # contains only single backslashes, needs to be adjusted first. Paths also
+ # need to be double-quoted to prevent failures where the path contains
+ # spaces.
+ $path =~ s{\\}{\\\\}g if ($PostgreSQL::Test::Utils::windows_os);
+ my $copy_command =
+ $PostgreSQL::Test::Utils::windows_os
+ ? qq{copy "$path\\\\%f" "%p"}
+ : qq{cp "$path/%f" "%p"};
+
+ $self->append_conf(
+ $self->_recovery_file, qq(
+restore_command = '$copy_command'
+));
+ if ($standby)
+ {
+ $self->set_standby_mode();
+ }
+ else
+ {
+ $self->set_recovery_mode();
+ }
+ return;
+}
+
+sub _recovery_file { return "postgresql.conf"; }
+
+=pod
+
+=item $node->set_recovery_mode()
+
+Place recovery.signal file.
+
+=cut
+
+sub set_recovery_mode
+{
+ my ($self) = @_;
+
+ $self->append_conf('recovery.signal', '');
+ return;
+}
+
+=pod
+
+=item $node->set_standby_mode()
+
+Place standby.signal file.
+
+=cut
+
+sub set_standby_mode
+{
+ my ($self) = @_;
+
+ $self->append_conf('standby.signal', '');
+ return;
+}
+
+# Internal routine to enable archiving
+sub enable_archiving
+{
+ my ($self) = @_;
+ my $path = $self->archive_dir;
+ my $name = $self->name;
+
+ print "### Enabling WAL archiving for node \"$name\"\n";
+
+ # On Windows, the path specified in the archive command needs to use
+ # double backslashes to work properly and to let the copy command detect
+ # the targeted file, so the directory value used in this routine, which
+ # uses only one backslash, needs to be adjusted first.  Paths also need
+ # to be double-quoted to prevent failures where the path contains spaces.
+ $path =~ s{\\}{\\\\}g if ($PostgreSQL::Test::Utils::windows_os);
+ my $copy_command =
+ $PostgreSQL::Test::Utils::windows_os
+ ? qq{copy "%p" "$path\\\\%f"}
+ : qq{cp "%p" "$path/%f"};
+
+ # Enable archive_mode and archive_command on node
+ $self->append_conf(
+ 'postgresql.conf', qq(
+archive_mode = on
+archive_command = '$copy_command'
+));
+ return;
+}
+
+# Internal method to update $self->{_pid}
+# $is_running = 1: pid file should be there
+# $is_running = 0: pid file should NOT be there
+# $is_running = -1: we aren't sure
+sub _update_pid
+{
+ my ($self, $is_running) = @_;
+ my $name = $self->name;
+
+ # If we can open the PID file, read its first line and that's the PID we
+ # want.
+ if (open my $pidfile, '<', $self->data_dir . "/postmaster.pid")
+ {
+ chomp($self->{_pid} = <$pidfile>);
+ close $pidfile;
+
+ # If we aren't sure what to expect, validate the PID using kill().
+ # This protects against stale PID files left by crashed postmasters.
+ if ($is_running == -1 && kill(0, $self->{_pid}) == 0)
+ {
+ print
+ "# Stale postmaster.pid file for node \"$name\": PID $self->{_pid} no longer exists\n";
+ $self->{_pid} = undef;
+ return;
+ }
+
+ print "# Postmaster PID for node \"$name\" is $self->{_pid}\n";
+
+ # If we found a pidfile when there shouldn't be one, complain.
+ BAIL_OUT("postmaster.pid unexpectedly present") if $is_running == 0;
+ return;
+ }
+
+ $self->{_pid} = undef;
+ print "# No postmaster PID for node \"$name\"\n";
+
+ # Complain if we expected to find a pidfile.
+ BAIL_OUT("postmaster.pid unexpectedly not present") if $is_running == 1;
+ return;
+}
+
+=pod
+
+=item PostgreSQL::Test::Cluster->new(node_name, %params)
+
+Build a new object of class C<PostgreSQL::Test::Cluster> (or of a subclass, if you have
+one), assigning a free port number. Remembers the node, to prevent its port
+number from being reused for another node, and to ensure that it gets
+shut down when the test script exits.
+
+=over
+
+=item port => [1,65535]
+
+By default, this function assigns a port number to each node. Specify this to
+force a particular port number. The caller is responsible for evaluating
+potential conflicts and privilege requirements.
+
+=item own_host => 1
+
+By default, all nodes use the same PGHOST value. If specified, generate a
+PGHOST specific to this node. This allows multiple nodes to use the same
+port.
+
+=item install_path => '/path/to/postgres/installation'
+
+Using this parameter it is possible to have nodes pointing to different
+installations, for testing different versions together or the same version
+with different build parameters. The provided path must be the parent of the
+installation's 'bin' and 'lib' directories. In the common case where this is
+not provided, Postgres binaries will be found in the caller's PATH.
+
+=back
+
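+For illustration only; the node names here are arbitrary, and C<init> and
+C<start> are the usual companion calls defined elsewhere in this module:
+
+ my $primary = PostgreSQL::Test::Cluster->new('primary');
+ $primary->init;
+ $primary->start;
+
+ my $other = PostgreSQL::Test::Cluster->new('other', own_host => 1);
+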
+=cut
+
+sub new
+{
+ my $class = shift;
+ my ($name, %params) = @_;
+
+ # Select a port.
+ my $port;
+ if (defined $params{port})
+ {
+ $port = $params{port};
+ }
+ else
+ {
+ # When selecting a port, we look for an unassigned TCP port number,
+ # even if we intend to use only Unix-domain sockets. This is clearly
+ # necessary on $use_tcp (Windows) configurations, and it seems like a
+ # good idea on Unixen as well.
+ $port = get_free_port();
+ }
+
+ # Select a host.
+ my $host = $test_pghost;
+ if ($params{own_host})
+ {
+ if ($use_tcp)
+ {
+ $last_host_assigned++;
+ $last_host_assigned > 254 and BAIL_OUT("too many own_host nodes");
+ $host = '127.0.0.' . $last_host_assigned;
+ }
+ else
+ {
+ $host = "$test_pghost/$name"; # Assume $name =~ /^[-_a-zA-Z0-9]+$/
+ mkdir $host;
+ }
+ }
+
+ my $testname = basename($0);
+ $testname =~ s/\.[^.]+$//;
+ my $node = {
+ _port => $port,
+ _host => $host,
+ _basedir =>
+ "$PostgreSQL::Test::Utils::tmp_check/t_${testname}_${name}_data",
+ _name => $name,
+ _logfile_generation => 0,
+ _logfile_base =>
+ "$PostgreSQL::Test::Utils::log_path/${testname}_${name}",
+ _logfile =>
+ "$PostgreSQL::Test::Utils::log_path/${testname}_${name}.log"
+ };
+
+ if ($params{install_path})
+ {
+ $node->{_install_path} = $params{install_path};
+ }
+
+ bless $node, $class;
+ mkdir $node->{_basedir}
+ or
+ BAIL_OUT("could not create data directory \"$node->{_basedir}\": $!");
+
+ $node->dump_info;
+
+ $node->_set_pg_version;
+
+ my $ver = $node->{_pg_version};
+
+ # Use a subclass as defined below (or elsewhere) if this version
+ # isn't fully compatible. Warn if the version is too old and thus we don't
+ # have a subclass of this class.
+ if (ref $ver && $ver < $min_compat)
+ {
+ my $maj = $ver->major(separator => '_');
+ my $subclass = $class . "::V_$maj";
+ if ($subclass->isa($class))
+ {
+ bless $node, $subclass;
+ }
+ else
+ {
+ carp
+ "PostgreSQL::Test::Cluster isn't fully compatible with version $ver";
+ }
+ }
+
+ # Add node to list of nodes
+ push(@all_nodes, $node);
+
+ return $node;
+}
+
+# Private routine to run the pg_config binary found in our environment (or in
+# our install_path, if we have one), and set the version from it
+#
+sub _set_pg_version
+{
+ my ($self) = @_;
+ my $inst = $self->{_install_path};
+ my $pg_config = "pg_config";
+
+ if (defined $inst)
+ {
+ # If the _install_path is invalid, our PATH variables might find an
+ # unrelated pg_config executable elsewhere. Sanity check the
+ # directory.
+ BAIL_OUT("directory not found: $inst")
+ unless -d $inst;
+
+ # If the directory exists but is not the root of a postgresql
+ # installation, or if the user configured using
+ # --bindir=$SOMEWHERE_ELSE, we're not going to find pg_config, so
+ # complain about that, too.
+ $pg_config = "$inst/bin/pg_config";
+ BAIL_OUT("pg_config not found: $pg_config")
+ unless -e $pg_config
+ or ($PostgreSQL::Test::Utils::windows_os and -e "$pg_config.exe");
+ BAIL_OUT("pg_config not executable: $pg_config")
+ unless $PostgreSQL::Test::Utils::windows_os or -x $pg_config;
+
+ # Leave $pg_config install_path qualified, to be sure we get the right
+ # version information, below, or die trying
+ }
+
+ local %ENV = $self->_get_env();
+
+ # We only want the version field
+ my $version_line = qx{$pg_config --version};
+ BAIL_OUT("$pg_config failed: $!") if $?;
+
+ $self->{_pg_version} = PostgreSQL::Version->new($version_line);
+
+ BAIL_OUT("could not parse pg_config --version output: $version_line")
+ unless defined $self->{_pg_version};
+}
+
+# Private routine to return a copy of the environment with the PATH and
+# (DY)LD_LIBRARY_PATH correctly set when there is an install path set for
+# the node.
+#
+# Routines that call Postgres binaries need to call this routine like this:
+#
+# local %ENV = $self->_get_env([%extra_settings]);
+#
+# A copy of the environment is taken and node's host and port settings are
+# added as PGHOST and PGPORT, then the extra settings (if any) are applied.
+# Any setting in %extra_settings with a value that is undefined is deleted;
+# the remainder are set. Then the PATH and (DY)LD_LIBRARY_PATH are adjusted
+# if the node's install path is set, and the copied environment is returned.
+#
+# The install path set in new() needs to be a directory containing
+# bin and lib subdirectories as in a standard PostgreSQL installation, so this
+# can't be used with installations where the bin and lib directories don't have
+# a common parent directory.
+sub _get_env
+{
+ my $self = shift;
+ my %inst_env = (%ENV, PGHOST => $self->{_host}, PGPORT => $self->{_port});
+ # the remaining arguments are modifications to make to the environment
+ my %mods = (@_);
+ while (my ($k, $v) = each %mods)
+ {
+ if (defined $v)
+ {
+ $inst_env{$k} = "$v";
+ }
+ else
+ {
+ delete $inst_env{$k};
+ }
+ }
+ # now fix up the new environment for the install path
+ my $inst = $self->{_install_path};
+ if ($inst)
+ {
+ if ($PostgreSQL::Test::Utils::windows_os)
+ {
+ # Windows picks up DLLs from the PATH rather than *LD_LIBRARY_PATH
+ # choose the right path separator
+ if ($Config{osname} eq 'MSWin32')
+ {
+ $inst_env{PATH} = "$inst/bin;$inst/lib;$ENV{PATH}";
+ }
+ else
+ {
+ $inst_env{PATH} = "$inst/bin:$inst/lib:$ENV{PATH}";
+ }
+ }
+ else
+ {
+ my $dylib_name =
+ $Config{osname} eq 'darwin'
+ ? "DYLD_LIBRARY_PATH"
+ : "LD_LIBRARY_PATH";
+ $inst_env{PATH} = "$inst/bin:$ENV{PATH}";
+ if (exists $ENV{$dylib_name})
+ {
+ $inst_env{$dylib_name} = "$inst/lib:$ENV{$dylib_name}";
+ }
+ else
+ {
+ $inst_env{$dylib_name} = "$inst/lib";
+ }
+ }
+ }
+ return (%inst_env);
+}
+
+# Private routine to get an installation path qualified command.
+#
+# IPC::Run maintains a cache, %cmd_cache, mapping commands to paths. Tests
+# which use nodes spanning more than one postgres installation path need to
+# avoid confusing which installation's binaries get run. Setting $ENV{PATH} is
+# insufficient, as IPC::Run does not check to see if the path has changed since
+# caching a command.
+sub installed_command
+{
+ my ($self, $cmd) = @_;
+
+ # Nodes using alternate installation locations use their installation's
+ # bin/ directory explicitly
+ return join('/', $self->{_install_path}, 'bin', $cmd)
+ if defined $self->{_install_path};
+
+ # Nodes implicitly using the default installation location rely on IPC::Run
+ # to find the right binary, which should not cause %cmd_cache confusion,
+ # because no nodes with other installation paths do it that way.
+ return $cmd;
+}
+
+=pod
+
+=item get_free_port()
+
+Locate an unprivileged (high) TCP port that's not currently bound to
+anything. This is used by C<new()>, and also by some test cases that need to
+start other, non-Postgres servers.
+
+Ports assigned to existing PostgreSQL::Test::Cluster objects are automatically
+excluded, even if those servers are not currently running.
+
+The port number is reserved so that other concurrent test programs will not
+try to use the same port.
+
+Note: this is not an instance method. As it's not exported it should be
+called from outside the module as C<PostgreSQL::Test::Cluster::get_free_port()>.
+
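+For example, a test that needs to run an auxiliary, non-Postgres server might
+do something like this (what is started on the port is up to the caller):
+
+ my $aux_port = PostgreSQL::Test::Cluster::get_free_port();
+ # ... start some other service listening on $aux_port ...
+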
+=cut
+
+sub get_free_port
+{
+ my $found = 0;
+ my $port = $last_port_assigned;
+
+ while ($found == 0)
+ {
+
+ # advance $port, wrapping correctly around range end
+ $port = 49152 if ++$port >= 65536;
+ print "# Checking port $port\n";
+
+ # Check first that candidate port number is not included in
+ # the list of already-registered nodes.
+ $found = 1;
+ foreach my $node (@all_nodes)
+ {
+ $found = 0 if ($node->port == $port);
+ }
+
+ # Check to see if anything else is listening on this TCP port.
+ # Seek a port available for all possible listen_addresses values,
+ # so callers can harness this port for the widest range of purposes.
+ # The 0.0.0.0 test achieves that for MSYS, which automatically sets
+ # SO_EXCLUSIVEADDRUSE. Testing 0.0.0.0 is insufficient for Windows
+ # native Perl (https://stackoverflow.com/a/14388707), so we also
+ # have to test individual addresses. Doing that for 127.0.0/24
+ # addresses other than 127.0.0.1 might fail with EADDRNOTAVAIL on
+ # non-Linux, non-Windows kernels.
+ #
+ # Thus, 0.0.0.0 and individual 127.0.0/24 addresses are tested
+ # only on Windows and only when TCP usage is requested.
+ if ($found == 1)
+ {
+ foreach my $addr (qw(127.0.0.1),
+ ($use_tcp && $PostgreSQL::Test::Utils::windows_os)
+ ? qw(127.0.0.2 127.0.0.3 0.0.0.0)
+ : ())
+ {
+ if (!can_bind($addr, $port))
+ {
+ $found = 0;
+ last;
+ }
+ }
+ $found = _reserve_port($port) if $found;
+ }
+ }
+
+ print "# Found port $port\n";
+
+ # Update port for next time
+ $last_port_assigned = $port;
+
+ return $port;
+}
+
+# Internal routine to check whether a host:port is available to bind
+sub can_bind
+{
+ my ($host, $port) = @_;
+ my $iaddr = inet_aton($host);
+ my $paddr = sockaddr_in($port, $iaddr);
+ my $proto = getprotobyname("tcp");
+
+ socket(SOCK, PF_INET, SOCK_STREAM, $proto)
+ or die "socket failed: $!";
+
+ # As in postmaster, don't use SO_REUSEADDR on Windows
+ setsockopt(SOCK, SOL_SOCKET, SO_REUSEADDR, pack("l", 1))
+ unless $PostgreSQL::Test::Utils::windows_os;
+ my $ret = bind(SOCK, $paddr) && listen(SOCK, SOMAXCONN);
+ close(SOCK);
+ return $ret;
+}
+
+# Internal routine to reserve a port number
+# Returns 1 if successful, 0 if port is already reserved.
+sub _reserve_port
+{
+ my $port = shift;
+ # open in rw mode so we don't have to reopen it and lose the lock
+ my $filename = "$portdir/$port.rsv";
+ sysopen(my $portfile, $filename, O_RDWR | O_CREAT)
+ || die "opening port file $filename: $!";
+ # take an exclusive lock to avoid concurrent access
+ flock($portfile, LOCK_EX) || die "locking port file $filename: $!";
+ # see if someone else has or had a reservation of this port
+ my $pid = <$portfile> || "0";
+ chomp $pid;
+ if ($pid + 0 > 0)
+ {
+ if (kill 0, $pid)
+ {
+ # process exists and is owned by us, so we can't reserve this port
+ flock($portfile, LOCK_UN);
+ close($portfile);
+ return 0;
+ }
+ }
+ # All good, go ahead and reserve the port
+ seek($portfile, 0, SEEK_SET);
+ # print the pid with a fixed width so we don't leave any trailing junk
+ print $portfile sprintf("%10d\n", $$);
+ flock($portfile, LOCK_UN);
+ close($portfile);
+ push(@port_reservation_files, $filename);
+ return 1;
+}
+
+# Automatically shut down any still-running nodes (in the same order the nodes
+# were created in) when the test script exits.
+END
+{
+
+ # take care not to change the script's exit value
+ my $exit_code = $?;
+
+ foreach my $node (@all_nodes)
+ {
+ # During unclean termination (which could be a signal or some
+ # other failure), we're not sure that the status of our nodes
+ # has been correctly set up already, so try and update it to
+ # improve our chances of shutting them down.
+ $node->_update_pid(-1) if $exit_code != 0;
+
+ # If that fails, don't let that foil other nodes' shutdown
+ $node->teardown_node(fail_ok => 1);
+
+ # skip clean if we are requested to retain the basedir
+ next if defined $ENV{'PG_TEST_NOCLEAN'};
+
+ # clean basedir on clean test invocation
+ $node->clean_node
+ if $exit_code == 0 && PostgreSQL::Test::Utils::all_tests_passing();
+ }
+
+ unlink @port_reservation_files;
+
+ $? = $exit_code;
+}
+
+=pod
+
+=item $node->teardown_node()
+
+Do an immediate stop of the node
+
+Any optional extra parameter is passed to ->stop.
+
+=cut
+
+sub teardown_node
+{
+ my ($self, %params) = @_;
+
+ $self->stop('immediate', %params);
+ return;
+}
+
+=pod
+
+=item $node->clean_node()
+
+Remove the base directory of the node if the node has been stopped.
+
+=cut
+
+sub clean_node
+{
+ my $self = shift;
+
+ rmtree $self->{_basedir} unless defined $self->{_pid};
+ return;
+}
+
+=pod
+
+=item $node->safe_psql($dbname, $sql) => stdout
+
+Invoke B<psql> to run B<sql> on B<dbname> and return its stdout on success.
+Die if the SQL produces an error. Runs with B<ON_ERROR_STOP> set.
+
+Accepts the same optional parameters as the C<psql> method, such as B<timeout>
+and B<timed_out>.
+
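+A minimal illustrative call (the query is arbitrary):
+
+ my $count = $node->safe_psql('postgres', 'SELECT count(*) FROM pg_class');
+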
+=cut
+
+sub safe_psql
+{
+ my ($self, $dbname, $sql, %params) = @_;
+
+ local %ENV = $self->_get_env();
+
+ my ($stdout, $stderr);
+
+ my $ret = $self->psql(
+ $dbname, $sql,
+ %params,
+ stdout => \$stdout,
+ stderr => \$stderr,
+ on_error_die => 1,
+ on_error_stop => 1);
+
+ # psql can emit stderr from NOTICEs etc
+ if ($stderr ne "")
+ {
+ print "#### Begin standard error\n";
+ print $stderr;
+ print "\n#### End standard error\n";
+ }
+
+ return $stdout;
+}
+
+=pod
+
+=item $node->psql($dbname, $sql, %params) => psql_retval
+
+Invoke B<psql> to execute B<$sql> on B<$dbname> and return the return value
+from B<psql>, which is run with on_error_stop by default so that it will
+stop running sql and return 3 if the passed SQL results in an error.
+
+As a convenience, if B<psql> is called in array context it returns an
+array containing ($retval, $stdout, $stderr).
+
+psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc>
+disabled. That may be overridden by passing extra psql parameters.
+
+stdout and stderr are transformed to UNIX line endings if on Windows. Any
+trailing newline is removed.
+
+Dies on failure to invoke psql but not if psql exits with a nonzero
+return code (unless on_error_die specified).
+
+If psql exits because of a signal, an exception is raised.
+
+=over
+
+=item stdout => \$stdout
+
+B<stdout>, if given, must be a scalar reference to which standard output is
+written. If not given, standard output is not redirected and will be printed
+unless B<psql> is called in array context, in which case it's captured and
+returned.
+
+=item stderr => \$stderr
+
+Same as B<stdout> but gets standard error. If the same scalar is passed for
+both B<stdout> and B<stderr> the results may be interleaved unpredictably.
+
+=item on_error_stop => 1
+
+By default, the B<psql> method invokes the B<psql> program with ON_ERROR_STOP=1
+set, so SQL execution is stopped at the first error and exit code 3 is
+returned. Set B<on_error_stop> to 0 to ignore errors instead.
+
+=item on_error_die => 0
+
+By default, this method returns psql's result code. Pass on_error_die to
+instead die with an informative message.
+
+=item timeout => 'interval'
+
+Set a timeout for the psql call as an interval accepted by B<IPC::Run::timer>
+(integer seconds is fine). This method raises an exception on timeout, unless
+the B<timed_out> parameter is also given.
+
+=item timed_out => \$timed_out
+
+If B<timeout> is set and this parameter is given, the scalar it references
+is set to true if the psql call times out.
+
+=item connstr => B<value>
+
+If set, use this as the connection string for the connection to the
+backend.
+
+=item replication => B<value>
+
+If set, add B<replication=value> to the conninfo string.
+Passing the literal value C<database> results in a logical replication
+connection.
+
+=item extra_params => ['--single-transaction']
+
+If given, it must be an array reference containing additional parameters to B<psql>.
+
+=back
+
+e.g.
+
+ my ($stdout, $stderr, $timed_out);
+ my $cmdret = $node->psql('postgres', 'SELECT pg_sleep(600)',
+ stdout => \$stdout, stderr => \$stderr,
+ timeout => $PostgreSQL::Test::Utils::timeout_default,
+ timed_out => \$timed_out,
+ extra_params => ['--single-transaction'])
+
+will set $cmdret to undef and $timed_out to a true value.
+
+ $node->psql('postgres', $sql, on_error_die => 1);
+
+dies with an informative message if $sql fails.
+
+=cut
+
+sub psql
+{
+ my ($self, $dbname, $sql, %params) = @_;
+
+ local %ENV = $self->_get_env();
+
+ my $stdout = $params{stdout};
+ my $stderr = $params{stderr};
+ my $replication = $params{replication};
+ my $timeout = undef;
+ my $timeout_exception = 'psql timed out';
+
+ # Build the connection string.
+ my $psql_connstr;
+ if (defined $params{connstr})
+ {
+ $psql_connstr = $params{connstr};
+ }
+ else
+ {
+ $psql_connstr = $self->connstr($dbname);
+ }
+ $psql_connstr .= defined $replication ? " replication=$replication" : "";
+
+ my @psql_params = (
+ $self->installed_command('psql'),
+ '-XAtq', '-d', $psql_connstr, '-f', '-');
+
+ # If the caller wants an array and hasn't passed stdout/stderr
+ # references, allocate temporary ones to capture them so we
+ # can return them. Otherwise we won't redirect them at all.
+ if (wantarray)
+ {
+ if (!defined($stdout))
+ {
+ my $temp_stdout = "";
+ $stdout = \$temp_stdout;
+ }
+ if (!defined($stderr))
+ {
+ my $temp_stderr = "";
+ $stderr = \$temp_stderr;
+ }
+ }
+
+ $params{on_error_stop} = 1 unless defined $params{on_error_stop};
+ $params{on_error_die} = 0 unless defined $params{on_error_die};
+
+ push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop};
+ push @psql_params, @{ $params{extra_params} }
+ if defined $params{extra_params};
+
+ $timeout =
+ IPC::Run::timeout($params{timeout}, exception => $timeout_exception)
+ if (defined($params{timeout}));
+
+ ${ $params{timed_out} } = 0 if defined $params{timed_out};
+
+ # IPC::Run would otherwise append to existing contents:
+ $$stdout = "" if ref($stdout);
+ $$stderr = "" if ref($stderr);
+
+ my $ret;
+
+ # Run psql and capture any possible exceptions. If the exception is
+ # because of a timeout and the caller requested to handle that, just return
+ # and set the flag. Otherwise, and for any other exception, rethrow.
+ #
+ # For background, see
+ # https://metacpan.org/release/ETHER/Try-Tiny-0.24/view/lib/Try/Tiny.pm
+ do
+ {
+ local $@;
+ eval {
+ my @ipcrun_opts = (\@psql_params, '<', \$sql);
+ push @ipcrun_opts, '>', $stdout if defined $stdout;
+ push @ipcrun_opts, '2>', $stderr if defined $stderr;
+ push @ipcrun_opts, $timeout if defined $timeout;
+
+ IPC::Run::run @ipcrun_opts;
+ $ret = $?;
+ };
+ my $exc_save = $@;
+ if ($exc_save)
+ {
+
+ # IPC::Run::run threw an exception. re-throw unless it's a
+ # timeout, which we'll handle by testing is_expired
+ die $exc_save
+ if (blessed($exc_save)
+ || $exc_save !~ /^\Q$timeout_exception\E/);
+
+ $ret = undef;
+
+ die "Got timeout exception '$exc_save' but timer not expired?!"
+ unless $timeout->is_expired;
+
+ if (defined($params{timed_out}))
+ {
+ ${ $params{timed_out} } = 1;
+ }
+ else
+ {
+ die "psql timed out: stderr: '$$stderr'\n"
+ . "while running '@psql_params'";
+ }
+ }
+ };
+
+ if (defined $$stdout)
+ {
+ chomp $$stdout;
+ }
+
+ if (defined $$stderr)
+ {
+ chomp $$stderr;
+ }
+
+ # See http://perldoc.perl.org/perlvar.html#%24CHILD_ERROR
+ # We don't use IPC::Run::Simple to limit dependencies.
+ #
+ # We always die on signal.
+ my $core = $ret & 128 ? " (core dumped)" : "";
+ die "psql exited with signal "
+ . ($ret & 127)
+ . "$core: '$$stderr' while running '@psql_params'"
+ if $ret & 127;
+ $ret = $ret >> 8;
+
+ if ($ret && $params{on_error_die})
+ {
+ die "psql error: stderr: '$$stderr'\nwhile running '@psql_params'"
+ if $ret == 1;
+ die "connection error: '$$stderr'\nwhile running '@psql_params'"
+ if $ret == 2;
+ die
+ "error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
+ if $ret == 3;
+ die "psql returns $ret: '$$stderr'\nwhile running '@psql_params'";
+ }
+
+ if (wantarray)
+ {
+ return ($ret, $$stdout, $$stderr);
+ }
+ else
+ {
+ return $ret;
+ }
+}
+
+=pod
+
+=item $node->background_psql($dbname, %params) => PostgreSQL::Test::BackgroundPsql instance
+
+Invoke B<psql> on B<$dbname> and return a BackgroundPsql object.
+
+A timeout of $PostgreSQL::Test::Utils::timeout_default is set up.
+
+psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc>
+disabled. That may be overridden by passing extra psql parameters.
+
+Dies on failure to invoke psql, or if psql fails to connect. Errors occurring
+later are the caller's problem. psql runs with on_error_stop by default so
+that it will stop running SQL and return 3 if the passed SQL results in an error.
+
+Be sure to "quit" the returned object when done with it.
+
+=over
+
+=item on_error_stop => 1
+
+By default, the B<psql> method invokes the B<psql> program with ON_ERROR_STOP=1
+set, so SQL execution is stopped at the first error and exit code 3 is
+returned. Set B<on_error_stop> to 0 to ignore errors instead.
+
+=item replication => B<value>
+
+If set, add B<replication=value> to the conninfo string.
+Passing the literal value C<database> results in a logical replication
+connection.
+
+=item extra_params => ['--single-transaction']
+
+If given, it must be an array reference containing additional parameters to B<psql>.
+
+=back
+
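+For illustration, a session might be driven like this; the query methods used
+on the returned object are provided by PostgreSQL::Test::BackgroundPsql:
+
+ my $session = $node->background_psql('postgres');
+ # ... issue queries through the returned BackgroundPsql object ...
+ $session->quit;
+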
+=cut
+
+sub background_psql
+{
+ my ($self, $dbname, %params) = @_;
+
+ local %ENV = $self->_get_env();
+
+ my $replication = $params{replication};
+
+ my @psql_params = (
+ $self->installed_command('psql'),
+ '-XAtq',
+ '-d',
+ $self->connstr($dbname)
+ . (defined $replication ? " replication=$replication" : ""),
+ '-f',
+ '-');
+
+ $params{on_error_stop} = 1 unless defined $params{on_error_stop};
+
+ push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop};
+ push @psql_params, @{ $params{extra_params} }
+ if defined $params{extra_params};
+
+ return PostgreSQL::Test::BackgroundPsql->new(0, \@psql_params);
+}
+
+=pod
+
+=item $node->interactive_psql($dbname, %params) => BackgroundPsql instance
+
+Invoke B<psql> on B<$dbname> and return a BackgroundPsql object, which the
+caller may use to send interactive input to B<psql>.
+
+A timeout of $PostgreSQL::Test::Utils::timeout_default is set up.
+
+psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc>
+disabled. That may be overridden by passing extra psql parameters.
+
+Dies on failure to invoke psql, or if psql fails to connect.
+Errors occurring later are the caller's problem.
+
+Be sure to "quit" the returned object when done with it.
+
+=over
+
+=item extra_params => ['--single-transaction']
+
+If given, it must be an array reference containing additional parameters to B<psql>.
+
+=item history_file => B<path>
+
+Cause the interactive B<psql> session to write its command history to B<path>.
+If not given, the history is sent to B</dev/null>.
+
+=back
+
+This requires IO::Pty in addition to IPC::Run.
+
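+An illustrative sketch, assuming the caller wants the readline history kept
+in a temporary file under this test's directory:
+
+ my $history = "$PostgreSQL::Test::Utils::tmp_check/psql_history";
+ my $psql = $node->interactive_psql('postgres', history_file => $history);
+ # ... interact through the returned BackgroundPsql object ...
+ $psql->quit;
+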
+=cut
+
+sub interactive_psql
+{
+ my ($self, $dbname, %params) = @_;
+
+ local %ENV = $self->_get_env();
+
+ # Since the invoked psql will believe it's interactive, it will use
+ # readline/libedit if available. We need to adjust some environment
+ # settings to prevent unwanted side-effects.
+
+ # Developers would not appreciate tests adding a bunch of junk to
+ # their ~/.psql_history, so redirect readline history somewhere else.
+ # If the calling script doesn't specify anything, just bit-bucket it.
+ $ENV{PSQL_HISTORY} = $params{history_file} || '/dev/null';
+
+ # Another pitfall for developers is that they might have a ~/.inputrc
+ # file that changes readline's behavior enough to affect the test.
+ # So ignore any such file.
+ $ENV{INPUTRC} = '/dev/null';
+
+ # Unset TERM so that readline/libedit won't use any terminal-dependent
+ # escape sequences; that leads to way too many cross-version variations
+ # in the output.
+ delete $ENV{TERM};
+ # Some versions of readline inspect LS_COLORS, so for luck unset that too.
+ delete $ENV{LS_COLORS};
+
+ my @psql_params = (
+ $self->installed_command('psql'),
+ '-XAt', '-d', $self->connstr($dbname));
+
+ push @psql_params, @{ $params{extra_params} }
+ if defined $params{extra_params};
+
+ return PostgreSQL::Test::BackgroundPsql->new(1, \@psql_params);
+}
+
+# Common sub of pgbench-invoking interfaces. Makes any requested script files
+# and returns pgbench command-line options causing use of those files.
+sub _pgbench_make_files
+{
+ my ($self, $files) = @_;
+ my @file_opts;
+
+ if (defined $files)
+ {
+
+ # note: files are ordered for determinism
+ for my $fn (sort keys %$files)
+ {
+ my $filename = $self->basedir . '/' . $fn;
+ push @file_opts, '-f', $filename;
+
+ # cleanup file weight
+ $filename =~ s/\@\d+$//;
+
+ #push @filenames, $filename;
+ # filenames are expected to be unique on a test
+ if (-e $filename)
+ {
+ ok(0, "$filename must not already exist");
+ unlink $filename or die "cannot unlink $filename: $!";
+ }
+ PostgreSQL::Test::Utils::append_to_file($filename, $$files{$fn});
+ }
+ }
+
+ return @file_opts;
+}
+
+=pod
+
+=item $node->pgbench($opts, $stat, $out, $err, $name, $files, @args)
+
+Invoke B<pgbench>, with parameters and files.
+
+=over
+
+=item $opts
+
+Options as a string to be split on spaces.
+
+=item $stat
+
+Expected exit status.
+
+=item $out
+
+Reference to a regexp list that must match stdout.
+
+=item $err
+
+Reference to a regexp list that must match stderr.
+
+=item $name
+
+Name of test for error messages.
+
+=item $files
+
+Reference to filename/contents dictionary.
+
+=item @args
+
+Further raw options or arguments.
+
+=back
+
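+A hypothetical invocation might look like this; the options, expected output
+patterns, and script name are for illustration only:
+
+ $node->pgbench(
+ '--no-vacuum --client=2 --transactions=10',
+ 0,
+ [qr{processed: 20/20}],
+ [qr{^$}],
+ 'simple pgbench run',
+ { '001_example.sql' => "SELECT 1;\n" });
+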
+=cut
+
+sub pgbench
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my ($self, $opts, $stat, $out, $err, $name, $files, @args) = @_;
+ my @cmd = (
+ 'pgbench',
+ split(/\s+/, $opts),
+ $self->_pgbench_make_files($files), @args);
+
+ $self->command_checks_all(\@cmd, $stat, $out, $err, $name);
+}
+
+=pod
+
+=item $node->connect_ok($connstr, $test_name, %params)
+
+Attempt a connection with a custom connection string. This is expected
+to succeed.
+
+=over
+
+=item sql => B<value>
+
+If this parameter is set, this query is used for the connection attempt
+instead of the default.
+
+=item expected_stdout => B<value>
+
+If this regular expression is set, matches it with the output generated.
+
+=item log_like => [ qr/required message/ ]
+
+=item log_unlike => [ qr/prohibited message/ ]
+
+See C<log_check(...)>.
+
+=back
+
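+An illustrative call; the connection string here simply reuses this node's own
+settings:
+
+ $node->connect_ok($node->connstr('postgres'),
+ 'connection with default settings succeeds');
+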
+=cut
+
+sub connect_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($self, $connstr, $test_name, %params) = @_;
+
+ my $sql;
+ if (defined($params{sql}))
+ {
+ $sql = $params{sql};
+ }
+ else
+ {
+ $sql = "SELECT \$\$connected with $connstr\$\$";
+ }
+
+ my $log_location = -s $self->logfile;
+
+ # Never prompt for a password, any callers of this routine should
+ # have set up things properly, and this should not block.
+ my ($ret, $stdout, $stderr) = $self->psql(
+ 'postgres',
+ $sql,
+ extra_params => ['-w'],
+ connstr => "$connstr",
+ on_error_stop => 0);
+
+ is($ret, 0, $test_name);
+
+ if (defined($params{expected_stdout}))
+ {
+ like($stdout, $params{expected_stdout}, "$test_name: stdout matches");
+ }
+
+ is($stderr, "", "$test_name: no stderr");
+
+ $self->log_check($test_name, $log_location, %params);
+}
+
+=pod
+
+=item $node->connect_fails($connstr, $test_name, %params)
+
+Attempt a connection with a custom connection string. This is expected
+to fail.
+
+=over
+
+=item expected_stderr => B<value>
+
+If this regular expression is set, matches it with the output generated.
+
+=item log_like => [ qr/required message/ ]
+
+=item log_unlike => [ qr/prohibited message/ ]
+
+See C<log_check(...)>.
+
+=back
+
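+For illustration; the database name is made up, and the error text shown is
+the usual server message for a missing database:
+
+ $node->connect_fails(
+ "dbname=no_such_database",
+ 'connecting to a nonexistent database fails',
+ expected_stderr => qr/database "no_such_database" does not exist/);
+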
+=cut
+
+sub connect_fails
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($self, $connstr, $test_name, %params) = @_;
+
+ my $log_location = -s $self->logfile;
+
+ # Never prompt for a password, any callers of this routine should
+ # have set up things properly, and this should not block.
+ my ($ret, $stdout, $stderr) = $self->psql(
+ 'postgres',
+ undef,
+ extra_params => ['-w'],
+ connstr => "$connstr");
+
+ isnt($ret, 0, $test_name);
+
+ if (defined($params{expected_stderr}))
+ {
+ like($stderr, $params{expected_stderr}, "$test_name: matches");
+ }
+
+ $self->log_check($test_name, $log_location, %params);
+}
+
+=pod
+
+=item $node->poll_query_until($dbname, $query [, $expected ])
+
+Run B<$query> repeatedly, until it returns the B<$expected> result
+('t', or SQL boolean true, by default).
+Continues polling if B<psql> returns an error result.
+Times out after $PostgreSQL::Test::Utils::timeout_default seconds.
+Returns 1 if successful, 0 if timed out.
+
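+For example, to wait until some server-side condition holds (the query here is
+only illustrative):
+
+ $node->poll_query_until('postgres',
+ "SELECT count(*) > 0 FROM pg_stat_activity WHERE state = 'idle'")
+ or die "timed out waiting for an idle backend";
+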
+=cut
+
+sub poll_query_until
+{
+ my ($self, $dbname, $query, $expected) = @_;
+
+ local %ENV = $self->_get_env();
+
+ $expected = 't' unless defined($expected); # default value
+
+ my $cmd = [
+ $self->installed_command('psql'), '-XAt',
+ '-d', $self->connstr($dbname)
+ ];
+ my ($stdout, $stderr);
+ my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default;
+ my $attempts = 0;
+
+ while ($attempts < $max_attempts)
+ {
+ my $result = IPC::Run::run $cmd, '<', \$query,
+ '>', \$stdout, '2>', \$stderr;
+
+ chomp($stdout);
+ chomp($stderr);
+
+ if ($stdout eq $expected && $stderr eq '')
+ {
+ return 1;
+ }
+
+ # Wait 0.1 second before retrying.
+ usleep(100_000);
+
+ $attempts++;
+ }
+
+ # Give up. Print the output from the last attempt, hopefully that's useful
+ # for debugging.
+ diag qq(poll_query_until timed out executing this query:
+$query
+expecting this output:
+$expected
+last actual query output:
+$stdout
+with stderr:
+$stderr);
+ return 0;
+}
+
+=pod
+
+=item $node->command_ok(...)
+
+Runs a shell command like PostgreSQL::Test::Utils::command_ok, but with PGHOST and PGPORT set
+so that the command will default to connecting to this PostgreSQL::Test::Cluster.
+
+=cut
+
+sub command_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ PostgreSQL::Test::Utils::command_ok(@_);
+ return;
+}
+
+=pod
+
+=item $node->command_fails(...)
+
+PostgreSQL::Test::Utils::command_fails with our connection parameters. See command_ok(...)
+
+=cut
+
+sub command_fails
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ PostgreSQL::Test::Utils::command_fails(@_);
+ return;
+}
+
+=pod
+
+=item $node->command_like(...)
+
+PostgreSQL::Test::Utils::command_like with our connection parameters. See command_ok(...)
+
+=cut
+
+sub command_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ PostgreSQL::Test::Utils::command_like(@_);
+ return;
+}
+
+=pod
+
+=item $node->command_fails_like(...)
+
+PostgreSQL::Test::Utils::command_fails_like with our connection parameters. See command_ok(...)
+
+=cut
+
+sub command_fails_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ PostgreSQL::Test::Utils::command_fails_like(@_);
+ return;
+}
+
+=pod
+
+=item $node->command_checks_all(...)
+
+PostgreSQL::Test::Utils::command_checks_all with our connection parameters. See
+command_ok(...)
+
+=cut
+
+sub command_checks_all
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ PostgreSQL::Test::Utils::command_checks_all(@_);
+ return;
+}
+
+=pod
+
+=item $node->issues_sql_like(cmd, expected_sql, test_name)
+
+Run a command on the node, then verify that $expected_sql appears in the
+server log file.
+
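+A sketch of typical usage; the command and pattern are illustrative, and the
+check relies on the node logging statements (C<init()> in this module sets
+C<log_statement = all> by default):
+
+ $node->issues_sql_like(
+ [ 'createdb', 'mydb' ],
+ qr/statement: CREATE DATABASE mydb/,
+ 'createdb issues the expected SQL');
+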
+=cut
+
+sub issues_sql_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my ($self, $cmd, $expected_sql, $test_name) = @_;
+
+ local %ENV = $self->_get_env();
+
+ my $log_location = -s $self->logfile;
+
+ my $result = PostgreSQL::Test::Utils::run_log($cmd);
+ ok($result, "@$cmd exit code 0");
+ my $log =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
+ like($log, $expected_sql, "$test_name: SQL found in server log");
+ return;
+}
+
+=pod
+
+=item $node->log_content()
+
+Returns the contents of the node's server log file.
+
+=cut
+
+sub log_content
+{
+ my ($self) = @_;
+ return PostgreSQL::Test::Utils::slurp_file($self->logfile);
+}
+
+=pod
+
+=item $node->log_check($test_name, $offset, %parameters)
+
+Check contents of server logs.
+
+=over
+
+=item $test_name
+
+Name of test for error messages.
+
+=item $offset
+
+Offset of the log file.
+
+=item log_like => [ qr/required message/ ]
+
+If given, it must be an array reference containing a list of regular
+expressions that must match against the server log, using
+C<Test::More::like()>.
+
+=item log_unlike => [ qr/prohibited message/ ]
+
+If given, it must be an array reference containing a list of regular
+expressions that must NOT match against the server log. They will be
+passed to C<Test::More::unlike()>.
+
+=back
+
+=cut
+
+sub log_check
+{
+ my ($self, $test_name, $offset, %params) = @_;
+
+ my (@log_like, @log_unlike);
+ if (defined($params{log_like}))
+ {
+ @log_like = @{ $params{log_like} };
+ }
+ if (defined($params{log_unlike}))
+ {
+ @log_unlike = @{ $params{log_unlike} };
+ }
+
+ if (@log_like or @log_unlike)
+ {
+ my $log_contents =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $offset);
+
+ while (my $regex = shift @log_like)
+ {
+ like($log_contents, $regex, "$test_name: log matches");
+ }
+ while (my $regex = shift @log_unlike)
+ {
+ unlike($log_contents, $regex, "$test_name: log does not match");
+ }
+ }
+}
+
+=pod
+
+=item log_contains(pattern, offset)
+
+Returns true if the given pattern is found in the node's log file, searching
+from the given byte offset.
+
+=cut
+
+sub log_contains
+{
+ my ($self, $pattern, $offset) = @_;
+
+ return PostgreSQL::Test::Utils::slurp_file($self->logfile, $offset) =~
+ m/$pattern/;
+}
+
+=pod
+
+=item $node->run_log(...)
+
+Runs a shell command like PostgreSQL::Test::Utils::run_log, but with connection parameters set
+so that the command will default to connecting to this PostgreSQL::Test::Cluster.
+
+=cut
+
+sub run_log
+{
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ return PostgreSQL::Test::Utils::run_log(@_);
+}
+
+=pod
+
+=item $node->lsn(mode)
+
+Look up WAL locations on the server:
+
+ * insert location (primary only, error on replica)
+ * write location (primary only, error on replica)
+ * flush location (primary only, error on replica)
+ * receive location (always undef on primary)
+ * replay location (always undef on primary)
+
+mode must be specified.
+
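+For example, a primary's current write location can be captured and compared
+on a standby; the polling query is shown for illustration only:
+
+ my $primary_lsn = $primary->lsn('write');
+ $standby->poll_query_until('postgres',
+ "SELECT '$primary_lsn'::pg_lsn <= pg_last_wal_replay_lsn()")
+ or die "standby did not catch up";
+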
+=cut
+
+sub lsn
+{
+ my ($self, $mode) = @_;
+ my %modes = (
+ 'insert' => 'pg_current_wal_insert_lsn()',
+ 'flush' => 'pg_current_wal_flush_lsn()',
+ 'write' => 'pg_current_wal_lsn()',
+ 'receive' => 'pg_last_wal_receive_lsn()',
+ 'replay' => 'pg_last_wal_replay_lsn()');
+
+ $mode = '<undef>' if !defined($mode);
+ croak "unknown mode for 'lsn': '$mode', valid modes are "
+ . join(', ', keys %modes)
+ if !defined($modes{$mode});
+
+ my $result = $self->safe_psql('postgres', "SELECT $modes{$mode}");
+ chomp($result);
+ if ($result eq '')
+ {
+ return;
+ }
+ else
+ {
+ return $result;
+ }
+}
+
+=pod
+
+=item $node->wait_for_catchup(standby_name, mode, target_lsn)
+
+Wait for the replication connection with application_name standby_name until
+its 'mode' replication column in pg_stat_replication equals or passes the
+specified or default target_lsn. By default the replay_lsn is waited for,
+but 'mode' may be specified to wait for any of sent|write|flush|replay.
+The replication connection must be in a streaming state.
+
+When doing physical replication, the standby is usually identified by
+passing its PostgreSQL::Test::Cluster instance. When doing logical
+replication, standby_name identifies a subscription.
+
+When not in recovery, the default value of target_lsn is $node->lsn('write'),
+which ensures that the standby has caught up to what has been committed on
+the primary.
+
+When in recovery, the default value of target_lsn is $node->lsn('replay')
+instead which ensures that the cascaded standby has caught up to what has been
+replayed on the standby.
+
+If you pass an explicit value of target_lsn, it should almost always be
+the primary's write LSN; so this parameter is seldom needed except when
+querying some intermediate replication node rather than the primary.
+
+If there is no active replication connection from this peer, waits until
+poll_query_until timeout.
+
+Requires that the 'postgres' db exists and is accessible.
+
+This is not a test. It die()s on failure.
+
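+Typical physical-replication usage passes the standby's Cluster object and
+relies on the defaults; this sketch assumes $primary and $standby are already
+set up for streaming, and the CREATE TABLE is just an arbitrary change:
+
+ $primary->safe_psql('postgres', 'CREATE TABLE replicated_tbl ()');
+ $primary->wait_for_catchup($standby);
+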
+=cut
+
+sub wait_for_catchup
+{
+ my ($self, $standby_name, $mode, $target_lsn) = @_;
+ $mode = defined($mode) ? $mode : 'replay';
+ my %valid_modes =
+ ('sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1);
+ croak "unknown mode $mode for 'wait_for_catchup', valid modes are "
+ . join(', ', keys(%valid_modes))
+ unless exists($valid_modes{$mode});
+
+ # Allow passing of a PostgreSQL::Test::Cluster instance as shorthand
+ if (blessed($standby_name)
+ && $standby_name->isa("PostgreSQL::Test::Cluster"))
+ {
+ $standby_name = $standby_name->name;
+ }
+ if (!defined($target_lsn))
+ {
+ my $isrecovery =
+ $self->safe_psql('postgres', "SELECT pg_is_in_recovery()");
+ chomp($isrecovery);
+ if ($isrecovery eq 't')
+ {
+ $target_lsn = $self->lsn('replay');
+ }
+ else
+ {
+ $target_lsn = $self->lsn('write');
+ }
+ }
+ print "Waiting for replication conn "
+ . $standby_name . "'s "
+ . $mode
+ . "_lsn to pass "
+ . $target_lsn . " on "
+ . $self->name . "\n";
+ # Before release 12 walreceiver just set the application name to
+ # "walreceiver"
+ my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn AND state = 'streaming'
+ FROM pg_catalog.pg_stat_replication
+ WHERE application_name IN ('$standby_name', 'walreceiver')];
+ if (!$self->poll_query_until('postgres', $query))
+ {
+ if (PostgreSQL::Test::Utils::has_wal_read_bug)
+ {
+ # Mimic having skipped the test file. If >0 tests have run, the
+ # harness won't accept a skip; otherwise, it won't accept
+ # done_testing(). Force a nonzero count by running one test.
+ ok(1, 'dummy test before skip for filesystem bug');
+ carp "skip rest: timed out waiting for catchup & filesystem bug";
+ done_testing();
+ exit 0;
+ }
+ else
+ {
+ croak "timed out waiting for catchup";
+ }
+ }
+ print "done\n";
+ return;
+}
+
+=pod
+
+=item $node->wait_for_replay_catchup($standby_name [, $base_node ])
+
+Wait for the replication connection with application_name I<$standby_name>
+until its B<replay> replication column in pg_stat_replication in I<$node>
+equals or passes the I<$base_node>'s B<replay_lsn>. If I<$base_node> is
+omitted, the LSN to wait for is obtained from I<$node>.
+
+The replication connection must be in a streaming state.
+
+Requires that the 'postgres' db exists and is accessible.
+
+This is not a test. It die()s on failure.
+
+=cut
+
+sub wait_for_replay_catchup
+{
+ my ($self, $standby_name, $node) = @_;
+ $node = defined($node) ? $node : $self;
+
+ $self->wait_for_catchup($standby_name, 'replay', $node->lsn('flush'));
+}
+
+=pod
+
+=item $node->wait_for_slot_catchup(slot_name, mode, target_lsn)
+
+Wait for the named replication slot to equal or pass the supplied target_lsn.
+The location used is the restart_lsn unless mode is given, in which case it may
+be 'restart' or 'confirmed_flush'.
+
+Requires that the 'postgres' db exists and is accessible.
+
+This is not a test. It die()s on failure.
+
+If the slot is not active, will time out after poll_query_until's timeout.
+
+target_lsn may be any arbitrary lsn, but is typically $primary_node->lsn('insert').
+
+Note that for logical slots, restart_lsn is held down by the oldest in-progress tx.
+
+=cut
+
+sub wait_for_slot_catchup
+{
+ my ($self, $slot_name, $mode, $target_lsn) = @_;
+ $mode = defined($mode) ? $mode : 'restart';
+ if (!($mode eq 'restart' || $mode eq 'confirmed_flush'))
+ {
+ croak "valid modes are restart, confirmed_flush";
+ }
+ croak 'target lsn must be specified' unless defined($target_lsn);
+ print "Waiting for replication slot "
+ . $slot_name . "'s "
+ . $mode
+ . "_lsn to pass "
+ . $target_lsn . " on "
+ . $self->name . "\n";
+ my $query =
+ qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';];
+ $self->poll_query_until('postgres', $query)
+ or croak "timed out waiting for catchup";
+ print "done\n";
+ return;
+}
+
+=pod
+
+=item $node->wait_for_subscription_sync(publisher, subname, dbname)
+
+Wait for all tables in pg_subscription_rel to complete the initial
+synchronization (i.e., to be in either 'syncdone' or 'ready' state).
+
+If the publisher node is given, additionally, check if the subscriber has
+caught up to what has been committed on the primary. This is useful to
+ensure that the initial data synchronization has been completed after
+creating a new subscription.
+
+If there is no active replication connection from this peer, wait until
+poll_query_until timeout.
+
+This is not a test. It die()s on failure.
+
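+For illustration, after creating a subscription; the subscription, publication
+and connection-string names are placeholders:
+
+ $subscriber->safe_psql('postgres',
+ "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr' PUBLICATION mypub");
+ $subscriber->wait_for_subscription_sync($publisher, 'mysub');
+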
+=cut
+
+sub wait_for_subscription_sync
+{
+ my ($self, $publisher, $subname, $dbname) = @_;
+ my $name = $self->name;
+
+ $dbname = defined($dbname) ? $dbname : 'postgres';
+
+ # Wait for all tables to finish initial sync.
+ print "Waiting for all subscriptions in \"$name\" to synchronize data\n";
+ my $query =
+ qq[SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');];
+ $self->poll_query_until($dbname, $query)
+ or croak "timed out waiting for subscriber to synchronize data";
+
+ # Then, wait for the replication to catchup if required.
+ if (defined($publisher))
+ {
+ croak 'subscription name must be specified' unless defined($subname);
+ $publisher->wait_for_catchup($subname);
+ }
+
+ print "done\n";
+ return;
+}
+
+=pod
+
+=item $node->wait_for_log(regexp, offset)
+
+Waits for the contents of the server log file, starting at the given offset, to
+match the supplied regular expression. Checks the entire log if no offset is
+given. Times out after $PostgreSQL::Test::Utils::timeout_default seconds.
+
+If successful, returns the length of the entire log file, in bytes.
+
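+A sketch of typical usage; the pattern is only an example of something the
+server might write to its log:
+
+ my $offset = -s $node->logfile;
+ # ... trigger some activity ...
+ $offset = $node->wait_for_log(qr/checkpoint complete/, $offset);
+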
+=cut
+
+sub wait_for_log
+{
+ my ($self, $regexp, $offset) = @_;
+ $offset = 0 unless defined $offset;
+
+ my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default;
+ my $attempts = 0;
+
+ while ($attempts < $max_attempts)
+ {
+ my $log =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $offset);
+
+ return $offset + length($log) if ($log =~ m/$regexp/);
+
+ # Wait 0.1 second before retrying.
+ usleep(100_000);
+
+ $attempts++;
+ }
+
+ croak "timed out waiting for match: $regexp";
+}
+
+=pod
+
+=item $node->query_hash($dbname, $query, @columns)
+
+Execute $query on $dbname, replacing any appearance of the string __COLUMNS__
+within the query with a comma-separated list of @columns.
+
+If __COLUMNS__ does not appear in the query, its result columns must EXACTLY
+match the order and number (but not necessarily alias) of supplied @columns.
+
+The query must return zero or one rows.
+
+Return a hash-ref representation of the results of the query, with any empty
+or null results as defined keys with an empty-string value. There is no way
+to differentiate between null and empty-string result fields.
+
+If the query returns zero rows, return a hash with all columns empty. There
+is no way to differentiate between zero rows returned and a row with only
+null columns.
+
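+For illustration, fetching a couple of columns from a single-row system view;
+the view and column names are just an example:
+
+ my $row = $node->query_hash(
+ 'postgres',
+ 'SELECT __COLUMNS__ FROM pg_stat_wal',
+ qw(wal_records wal_bytes));
+ print "WAL bytes so far: $row->{wal_bytes}\n";
+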
+=cut
+
+sub query_hash
+{
+ my ($self, $dbname, $query, @columns) = @_;
+ croak 'calls in array context for multi-row results not supported yet'
+ if (wantarray);
+
+ # Replace __COLUMNS__ if found
+ substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) =
+ join(', ', @columns)
+ if index($query, '__COLUMNS__') >= 0;
+ my $result = $self->safe_psql($dbname, $query);
+
+ # hash slice, see http://stackoverflow.com/a/16755894/398670 .
+ #
+ # Fills the hash with empty strings produced by x-operator element
+ # duplication if result is an empty row
+ #
+ my %val;
+ @val{@columns} =
+ $result ne '' ? split(qr/\|/, $result, -1) : ('',) x scalar(@columns);
+ return \%val;
+}
+
+=pod
+
+=item $node->slot(slot_name)
+
+Return a hash-ref of replication slot data for the named slot, or a hash-ref
+with all values '' if the slot is not found. Null and empty-string fields are
+not differentiated; no field is ever undef.
+
+The restart_lsn and confirmed_flush_lsn fields are returned verbatim, and also
+as a 2-list of [highword, lowword] integers. Since we rely on Perl 5.14 we
+can't "use bigint" (it arrived in 5.20), and we can't assume Math::BigInt from
+CPAN is available either.
+
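+For example (the slot name is arbitrary):
+
+ my $slot = $node->slot('my_slot');
+ is($slot->{'slot_type'}, 'physical', 'slot my_slot exists and is physical');
+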
+=cut
+
+sub slot
+{
+ my ($self, $slot_name) = @_;
+ my @columns = (
+ 'plugin', 'slot_type', 'datoid', 'database',
+ 'active', 'active_pid', 'xmin', 'catalog_xmin',
+ 'restart_lsn');
+ return $self->query_hash(
+ 'postgres',
+ "SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'",
+ @columns);
+}
+
+=pod
+
+=item $node->pg_recvlogical_upto(self, dbname, slot_name, endpos, timeout_secs, ...)
+
+Invoke pg_recvlogical to read from slot_name on dbname until LSN endpos, which
+corresponds to pg_recvlogical --endpos. Gives up after timeout (if nonzero).
+
+Disallows pg_recvlogical from internally retrying on error by passing --no-loop.
+
+Plugin options are passed as additional keyword arguments.
+
+If called in scalar context, returns stdout, and die()s on timeout or nonzero return.
+
+If called in array context, returns a tuple of (retval, stdout, stderr, timeout).
+timeout is the IPC::Run::Timeout object whose is_expired method can be tested
+to check for timeout. retval is undef on timeout.
+
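+An illustrative scalar-context call, assuming the slot was created with the
+test_decoding plugin ('include-xids' is one of that plugin's options):
+
+ my $endpos = $node->safe_psql('postgres', 'SELECT pg_current_wal_lsn()');
+ my $output = $node->pg_recvlogical_upto(
+ 'postgres', 'test_slot', $endpos,
+ $PostgreSQL::Test::Utils::timeout_default,
+ 'include-xids' => '0');
+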
+=cut
+
+sub pg_recvlogical_upto
+{
+ my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options)
+ = @_;
+
+ local %ENV = $self->_get_env();
+
+ my ($stdout, $stderr);
+
+ my $timeout_exception = 'pg_recvlogical timed out';
+
+ croak 'slot name must be specified' unless defined($slot_name);
+ croak 'endpos must be specified' unless defined($endpos);
+
+ my @cmd = (
+ $self->installed_command('pg_recvlogical'),
+ '-S', $slot_name, '--dbname', $self->connstr($dbname));
+ push @cmd, '--endpos', $endpos;
+ push @cmd, '-f', '-', '--no-loop', '--start';
+
+ while (my ($k, $v) = each %plugin_options)
+ {
+ croak "= is not permitted to appear in replication option name"
+ if ($k =~ qr/=/);
+ push @cmd, "-o", "$k=$v";
+ }
+
+ my $timeout;
+ $timeout =
+ IPC::Run::timeout($timeout_secs, exception => $timeout_exception)
+ if $timeout_secs;
+ my $ret = 0;
+
+ do
+ {
+ local $@;
+ eval {
+ IPC::Run::run(\@cmd, ">", \$stdout, "2>", \$stderr, $timeout);
+ $ret = $?;
+ };
+ my $exc_save = $@;
+ if ($exc_save)
+ {
+
+ # IPC::Run::run threw an exception. re-throw unless it's a
+ # timeout, which we'll handle by testing is_expired
+ die $exc_save
+ if (blessed($exc_save) || $exc_save !~ qr/$timeout_exception/);
+
+ $ret = undef;
+
+ die "Got timeout exception '$exc_save' but timer not expired?!"
+ unless $timeout->is_expired;
+
+ die
+ "$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
+ unless wantarray;
+ }
+ };
+
+ if (wantarray)
+ {
+ return ($ret, $stdout, $stderr, $timeout);
+ }
+ else
+ {
+ die
+ "pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'"
+ if $ret;
+ return $stdout;
+ }
+}
+
+=pod
+
+=item $node->corrupt_page_checksum(self, file, page_offset)
+
+Intentionally corrupt the checksum field of one page in a file.
+The server must be stopped for this to work reliably.
+
+The file name should be specified relative to the cluster datadir.
+page_offset had better be a multiple of the cluster's block size.
+
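+A sketch of how a test might use this; the table name is arbitrary, and the
+relation's path is obtained while the server is still running:
+
+ my $relpath = $node->safe_psql('postgres',
+ "SELECT pg_relation_filepath('some_table')");
+ $node->stop;
+ $node->corrupt_page_checksum($relpath, 0);    # first page of the relation
+ $node->start;
+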
+=cut
+
+sub corrupt_page_checksum
+{
+ my ($self, $file, $page_offset) = @_;
+ my $pgdata = $self->data_dir;
+ my $pageheader;
+
+ open my $fh, '+<', "$pgdata/$file" or die "open($file) failed: $!";
+ binmode $fh;
+ sysseek($fh, $page_offset, 0) or die "sysseek failed: $!";
+ sysread($fh, $pageheader, 24) or die "sysread failed: $!";
+ # This inverts the pd_checksum field (only); see struct PageHeaderData
+ $pageheader ^= "\0\0\0\0\0\0\0\0\xff\xff";
+ sysseek($fh, $page_offset, 0) or die "sysseek failed: $!";
+ syswrite($fh, $pageheader) or die "syswrite failed: $!";
+ close $fh;
+
+ return;
+}
+
+#
+# Signal handlers
+#
+$SIG{TERM} = $SIG{INT} = sub {
+ die "death by signal";
+};
+
+=pod
+
+=item $node->create_logical_slot_on_standby(self, primary, slot_name, dbname)
+
+Create logical replication slot on given standby
+
+=cut
+
+sub create_logical_slot_on_standby
+{
+ my ($self, $primary, $slot_name, $dbname) = @_;
+ my ($stdout, $stderr);
+
+ my $handle;
+
+ $handle = IPC::Run::start(
+ [
+ 'pg_recvlogical', '-d',
+ $self->connstr($dbname), '-P',
+ 'test_decoding', '-S',
+ $slot_name, '--create-slot'
+ ],
+ '>',
+ \$stdout,
+ '2>',
+ \$stderr);
+
+ # Once the slot's restart_lsn is determined, the standby looks for an
+ # xl_running_xacts WAL record from the restart_lsn onwards.  First wait
+ # until the slot's restart_lsn is determined.
+
+ $self->poll_query_until(
+ 'postgres', qq[
+ SELECT restart_lsn IS NOT NULL
+ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'
+ ])
+ or die
+ "timed out waiting for logical slot to calculate its restart_lsn";
+
+ # Then arrange for the xl_running_xacts record for which pg_recvlogical is
+ # waiting.
+ $primary->safe_psql('postgres', 'SELECT pg_log_standby_snapshot()');
+
+ $handle->finish();
+
+ is($self->slot($slot_name)->{'slot_type'},
+ 'logical', $slot_name . ' on standby created')
+ or die "could not create slot " . $slot_name;
+}
+
+=pod
+
+=back
+
+=cut
+
+##########################################################################
+
+package PostgreSQL::Test::Cluster::V_11
+ ; ## no critic (ProhibitMultiplePackages)
+
+use parent -norequire, qw(PostgreSQL::Test::Cluster);
+
+# https://www.postgresql.org/docs/11/release-11.html
+
+# max_wal_senders + superuser_reserved_connections must be < max_connections
+# uses recovery.conf
+
+sub _recovery_file { return "recovery.conf"; }
+
+sub set_standby_mode
+{
+ my $self = shift;
+ $self->append_conf("recovery.conf", "standby_mode = on\n");
+}
+
+sub init
+{
+ my ($self, %params) = @_;
+ $self->SUPER::init(%params);
+ $self->adjust_conf('postgresql.conf', 'max_wal_senders',
+ $params{allows_streaming} ? 5 : 0);
+}
+
+##########################################################################
+
+package PostgreSQL::Test::Cluster::V_10
+ ; ## no critic (ProhibitMultiplePackages)
+
+use parent -norequire, qw(PostgreSQL::Test::Cluster::V_11);
+
+# https://www.postgresql.org/docs/10/release-10.html
+
+########################################################################
+
+1;
diff --git a/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm b/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm
new file mode 100644
index 0000000..15964e6
--- /dev/null
+++ b/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm
@@ -0,0 +1,157 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+=pod
+
+=head1 NAME
+
+PostgreSQL::Test::RecursiveCopy - simple recursive copy implementation
+
+=head1 SYNOPSIS
+
+ use PostgreSQL::Test::RecursiveCopy;
+
+ PostgreSQL::Test::RecursiveCopy::copypath($from, $to, filterfn => sub { return 1; });
+ PostgreSQL::Test::RecursiveCopy::copypath($from, $to);
+
+=cut
+
+package PostgreSQL::Test::RecursiveCopy;
+
+use strict;
+use warnings;
+
+use Carp;
+use File::Basename;
+use File::Copy;
+
+=pod
+
+=head1 DESCRIPTION
+
+=head2 copypath($from, $to, %params)
+
+Recursively copy all files and directories from $from to $to.
+Does not preserve file metadata (e.g., permissions).
+
+Only regular files and subdirectories are copied. Trying to copy other types
+of directory entries raises an exception.
+
+Raises an exception if a file would be overwritten, the source directory can't
+be read, or any I/O operation fails. However, we silently ignore ENOENT on
+open, because when copying from a live database it's possible for a file/dir
+to be deleted after we see its directory entry but before we can open it.
+
+Always returns true.
+
+If the B<filterfn> parameter is given, it must be a subroutine reference.
+This subroutine will be called for each entry in the source directory with its
+relative path as its only parameter; if the subroutine returns true the entry
+is copied, otherwise the file is skipped.
+
+On failure the target directory may be in some incomplete state; no cleanup is
+attempted.
+
+=head1 EXAMPLES
+
+ PostgreSQL::Test::RecursiveCopy::copypath('/some/path', '/empty/dir',
+ filterfn => sub {
+ # omit log/ and contents
+ my $src = shift;
+ return $src ne 'log';
+ }
+ );
+
+=cut
+
+sub copypath
+{
+ my ($base_src_dir, $base_dest_dir, %params) = @_;
+ my $filterfn;
+
+ if (defined $params{filterfn})
+ {
+ croak "if specified, filterfn must be a subroutine reference"
+ unless defined(ref $params{filterfn})
+ and (ref $params{filterfn} eq 'CODE');
+
+ $filterfn = $params{filterfn};
+ }
+ else
+ {
+ $filterfn = sub { return 1; };
+ }
+
+ # Complain if original path is bogus, because _copypath_recurse won't.
+ croak "\"$base_src_dir\" does not exist" if !-e $base_src_dir;
+
+ # Start recursive copy from current directory
+ return _copypath_recurse($base_src_dir, $base_dest_dir, "", $filterfn);
+}
+
+# Recursive private guts of copypath
+sub _copypath_recurse
+{
+ my ($base_src_dir, $base_dest_dir, $curr_path, $filterfn) = @_;
+ my $srcpath = "$base_src_dir/$curr_path";
+ my $destpath = "$base_dest_dir/$curr_path";
+
+ # invoke the filter and skip all further operation if it returns false
+ return 1 unless &$filterfn($curr_path);
+
+ # Check for symlink -- needed only on source dir
+ # (note: this will fall through quietly if file is already gone)
+ croak "Cannot operate on symlink \"$srcpath\"" if -l $srcpath;
+
+ # Abort if destination path already exists. Should we allow directories
+ # to exist already?
+ croak "Destination path \"$destpath\" already exists" if -e $destpath;
+
+ # If this source path is a file, simply copy it to destination with the
+ # same name and we're done.
+ if (-f $srcpath)
+ {
+ my $fh;
+ unless (open($fh, '<', $srcpath))
+ {
+ return 1 if ($!{ENOENT});
+ die "open($srcpath) failed: $!";
+ }
+ copy($fh, $destpath)
+ or die "copy $srcpath -> $destpath failed: $!";
+ close $fh;
+ return 1;
+ }
+
+ # If it's a directory, create it on dest and recurse into it.
+ if (-d $srcpath)
+ {
+ my $directory;
+ unless (opendir($directory, $srcpath))
+ {
+ return 1 if ($!{ENOENT});
+ die "opendir($srcpath) failed: $!";
+ }
+
+ mkdir($destpath) or die "mkdir($destpath) failed: $!";
+
+ while (my $entry = readdir($directory))
+ {
+ next if ($entry eq '.' or $entry eq '..');
+ _copypath_recurse($base_src_dir, $base_dest_dir,
+ $curr_path eq '' ? $entry : "$curr_path/$entry", $filterfn)
+ or die "copypath $srcpath/$entry -> $destpath/$entry failed";
+ }
+
+ closedir($directory);
+ return 1;
+ }
+
+ # If it disappeared from sight, that's OK.
+ return 1 if !-e $srcpath;
+
+ # Else it's some weird file type; complain.
+ croak "Source path \"$srcpath\" is not a regular file or directory";
+}
+
+1;
diff --git a/src/test/perl/PostgreSQL/Test/SimpleTee.pm b/src/test/perl/PostgreSQL/Test/SimpleTee.pm
new file mode 100644
index 0000000..82099bf
--- /dev/null
+++ b/src/test/perl/PostgreSQL/Test/SimpleTee.pm
@@ -0,0 +1,63 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+# A simple 'tee' implementation, using perl tie.
+#
+# Whenever you print to the handle, it gets forwarded to a list of
+# handles. The list of output filehandles is passed to the constructor.
+#
+# This is similar to IO::Tee, but only used for output. Only the PRINT
+# method is currently implemented; that's all we need. We don't want to
+# depend on IO::Tee just for this.
+
+# The package is enhanced to add timestamp and elapsed time decorations to
+# the log file traces sent through this interface from Test::More functions
+# (ok, is, note, diag etc.). Elapsed time is shown as the time since the last
+# log trace.
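+#
+# A minimal usage sketch (mirroring how PostgreSQL::Test::Utils wires up its
+# log handles; the filehandle names below are illustrative):
+#
+#   open my $logfh, '>', 'some_logfile' or die "open failed: $!";
+#   tie *TEE, "PostgreSQL::Test::SimpleTee", \*STDOUT, $logfh;
+#   print TEE "this line goes to both STDOUT and the log file\n";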
+
+package PostgreSQL::Test::SimpleTee;
+use strict;
+use warnings;
+
+use Time::HiRes qw(time);
+
+my $last_time;
+
+BEGIN { $last_time = time; }
+
+sub _time_str
+{
+ my $tm = time;
+ my $diff = $tm - $last_time;
+ $last_time = $tm;
+ my ($sec, $min, $hour) = localtime($tm);
+ my $msec = int(1000 * ($tm - int($tm)));
+ return sprintf("[%.2d:%.2d:%.2d.%.3d](%.3fs) ",
+ $hour, $min, $sec, $msec, $diff);
+}
+
+sub TIEHANDLE
+{
+ my $self = shift;
+ return bless \@_, $self;
+}
+
+sub PRINT
+{
+ my $self = shift;
+ my $ok = 1;
+	# The first filehandle passed to TIEHANDLE in PostgreSQL::Test::Utils is
+	# the original stdout, which is what 'prove' sees. Additional decorations
+	# confuse it, so only emit the time string on handles after the first.
+ my $skip = 1;
+ my $ts = _time_str;
+ for my $fh (@$self)
+ {
+ print $fh ($skip ? "" : $ts), @_ or $ok = 0;
+ $fh->flush or $ok = 0;
+ $skip = 0;
+ }
+ return $ok;
+}
+
+1;
diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
new file mode 100644
index 0000000..d66fe1c
--- /dev/null
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -0,0 +1,1074 @@
+
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+
+=pod
+
+=head1 NAME
+
+PostgreSQL::Test::Utils - helper module for writing PostgreSQL's C<prove> tests.
+
+=head1 SYNOPSIS
+
+ use PostgreSQL::Test::Utils;
+
+ # Test basic output of a command
+ program_help_ok('initdb');
+ program_version_ok('initdb');
+ program_options_handling_ok('initdb');
+
+ # Test option combinations
+ command_fails(['initdb', '--invalid-option'],
+ 'command fails with invalid option');
+ my $tempdir = PostgreSQL::Test::Utils::tempdir;
+ command_ok('initdb', '-D', $tempdir);
+
+ # Miscellanea
+ print "on Windows" if $PostgreSQL::Test::Utils::windows_os;
+ ok(check_mode_recursive($stream_dir, 0700, 0600),
+ "check stream dir permissions");
+ PostgreSQL::Test::Utils::system_log('pg_ctl', 'kill', 'QUIT', $slow_pid);
+
+=head1 DESCRIPTION
+
+C<PostgreSQL::Test::Utils> contains a set of routines dedicated to environment setup for
+a PostgreSQL regression test run and includes some low-level routines
+aimed at controlling command execution, logging and test functions.
+
+=cut
+
+# This module should never depend on any other PostgreSQL regression test
+# modules.
+
+package PostgreSQL::Test::Utils;
+
+use strict;
+use warnings;
+
+use Carp;
+use Config;
+use Cwd;
+use Exporter 'import';
+use Fcntl qw(:mode :seek);
+use File::Basename;
+use File::Find;
+use File::Spec;
+use File::stat qw(stat);
+use File::Temp ();
+use IPC::Run;
+use POSIX qw(locale_h);
+use PostgreSQL::Test::SimpleTee;
+
+# We need a version of Test::More recent enough to support subtests
+use Test::More 0.98;
+
+our @EXPORT = qw(
+ generate_ascii_string
+ slurp_dir
+ slurp_file
+ append_to_file
+ string_replace_file
+ check_mode_recursive
+ chmod_recursive
+ check_pg_config
+ dir_symlink
+ scan_server_header
+ system_or_bail
+ system_log
+ run_log
+ run_command
+ pump_until
+
+ command_ok
+ command_fails
+ command_exit_is
+ program_help_ok
+ program_version_ok
+ program_options_handling_ok
+ command_like
+ command_like_safe
+ command_fails_like
+ command_checks_all
+
+ $windows_os
+ $is_msys2
+ $use_unix_sockets
+);
+
+our ($windows_os, $is_msys2, $use_unix_sockets, $timeout_default,
+ $tmp_check, $log_path, $test_logfile);
+
+BEGIN
+{
+
+ # Set to untranslated messages, to be able to compare program output
+ # with expected strings.
+ delete $ENV{LANGUAGE};
+ delete $ENV{LC_ALL};
+ $ENV{LC_MESSAGES} = 'C';
+ setlocale(LC_ALL, "");
+
+ # This list should be kept in sync with pg_regress.c.
+ my @envkeys = qw (
+ PGCHANNELBINDING
+ PGCLIENTENCODING
+ PGCONNECT_TIMEOUT
+ PGDATA
+ PGDATABASE
+ PGGSSDELEGATION
+ PGGSSENCMODE
+ PGGSSLIB
+ PGHOSTADDR
+ PGKRBSRVNAME
+ PGPASSFILE
+ PGPASSWORD
+ PGREQUIREPEER
+ PGREQUIRESSL
+ PGSERVICE
+ PGSERVICEFILE
+ PGSSLCERT
+ PGSSLCRL
+ PGSSLCRLDIR
+ PGSSLKEY
+ PGSSLMAXPROTOCOLVERSION
+ PGSSLMINPROTOCOLVERSION
+ PGSSLMODE
+ PGSSLROOTCERT
+ PGSSLSNI
+ PGTARGETSESSIONATTRS
+ PGUSER
+ PGPORT
+ PGHOST
+ PG_COLOR
+ );
+ delete @ENV{@envkeys};
+
+ $ENV{PGAPPNAME} = basename($0);
+
+ # Must be set early
+ $windows_os = $Config{osname} eq 'MSWin32' || $Config{osname} eq 'msys';
+ # Check if this environment is MSYS2.
+ $is_msys2 =
+ $windows_os
+ && -x '/usr/bin/uname'
+ && `uname -or` =~ /^[2-9].*Msys/;
+
+ if ($windows_os)
+ {
+ require Win32API::File;
+ Win32API::File->import(qw(createFile OsFHandleOpen CloseHandle));
+ }
+
+	# Specifies whether to use Unix sockets for test setups. On
+	# Windows we don't use them by default since they're not universally
+	# supported, but this can be overridden if desired.
+ $use_unix_sockets =
+ (!$windows_os || defined $ENV{PG_TEST_USE_UNIX_SOCKETS});
+
+ $timeout_default = $ENV{PG_TEST_TIMEOUT_DEFAULT};
+ $timeout_default = 180
+ if not defined $timeout_default or $timeout_default eq '';
+}
+
+=pod
+
+=head1 EXPORTED VARIABLES
+
+=over
+
+=item C<$windows_os>
+
+Set to true when running under Windows, except on Cygwin.
+
+=item C<$is_msys2>
+
+Set to true when running under MSYS2.
+
+=back
+
+=cut
+
+INIT
+{
+
+ # Return EPIPE instead of killing the process with SIGPIPE. An affected
+ # test may still fail, but it's more likely to report useful facts.
+ $SIG{PIPE} = 'IGNORE';
+
+ # Determine output directories, and create them. The base paths are the
+ # TESTDATADIR / TESTLOGDIR environment variables, which are normally set
+ # by the invoking Makefile.
+ $tmp_check = $ENV{TESTDATADIR} ? "$ENV{TESTDATADIR}" : "tmp_check";
+ $log_path = $ENV{TESTLOGDIR} ? "$ENV{TESTLOGDIR}" : "log";
+
+ mkdir $tmp_check;
+ mkdir $log_path;
+
+ # Open the test log file, whose name depends on the test name.
+ $test_logfile = basename($0);
+ $test_logfile =~ s/\.[^.]+$//;
+ $test_logfile = "$log_path/regress_log_$test_logfile";
+ open my $testlog, '>', $test_logfile
+ or die "could not open STDOUT to logfile \"$test_logfile\": $!";
+
+ # Hijack STDOUT and STDERR to the log file
+ open(my $orig_stdout, '>&', \*STDOUT);
+ open(my $orig_stderr, '>&', \*STDERR);
+ open(STDOUT, '>&', $testlog);
+ open(STDERR, '>&', $testlog);
+
+ # The test output (ok ...) needs to be printed to the original STDOUT so
+ # that the 'prove' program can parse it, and display it to the user in
+ # real time. But also copy it to the log file, to provide more context
+ # in the log.
+ my $builder = Test::More->builder;
+ my $fh = $builder->output;
+ tie *$fh, "PostgreSQL::Test::SimpleTee", $orig_stdout, $testlog;
+ $fh = $builder->failure_output;
+ tie *$fh, "PostgreSQL::Test::SimpleTee", $orig_stderr, $testlog;
+
+ # Enable auto-flushing for all the file handles. Stderr and stdout are
+ # redirected to the same file, and buffering causes the lines to appear
+ # in the log in confusing order.
+ autoflush STDOUT 1;
+ autoflush STDERR 1;
+ autoflush $testlog 1;
+}
+
+END
+{
+
+ # Test files have several ways of causing prove_check to fail:
+ # 1. Exit with a non-zero status.
+ # 2. Call ok(0) or similar, indicating that a constituent test failed.
+ # 3. Deviate from the planned number of tests.
+ #
+ # Preserve temporary directories after (1) and after (2).
+ $File::Temp::KEEP_ALL = 1 unless $? == 0 && all_tests_passing();
+}
+
+=pod
+
+=head1 ROUTINES
+
+=over
+
+=item all_tests_passing()
+
+Return 1 if all the tests run so far have passed. Otherwise, return 0.
+
+=cut
+
+sub all_tests_passing
+{
+ foreach my $status (Test::More->builder->summary)
+ {
+ return 0 unless $status;
+ }
+ return 1;
+}
+
+=pod
+
+=item tempdir(prefix)
+
+Securely create a temporary directory inside C<$tmp_check>, like C<mkdtemp>,
+and return its name. The directory will be removed automatically at the
+end of the tests, unless the environment variable PG_TEST_NOCLEAN is provided.
+
+If C<prefix> is given, the new directory is templated as C<${prefix}_XXXX>.
+Otherwise the template is C<tmp_test_XXXX>.
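+
+For example (an illustrative sketch, not taken from an actual test):
+
+  my $dir = PostgreSQL::Test::Utils::tempdir('mydata');
+  # $dir is removed automatically at exit unless PG_TEST_NOCLEAN is set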
+
+=cut
+
+sub tempdir
+{
+ my ($prefix) = @_;
+ $prefix = "tmp_test" unless defined $prefix;
+ return File::Temp::tempdir(
+ $prefix . '_XXXX',
+ DIR => $tmp_check,
+ CLEANUP => not defined $ENV{'PG_TEST_NOCLEAN'});
+}
+
+=pod
+
+=item tempdir_short()
+
+As above, but the directory is outside the build tree so that it has a short
+name, to avoid path length issues.
+
+=cut
+
+sub tempdir_short
+{
+
+ return File::Temp::tempdir(
+ CLEANUP => not defined $ENV{'PG_TEST_NOCLEAN'});
+}
+
+=pod
+
+=item has_wal_read_bug()
+
+Returns true if $tmp_check is subject to a sparc64+ext4 bug that causes WAL
+readers to see zeros if another process simultaneously wrote the same offsets.
+Consult this in tests that fail frequently on affected configurations. The
+bug has made streaming standbys fail to advance, reporting corrupt WAL. It
+has made COMMIT PREPARED fail with "could not read two-phase state from WAL".
+Non-WAL PostgreSQL reads haven't been affected, likely because those readers
+and writers have buffering systems in common. See
+https://postgr.es/m/20220116210241.GC756210@rfd.leadboat.com for details.
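+
+An affected test can skip itself with the usual C<skip_all> idiom, for
+instance (a sketch; the message text is illustrative):
+
+  if (PostgreSQL::Test::Utils::has_wal_read_bug())
+  {
+      plan skip_all => 'filesystem bug breaks concurrent WAL reads';
+  }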
+
+=cut
+
+sub has_wal_read_bug
+{
+ return
+ $Config{osname} eq 'linux'
+ && $Config{archname} =~ /^sparc/
+ && !run_log([ qw(df -x ext4), $tmp_check ], '>', '/dev/null', '2>&1');
+}
+
+=pod
+
+=item system_log(@cmd)
+
+Run (via C<system()>) the command passed as argument; the return
+value is passed through.
+
+=cut
+
+sub system_log
+{
+ print("# Running: " . join(" ", @_) . "\n");
+ return system(@_);
+}
+
+=pod
+
+=item system_or_bail(@cmd)
+
+Run (via C<system()>) the command passed as argument, and return
+if the command is successful.
+On failure, abandon further tests and exit the program.
+
+=cut
+
+sub system_or_bail
+{
+ if (system_log(@_) != 0)
+ {
+ if ($? == -1)
+ {
+ BAIL_OUT(
+ sprintf(
+ "failed to execute command \"%s\": $!", join(" ", @_)));
+ }
+ elsif ($? & 127)
+ {
+ BAIL_OUT(
+ sprintf(
+ "command \"%s\" died with signal %d",
+ join(" ", @_),
+ $? & 127));
+ }
+ else
+ {
+ BAIL_OUT(
+ sprintf(
+ "command \"%s\" exited with value %d",
+ join(" ", @_),
+ $? >> 8));
+ }
+ }
+}
+
+=pod
+
+=item run_log(@cmd)
+
+Run the given command via C<IPC::Run::run()>, noting it in the log.
+The return value from the command is passed through.
+
+=cut
+
+sub run_log
+{
+ print("# Running: " . join(" ", @{ $_[0] }) . "\n");
+ return IPC::Run::run(@_);
+}
+
+=pod
+
+=item run_command(cmd)
+
+Run (via C<IPC::Run::run()>) the command passed as argument.
+The command's exit status is ignored; the routine returns
+C<($stdout, $stderr)> from the command, with trailing newlines chomped.
+
+=cut
+
+sub run_command
+{
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+ chomp($stdout);
+ chomp($stderr);
+ return ($stdout, $stderr);
+}
+
+=pod
+
+=item pump_until(proc, timeout, stream, until)
+
+Pump the given C<IPC::Run> process until the given string or regular
+expression matches the referenced stream, or the timeout expires.
+Returns 1 on a match, 0 on timeout or unexpected process termination.
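+
+A typical call looks like this (a sketch only; the harness, timer and stream
+variables are assumed to be set up via C<IPC::Run>):
+
+  my ($in, $out) = ('hello world', '');
+  my $timer = IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default);
+  my $h = IPC::Run::start([ 'cat' ], '<', \$in, '>', \$out, $timer);
+  pump_until($h, $timer, \$out, qr/hello/);
+  $h->finish;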
+
+=cut
+
+sub pump_until
+{
+ my ($proc, $timeout, $stream, $until) = @_;
+ $proc->pump_nb();
+ while (1)
+ {
+ last if $$stream =~ /$until/;
+ if ($timeout->is_expired)
+ {
+ diag(
+ "pump_until: timeout expired when searching for \"$until\" with stream: \"$$stream\""
+ );
+ return 0;
+ }
+ if (not $proc->pumpable())
+ {
+ diag(
+ "pump_until: process terminated unexpectedly when searching for \"$until\" with stream: \"$$stream\""
+ );
+ return 0;
+ }
+ $proc->pump();
+ }
+ return 1;
+}
+
+=pod
+
+=item generate_ascii_string(from_char, to_char)
+
+Generate a string made of the given range of ASCII characters.
+
+=cut
+
+sub generate_ascii_string
+{
+ my ($from_char, $to_char) = @_;
+ my $res;
+
+ for my $i ($from_char .. $to_char)
+ {
+ $res .= sprintf("%c", $i);
+ }
+ return $res;
+}
+
+=pod
+
+=item slurp_dir(dir)
+
+Return the complete list of entries in the specified directory.
+
+=cut
+
+sub slurp_dir
+{
+ my ($dir) = @_;
+ opendir(my $dh, $dir)
+ or croak "could not opendir \"$dir\": $!";
+ my @direntries = readdir $dh;
+ closedir $dh;
+ return @direntries;
+}
+
+=pod
+
+=item slurp_file(filename [, $offset])
+
+Return the full contents of the specified file, beginning from an
+offset position if specified.
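+
+For instance, to read only what was appended to a file after a known point
+(C<$logfile> is an illustrative path):
+
+  my $offset = -s $logfile;
+  # ... something appends to $logfile ...
+  my $appended = slurp_file($logfile, $offset);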
+
+=cut
+
+sub slurp_file
+{
+ my ($filename, $offset) = @_;
+ local $/;
+ my $contents;
+ my $fh;
+
+ # On windows open file using win32 APIs, to allow us to set the
+ # FILE_SHARE_DELETE flag ("d" below), otherwise other accesses to the file
+ # may fail.
+ if ($Config{osname} ne 'MSWin32')
+ {
+ open($fh, '<', $filename)
+ or croak "could not read \"$filename\": $!";
+ }
+ else
+ {
+ my $fHandle = createFile($filename, "r", "rwd")
+ or croak "could not open \"$filename\": $^E";
+ OsFHandleOpen($fh = IO::Handle->new(), $fHandle, 'r')
+ or croak "could not read \"$filename\": $^E\n";
+ }
+
+ if (defined($offset))
+ {
+ seek($fh, $offset, SEEK_SET)
+ or croak "could not seek \"$filename\": $!";
+ }
+
+ $contents = <$fh>;
+ close $fh;
+
+ return $contents;
+}
+
+=pod
+
+=item append_to_file(filename, str)
+
+Append a string to the end of the given file. (Note: no trailing newline is
+added; include one in the string if needed.)
+
+=cut
+
+sub append_to_file
+{
+ my ($filename, $str) = @_;
+ open my $fh, ">>", $filename
+ or croak "could not write \"$filename\": $!";
+ print $fh $str;
+ close $fh;
+ return;
+}
+
+=pod
+
+=item string_replace_file(filename, find, replace)
+
+Find and replace a string in the given file, applying the substitution at
+most once per line.
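+
+For example (the file name and strings are illustrative):
+
+  string_replace_file($conffile, "listen_addresses = ''",
+    "listen_addresses = 'localhost'");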
+
+=cut
+
+sub string_replace_file
+{
+ my ($filename, $find, $replace) = @_;
+ open(my $in, '<', $filename);
+ my $content = '';
+ while (<$in>)
+ {
+ $_ =~ s/$find/$replace/;
+ $content = $content . $_;
+ }
+ close $in;
+ open(my $out, '>', $filename);
+ print $out $content;
+ close($out);
+
+ return;
+}
+
+=pod
+
+=item check_mode_recursive(dir, expected_dir_mode, expected_file_mode, ignore_list)
+
+Check that all file/dir modes in a directory match the expected values,
+ignoring files in C<ignore_list> (basename only).
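+
+For example (the data directory path is illustrative):
+
+  ok(check_mode_recursive($pgdata, 0700, 0600, ['postmaster.pid']),
+    'data directory has expected permissions');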
+
+=cut
+
+sub check_mode_recursive
+{
+ my ($dir, $expected_dir_mode, $expected_file_mode, $ignore_list) = @_;
+
+ # Result defaults to true
+ my $result = 1;
+
+ find(
+ {
+ follow_fast => 1,
+ wanted => sub {
+ # Is file in the ignore list?
+ foreach my $ignore ($ignore_list ? @{$ignore_list} : [])
+ {
+ if ("$dir/$ignore" eq $File::Find::name)
+ {
+ return;
+ }
+ }
+
+ # Allow ENOENT. A running server can delete files, such as
+ # those in pg_stat. Other stat() failures are fatal.
+ my $file_stat = stat($File::Find::name);
+ unless (defined($file_stat))
+ {
+ my $is_ENOENT = $!{ENOENT};
+ my $msg = "unable to stat $File::Find::name: $!";
+ if ($is_ENOENT)
+ {
+ warn $msg;
+ return;
+ }
+ else
+ {
+ die $msg;
+ }
+ }
+
+ my $file_mode = S_IMODE($file_stat->mode);
+
+ # Is this a file?
+ if (S_ISREG($file_stat->mode))
+ {
+ if ($file_mode != $expected_file_mode)
+ {
+					print STDERR sprintf(
+						"$File::Find::name mode must be %04o\n",
+						$expected_file_mode);
+
+ $result = 0;
+ return;
+ }
+ }
+
+ # Else a directory?
+ elsif (S_ISDIR($file_stat->mode))
+ {
+ if ($file_mode != $expected_dir_mode)
+ {
+					print STDERR sprintf(
+						"$File::Find::name mode must be %04o\n",
+						$expected_dir_mode);
+
+ $result = 0;
+ return;
+ }
+ }
+
+ # Else something we can't handle
+ else
+ {
+ die "unknown file type for $File::Find::name";
+ }
+ }
+ },
+ $dir);
+
+ return $result;
+}
+
+=pod
+
+=item chmod_recursive(dir, dir_mode, file_mode)
+
+Recursively C<chmod> each file and directory within the given directory to
+the given modes.
+
+=cut
+
+sub chmod_recursive
+{
+ my ($dir, $dir_mode, $file_mode) = @_;
+
+ find(
+ {
+ follow_fast => 1,
+ wanted => sub {
+ my $file_stat = stat($File::Find::name);
+
+ if (defined($file_stat))
+ {
+ chmod(
+ S_ISDIR($file_stat->mode) ? $dir_mode : $file_mode,
+ $File::Find::name
+ ) or die "unable to chmod $File::Find::name";
+ }
+ }
+ },
+ $dir);
+ return;
+}
+
+=pod
+
+=item scan_server_header(header_path, regexp)
+
+Returns an array that stores all the matches of the given regular expression
+within the PostgreSQL installation's C<header_path>. This can be used to
+retrieve specific value patterns from the installation's header files.
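+
+For example (the header name and pattern are illustrative):
+
+  my ($relkind) = scan_server_header('catalog/pg_class_d.h',
+    qr/#define\s+RELKIND_RELATION\s+'(\w)'/);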
+
+=cut
+
+sub scan_server_header
+{
+ my ($header_path, $regexp) = @_;
+
+ my ($stdout, $stderr);
+ my $result = IPC::Run::run [ 'pg_config', '--includedir-server' ], '>',
+ \$stdout, '2>', \$stderr
+ or die "could not execute pg_config";
+ chomp($stdout);
+ $stdout =~ s/\r$//;
+
+ open my $header_h, '<', "$stdout/$header_path" or die "$!";
+
+ my @match = undef;
+ while (<$header_h>)
+ {
+ my $line = $_;
+
+ if (@match = $line =~ /^$regexp/)
+ {
+ last;
+ }
+ }
+
+ close $header_h;
+ die "could not find match in header $header_path\n"
+ unless @match;
+ return @match;
+}
+
+=pod
+
+=item check_pg_config(regexp)
+
+Return the number of matches of the given regular expression
+within the installation's C<pg_config.h>.
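+
+A test can use this to skip itself when a build-time option is missing, for
+example (the symbol is illustrative):
+
+  if (!check_pg_config("#define USE_OPENSSL 1"))
+  {
+      plan skip_all => 'this build does not support SSL';
+  }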
+
+=cut
+
+sub check_pg_config
+{
+ my ($regexp) = @_;
+ my ($stdout, $stderr);
+ my $result = IPC::Run::run [ 'pg_config', '--includedir' ], '>',
+ \$stdout, '2>', \$stderr
+ or die "could not execute pg_config";
+ chomp($stdout);
+ $stdout =~ s/\r$//;
+
+ open my $pg_config_h, '<', "$stdout/pg_config.h" or die "$!";
+ my $match = (grep { /^$regexp/ } <$pg_config_h>);
+ close $pg_config_h;
+ return $match;
+}
+
+=pod
+
+=item dir_symlink(oldname, newname)
+
+Portably create a symlink for a directory. On Windows this creates a junction
+point. Elsewhere it just calls Perl's builtin C<symlink>.
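+
+For example (the paths are illustrative):
+
+  dir_symlink("$tempdir/ts1_dir", "$tempdir/ts1_link");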
+
+=cut
+
+sub dir_symlink
+{
+ my $oldname = shift;
+ my $newname = shift;
+ if ($windows_os)
+ {
+ $oldname =~ s,/,\\,g;
+ $newname =~ s,/,\\,g;
+ my $cmd = qq{mklink /j "$newname" "$oldname"};
+ if ($Config{osname} eq 'msys')
+ {
+ # need some indirection on msys
+ $cmd = qq{echo '$cmd' | \$COMSPEC /Q};
+ }
+ system($cmd);
+ }
+ else
+ {
+ symlink $oldname, $newname;
+ }
+ die "No $newname" unless -e $newname;
+}
+
+=pod
+
+=back
+
+=head1 Test::More-LIKE METHODS
+
+=over
+
+=item command_ok(cmd, test_name)
+
+Check that the command runs (via C<run_log>) successfully.
+
+=cut
+
+sub command_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $test_name) = @_;
+ my $result = run_log($cmd);
+ ok($result, $test_name);
+ return;
+}
+
+=pod
+
+=item command_fails(cmd, test_name)
+
+Check that the command fails (when run via C<run_log>).
+
+=cut
+
+sub command_fails
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $test_name) = @_;
+ my $result = run_log($cmd);
+ ok(!$result, $test_name);
+ return;
+}
+
+=pod
+
+=item command_exit_is(cmd, expected, test_name)
+
+Check that the command exit code matches the expected exit code.
+
+=cut
+
+sub command_exit_is
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $expected, $test_name) = @_;
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $h = IPC::Run::start $cmd;
+ $h->finish();
+
+ # Normally, if the child called exit(N), IPC::Run::result() returns N. On
+ # Windows, with IPC::Run v20220807.0 and earlier, full_results() is the
+ # method that returns N (https://github.com/toddr/IPC-Run/issues/161).
+ my $result =
+ ($Config{osname} eq "MSWin32" && $IPC::Run::VERSION <= 20220807.0)
+ ? ($h->full_results)[0]
+ : $h->result(0);
+ is($result, $expected, $test_name);
+ return;
+}
+
+=pod
+
+=item program_help_ok(cmd)
+
+Check that the command supports the C<--help> option.
+
+=cut
+
+sub program_help_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ print("# Running: $cmd --help\n");
+ my $result = IPC::Run::run [ $cmd, '--help' ], '>', \$stdout, '2>',
+ \$stderr;
+ ok($result, "$cmd --help exit code 0");
+ isnt($stdout, '', "$cmd --help goes to stdout");
+ is($stderr, '', "$cmd --help nothing to stderr");
+ return;
+}
+
+=pod
+
+=item program_version_ok(cmd)
+
+Check that the command supports the C<--version> option.
+
+=cut
+
+sub program_version_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ print("# Running: $cmd --version\n");
+ my $result = IPC::Run::run [ $cmd, '--version' ], '>', \$stdout, '2>',
+ \$stderr;
+ ok($result, "$cmd --version exit code 0");
+ isnt($stdout, '', "$cmd --version goes to stdout");
+ is($stderr, '', "$cmd --version nothing to stderr");
+ return;
+}
+
+=pod
+
+=item program_options_handling_ok(cmd)
+
+Check that a command with an invalid option returns a non-zero
+exit code and prints an error message to stderr.
+
+=cut
+
+sub program_options_handling_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ print("# Running: $cmd --not-a-valid-option\n");
+ my $result = IPC::Run::run [ $cmd, '--not-a-valid-option' ], '>',
+ \$stdout,
+ '2>', \$stderr;
+ ok(!$result, "$cmd with invalid option nonzero exit code");
+ isnt($stderr, '', "$cmd with invalid option prints error message");
+ return;
+}
+
+=pod
+
+=item command_like(cmd, expected_stdout, test_name)
+
+Check that the command runs successfully and the output
+matches the given regular expression.
+
+=cut
+
+sub command_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $expected_stdout, $test_name) = @_;
+ my ($stdout, $stderr);
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+ ok($result, "$test_name: exit code 0");
+ is($stderr, '', "$test_name: no stderr");
+ like($stdout, $expected_stdout, "$test_name: matches");
+ return;
+}
+
+=pod
+
+=item command_like_safe(cmd, expected_stdout, test_name)
+
+Check that the command runs successfully and the output
+matches the given regular expression. Doesn't assume that the
+output files are closed.
+
+=cut
+
+sub command_like_safe
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ # Doesn't rely on detecting end of file on the file descriptors,
+ # which can fail, causing the process to hang, notably on Msys
+ # when used with 'pg_ctl start'
+ my ($cmd, $expected_stdout, $test_name) = @_;
+ my ($stdout, $stderr);
+ my $stdoutfile = File::Temp->new();
+ my $stderrfile = File::Temp->new();
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $result = IPC::Run::run $cmd, '>', $stdoutfile, '2>', $stderrfile;
+ $stdout = slurp_file($stdoutfile);
+ $stderr = slurp_file($stderrfile);
+ ok($result, "$test_name: exit code 0");
+ is($stderr, '', "$test_name: no stderr");
+ like($stdout, $expected_stdout, "$test_name: matches");
+ return;
+}
+
+=pod
+
+=item command_fails_like(cmd, expected_stderr, test_name)
+
+Check that the command fails and the error message matches
+the given regular expression.
+
+=cut
+
+sub command_fails_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $expected_stderr, $test_name) = @_;
+ my ($stdout, $stderr);
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+ ok(!$result, "$test_name: exit code not 0");
+ like($stderr, $expected_stderr, "$test_name: matches");
+ return;
+}
+
+=pod
+
+=item command_checks_all(cmd, ret, out, err, test_name)
+
+Run a command and check its status and outputs.
+Arguments:
+
+=over
+
+=item C<cmd>: Array reference of command and arguments to run
+
+=item C<ret>: Expected exit code
+
+=item C<out>: Array reference of regular expressions that stdout must match
+
+=item C<err>: Array reference of regular expressions that stderr must match
+
+=item C<test_name>: test name
+
+=back
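+
+For example (the program name and patterns are hypothetical):
+
+  command_checks_all(
+    [ 'some_program', '--version' ],
+    0,
+    [ qr/some_program \(PostgreSQL\) \d+/ ],
+    [ qr/^$/ ],
+    'some_program --version');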
+
+=cut
+
+sub command_checks_all
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my ($cmd, $expected_ret, $out, $err, $test_name) = @_;
+
+ # run command
+ my ($stdout, $stderr);
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ IPC::Run::run($cmd, '>', \$stdout, '2>', \$stderr);
+
+ # See http://perldoc.perl.org/perlvar.html#%24CHILD_ERROR
+ my $ret = $?;
+ die "command exited with signal " . ($ret & 127)
+ if $ret & 127;
+ $ret = $ret >> 8;
+
+ # check status
+ ok($ret == $expected_ret,
+ "$test_name status (got $ret vs expected $expected_ret)");
+
+ # check stdout
+ for my $re (@$out)
+ {
+ like($stdout, $re, "$test_name stdout /$re/");
+ }
+
+ # check stderr
+ for my $re (@$err)
+ {
+ like($stderr, $re, "$test_name stderr /$re/");
+ }
+
+ return;
+}
+
+=pod
+
+=back
+
+=cut
+
+1;
diff --git a/src/test/perl/PostgreSQL/Version.pm b/src/test/perl/PostgreSQL/Version.pm
new file mode 100644
index 0000000..3705c1b
--- /dev/null
+++ b/src/test/perl/PostgreSQL/Version.pm
@@ -0,0 +1,167 @@
+############################################################################
+#
+# PostgreSQL/Version.pm
+#
+# Module encapsulating Postgres Version numbers
+#
+# Copyright (c) 2021-2023, PostgreSQL Global Development Group
+#
+############################################################################
+
+=pod
+
+=head1 NAME
+
+PostgreSQL::Version - class representing PostgreSQL version numbers
+
+=head1 SYNOPSIS
+
+ use PostgreSQL::Version;
+
+ my $version = PostgreSQL::Version->new($version_arg);
+
+ # compare two versions
+ my $bool = $version1 <= $version2;
+
+ # or compare with a number
+ $bool = $version < 12;
+
+ # or with a string
+ $bool = $version lt "13.1";
+
+ # interpolate in a string
+ my $stringyval = "version: $version";
+
+ # get the major version
+ my $maj = $version->major;
+
+=head1 DESCRIPTION
+
+PostgreSQL::Version encapsulates Postgres version numbers, providing parsing
+of common version formats and comparison operations.
+
+=cut
+
+package PostgreSQL::Version;
+
+use strict;
+use warnings;
+
+use Scalar::Util qw(blessed);
+
+use overload
+ '<=>' => \&_version_cmp,
+ 'cmp' => \&_version_cmp,
+ '""' => \&_stringify;
+
+=pod
+
+=head1 METHODS
+
+=over
+
+=item PostgreSQL::Version->new($version)
+
+Create a new PostgreSQL::Version instance.
+
+The argument can be a number like 12, a string like '12.2', or the output
+of a Postgres command like C<psql --version> or C<pg_config --version>.
+
+=back
+
+=cut
+
+sub new
+{
+ my $class = shift;
+ my $arg = shift;
+
+ chomp $arg;
+
+ # Accept standard formats, in case caller has handed us the output of a
+ # postgres command line tool
+ my $devel;
+ ($arg, $devel) = ($1, $2)
+ if (
+ $arg =~ m!^ # beginning of line
+ (?:\(?PostgreSQL\)?\s)? # ignore PostgreSQL marker
+ (\d+(?:\.\d+)*) # version number, dotted notation
+ (devel|(?:alpha|beta|rc)\d+)? # dev marker - see version_stamp.pl
+ !x);
+
+ # Split into an array
+ my @numbers = split(/\./, $arg);
+
+ # Treat development versions as having a minor/micro version one less than
+ # the first released version of that branch.
+ push @numbers, -1 if ($devel);
+
+ $devel ||= "";
+
+ return bless { str => "$arg$devel", num => \@numbers }, $class;
+}
+
+# Routine which compares the version number arrays stored for the two
+# arguments and returns -1, 0, or 1, allowing comparison between two
+# PostgreSQL::Version objects or a PostgreSQL::Version and a version string
+# or number.
+#
+# If the second argument is not a blessed object we call the constructor
+# to make one.
+#
+# Because we're overloading '<=>' and 'cmp' this function supplies us with
+# all the comparison operators ('<' and friends, 'gt' and friends)
+#
+sub _version_cmp
+{
+ my ($a, $b, $swapped) = @_;
+
+ $b = __PACKAGE__->new($b) unless blessed($b);
+
+ ($a, $b) = ($b, $a) if $swapped;
+
+ my ($an, $bn) = ($a->{num}, $b->{num});
+
+ for (my $idx = 0;; $idx++)
+ {
+ return 0
+ if ($idx >= @$an && $idx >= @$bn);
+ # treat a missing number as 0
+ my ($anum, $bnum) = ($an->[$idx] || 0, $bn->[$idx] || 0);
+ return $anum <=> $bnum
+ if ($anum <=> $bnum);
+ }
+}
+
+# Render the version number using the saved string.
+sub _stringify
+{
+ my $self = shift;
+ return $self->{str};
+}
+
+=pod
+
+=over
+
+=item major([separator => 'char'])
+
+Returns the major version. For versions before 10 the parts are separated by
+a dot unless the separator argument is given.
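+
+For example:
+
+  PostgreSQL::Version->new('9.6.24')->major;                   # "9.6"
+  PostgreSQL::Version->new('9.6.24')->major(separator => '_'); # "9_6"
+  PostgreSQL::Version->new('16.2')->major;                     # 16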
+
+=back
+
+=cut
+
+sub major
+{
+ my ($self, %params) = @_;
+ my $result = $self->{num}->[0];
+ if ($result + 0 < 10)
+ {
+ my $sep = $params{separator} || '.';
+ $result .= "$sep$self->{num}->[1]";
+ }
+ return $result;
+}
+
+1;