author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-04 12:15:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-04 12:15:05 +0000
commit     46651ce6fe013220ed397add242004d764fc0153 (patch)
tree       6e5299f990f88e60174a1d3ae6e48eedd2688b2b  /src/test/perl
parent     Initial commit. (diff)
Adding upstream version 14.5.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--  src/test/perl/Makefile                       35
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm     13
-rw-r--r--  src/test/perl/PostgreSQL/Test/Utils.pm       11
-rw-r--r--  src/test/perl/PostgresNode.pm              2810
-rw-r--r--  src/test/perl/PostgresVersion.pm            136
-rw-r--r--  src/test/perl/README                         97
-rw-r--r--  src/test/perl/RecursiveCopy.pm              157
-rw-r--r--  src/test/perl/SimpleTee.pm                   35
-rw-r--r--  src/test/perl/TestLib.pm                    982
9 files changed, 4276 insertions, 0 deletions
diff --git a/src/test/perl/Makefile b/src/test/perl/Makefile
new file mode 100644
index 0000000..3d3a95b
--- /dev/null
+++ b/src/test/perl/Makefile
@@ -0,0 +1,35 @@
+#-------------------------------------------------------------------------
+#
+# Makefile for src/test/perl
+#
+# Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+#
+# src/test/perl/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/test/perl
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+ifeq ($(enable_tap_tests),yes)
+
+installdirs:
+ $(MKDIR_P) '$(DESTDIR)$(pgxsdir)/$(subdir)'
+
+install: all installdirs
+ $(INSTALL_DATA) $(srcdir)/TestLib.pm '$(DESTDIR)$(pgxsdir)/$(subdir)/TestLib.pm'
+ $(INSTALL_DATA) $(srcdir)/SimpleTee.pm '$(DESTDIR)$(pgxsdir)/$(subdir)/SimpleTee.pm'
+ $(INSTALL_DATA) $(srcdir)/RecursiveCopy.pm '$(DESTDIR)$(pgxsdir)/$(subdir)/RecursiveCopy.pm'
+ $(INSTALL_DATA) $(srcdir)/PostgresNode.pm '$(DESTDIR)$(pgxsdir)/$(subdir)/PostgresNode.pm'
+ $(INSTALL_DATA) $(srcdir)/PostgresVersion.pm '$(DESTDIR)$(pgxsdir)/$(subdir)/PostgresVersion.pm'
+
+uninstall:
+ rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/TestLib.pm'
+ rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/SimpleTee.pm'
+ rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/RecursiveCopy.pm'
+ rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/PostgresNode.pm'
+ rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/PostgresVersion.pm'
+
+endif
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
new file mode 100644
index 0000000..14e9138
--- /dev/null
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -0,0 +1,13 @@
+
+# Copyright (c) 2022, PostgreSQL Global Development Group
+
+# Allow use of release 15+ Perl package name in older branches, by giving that
+# package the same symbol table as the older package. See PostgresNode::new
+# for supporting heuristics.
+
+use strict;
+use warnings;
+BEGIN { *PostgreSQL::Test::Cluster:: = \*PostgresNode::; }
+use PostgresNode ();
+
+1;
diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
new file mode 100644
index 0000000..2d15bbf
--- /dev/null
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -0,0 +1,11 @@
+# Copyright (c) 2022, PostgreSQL Global Development Group
+
+# Allow use of release 15+ Perl package name in older branches, by giving that
+# package the same symbol table as the older package.
+
+use strict;
+use warnings;
+BEGIN { *PostgreSQL::Test::Utils:: = \*TestLib::; }
+use TestLib ();
+
+1;
diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm
new file mode 100644
index 0000000..4b17048
--- /dev/null
+++ b/src/test/perl/PostgresNode.pm
@@ -0,0 +1,2810 @@
+
+# Copyright (c) 2021, PostgreSQL Global Development Group
+
+=pod
+
+=head1 NAME
+
+PostgresNode - class representing PostgreSQL server instance
+
+=head1 SYNOPSIS
+
+ use PostgresNode;
+
+ my $node = PostgresNode->get_new_node('mynode');
+
+ # Create a data directory with initdb
+ $node->init();
+
+ # Start the PostgreSQL server
+ $node->start();
+
+ # Change a setting and restart
+ $node->append_conf('postgresql.conf', 'hot_standby = on');
+ $node->restart();
+
+ # run a query with psql, like:
+ # echo 'SELECT 1' | psql -qAXt postgres -v ON_ERROR_STOP=1
+ $psql_stdout = $node->safe_psql('postgres', 'SELECT 1');
+
+ # Run psql with a timeout, capturing stdout and stderr
+ # as well as the psql exit code. Pass some extra psql
+ # options. If there's an error from psql raise an exception.
+ my ($stdout, $stderr, $timed_out);
+ my $cmdret = $node->psql('postgres', 'SELECT pg_sleep(600)',
+ stdout => \$stdout, stderr => \$stderr,
+ timeout => $TestLib::timeout_default,
+ timed_out => \$timed_out,
+ extra_params => ['--single-transaction'],
+	on_error_die => 1);
+ print "Sleep timed out" if $timed_out;
+
+ # Similar thing, more convenient in common cases
+ my ($cmdret, $stdout, $stderr) =
+ $node->psql('postgres', 'SELECT 1');
+
+ # run query every second until it returns 't'
+ # or times out
+ $node->poll_query_until('postgres', q|SELECT random() < 0.1;|)
+ or die "timed out";
+
+ # Do an online pg_basebackup
+ my $ret = $node->backup('testbackup1');
+
+ # Take a backup of a running server
+ my $ret = $node->backup_fs_hot('testbackup2');
+
+ # Take a backup of a stopped server
+ $node->stop;
+ my $ret = $node->backup_fs_cold('testbackup3');
+
+ # Restore it to create a new independent node (not a replica)
+ my $replica = get_new_node('replica');
+ $replica->init_from_backup($node, 'testbackup');
+ $replica->start;
+
+ # Stop the server
+ $node->stop('fast');
+
+ # Find a free, unprivileged TCP port to bind some other service to
+ my $port = get_free_port();
+
+=head1 DESCRIPTION
+
+PostgresNode contains a set of routines for working with a PostgreSQL node,
+allowing one to start, stop, back up and initialize it with various options.
+The set of nodes managed by a given test is also managed by this module.
+
+In addition to node management, PostgresNode instances have some wrappers
+around Test::More functions to run commands with an environment set up to
+point to the instance.
+
+The IPC::Run module is required.
+
+=cut
+
+package PostgresNode;
+
+use strict;
+use warnings;
+
+use Carp;
+use Config;
+use Cwd;
+use Exporter 'import';
+use Fcntl qw(:mode);
+use File::Basename;
+use File::Path qw(rmtree);
+use File::Spec;
+use File::stat qw(stat);
+use File::Temp ();
+use IPC::Run;
+use PostgresVersion;
+use RecursiveCopy;
+use Socket;
+use Test::More;
+use TestLib ();
+use Time::HiRes qw(usleep);
+use Scalar::Util qw(blessed);
+
+our @EXPORT = qw(
+ get_new_node
+ get_free_port
+);
+
+our ($use_tcp, $test_localhost, $test_pghost, $last_host_assigned,
+ $last_port_assigned, @all_nodes, $died);
+
+INIT
+{
+
+ # Set PGHOST for backward compatibility. This doesn't work for own_host
+ # nodes, so prefer to not rely on this when writing new tests.
+ $use_tcp = !$TestLib::use_unix_sockets;
+ $test_localhost = "127.0.0.1";
+ $last_host_assigned = 1;
+ $test_pghost = $use_tcp ? $test_localhost : TestLib::tempdir_short;
+ $ENV{PGHOST} = $test_pghost;
+ $ENV{PGDATABASE} = 'postgres';
+
+ # Tracking of last port value assigned to accelerate free port lookup.
+ $last_port_assigned = int(rand() * 16384) + 49152;
+}
+
+=pod
+
+=head1 METHODS
+
+=over
+
+=item PostgresNode::new($class, $name, $pghost, $pgport)
+
+Create a new PostgresNode instance. Does not initdb or start it.
+
+You should generally prefer to use get_new_node() instead since it takes care
+of finding port numbers, registering instances for cleanup, etc.
+
+=cut
+
+sub new
+{
+ my ($class, $name, $pghost, $pgport) = @_;
+
+ # Use release 15+ semantics when the arguments look like (node_name,
+ # %params). We can't use $class to decide, because get_new_node() passes
+ # a v14- argument list regardless of the class. $class might be an
+ # out-of-core subclass. $class->isa('PostgresNode') returns true even for
+ # descendants of PostgreSQL::Test::Cluster, so it doesn't help.
+ return $class->get_new_node(@_[ 1 .. $#_ ])
+ if !$pghost
+ or !$pgport
+	  or $pghost =~ /^[a-zA-Z0-9_]+$/;
+
+ my $testname = basename($0);
+ $testname =~ s/\.[^.]+$//;
+ my $self = {
+ _port => $pgport,
+ _host => $pghost,
+ _basedir => "$TestLib::tmp_check/t_${testname}_${name}_data",
+ _name => $name,
+ _logfile_generation => 0,
+ _logfile_base => "$TestLib::log_path/${testname}_${name}",
+ _logfile => "$TestLib::log_path/${testname}_${name}.log"
+ };
+
+ bless $self, $class;
+ mkdir $self->{_basedir}
+ or
+ BAIL_OUT("could not create data directory \"$self->{_basedir}\": $!");
+ $self->dump_info;
+
+ return $self;
+}
+
+=pod
+
+=item $node->port()
+
+Get the port number assigned to the host. This won't necessarily be a TCP port
+open on the local host since we prefer to use unix sockets if possible.
+
+Use $node->connstr() if you want a connection string.
+
+=cut
+
+sub port
+{
+ my ($self) = @_;
+ return $self->{_port};
+}
+
+=pod
+
+=item $node->host()
+
+Return the host (like PGHOST) for this instance. May be a UNIX socket path.
+
+Use $node->connstr() if you want a connection string.
+
+=cut
+
+sub host
+{
+ my ($self) = @_;
+ return $self->{_host};
+}
+
+=pod
+
+=item $node->basedir()
+
+The directory all the node's files will be within - datadir, archive directory,
+backups, etc.
+
+=cut
+
+sub basedir
+{
+ my ($self) = @_;
+ return $self->{_basedir};
+}
+
+=pod
+
+=item $node->name()
+
+The name assigned to the node at creation time.
+
+=cut
+
+sub name
+{
+ my ($self) = @_;
+ return $self->{_name};
+}
+
+=pod
+
+=item $node->logfile()
+
+Path to the PostgreSQL log file for this instance.
+
+=cut
+
+sub logfile
+{
+ my ($self) = @_;
+ return $self->{_logfile};
+}
+
+=pod
+
+=item $node->connstr()
+
+Get a libpq connection string that will establish a connection to
+this node. Suitable for passing to psql, DBD::Pg, etc.
+
+=cut
+
+sub connstr
+{
+ my ($self, $dbname) = @_;
+ my $pgport = $self->port;
+ my $pghost = $self->host;
+ if (!defined($dbname))
+ {
+ return "port=$pgport host=$pghost";
+ }
+
+ # Escape properly the database string before using it, only
+ # single quotes and backslashes need to be treated this way.
+ $dbname =~ s#\\#\\\\#g;
+ $dbname =~ s#\'#\\\'#g;
+
+ return "port=$pgport host=$pghost dbname='$dbname'";
+}
+
+=pod
+
+=item $node->group_access()
+
+Does the data dir allow group access?
+
+=cut
+
+sub group_access
+{
+ my ($self) = @_;
+
+ my $dir_stat = stat($self->data_dir);
+
+ defined($dir_stat)
+ or die('unable to stat ' . $self->data_dir);
+
+ return (S_IMODE($dir_stat->mode) == 0750);
+}
+
+=pod
+
+=item $node->data_dir()
+
+Returns the path to the data directory. postgresql.conf and pg_hba.conf are
+always here.
+
+=cut
+
+sub data_dir
+{
+ my ($self) = @_;
+ my $res = $self->basedir;
+ return "$res/pgdata";
+}
+
+=pod
+
+=item $node->archive_dir()
+
+If archiving is enabled, WAL files go here.
+
+=cut
+
+sub archive_dir
+{
+ my ($self) = @_;
+ my $basedir = $self->basedir;
+ return "$basedir/archives";
+}
+
+=pod
+
+=item $node->backup_dir()
+
+The output path for backups taken with $node->backup()
+
+=cut
+
+sub backup_dir
+{
+ my ($self) = @_;
+ my $basedir = $self->basedir;
+ return "$basedir/backup";
+}
+
+=pod
+
+=item $node->info()
+
+Return a string containing human-readable diagnostic information (paths, etc)
+about this node.
+
+=cut
+
+sub info
+{
+ my ($self) = @_;
+ my $_info = '';
+ open my $fh, '>', \$_info or die;
+ print $fh "Name: " . $self->name . "\n";
+ print $fh "Version: " . $self->{_pg_version} . "\n"
+ if $self->{_pg_version};
+ print $fh "Data directory: " . $self->data_dir . "\n";
+ print $fh "Backup directory: " . $self->backup_dir . "\n";
+ print $fh "Archive directory: " . $self->archive_dir . "\n";
+ print $fh "Connection string: " . $self->connstr . "\n";
+ print $fh "Log file: " . $self->logfile . "\n";
+ print $fh "Install Path: ", $self->{_install_path} . "\n"
+ if $self->{_install_path};
+ close $fh or die;
+ return $_info;
+}
+
+=pod
+
+=item $node->dump_info()
+
+Print $node->info()
+
+=cut
+
+sub dump_info
+{
+ my ($self) = @_;
+ print $self->info;
+ return;
+}
+
+
+# Internal method to set up trusted pg_hba.conf for replication. Not
+# documented because you shouldn't use it; it's called automatically if needed.
+sub set_replication_conf
+{
+ my ($self) = @_;
+ my $pgdata = $self->data_dir;
+
+ $self->host eq $test_pghost
+ or croak "set_replication_conf only works with the default host";
+
+ open my $hba, '>>', "$pgdata/pg_hba.conf";
+ print $hba "\n# Allow replication (set up by PostgresNode.pm)\n";
+ if ($TestLib::windows_os && !$TestLib::use_unix_sockets)
+ {
+ print $hba
+ "host replication all $test_localhost/32 sspi include_realm=1 map=regress\n";
+ }
+ close $hba;
+ return;
+}
+
+=pod
+
+=item $node->init(...)
+
+Initialize a new cluster for testing.
+
+Authentication is set up so that only the current OS user can access the
+cluster. On Unix, we use Unix domain socket connections, with the socket in
+a directory that's only accessible to the current user to ensure that.
+On Windows, we use SSPI authentication to ensure the same (by pg_regress
+--config-auth).
+
+WAL archiving can be enabled on this node by passing the keyword parameter
+has_archiving => 1. This is disabled by default.
+
+postgresql.conf can be set up for replication by passing the keyword
+parameter allows_streaming => 'logical' or 'physical' (passing 1 will also
+suffice for physical replication) depending on type of replication that
+should be enabled. This is disabled by default.
+
+The new node is set up in a fast but unsafe configuration where fsync is
+disabled.
+
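+For example, a primary capable of physical replication with WAL archiving
+enabled could be set up like this (a sketch; the node name is illustrative):
+
+  my $primary = get_new_node('primary');
+  $primary->init(allows_streaming => 1, has_archiving => 1);
+  $primary->start;
+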
+=cut
+
+sub init
+{
+ my ($self, %params) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $host = $self->host;
+
+ local %ENV = $self->_get_env();
+
+ $params{allows_streaming} = 0 unless defined $params{allows_streaming};
+ $params{has_archiving} = 0 unless defined $params{has_archiving};
+
+ mkdir $self->backup_dir;
+ mkdir $self->archive_dir;
+
+ TestLib::system_or_bail('initdb', '-D', $pgdata, '-A', 'trust', '-N',
+ @{ $params{extra} });
+ TestLib::system_or_bail($ENV{PG_REGRESS}, '--config-auth', $pgdata,
+ @{ $params{auth_extra} });
+
+ open my $conf, '>>', "$pgdata/postgresql.conf";
+ print $conf "\n# Added by PostgresNode.pm\n";
+ print $conf "fsync = off\n";
+ print $conf "restart_after_crash = off\n";
+ print $conf "log_line_prefix = '%m [%p] %q%a '\n";
+ print $conf "log_statement = all\n";
+ print $conf "log_replication_commands = on\n";
+ print $conf "wal_retrieve_retry_interval = '500ms'\n";
+
+ # If a setting tends to affect whether tests pass or fail, print it after
+ # TEMP_CONFIG. Otherwise, print it before TEMP_CONFIG, thereby permitting
+ # overrides. Settings that merely improve performance or ease debugging
+ # belong before TEMP_CONFIG.
+ print $conf TestLib::slurp_file($ENV{TEMP_CONFIG})
+ if defined $ENV{TEMP_CONFIG};
+
+ # XXX Neutralize any stats_temp_directory in TEMP_CONFIG. Nodes running
+ # concurrently must not share a stats_temp_directory.
+ print $conf "stats_temp_directory = 'pg_stat_tmp'\n";
+
+ if ($params{allows_streaming})
+ {
+ if ($params{allows_streaming} eq "logical")
+ {
+ print $conf "wal_level = logical\n";
+ }
+ else
+ {
+ print $conf "wal_level = replica\n";
+ }
+ print $conf "max_wal_senders = 10\n";
+ print $conf "max_replication_slots = 10\n";
+ print $conf "wal_log_hints = on\n";
+ print $conf "hot_standby = on\n";
+ # conservative settings to ensure we can run multiple postmasters:
+ print $conf "shared_buffers = 1MB\n";
+ print $conf "max_connections = 10\n";
+ # limit disk space consumption, too:
+ print $conf "max_wal_size = 128MB\n";
+ }
+ else
+ {
+ print $conf "wal_level = minimal\n";
+ print $conf "max_wal_senders = 0\n";
+ }
+
+ print $conf "port = $port\n";
+ if ($use_tcp)
+ {
+ print $conf "unix_socket_directories = ''\n";
+ print $conf "listen_addresses = '$host'\n";
+ }
+ else
+ {
+ print $conf "unix_socket_directories = '$host'\n";
+ print $conf "listen_addresses = ''\n";
+ }
+ close $conf;
+
+ chmod($self->group_access ? 0640 : 0600, "$pgdata/postgresql.conf")
+ or die("unable to set permissions for $pgdata/postgresql.conf");
+
+ $self->set_replication_conf if $params{allows_streaming};
+ $self->enable_archiving if $params{has_archiving};
+ return;
+}
+
+=pod
+
+=item $node->append_conf(filename, str)
+
+A shortcut method to append to files like pg_hba.conf and postgresql.conf.
+
+Does no validation or sanity checking. Does not reload the configuration
+after writing.
+
+A newline is automatically appended to the string.
+
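+For example, to change a setting and make the running server pick it up
+(a sketch; the particular setting is illustrative):
+
+  $node->append_conf('postgresql.conf', 'log_min_messages = debug1');
+  $node->reload;
+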
+=cut
+
+sub append_conf
+{
+ my ($self, $filename, $str) = @_;
+
+ my $conffile = $self->data_dir . '/' . $filename;
+
+ TestLib::append_to_file($conffile, $str . "\n");
+
+ chmod($self->group_access() ? 0640 : 0600, $conffile)
+ or die("unable to set permissions for $conffile");
+
+ return;
+}
+
+=pod
+
+=item $node->backup(backup_name)
+
+Create a hot backup with B<pg_basebackup> in subdirectory B<backup_name> of
+B<< $node->backup_dir >>, including the WAL.
+
+By default, WAL files are fetched at the end of the backup, not streamed.
+You can adjust that and other things by passing an array of additional
+B<pg_basebackup> command line options in the keyword parameter backup_options.
+
+You'll have to configure a suitable B<max_wal_senders> on the
+target server since it isn't done by default.
+
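+For example, extra pg_basebackup options can be passed like this (a sketch;
+the backup name and option are illustrative):
+
+  $node->backup('fetch_backup',
+      backup_options => [ '--wal-method=fetch' ]);
+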
+=cut
+
+sub backup
+{
+ my ($self, $backup_name, %params) = @_;
+ my $backup_path = $self->backup_dir . '/' . $backup_name;
+ my $name = $self->name;
+
+ local %ENV = $self->_get_env();
+
+ print "# Taking pg_basebackup $backup_name from node \"$name\"\n";
+ TestLib::system_or_bail(
+ 'pg_basebackup', '-D',
+ $backup_path, '-h',
+ $self->host, '-p',
+ $self->port, '--checkpoint',
+ 'fast', '--no-sync',
+ @{ $params{backup_options} });
+ print "# Backup finished\n";
+ return;
+}
+
+=item $node->backup_fs_hot(backup_name)
+
+Create a backup with a filesystem level copy in subdirectory B<backup_name> of
+B<< $node->backup_dir >>, including WAL.
+
+Archiving must be enabled, as B<pg_start_backup()> and B<pg_stop_backup()> are
+used. This is not checked or enforced.
+
+The backup name is passed as the backup label to B<pg_start_backup()>.
+
+=cut
+
+sub backup_fs_hot
+{
+ my ($self, $backup_name) = @_;
+ $self->_backup_fs($backup_name, 1);
+ return;
+}
+
+=item $node->backup_fs_cold(backup_name)
+
+Create a backup with a filesystem level copy in subdirectory B<backup_name> of
+B<< $node->backup_dir >>, including WAL. The server must be
+stopped as no attempt to handle concurrent writes is made.
+
+Use B<backup> or B<backup_fs_hot> if you want to back up a running server.
+
+=cut
+
+sub backup_fs_cold
+{
+ my ($self, $backup_name) = @_;
+ $self->_backup_fs($backup_name, 0);
+ return;
+}
+
+
+# Common sub of backup_fs_hot and backup_fs_cold
+sub _backup_fs
+{
+ my ($self, $backup_name, $hot) = @_;
+ my $backup_path = $self->backup_dir . '/' . $backup_name;
+ my $port = $self->port;
+ my $name = $self->name;
+
+ print "# Taking filesystem backup $backup_name from node \"$name\"\n";
+
+ if ($hot)
+ {
+ my $stdout = $self->safe_psql('postgres',
+ "SELECT * FROM pg_start_backup('$backup_name');");
+ print "# pg_start_backup: $stdout\n";
+ }
+
+ RecursiveCopy::copypath(
+ $self->data_dir,
+ $backup_path,
+ filterfn => sub {
+ my $src = shift;
+ return ($src ne 'log' and $src ne 'postmaster.pid');
+ });
+
+ if ($hot)
+ {
+
+ # We ignore pg_stop_backup's return value. We also assume archiving
+ # is enabled; otherwise the caller will have to copy the remaining
+ # segments.
+ my $stdout =
+ $self->safe_psql('postgres', 'SELECT * FROM pg_stop_backup();');
+ print "# pg_stop_backup: $stdout\n";
+ }
+
+ print "# Backup finished\n";
+ return;
+}
+
+
+
+=pod
+
+=item $node->init_from_backup(root_node, backup_name)
+
+Initialize a node from a backup, which may come from this node or a different
+node. root_node must be a PostgresNode reference, backup_name the string name
+of a backup previously created on that node with $node->backup.
+
+Does not start the node after initializing it.
+
+By default, the backup is assumed to be plain format. To restore from
+a tar-format backup, pass the name of the tar program to use in the
+keyword parameter tar_program. Note that tablespace tar files aren't
+handled here.
+
+Streaming replication can be enabled on this node by passing the keyword
+parameter has_streaming => 1. This is disabled by default.
+
+Restoring WAL segments from archives using restore_command can be enabled
+by passing the keyword parameter has_restoring => 1. This is disabled by
+default.
+
+If has_restoring is used, standby mode is used by default. To use
+recovery mode instead, pass the keyword parameter standby => 0.
+
+The backup is copied, leaving the original unmodified. pg_hba.conf is
+unconditionally set to enable replication connections.
+
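+A common pattern is to build a streaming standby from a backup of a running
+node (a sketch; node and backup names are illustrative):
+
+  $node->backup('mybackup');
+  my $standby = get_new_node('standby');
+  $standby->init_from_backup($node, 'mybackup', has_streaming => 1);
+  $standby->start;
+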
+=cut
+
+sub init_from_backup
+{
+ my ($self, $root_node, $backup_name, %params) = @_;
+ my $backup_path = $root_node->backup_dir . '/' . $backup_name;
+ my $host = $self->host;
+ my $port = $self->port;
+ my $node_name = $self->name;
+ my $root_name = $root_node->name;
+
+ $params{has_streaming} = 0 unless defined $params{has_streaming};
+ $params{has_restoring} = 0 unless defined $params{has_restoring};
+ $params{standby} = 1 unless defined $params{standby};
+
+ print
+ "# Initializing node \"$node_name\" from backup \"$backup_name\" of node \"$root_name\"\n";
+ croak "Backup \"$backup_name\" does not exist at $backup_path"
+ unless -d $backup_path;
+
+ mkdir $self->backup_dir;
+ mkdir $self->archive_dir;
+
+ my $data_path = $self->data_dir;
+ if (defined $params{tar_program})
+ {
+ mkdir($data_path);
+ TestLib::system_or_bail($params{tar_program}, 'xf',
+ $backup_path . '/base.tar',
+ '-C', $data_path);
+ TestLib::system_or_bail(
+ $params{tar_program}, 'xf',
+ $backup_path . '/pg_wal.tar', '-C',
+ $data_path . '/pg_wal');
+ }
+ else
+ {
+ rmdir($data_path);
+ RecursiveCopy::copypath($backup_path, $data_path);
+ }
+ chmod(0700, $data_path);
+
+ # Base configuration for this node
+ $self->append_conf(
+ 'postgresql.conf',
+ qq(
+port = $port
+));
+ if ($use_tcp)
+ {
+ $self->append_conf('postgresql.conf', "listen_addresses = '$host'");
+ }
+ else
+ {
+ $self->append_conf('postgresql.conf',
+ "unix_socket_directories = '$host'");
+ }
+ $self->enable_streaming($root_node) if $params{has_streaming};
+ $self->enable_restoring($root_node, $params{standby})
+ if $params{has_restoring};
+ return;
+}
+
+=pod
+
+=item $node->rotate_logfile()
+
+Switch to a new PostgreSQL log file. This does not alter any running
+PostgreSQL process. Subsequent method calls, including pg_ctl invocations,
+will use the new name. Return the new name.
+
+=cut
+
+sub rotate_logfile
+{
+ my ($self) = @_;
+ $self->{_logfile} = sprintf('%s_%d.log',
+ $self->{_logfile_base},
+ ++$self->{_logfile_generation});
+ return $self->{_logfile};
+}
+
+=pod
+
+=item $node->start(%params) => success_or_failure
+
+Wrapper for pg_ctl start
+
+Start the node and wait until it is ready to accept connections.
+
+=over
+
+=item fail_ok => 1
+
+By default, failure terminates the entire F<prove> invocation. If given,
+instead return a true or false value to indicate success or failure.
+
+=back
+
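+For example, a test that expects startup to fail could do (a sketch; the
+broken setting is illustrative):
+
+  $node->append_conf('postgresql.conf', 'bogus_parameter = on');
+  my $started = $node->start(fail_ok => 1);
+  ok(!$started, 'server does not start with an unknown parameter');
+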
+=cut
+
+sub start
+{
+ my ($self, %params) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $name = $self->name;
+ my $ret;
+
+ BAIL_OUT("node \"$name\" is already running") if defined $self->{_pid};
+
+ print("### Starting node \"$name\"\n");
+
+ # Temporarily unset PGAPPNAME so that the server doesn't
+ # inherit it. Otherwise this could affect libpqwalreceiver
+ # connections in confusing ways.
+ local %ENV = $self->_get_env(PGAPPNAME => undef);
+
+ # Note: We set the cluster_name here, not in postgresql.conf (in
+ # sub init) so that it does not get copied to standbys.
+ $ret = TestLib::system_log('pg_ctl', '-D', $self->data_dir, '-l',
+ $self->logfile, '-o', "--cluster-name=$name", 'start');
+
+ if ($ret != 0)
+ {
+ print "# pg_ctl start failed; logfile:\n";
+ print TestLib::slurp_file($self->logfile);
+
+ # pg_ctl could have timed out, so check to see if there's a pid file;
+ # otherwise our END block will fail to shut down the new postmaster.
+ $self->_update_pid(-1);
+
+ BAIL_OUT("pg_ctl start failed") unless $params{fail_ok};
+ return 0;
+ }
+
+ $self->_update_pid(1);
+ return 1;
+}
+
+=pod
+
+=item $node->kill9()
+
+Send SIGKILL (signal 9) to the postmaster.
+
+Note: if the node is already known to be stopped, this does nothing.
+However, if we think it's running and it's not, it's important for
+this to fail. Otherwise, tests might fail to detect server crashes.
+
+=cut
+
+sub kill9
+{
+ my ($self) = @_;
+ my $name = $self->name;
+ return unless defined $self->{_pid};
+
+ local %ENV = $self->_get_env();
+
+ print "### Killing node \"$name\" using signal 9\n";
+ kill(9, $self->{_pid});
+ $self->{_pid} = undef;
+ return;
+}
+
+=pod
+
+=item $node->stop(mode)
+
+Stop the node using pg_ctl -m $mode and wait for it to stop.
+
+Note: if the node is already known to be stopped, this does nothing.
+However, if we think it's running and it's not, it's important for
+this to fail. Otherwise, tests might fail to detect server crashes.
+
+With optional extra param fail_ok => 1, returns 0 for failure
+instead of bailing out.
+
+=cut
+
+sub stop
+{
+ my ($self, $mode, %params) = @_;
+ my $pgdata = $self->data_dir;
+ my $name = $self->name;
+ my $ret;
+
+ local %ENV = $self->_get_env();
+
+ $mode = 'fast' unless defined $mode;
+ return 1 unless defined $self->{_pid};
+
+ print "### Stopping node \"$name\" using mode $mode\n";
+ $ret = TestLib::system_log('pg_ctl', '-D', $pgdata,
+ '-m', $mode, 'stop');
+
+ if ($ret != 0)
+ {
+ print "# pg_ctl stop failed: $ret\n";
+
+ # Check to see if we still have a postmaster or not.
+ $self->_update_pid(-1);
+
+ BAIL_OUT("pg_ctl stop failed") unless $params{fail_ok};
+ return 0;
+ }
+
+ $self->_update_pid(0);
+ return 1;
+}
+
+=pod
+
+=item $node->reload()
+
+Reload configuration parameters on the node.
+
+=cut
+
+sub reload
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $name = $self->name;
+
+ local %ENV = $self->_get_env();
+
+ print "### Reloading node \"$name\"\n";
+ TestLib::system_or_bail('pg_ctl', '-D', $pgdata, 'reload');
+ return;
+}
+
+=pod
+
+=item $node->restart()
+
+Wrapper for pg_ctl restart
+
+=cut
+
+sub restart
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $logfile = $self->logfile;
+ my $name = $self->name;
+
+ local %ENV = $self->_get_env(PGAPPNAME => undef);
+
+ print "### Restarting node \"$name\"\n";
+
+ TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
+ 'restart');
+
+ $self->_update_pid(1);
+ return;
+}
+
+=pod
+
+=item $node->promote()
+
+Wrapper for pg_ctl promote
+
+=cut
+
+sub promote
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $logfile = $self->logfile;
+ my $name = $self->name;
+
+ local %ENV = $self->_get_env();
+
+ print "### Promoting node \"$name\"\n";
+ TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
+ 'promote');
+ return;
+}
+
+=pod
+
+=item $node->logrotate()
+
+Wrapper for pg_ctl logrotate
+
+=cut
+
+sub logrotate
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $logfile = $self->logfile;
+ my $name = $self->name;
+
+ local %ENV = $self->_get_env();
+
+ print "### Rotating log in node \"$name\"\n";
+ TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
+ 'logrotate');
+ return;
+}
+
+# Internal routine to enable streaming replication on a standby node.
+sub enable_streaming
+{
+ my ($self, $root_node) = @_;
+ my $root_connstr = $root_node->connstr;
+ my $name = $self->name;
+
+ print "### Enabling streaming replication for node \"$name\"\n";
+ $self->append_conf(
+ 'postgresql.conf', qq(
+primary_conninfo='$root_connstr'
+));
+ $self->set_standby_mode();
+ return;
+}
+
+# Internal routine to enable archive recovery command on a standby node
+sub enable_restoring
+{
+ my ($self, $root_node, $standby) = @_;
+ my $path = $root_node->archive_dir;
+ my $name = $self->name;
+
+ print "### Enabling WAL restore for node \"$name\"\n";
+
+ # On Windows, the path specified in the restore command needs to use
+ # double back-slashes to work properly and to be able to detect properly
+ # the file targeted by the copy command, so the directory value used
+	# in this routine, using only one back-slash, needs to be properly changed
+ # first. Paths also need to be double-quoted to prevent failures where
+ # the path contains spaces.
+ $path =~ s{\\}{\\\\}g if ($TestLib::windows_os);
+ my $copy_command =
+ $TestLib::windows_os
+ ? qq{copy "$path\\\\%f" "%p"}
+ : qq{cp "$path/%f" "%p"};
+
+ $self->append_conf(
+ 'postgresql.conf', qq(
+restore_command = '$copy_command'
+));
+ if ($standby)
+ {
+ $self->set_standby_mode();
+ }
+ else
+ {
+ $self->set_recovery_mode();
+ }
+ return;
+}
+
+=pod
+
+=item $node->set_recovery_mode()
+
+Place recovery.signal file.
+
+=cut
+
+sub set_recovery_mode
+{
+ my ($self) = @_;
+
+ $self->append_conf('recovery.signal', '');
+ return;
+}
+
+=pod
+
+=item $node->set_standby_mode()
+
+Place standby.signal file.
+
+=cut
+
+sub set_standby_mode
+{
+ my ($self) = @_;
+
+ $self->append_conf('standby.signal', '');
+ return;
+}
+
+# Internal routine to enable archiving
+sub enable_archiving
+{
+ my ($self) = @_;
+ my $path = $self->archive_dir;
+ my $name = $self->name;
+
+ print "### Enabling WAL archiving for node \"$name\"\n";
+
+ # On Windows, the path specified in the restore command needs to use
+ # double back-slashes to work properly and to be able to detect properly
+ # the file targeted by the copy command, so the directory value used
+	# in this routine, using only one back-slash, needs to be properly changed
+ # first. Paths also need to be double-quoted to prevent failures where
+ # the path contains spaces.
+ $path =~ s{\\}{\\\\}g if ($TestLib::windows_os);
+ my $copy_command =
+ $TestLib::windows_os
+ ? qq{copy "%p" "$path\\\\%f"}
+ : qq{cp "%p" "$path/%f"};
+
+ # Enable archive_mode and archive_command on node
+ $self->append_conf(
+ 'postgresql.conf', qq(
+archive_mode = on
+archive_command = '$copy_command'
+));
+ return;
+}
+
+# Internal method to update $self->{_pid}
+# $is_running = 1: pid file should be there
+# $is_running = 0: pid file should NOT be there
+# $is_running = -1: we aren't sure
+sub _update_pid
+{
+ my ($self, $is_running) = @_;
+ my $name = $self->name;
+
+ # If we can open the PID file, read its first line and that's the PID we
+ # want.
+ if (open my $pidfile, '<', $self->data_dir . "/postmaster.pid")
+ {
+ chomp($self->{_pid} = <$pidfile>);
+ close $pidfile;
+
+ # If we aren't sure what to expect, validate the PID using kill().
+ # This protects against stale PID files left by crashed postmasters.
+ if ($is_running == -1 && kill(0, $self->{_pid}) == 0)
+ {
+ print
+ "# Stale postmaster.pid file for node \"$name\": PID $self->{_pid} no longer exists\n";
+ $self->{_pid} = undef;
+ return;
+ }
+
+ print "# Postmaster PID for node \"$name\" is $self->{_pid}\n";
+
+ # If we found a pidfile when there shouldn't be one, complain.
+ BAIL_OUT("postmaster.pid unexpectedly present") if $is_running == 0;
+ return;
+ }
+
+ $self->{_pid} = undef;
+ print "# No postmaster PID for node \"$name\"\n";
+
+ # Complain if we expected to find a pidfile.
+ BAIL_OUT("postmaster.pid unexpectedly not present") if $is_running == 1;
+ return;
+}
+
+=pod
+
+=item PostgresNode->get_new_node(node_name, %params)
+
+Build a new object of class C<PostgresNode> (or of a subclass, if you have
+one), assigning a free port number. Remembers the node, to prevent its port
+number from being reused for another node, and to ensure that it gets
+shut down when the test script exits.
+
+You should generally use this instead of C<PostgresNode::new(...)>.
+
+=over
+
+=item port => [1,65535]
+
+By default, this function assigns a port number to each node. Specify this to
+force a particular port number. The caller is responsible for evaluating
+potential conflicts and privilege requirements.
+
+=item own_host => 1
+
+By default, all nodes use the same PGHOST value. If specified, generate a
+PGHOST specific to this node. This allows multiple nodes to use the same
+port.
+
+=item install_path => '/path/to/postgres/installation'
+
+Using this parameter it is possible to have nodes pointing to different
+installations, for testing different versions together or the same version
+with different build parameters. The provided path must be the parent of the
+installation's 'bin' and 'lib' directories. In the common case where this is
+not provided, Postgres binaries will be found in the caller's PATH.
+
+=back
+
+For backwards compatibility, it is also exported as a standalone function,
+which can only create objects of class C<PostgresNode>.
+
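+For example (a sketch; names and the installation path are illustrative):
+
+  # two nodes sharing a port, each with its own host
+  my $node_a = get_new_node('node_a');
+  my $node_b = get_new_node('node_b', port => $node_a->port, own_host => 1);
+
+  # a node running binaries from a separate installation
+  my $node_old = get_new_node('node_old',
+      install_path => '/opt/postgresql-13');
+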
+=cut
+
+sub get_new_node
+{
+ my $class = 'PostgresNode';
+ $class = shift if scalar(@_) % 2 != 1;
+ my ($name, %params) = @_;
+
+ # Select a port.
+ my $port;
+ if (defined $params{port})
+ {
+ $port = $params{port};
+ }
+ else
+ {
+ # When selecting a port, we look for an unassigned TCP port number,
+ # even if we intend to use only Unix-domain sockets. This is clearly
+ # necessary on $use_tcp (Windows) configurations, and it seems like a
+ # good idea on Unixen as well.
+ $port = get_free_port();
+ }
+
+ # Select a host.
+ my $host = $test_pghost;
+ if ($params{own_host})
+ {
+ if ($use_tcp)
+ {
+ $last_host_assigned++;
+ $last_host_assigned > 254 and BAIL_OUT("too many own_host nodes");
+ $host = '127.0.0.' . $last_host_assigned;
+ }
+ else
+ {
+ $host = "$test_pghost/$name"; # Assume $name =~ /^[-_a-zA-Z0-9]+$/
+ mkdir $host;
+ }
+ }
+
+ # Lock port number found by creating a new node
+ my $node = $class->new($name, $host, $port);
+
+ if ($params{install_path})
+ {
+ $node->{_install_path} = $params{install_path};
+ }
+
+ # Add node to list of nodes
+ push(@all_nodes, $node);
+
+ $node->_set_pg_version;
+
+ my $v = $node->{_pg_version};
+
+ carp("PostgresNode isn't fully compatible with version " . $v)
+ if $v < 12;
+
+ return $node;
+}
+
+# Private routine to run the pg_config binary found in our environment (or in
+# our install_path, if we have one), and set the version from it
+#
+sub _set_pg_version
+{
+ my ($self) = @_;
+ my $inst = $self->{_install_path};
+ my $pg_config = "pg_config";
+
+ if (defined $inst)
+ {
+ # If the _install_path is invalid, our PATH variables might find an
+ # unrelated pg_config executable elsewhere. Sanity check the
+ # directory.
+ BAIL_OUT("directory not found: $inst")
+ unless -d $inst;
+
+ # If the directory exists but is not the root of a postgresql
+ # installation, or if the user configured using
+ # --bindir=$SOMEWHERE_ELSE, we're not going to find pg_config, so
+ # complain about that, too.
+ $pg_config = "$inst/bin/pg_config";
+ BAIL_OUT("pg_config not found: $pg_config")
+ unless -e $pg_config
+ or ($TestLib::windows_os and -e "$pg_config.exe");
+ BAIL_OUT("pg_config not executable: $pg_config")
+ unless $TestLib::windows_os or -x $pg_config;
+
+ # Leave $pg_config install_path qualified, to be sure we get the right
+ # version information, below, or die trying
+ }
+
+ local %ENV = $self->_get_env();
+
+ # We only want the version field
+ my $version_line = qx{$pg_config --version};
+ BAIL_OUT("$pg_config failed: $!") if $?;
+
+ $self->{_pg_version} = PostgresVersion->new($version_line);
+
+ BAIL_OUT("could not parse pg_config --version output: $version_line")
+ unless defined $self->{_pg_version};
+}
+
+# Private routine to return a copy of the environment with the PATH and
+# (DY)LD_LIBRARY_PATH correctly set when there is an install path set for
+# the node.
+#
+# Routines that call Postgres binaries need to call this routine like this:
+#
+#    local %ENV = $self->_get_env(%extra_settings);
+#
+# A copy of the environment is taken and the node's host and port settings are
+# added as PGHOST and PGPORT. Then the extra settings (if any) are applied:
+# any setting in %extra_settings with an undefined value is deleted and the
+# remainder are set. Then the PATH and (DY)LD_LIBRARY_PATH are adjusted
+# if the node's install path is set, and the copied environment is returned.
+#
+# The install path set in get_new_node needs to be a directory containing
+# bin and lib subdirectories as in a standard PostgreSQL installation, so this
+# can't be used with installations where the bin and lib directories don't have
+# a common parent directory.
+sub _get_env
+{
+ my $self = shift;
+ my %inst_env = (%ENV, PGHOST => $self->{_host}, PGPORT => $self->{_port});
+ # the remaining arguments are modifications to make to the environment
+ my %mods = (@_);
+ while (my ($k, $v) = each %mods)
+ {
+ if (defined $v)
+ {
+ $inst_env{$k} = "$v";
+ }
+ else
+ {
+ delete $inst_env{$k};
+ }
+ }
+ # now fix up the new environment for the install path
+ my $inst = $self->{_install_path};
+ if ($inst)
+ {
+ if ($TestLib::windows_os)
+ {
+ # Windows picks up DLLs from the PATH rather than *LD_LIBRARY_PATH
+ # choose the right path separator
+ if ($Config{osname} eq 'MSWin32')
+ {
+ $inst_env{PATH} = "$inst/bin;$inst/lib;$ENV{PATH}";
+ }
+ else
+ {
+ $inst_env{PATH} = "$inst/bin:$inst/lib:$ENV{PATH}";
+ }
+ }
+ else
+ {
+ my $dylib_name =
+ $Config{osname} eq 'darwin'
+ ? "DYLD_LIBRARY_PATH"
+ : "LD_LIBRARY_PATH";
+ $inst_env{PATH} = "$inst/bin:$ENV{PATH}";
+ if (exists $ENV{$dylib_name})
+ {
+ $inst_env{$dylib_name} = "$inst/lib:$ENV{$dylib_name}";
+ }
+ else
+ {
+ $inst_env{$dylib_name} = "$inst/lib";
+ }
+ }
+ }
+ return (%inst_env);
+}
+
+# Private routine to get an installation path qualified command.
+#
+# IPC::Run maintains a cache, %cmd_cache, mapping commands to paths. Tests
+# which use nodes spanning more than one postgres installation path need to
+# avoid confusing which installation's binaries get run. Setting $ENV{PATH} is
+# insufficient, as IPC::Run does not check to see if the path has changed since
+# caching a command.
+sub installed_command
+{
+ my ($self, $cmd) = @_;
+
+ # Nodes using alternate installation locations use their installation's
+ # bin/ directory explicitly
+ return join('/', $self->{_install_path}, 'bin', $cmd)
+ if defined $self->{_install_path};
+
+ # Nodes implicitly using the default installation location rely on IPC::Run
+ # to find the right binary, which should not cause %cmd_cache confusion,
+ # because no nodes with other installation paths do it that way.
+ return $cmd;
+}
+
+=pod
+
+=item get_free_port()
+
+Locate an unprivileged (high) TCP port that's not currently bound to
+anything. This is used by get_new_node, and is also exported for use
+by test cases that need to start other, non-Postgres servers.
+
+Ports assigned to existing PostgresNode objects are automatically
+excluded, even if those servers are not currently running.
+
+XXX A port available now may become unavailable by the time we start
+the desired service.
+
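+For example, a test that needs to run some non-Postgres service can reserve
+a port like this (a sketch):
+
+  my $extra_port = get_free_port();
+  # ... start the other service listening on $extra_port ...
+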
+=cut
+
+sub get_free_port
+{
+ my $found = 0;
+ my $port = $last_port_assigned;
+
+ while ($found == 0)
+ {
+
+ # advance $port, wrapping correctly around range end
+ $port = 49152 if ++$port >= 65536;
+ print "# Checking port $port\n";
+
+ # Check first that candidate port number is not included in
+ # the list of already-registered nodes.
+ $found = 1;
+ foreach my $node (@all_nodes)
+ {
+ $found = 0 if ($node->port == $port);
+ }
+
+ # Check to see if anything else is listening on this TCP port.
+ # Seek a port available for all possible listen_addresses values,
+ # so callers can harness this port for the widest range of purposes.
+ # The 0.0.0.0 test achieves that for MSYS, which automatically sets
+ # SO_EXCLUSIVEADDRUSE. Testing 0.0.0.0 is insufficient for Windows
+ # native Perl (https://stackoverflow.com/a/14388707), so we also
+ # have to test individual addresses. Doing that for 127.0.0/24
+ # addresses other than 127.0.0.1 might fail with EADDRNOTAVAIL on
+ # non-Linux, non-Windows kernels.
+ #
+ # Thus, 0.0.0.0 and individual 127.0.0/24 addresses are tested
+ # only on Windows and only when TCP usage is requested.
+ if ($found == 1)
+ {
+ foreach my $addr (qw(127.0.0.1),
+ ($use_tcp && $TestLib::windows_os)
+ ? qw(127.0.0.2 127.0.0.3 0.0.0.0)
+ : ())
+ {
+ if (!can_bind($addr, $port))
+ {
+ $found = 0;
+ last;
+ }
+ }
+ }
+ }
+
+ print "# Found port $port\n";
+
+ # Update port for next time
+ $last_port_assigned = $port;
+
+ return $port;
+}
+
+# Internal routine to check whether a host:port is available to bind
+sub can_bind
+{
+ my ($host, $port) = @_;
+ my $iaddr = inet_aton($host);
+ my $paddr = sockaddr_in($port, $iaddr);
+ my $proto = getprotobyname("tcp");
+
+ socket(SOCK, PF_INET, SOCK_STREAM, $proto)
+ or die "socket failed: $!";
+
+ # As in postmaster, don't use SO_REUSEADDR on Windows
+ setsockopt(SOCK, SOL_SOCKET, SO_REUSEADDR, pack("l", 1))
+ unless $TestLib::windows_os;
+ my $ret = bind(SOCK, $paddr) && listen(SOCK, SOMAXCONN);
+ close(SOCK);
+ return $ret;
+}
+
+# Automatically shut down any still-running nodes (in the same order the nodes
+# were created in) when the test script exits.
+END
+{
+
+ # take care not to change the script's exit value
+ my $exit_code = $?;
+
+ foreach my $node (@all_nodes)
+ {
+ $node->teardown_node;
+
+ # skip clean if we are requested to retain the basedir
+ next if defined $ENV{'PG_TEST_NOCLEAN'};
+
+ # clean basedir on clean test invocation
+ $node->clean_node if $exit_code == 0 && TestLib::all_tests_passing();
+ }
+
+ $? = $exit_code;
+}
+
+=pod
+
+=item $node->teardown_node()
+
+Do an immediate stop of the node
+
+=cut
+
+sub teardown_node
+{
+ my $self = shift;
+
+ $self->stop('immediate');
+ return;
+}
+
+=pod
+
+=item $node->clean_node()
+
+Remove the base directory of the node if the node has been stopped.
+
+=cut
+
+sub clean_node
+{
+ my $self = shift;
+
+ rmtree $self->{_basedir} unless defined $self->{_pid};
+ return;
+}
+
+=pod
+
+=item $node->safe_psql($dbname, $sql) => stdout
+
+Invoke B<psql> to run B<sql> on B<dbname> and return its stdout on success.
+Die if the SQL produces an error. Runs with B<ON_ERROR_STOP> set.
+
+Accepts optional extra parameters, such as timeout and timed_out, with the
+same meanings as for the psql method.
+
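+For example (a sketch):
+
+  my $count = $node->safe_psql('postgres',
+      'SELECT count(*) FROM pg_class',
+      timeout => $TestLib::timeout_default);
+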
+=cut
+
+sub safe_psql
+{
+ my ($self, $dbname, $sql, %params) = @_;
+
+ local %ENV = $self->_get_env();
+
+ my ($stdout, $stderr);
+
+ my $ret = $self->psql(
+ $dbname, $sql,
+ %params,
+ stdout => \$stdout,
+ stderr => \$stderr,
+ on_error_die => 1,
+ on_error_stop => 1);
+
+ # psql can emit stderr from NOTICEs etc
+ if ($stderr ne "")
+ {
+ print "#### Begin standard error\n";
+ print $stderr;
+ print "\n#### End standard error\n";
+ }
+
+ return $stdout;
+}
+
+=pod
+
+=item $node->psql($dbname, $sql, %params) => psql_retval
+
+Invoke B<psql> to execute B<$sql> on B<$dbname> and return the return value
+from B<psql>, which is run with on_error_stop by default so that it will
+stop running sql and return 3 if the passed SQL results in an error.
+
+As a convenience, if B<psql> is called in array context it returns an
+array containing ($retval, $stdout, $stderr).
+
+psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc>
+disabled. That may be overridden by passing extra psql parameters.
+
+stdout and stderr are transformed to UNIX line endings if on Windows. Any
+trailing newline is removed.
+
+Dies on failure to invoke psql but not if psql exits with a nonzero
+return code (unless on_error_die specified).
+
+If psql exits because of a signal, an exception is raised.
+
+=over
+
+=item stdout => \$stdout
+
+B<stdout>, if given, must be a scalar reference to which standard output is
+written. If not given, standard output is not redirected and will be printed
+unless B<psql> is called in array context, in which case it's captured and
+returned.
+
+=item stderr => \$stderr
+
+Same as B<stdout> but gets standard error. If the same scalar is passed for
+both B<stdout> and B<stderr> the results may be interleaved unpredictably.
+
+=item on_error_stop => 1
+
+By default, the B<psql> method invokes the B<psql> program with ON_ERROR_STOP=1
+set, so SQL execution is stopped at the first error and exit code 3 is
+returned. Set B<on_error_stop> to 0 to ignore errors instead.
+
+=item on_error_die => 0
+
+By default, this method returns psql's result code. Pass on_error_die to
+instead die with an informative message.
+
+=item timeout => 'interval'
+
+Set a timeout for the psql call as an interval accepted by B<IPC::Run::timer>
+(integer seconds is fine). This method raises an exception on timeout, unless
+the B<timed_out> parameter is also given.
+
+=item timed_out => \$timed_out
+
+If B<timeout> is set and this parameter is given, the scalar it references
+is set to true if the psql call times out.
+
+=item connstr => B<value>
+
+If set, use this as the connection string for the connection to the
+backend.
+
+=item replication => B<value>
+
+If set, add B<replication=value> to the conninfo string.
+Passing the literal value C<database> results in a logical replication
+connection.
+
+=item extra_params => ['--single-transaction']
+
+If given, it must be an array reference containing additional parameters to B<psql>.
+
+=back
+
+e.g.
+
+ my ($stdout, $stderr, $timed_out);
+ my $cmdret = $node->psql('postgres', 'SELECT pg_sleep(600)',
+ stdout => \$stdout, stderr => \$stderr,
+ timeout => $TestLib::timeout_default,
+ timed_out => \$timed_out,
+	extra_params => ['--single-transaction']);
+
+will set $cmdret to undef and $timed_out to a true value.
+
+ $node->psql('postgres', $sql, on_error_die => 1);
+
+dies with an informative message if $sql fails.
+
+=cut
+
+sub psql
+{
+ my ($self, $dbname, $sql, %params) = @_;
+
+ local %ENV = $self->_get_env();
+
+ my $stdout = $params{stdout};
+ my $stderr = $params{stderr};
+ my $replication = $params{replication};
+ my $timeout = undef;
+ my $timeout_exception = 'psql timed out';
+
+ # Build the connection string.
+ my $psql_connstr;
+ if (defined $params{connstr})
+ {
+ $psql_connstr = $params{connstr};
+ }
+ else
+ {
+ $psql_connstr = $self->connstr($dbname);
+ }
+ $psql_connstr .= defined $replication ? " replication=$replication" : "";
+
+ my @psql_params = (
+ $self->installed_command('psql'),
+ '-XAtq', '-d', $psql_connstr, '-f', '-');
+
+ # If the caller wants an array and hasn't passed stdout/stderr
+ # references, allocate temporary ones to capture them so we
+ # can return them. Otherwise we won't redirect them at all.
+ if (wantarray)
+ {
+ if (!defined($stdout))
+ {
+ my $temp_stdout = "";
+ $stdout = \$temp_stdout;
+ }
+ if (!defined($stderr))
+ {
+ my $temp_stderr = "";
+ $stderr = \$temp_stderr;
+ }
+ }
+
+ $params{on_error_stop} = 1 unless defined $params{on_error_stop};
+ $params{on_error_die} = 0 unless defined $params{on_error_die};
+
+ push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop};
+ push @psql_params, @{ $params{extra_params} }
+ if defined $params{extra_params};
+
+ $timeout =
+ IPC::Run::timeout($params{timeout}, exception => $timeout_exception)
+ if (defined($params{timeout}));
+
+ ${ $params{timed_out} } = 0 if defined $params{timed_out};
+
+ # IPC::Run would otherwise append to existing contents:
+ $$stdout = "" if ref($stdout);
+ $$stderr = "" if ref($stderr);
+
+ my $ret;
+
+ # Run psql and capture any possible exceptions. If the exception is
+ # because of a timeout and the caller requested to handle that, just return
+ # and set the flag. Otherwise, and for any other exception, rethrow.
+ #
+ # For background, see
+ # https://metacpan.org/pod/release/ETHER/Try-Tiny-0.24/lib/Try/Tiny.pm
+ do
+ {
+ local $@;
+ eval {
+ my @ipcrun_opts = (\@psql_params, '<', \$sql);
+ push @ipcrun_opts, '>', $stdout if defined $stdout;
+ push @ipcrun_opts, '2>', $stderr if defined $stderr;
+ push @ipcrun_opts, $timeout if defined $timeout;
+
+ IPC::Run::run @ipcrun_opts;
+ $ret = $?;
+ };
+ my $exc_save = $@;
+ if ($exc_save)
+ {
+
+ # IPC::Run::run threw an exception. re-throw unless it's a
+ # timeout, which we'll handle by testing is_expired
+ die $exc_save
+ if (blessed($exc_save)
+ || $exc_save !~ /^\Q$timeout_exception\E/);
+
+ $ret = undef;
+
+ die "Got timeout exception '$exc_save' but timer not expired?!"
+ unless $timeout->is_expired;
+
+ if (defined($params{timed_out}))
+ {
+ ${ $params{timed_out} } = 1;
+ }
+ else
+ {
+ die "psql timed out: stderr: '$$stderr'\n"
+ . "while running '@psql_params'";
+ }
+ }
+ };
+
+ if (defined $$stdout)
+ {
+ chomp $$stdout;
+ }
+
+ if (defined $$stderr)
+ {
+ chomp $$stderr;
+ }
+
+ # See http://perldoc.perl.org/perlvar.html#%24CHILD_ERROR
+ # We don't use IPC::Run::Simple to limit dependencies.
+ #
+ # We always die on signal.
+ my $core = $ret & 128 ? " (core dumped)" : "";
+ die "psql exited with signal "
+ . ($ret & 127)
+ . "$core: '$$stderr' while running '@psql_params'"
+ if $ret & 127;
+ $ret = $ret >> 8;
+
+ if ($ret && $params{on_error_die})
+ {
+ die "psql error: stderr: '$$stderr'\nwhile running '@psql_params'"
+ if $ret == 1;
+ die "connection error: '$$stderr'\nwhile running '@psql_params'"
+ if $ret == 2;
+ die
+ "error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
+ if $ret == 3;
+ die "psql returns $ret: '$$stderr'\nwhile running '@psql_params'";
+ }
+
+ if (wantarray)
+ {
+ return ($ret, $$stdout, $$stderr);
+ }
+ else
+ {
+ return $ret;
+ }
+}
+
+=pod
+
+=item $node->background_psql($dbname, \$stdin, \$stdout, $timer, %params) => harness
+
+Invoke B<psql> on B<$dbname> and return an IPC::Run harness object, which the
+caller may use to send input to B<psql>. The process's stdin is sourced from
+the $stdin scalar reference, and its stdout and stderr go to the $stdout
+scalar reference. This allows the caller to act on other parts of the system
+while idling this backend.
+
+The specified timer object is attached to the harness, as well. It's the caller's
+responsibility to set the timeout length (usually
+$TestLib::timeout_default), and to restart the timer after
+each command if the timeout is per-command.
+
+psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc>
+disabled. That may be overridden by passing extra psql parameters.
+
+Dies on failure to invoke psql, or if psql fails to connect. Errors occurring
+later are the caller's problem. psql runs with on_error_stop by default so
+that it will stop running sql and return 3 if passed SQL results in an error.
+
+Be sure to "finish" the harness when done with it.
+
+=over
+
+=item on_error_stop => 1
+
+By default, the B<psql> method invokes the B<psql> program with ON_ERROR_STOP=1
+set, so SQL execution is stopped at the first error and exit code 3 is
+returned. Set B<on_error_stop> to 0 to ignore errors instead.
+
+=item replication => B<value>
+
+If set, add B<replication=value> to the conninfo string.
+Passing the literal value C<database> results in a logical replication
+connection.
+
+=item extra_params => ['--single-transaction']
+
+If given, it must be an array reference containing additional parameters to B<psql>.
+
+=back
+
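+A minimal usage sketch (the query is illustrative):
+
+  my ($in, $out) = ('', '');
+  my $timer = IPC::Run::timeout($TestLib::timeout_default);
+  my $h = $node->background_psql('postgres', \$in, \$out, $timer);
+  $in .= "SELECT 1;\n";
+  $h->pump while length $in;   # feed the query to the idle backend
+  $h->finish;                  # send EOF and wait for psql to exit
+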
+=cut
+
+sub background_psql
+{
+ my ($self, $dbname, $stdin, $stdout, $timer, %params) = @_;
+
+ local %ENV = $self->_get_env();
+
+ my $replication = $params{replication};
+
+ my @psql_params = (
+ $self->installed_command('psql'),
+ '-XAtq',
+ '-d',
+ $self->connstr($dbname)
+ . (defined $replication ? " replication=$replication" : ""),
+ '-f',
+ '-');
+
+ $params{on_error_stop} = 1 unless defined $params{on_error_stop};
+
+ push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop};
+ push @psql_params, @{ $params{extra_params} }
+ if defined $params{extra_params};
+
+ # Ensure there is no data waiting to be sent:
+ $$stdin = "" if ref($stdin);
+ # IPC::Run would otherwise append to existing contents:
+ $$stdout = "" if ref($stdout);
+
+ my $harness = IPC::Run::start \@psql_params,
+ '<', $stdin, '>', $stdout, $timer;
+
+ # Request some output, and pump until we see it. This means that psql
+ # connection failures are caught here, relieving callers of the need to
+ # handle those. (Right now, we have no particularly good handling for
+ # errors anyway, but that might be added later.)
+ my $banner = "background_psql: ready";
+ $$stdin = "\\echo $banner\n";
+ pump $harness until $$stdout =~ /$banner/ || $timer->is_expired;
+
+ die "psql startup timed out" if $timer->is_expired;
+
+ return $harness;
+}
+
+=pod
+
+=item $node->interactive_psql($dbname, \$stdin, \$stdout, $timer, %params) => harness
+
+Invoke B<psql> on B<$dbname> and return an IPC::Run harness object,
+which the caller may use to send interactive input to B<psql>.
+The process's stdin is sourced from the $stdin scalar reference,
+and its stdout and stderr go to the $stdout scalar reference.
+ptys are used so that psql thinks it's being called interactively.
+
+The specified timer object is attached to the harness, as well. It's the caller's
+responsibility to set the timeout length (usually
+$TestLib::timeout_default), and to restart the timer after
+each command if the timeout is per-command.
+
+psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc>
+disabled. That may be overridden by passing extra psql parameters.
+
+Dies on failure to invoke psql, or if psql fails to connect.
+Errors occurring later are the caller's problem.
+
+Be sure to "finish" the harness when done with it.
+
+The only extra parameter currently accepted is
+
+=over
+
+=item extra_params => ['--single-transaction']
+
+If given, it must be an array reference containing additional parameters to B<psql>.
+
+=back
+
+This requires IO::Pty in addition to IPC::Run.
+
+=cut
+
+sub interactive_psql
+{
+ my ($self, $dbname, $stdin, $stdout, $timer, %params) = @_;
+
+ local %ENV = $self->_get_env();
+
+ my @psql_params = (
+ $self->installed_command('psql'),
+ '-XAt', '-d', $self->connstr($dbname));
+
+ push @psql_params, @{ $params{extra_params} }
+ if defined $params{extra_params};
+
+ # Ensure there is no data waiting to be sent:
+ $$stdin = "" if ref($stdin);
+ # IPC::Run would otherwise append to existing contents:
+ $$stdout = "" if ref($stdout);
+
+ my $harness = IPC::Run::start \@psql_params,
+ '<pty<', $stdin, '>pty>', $stdout, $timer;
+
+ # Pump until we see psql's help banner. This ensures that callers
+ # won't write anything to the pty before it's ready, avoiding an
+ # implementation issue in IPC::Run. Also, it means that psql
+ # connection failures are caught here, relieving callers of
+ # the need to handle those. (Right now, we have no particularly
+ # good handling for errors anyway, but that might be added later.)
+ pump $harness
+ until $$stdout =~ /Type "help" for help/ || $timer->is_expired;
+
+ die "psql startup timed out" if $timer->is_expired;
+
+ return $harness;
+}
+
+# Common sub of pgbench-invoking interfaces. Makes any requested script files
+# and returns pgbench command-line options causing use of those files.
+sub _pgbench_make_files
+{
+ my ($self, $files) = @_;
+ my @file_opts;
+
+ if (defined $files)
+ {
+
+ # note: files are ordered for determinism
+ for my $fn (sort keys %$files)
+ {
+ my $filename = $self->basedir . '/' . $fn;
+ push @file_opts, '-f', $filename;
+
+ # cleanup file weight
+ $filename =~ s/\@\d+$//;
+
+ #push @filenames, $filename;
+ # filenames are expected to be unique on a test
+ if (-e $filename)
+ {
+ ok(0, "$filename must not already exist");
+ unlink $filename or die "cannot unlink $filename: $!";
+ }
+ TestLib::append_to_file($filename, $$files{$fn});
+ }
+ }
+
+ return @file_opts;
+}
+
+=pod
+
+=item $node->pgbench($opts, $stat, $out, $err, $name, $files, @args)
+
+Invoke B<pgbench>, with parameters and files.
+
+=over
+
+=item $opts
+
+Options as a string to be split on spaces.
+
+=item $stat
+
+Expected exit status.
+
+=item $out
+
+Reference to a regexp list that must match stdout.
+
+=item $err
+
+Reference to a regexp list that must match stderr.
+
+=item $name
+
+Name of test for error messages.
+
+=item $files
+
+Reference to filename/contents dictionary.
+
+=item @args
+
+Further raw options or arguments.
+
+=back
+
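+For example, initializing pgbench tables might look like this (a sketch; the
+expected output patterns are illustrative):
+
+  $node->pgbench(
+      '--initialize --scale=1', 0,
+      [ qr{^$} ],
+      [ qr{creating tables}, qr{vacuuming} ],
+      'pgbench initialization');
+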
+=cut
+
+sub pgbench
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my ($self, $opts, $stat, $out, $err, $name, $files, @args) = @_;
+ my @cmd = (
+ 'pgbench',
+ split(/\s+/, $opts),
+ $self->_pgbench_make_files($files), @args);
+
+ $self->command_checks_all(\@cmd, $stat, $out, $err, $name);
+}
+
+=pod
+
+=item $node->connect_ok($connstr, $test_name, %params)
+
+Attempt a connection with a custom connection string. This is expected
+to succeed.
+
+=over
+
+=item sql => B<value>
+
+If this parameter is set, this query is used for the connection attempt
+instead of the default.
+
+=item expected_stdout => B<value>
+
+If set, the standard output of the connection attempt must match this
+regular expression.
+
+=item log_like => [ qr/required message/ ]
+
+If given, it must be an array reference containing a list of regular
+expressions that must match against the server log, using
+C<Test::More::like()>.
+
+=item log_unlike => [ qr/prohibited message/ ]
+
+If given, it must be an array reference containing a list of regular
+expressions that must NOT match against the server log. They will be
+passed to C<Test::More::unlike()>.
+
+=back
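+
+An illustrative sketch, relying on the default query (which simply echoes
+the connection string):
+
+  $node->connect_ok("dbname=postgres", 'local connection succeeds',
+      expected_stdout => qr/connected with/);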
+
+=cut
+
+sub connect_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($self, $connstr, $test_name, %params) = @_;
+
+ my $sql;
+ if (defined($params{sql}))
+ {
+ $sql = $params{sql};
+ }
+ else
+ {
+ $sql = "SELECT \$\$connected with $connstr\$\$";
+ }
+
+ my (@log_like, @log_unlike);
+ if (defined($params{log_like}))
+ {
+ @log_like = @{ $params{log_like} };
+ }
+ if (defined($params{log_unlike}))
+ {
+ @log_unlike = @{ $params{log_unlike} };
+ }
+
+ my $log_location = -s $self->logfile;
+
+ # Never prompt for a password, any callers of this routine should
+ # have set up things properly, and this should not block.
+ my ($ret, $stdout, $stderr) = $self->psql(
+ 'postgres',
+ $sql,
+ extra_params => ['-w'],
+ connstr => "$connstr",
+ on_error_stop => 0);
+
+ is($ret, 0, $test_name);
+
+ if (defined($params{expected_stdout}))
+ {
+ like($stdout, $params{expected_stdout}, "$test_name: matches");
+ }
+ if (@log_like or @log_unlike)
+ {
+ my $log_contents = TestLib::slurp_file($self->logfile, $log_location);
+
+ while (my $regex = shift @log_like)
+ {
+ like($log_contents, $regex, "$test_name: log matches");
+ }
+ while (my $regex = shift @log_unlike)
+ {
+ unlike($log_contents, $regex, "$test_name: log does not match");
+ }
+ }
+}
+
+=pod
+
+=item $node->connect_fails($connstr, $test_name, %params)
+
+Attempt a connection with a custom connection string. This is expected
+to fail.
+
+=over
+
+=item expected_stderr => B<value>
+
+If set, the standard error of the connection attempt must match this
+regular expression.
+
+=item log_like => [ qr/required message/ ]
+
+=item log_unlike => [ qr/prohibited message/ ]
+
+See C<connect_ok(...)>, above.
+
+=back
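+
+An illustrative sketch; the database name and the error pattern are examples
+only:
+
+  $node->connect_fails(
+      "dbname=does_not_exist",
+      'connection to a missing database fails',
+      expected_stderr => qr/database "does_not_exist" does not exist/);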
+
+=cut
+
+sub connect_fails
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($self, $connstr, $test_name, %params) = @_;
+
+ my (@log_like, @log_unlike);
+ if (defined($params{log_like}))
+ {
+ @log_like = @{ $params{log_like} };
+ }
+ if (defined($params{log_unlike}))
+ {
+ @log_unlike = @{ $params{log_unlike} };
+ }
+
+ my $log_location = -s $self->logfile;
+
+ # Never prompt for a password, any callers of this routine should
+ # have set up things properly, and this should not block.
+ my ($ret, $stdout, $stderr) = $self->psql(
+ 'postgres',
+ undef,
+ extra_params => ['-w'],
+ connstr => "$connstr");
+
+ isnt($ret, 0, $test_name);
+
+ if (defined($params{expected_stderr}))
+ {
+ like($stderr, $params{expected_stderr}, "$test_name: matches");
+ }
+
+ if (@log_like or @log_unlike)
+ {
+ my $log_contents = TestLib::slurp_file($self->logfile, $log_location);
+
+ while (my $regex = shift @log_like)
+ {
+ like($log_contents, $regex, "$test_name: log matches");
+ }
+ while (my $regex = shift @log_unlike)
+ {
+ unlike($log_contents, $regex, "$test_name: log does not match");
+ }
+ }
+}
+
+=pod
+
+=item $node->poll_query_until($dbname, $query [, $expected ])
+
+Run B<$query> repeatedly, until it returns the B<$expected> result
+('t', or SQL boolean true, by default).
+Continues polling if B<psql> returns an error result.
+Times out after $TestLib::timeout_default seconds.
+Returns 1 if successful, 0 if timed out.
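+
+A typical usage sketch ($standby is a hypothetical node object):
+
+  $standby->poll_query_until('postgres', 'SELECT NOT pg_is_in_recovery()')
+    or die "timed out waiting for promotion";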
+
+=cut
+
+sub poll_query_until
+{
+ my ($self, $dbname, $query, $expected) = @_;
+
+ local %ENV = $self->_get_env();
+
+ $expected = 't' unless defined($expected); # default value
+
+ my $cmd = [
+ $self->installed_command('psql'), '-XAt',
+ '-d', $self->connstr($dbname)
+ ];
+ my ($stdout, $stderr);
+ my $max_attempts = 10 * $TestLib::timeout_default;
+ my $attempts = 0;
+
+ while ($attempts < $max_attempts)
+ {
+ my $result = IPC::Run::run $cmd, '<', \$query,
+ '>', \$stdout, '2>', \$stderr;
+
+ chomp($stdout);
+ chomp($stderr);
+
+ if ($stdout eq $expected && $stderr eq '')
+ {
+ return 1;
+ }
+
+ # Wait 0.1 second before retrying.
+ usleep(100_000);
+
+ $attempts++;
+ }
+
+ # Give up. Print the output from the last attempt, hopefully that's useful
+ # for debugging.
+ diag qq(poll_query_until timed out executing this query:
+$query
+expecting this output:
+$expected
+last actual query output:
+$stdout
+with stderr:
+$stderr);
+ return 0;
+}
+
+=pod
+
+=item $node->command_ok(...)
+
+Runs a shell command like TestLib::command_ok, but with PGHOST and PGPORT set
+so that the command will default to connecting to this PostgresNode.
+
+=cut
+
+sub command_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ TestLib::command_ok(@_);
+ return;
+}
+
+=pod
+
+=item $node->command_fails(...)
+
+TestLib::command_fails with our connection parameters. See command_ok(...)
+
+=cut
+
+sub command_fails
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ TestLib::command_fails(@_);
+ return;
+}
+
+=pod
+
+=item $node->command_like(...)
+
+TestLib::command_like with our connection parameters. See command_ok(...)
+
+=cut
+
+sub command_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ TestLib::command_like(@_);
+ return;
+}
+
+=pod
+
+=item $node->command_checks_all(...)
+
+TestLib::command_checks_all with our connection parameters. See
+command_ok(...)
+
+=cut
+
+sub command_checks_all
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ TestLib::command_checks_all(@_);
+ return;
+}
+
+=pod
+
+=item $node->issues_sql_like(cmd, expected_sql, test_name)
+
+Run a command on the node, then verify that $expected_sql appears in the
+server log file.
+
+=cut
+
+sub issues_sql_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my ($self, $cmd, $expected_sql, $test_name) = @_;
+
+ local %ENV = $self->_get_env();
+
+ my $log_location = -s $self->logfile;
+
+ my $result = TestLib::run_log($cmd);
+ ok($result, "@$cmd exit code 0");
+ my $log = TestLib::slurp_file($self->logfile, $log_location);
+ like($log, $expected_sql, "$test_name: SQL found in server log");
+ return;
+}
+
+=pod
+
+=item $node->run_log(...)
+
+Runs a shell command like TestLib::run_log, but with connection parameters set
+so that the command will default to connecting to this PostgresNode.
+
+=cut
+
+sub run_log
+{
+ my $self = shift;
+
+ local %ENV = $self->_get_env();
+
+ TestLib::run_log(@_);
+ return;
+}
+
+=pod
+
+=item $node->lsn(mode)
+
+Look up WAL locations on the server:
+
+ * insert location (primary only, error on replica)
+ * write location (primary only, error on replica)
+ * flush location (primary only, error on replica)
+ * receive location (always undef on primary)
+ * replay location (always undef on primary)
+
+mode must be specified.
+
+=cut
+
+sub lsn
+{
+ my ($self, $mode) = @_;
+ my %modes = (
+ 'insert' => 'pg_current_wal_insert_lsn()',
+ 'flush' => 'pg_current_wal_flush_lsn()',
+ 'write' => 'pg_current_wal_lsn()',
+ 'receive' => 'pg_last_wal_receive_lsn()',
+ 'replay' => 'pg_last_wal_replay_lsn()');
+
+ $mode = '<undef>' if !defined($mode);
+ croak "unknown mode for 'lsn': '$mode', valid modes are "
+ . join(', ', keys %modes)
+ if !defined($modes{$mode});
+
+ my $result = $self->safe_psql('postgres', "SELECT $modes{$mode}");
+ chomp($result);
+ if ($result eq '')
+ {
+ return;
+ }
+ else
+ {
+ return $result;
+ }
+}
+
+=pod
+
+=item $node->wait_for_catchup(standby_name, mode, target_lsn)
+
+Wait until the replication connection with application_name standby_name
+(usually from node->name; this also works for logical subscriptions) reports
+a replication location in pg_stat_replication that equals or passes the
+given target LSN. By default the replay_lsn is waited for, but 'mode' may be
+specified to wait for any of sent|write|flush|replay. The connection catching
+up must be in a streaming state.
+
+If there is no active replication connection from this peer, waits until
+poll_query_until timeout.
+
+Requires that the 'postgres' db exists and is accessible.
+
+target_lsn may be any arbitrary lsn, but is typically $primary_node->lsn('insert').
+If omitted, pg_current_wal_lsn() is used.
+
+This is not a test. It die()s on failure.
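+
+A usage sketch; $primary and $standby are hypothetical node objects, and
+passing the standby's object directly works because it is accepted as
+shorthand for its name:
+
+  $primary->wait_for_catchup($standby, 'replay', $primary->lsn('insert'));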
+
+=cut
+
+sub wait_for_catchup
+{
+ my ($self, $standby_name, $mode, $target_lsn) = @_;
+ $mode = defined($mode) ? $mode : 'replay';
+ my %valid_modes =
+ ('sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1);
+ croak "unknown mode $mode for 'wait_for_catchup', valid modes are "
+ . join(', ', keys(%valid_modes))
+ unless exists($valid_modes{$mode});
+
+ # Allow passing of a PostgresNode instance as shorthand
+ if (blessed($standby_name) && $standby_name->isa("PostgresNode"))
+ {
+ $standby_name = $standby_name->name;
+ }
+ my $lsn_expr;
+ if (defined($target_lsn))
+ {
+ $lsn_expr = "'$target_lsn'";
+ }
+ else
+ {
+ $lsn_expr = 'pg_current_wal_lsn()';
+ }
+ print "Waiting for replication conn "
+ . $standby_name . "'s "
+ . $mode
+ . "_lsn to pass "
+ . $lsn_expr . " on "
+ . $self->name . "\n";
+ my $query =
+ qq[SELECT $lsn_expr <= ${mode}_lsn AND state = 'streaming' FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';];
+ $self->poll_query_until('postgres', $query)
+ or croak "timed out waiting for catchup";
+ print "done\n";
+ return;
+}
+
+=pod
+
+=item $node->wait_for_slot_catchup(slot_name, mode, target_lsn)
+
+Wait for the named replication slot to equal or pass the supplied target_lsn.
+The location used is the restart_lsn unless mode is given, in which case it may
+be 'restart' or 'confirmed_flush'.
+
+Requires that the 'postgres' db exists and is accessible.
+
+This is not a test. It die()s on failure.
+
+If the slot is not active, will time out after poll_query_until's timeout.
+
+target_lsn may be any arbitrary lsn, but is typically $primary_node->lsn('insert').
+
+Note that for logical slots, restart_lsn is held down by the oldest in-progress tx.
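+
+A usage sketch with a hypothetical slot name:
+
+  $primary->wait_for_slot_catchup('sub_slot', 'confirmed_flush',
+      $primary->lsn('insert'));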
+
+=cut
+
+sub wait_for_slot_catchup
+{
+ my ($self, $slot_name, $mode, $target_lsn) = @_;
+ $mode = defined($mode) ? $mode : 'restart';
+ if (!($mode eq 'restart' || $mode eq 'confirmed_flush'))
+ {
+ croak "valid modes are restart, confirmed_flush";
+ }
+ croak 'target lsn must be specified' unless defined($target_lsn);
+ print "Waiting for replication slot "
+ . $slot_name . "'s "
+ . $mode
+ . "_lsn to pass "
+ . $target_lsn . " on "
+ . $self->name . "\n";
+ my $query =
+ qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';];
+ $self->poll_query_until('postgres', $query)
+ or croak "timed out waiting for catchup";
+ print "done\n";
+ return;
+}
+
+=pod
+
+=item $node->wait_for_log(regexp, offset)
+
+Waits for the contents of the server log file, starting at the given offset, to
+match the supplied regular expression. Checks the entire log if no offset is
+given. Times out after $TestLib::timeout_default seconds.
+
+If successful, returns the length of the entire log file, in bytes.
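+
+A usage sketch; the log pattern is illustrative, and the returned length is
+saved so that a later wait can skip past this match:
+
+  my $offset = -s $node->logfile;
+  # ... trigger the activity that should emit the message ...
+  $offset = $node->wait_for_log(qr/checkpoint complete/, $offset);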
+
+=cut
+
+sub wait_for_log
+{
+ my ($self, $regexp, $offset) = @_;
+ $offset = 0 unless defined $offset;
+
+ my $max_attempts = 10 * $TestLib::timeout_default;
+ my $attempts = 0;
+
+ while ($attempts < $max_attempts)
+ {
+ my $log = TestLib::slurp_file($self->logfile, $offset);
+
+ return $offset+length($log) if ($log =~ m/$regexp/);
+
+ # Wait 0.1 second before retrying.
+ usleep(100_000);
+
+ $attempts++;
+ }
+
+ croak "timed out waiting for match: $regexp";
+}
+
+=pod
+
+=item $node->query_hash($dbname, $query, @columns)
+
+Execute $query on $dbname, replacing any occurrence of the string __COLUMNS__
+within the query with a comma-separated list of @columns.
+
+If __COLUMNS__ does not appear in the query, its result columns must EXACTLY
+match the order and number (but not necessarily the aliases) of the supplied
+@columns.
+
+The query must return zero or one rows.
+
+Return a hash-ref representation of the results of the query, with any empty
+or null results as defined keys with an empty-string value. There is no way
+to differentiate between null and empty-string result fields.
+
+If the query returns zero rows, return a hash with all columns empty. There
+is no way to differentiate between zero rows returned and a row with only
+null columns.
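+
+An illustrative sketch against a single-row system view:
+
+  my $row = $node->query_hash(
+      'postgres',
+      'SELECT __COLUMNS__ FROM pg_stat_archiver',
+      qw(archived_count last_archived_wal));
+  note "archived so far: $row->{archived_count}";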
+
+=cut
+
+sub query_hash
+{
+ my ($self, $dbname, $query, @columns) = @_;
+ croak 'calls in array context for multi-row results not supported yet'
+ if (wantarray);
+
+ # Replace __COLUMNS__ if found
+ substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) =
+ join(', ', @columns)
+ if index($query, '__COLUMNS__') >= 0;
+ my $result = $self->safe_psql($dbname, $query);
+
+ # hash slice, see http://stackoverflow.com/a/16755894/398670 .
+ #
+ # Fills the hash with empty strings produced by x-operator element
+ # duplication if result is an empty row
+ #
+ my %val;
+ @val{@columns} =
+ $result ne '' ? split(qr/\|/, $result, -1) : ('',) x scalar(@columns);
+ return \%val;
+}
+
+=pod
+
+=item $node->slot(slot_name)
+
+Return a hash-ref of replication slot data for the named slot, or a hash-ref
+with all values '' if the slot is not found. Does not differentiate between
+null and empty string for fields; no field is ever undef.
+
+The restart_lsn and confirmed_flush_lsn fields are returned verbatim, and also
+as a 2-list of [highword, lowword] integers. Since we rely on Perl 5.8.8 we
+can't "use bigint" (it's from 5.20), and we can't assume Math::BigInt is
+available from CPAN either.
+
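+An illustrative sketch with a hypothetical slot name:
+
+  my $slot = $node->slot('phys_slot');
+  is($slot->{slot_type}, 'physical', 'slot has expected type');
+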
+=cut
+
+sub slot
+{
+ my ($self, $slot_name) = @_;
+ my @columns = (
+ 'plugin', 'slot_type', 'datoid', 'database',
+ 'active', 'active_pid', 'xmin', 'catalog_xmin',
+ 'restart_lsn');
+ return $self->query_hash(
+ 'postgres',
+ "SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'",
+ @columns);
+}
+
+=pod
+
+=item $node->pg_recvlogical_upto(dbname, slot_name, endpos, timeout_secs, ...)
+
+Invoke pg_recvlogical to read from slot_name on dbname until LSN endpos, which
+corresponds to pg_recvlogical --endpos. Gives up after timeout (if nonzero).
+
+Disallows pg_recvlogical from internally retrying on error by passing --no-loop.
+
+Plugin options are passed as additional keyword arguments.
+
+If called in scalar context, returns stdout, and die()s on timeout or nonzero return.
+
+If called in array context, returns a tuple of (retval, stdout, stderr, timeout).
+timeout is the IPC::Run::Timeout object whose is_expired method can be tested
+to check for timeout. retval is undef on timeout.
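+
+An illustrative sketch; the slot name, $endpos, and the test_decoding plugin
+options are examples only:
+
+  my $out = $node->pg_recvlogical_upto(
+      'postgres', 'test_slot', $endpos, $TestLib::timeout_default,
+      'include-xids' => '0', 'skip-empty-xacts' => '1');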
+
+=cut
+
+sub pg_recvlogical_upto
+{
+ my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options)
+ = @_;
+
+ local %ENV = $self->_get_env();
+
+ my ($stdout, $stderr);
+
+ my $timeout_exception = 'pg_recvlogical timed out';
+
+ croak 'slot name must be specified' unless defined($slot_name);
+ croak 'endpos must be specified' unless defined($endpos);
+
+ my @cmd = (
+ $self->installed_command('pg_recvlogical'),
+ '-S', $slot_name, '--dbname', $self->connstr($dbname));
+ push @cmd, '--endpos', $endpos;
+ push @cmd, '-f', '-', '--no-loop', '--start';
+
+ while (my ($k, $v) = each %plugin_options)
+ {
+ croak "= is not permitted to appear in replication option name"
+ if ($k =~ qr/=/);
+ push @cmd, "-o", "$k=$v";
+ }
+
+ my $timeout;
+ $timeout =
+ IPC::Run::timeout($timeout_secs, exception => $timeout_exception)
+ if $timeout_secs;
+ my $ret = 0;
+
+ do
+ {
+ local $@;
+ eval {
+ IPC::Run::run(\@cmd, ">", \$stdout, "2>", \$stderr, $timeout);
+ $ret = $?;
+ };
+ my $exc_save = $@;
+ if ($exc_save)
+ {
+
+ # IPC::Run::run threw an exception. re-throw unless it's a
+ # timeout, which we'll handle by testing is_expired
+ die $exc_save
+ if (blessed($exc_save) || $exc_save !~ qr/$timeout_exception/);
+
+ $ret = undef;
+
+ die "Got timeout exception '$exc_save' but timer not expired?!"
+ unless $timeout->is_expired;
+
+ die
+ "$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
+ unless wantarray;
+ }
+ };
+
+ if (wantarray)
+ {
+ return ($ret, $stdout, $stderr, $timeout);
+ }
+ else
+ {
+ die
+ "pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'"
+ if $ret;
+ return $stdout;
+ }
+}
+
+=pod
+
+=item $node->corrupt_page_checksum(file, page_offset)
+
+Intentionally corrupt the checksum field of one page in a file.
+The server must be stopped for this to work reliably.
+
+The file name should be specified relative to the cluster datadir.
+page_offset must be a multiple of the cluster's block size.
+
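+An illustrative sketch; $dboid and $relfilenode are hypothetical values that a
+test would have looked up beforehand:
+
+  $node->stop;
+  $node->corrupt_page_checksum("base/$dboid/$relfilenode", 0);
+  $node->start;
+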
+=cut
+
+sub corrupt_page_checksum
+{
+ my ($self, $file, $page_offset) = @_;
+ my $pgdata = $self->data_dir;
+ my $pageheader;
+
+ open my $fh, '+<', "$pgdata/$file" or die "open($file) failed: $!";
+ binmode $fh;
+ sysseek($fh, $page_offset, 0) or die "sysseek failed: $!";
+ sysread($fh, $pageheader, 24) or die "sysread failed: $!";
+ # This inverts the pd_checksum field (only); see struct PageHeaderData
+ $pageheader ^= "\0\0\0\0\0\0\0\0\xff\xff";
+ sysseek($fh, $page_offset, 0) or die "sysseek failed: $!";
+ syswrite($fh, $pageheader) or die "syswrite failed: $!";
+ close $fh;
+
+ return;
+}
+
+=pod
+
+=back
+
+=cut
+
+1;
diff --git a/src/test/perl/PostgresVersion.pm b/src/test/perl/PostgresVersion.pm
new file mode 100644
index 0000000..4e764c3
--- /dev/null
+++ b/src/test/perl/PostgresVersion.pm
@@ -0,0 +1,136 @@
+############################################################################
+#
+# PostgresVersion.pm
+#
+# Module encapsulating Postgres Version numbers
+#
+# Copyright (c) 2021, PostgreSQL Global Development Group
+#
+############################################################################
+
+=pod
+
+=head1 NAME
+
+PostgresVersion - class representing PostgreSQL version numbers
+
+=head1 SYNOPSIS
+
+ use PostgresVersion;
+
+ my $version = PostgresVersion->new($version_arg);
+
+ # compare two versions
+ my $bool = $version1 <= $version2;
+
+ # or compare with a number
+ $bool = $version < 12;
+
+ # or with a string
+ $bool = $version lt "13.1";
+
+ # interpolate in a string
+ my $stringyval = "version: $version";
+
+=head1 DESCRIPTION
+
+PostgresVersion encapsulates Postgres version numbers, providing parsing
+of common version formats and comparison operations.
+
+=cut
+
+package PostgresVersion;
+
+use strict;
+use warnings;
+
+use Scalar::Util qw(blessed);
+
+use overload
+ '<=>' => \&_version_cmp,
+ 'cmp' => \&_version_cmp,
+ '""' => \&_stringify;
+
+=pod
+
+=head1 METHODS
+
+=over
+
+=item PostgresVersion->new($version)
+
+Create a new PostgresVersion instance.
+
+The argument can be a number like 12, a string like '12.2', or the output
+of a Postgres command like `psql --version` or `pg_config --version`.
+
+=back
+
+=cut
+
+sub new
+{
+ my $class = shift;
+ my $arg = shift;
+
+ chomp $arg;
+
+ # Accept standard formats, in case caller has handed us the output of a
+ # postgres command line tool
+ my $devel;
+ ($arg, $devel) = ($1, $2)
+ if (
+ $arg =~ m!^ # beginning of line
+ (?:\(?PostgreSQL\)?\s)? # ignore PostgreSQL marker
+ (\d+(?:\.\d+)*) # version number, dotted notation
+ (devel|(?:alpha|beta|rc)\d+)? # dev marker - see version_stamp.pl
+ !x);
+
+ # Split into an array
+ my @numbers = split(/\./, $arg);
+
+ # Treat development versions as having a minor/micro version one less than
+ # the first released version of that branch.
+ push @numbers, -1 if ($devel);
+
+ $devel ||= "";
+
+ return bless { str => "$arg$devel", num => \@numbers }, $class;
+}
+
+# Routine which compares the version number arrays stored for the two
+# arguments and returns -1, 0, or 1, allowing comparison between two
+# PostgresVersion objects or a PostgresVersion and a version string or number.
+#
+# If the second argument is not a blessed object we call the constructor
+# to make one.
+#
+# Because we're overloading '<=>' and 'cmp' this function supplies us with
+# all the comparison operators ('<' and friends, 'gt' and friends)
+#
+sub _version_cmp
+{
+ my ($a, $b, $swapped) = @_;
+
+ $b = __PACKAGE__->new($b) unless blessed($b);
+
+ ($a, $b) = ($b, $a) if $swapped;
+
+ my ($an, $bn) = ($a->{num}, $b->{num});
+
+ for (my $idx = 0;; $idx++)
+ {
+ return 0 unless (defined $an->[$idx] && defined $bn->[$idx]);
+ return $an->[$idx] <=> $bn->[$idx]
+ if ($an->[$idx] <=> $bn->[$idx]);
+ }
+}
+
+# Render the version number using the saved string.
+sub _stringify
+{
+ my $self = shift;
+ return $self->{str};
+}
+
+1;
diff --git a/src/test/perl/README b/src/test/perl/README
new file mode 100644
index 0000000..655448c
--- /dev/null
+++ b/src/test/perl/README
@@ -0,0 +1,97 @@
+Perl-based TAP tests
+====================
+
+src/test/perl/ contains shared infrastructure that's used by Perl-based tests
+across the source tree, particularly tests in src/bin and src/test. It's used
+to drive tests for backup and restore, replication, etc - anything that can't
+really be expressed using pg_regress or the isolation test framework.
+
+The tests are invoked via perl's 'prove' command, wrapped in PostgreSQL
+makefiles to handle instance setup etc. See the $(prove_check) and
+$(prove_installcheck) targets in Makefile.global. By default every test in the
+t/ subdirectory is run. Individual test(s) can be run instead by passing
+something like PROVE_TESTS="t/001_testname.pl t/002_othertestname.pl" to make.
+
+You should prefer to write tests using pg_regress in src/test/regress, or
+isolation tester specs in src/test/isolation, if possible. If not, check to
+see if your new tests make sense under an existing tree in src/test, like
+src/test/ssl, or should be added to one of the suites for an existing utility.
+
+Note that all tests and test tools should have perltidy run on them before
+patches are submitted, using perltidy --profile=src/tools/pgindent/perltidyrc
+
+By default, to keep the noise low during runs, we do not set any flags via
+PROVE_FLAGS, but this can be done on the 'make' command line if desired, eg:
+
+make check-world PROVE_FLAGS='--verbose'
+
+Writing tests
+-------------
+
+Tests are written using Perl's Test::More with some PostgreSQL-specific
+infrastructure from src/test/perl providing node management, support for
+invoking 'psql' to run queries and get results, etc. You should read the
+documentation for Test::More before trying to write tests.
+
+Test scripts in the t/ subdirectory of a suite are executed in alphabetical
+order.
+
+Each test script should begin with:
+
+ use strict;
+ use warnings;
+ use PostgresNode;
+ use TestLib;
+ # Replace with the number of tests to execute:
+ use Test::More tests => 1;
+
+then it will generally need to set up one or more nodes, run commands
+against them and evaluate the results. For example:
+
+ my $node = PostgresNode->get_new_node('primary');
+ $node->init;
+ $node->start;
+
+ my $ret = $node->safe_psql('postgres', 'SELECT 1');
+ is($ret, '1', 'SELECT 1 returns 1');
+
+ $node->stop('fast');
+
+Test::More::like entails use of the qr// operator. Avoid Perl 5.8.8 bug
+#39185 by not using the "$" regular expression metacharacter in qr// when also
+using the "/m" modifier. Instead of "$", use "\n" or "(?=\n|\z)".
+
+Test::Builder::Level controls how far up in the call stack a test will look
+when reporting a failure. This should be incremented by any subroutine
+which directly or indirectly calls test routines from Test::More, such as
+ok() or is():
+
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+Read the documentation for more on how to write tests:
+
+ perldoc Test::More
+ perldoc Test::Builder
+
+For available PostgreSQL-specific test methods and some example tests read the
+perldoc for the test modules, e.g.:
+
+ perldoc src/test/perl/PostgresNode.pm
+
+Required Perl
+-------------
+
+Tests must run on perl 5.8.0 and newer. perlbrew is a good way to obtain such
+a Perl; see http://perlbrew.pl .
+
+Just install perlbrew and then run:
+
+ perlbrew --force install 5.8.0
+ perlbrew use 5.8.0
+ perlbrew install-cpanm
+ cpanm install IPC::Run
+
+then re-run configure to ensure the correct Perl is used when running
+tests. To verify that the right Perl was found:
+
+ grep ^PERL= config.log
diff --git a/src/test/perl/RecursiveCopy.pm b/src/test/perl/RecursiveCopy.pm
new file mode 100644
index 0000000..8a9cc72
--- /dev/null
+++ b/src/test/perl/RecursiveCopy.pm
@@ -0,0 +1,157 @@
+
+# Copyright (c) 2021, PostgreSQL Global Development Group
+
+=pod
+
+=head1 NAME
+
+RecursiveCopy - simple recursive copy implementation
+
+=head1 SYNOPSIS
+
+  use RecursiveCopy;
+
+  RecursiveCopy::copypath($from, $to, filterfn => sub { return 1; });
+  RecursiveCopy::copypath($from, $to);
+
+=cut
+
+package RecursiveCopy;
+
+use strict;
+use warnings;
+
+use Carp;
+use File::Basename;
+use File::Copy;
+
+=pod
+
+=head1 DESCRIPTION
+
+=head2 copypath($from, $to, %params)
+
+Recursively copy all files and directories from $from to $to.
+Does not preserve file metadata (e.g., permissions).
+
+Only regular files and subdirectories are copied. Trying to copy other types
+of directory entries raises an exception.
+
+Raises an exception if a file would be overwritten, the source directory can't
+be read, or any I/O operation fails. However, we silently ignore ENOENT on
+open, because when copying from a live database it's possible for a file/dir
+to be deleted after we see its directory entry but before we can open it.
+
+Always returns true.
+
+If the B<filterfn> parameter is given, it must be a subroutine reference.
+This subroutine will be called for each entry in the source directory with its
+relative path as its only parameter; if the subroutine returns true the entry
+is copied, otherwise it is skipped.
+
+On failure the target directory may be in some incomplete state; no cleanup is
+attempted.
+
+=head1 EXAMPLES
+
+ RecursiveCopy::copypath('/some/path', '/empty/dir',
+ filterfn => sub {
+ # omit log/ and contents
+ my $src = shift;
+ return $src ne 'log';
+ }
+ );
+
+=cut
+
+sub copypath
+{
+ my ($base_src_dir, $base_dest_dir, %params) = @_;
+ my $filterfn;
+
+ if (defined $params{filterfn})
+ {
+ croak "if specified, filterfn must be a subroutine reference"
+ unless defined(ref $params{filterfn})
+ and (ref $params{filterfn} eq 'CODE');
+
+ $filterfn = $params{filterfn};
+ }
+ else
+ {
+ $filterfn = sub { return 1; };
+ }
+
+ # Complain if original path is bogus, because _copypath_recurse won't.
+ croak "\"$base_src_dir\" does not exist" if !-e $base_src_dir;
+
+ # Start recursive copy from current directory
+ return _copypath_recurse($base_src_dir, $base_dest_dir, "", $filterfn);
+}
+
+# Recursive private guts of copypath
+sub _copypath_recurse
+{
+ my ($base_src_dir, $base_dest_dir, $curr_path, $filterfn) = @_;
+ my $srcpath = "$base_src_dir/$curr_path";
+ my $destpath = "$base_dest_dir/$curr_path";
+
+ # invoke the filter and skip all further operation if it returns false
+ return 1 unless &$filterfn($curr_path);
+
+ # Check for symlink -- needed only on source dir
+ # (note: this will fall through quietly if file is already gone)
+ croak "Cannot operate on symlink \"$srcpath\"" if -l $srcpath;
+
+ # Abort if destination path already exists. Should we allow directories
+ # to exist already?
+ croak "Destination path \"$destpath\" already exists" if -e $destpath;
+
+ # If this source path is a file, simply copy it to destination with the
+ # same name and we're done.
+ if (-f $srcpath)
+ {
+ my $fh;
+ unless (open($fh, '<', $srcpath))
+ {
+ return 1 if ($!{ENOENT});
+ die "open($srcpath) failed: $!";
+ }
+ copy($fh, $destpath)
+ or die "copy $srcpath -> $destpath failed: $!";
+ close $fh;
+ return 1;
+ }
+
+ # If it's a directory, create it on dest and recurse into it.
+ if (-d $srcpath)
+ {
+ my $directory;
+ unless (opendir($directory, $srcpath))
+ {
+ return 1 if ($!{ENOENT});
+ die "opendir($srcpath) failed: $!";
+ }
+
+ mkdir($destpath) or die "mkdir($destpath) failed: $!";
+
+ while (my $entry = readdir($directory))
+ {
+ next if ($entry eq '.' or $entry eq '..');
+ _copypath_recurse($base_src_dir, $base_dest_dir,
+ $curr_path eq '' ? $entry : "$curr_path/$entry", $filterfn)
+ or die "copypath $srcpath/$entry -> $destpath/$entry failed";
+ }
+
+ closedir($directory);
+ return 1;
+ }
+
+ # If it disappeared from sight, that's OK.
+ return 1 if !-e $srcpath;
+
+ # Else it's some weird file type; complain.
+ croak "Source path \"$srcpath\" is not a regular file or directory";
+}
+
+1;
diff --git a/src/test/perl/SimpleTee.pm b/src/test/perl/SimpleTee.pm
new file mode 100644
index 0000000..681a36a
--- /dev/null
+++ b/src/test/perl/SimpleTee.pm
@@ -0,0 +1,35 @@
+
+# Copyright (c) 2021, PostgreSQL Global Development Group
+
+# A simple 'tee' implementation, using perl tie.
+#
+# Whenever you print to the handle, it gets forwarded to a list of
+# handles. The list of output filehandles is passed to the constructor.
+#
+# This is similar to IO::Tee, but only used for output. Only the PRINT
+# method is currently implemented; that's all we need. We don't want to
+# depend on IO::Tee just for this.
+
+package SimpleTee;
+use strict;
+use warnings;
+
+sub TIEHANDLE
+{
+ my $self = shift;
+ return bless \@_, $self;
+}
+
+sub PRINT
+{
+ my $self = shift;
+ my $ok = 1;
+ for my $fh (@$self)
+ {
+ print $fh @_ or $ok = 0;
+ $fh->flush or $ok = 0;
+ }
+ return $ok;
+}
+
+1;
diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm
new file mode 100644
index 0000000..610050e
--- /dev/null
+++ b/src/test/perl/TestLib.pm
@@ -0,0 +1,982 @@
+
+# Copyright (c) 2021, PostgreSQL Global Development Group
+
+=pod
+
+=head1 NAME
+
+TestLib - helper module for writing PostgreSQL's C<prove> tests.
+
+=head1 SYNOPSIS
+
+ use TestLib;
+
+ # Test basic output of a command
+ program_help_ok('initdb');
+ program_version_ok('initdb');
+ program_options_handling_ok('initdb');
+
+ # Test option combinations
+ command_fails(['initdb', '--invalid-option'],
+ 'command fails with invalid option');
+ my $tempdir = TestLib::tempdir;
+ command_ok('initdb', '-D', $tempdir);
+
+ # Miscellanea
+ print "on Windows" if $TestLib::windows_os;
+ ok(check_mode_recursive($stream_dir, 0700, 0600),
+ "check stream dir permissions");
+ TestLib::system_log('pg_ctl', 'kill', 'QUIT', $slow_pid);
+
+=head1 DESCRIPTION
+
+C<TestLib> contains a set of routines dedicated to environment setup for
+a PostgreSQL regression test run and includes some low-level routines
+aimed at controlling command execution, logging and test functions.
+
+=cut
+
+# This module should never depend on any other PostgreSQL regression test
+# modules.
+
+package TestLib;
+
+use strict;
+use warnings;
+
+use Carp;
+use Config;
+use Cwd;
+use Exporter 'import';
+use Fcntl qw(:mode :seek);
+use File::Basename;
+use File::Find;
+use File::Spec;
+use File::stat qw(stat);
+use File::Temp ();
+use IPC::Run;
+use SimpleTee;
+
+# specify a recent enough version of Test::More to support the
+# done_testing() function
+use Test::More 0.87;
+
+our @EXPORT = qw(
+ generate_ascii_string
+ slurp_dir
+ slurp_file
+ append_to_file
+ check_mode_recursive
+ chmod_recursive
+ check_pg_config
+ dir_symlink
+ system_or_bail
+ system_log
+ run_log
+ run_command
+ pump_until
+
+ command_ok
+ command_fails
+ command_exit_is
+ program_help_ok
+ program_version_ok
+ program_options_handling_ok
+ command_like
+ command_like_safe
+ command_fails_like
+ command_checks_all
+
+ $windows_os
+ $is_msys2
+ $use_unix_sockets
+);
+
+our ($windows_os, $is_msys2, $use_unix_sockets, $timeout_default,
+ $tmp_check, $log_path, $test_logfile);
+
+BEGIN
+{
+
+ # Set to untranslated messages, to be able to compare program output
+ # with expected strings.
+ delete $ENV{LANGUAGE};
+ delete $ENV{LC_ALL};
+ $ENV{LC_MESSAGES} = 'C';
+
+ # This list should be kept in sync with pg_regress.c.
+ my @envkeys = qw (
+ PGCHANNELBINDING
+ PGCLIENTENCODING
+ PGCONNECT_TIMEOUT
+ PGDATA
+ PGDATABASE
+ PGGSSENCMODE
+ PGGSSLIB
+ PGHOSTADDR
+ PGKRBSRVNAME
+ PGPASSFILE
+ PGPASSWORD
+ PGREQUIREPEER
+ PGREQUIRESSL
+ PGSERVICE
+ PGSERVICEFILE
+ PGSSLCERT
+ PGSSLCRL
+ PGSSLCRLDIR
+ PGSSLKEY
+ PGSSLMAXPROTOCOLVERSION
+ PGSSLMINPROTOCOLVERSION
+ PGSSLMODE
+ PGSSLROOTCERT
+ PGSSLSNI
+ PGTARGETSESSIONATTRS
+ PGUSER
+ PGPORT
+ PGHOST
+ PG_COLOR
+ );
+ delete @ENV{@envkeys};
+
+ $ENV{PGAPPNAME} = basename($0);
+
+ # Must be set early
+ $windows_os = $Config{osname} eq 'MSWin32' || $Config{osname} eq 'msys';
+ # Check if this environment is MSYS2.
+ $is_msys2 = $windows_os && -x '/usr/bin/uname' &&
+ `uname -or` =~ /^[2-9].*Msys/;
+
+ if ($windows_os)
+ {
+ require Win32API::File;
+ Win32API::File->import(
+ qw(createFile OsFHandleOpen CloseHandle));
+ }
+
+ # Specifies whether to use Unix sockets for test setups. On
+ # Windows we don't use them by default since it's not universally
+ # supported, but it can be overridden if desired.
+ $use_unix_sockets =
+ (!$windows_os || defined $ENV{PG_TEST_USE_UNIX_SOCKETS});
+
+ $timeout_default = $ENV{PG_TEST_TIMEOUT_DEFAULT};
+ $timeout_default = 180
+ if not defined $timeout_default or $timeout_default eq '';
+}
+
+=pod
+
+=head1 EXPORTED VARIABLES
+
+=over
+
+=item C<$windows_os>
+
+Set to true when running under Windows, except on Cygwin.
+
+=item C<$is_msys2>
+
+Set to true when running under MSYS2.
+
+=back
+
+=cut
+
+INIT
+{
+
+ # Return EPIPE instead of killing the process with SIGPIPE. An affected
+ # test may still fail, but it's more likely to report useful facts.
+ $SIG{PIPE} = 'IGNORE';
+
+ # Determine output directories, and create them. The base path is the
+ # TESTDIR environment variable, which is normally set by the invoking
+ # Makefile.
+ $tmp_check = $ENV{TESTDIR} ? "$ENV{TESTDIR}/tmp_check" : "tmp_check";
+ $log_path = "$tmp_check/log";
+
+ mkdir $tmp_check;
+ mkdir $log_path;
+
+ # Open the test log file, whose name depends on the test name.
+ $test_logfile = basename($0);
+ $test_logfile =~ s/\.[^.]+$//;
+ $test_logfile = "$log_path/regress_log_$test_logfile";
+ open my $testlog, '>', $test_logfile
+ or die "could not open STDOUT to logfile \"$test_logfile\": $!";
+
+ # Hijack STDOUT and STDERR to the log file
+ open(my $orig_stdout, '>&', \*STDOUT);
+ open(my $orig_stderr, '>&', \*STDERR);
+ open(STDOUT, '>&', $testlog);
+ open(STDERR, '>&', $testlog);
+
+ # The test output (ok ...) needs to be printed to the original STDOUT so
+ # that the 'prove' program can parse it, and display it to the user in
+ # real time. But also copy it to the log file, to provide more context
+ # in the log.
+ my $builder = Test::More->builder;
+ my $fh = $builder->output;
+ tie *$fh, "SimpleTee", $orig_stdout, $testlog;
+ $fh = $builder->failure_output;
+ tie *$fh, "SimpleTee", $orig_stderr, $testlog;
+
+ # Enable auto-flushing for all the file handles. Stderr and stdout are
+ # redirected to the same file, and buffering causes the lines to appear
+ # in the log in confusing order.
+ autoflush STDOUT 1;
+ autoflush STDERR 1;
+ autoflush $testlog 1;
+}
+
+END
+{
+
+ # Test files have several ways of causing prove_check to fail:
+ # 1. Exit with a non-zero status.
+ # 2. Call ok(0) or similar, indicating that a constituent test failed.
+ # 3. Deviate from the planned number of tests.
+ #
+ # Preserve temporary directories after (1) and after (2).
+ $File::Temp::KEEP_ALL = 1 unless $? == 0 && all_tests_passing();
+}
+
+=pod
+
+=head1 ROUTINES
+
+=over
+
+=item all_tests_passing()
+
+Return 1 if all the tests run so far have passed. Otherwise, return 0.
+
+=cut
+
+sub all_tests_passing
+{
+ foreach my $status (Test::More->builder->summary)
+ {
+ return 0 unless $status;
+ }
+ return 1;
+}
+
+=pod
+
+=item tempdir(prefix)
+
+Securely create a temporary directory inside C<$tmp_check>, like C<mkdtemp>,
+and return its name. The directory will be removed automatically at the
+end of the tests.
+
+If C<prefix> is given, the new directory is templated as C<${prefix}_XXXX>.
+Otherwise the template is C<tmp_test_XXXX>.
+
+=cut
+
+sub tempdir
+{
+ my ($prefix) = @_;
+ $prefix = "tmp_test" unless defined $prefix;
+ return File::Temp::tempdir(
+ $prefix . '_XXXX',
+ DIR => $tmp_check,
+ CLEANUP => 1);
+}
+
+=pod
+
+=item tempdir_short()
+
+As above, but the directory is outside the build tree so that it has a short
+name, to avoid path length issues.
+
+=cut
+
+sub tempdir_short
+{
+
+ return File::Temp::tempdir(CLEANUP => 1);
+}
+
+=pod
+
+=item has_wal_read_bug()
+
+Returns true if $tmp_check is subject to a sparc64+ext4 bug that causes WAL
+readers to see zeros if another process simultaneously wrote the same offsets.
+Consult this in tests that fail frequently on affected configurations. The
+bug has made streaming standbys fail to advance, reporting corrupt WAL. It
+has made COMMIT PREPARED fail with "could not read two-phase state from WAL".
+Non-WAL PostgreSQL reads haven't been affected, likely because those readers
+and writers have buffering systems in common. See
+https://postgr.es/m/20220116210241.GC756210@rfd.leadboat.com for details.
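+
+A usage sketch, to be placed before the test plan is declared (the skip
+message is illustrative):
+
+  if (TestLib::has_wal_read_bug)
+  {
+      plan skip_all => 'filesystem may lose in-flight WAL';
+  }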
+
+=cut
+
+sub has_wal_read_bug
+{
+ return
+ $Config{osname} eq 'linux'
+ && $Config{archname} =~ /^sparc/
+ && !run_log([ qw(df -x ext4), $tmp_check ], '>', '/dev/null', '2>&1');
+}
+
+=pod
+
+=item system_log(@cmd)
+
+Run (via C<system()>) the command passed as argument; the return
+value is passed through.
+
+=cut
+
+sub system_log
+{
+ print("# Running: " . join(" ", @_) . "\n");
+ return system(@_);
+}
+
+=pod
+
+=item system_or_bail(@cmd)
+
+Run (via C<system()>) the command passed as argument, returning normally
+if the command succeeds.
+On failure, abandon further tests and exit the program.
+
+=cut
+
+sub system_or_bail
+{
+ if (system_log(@_) != 0)
+ {
+ BAIL_OUT("system $_[0] failed");
+ }
+ return;
+}
+
+=pod
+
+=item run_log(@cmd)
+
+Run the given command via C<IPC::Run::run()>, noting it in the log.
+The return value from the command is passed through.
+
+=cut
+
+sub run_log
+{
+ print("# Running: " . join(" ", @{ $_[0] }) . "\n");
+ return IPC::Run::run(@_);
+}
+
+=pod
+
+=item run_command(cmd)
+
+Run (via C<IPC::Run::run()>) the command passed as argument.
+The command's exit status is ignored.
+Returns C<($stdout, $stderr)>.
+
+=cut
+
+sub run_command
+{
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+ chomp($stdout);
+ chomp($stderr);
+ return ($stdout, $stderr);
+}
+
+=pod
+
+=item pump_until(proc, timeout, stream, until)
+
+Pump the process until the given string (a regular expression) is matched
+on the specified stream, or the timeout expires.
+
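+A usage sketch; $h would be a harness from IPC::Run::start and $timer an
+IPC::Run timer, both hypothetical here:
+
+  pump_until($h, $timer, \$stdout, qr/expected response/)
+    or die "process did not produce the expected output";
+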
+=cut
+
+sub pump_until
+{
+ my ($proc, $timeout, $stream, $until) = @_;
+ $proc->pump_nb();
+ while (1)
+ {
+ last if $$stream =~ /$until/;
+ if ($timeout->is_expired)
+ {
+ diag("pump_until: timeout expired when searching for \"$until\" with stream: \"$$stream\"");
+ return 0;
+ }
+ if (not $proc->pumpable())
+ {
+ diag("pump_until: process terminated unexpectedly when searching for \"$until\" with stream: \"$$stream\"");
+ return 0;
+ }
+ $proc->pump();
+ }
+ return 1;
+}
+
+=pod
+
+=item generate_ascii_string(from_char, to_char)
+
+Generate a string made of the given range of ASCII characters.
+
+=cut
+
+sub generate_ascii_string
+{
+ my ($from_char, $to_char) = @_;
+ my $res;
+
+ for my $i ($from_char .. $to_char)
+ {
+ $res .= sprintf("%c", $i);
+ }
+ return $res;
+}
+
+=pod
+
+=item slurp_dir(dir)
+
+Return the complete list of entries in the specified directory.
+
+=cut
+
+sub slurp_dir
+{
+ my ($dir) = @_;
+ opendir(my $dh, $dir)
+ or croak "could not opendir \"$dir\": $!";
+ my @direntries = readdir $dh;
+ closedir $dh;
+ return @direntries;
+}
+
+=pod
+
+=item slurp_file(filename [, $offset])
+
+Return the full contents of the specified file, beginning from an
+offset position if specified.
+
+=cut
+
+sub slurp_file
+{
+ my ($filename, $offset) = @_;
+ local $/;
+ my $contents;
+ my $fh;
+
+ # On windows open file using win32 APIs, to allow us to set the
+ # FILE_SHARE_DELETE flag ("d" below), otherwise other accesses to the file
+ # may fail.
+ if ($Config{osname} ne 'MSWin32')
+ {
+ open($fh, '<', $filename)
+ or croak "could not read \"$filename\": $!";
+ }
+ else
+ {
+ my $fHandle = createFile($filename, "r", "rwd")
+ or croak "could not open \"$filename\": $^E";
+ OsFHandleOpen($fh = IO::Handle->new(), $fHandle, 'r')
+ or croak "could not read \"$filename\": $^E\n";
+ }
+
+ if (defined($offset))
+ {
+ seek($fh, $offset, SEEK_SET)
+ or croak "could not seek \"$filename\": $!";
+ }
+
+ $contents = <$fh>;
+ close $fh;
+
+ return $contents;
+}
+
+=pod
+
+=item append_to_file(filename, str)
+
+Append a string at the end of a given file. (Note: no newline is appended at
+end of file.)
+
+=cut
+
+sub append_to_file
+{
+ my ($filename, $str) = @_;
+ open my $fh, ">>", $filename
+ or croak "could not write \"$filename\": $!";
+ print $fh $str;
+ close $fh;
+ return;
+}
+
+=pod
+
+=item check_mode_recursive(dir, expected_dir_mode, expected_file_mode, ignore_list)
+
+Check that all file/dir modes in a directory match the expected values,
+ignoring files in C<ignore_list> (basename only).
+
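+For example (a sketch; the expected modes depend on the cluster setup):
+
+  ok(check_mode_recursive($node->data_dir, 0700, 0600),
+      'check PGDATA permissions');
+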
+=cut
+
+sub check_mode_recursive
+{
+ my ($dir, $expected_dir_mode, $expected_file_mode, $ignore_list) = @_;
+
+ # Result defaults to true
+ my $result = 1;
+
+ find(
+ {
+ follow_fast => 1,
+ wanted => sub {
+ # Is file in the ignore list?
+				foreach my $ignore ($ignore_list ? @{$ignore_list} : ())
+ {
+ if ("$dir/$ignore" eq $File::Find::name)
+ {
+ return;
+ }
+ }
+
+ # Allow ENOENT. A running server can delete files, such as
+ # those in pg_stat. Other stat() failures are fatal.
+ my $file_stat = stat($File::Find::name);
+ unless (defined($file_stat))
+ {
+ my $is_ENOENT = $!{ENOENT};
+ my $msg = "unable to stat $File::Find::name: $!";
+ if ($is_ENOENT)
+ {
+ warn $msg;
+ return;
+ }
+ else
+ {
+ die $msg;
+ }
+ }
+
+ my $file_mode = S_IMODE($file_stat->mode);
+
+ # Is this a file?
+ if (S_ISREG($file_stat->mode))
+ {
+ if ($file_mode != $expected_file_mode)
+ {
+					printf STDERR "$File::Find::name mode must be %04o\n",
+					  $expected_file_mode;
+
+ $result = 0;
+ return;
+ }
+ }
+
+ # Else a directory?
+ elsif (S_ISDIR($file_stat->mode))
+ {
+ if ($file_mode != $expected_dir_mode)
+ {
+					printf STDERR "$File::Find::name mode must be %04o\n",
+					  $expected_dir_mode;
+
+ $result = 0;
+ return;
+ }
+ }
+
+ # Else something we can't handle
+ else
+ {
+ die "unknown file type for $File::Find::name";
+ }
+ }
+ },
+ $dir);
+
+ return $result;
+}
+
+=pod
+
+=item chmod_recursive(dir, dir_mode, file_mode)
+
+C<chmod> recursively each file and directory within the given directory.
+
+=cut
+
+sub chmod_recursive
+{
+ my ($dir, $dir_mode, $file_mode) = @_;
+
+ find(
+ {
+ follow_fast => 1,
+ wanted => sub {
+ my $file_stat = stat($File::Find::name);
+
+ if (defined($file_stat))
+ {
+ chmod(
+ S_ISDIR($file_stat->mode) ? $dir_mode : $file_mode,
+ $File::Find::name
+ ) or die "unable to chmod $File::Find::name";
+ }
+ }
+ },
+ $dir);
+ return;
+}
+
+=pod
+
+=item check_pg_config(regexp)
+
+Return the number of matches of the given regular expression
+within the installation's C<pg_config.h>.
+
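+An illustrative sketch, skipping a test suite when a feature is not built in:
+
+  plan skip_all => 'no OpenSSL support in this build'
+    unless check_pg_config('#define USE_OPENSSL 1');
+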
+=cut
+
+sub check_pg_config
+{
+ my ($regexp) = @_;
+ my ($stdout, $stderr);
+ my $result = IPC::Run::run [ 'pg_config', '--includedir' ], '>',
+ \$stdout, '2>', \$stderr
+ or die "could not execute pg_config";
+ chomp($stdout);
+ $stdout =~ s/\r$//;
+
+ open my $pg_config_h, '<', "$stdout/pg_config.h" or die "$!";
+ my $match = (grep { /^$regexp/ } <$pg_config_h>);
+ close $pg_config_h;
+ return $match;
+}
+
+=pod
+
+=item dir_symlink(oldname, newname)
+
+Portably create a symlink for a directory. On Windows this creates a junction
+point. Elsewhere it just calls perl's builtin symlink.
+
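+A usage sketch with hypothetical paths:
+
+  my $tempdir = TestLib::tempdir;
+  dir_symlink($node->data_dir . '/pg_wal', "$tempdir/wal_link");
+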
+=cut
+
+sub dir_symlink
+{
+ my $oldname = shift;
+ my $newname = shift;
+ if ($windows_os)
+ {
+ $oldname =~ s,/,\\,g;
+ $newname =~ s,/,\\,g;
+ my $cmd = qq{mklink /j "$newname" "$oldname"};
+ if ($Config{osname} eq 'msys')
+ {
+ # need some indirection on msys
+ $cmd = qq{echo '$cmd' | \$COMSPEC /Q};
+ }
+ system($cmd);
+ }
+ else
+ {
+ symlink $oldname, $newname;
+ }
+ die "No $newname" unless -e $newname;
+}
+
+=pod
+
+=back
+
+=head1 Test::More-LIKE METHODS
+
+=over
+
+=item command_ok(cmd, test_name)
+
+Check that the command runs (via C<run_log>) successfully.
+
+=cut
+
+sub command_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $test_name) = @_;
+ my $result = run_log($cmd);
+ ok($result, $test_name);
+ return;
+}
+
+=pod
+
+=item command_fails(cmd, test_name)
+
+Check that the command fails (when run via C<run_log>).
+
+=cut
+
+sub command_fails
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $test_name) = @_;
+ my $result = run_log($cmd);
+ ok(!$result, $test_name);
+ return;
+}
+
+=pod
+
+=item command_exit_is(cmd, expected, test_name)
+
+Check that the command exit code matches the expected exit code.
+
+=cut
+
+sub command_exit_is
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $expected, $test_name) = @_;
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $h = IPC::Run::start $cmd;
+ $h->finish();
+
+ # On Windows, the exit status of the process is returned directly as the
+ # process's exit code, while on Unix, it's returned in the high bits
+ # of the exit code (see WEXITSTATUS macro in the standard <sys/wait.h>
+ # header file). IPC::Run's result function always returns exit code >> 8,
+ # assuming the Unix convention, which will always return 0 on Windows as
+ # long as the process was not terminated by an exception. To work around
+ # that, use $h->full_results on Windows instead.
+ my $result =
+ ($Config{osname} eq "MSWin32")
+ ? ($h->full_results)[0]
+ : $h->result(0);
+ is($result, $expected, $test_name);
+ return;
+}
+
+=pod
+
+=item program_help_ok(cmd)
+
+Check that the command supports the C<--help> option.
+
+=cut
+
+sub program_help_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ print("# Running: $cmd --help\n");
+ my $result = IPC::Run::run [ $cmd, '--help' ], '>', \$stdout, '2>',
+ \$stderr;
+ ok($result, "$cmd --help exit code 0");
+ isnt($stdout, '', "$cmd --help goes to stdout");
+ is($stderr, '', "$cmd --help nothing to stderr");
+ return;
+}
+
+=pod
+
+=item program_version_ok(cmd)
+
+Check that the command supports the C<--version> option.
+
+=cut
+
+sub program_version_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ print("# Running: $cmd --version\n");
+ my $result = IPC::Run::run [ $cmd, '--version' ], '>', \$stdout, '2>',
+ \$stderr;
+ ok($result, "$cmd --version exit code 0");
+ isnt($stdout, '', "$cmd --version goes to stdout");
+ is($stderr, '', "$cmd --version nothing to stderr");
+ return;
+}
+
+=pod
+
+=item program_options_handling_ok(cmd)
+
+Check that a command with an invalid option returns a non-zero
+exit code and prints an error message to stderr.
+
+=cut
+
+sub program_options_handling_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ print("# Running: $cmd --not-a-valid-option\n");
+ my $result = IPC::Run::run [ $cmd, '--not-a-valid-option' ], '>',
+ \$stdout,
+ '2>', \$stderr;
+ ok(!$result, "$cmd with invalid option nonzero exit code");
+ isnt($stderr, '', "$cmd with invalid option prints error message");
+ return;
+}
+
+=pod
+
+=item command_like(cmd, expected_stdout, test_name)
+
+Check that the command runs successfully and the output
+matches the given regular expression.
+
+=cut
+
+sub command_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $expected_stdout, $test_name) = @_;
+ my ($stdout, $stderr);
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+ ok($result, "$test_name: exit code 0");
+ is($stderr, '', "$test_name: no stderr");
+ like($stdout, $expected_stdout, "$test_name: matches");
+ return;
+}
+
+=pod
+
+=item command_like_safe(cmd, expected_stdout, test_name)
+
+Check that the command runs successfully and the output
+matches the given regular expression. Doesn't assume that the
+output files are closed.
+
+=cut
+
+sub command_like_safe
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ # Doesn't rely on detecting end of file on the file descriptors,
+ # which can fail, causing the process to hang, notably on Msys
+ # when used with 'pg_ctl start'
+ my ($cmd, $expected_stdout, $test_name) = @_;
+ my ($stdout, $stderr);
+ my $stdoutfile = File::Temp->new();
+ my $stderrfile = File::Temp->new();
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $result = IPC::Run::run $cmd, '>', $stdoutfile, '2>', $stderrfile;
+ $stdout = slurp_file($stdoutfile);
+ $stderr = slurp_file($stderrfile);
+ ok($result, "$test_name: exit code 0");
+ is($stderr, '', "$test_name: no stderr");
+ like($stdout, $expected_stdout, "$test_name: matches");
+ return;
+}
+
+=pod
+
+=item command_fails_like(cmd, expected_stderr, test_name)
+
+Check that the command fails and the error message matches
+the given regular expression.
+
+=cut
+
+sub command_fails_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $expected_stderr, $test_name) = @_;
+ my ($stdout, $stderr);
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+ ok(!$result, "$test_name: exit code not 0");
+ like($stderr, $expected_stderr, "$test_name: matches");
+ return;
+}
+
+=pod
+
+=item command_checks_all(cmd, ret, out, err, test_name)
+
+Run a command and check its status and outputs.
+Arguments:
+
+=over
+
+=item C<cmd>: Array reference of command and arguments to run
+
+=item C<ret>: Expected exit code
+
+=item C<out>: Reference to a list of regular expressions that must match stdout
+
+=item C<err>: Reference to a list of regular expressions that must match stderr
+
+=item C<test_name>: test name
+
+=back
+
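+An illustrative sketch; the exit code and message patterns are examples only:
+
+  command_checks_all(
+      [ 'initdb', '--invalid-option' ],
+      1,
+      [ qr/^$/ ],
+      [ qr/unrecognized option/ ],
+      'initdb with an invalid option');
+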
+=cut
+
+sub command_checks_all
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my ($cmd, $expected_ret, $out, $err, $test_name) = @_;
+
+ # run command
+ my ($stdout, $stderr);
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ IPC::Run::run($cmd, '>', \$stdout, '2>', \$stderr);
+
+ # See http://perldoc.perl.org/perlvar.html#%24CHILD_ERROR
+ my $ret = $?;
+ die "command exited with signal " . ($ret & 127)
+ if $ret & 127;
+ $ret = $ret >> 8;
+
+ # check status
+ ok($ret == $expected_ret,
+ "$test_name status (got $ret vs expected $expected_ret)");
+
+ # check stdout
+ for my $re (@$out)
+ {
+ like($stdout, $re, "$test_name stdout /$re/");
+ }
+
+ # check stderr
+ for my $re (@$err)
+ {
+ like($stderr, $re, "$test_name stderr /$re/");
+ }
+
+ return;
+}
+
+=pod
+
+=back
+
+=cut
+
+1;