authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-04 12:19:15 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-04 12:19:15 +0000
commit6eb9c5a5657d1fe77b55cc261450f3538d35a94d (patch)
tree657d8194422a5daccecfd42d654b8a245ef7b4c8 /src/test/perl
parentInitial commit. (diff)
Adding upstream version 13.4.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/test/perl')
-rw-r--r--src/test/perl/Makefile33
-rw-r--r--src/test/perl/PostgresNode.pm2150
-rw-r--r--src/test/perl/README89
-rw-r--r--src/test/perl/RecursiveCopy.pm155
-rw-r--r--src/test/perl/SimpleTee.pm32
-rw-r--r--src/test/perl/TestLib.pm911
6 files changed, 3370 insertions, 0 deletions
diff --git a/src/test/perl/Makefile b/src/test/perl/Makefile
new file mode 100644
index 0000000..acdd160
--- /dev/null
+++ b/src/test/perl/Makefile
@@ -0,0 +1,33 @@
+#-------------------------------------------------------------------------
+#
+# Makefile for src/test/perl
+#
+# Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+#
+# src/test/perl/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/test/perl
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+ifeq ($(enable_tap_tests),yes)
+
+installdirs:
+ $(MKDIR_P) '$(DESTDIR)$(pgxsdir)/$(subdir)'
+
+install: all installdirs
+ $(INSTALL_DATA) $(srcdir)/TestLib.pm '$(DESTDIR)$(pgxsdir)/$(subdir)/TestLib.pm'
+ $(INSTALL_DATA) $(srcdir)/SimpleTee.pm '$(DESTDIR)$(pgxsdir)/$(subdir)/SimpleTee.pm'
+ $(INSTALL_DATA) $(srcdir)/RecursiveCopy.pm '$(DESTDIR)$(pgxsdir)/$(subdir)/RecursiveCopy.pm'
+ $(INSTALL_DATA) $(srcdir)/PostgresNode.pm '$(DESTDIR)$(pgxsdir)/$(subdir)/PostgresNode.pm'
+
+uninstall:
+ rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/TestLib.pm'
+ rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/SimpleTee.pm'
+ rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/RecursiveCopy.pm'
+ rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/PostgresNode.pm'
+
+endif
diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm
new file mode 100644
index 0000000..52f552b
--- /dev/null
+++ b/src/test/perl/PostgresNode.pm
@@ -0,0 +1,2150 @@
+
+=pod
+
+=head1 NAME
+
+PostgresNode - class representing PostgreSQL server instance
+
+=head1 SYNOPSIS
+
+ use PostgresNode;
+
+ my $node = PostgresNode->get_new_node('mynode');
+
+ # Create a data directory with initdb
+ $node->init();
+
+ # Start the PostgreSQL server
+ $node->start();
+
+ # Change a setting and restart
+ $node->append_conf('postgresql.conf', 'hot_standby = on');
+ $node->restart();
+
+ # run a query with psql, like:
+ # echo 'SELECT 1' | psql -qAXt postgres -v ON_ERROR_STOP=1
+ $psql_stdout = $node->safe_psql('postgres', 'SELECT 1');
+
+ # Run psql with a timeout, capturing stdout and stderr
+ # as well as the psql exit code. Pass some extra psql
+ # options. If there's an error from psql raise an exception.
+ my ($stdout, $stderr, $timed_out);
+ my $cmdret = $node->psql('postgres', 'SELECT pg_sleep(600)',
+ stdout => \$stdout, stderr => \$stderr,
+ timeout => 180, timed_out => \$timed_out,
+ extra_params => ['--single-transaction'],
+ on_error_die => 1);
+ print "Sleep timed out" if $timed_out;
+
+ # Similar thing, more convenient in common cases
+ my ($cmdret, $stdout, $stderr) =
+ $node->psql('postgres', 'SELECT 1');
+
+ # run query every second until it returns 't'
+ # or times out
+ $node->poll_query_until('postgres', q|SELECT random() < 0.1;|)
+ or die "timed out";
+
+ # Do an online pg_basebackup
+ my $ret = $node->backup('testbackup1');
+
+ # Take a backup of a running server
+ my $ret = $node->backup_fs_hot('testbackup2');
+
+ # Take a backup of a stopped server
+ $node->stop;
+ my $ret = $node->backup_fs_cold('testbackup3');
+
+ # Restore it to create a new independent node (not a replica)
+ my $replica = get_new_node('replica');
+ $replica->init_from_backup($node, 'testbackup');
+ $replica->start;
+
+ # Stop the server
+ $node->stop('fast');
+
+ # Find a free, unprivileged TCP port to bind some other service to
+ my $port = get_free_port();
+
+=head1 DESCRIPTION
+
+PostgresNode contains a set of routines for working with a PostgreSQL node:
+starting, stopping, backing it up and initializing it with various options.
+It also keeps track of every node created by a given test, so that they can
+be shut down and cleaned up automatically when the test script exits.
+
+In addition to node management, PostgresNode instances have some wrappers
+around Test::More functions to run commands with an environment set up to
+point to the instance.
+
+The IPC::Run module is required.
+
+=cut
+
+package PostgresNode;
+
+use strict;
+use warnings;
+
+use Carp;
+use Config;
+use Cwd;
+use Exporter 'import';
+use Fcntl qw(:mode);
+use File::Basename;
+use File::Path qw(rmtree);
+use File::Spec;
+use File::stat qw(stat);
+use File::Temp ();
+use IPC::Run;
+use RecursiveCopy;
+use Socket;
+use Test::More;
+use TestLib ();
+use Time::HiRes qw(usleep);
+use Scalar::Util qw(blessed);
+
+our @EXPORT = qw(
+ get_new_node
+ get_free_port
+);
+
+our ($use_tcp, $test_localhost, $test_pghost, $last_host_assigned,
+ $last_port_assigned, @all_nodes, $died);
+
+INIT
+{
+
+ # Set PGHOST for backward compatibility. This doesn't work for own_host
+ # nodes, so prefer to not rely on this when writing new tests.
+ $use_tcp = !$TestLib::use_unix_sockets;
+ $test_localhost = "127.0.0.1";
+ $last_host_assigned = 1;
+ $test_pghost = $use_tcp ? $test_localhost : TestLib::tempdir_short;
+ $ENV{PGHOST} = $test_pghost;
+ $ENV{PGDATABASE} = 'postgres';
+
+ # Tracking of last port value assigned to accelerate free port lookup.
+ $last_port_assigned = int(rand() * 16384) + 49152;
+}
+
+=pod
+
+=head1 METHODS
+
+=over
+
+=item PostgresNode::new($class, $name, $pghost, $pgport)
+
+Create a new PostgresNode instance. Does not initdb or start it.
+
+You should generally prefer to use get_new_node() instead since it takes care
+of finding port numbers, registering instances for cleanup, etc.
+
+=cut
+
+sub new
+{
+ my ($class, $name, $pghost, $pgport) = @_;
+ my $testname = basename($0);
+ $testname =~ s/\.[^.]+$//;
+ my $self = {
+ _port => $pgport,
+ _host => $pghost,
+ _basedir => "$TestLib::tmp_check/t_${testname}_${name}_data",
+ _name => $name,
+ _logfile_generation => 0,
+ _logfile_base => "$TestLib::log_path/${testname}_${name}",
+ _logfile => "$TestLib::log_path/${testname}_${name}.log"
+ };
+
+ bless $self, $class;
+ mkdir $self->{_basedir}
+ or
+ BAIL_OUT("could not create data directory \"$self->{_basedir}\": $!");
+ $self->dump_info;
+
+ return $self;
+}
+
+=pod
+
+=item $node->port()
+
+Get the port number assigned to this node. This won't necessarily be a TCP port
+open on the local host since we prefer to use unix sockets if possible.
+
+Use $node->connstr() if you want a connection string.
+
+=cut
+
+sub port
+{
+ my ($self) = @_;
+ return $self->{_port};
+}
+
+=pod
+
+=item $node->host()
+
+Return the host (like PGHOST) for this instance. May be a UNIX socket path.
+
+Use $node->connstr() if you want a connection string.
+
+=cut
+
+sub host
+{
+ my ($self) = @_;
+ return $self->{_host};
+}
+
+=pod
+
+=item $node->basedir()
+
+The directory all the node's files will be within - datadir, archive directory,
+backups, etc.
+
+=cut
+
+sub basedir
+{
+ my ($self) = @_;
+ return $self->{_basedir};
+}
+
+=pod
+
+=item $node->name()
+
+The name assigned to the node at creation time.
+
+=cut
+
+sub name
+{
+ my ($self) = @_;
+ return $self->{_name};
+}
+
+=pod
+
+=item $node->logfile()
+
+Path to the PostgreSQL log file for this instance.
+
+=cut
+
+sub logfile
+{
+ my ($self) = @_;
+ return $self->{_logfile};
+}
+
+=pod
+
+=item $node->connstr()
+
+Get a libpq connection string that will establish a connection to
+this node. Suitable for passing to psql, DBD::Pg, etc.
+
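+For example, a connectivity check could hand the string to another libpq-based
+tool (an illustrative sketch; the test suite does not otherwise use pg_isready):
+
+  TestLib::system_or_bail('pg_isready', '-d', $node->connstr('postgres'));
+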
+=cut
+
+sub connstr
+{
+ my ($self, $dbname) = @_;
+ my $pgport = $self->port;
+ my $pghost = $self->host;
+ if (!defined($dbname))
+ {
+ return "port=$pgport host=$pghost";
+ }
+
+ # Properly escape the database name before using it; only single
+ # quotes and backslashes need to be escaped.
+ $dbname =~ s#\\#\\\\#g;
+ $dbname =~ s#\'#\\\'#g;
+
+ return "port=$pgport host=$pghost dbname='$dbname'";
+}
+
+=pod
+
+=item $node->group_access()
+
+Does the data dir allow group access?
+
+=cut
+
+sub group_access
+{
+ my ($self) = @_;
+
+ my $dir_stat = stat($self->data_dir);
+
+ defined($dir_stat)
+ or die('unable to stat ' . $self->data_dir);
+
+ return (S_IMODE($dir_stat->mode) == 0750);
+}
+
+=pod
+
+=item $node->data_dir()
+
+Returns the path to the data directory. postgresql.conf and pg_hba.conf are
+always here.
+
+=cut
+
+sub data_dir
+{
+ my ($self) = @_;
+ my $res = $self->basedir;
+ return "$res/pgdata";
+}
+
+=pod
+
+=item $node->archive_dir()
+
+If archiving is enabled, WAL files go here.
+
+=cut
+
+sub archive_dir
+{
+ my ($self) = @_;
+ my $basedir = $self->basedir;
+ return "$basedir/archives";
+}
+
+=pod
+
+=item $node->backup_dir()
+
+The output path for backups taken with $node->backup()
+
+=cut
+
+sub backup_dir
+{
+ my ($self) = @_;
+ my $basedir = $self->basedir;
+ return "$basedir/backup";
+}
+
+=pod
+
+=item $node->info()
+
+Return a string containing human-readable diagnostic information (paths, etc)
+about this node.
+
+=cut
+
+sub info
+{
+ my ($self) = @_;
+ my $_info = '';
+ open my $fh, '>', \$_info or die;
+ print $fh "Name: " . $self->name . "\n";
+ print $fh "Data directory: " . $self->data_dir . "\n";
+ print $fh "Backup directory: " . $self->backup_dir . "\n";
+ print $fh "Archive directory: " . $self->archive_dir . "\n";
+ print $fh "Connection string: " . $self->connstr . "\n";
+ print $fh "Log file: " . $self->logfile . "\n";
+ close $fh or die;
+ return $_info;
+}
+
+=pod
+
+=item $node->dump_info()
+
+Print $node->info()
+
+=cut
+
+sub dump_info
+{
+ my ($self) = @_;
+ print $self->info;
+ return;
+}
+
+
+# Internal method to set up trusted pg_hba.conf for replication. Not
+# documented because you shouldn't use it; it's called automatically if needed.
+sub set_replication_conf
+{
+ my ($self) = @_;
+ my $pgdata = $self->data_dir;
+
+ $self->host eq $test_pghost
+ or croak "set_replication_conf only works with the default host";
+
+ open my $hba, '>>', "$pgdata/pg_hba.conf";
+ print $hba "\n# Allow replication (set up by PostgresNode.pm)\n";
+ if ($TestLib::windows_os && !$TestLib::use_unix_sockets)
+ {
+ print $hba
+ "host replication all $test_localhost/32 sspi include_realm=1 map=regress\n";
+ }
+ close $hba;
+ return;
+}
+
+=pod
+
+=item $node->init(...)
+
+Initialize a new cluster for testing.
+
+Authentication is set up so that only the current OS user can access the
+cluster. On Unix, we use Unix domain socket connections, with the socket in
+a directory that's only accessible to the current user to ensure that.
+On Windows, we use SSPI authentication to ensure the same (by pg_regress
+--config-auth).
+
+WAL archiving can be enabled on this node by passing the keyword parameter
+has_archiving => 1. This is disabled by default.
+
+postgresql.conf can be set up for replication by passing the keyword
+parameter allows_streaming => 'logical' or 'physical' (passing 1 will also
+suffice for physical replication) depending on the type of replication that
+should be enabled. This is disabled by default.
+
+The new node is set up in a fast but unsafe configuration where fsync is
+disabled.
+
+=cut
+
+sub init
+{
+ my ($self, %params) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $host = $self->host;
+
+ $params{allows_streaming} = 0 unless defined $params{allows_streaming};
+ $params{has_archiving} = 0 unless defined $params{has_archiving};
+
+ mkdir $self->backup_dir;
+ mkdir $self->archive_dir;
+
+ TestLib::system_or_bail('initdb', '-D', $pgdata, '-A', 'trust', '-N',
+ @{ $params{extra} });
+ TestLib::system_or_bail($ENV{PG_REGRESS}, '--config-auth', $pgdata,
+ @{ $params{auth_extra} });
+
+ open my $conf, '>>', "$pgdata/postgresql.conf";
+ print $conf "\n# Added by PostgresNode.pm\n";
+ print $conf "fsync = off\n";
+ print $conf "restart_after_crash = off\n";
+ print $conf "log_line_prefix = '%m [%p] %q%a '\n";
+ print $conf "log_statement = all\n";
+ print $conf "log_replication_commands = on\n";
+ print $conf "wal_retrieve_retry_interval = '500ms'\n";
+
+ # If a setting tends to affect whether tests pass or fail, print it after
+ # TEMP_CONFIG. Otherwise, print it before TEMP_CONFIG, thereby permitting
+ # overrides. Settings that merely improve performance or ease debugging
+ # belong before TEMP_CONFIG.
+ print $conf TestLib::slurp_file($ENV{TEMP_CONFIG})
+ if defined $ENV{TEMP_CONFIG};
+
+ # XXX Neutralize any stats_temp_directory in TEMP_CONFIG. Nodes running
+ # concurrently must not share a stats_temp_directory.
+ print $conf "stats_temp_directory = 'pg_stat_tmp'\n";
+
+ if ($params{allows_streaming})
+ {
+ if ($params{allows_streaming} eq "logical")
+ {
+ print $conf "wal_level = logical\n";
+ }
+ else
+ {
+ print $conf "wal_level = replica\n";
+ }
+ print $conf "max_wal_senders = 10\n";
+ print $conf "max_replication_slots = 10\n";
+ print $conf "wal_log_hints = on\n";
+ print $conf "hot_standby = on\n";
+ # conservative settings to ensure we can run multiple postmasters:
+ print $conf "shared_buffers = 1MB\n";
+ print $conf "max_connections = 10\n";
+ # limit disk space consumption, too:
+ print $conf "max_wal_size = 128MB\n";
+ }
+ else
+ {
+ print $conf "wal_level = minimal\n";
+ print $conf "max_wal_senders = 0\n";
+ }
+
+ print $conf "port = $port\n";
+ if ($use_tcp)
+ {
+ print $conf "unix_socket_directories = ''\n";
+ print $conf "listen_addresses = '$host'\n";
+ }
+ else
+ {
+ print $conf "unix_socket_directories = '$host'\n";
+ print $conf "listen_addresses = ''\n";
+ }
+ close $conf;
+
+ chmod($self->group_access ? 0640 : 0600, "$pgdata/postgresql.conf")
+ or die("unable to set permissions for $pgdata/postgresql.conf");
+
+ $self->set_replication_conf if $params{allows_streaming};
+ $self->enable_archiving if $params{has_archiving};
+ return;
+}
+
+=pod
+
+=item $node->append_conf(filename, str)
+
+A shortcut method to append to files like pg_hba.conf and postgresql.conf.
+
+Does no validation or sanity checking. Does not reload the configuration
+after writing.
+
+A newline is automatically appended to the string.
+
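+For example, a sketch of changing a setting and applying it (the parameter and
+value are arbitrary illustrations):
+
+  $node->append_conf('postgresql.conf', 'log_min_messages = debug1');
+  $node->reload;
+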
+=cut
+
+sub append_conf
+{
+ my ($self, $filename, $str) = @_;
+
+ my $conffile = $self->data_dir . '/' . $filename;
+
+ TestLib::append_to_file($conffile, $str . "\n");
+
+ chmod($self->group_access() ? 0640 : 0600, $conffile)
+ or die("unable to set permissions for $conffile");
+
+ return;
+}
+
+=pod
+
+=item $node->backup(backup_name)
+
+Create a hot backup with B<pg_basebackup> in subdirectory B<backup_name> of
+B<< $node->backup_dir >>, including the WAL.
+
+By default, WAL files are fetched at the end of the backup, not streamed.
+You can adjust that and other things by passing an array of additional
+B<pg_basebackup> command line options in the keyword parameter backup_options.
+
+You'll have to configure a suitable B<max_wal_senders> on the
+target server since it isn't done by default.
+
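+For example, a sketch that streams WAL during the backup instead of fetching it
+at the end (the backup name is arbitrary):
+
+  $node->backup('stream_backup',
+    backup_options => ['--wal-method=stream']);
+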
+=cut
+
+sub backup
+{
+ my ($self, $backup_name, %params) = @_;
+ my $backup_path = $self->backup_dir . '/' . $backup_name;
+ my $name = $self->name;
+
+ print "# Taking pg_basebackup $backup_name from node \"$name\"\n";
+ TestLib::system_or_bail(
+ 'pg_basebackup', '-D', $backup_path, '-h',
+ $self->host, '-p', $self->port, '--checkpoint',
+ 'fast', '--no-sync',
+ @{ $params{backup_options} });
+ print "# Backup finished\n";
+ return;
+}
+
+=item $node->backup_fs_hot(backup_name)
+
+Create a backup with a filesystem level copy in subdirectory B<backup_name> of
+B<< $node->backup_dir >>, including WAL.
+
+Archiving must be enabled, as B<pg_start_backup()> and B<pg_stop_backup()> are
+used. This is not checked or enforced.
+
+The backup name is passed as the backup label to B<pg_start_backup()>.
+
+=cut
+
+sub backup_fs_hot
+{
+ my ($self, $backup_name) = @_;
+ $self->_backup_fs($backup_name, 1);
+ return;
+}
+
+=item $node->backup_fs_cold(backup_name)
+
+Create a backup with a filesystem level copy in subdirectory B<backup_name> of
+B<< $node->backup_dir >>, including WAL. The server must be
+stopped as no attempt to handle concurrent writes is made.
+
+Use B<backup> or B<backup_fs_hot> if you want to back up a running server.
+
+=cut
+
+sub backup_fs_cold
+{
+ my ($self, $backup_name) = @_;
+ $self->_backup_fs($backup_name, 0);
+ return;
+}
+
+
+# Common sub of backup_fs_hot and backup_fs_cold
+sub _backup_fs
+{
+ my ($self, $backup_name, $hot) = @_;
+ my $backup_path = $self->backup_dir . '/' . $backup_name;
+ my $port = $self->port;
+ my $name = $self->name;
+
+ print "# Taking filesystem backup $backup_name from node \"$name\"\n";
+
+ if ($hot)
+ {
+ my $stdout = $self->safe_psql('postgres',
+ "SELECT * FROM pg_start_backup('$backup_name');");
+ print "# pg_start_backup: $stdout\n";
+ }
+
+ RecursiveCopy::copypath(
+ $self->data_dir,
+ $backup_path,
+ filterfn => sub {
+ my $src = shift;
+ return ($src ne 'log' and $src ne 'postmaster.pid');
+ });
+
+ if ($hot)
+ {
+
+ # We ignore pg_stop_backup's return value. We also assume archiving
+ # is enabled; otherwise the caller will have to copy the remaining
+ # segments.
+ my $stdout =
+ $self->safe_psql('postgres', 'SELECT * FROM pg_stop_backup();');
+ print "# pg_stop_backup: $stdout\n";
+ }
+
+ print "# Backup finished\n";
+ return;
+}
+
+
+
+=pod
+
+=item $node->init_from_backup(root_node, backup_name)
+
+Initialize a node from a backup, which may come from this node or a different
+node. root_node must be a PostgresNode reference, backup_name the string name
+of a backup previously created on that node with $node->backup.
+
+Does not start the node after initializing it.
+
+Streaming replication can be enabled on this node by passing the keyword
+parameter has_streaming => 1. This is disabled by default.
+
+Restoring WAL segments from archives using restore_command can be enabled
+by passing the keyword parameter has_restoring => 1. This is disabled by
+default.
+
+If has_restoring is used, standby mode is used by default. To use
+recovery mode instead, pass the keyword parameter standby => 0.
+
+The backup is copied, leaving the original unmodified. pg_hba.conf is
+unconditionally set to enable replication connections.
+
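+For example, a sketch of setting up a streaming standby from a fresh backup
+(node and backup names are arbitrary):
+
+  $node->backup('my_backup');
+  my $standby = get_new_node('standby');
+  $standby->init_from_backup($node, 'my_backup', has_streaming => 1);
+  $standby->start;
+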
+=cut
+
+sub init_from_backup
+{
+ my ($self, $root_node, $backup_name, %params) = @_;
+ my $backup_path = $root_node->backup_dir . '/' . $backup_name;
+ my $host = $self->host;
+ my $port = $self->port;
+ my $node_name = $self->name;
+ my $root_name = $root_node->name;
+
+ $params{has_streaming} = 0 unless defined $params{has_streaming};
+ $params{has_restoring} = 0 unless defined $params{has_restoring};
+ $params{standby} = 1 unless defined $params{standby};
+
+ print
+ "# Initializing node \"$node_name\" from backup \"$backup_name\" of node \"$root_name\"\n";
+ croak "Backup \"$backup_name\" does not exist at $backup_path"
+ unless -d $backup_path;
+
+ mkdir $self->backup_dir;
+ mkdir $self->archive_dir;
+
+ my $data_path = $self->data_dir;
+ rmdir($data_path);
+ RecursiveCopy::copypath($backup_path, $data_path);
+ chmod(0700, $data_path);
+
+ # Base configuration for this node
+ $self->append_conf(
+ 'postgresql.conf',
+ qq(
+port = $port
+));
+ if ($use_tcp)
+ {
+ $self->append_conf('postgresql.conf', "listen_addresses = '$host'");
+ }
+ else
+ {
+ $self->append_conf('postgresql.conf',
+ "unix_socket_directories = '$host'");
+ }
+ $self->enable_streaming($root_node) if $params{has_streaming};
+ $self->enable_restoring($root_node, $params{standby})
+ if $params{has_restoring};
+ return;
+}
+
+=pod
+
+=item $node->rotate_logfile()
+
+Switch to a new PostgreSQL log file. This does not alter any running
+PostgreSQL process. Subsequent method calls, including pg_ctl invocations,
+will use the new name. Return the new name.
+
+=cut
+
+sub rotate_logfile
+{
+ my ($self) = @_;
+ $self->{_logfile} = sprintf('%s_%d.log',
+ $self->{_logfile_base},
+ ++$self->{_logfile_generation});
+ return $self->{_logfile};
+}
+
+=pod
+
+=item $node->start(%params) => success_or_failure
+
+Wrapper for pg_ctl start
+
+Start the node and wait until it is ready to accept connections.
+
+=over
+
+=item fail_ok => 1
+
+By default, failure terminates the entire F<prove> invocation. If given,
+instead return a true or false value to indicate success or failure.
+
+=back
+
+=cut
+
+sub start
+{
+ my ($self, %params) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $name = $self->name;
+ my $ret;
+
+ BAIL_OUT("node \"$name\" is already running") if defined $self->{_pid};
+
+ print("### Starting node \"$name\"\n");
+
+ {
+ # Temporarily unset PGAPPNAME so that the server doesn't
+ # inherit it. Otherwise this could affect libpqwalreceiver
+ # connections in confusing ways.
+ local %ENV = %ENV;
+ delete $ENV{PGAPPNAME};
+
+ # Note: We set the cluster_name here, not in postgresql.conf (in
+ # sub init) so that it does not get copied to standbys.
+ $ret = TestLib::system_log('pg_ctl', '-D', $self->data_dir, '-l',
+ $self->logfile, '-o', "--cluster-name=$name", 'start');
+ }
+
+ if ($ret != 0)
+ {
+ print "# pg_ctl start failed; logfile:\n";
+ print TestLib::slurp_file($self->logfile);
+ BAIL_OUT("pg_ctl start failed") unless $params{fail_ok};
+ return 0;
+ }
+
+ $self->_update_pid(1);
+ return 1;
+}
+
+=pod
+
+=item $node->kill9()
+
+Send SIGKILL (signal 9) to the postmaster.
+
+Note: if the node is already known to be stopped, this does nothing.
+However, if we think it's running and it's not, it's important for
+this to fail. Otherwise, tests might fail to detect server crashes.
+
+=cut
+
+sub kill9
+{
+ my ($self) = @_;
+ my $name = $self->name;
+ return unless defined $self->{_pid};
+ print "### Killing node \"$name\" using signal 9\n";
+ # kill(9, ...) fails under msys Perl 5.8.8, so fall back on pg_ctl.
+ kill(9, $self->{_pid})
+ or TestLib::system_or_bail('pg_ctl', 'kill', 'KILL', $self->{_pid});
+ $self->{_pid} = undef;
+ return;
+}
+
+=pod
+
+=item $node->stop(mode)
+
+Stop the node using pg_ctl -m $mode and wait for it to stop.
+
+Note: if the node is already known to be stopped, this does nothing.
+However, if we think it's running and it's not, it's important for
+this to fail. Otherwise, tests might fail to detect server crashes.
+
+=cut
+
+sub stop
+{
+ my ($self, $mode) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $name = $self->name;
+ $mode = 'fast' unless defined $mode;
+ return unless defined $self->{_pid};
+ print "### Stopping node \"$name\" using mode $mode\n";
+ TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-m', $mode, 'stop');
+ $self->_update_pid(0);
+ return;
+}
+
+=pod
+
+=item $node->reload()
+
+Reload configuration parameters on the node.
+
+=cut
+
+sub reload
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $name = $self->name;
+ print "### Reloading node \"$name\"\n";
+ TestLib::system_or_bail('pg_ctl', '-D', $pgdata, 'reload');
+ return;
+}
+
+=pod
+
+=item $node->restart()
+
+Wrapper for pg_ctl restart
+
+=cut
+
+sub restart
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $logfile = $self->logfile;
+ my $name = $self->name;
+
+ print "### Restarting node \"$name\"\n";
+
+ {
+ local %ENV = %ENV;
+ delete $ENV{PGAPPNAME};
+
+ TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
+ 'restart');
+ }
+
+ $self->_update_pid(1);
+ return;
+}
+
+=pod
+
+=item $node->promote()
+
+Wrapper for pg_ctl promote
+
+=cut
+
+sub promote
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $logfile = $self->logfile;
+ my $name = $self->name;
+ print "### Promoting node \"$name\"\n";
+ TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
+ 'promote');
+ return;
+}
+
+=pod
+
+=item $node->logrotate()
+
+Wrapper for pg_ctl logrotate
+
+=cut
+
+sub logrotate
+{
+ my ($self) = @_;
+ my $port = $self->port;
+ my $pgdata = $self->data_dir;
+ my $logfile = $self->logfile;
+ my $name = $self->name;
+ print "### Rotating log in node \"$name\"\n";
+ TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
+ 'logrotate');
+ return;
+}
+
+# Internal routine to enable streaming replication on a standby node.
+sub enable_streaming
+{
+ my ($self, $root_node) = @_;
+ my $root_connstr = $root_node->connstr;
+ my $name = $self->name;
+
+ print "### Enabling streaming replication for node \"$name\"\n";
+ $self->append_conf(
+ 'postgresql.conf', qq(
+primary_conninfo='$root_connstr'
+));
+ $self->set_standby_mode();
+ return;
+}
+
+# Internal routine to enable archive recovery command on a standby node
+sub enable_restoring
+{
+ my ($self, $root_node, $standby) = @_;
+ my $path = TestLib::perl2host($root_node->archive_dir);
+ my $name = $self->name;
+
+ print "### Enabling WAL restore for node \"$name\"\n";
+
+ # On Windows, the path specified in the restore command needs to use
+ # double backslashes to work properly and to let the copy command find
+ # the targeted file, so the directory value used in this routine, which
+ # contains only single backslashes, needs to be converted first. Paths
+ # also need to be double-quoted to prevent failures where the path
+ # contains spaces.
+ $path =~ s{\\}{\\\\}g if ($TestLib::windows_os);
+ my $copy_command =
+ $TestLib::windows_os
+ ? qq{copy "$path\\\\%f" "%p"}
+ : qq{cp "$path/%f" "%p"};
+
+ $self->append_conf(
+ 'postgresql.conf', qq(
+restore_command = '$copy_command'
+));
+ if ($standby)
+ {
+ $self->set_standby_mode();
+ }
+ else
+ {
+ $self->set_recovery_mode();
+ }
+ return;
+}
+
+=pod
+
+=item $node->set_recovery_mode()
+
+Place recovery.signal file.
+
+=cut
+
+sub set_recovery_mode
+{
+ my ($self) = @_;
+
+ $self->append_conf('recovery.signal', '');
+ return;
+}
+
+=pod
+
+=item $node->set_standby_mode()
+
+Place standby.signal file.
+
+=cut
+
+sub set_standby_mode
+{
+ my ($self) = @_;
+
+ $self->append_conf('standby.signal', '');
+ return;
+}
+
+# Internal routine to enable archiving
+sub enable_archiving
+{
+ my ($self) = @_;
+ my $path = TestLib::perl2host($self->archive_dir);
+ my $name = $self->name;
+
+ print "### Enabling WAL archiving for node \"$name\"\n";
+
+ # On Windows, the path specified in the archive command needs to use
+ # double backslashes to work properly and to let the copy command find
+ # the targeted file, so the directory value used in this routine, which
+ # contains only single backslashes, needs to be converted first. Paths
+ # also need to be double-quoted to prevent failures where the path
+ # contains spaces.
+ $path =~ s{\\}{\\\\}g if ($TestLib::windows_os);
+ my $copy_command =
+ $TestLib::windows_os
+ ? qq{copy "%p" "$path\\\\%f"}
+ : qq{cp "%p" "$path/%f"};
+
+ # Enable archive_mode and archive_command on node
+ $self->append_conf(
+ 'postgresql.conf', qq(
+archive_mode = on
+archive_command = '$copy_command'
+));
+ return;
+}
+
+# Internal method
+sub _update_pid
+{
+ my ($self, $is_running) = @_;
+ my $name = $self->name;
+
+ # If we can open the PID file, read its first line and that's the PID we
+ # want.
+ if (open my $pidfile, '<', $self->data_dir . "/postmaster.pid")
+ {
+ chomp($self->{_pid} = <$pidfile>);
+ print "# Postmaster PID for node \"$name\" is $self->{_pid}\n";
+ close $pidfile;
+
+ # If we found a pidfile when there shouldn't be one, complain.
+ BAIL_OUT("postmaster.pid unexpectedly present") unless $is_running;
+ return;
+ }
+
+ $self->{_pid} = undef;
+ print "# No postmaster PID for node \"$name\"\n";
+
+ # Complain if we expected to find a pidfile.
+ BAIL_OUT("postmaster.pid unexpectedly not present") if $is_running;
+ return;
+}
+
+=pod
+
+=item PostgresNode->get_new_node(node_name, %params)
+
+Build a new object of class C<PostgresNode> (or of a subclass, if you have
+one), assigning a free port number. Remembers the node, to prevent its port
+number from being reused for another node, and to ensure that it gets
+shut down when the test script exits.
+
+You should generally use this instead of C<PostgresNode::new(...)>.
+
+=over
+
+=item port => [1,65535]
+
+By default, this function assigns a port number to each node. Specify this to
+force a particular port number. The caller is responsible for evaluating
+potential conflicts and privilege requirements.
+
+=item own_host => 1
+
+By default, all nodes use the same PGHOST value. If specified, generate a
+PGHOST specific to this node. This allows multiple nodes to use the same
+port.
+
+=back
+
+For backwards compatibility, it is also exported as a standalone function,
+which can only create objects of class C<PostgresNode>.
+
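+For example, a sketch of two nodes sharing a port number on distinct hosts
+(the node names are arbitrary):
+
+  my $primary = PostgresNode->get_new_node('primary');
+  my $peer = PostgresNode->get_new_node('peer',
+    own_host => 1, port => $primary->port);
+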
+=cut
+
+sub get_new_node
+{
+ my $class = 'PostgresNode';
+ $class = shift if scalar(@_) % 2 != 1;
+ my ($name, %params) = @_;
+
+ # Select a port.
+ my $port;
+ if (defined $params{port})
+ {
+ $port = $params{port};
+ }
+ else
+ {
+ # When selecting a port, we look for an unassigned TCP port number,
+ # even if we intend to use only Unix-domain sockets. This is clearly
+ # necessary on $use_tcp (Windows) configurations, and it seems like a
+ # good idea on Unixen as well.
+ $port = get_free_port();
+ }
+
+ # Select a host.
+ my $host = $test_pghost;
+ if ($params{own_host})
+ {
+ if ($use_tcp)
+ {
+ $last_host_assigned++;
+ $last_host_assigned > 254 and BAIL_OUT("too many own_host nodes");
+ $host = '127.0.0.' . $last_host_assigned;
+ }
+ else
+ {
+ $host = "$test_pghost/$name"; # Assume $name =~ /^[-_a-zA-Z0-9]+$/
+ mkdir $host;
+ }
+ }
+
+ # Lock port number found by creating a new node
+ my $node = $class->new($name, $host, $port);
+
+ # Add node to list of nodes
+ push(@all_nodes, $node);
+
+ return $node;
+}
+
+=pod
+
+=item get_free_port()
+
+Locate an unprivileged (high) TCP port that's not currently bound to
+anything. This is used by get_new_node, and is also exported for use
+by test cases that need to start other, non-Postgres servers.
+
+Ports assigned to existing PostgresNode objects are automatically
+excluded, even if those servers are not currently running.
+
+XXX A port available now may become unavailable by the time we start
+the desired service.
+
+=cut
+
+sub get_free_port
+{
+ my $found = 0;
+ my $port = $last_port_assigned;
+
+ while ($found == 0)
+ {
+
+ # advance $port, wrapping correctly around range end
+ $port = 49152 if ++$port >= 65536;
+ print "# Checking port $port\n";
+
+ # Check first that candidate port number is not included in
+ # the list of already-registered nodes.
+ $found = 1;
+ foreach my $node (@all_nodes)
+ {
+ $found = 0 if ($node->port == $port);
+ }
+
+ # Check to see if anything else is listening on this TCP port.
+ # Seek a port available for all possible listen_addresses values,
+ # so callers can harness this port for the widest range of purposes.
+ # The 0.0.0.0 test achieves that for MSYS, which automatically sets
+ # SO_EXCLUSIVEADDRUSE. Testing 0.0.0.0 is insufficient for Windows
+ # native Perl (https://stackoverflow.com/a/14388707), so we also
+ # have to test individual addresses. Doing that for 127.0.0/24
+ # addresses other than 127.0.0.1 might fail with EADDRNOTAVAIL on
+ # non-Linux, non-Windows kernels.
+ #
+ # Thus, 0.0.0.0 and individual 127.0.0/24 addresses are tested
+ # only on Windows and only when TCP usage is requested.
+ if ($found == 1)
+ {
+ foreach my $addr (qw(127.0.0.1),
+ ($use_tcp && $TestLib::windows_os)
+ ? qw(127.0.0.2 127.0.0.3 0.0.0.0)
+ : ())
+ {
+ if (!can_bind($addr, $port))
+ {
+ $found = 0;
+ last;
+ }
+ }
+ }
+ }
+
+ print "# Found port $port\n";
+
+ # Update port for next time
+ $last_port_assigned = $port;
+
+ return $port;
+}
+
+# Internal routine to check whether a host:port is available to bind
+sub can_bind
+{
+ my ($host, $port) = @_;
+ my $iaddr = inet_aton($host);
+ my $paddr = sockaddr_in($port, $iaddr);
+ my $proto = getprotobyname("tcp");
+
+ socket(SOCK, PF_INET, SOCK_STREAM, $proto)
+ or die "socket failed: $!";
+
+ # As in postmaster, don't use SO_REUSEADDR on Windows
+ setsockopt(SOCK, SOL_SOCKET, SO_REUSEADDR, pack("l", 1))
+ unless $TestLib::windows_os;
+ my $ret = bind(SOCK, $paddr) && listen(SOCK, SOMAXCONN);
+ close(SOCK);
+ return $ret;
+}
+
+# Automatically shut down any still-running nodes when the test script exits.
+# Note that this just stops the postmasters (in the same order the nodes were
+# created in). Any temporary directories are deleted, in an unspecified
+# order, later when the File::Temp objects are destroyed.
+END
+{
+
+ # take care not to change the script's exit value
+ my $exit_code = $?;
+
+ foreach my $node (@all_nodes)
+ {
+ $node->teardown_node;
+
+ # skip clean if we are requested to retain the basedir
+ next if defined $ENV{'PG_TEST_NOCLEAN'};
+
+ # clean basedir on clean test invocation
+ $node->clean_node if $exit_code == 0 && TestLib::all_tests_passing();
+ }
+
+ $? = $exit_code;
+}
+
+=pod
+
+=item $node->teardown_node()
+
+Do an immediate stop of the node
+
+=cut
+
+sub teardown_node
+{
+ my $self = shift;
+
+ $self->stop('immediate');
+ return;
+}
+
+=pod
+
+=item $node->clean_node()
+
+Remove the base directory of the node if the node has been stopped.
+
+=cut
+
+sub clean_node
+{
+ my $self = shift;
+
+ rmtree $self->{_basedir} unless defined $self->{_pid};
+ return;
+}
+
+=pod
+
+=item $node->safe_psql($dbname, $sql) => stdout
+
+Invoke B<psql> to run B<$sql> on B<$dbname> and return its stdout on success.
+Die if the SQL produces an error. Runs with B<ON_ERROR_STOP> set.
+
+Takes optional extra parameters, such as timeout and timed_out, which are
+handled the same way as in $node->psql.
+
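+For example, a sketch with a timeout (the query and timeout value are
+arbitrary):
+
+  my $count = $node->safe_psql('postgres',
+    'SELECT count(*) FROM pg_class', timeout => 30);
+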
+=cut
+
+sub safe_psql
+{
+ my ($self, $dbname, $sql, %params) = @_;
+
+ my ($stdout, $stderr);
+
+ my $ret = $self->psql(
+ $dbname, $sql,
+ %params,
+ stdout => \$stdout,
+ stderr => \$stderr,
+ on_error_die => 1,
+ on_error_stop => 1);
+
+ # psql can emit stderr from NOTICEs etc
+ if ($stderr ne "")
+ {
+ print "#### Begin standard error\n";
+ print $stderr;
+ print "\n#### End standard error\n";
+ }
+
+ return $stdout;
+}
+
+=pod
+
+=item $node->psql($dbname, $sql, %params) => psql_retval
+
+Invoke B<psql> to execute B<$sql> on B<$dbname> and return the return value
+from B<psql>, which is run with on_error_stop by default so that it will
+stop running sql and return 3 if the passed SQL results in an error.
+
+As a convenience, if B<psql> is called in array context it returns an
+array containing ($retval, $stdout, $stderr).
+
+psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc>
+disabled. That may be overridden by passing extra psql parameters.
+
+stdout and stderr are transformed to UNIX line endings if on Windows. Any
+trailing newline is removed.
+
+Dies on failure to invoke psql but not if psql exits with a nonzero
+return code (unless on_error_die specified).
+
+If psql exits because of a signal, an exception is raised.
+
+=over
+
+=item stdout => \$stdout
+
+B<stdout>, if given, must be a scalar reference to which standard output is
+written. If not given, standard output is not redirected and will be printed
+unless B<psql> is called in array context, in which case it's captured and
+returned.
+
+=item stderr => \$stderr
+
+Same as B<stdout> but gets standard error. If the same scalar is passed for
+both B<stdout> and B<stderr> the results may be interleaved unpredictably.
+
+=item on_error_stop => 1
+
+By default, the B<psql> method invokes the B<psql> program with ON_ERROR_STOP=1
+set, so SQL execution is stopped at the first error and exit code 2 is
+returned. Set B<on_error_stop> to 0 to ignore errors instead.
+
+=item on_error_die => 0
+
+By default, this method returns psql's result code. Pass on_error_die to
+instead die with an informative message.
+
+=item timeout => 'interval'
+
+Set a timeout for the psql call as an interval accepted by B<IPC::Run::timer>
+(integer seconds is fine). This method raises an exception on timeout, unless
+the B<timed_out> parameter is also given.
+
+=item timed_out => \$timed_out
+
+If B<timeout> is set and this parameter is given, the scalar it references
+is set to true if the psql call times out.
+
+=item replication => B<value>
+
+If set, add B<replication=value> to the conninfo string.
+Passing the literal value C<database> results in a logical replication
+connection.
+
+=item extra_params => ['--single-transaction']
+
+If given, it must be an array reference containing additional parameters to B<psql>.
+
+=back
+
+e.g.
+
+ my ($stdout, $stderr, $timed_out);
+ my $cmdret = $node->psql('postgres', 'SELECT pg_sleep(600)',
+ stdout => \$stdout, stderr => \$stderr,
+ timeout => 180, timed_out => \$timed_out,
+ extra_params => ['--single-transaction'])
+
+will set $cmdret to undef and $timed_out to a true value.
+
+ $node->psql('postgres', $sql, on_error_die => 1);
+
+dies with an informative message if $sql fails.
+
+=cut
+
+sub psql
+{
+ my ($self, $dbname, $sql, %params) = @_;
+
+ my $stdout = $params{stdout};
+ my $stderr = $params{stderr};
+ my $replication = $params{replication};
+ my $timeout = undef;
+ my $timeout_exception = 'psql timed out';
+ my @psql_params = (
+ 'psql',
+ '-XAtq',
+ '-d',
+ $self->connstr($dbname)
+ . (defined $replication ? " replication=$replication" : ""),
+ '-f',
+ '-');
+
+ # If the caller wants an array and hasn't passed stdout/stderr
+ # references, allocate temporary ones to capture them so we
+ # can return them. Otherwise we won't redirect them at all.
+ if (wantarray)
+ {
+ if (!defined($stdout))
+ {
+ my $temp_stdout = "";
+ $stdout = \$temp_stdout;
+ }
+ if (!defined($stderr))
+ {
+ my $temp_stderr = "";
+ $stderr = \$temp_stderr;
+ }
+ }
+
+ $params{on_error_stop} = 1 unless defined $params{on_error_stop};
+ $params{on_error_die} = 0 unless defined $params{on_error_die};
+
+ push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop};
+ push @psql_params, @{ $params{extra_params} }
+ if defined $params{extra_params};
+
+ $timeout =
+ IPC::Run::timeout($params{timeout}, exception => $timeout_exception)
+ if (defined($params{timeout}));
+
+ ${ $params{timed_out} } = 0 if defined $params{timed_out};
+
+ # IPC::Run would otherwise append to existing contents:
+ $$stdout = "" if ref($stdout);
+ $$stderr = "" if ref($stderr);
+
+ my $ret;
+
+ # Run psql and capture any possible exceptions. If the exception is
+ # because of a timeout and the caller requested to handle that, just return
+ # and set the flag. Otherwise, and for any other exception, rethrow.
+ #
+ # For background, see
+ # https://metacpan.org/pod/release/ETHER/Try-Tiny-0.24/lib/Try/Tiny.pm
+ do
+ {
+ local $@;
+ eval {
+ my @ipcrun_opts = (\@psql_params, '<', \$sql);
+ push @ipcrun_opts, '>', $stdout if defined $stdout;
+ push @ipcrun_opts, '2>', $stderr if defined $stderr;
+ push @ipcrun_opts, $timeout if defined $timeout;
+
+ IPC::Run::run @ipcrun_opts;
+ $ret = $?;
+ };
+ my $exc_save = $@;
+ if ($exc_save)
+ {
+
+ # IPC::Run::run threw an exception. re-throw unless it's a
+ # timeout, which we'll handle by testing is_expired
+ die $exc_save
+ if (blessed($exc_save)
+ || $exc_save !~ /^\Q$timeout_exception\E/);
+
+ $ret = undef;
+
+ die "Got timeout exception '$exc_save' but timer not expired?!"
+ unless $timeout->is_expired;
+
+ if (defined($params{timed_out}))
+ {
+ ${ $params{timed_out} } = 1;
+ }
+ else
+ {
+ die "psql timed out: stderr: '$$stderr'\n"
+ . "while running '@psql_params'";
+ }
+ }
+ };
+
+ # Note: on Windows, IPC::Run seems to convert \r\n to \n in program output
+ # if we're using native Perl, but not if we're using MSys Perl. So do it
+ # by hand in the latter case, here and elsewhere.
+
+ if (defined $$stdout)
+ {
+ $$stdout =~ s/\r\n/\n/g if $Config{osname} eq 'msys';
+ chomp $$stdout;
+ }
+
+ if (defined $$stderr)
+ {
+ $$stderr =~ s/\r\n/\n/g if $Config{osname} eq 'msys';
+ chomp $$stderr;
+ }
+
+ # See http://perldoc.perl.org/perlvar.html#%24CHILD_ERROR
+ # We don't use IPC::Run::Simple to limit dependencies.
+ #
+ # We always die on signal.
+ my $core = $ret & 128 ? " (core dumped)" : "";
+ die "psql exited with signal "
+ . ($ret & 127)
+ . "$core: '$$stderr' while running '@psql_params'"
+ if $ret & 127;
+ $ret = $ret >> 8;
+
+ if ($ret && $params{on_error_die})
+ {
+ die "psql error: stderr: '$$stderr'\nwhile running '@psql_params'"
+ if $ret == 1;
+ die "connection error: '$$stderr'\nwhile running '@psql_params'"
+ if $ret == 2;
+ die
+ "error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
+ if $ret == 3;
+ die "psql returns $ret: '$$stderr'\nwhile running '@psql_params'";
+ }
+
+ if (wantarray)
+ {
+ return ($ret, $$stdout, $$stderr);
+ }
+ else
+ {
+ return $ret;
+ }
+}
+
+=pod
+
+=item $node->interactive_psql($dbname, \$stdin, \$stdout, $timer, %params) => harness
+
+Invoke B<psql> on B<$dbname> and return an IPC::Run harness object,
+which the caller may use to send interactive input to B<psql>.
+The process's stdin is sourced from the $stdin scalar reference,
+and its stdout and stderr go to the $stdout scalar reference.
+ptys are used so that psql thinks it's being called interactively.
+
+The specified timer object is attached to the harness, as well.
+It's the caller's responsibility to select the timeout length, and to
+restart the timer after each command if the timeout is per-command.
+
+psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc>
+disabled. That may be overridden by passing extra psql parameters.
+
+Dies on failure to invoke psql, or if psql fails to connect.
+Errors occurring later are the caller's problem.
+
+Be sure to "finish" the harness when done with it.
+
+The only extra parameter currently accepted is
+
+=over
+
+=item extra_params => ['--single-transaction']
+
+If given, it must be an array reference containing additional parameters to B<psql>.
+
+=back
+
+This requires IO::Pty in addition to IPC::Run.
+
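+A minimal usage sketch (assumes IO::Pty is installed; the query, expected
+output and 30-second timeout are arbitrary):
+
+  my ($in, $out) = ('', '');
+  my $timer = IPC::Run::timer(30);
+  my $h = $node->interactive_psql('postgres', \$in, \$out, $timer);
+  $in .= "SELECT 42 + 1;\n";
+  $h->pump until $out =~ /43/ || $timer->is_expired;
+  $h->finish;
+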
+=cut
+
+sub interactive_psql
+{
+ my ($self, $dbname, $stdin, $stdout, $timer, %params) = @_;
+
+ my @psql_params = ('psql', '-XAt', '-d', $self->connstr($dbname));
+
+ push @psql_params, @{ $params{extra_params} }
+ if defined $params{extra_params};
+
+ # Ensure there is no data waiting to be sent:
+ $$stdin = "" if ref($stdin);
+ # IPC::Run would otherwise append to existing contents:
+ $$stdout = "" if ref($stdout);
+
+ my $harness = IPC::Run::start \@psql_params,
+ '<pty<', $stdin, '>pty>', $stdout, $timer;
+
+ # Pump until we see psql's help banner. This ensures that callers
+ # won't write anything to the pty before it's ready, avoiding an
+ # implementation issue in IPC::Run. Also, it means that psql
+ # connection failures are caught here, relieving callers of
+ # the need to handle those. (Right now, we have no particularly
+ # good handling for errors anyway, but that might be added later.)
+ pump $harness
+ until $$stdout =~ /Type "help" for help/ || $timer->is_expired;
+
+ die "psql startup timed out" if $timer->is_expired;
+
+ return $harness;
+}
+
+=pod
+
+=item $node->poll_query_until($dbname, $query [, $expected ])
+
+Run B<$query> repeatedly, until it returns the B<$expected> result
+('t', or SQL boolean true, by default).
+Continues polling if B<psql> returns an error result.
+Times out after 180 seconds.
+Returns 1 if successful, 0 if timed out.
+
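+For example, a sketch that waits for a standby to finish promotion (the query
+is an arbitrary illustration):
+
+  $node->poll_query_until('postgres', 'SELECT NOT pg_is_in_recovery()')
+    or die "timed out waiting for promotion";
+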
+=cut
+
+sub poll_query_until
+{
+ my ($self, $dbname, $query, $expected) = @_;
+
+ $expected = 't' unless defined($expected); # default value
+
+ my $cmd = [ 'psql', '-XAt', '-d', $self->connstr($dbname) ];
+ my ($stdout, $stderr);
+ my $max_attempts = 180 * 10;
+ my $attempts = 0;
+
+ while ($attempts < $max_attempts)
+ {
+ my $result = IPC::Run::run $cmd, '<', \$query,
+ '>', \$stdout, '2>', \$stderr;
+
+ $stdout =~ s/\r\n/\n/g if $Config{osname} eq 'msys';
+ chomp($stdout);
+ $stderr =~ s/\r\n/\n/g if $Config{osname} eq 'msys';
+ chomp($stderr);
+
+ if ($stdout eq $expected && $stderr eq '')
+ {
+ return 1;
+ }
+
+ # Wait 0.1 second before retrying.
+ usleep(100_000);
+
+ $attempts++;
+ }
+
+ # The query result never matched within 180 seconds. Give up. Print the
+ # output from the last attempt; hopefully that's useful for debugging.
+ diag qq(poll_query_until timed out executing this query:
+$query
+expecting this output:
+$expected
+last actual query output:
+$stdout
+with stderr:
+$stderr);
+ return 0;
+}
+
+=pod
+
+=item $node->command_ok(...)
+
+Runs a shell command like TestLib::command_ok, but with PGHOST and PGPORT set
+so that the command will default to connecting to this PostgresNode.
+
+=cut
+
+sub command_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local $ENV{PGHOST} = $self->host;
+ local $ENV{PGPORT} = $self->port;
+
+ TestLib::command_ok(@_);
+ return;
+}
+
+=pod
+
+=item $node->command_fails(...)
+
+TestLib::command_fails with our connection parameters. See command_ok(...)
+
+=cut
+
+sub command_fails
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local $ENV{PGHOST} = $self->host;
+ local $ENV{PGPORT} = $self->port;
+
+ TestLib::command_fails(@_);
+ return;
+}
+
+=pod
+
+=item $node->command_like(...)
+
+TestLib::command_like with our connection parameters. See command_ok(...)
+
+=cut
+
+sub command_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local $ENV{PGHOST} = $self->host;
+ local $ENV{PGPORT} = $self->port;
+
+ TestLib::command_like(@_);
+ return;
+}
+
+=pod
+
+=item $node->command_checks_all(...)
+
+TestLib::command_checks_all with our connection parameters. See
+command_ok(...)
+
+=cut
+
+sub command_checks_all
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my $self = shift;
+
+ local $ENV{PGHOST} = $self->host;
+ local $ENV{PGPORT} = $self->port;
+
+ TestLib::command_checks_all(@_);
+ return;
+}
+
+=pod
+
+=item $node->issues_sql_like(cmd, expected_sql, test_name)
+
+Run a command on the node, then verify that $expected_sql appears in the
+server log file.
+
+=cut
+
+sub issues_sql_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my ($self, $cmd, $expected_sql, $test_name) = @_;
+
+ local $ENV{PGHOST} = $self->host;
+ local $ENV{PGPORT} = $self->port;
+
+ my $log_location = -s $self->logfile;
+
+ my $result = TestLib::run_log($cmd);
+ ok($result, "@$cmd exit code 0");
+ my $log = TestLib::slurp_file($self->logfile, $log_location);
+ like($log, $expected_sql, "$test_name: SQL found in server log");
+ return;
+}
+
+=pod
+
+=item $node->run_log(...)
+
+Runs a shell command like TestLib::run_log, but with connection parameters set
+so that the command will default to connecting to this PostgresNode.
+
+=cut
+
+sub run_log
+{
+ my $self = shift;
+
+ local $ENV{PGHOST} = $self->host;
+ local $ENV{PGPORT} = $self->port;
+
+ TestLib::run_log(@_);
+ return;
+}
+
+=pod
+
+=item $node->lsn(mode)
+
+Look up WAL locations on the server:
+
+ * insert location (master only, error on replica)
+ * write location (master only, error on replica)
+ * flush location (master only, error on replica)
+ * receive location (always undef on master)
+ * replay location (always undef on master)
+
+mode must be specified.
+
+=cut
+
+sub lsn
+{
+ my ($self, $mode) = @_;
+ my %modes = (
+ 'insert' => 'pg_current_wal_insert_lsn()',
+ 'flush' => 'pg_current_wal_flush_lsn()',
+ 'write' => 'pg_current_wal_lsn()',
+ 'receive' => 'pg_last_wal_receive_lsn()',
+ 'replay' => 'pg_last_wal_replay_lsn()');
+
+ $mode = '<undef>' if !defined($mode);
+ croak "unknown mode for 'lsn': '$mode', valid modes are "
+ . join(', ', keys %modes)
+ if !defined($modes{$mode});
+
+ my $result = $self->safe_psql('postgres', "SELECT $modes{$mode}");
+ chomp($result);
+ if ($result eq '')
+ {
+ return;
+ }
+ else
+ {
+ return $result;
+ }
+}
+
+=pod
+
+=item $node->wait_for_catchup(standby_name, mode, target_lsn)
+
+Wait until the node with application_name standby_name (usually from
+node->name; this also works for logical subscriptions) has a replication
+location in pg_stat_replication that equals or passes the upstream's WAL
+insert point at the time this function is called. By default the replay_lsn
+is waited for, but 'mode' may be specified to wait for any of
+sent|write|flush|replay. The connection catching up must be in a streaming
+state.
+
+If there is no active replication connection from this peer, waits until
+poll_query_until timeout.
+
+Requires that the 'postgres' db exists and is accessible.
+
+target_lsn may be any arbitrary lsn, but is typically $master_node->lsn('insert').
+If omitted, pg_current_wal_lsn() is used.
+
+This is not a test. It die()s on failure.
+
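+A typical call, sketched from the description above ($standby is assumed to be
+a PostgresNode object or application_name string):
+
+  $node->wait_for_catchup($standby, 'replay', $node->lsn('insert'));
+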
+=cut
+
+sub wait_for_catchup
+{
+ my ($self, $standby_name, $mode, $target_lsn) = @_;
+ $mode = defined($mode) ? $mode : 'replay';
+ my %valid_modes =
+ ('sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1);
+ croak "unknown mode $mode for 'wait_for_catchup', valid modes are "
+ . join(', ', keys(%valid_modes))
+ unless exists($valid_modes{$mode});
+
+ # Allow passing of a PostgresNode instance as shorthand
+ if (blessed($standby_name) && $standby_name->isa("PostgresNode"))
+ {
+ $standby_name = $standby_name->name;
+ }
+ my $lsn_expr;
+ if (defined($target_lsn))
+ {
+ $lsn_expr = "'$target_lsn'";
+ }
+ else
+ {
+ $lsn_expr = 'pg_current_wal_lsn()';
+ }
+ print "Waiting for replication conn "
+ . $standby_name . "'s "
+ . $mode
+ . "_lsn to pass "
+ . $lsn_expr . " on "
+ . $self->name . "\n";
+ my $query =
+ qq[SELECT $lsn_expr <= ${mode}_lsn AND state = 'streaming' FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';];
+ $self->poll_query_until('postgres', $query)
+ or croak "timed out waiting for catchup";
+ print "done\n";
+ return;
+}
+
+=pod
+
+=item $node->wait_for_slot_catchup(slot_name, mode, target_lsn)
+
+Wait for the named replication slot to equal or pass the supplied target_lsn.
+The location used is the restart_lsn unless mode is given, in which case it may
+be 'restart' or 'confirmed_flush'.
+
+Requires that the 'postgres' db exists and is accessible.
+
+This is not a test. It die()s on failure.
+
+If the slot is not active, will time out after poll_query_until's timeout.
+
+target_lsn may be any arbitrary lsn, but is typically $master_node->lsn('insert').
+
+Note that for logical slots, restart_lsn is held down by the oldest in-progress tx.
+
+=cut
+
+sub wait_for_slot_catchup
+{
+ my ($self, $slot_name, $mode, $target_lsn) = @_;
+ $mode = defined($mode) ? $mode : 'restart';
+ if (!($mode eq 'restart' || $mode eq 'confirmed_flush'))
+ {
+ croak "valid modes are restart, confirmed_flush";
+ }
+ croak 'target lsn must be specified' unless defined($target_lsn);
+ print "Waiting for replication slot "
+ . $slot_name . "'s "
+ . $mode
+ . "_lsn to pass "
+ . $target_lsn . " on "
+ . $self->name . "\n";
+ my $query =
+ qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';];
+ $self->poll_query_until('postgres', $query)
+ or croak "timed out waiting for catchup";
+ print "done\n";
+ return;
+}
+
+=pod
+
+=item $node->query_hash($dbname, $query, @columns)
+
+Execute $query on $dbname, replacing any appearance of the string __COLUMNS__
+within the query with a comma-separated list of @columns.
+
+If __COLUMNS__ does not appear in the query, its result columns must EXACTLY
+match the order and number (but not necessarily alias) of supplied @columns.
+
+The query must return zero or one rows.
+
+Return a hash-ref representation of the results of the query, with any empty
+or null results as defined keys with an empty-string value. There is no way
+to differentiate between null and empty-string result fields.
+
+If the query returns zero rows, return a hash with all columns empty. There
+is no way to differentiate between zero rows returned and a row with only
+null columns.
+
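+For example, a sketch that reads two arbitrary columns from pg_stat_activity:
+
+  my $row = $node->query_hash('postgres',
+    'SELECT __COLUMNS__ FROM pg_stat_activity LIMIT 1',
+    qw(pid state));
+  print "pid=$row->{pid} state=$row->{state}\n";
+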
+=cut
+
+sub query_hash
+{
+ my ($self, $dbname, $query, @columns) = @_;
+ croak 'calls in array context for multi-row results not supported yet'
+ if (wantarray);
+
+ # Replace __COLUMNS__ if found
+ substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) =
+ join(', ', @columns)
+ if index($query, '__COLUMNS__') >= 0;
+ my $result = $self->safe_psql($dbname, $query);
+
+ # hash slice, see http://stackoverflow.com/a/16755894/398670 .
+ #
+ # Fills the hash with empty strings produced by x-operator element
+ # duplication if result is an empty row
+ #
+ my %val;
+ @val{@columns} =
+ $result ne '' ? split(qr/\|/, $result, -1) : ('',) x scalar(@columns);
+ return \%val;
+}
+
+=pod
+
+=item $node->slot(slot_name)
+
+Return hash-ref of replication slot data for the named slot, or a hash-ref with
+all values '' if not found. Does not differentiate between null and empty string
+for fields; no field is ever undef.
+
+The restart_lsn and confirmed_flush_lsn fields are returned verbatim, and also
+as a 2-list of [highword, lowword] integers. Since we rely on Perl 5.8.8 we
+can't "use bigint" (it's from 5.20), and we can't assume Math::BigInt from
+CPAN is available either.
+
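+For example, a sketch checking a slot from a test script ('test_slot' is an
+arbitrary name; is() comes from Test::More):
+
+  my $slot = $node->slot('test_slot');
+  is($slot->{'slot_type'}, 'logical', 'slot is logical');
+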
+=cut
+
+sub slot
+{
+ my ($self, $slot_name) = @_;
+ my @columns = (
+ 'plugin', 'slot_type', 'datoid', 'database',
+ 'active', 'active_pid', 'xmin', 'catalog_xmin',
+ 'restart_lsn');
+ return $self->query_hash(
+ 'postgres',
+ "SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'",
+ @columns);
+}
+
+=pod
+
+=item $node->pg_recvlogical_upto(self, dbname, slot_name, endpos, timeout_secs, ...)
+
+Invoke pg_recvlogical to read from slot_name on dbname until LSN endpos, which
+corresponds to pg_recvlogical --endpos. Gives up after timeout (if nonzero).
+
+Disallows pg_recvlogical from internally retrying on error by passing --no-loop.
+
+Plugin options are passed as additional keyword arguments.
+
+If called in scalar context, returns stdout, and die()s on timeout or nonzero return.
+
+If called in array context, returns a tuple of (retval, stdout, stderr, timeout).
+timeout is the IPC::Run::Timeout object whose is_expired method can be tested
+to check for timeout. retval is undef on timeout.
+
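+For example, a sketch that drains a logical slot up to the current insert LSN
+(assumes the slot 'test_slot' was created with the test_decoding plugin, whose
+include-xids option is shown; names and the timeout are arbitrary):
+
+  my $endpos = $node->lsn('insert');
+  my $out = $node->pg_recvlogical_upto('postgres', 'test_slot',
+    $endpos, 180, 'include-xids' => '0');
+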
+=cut
+
+sub pg_recvlogical_upto
+{
+ my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options)
+ = @_;
+ my ($stdout, $stderr);
+
+ my $timeout_exception = 'pg_recvlogical timed out';
+
+ croak 'slot name must be specified' unless defined($slot_name);
+ croak 'endpos must be specified' unless defined($endpos);
+
+ my @cmd = (
+ 'pg_recvlogical', '-S', $slot_name, '--dbname',
+ $self->connstr($dbname));
+ push @cmd, '--endpos', $endpos;
+ push @cmd, '-f', '-', '--no-loop', '--start';
+
+ while (my ($k, $v) = each %plugin_options)
+ {
+ croak "= is not permitted to appear in replication option name"
+ if ($k =~ qr/=/);
+ push @cmd, "-o", "$k=$v";
+ }
+
+ my $timeout;
+ $timeout =
+ IPC::Run::timeout($timeout_secs, exception => $timeout_exception)
+ if $timeout_secs;
+ my $ret = 0;
+
+ do
+ {
+ local $@;
+ eval {
+ IPC::Run::run(\@cmd, ">", \$stdout, "2>", \$stderr, $timeout);
+ $ret = $?;
+ };
+ my $exc_save = $@;
+ if ($exc_save)
+ {
+
+ # IPC::Run::run threw an exception. re-throw unless it's a
+ # timeout, which we'll handle by testing is_expired
+ die $exc_save
+ if (blessed($exc_save) || $exc_save !~ qr/$timeout_exception/);
+
+ $ret = undef;
+
+ die "Got timeout exception '$exc_save' but timer not expired?!"
+ unless $timeout->is_expired;
+
+ die
+ "$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
+ unless wantarray;
+ }
+ };
+
+ $stdout =~ s/\r\n/\n/g if $Config{osname} eq 'msys';
+ $stderr =~ s/\r\n/\n/g if $Config{osname} eq 'msys';
+
+ if (wantarray)
+ {
+ return ($ret, $stdout, $stderr, $timeout);
+ }
+ else
+ {
+ die
+ "pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'"
+ if $ret;
+ return $stdout;
+ }
+}
+
+=pod
+
+=back
+
+=cut
+
+1;
diff --git a/src/test/perl/README b/src/test/perl/README
new file mode 100644
index 0000000..c61c3f5
--- /dev/null
+++ b/src/test/perl/README
@@ -0,0 +1,89 @@
+Perl-based TAP tests
+====================
+
+src/test/perl/ contains shared infrastructure that's used by Perl-based tests
+across the source tree, particularly tests in src/bin and src/test. It's used
+to drive tests for backup and restore, replication, etc - anything that can't
+really be expressed using pg_regress or the isolation test framework.
+
+The tests are invoked via perl's 'prove' command, wrapped in PostgreSQL
+makefiles to handle instance setup etc. See the $(prove_check) and
+$(prove_installcheck) targets in Makefile.global. By default every test in the
+t/ subdirectory is run. Individual test(s) can be run instead by passing
+something like PROVE_TESTS="t/001_testname.pl t/002_othertestname.pl" to make.
+
+You should prefer to write tests using pg_regress in src/test/regress, or
+isolation tester specs in src/test/isolation, if possible. If not, check to
+see if your new tests make sense under an existing tree in src/test, like
+src/test/ssl, or should be added to one of the suites for an existing utility.
+
+Note that all tests and test tools should have perltidy run on them before
+patches are submitted, using "perltidy --profile=src/tools/pgindent/perltidyrc".
+
+By default, to keep the noise low during runs, we do not set any flags via
+PROVE_FLAGS, but this can be done on the 'make' command line if desired, eg:
+
+make check-world PROVE_FLAGS='--verbose'
+
+Writing tests
+-------------
+
+Tests are written using Perl's Test::More with some PostgreSQL-specific
+infrastructure from src/test/perl providing node management, support for
+invoking 'psql' to run queries and get results, etc. You should read the
+documentation for Test::More before trying to write tests.
+
+Test scripts in the t/ subdirectory of a suite are executed in alphabetical
+order.
+
+Each test script should begin with:
+
+ use strict;
+ use warnings;
+ use PostgresNode;
+ use TestLib;
+ # Replace with the number of tests to execute:
+ use Test::More tests => 1;
+
+then it will generally need to set up one or more nodes, run commands
+against them and evaluate the results. For example:
+
+ my $node = PostgresNode->get_new_node('master');
+ $node->init;
+ $node->start;
+
+ my $ret = $node->safe_psql('postgres', 'SELECT 1');
+ is($ret, '1', 'SELECT 1 returns 1');
+
+ $node->stop('fast');
+
+Test::More::like entails use of the qr// operator. Avoid Perl 5.8.8 bug
+#39185 by not using the "$" regular expression metacharacter in qr// when also
+using the "/m" modifier. Instead of "$", use "\n" or "(?=\n|\z)".
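+
+For example, write qr/^ERROR:.*\n/m (or qr/^ERROR:.*(?=\n|\z)/m) rather than
+qr/^ERROR:.*$/m.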
+
+Read the Test::More documentation for more on how to write tests:
+
+ perldoc Test::More
+
+For available PostgreSQL-specific test methods and some example tests read the
+perldoc for the test modules, e.g.:
+
+ perldoc src/test/perl/PostgresNode.pm
+
+Required Perl
+-------------
+
+Tests must run on perl 5.8.0 and newer. perlbrew is a good way to obtain such
+a Perl; see http://perlbrew.pl .
+
+Just install it and then run:
+
+ perlbrew --force install 5.8.0
+ perlbrew use 5.8.0
+ perlbrew install-cpanm
+ cpanm install IPC::Run
+
+then re-run configure to ensure the correct Perl is used when running
+tests. To verify that the right Perl was found:
+
+ grep ^PERL= config.log
diff --git a/src/test/perl/RecursiveCopy.pm b/src/test/perl/RecursiveCopy.pm
new file mode 100644
index 0000000..baf5d0a
--- /dev/null
+++ b/src/test/perl/RecursiveCopy.pm
@@ -0,0 +1,155 @@
+
+=pod
+
+=head1 NAME
+
+RecursiveCopy - simple recursive copy implementation
+
+=head1 SYNOPSIS
+
+use RecursiveCopy;
+
+RecursiveCopy::copypath($from, $to, filterfn => sub { return 1; });
+RecursiveCopy::copypath($from, $to);
+
+=cut
+
+package RecursiveCopy;
+
+use strict;
+use warnings;
+
+use Carp;
+use File::Basename;
+use File::Copy;
+
+=pod
+
+=head1 DESCRIPTION
+
+=head2 copypath($from, $to, %params)
+
+Recursively copy all files and directories from $from to $to.
+Does not preserve file metadata (e.g., permissions).
+
+Only regular files and subdirectories are copied. Trying to copy other types
+of directory entries raises an exception.
+
+Raises an exception if a file would be overwritten, the source directory can't
+be read, or any I/O operation fails. However, we silently ignore ENOENT on
+open, because when copying from a live database it's possible for a file/dir
+to be deleted after we see its directory entry but before we can open it.
+
+Always returns true.
+
+If the B<filterfn> parameter is given, it must be a subroutine reference.
+This subroutine will be called for each entry in the source directory with its
+relative path as its only parameter; if the subroutine returns true the entry
+is copied, otherwise it is skipped.
+
+On failure the target directory may be in some incomplete state; no cleanup is
+attempted.
+
+=head1 EXAMPLES
+
+ RecursiveCopy::copypath('/some/path', '/empty/dir',
+ filterfn => sub {
+ # omit log/ and contents
+ my $src = shift;
+ return $src ne 'log';
+ }
+ );
+
+=cut
+
+sub copypath
+{
+ my ($base_src_dir, $base_dest_dir, %params) = @_;
+ my $filterfn;
+
+ if (defined $params{filterfn})
+ {
+ croak "if specified, filterfn must be a subroutine reference"
+ unless defined(ref $params{filterfn})
+ and (ref $params{filterfn} eq 'CODE');
+
+ $filterfn = $params{filterfn};
+ }
+ else
+ {
+ $filterfn = sub { return 1; };
+ }
+
+ # Complain if original path is bogus, because _copypath_recurse won't.
+ croak "\"$base_src_dir\" does not exist" if !-e $base_src_dir;
+
+ # Start recursive copy from current directory
+ return _copypath_recurse($base_src_dir, $base_dest_dir, "", $filterfn);
+}
+
+# Recursive private guts of copypath
+sub _copypath_recurse
+{
+ my ($base_src_dir, $base_dest_dir, $curr_path, $filterfn) = @_;
+ my $srcpath = "$base_src_dir/$curr_path";
+ my $destpath = "$base_dest_dir/$curr_path";
+
+ # invoke the filter and skip all further operation if it returns false
+ return 1 unless &$filterfn($curr_path);
+
+ # Check for symlink -- needed only on source dir
+ # (note: this will fall through quietly if file is already gone)
+ croak "Cannot operate on symlink \"$srcpath\"" if -l $srcpath;
+
+ # Abort if destination path already exists. Should we allow directories
+ # to exist already?
+ croak "Destination path \"$destpath\" already exists" if -e $destpath;
+
+ # If this source path is a file, simply copy it to destination with the
+ # same name and we're done.
+ if (-f $srcpath)
+ {
+ my $fh;
+ unless (open($fh, '<', $srcpath))
+ {
+ return 1 if ($!{ENOENT});
+ die "open($srcpath) failed: $!";
+ }
+ copy($fh, $destpath)
+ or die "copy $srcpath -> $destpath failed: $!";
+ close $fh;
+ return 1;
+ }
+
+ # If it's a directory, create it on dest and recurse into it.
+ if (-d $srcpath)
+ {
+ my $directory;
+ unless (opendir($directory, $srcpath))
+ {
+ return 1 if ($!{ENOENT});
+ die "opendir($srcpath) failed: $!";
+ }
+
+ mkdir($destpath) or die "mkdir($destpath) failed: $!";
+
+ while (my $entry = readdir($directory))
+ {
+ next if ($entry eq '.' or $entry eq '..');
+ _copypath_recurse($base_src_dir, $base_dest_dir,
+ $curr_path eq '' ? $entry : "$curr_path/$entry", $filterfn)
+ or die "copypath $srcpath/$entry -> $destpath/$entry failed";
+ }
+
+ closedir($directory);
+ return 1;
+ }
+
+ # If it disappeared from sight, that's OK.
+ return 1 if !-e $srcpath;
+
+ # Else it's some weird file type; complain.
+ croak "Source path \"$srcpath\" is not a regular file or directory";
+}
+
+1;
diff --git a/src/test/perl/SimpleTee.pm b/src/test/perl/SimpleTee.pm
new file mode 100644
index 0000000..74409bd
--- /dev/null
+++ b/src/test/perl/SimpleTee.pm
@@ -0,0 +1,32 @@
+# A simple 'tee' implementation, using perl tie.
+#
+# Whenever you print to the handle, it gets forwarded to a list of
+# handles. The list of output filehandles is passed to the constructor.
+#
+# This is similar to IO::Tee, but only used for output. Only the PRINT
+# method is currently implemented; that's all we need. We don't want to
+# depend on IO::Tee just for this.
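+#
+# A typical use is to tie a filehandle's glob to this class, passing the
+# destination handles as the remaining arguments; TestLib, for instance,
+# duplicates Test::More's output to the console and the log file with:
+#
+#   tie *$fh, "SimpleTee", $orig_stdout, $testlog;
+#
+# after which every print to $fh is forwarded to both handles.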
+
+package SimpleTee;
+use strict;
+use warnings;
+
+sub TIEHANDLE
+{
+ my $self = shift;
+ return bless \@_, $self;
+}
+
+sub PRINT
+{
+ my $self = shift;
+ my $ok = 1;
+ for my $fh (@$self)
+ {
+ print $fh @_ or $ok = 0;
+ $fh->flush or $ok = 0;
+ }
+ return $ok;
+}
+
+1;
diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm
new file mode 100644
index 0000000..deb3930
--- /dev/null
+++ b/src/test/perl/TestLib.pm
@@ -0,0 +1,911 @@
+
+=pod
+
+=head1 NAME
+
+TestLib - helper module for writing PostgreSQL's C<prove> tests.
+
+=head1 SYNOPSIS
+
+ use TestLib;
+
+ # Test basic output of a command
+ program_help_ok('initdb');
+ program_version_ok('initdb');
+ program_options_handling_ok('initdb');
+
+ # Test option combinations
+ command_fails(['initdb', '--invalid-option'],
+ 'command fails with invalid option');
+ my $tempdir = TestLib::tempdir;
+ command_ok('initdb', '-D', $tempdir);
+
+ # Miscellanea
+ print "on Windows" if $TestLib::windows_os;
+ my $path = TestLib::perl2host($backup_dir);
+ ok(check_mode_recursive($stream_dir, 0700, 0600),
+ "check stream dir permissions");
+ TestLib::system_log('pg_ctl', 'kill', 'QUIT', $slow_pid);
+
+=head1 DESCRIPTION
+
+C<TestLib> contains a set of routines dedicated to environment setup for
+a PostgreSQL regression test run and includes some low-level routines
+aimed at controlling command execution, logging and test functions.
+
+=cut
+
+# This module should never depend on any other PostgreSQL regression test
+# modules.
+
+package TestLib;
+
+use strict;
+use warnings;
+
+use Config;
+use Cwd;
+use Exporter 'import';
+use Fcntl qw(:mode :seek);
+use File::Basename;
+use File::Find;
+use File::Spec;
+use File::stat qw(stat);
+use File::Temp ();
+use IPC::Run;
+use SimpleTee;
+
+# specify a recent enough version of Test::More to support the
+# done_testing() function
+use Test::More 0.87;
+
+our @EXPORT = qw(
+ generate_ascii_string
+ slurp_dir
+ slurp_file
+ append_to_file
+ check_mode_recursive
+ chmod_recursive
+ check_pg_config
+ system_or_bail
+ system_log
+ run_log
+ run_command
+
+ command_ok
+ command_fails
+ command_exit_is
+ program_help_ok
+ program_version_ok
+ program_options_handling_ok
+ command_like
+ command_like_safe
+ command_fails_like
+ command_checks_all
+
+ $windows_os
+ $use_unix_sockets
+);
+
+our ($windows_os, $use_unix_sockets, $tmp_check, $log_path, $test_logfile);
+
+BEGIN
+{
+
+ # Set to untranslated messages, to be able to compare program output
+ # with expected strings.
+ delete $ENV{LANGUAGE};
+ delete $ENV{LC_ALL};
+ $ENV{LC_MESSAGES} = 'C';
+
+ # This list should be kept in sync with pg_regress.c.
+ my @envkeys = qw (
+ PGCHANNELBINDING
+ PGCLIENTENCODING
+ PGCONNECT_TIMEOUT
+ PGDATA
+ PGDATABASE
+ PGGSSENCMODE
+ PGGSSLIB
+ PGHOSTADDR
+ PGKRBSRVNAME
+ PGPASSFILE
+ PGPASSWORD
+ PGREQUIREPEER
+ PGREQUIRESSL
+ PGSERVICE
+ PGSERVICEFILE
+ PGSSLCERT
+ PGSSLCRL
+ PGSSLKEY
+ PGSSLMAXPROTOCOLVERSION
+ PGSSLMINPROTOCOLVERSION
+ PGSSLMODE
+ PGSSLROOTCERT
+ PGTARGETSESSIONATTRS
+ PGUSER
+ PGPORT
+ PGHOST
+ PG_COLOR
+ );
+ delete @ENV{@envkeys};
+
+ $ENV{PGAPPNAME} = basename($0);
+
+ # Must be set early
+ $windows_os = $Config{osname} eq 'MSWin32' || $Config{osname} eq 'msys';
+ if ($windows_os)
+ {
+ require Win32API::File;
+ Win32API::File->import(qw(createFile OsFHandleOpen CloseHandle setFilePointer));
+ }
+
+ # Specifies whether to use Unix sockets for test setups. On
+ # Windows we don't use them by default since it's not universally
+ # supported, but it can be overridden if desired.
+ $use_unix_sockets =
+ (!$windows_os || defined $ENV{PG_TEST_USE_UNIX_SOCKETS});
+}
+
+=pod
+
+=head1 EXPORTED VARIABLES
+
+=over
+
+=item C<$windows_os>
+
+Set to true when running under Windows, except on Cygwin.
+
+=back
+
+=cut
+
+INIT
+{
+
+ # Return EPIPE instead of killing the process with SIGPIPE. An affected
+ # test may still fail, but it's more likely to report useful facts.
+ $SIG{PIPE} = 'IGNORE';
+
+ # Determine output directories, and create them. The base path is the
+ # TESTDIR environment variable, which is normally set by the invoking
+ # Makefile.
+ $tmp_check = $ENV{TESTDIR} ? "$ENV{TESTDIR}/tmp_check" : "tmp_check";
+ $log_path = "$tmp_check/log";
+
+ mkdir $tmp_check;
+ mkdir $log_path;
+
+ # Open the test log file, whose name depends on the test name.
+ $test_logfile = basename($0);
+ $test_logfile =~ s/\.[^.]+$//;
+ $test_logfile = "$log_path/regress_log_$test_logfile";
+ open my $testlog, '>', $test_logfile
+ or die "could not open STDOUT to logfile \"$test_logfile\": $!";
+
+ # Hijack STDOUT and STDERR to the log file
+ open(my $orig_stdout, '>&', \*STDOUT);
+ open(my $orig_stderr, '>&', \*STDERR);
+ open(STDOUT, '>&', $testlog);
+ open(STDERR, '>&', $testlog);
+
+ # The test output (ok ...) needs to be printed to the original STDOUT so
+ # that the 'prove' program can parse it, and display it to the user in
+ # real time. But also copy it to the log file, to provide more context
+ # in the log.
+ my $builder = Test::More->builder;
+ my $fh = $builder->output;
+ tie *$fh, "SimpleTee", $orig_stdout, $testlog;
+ $fh = $builder->failure_output;
+ tie *$fh, "SimpleTee", $orig_stderr, $testlog;
+
+ # Enable auto-flushing for all the file handles. Stderr and stdout are
+ # redirected to the same file, and buffering causes the lines to appear
+ # in the log in confusing order.
+ autoflush STDOUT 1;
+ autoflush STDERR 1;
+ autoflush $testlog 1;
+}
+
+END
+{
+
+ # Test files have several ways of causing prove_check to fail:
+ # 1. Exit with a non-zero status.
+ # 2. Call ok(0) or similar, indicating that a constituent test failed.
+ # 3. Deviate from the planned number of tests.
+ #
+ # Preserve temporary directories after (1) and after (2).
+ $File::Temp::KEEP_ALL = 1 unless $? == 0 && all_tests_passing();
+}
+
+=pod
+
+=head1 ROUTINES
+
+=over
+
+=item all_tests_passing()
+
+Return 1 if all the tests run so far have passed. Otherwise, return 0.
+
+=cut
+
+sub all_tests_passing
+{
+ foreach my $status (Test::More->builder->summary)
+ {
+ return 0 unless $status;
+ }
+ return 1;
+}
+
+=pod
+
+=item tempdir(prefix)
+
+Securely create a temporary directory inside C<$tmp_check>, like C<mkdtemp>,
+and return its name. The directory will be removed automatically at the
+end of the tests.
+
+If C<prefix> is given, the new directory is templated as C<${prefix}_XXXX>.
+Otherwise the template is C<tmp_test_XXXX>.
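+
+For example, to create a scratch directory with a recognizable prefix
+(the prefix shown is arbitrary):
+
+  my $backup_path = TestLib::tempdir('backup');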
+
+=cut
+
+sub tempdir
+{
+ my ($prefix) = @_;
+ $prefix = "tmp_test" unless defined $prefix;
+ return File::Temp::tempdir(
+ $prefix . '_XXXX',
+ DIR => $tmp_check,
+ CLEANUP => 1);
+}
+
+=pod
+
+=item tempdir_short()
+
+As above, but the directory is outside the build tree so that it has a short
+name, to avoid path length issues.
+
+=cut
+
+sub tempdir_short
+{
+
+ return File::Temp::tempdir(CLEANUP => 1);
+}
+
+=pod
+
+=item perl2host()
+
+Translate a Perl file name to a host file name. Currently, this is a no-op
+except for the case of Perl=msys and host=mingw32. The subject need not
+exist, but its parent directory must exist.
+
+=cut
+
+sub perl2host
+{
+ my ($subject) = @_;
+ return $subject unless $Config{osname} eq 'msys';
+ my $here = cwd;
+ my $leaf;
+ if (chdir $subject)
+ {
+ $leaf = '';
+ }
+ else
+ {
+ $leaf = '/' . basename $subject;
+ my $parent = dirname $subject;
+ chdir $parent or die "could not chdir \"$parent\": $!";
+ }
+
+ # this odd way of calling 'pwd -W' is the only way that seems to work.
+ my $dir = qx{sh -c "pwd -W"};
+ chomp $dir;
+ chdir $here;
+ return $dir . $leaf;
+}
+
+=pod
+
+=item system_log(@cmd)
+
+Run (via C<system()>) the command passed as argument; the return
+value is passed through.
+
+=cut
+
+sub system_log
+{
+ print("# Running: " . join(" ", @_) . "\n");
+ return system(@_);
+}
+
+=pod
+
+=item system_or_bail(@cmd)
+
+Run (via C<system()>) the command passed as argument, and return
+if the command is successful.
+On failure, abandon further tests and exit the program.
+
+=cut
+
+sub system_or_bail
+{
+ if (system_log(@_) != 0)
+ {
+ BAIL_OUT("system $_[0] failed");
+ }
+ return;
+}
+
+=pod
+
+=item run_log(@cmd)
+
+Run the given command via C<IPC::Run::run()>, noting it in the log.
+The return value from the command is passed through.
+
+=cut
+
+sub run_log
+{
+ print("# Running: " . join(" ", @{ $_[0] }) . "\n");
+ return IPC::Run::run(@_);
+}
+
+=pod
+
+=item run_command(cmd)
+
+Run (via C<IPC::Run::run()>) the command passed as argument.
+The command's exit status is ignored; the return value is the command's
+C<($stdout, $stderr)>, each with its trailing newline removed.
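+
+For example, to capture the output of a client program (the command shown is
+just an illustration):
+
+  my ($stdout, $stderr) = run_command([ 'pg_config', '--version' ]);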
+
+=cut
+
+sub run_command
+{
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+ chomp($stdout);
+ chomp($stderr);
+ return ($stdout, $stderr);
+}
+
+=pod
+
+=item generate_ascii_string(from_char, to_char)
+
+Generate a string made of the given range of ASCII characters.
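+
+For example, C<generate_ascii_string(65, 67)> returns C<ABC>.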
+
+=cut
+
+sub generate_ascii_string
+{
+ my ($from_char, $to_char) = @_;
+ my $res;
+
+ for my $i ($from_char .. $to_char)
+ {
+ $res .= sprintf("%c", $i);
+ }
+ return $res;
+}
+
+=pod
+
+=item slurp_dir(dir)
+
+Return the complete list of entries in the specified directory.
+
+=cut
+
+sub slurp_dir
+{
+ my ($dir) = @_;
+ opendir(my $dh, $dir)
+ or die "could not opendir \"$dir\": $!";
+ my @direntries = readdir $dh;
+ closedir $dh;
+ return @direntries;
+}
+
+=pod
+
+=item slurp_file(filename [, $offset])
+
+Return the full contents of the specified file, beginning from an
+offset position if specified.
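+
+For example, a sketch in which C<$logfile> and C<$log_offset> are assumed to
+hold a log file path and a previously recorded offset:
+
+  my $new_log_entries = slurp_file($logfile, $log_offset);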
+
+=cut
+
+sub slurp_file
+{
+ my ($filename, $offset) = @_;
+ local $/;
+ my $contents;
+ if ($Config{osname} ne 'MSWin32')
+ {
+ open(my $in, '<', $filename)
+ or die "could not read \"$filename\": $!";
+ if (defined($offset))
+ {
+ seek($in, $offset, SEEK_SET)
+ or die "could not seek \"$filename\": $!";
+ }
+ $contents = <$in>;
+ close $in;
+ }
+ else
+ {
+ my $fHandle = createFile($filename, "r", "rwd")
+ or die "could not open \"$filename\": $^E";
+ OsFHandleOpen(my $fh = IO::Handle->new(), $fHandle, 'r')
+ or die "could not read \"$filename\": $^E\n";
+ if (defined($offset))
+ {
+ setFilePointer($fh, $offset, qw(FILE_BEGIN))
+ or die "could not seek \"$filename\": $^E\n";
+ }
+ $contents = <$fh>;
+ CloseHandle($fHandle)
+ or die "could not close \"$filename\": $^E\n";
+ }
+ $contents =~ s/\r\n/\n/g if $Config{osname} eq 'msys';
+ return $contents;
+}
+
+=pod
+
+=item append_to_file(filename, str)
+
+Append a string to the end of a given file. (Note: no newline is appended, so
+include one in the string if a line ending is wanted.)
+
+=cut
+
+sub append_to_file
+{
+ my ($filename, $str) = @_;
+ open my $fh, ">>", $filename
+ or die "could not write \"$filename\": $!";
+ print $fh $str;
+ close $fh;
+ return;
+}
+
+=pod
+
+=item check_mode_recursive(dir, expected_dir_mode, expected_file_mode, ignore_list)
+
+Check that all file/dir modes in a directory match the expected values,
+ignoring files in C<ignore_list> (basename only).
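+
+For example, a sketch in which C<$pgdata> is assumed to hold the path to a
+data directory:
+
+  ok(check_mode_recursive($pgdata, 0700, 0600),
+      "data directory has expected permissions");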
+
+=cut
+
+sub check_mode_recursive
+{
+ my ($dir, $expected_dir_mode, $expected_file_mode, $ignore_list) = @_;
+
+ # Result defaults to true
+ my $result = 1;
+
+ find(
+ {
+ follow_fast => 1,
+ wanted => sub {
+ # Is file in the ignore list?
+ foreach my $ignore ($ignore_list ? @{$ignore_list} : [])
+ {
+ if ("$dir/$ignore" eq $File::Find::name)
+ {
+ return;
+ }
+ }
+
+ # Allow ENOENT. A running server can delete files, such as
+ # those in pg_stat. Other stat() failures are fatal.
+ my $file_stat = stat($File::Find::name);
+ unless (defined($file_stat))
+ {
+ my $is_ENOENT = $!{ENOENT};
+ my $msg = "unable to stat $File::Find::name: $!";
+ if ($is_ENOENT)
+ {
+ warn $msg;
+ return;
+ }
+ else
+ {
+ die $msg;
+ }
+ }
+
+ my $file_mode = S_IMODE($file_stat->mode);
+
+ # Is this a file?
+ if (S_ISREG($file_stat->mode))
+ {
+ if ($file_mode != $expected_file_mode)
+ {
+ print(
+ *STDERR,
+ sprintf("$File::Find::name mode must be %04o\n",
+ $expected_file_mode));
+
+ $result = 0;
+ return;
+ }
+ }
+
+ # Else a directory?
+ elsif (S_ISDIR($file_stat->mode))
+ {
+ if ($file_mode != $expected_dir_mode)
+ {
+ print(
+ *STDERR,
+ sprintf("$File::Find::name mode must be %04o\n",
+ $expected_dir_mode));
+
+ $result = 0;
+ return;
+ }
+ }
+
+ # Else something we can't handle
+ else
+ {
+ die "unknown file type for $File::Find::name";
+ }
+ }
+ },
+ $dir);
+
+ return $result;
+}
+
+=pod
+
+=item chmod_recursive(dir, dir_mode, file_mode)
+
+C<chmod> recursively each file and directory within the given directory.
+
+=cut
+
+sub chmod_recursive
+{
+ my ($dir, $dir_mode, $file_mode) = @_;
+
+ find(
+ {
+ follow_fast => 1,
+ wanted => sub {
+ my $file_stat = stat($File::Find::name);
+
+ if (defined($file_stat))
+ {
+ chmod(
+ S_ISDIR($file_stat->mode) ? $dir_mode : $file_mode,
+ $File::Find::name
+ ) or die "unable to chmod $File::Find::name";
+ }
+ }
+ },
+ $dir);
+ return;
+}
+
+=pod
+
+=item check_pg_config(regexp)
+
+Return the number of matches of the given regular expression
+within the installation's C<pg_config.h>.
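+
+For example, a sketch that records whether the build includes zlib support
+(the symbol tested is just an illustration):
+
+  my $have_zlib = check_pg_config('#define HAVE_LIBZ 1');
+  note "zlib support: ", $have_zlib ? "yes" : "no";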
+
+=cut
+
+sub check_pg_config
+{
+ my ($regexp) = @_;
+ my ($stdout, $stderr);
+ my $result = IPC::Run::run [ 'pg_config', '--includedir' ], '>',
+ \$stdout, '2>', \$stderr
+ or die "could not execute pg_config";
+ chomp($stdout);
+ $stdout =~ s/\r$//;
+
+ open my $pg_config_h, '<', "$stdout/pg_config.h" or die "$!";
+ my $match = (grep { /^$regexp/ } <$pg_config_h>);
+ close $pg_config_h;
+ return $match;
+}
+
+=pod
+
+=back
+
+=head1 Test::More-LIKE METHODS
+
+=over
+
+=item command_ok(cmd, test_name)
+
+Check that the command runs (via C<run_log>) successfully.
+
+=cut
+
+sub command_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $test_name) = @_;
+ my $result = run_log($cmd);
+ ok($result, $test_name);
+ return;
+}
+
+=pod
+
+=item command_fails(cmd, test_name)
+
+Check that the command fails (when run via C<run_log>).
+
+=cut
+
+sub command_fails
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $test_name) = @_;
+ my $result = run_log($cmd);
+ ok(!$result, $test_name);
+ return;
+}
+
+=pod
+
+=item command_exit_is(cmd, expected, test_name)
+
+Check that the command exit code matches the expected exit code.
+
+=cut
+
+sub command_exit_is
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $expected, $test_name) = @_;
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $h = IPC::Run::start $cmd;
+ $h->finish();
+
+ # On Windows, the exit status of the process is returned directly as the
+ # process's exit code, while on Unix, it's returned in the high bits
+ # of the exit code (see WEXITSTATUS macro in the standard <sys/wait.h>
+ # header file). IPC::Run's result function always returns exit code >> 8,
+ # assuming the Unix convention, which will always return 0 on Windows as
+ # long as the process was not terminated by an exception. To work around
+ # that, use $h->full_results on Windows instead.
+ my $result =
+ ($Config{osname} eq "MSWin32")
+ ? ($h->full_results)[0]
+ : $h->result(0);
+ is($result, $expected, $test_name);
+ return;
+}
+
+=pod
+
+=item program_help_ok(cmd)
+
+Check that the command supports the C<--help> option.
+
+=cut
+
+sub program_help_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ print("# Running: $cmd --help\n");
+ my $result = IPC::Run::run [ $cmd, '--help' ], '>', \$stdout, '2>',
+ \$stderr;
+ ok($result, "$cmd --help exit code 0");
+ isnt($stdout, '', "$cmd --help goes to stdout");
+ is($stderr, '', "$cmd --help nothing to stderr");
+ return;
+}
+
+=pod
+
+=item program_version_ok(cmd)
+
+Check that the command supports the C<--version> option.
+
+=cut
+
+sub program_version_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ print("# Running: $cmd --version\n");
+ my $result = IPC::Run::run [ $cmd, '--version' ], '>', \$stdout, '2>',
+ \$stderr;
+ ok($result, "$cmd --version exit code 0");
+ isnt($stdout, '', "$cmd --version goes to stdout");
+ is($stderr, '', "$cmd --version nothing to stderr");
+ return;
+}
+
+=pod
+
+=item program_options_handling_ok(cmd)
+
+Check that a command with an invalid option returns a non-zero
+exit code and error message.
+
+=cut
+
+sub program_options_handling_ok
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd) = @_;
+ my ($stdout, $stderr);
+ print("# Running: $cmd --not-a-valid-option\n");
+ my $result = IPC::Run::run [ $cmd, '--not-a-valid-option' ], '>',
+ \$stdout,
+ '2>', \$stderr;
+ ok(!$result, "$cmd with invalid option nonzero exit code");
+ isnt($stderr, '', "$cmd with invalid option prints error message");
+ return;
+}
+
+=pod
+
+=item command_like(cmd, expected_stdout, test_name)
+
+Check that the command runs successfully and the output
+matches the given regular expression.
+
+=cut
+
+sub command_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $expected_stdout, $test_name) = @_;
+ my ($stdout, $stderr);
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+ ok($result, "$test_name: exit code 0");
+ is($stderr, '', "$test_name: no stderr");
+ like($stdout, $expected_stdout, "$test_name: matches");
+ return;
+}
+
+=pod
+
+=item command_like_safe(cmd, expected_stdout, test_name)
+
+Check that the command runs successfully and the output
+matches the given regular expression. Doesn't assume that the
+output files are closed.
+
+=cut
+
+sub command_like_safe
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ # Doesn't rely on detecting end of file on the file descriptors,
+ # which can fail, causing the process to hang, notably on Msys
+ # when used with 'pg_ctl start'
+ my ($cmd, $expected_stdout, $test_name) = @_;
+ my ($stdout, $stderr);
+ my $stdoutfile = File::Temp->new();
+ my $stderrfile = File::Temp->new();
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $result = IPC::Run::run $cmd, '>', $stdoutfile, '2>', $stderrfile;
+ $stdout = slurp_file($stdoutfile);
+ $stderr = slurp_file($stderrfile);
+ ok($result, "$test_name: exit code 0");
+ is($stderr, '', "$test_name: no stderr");
+ like($stdout, $expected_stdout, "$test_name: matches");
+ return;
+}
+
+=pod
+
+=item command_fails_like(cmd, expected_stderr, test_name)
+
+Check that the command fails and the error message matches
+the given regular expression.
+
+=cut
+
+sub command_fails_like
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+ my ($cmd, $expected_stderr, $test_name) = @_;
+ my ($stdout, $stderr);
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+ ok(!$result, "$test_name: exit code not 0");
+ like($stderr, $expected_stderr, "$test_name: matches");
+ return;
+}
+
+=pod
+
+=item command_checks_all(cmd, ret, out, err, test_name)
+
+Run a command and check its status and outputs against the given expectations;
+see the example after the argument list.
+Arguments:
+
+=over
+
+=item C<cmd>: Array reference of command and arguments to run
+
+=item C<ret>: Expected exit code
+
+=item C<out>: Expected stdout from command
+
+=item C<err>: Expected stderr from command
+
+=item C<test_name>: test name
+
+=back
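+
+For example, an illustrative sketch in which the expected exit code and
+message patterns are placeholders for whatever the command under test is
+expected to produce:
+
+  command_checks_all(
+      [ 'initdb', '--bogus-option' ],
+      1,
+      [ qr/^$/ ],
+      [ qr/unrecognized option/ ],
+      'initdb rejects an unknown option');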
+
+=cut
+
+sub command_checks_all
+{
+ local $Test::Builder::Level = $Test::Builder::Level + 1;
+
+ my ($cmd, $expected_ret, $out, $err, $test_name) = @_;
+
+ # run command
+ my ($stdout, $stderr);
+ print("# Running: " . join(" ", @{$cmd}) . "\n");
+ IPC::Run::run($cmd, '>', \$stdout, '2>', \$stderr);
+
+ # See http://perldoc.perl.org/perlvar.html#%24CHILD_ERROR
+ my $ret = $?;
+ die "command exited with signal " . ($ret & 127)
+ if $ret & 127;
+ $ret = $ret >> 8;
+
+ # check status
+ ok($ret == $expected_ret,
+ "$test_name status (got $ret vs expected $expected_ret)");
+
+ # check stdout
+ for my $re (@$out)
+ {
+ like($stdout, $re, "$test_name stdout /$re/");
+ }
+
+ # check stderr
+ for my $re (@$err)
+ {
+ like($stderr, $re, "$test_name stderr /$re/");
+ }
+
+ return;
+}
+
+=pod
+
+=back
+
+=cut
+
+1;