Diffstat (limited to 'src/backend/replication')
-rw-r--r--  src/backend/replication/.gitignore                           |    4
-rw-r--r--  src/backend/replication/Makefile                             |   41
-rw-r--r--  src/backend/replication/README                               |  101
-rw-r--r--  src/backend/replication/backup_manifest.c                    |  379
-rw-r--r--  src/backend/replication/basebackup.c                         | 1987
-rw-r--r--  src/backend/replication/libpqwalreceiver/Makefile            |   37
-rw-r--r--  src/backend/replication/libpqwalreceiver/libpqwalreceiver.c  | 1072
-rw-r--r--  src/backend/replication/logical/Makefile                     |   31
-rw-r--r--  src/backend/replication/logical/decode.c                     | 1052
-rw-r--r--  src/backend/replication/logical/launcher.c                   | 1173
-rw-r--r--  src/backend/replication/logical/logical.c                    | 1093
-rw-r--r--  src/backend/replication/logical/logicalfuncs.c               |  417
-rw-r--r--  src/backend/replication/logical/message.c                    |   88
-rw-r--r--  src/backend/replication/logical/origin.c                     | 1566
-rw-r--r--  src/backend/replication/logical/proto.c                      |  687
-rw-r--r--  src/backend/replication/logical/relation.c                   |  584
-rw-r--r--  src/backend/replication/logical/reorderbuffer.c              | 3852
-rw-r--r--  src/backend/replication/logical/snapbuild.c                  | 2016
-rw-r--r--  src/backend/replication/logical/tablesync.c                  |  993
-rw-r--r--  src/backend/replication/logical/worker.c                     | 2175
-rw-r--r--  src/backend/replication/pgoutput/Makefile                    |   32
-rw-r--r--  src/backend/replication/pgoutput/pgoutput.c                  |  880
-rw-r--r--  src/backend/replication/repl_gram.c                          | 1998
-rw-r--r--  src/backend/replication/repl_gram.y                          |  437
-rw-r--r--  src/backend/replication/repl_scanner.c                       | 2586
-rw-r--r--  src/backend/replication/repl_scanner.l                       |  255
-rw-r--r--  src/backend/replication/slot.c                               | 1850
-rw-r--r--  src/backend/replication/slotfuncs.c                          |  948
-rw-r--r--  src/backend/replication/syncrep.c                            | 1092
-rw-r--r--  src/backend/replication/syncrep_gram.c                       | 1587
-rw-r--r--  src/backend/replication/syncrep_gram.y                       |  116
-rw-r--r--  src/backend/replication/syncrep_scanner.c                    | 2163
-rw-r--r--  src/backend/replication/syncrep_scanner.l                    |  163
-rw-r--r--  src/backend/replication/walreceiver.c                        | 1479
-rw-r--r--  src/backend/replication/walreceiverfuncs.c                   |  392
-rw-r--r--  src/backend/replication/walsender.c                          | 3665
36 files changed, 38991 insertions, 0 deletions
diff --git a/src/backend/replication/.gitignore b/src/backend/replication/.gitignore
new file mode 100644
index 0000000..d1df614
--- /dev/null
+++ b/src/backend/replication/.gitignore
@@ -0,0 +1,4 @@
+/repl_gram.c
+/repl_scanner.c
+/syncrep_gram.c
+/syncrep_scanner.c
diff --git a/src/backend/replication/Makefile b/src/backend/replication/Makefile
new file mode 100644
index 0000000..a0381e5
--- /dev/null
+++ b/src/backend/replication/Makefile
@@ -0,0 +1,41 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for src/backend/replication
+#
+# IDENTIFICATION
+# src/backend/replication/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/replication
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS)
+
+OBJS = \
+ backup_manifest.o \
+ basebackup.o \
+ repl_gram.o \
+ slot.o \
+ slotfuncs.o \
+ syncrep.o \
+ syncrep_gram.o \
+ walreceiver.o \
+ walreceiverfuncs.o \
+ walsender.o
+
+SUBDIRS = logical
+
+include $(top_srcdir)/src/backend/common.mk
+
+# repl_scanner is compiled as part of repl_gram
+repl_gram.o: repl_scanner.c
+
+# syncrep_scanner is compiled as part of syncrep_gram
+syncrep_gram.o: syncrep_scanner.c
+
+# repl_gram.c, repl_scanner.c, syncrep_gram.c and syncrep_scanner.c
+# are in the distribution tarball, so they are not cleaned here.
+# (Our parent Makefile takes care of them during maintainer-clean.)
diff --git a/src/backend/replication/README b/src/backend/replication/README
new file mode 100644
index 0000000..8ccdd86
--- /dev/null
+++ b/src/backend/replication/README
@@ -0,0 +1,101 @@
+src/backend/replication/README
+
+Walreceiver - libpqwalreceiver API
+----------------------------------
+
+The transport-specific part of walreceiver, responsible for connecting to
+the primary server, receiving WAL files and sending messages, is loaded
+dynamically to avoid having to link the main server binary with libpq.
+The dynamically loaded module is in the libpqwalreceiver subdirectory.
+
+The dynamically loaded module implements four functions:
+
+
+bool walrcv_connect(char *conninfo, XLogRecPtr startpoint)
+
+Establishes a connection to the primary, and starts streaming from 'startpoint'.
+Returns true on success.
+
+int walrcv_receive(char **buffer, pgsocket *wait_fd)
+
+Retrieves any message available from the connection without blocking. If
+a message was successfully read, returns its
+length. If the connection is closed, returns -1. Otherwise returns 0
+to indicate that no data is available, and sets *wait_fd to a socket
+descriptor which can be waited on before trying again. On success, a
+pointer to the message payload is stored in *buffer. The returned
+buffer is valid until the next call to walrcv_* functions, and the
+caller should not attempt to free it.
+
+void walrcv_send(const char *buffer, int nbytes)
+
+Sends a message to the XLOG stream.
+
+void walrcv_disconnect(void);
+
+Disconnects.
+
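+Taken together, a caller would drive this API roughly as follows (a
+minimal sketch; ProcessMessage() and WaitForSocket() are hypothetical
+placeholders, not part of the API):
+
+ if (!walrcv_connect(conninfo, startpoint))
+ elog(ERROR, "could not connect to the primary server");
+ for (;;)
+ {
+ char *buf;
+ pgsocket wait_fd;
+ int len = walrcv_receive(&buf, &wait_fd);
+
+ if (len > 0)
+ ProcessMessage(buf, len); /* buf is valid until the next walrcv_* call */
+ else if (len == 0)
+ WaitForSocket(wait_fd); /* no data; wait on *wait_fd and retry */
+ else
+ break; /* connection closed */
+ }
+ walrcv_disconnect();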
+
+This API should be considered internal at the moment, but we could open it
+up for 3rd party replacements of libpqwalreceiver in the future, allowing
+pluggable methods for receiving WAL.
+
+Walreceiver IPC
+---------------
+
+When WAL replay in the startup process has reached the end of the WAL
+restorable using restore_command, the startup process starts the
+walreceiver process to fetch more WAL (if streaming replication is
+configured).
+
+Walreceiver is a postmaster subprocess, so the startup process can't fork it
+directly. Instead, the startup process sends a signal to the postmaster,
+asking it to launch walreceiver. Before doing so, the startup process fills
+in WalRcvData->conninfo and WalRcvData->slotname, and initializes the
+starting point in WalRcvData->receiveStart.
+
+As walreceiver receives WAL from the master server, and writes and flushes
+it to disk (in pg_wal), it updates WalRcvData->flushedUpto and signals
+the startup process, so that it knows how far WAL replay can advance.
+
+Walreceiver sends information about replication progress to the master server
+whenever it either writes or flushes new WAL, or the specified interval elapses.
+This is used for reporting purposes.
+
+Walsender IPC
+-------------
+
+At shutdown, postmaster handles walsender processes differently from regular
+backends. It waits for regular backends to die before writing the
+shutdown checkpoint and terminating pgarch and other auxiliary processes, but
+that's not desirable for walsenders, because we want the standby servers to
+receive all the WAL, including the shutdown checkpoint, before the master
+is shut down. Therefore postmaster treats walsenders like the pgarch process,
+and instructs them to terminate at PM_SHUTDOWN_2 phase, after all regular
+backends have died and checkpointer has issued the shutdown checkpoint.
+
+When postmaster accepts a connection, it immediately forks a new process
+to handle the handshake and authentication, and the process initializes to
+become a backend. Postmaster doesn't know if the process becomes a regular
+backend or a walsender process at that time - that's indicated in the
+connection handshake - so we need some extra signaling to let postmaster
+identify walsender processes.
+
+When walsender process starts up, it marks itself as a walsender process in
+the PMSignal array. That way postmaster can tell it apart from regular
+backends.
+
+Note that no big harm is done if postmaster thinks that a walsender is a
+regular backend; it will just terminate the walsender earlier in the shutdown
+phase. A walsender looks like a regular backend until it has finished
+initialization and marked itself in the PMSignal array, and again at
+process termination, after it has unmarked its PMSignal slot.
+
+Each walsender allocates an entry from the WalSndCtl array, and tracks
+information about replication progress. Users can monitor this information
+via statistics views.
+
+
+Walsender - walreceiver protocol
+--------------------------------
+
+See manual.
diff --git a/src/backend/replication/backup_manifest.c b/src/backend/replication/backup_manifest.c
new file mode 100644
index 0000000..1e07d99
--- /dev/null
+++ b/src/backend/replication/backup_manifest.c
@@ -0,0 +1,379 @@
+/*-------------------------------------------------------------------------
+ *
+ * backup_manifest.c
+ * code for generating and sending a backup manifest
+ *
+ * Portions Copyright (c) 2010-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/backup_manifest.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/timeline.h"
+#include "libpq/libpq.h"
+#include "libpq/pqformat.h"
+#include "mb/pg_wchar.h"
+#include "replication/backup_manifest.h"
+#include "utils/builtins.h"
+#include "utils/json.h"
+
+static void AppendStringToManifest(backup_manifest_info *manifest, char *s);
+
+/*
+ * Does the user want a backup manifest?
+ *
+ * It's simplest to always have a manifest_info object, so that we don't need
+ * checks for NULL pointers in too many places. However, if the user doesn't
+ * want a manifest, we set manifest->buffile to NULL.
+ */
+static inline bool
+IsManifestEnabled(backup_manifest_info *manifest)
+{
+ return (manifest->buffile != NULL);
+}
+
+/*
+ * Convenience macro for appending data to the backup manifest.
+ */
+#define AppendToManifest(manifest, ...) \
+ { \
+ char *_manifest_s = psprintf(__VA_ARGS__); \
+ AppendStringToManifest(manifest, _manifest_s); \
+ pfree(_manifest_s); \
+ }
+
+/*
+ * Initialize state so that we can construct a backup manifest.
+ *
+ * NB: Although the checksum type for the data files is configurable, the
+ * checksum for the manifest itself always uses SHA-256. See comments in
+ * SendBackupManifest.
+ */
+void
+InitializeBackupManifest(backup_manifest_info *manifest,
+ backup_manifest_option want_manifest,
+ pg_checksum_type manifest_checksum_type)
+{
+ memset(manifest, 0, sizeof(backup_manifest_info));
+ manifest->checksum_type = manifest_checksum_type;
+
+ if (want_manifest == MANIFEST_OPTION_NO)
+ manifest->buffile = NULL;
+ else
+ {
+ manifest->buffile = BufFileCreateTemp(false);
+ pg_sha256_init(&manifest->manifest_ctx);
+ }
+
+ manifest->manifest_size = UINT64CONST(0);
+ manifest->force_encode = (want_manifest == MANIFEST_OPTION_FORCE_ENCODE);
+ manifest->first_file = true;
+ manifest->still_checksumming = true;
+
+ if (want_manifest != MANIFEST_OPTION_NO)
+ AppendToManifest(manifest,
+ "{ \"PostgreSQL-Backup-Manifest-Version\": 1,\n"
+ "\"Files\": [");
+}
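+
+/*
+ * For reference, the manifest assembled by the functions in this file has
+ * this overall shape (abridged):
+ *
+ * { "PostgreSQL-Backup-Manifest-Version": 1,
+ *   "Files": [
+ *     { "Path": ..., "Size": ..., "Last-Modified": ...,
+ *       "Checksum-Algorithm": ..., "Checksum": ... }, ... ],
+ *   "WAL-Ranges": [
+ *     { "Timeline": ..., "Start-LSN": ..., "End-LSN": ... }, ... ],
+ *   "Manifest-Checksum": ... }
+ */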
+
+/*
+ * Add an entry to the backup manifest for a file.
+ */
+void
+AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid,
+ const char *pathname, size_t size, pg_time_t mtime,
+ pg_checksum_context *checksum_ctx)
+{
+ char pathbuf[MAXPGPATH];
+ int pathlen;
+ StringInfoData buf;
+
+ if (!IsManifestEnabled(manifest))
+ return;
+
+ /*
+ * If this file is part of a tablespace, the pathname passed to this
+ * function will be relative to the tar file that contains it. We want the
+ * pathname relative to the data directory (ignoring the intermediate
+ * symlink traversal).
+ */
+ if (spcoid != NULL)
+ {
+ snprintf(pathbuf, sizeof(pathbuf), "pg_tblspc/%s/%s", spcoid,
+ pathname);
+ pathname = pathbuf;
+ }
+
+ /*
+ * Each file's entry needs to be separated from any entry that follows by
+ * a comma, but there's no comma before the first one or after the last
+ * one. To make that work, adding a file to the manifest starts by
+ * terminating the most recently added line, with a comma if appropriate,
+ * but does not terminate the line inserted for this file.
+ */
+ initStringInfo(&buf);
+ if (manifest->first_file)
+ {
+ appendStringInfoString(&buf, "\n");
+ manifest->first_file = false;
+ }
+ else
+ appendStringInfoString(&buf, ",\n");
+
+ /*
+ * Write the relative pathname to this file out to the manifest. The
+ * manifest is always stored in UTF-8, so we have to encode paths that are
+ * not valid in that encoding.
+ */
+ pathlen = strlen(pathname);
+ if (!manifest->force_encode &&
+ pg_verify_mbstr(PG_UTF8, pathname, pathlen, true))
+ {
+ appendStringInfoString(&buf, "{ \"Path\": ");
+ escape_json(&buf, pathname);
+ appendStringInfoString(&buf, ", ");
+ }
+ else
+ {
+ appendStringInfoString(&buf, "{ \"Encoded-Path\": \"");
+ enlargeStringInfo(&buf, 2 * pathlen);
+ buf.len += hex_encode(pathname, pathlen,
+ &buf.data[buf.len]);
+ appendStringInfoString(&buf, "\", ");
+ }
+
+ appendStringInfo(&buf, "\"Size\": %zu, ", size);
+
+ /*
+ * Convert last modification time to a string and append it to the
+ * manifest. Since it's not clear what time zone to use and since time
+ * zone definitions can change, possibly causing confusion, use GMT
+ * always.
+ */
+ appendStringInfoString(&buf, "\"Last-Modified\": \"");
+ enlargeStringInfo(&buf, 128);
+ buf.len += pg_strftime(&buf.data[buf.len], 128, "%Y-%m-%d %H:%M:%S %Z",
+ pg_gmtime(&mtime));
+ appendStringInfoString(&buf, "\"");
+
+ /* Add checksum information. */
+ if (checksum_ctx->type != CHECKSUM_TYPE_NONE)
+ {
+ uint8 checksumbuf[PG_CHECKSUM_MAX_LENGTH];
+ int checksumlen;
+
+ checksumlen = pg_checksum_final(checksum_ctx, checksumbuf);
+
+ appendStringInfo(&buf,
+ ", \"Checksum-Algorithm\": \"%s\", \"Checksum\": \"",
+ pg_checksum_type_name(checksum_ctx->type));
+ enlargeStringInfo(&buf, 2 * checksumlen);
+ buf.len += hex_encode((char *) checksumbuf, checksumlen,
+ &buf.data[buf.len]);
+ appendStringInfoString(&buf, "\"");
+ }
+
+ /* Close out the object. */
+ appendStringInfoString(&buf, " }");
+
+ /* OK, add it to the manifest. */
+ AppendStringToManifest(manifest, buf.data);
+
+ /* Avoid leaking memory. */
+ pfree(buf.data);
+}
+
+/*
+ * Add information about the WAL that will need to be replayed when restoring
+ * this backup to the manifest.
+ */
+void
+AddWALInfoToBackupManifest(backup_manifest_info *manifest, XLogRecPtr startptr,
+ TimeLineID starttli, XLogRecPtr endptr,
+ TimeLineID endtli)
+{
+ List *timelines;
+ ListCell *lc;
+ bool first_wal_range = true;
+ bool found_start_timeline = false;
+
+ if (!IsManifestEnabled(manifest))
+ return;
+
+ /* Terminate the list of files. */
+ AppendStringToManifest(manifest, "\n],\n");
+
+ /* Read the timeline history for the ending timeline. */
+ timelines = readTimeLineHistory(endtli);
+
+ /* Start a list of LSN ranges. */
+ AppendStringToManifest(manifest, "\"WAL-Ranges\": [\n");
+
+ foreach(lc, timelines)
+ {
+ TimeLineHistoryEntry *entry = lfirst(lc);
+ XLogRecPtr tl_beginptr;
+
+ /*
+ * We only care about timelines that were active during the backup.
+ * Skip any that ended before the backup started. (Note that if
+ * entry->end is InvalidXLogRecPtr, it means that the timeline has not
+ * yet ended.)
+ */
+ if (!XLogRecPtrIsInvalid(entry->end) && entry->end < startptr)
+ continue;
+
+ /*
+ * Because the timeline history file lists newer timelines before
+ * older ones, the first timeline we encounter that is new enough to
+ * matter ought to match the ending timeline of the backup.
+ */
+ if (first_wal_range && endtli != entry->tli)
+ ereport(ERROR,
+ errmsg("expected end timeline %u but found timeline %u",
+ endtli, entry->tli));
+
+ if (!XLogRecPtrIsInvalid(entry->begin))
+ tl_beginptr = entry->begin;
+ else
+ {
+ tl_beginptr = startptr;
+
+ /*
+ * If we reach a TLI that has no valid beginning LSN, there can't
+ * be any more timelines in the history after this point, so we'd
+ * better have arrived at the expected starting TLI. If not,
+ * something's gone horribly wrong.
+ */
+ if (starttli != entry->tli)
+ ereport(ERROR,
+ errmsg("expected start timeline %u but found timeline %u",
+ starttli, entry->tli));
+ }
+
+ AppendToManifest(manifest,
+ "%s{ \"Timeline\": %u, \"Start-LSN\": \"%X/%X\", \"End-LSN\": \"%X/%X\" }",
+ first_wal_range ? "" : ",\n",
+ entry->tli,
+ (uint32) (tl_beginptr >> 32), (uint32) tl_beginptr,
+ (uint32) (endptr >> 32), (uint32) endptr);
+
+ if (starttli == entry->tli)
+ {
+ found_start_timeline = true;
+ break;
+ }
+
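+ /*
+ * We walk the history from the newest timeline to the oldest, so the
+ * next (older) WAL range ends where this timeline began.
+ */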
+ endptr = entry->begin;
+ first_wal_range = false;
+ }
+
+ /*
+ * The starting timeline must appear somewhere in the history of the
+ * ending timeline. If we never encountered it, something is wrong.
+ */
+ if (!found_start_timeline)
+ ereport(ERROR,
+ errmsg("start timeline %u not found in history of timeline %u",
+ starttli, endtli));
+
+ /* Terminate the list of WAL ranges. */
+ AppendStringToManifest(manifest, "\n],\n");
+}
+
+/*
+ * Finalize the backup manifest, and send it to the client.
+ */
+void
+SendBackupManifest(backup_manifest_info *manifest)
+{
+ StringInfoData protobuf;
+ uint8 checksumbuf[PG_SHA256_DIGEST_LENGTH];
+ char checksumstringbuf[PG_SHA256_DIGEST_STRING_LENGTH];
+ size_t manifest_bytes_done = 0;
+
+ if (!IsManifestEnabled(manifest))
+ return;
+
+ /*
+ * Append manifest checksum, so that problems with the manifest itself
+ * can be detected.
+ *
+ * We always use SHA-256 for this, regardless of what algorithm is chosen
+ * for checksumming the files. If we ever want to make the checksum
+ * algorithm used for the manifest file variable, the client will need a
+ * way to figure out which algorithm to use as close to the beginning of
+ * the manifest file as possible, to avoid having to read the whole thing
+ * twice.
+ */
+ manifest->still_checksumming = false;
+ pg_sha256_final(&manifest->manifest_ctx, checksumbuf);
+ AppendStringToManifest(manifest, "\"Manifest-Checksum\": \"");
+ hex_encode((char *) checksumbuf, sizeof checksumbuf, checksumstringbuf);
+ checksumstringbuf[PG_SHA256_DIGEST_STRING_LENGTH - 1] = '\0';
+ AppendStringToManifest(manifest, checksumstringbuf);
+ AppendStringToManifest(manifest, "\"}\n");
+
+ /*
+ * We've written all the data to the manifest file. Rewind the file so
+ * that we can read it all back.
+ */
+ if (BufFileSeek(manifest->buffile, 0, 0L, SEEK_SET))
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not rewind temporary file")));
+
+ /* Send CopyOutResponse message */
+ pq_beginmessage(&protobuf, 'H');
+ pq_sendbyte(&protobuf, 0); /* overall format */
+ pq_sendint16(&protobuf, 0); /* natts */
+ pq_endmessage(&protobuf);
+
+ /*
+ * Send CopyData messages.
+ *
+ * We choose to read back the data from the temporary file in chunks of
+ * size BLCKSZ; this isn't necessary, but buffile.c uses that as the I/O
+ * size, so it seems to make sense to match that value here.
+ */
+ while (manifest_bytes_done < manifest->manifest_size)
+ {
+ char manifestbuf[BLCKSZ];
+ size_t bytes_to_read;
+ size_t rc;
+
+ bytes_to_read = Min(sizeof(manifestbuf),
+ manifest->manifest_size - manifest_bytes_done);
+ rc = BufFileRead(manifest->buffile, manifestbuf, bytes_to_read);
+ if (rc != bytes_to_read)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read from temporary file: %m")));
+ pq_putmessage('d', manifestbuf, bytes_to_read);
+ manifest_bytes_done += bytes_to_read;
+ }
+
+ /* No more data, so send CopyDone message */
+ pq_putemptymessage('c');
+
+ /* Release resources */
+ BufFileClose(manifest->buffile);
+}
+
+/*
+ * Append a cstring to the manifest.
+ */
+static void
+AppendStringToManifest(backup_manifest_info *manifest, char *s)
+{
+ int len = strlen(s);
+
+ Assert(manifest != NULL);
+ if (manifest->still_checksumming)
+ pg_sha256_update(&manifest->manifest_ctx, (uint8 *) s, len);
+ BufFileWrite(manifest->buffile, s, len);
+ manifest->manifest_size += len;
+}
diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c
new file mode 100644
index 0000000..50ae1f1
--- /dev/null
+++ b/src/backend/replication/basebackup.c
@@ -0,0 +1,1987 @@
+/*-------------------------------------------------------------------------
+ *
+ * basebackup.c
+ * code for taking a base backup and streaming it to a standby
+ *
+ * Portions Copyright (c) 2010-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/basebackup.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <sys/stat.h>
+#include <unistd.h>
+#include <time.h>
+
+#include "access/xlog_internal.h" /* for pg_start/stop_backup */
+#include "catalog/pg_type.h"
+#include "common/file_perm.h"
+#include "commands/progress.h"
+#include "lib/stringinfo.h"
+#include "libpq/libpq.h"
+#include "libpq/pqformat.h"
+#include "miscadmin.h"
+#include "nodes/pg_list.h"
+#include "pgstat.h"
+#include "pgtar.h"
+#include "port.h"
+#include "postmaster/syslogger.h"
+#include "replication/basebackup.h"
+#include "replication/backup_manifest.h"
+#include "replication/walsender.h"
+#include "replication/walsender_private.h"
+#include "storage/bufpage.h"
+#include "storage/checksum.h"
+#include "storage/dsm_impl.h"
+#include "storage/fd.h"
+#include "storage/ipc.h"
+#include "storage/reinit.h"
+#include "utils/builtins.h"
+#include "utils/ps_status.h"
+#include "utils/relcache.h"
+#include "utils/resowner.h"
+#include "utils/timestamp.h"
+
+typedef struct
+{
+ const char *label;
+ bool progress;
+ bool fastcheckpoint;
+ bool nowait;
+ bool includewal;
+ uint32 maxrate;
+ bool sendtblspcmapfile;
+ backup_manifest_option manifest;
+ pg_checksum_type manifest_checksum_type;
+} basebackup_options;
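+
+/*
+ * For reference, these fields correspond to the options of the BASE_BACKUP
+ * replication command parsed by parse_basebackup_options(), e.g. (an
+ * illustrative, not exhaustive, command):
+ *
+ * BASE_BACKUP LABEL 'nightly' FAST PROGRESS WAL NOWAIT
+ * MAX_RATE 1024 MANIFEST 'yes' MANIFEST_CHECKSUMS 'CRC32C'
+ */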
+
+static int64 sendDir(const char *path, int basepathlen, bool sizeonly,
+ List *tablespaces, bool sendtblspclinks,
+ backup_manifest_info *manifest, const char *spcoid);
+static bool sendFile(const char *readfilename, const char *tarfilename,
+ struct stat *statbuf, bool missing_ok, Oid dboid,
+ backup_manifest_info *manifest, const char *spcoid);
+static void sendFileWithContent(const char *filename, const char *content,
+ backup_manifest_info *manifest);
+static int64 _tarWriteHeader(const char *filename, const char *linktarget,
+ struct stat *statbuf, bool sizeonly);
+static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf,
+ bool sizeonly);
+static void send_int8_string(StringInfoData *buf, int64 intval);
+static void SendBackupHeader(List *tablespaces);
+static void perform_base_backup(basebackup_options *opt);
+static void parse_basebackup_options(List *options, basebackup_options *opt);
+static void SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli);
+static int compareWalFileNames(const ListCell *a, const ListCell *b);
+static void throttle(size_t increment);
+static void update_basebackup_progress(int64 delta);
+static bool is_checksummed_file(const char *fullpath, const char *filename);
+
+/* Was the backup currently in-progress initiated in recovery mode? */
+static bool backup_started_in_recovery = false;
+
+/* Relative path of temporary statistics directory */
+static char *statrelpath = NULL;
+
+/*
+ * Size of each block sent into the tar stream for larger files.
+ */
+#define TAR_SEND_SIZE 32768
+
+/*
+ * How many times per second the transfer rate is checked; each throttling
+ * sample is the per-second rate divided by this value.
+ */
+#define THROTTLING_FREQUENCY 8
+
+/*
+ * Checks whether we encountered any error in fread(). fread() doesn't give
+ * any clue what has happened, so we check with ferror(). Also, neither
+ * fread() nor ferror() set errno, so we just throw a generic error.
+ */
+#define CHECK_FREAD_ERROR(fp, filename) \
+do { \
+ if (ferror(fp)) \
+ ereport(ERROR, \
+ (errmsg("could not read from file \"%s\"", filename))); \
+} while (0)
+
+/* The number of bytes whose transfer may cause a sleep. */
+static uint64 throttling_sample;
+
+/* Amount of data already transferred but not yet throttled. */
+static int64 throttling_counter;
+
+/* The minimum time required to transfer throttling_sample bytes. */
+static TimeOffset elapsed_min_unit;
+
+/* The last check of the transfer rate. */
+static TimestampTz throttled_last;
+
+/* The starting XLOG position of the base backup. */
+static XLogRecPtr startptr;
+
+/* Total number of checksum failures during base backup. */
+static long long int total_checksum_failures;
+
+/* Do not verify checksums. */
+static bool noverify_checksums = false;
+
+/*
+ * Total amount of backup data that will be streamed.
+ * -1 means that the size is not estimated.
+ */
+static int64 backup_total = 0;
+
+/* Amount of backup data already streamed */
+static int64 backup_streamed = 0;
+
+/*
+ * Definition of one element of an exclusion list, used for paths excluded
+ * from checksum validation or from base backups. "name" is the name of the
+ * file or path to check for exclusion. If "match_prefix" is true, any item
+ * matching the name as a prefix is excluded.
+ */
+struct exclude_list_item
+{
+ const char *name;
+ bool match_prefix;
+};
+
+/*
+ * The contents of these directories are removed or recreated during server
+ * start so they are not included in backups. The directories themselves are
+ * kept and included as empty to preserve access permissions.
+ *
+ * Note: this list should be kept in sync with the filter lists in pg_rewind's
+ * filemap.c.
+ */
+static const char *const excludeDirContents[] =
+{
+ /*
+ * Skip temporary statistics files. PG_STAT_TMP_DIR must be skipped even
+ * when stats_temp_directory is set because PGSS_TEXT_FILE is always
+ * created there.
+ */
+ PG_STAT_TMP_DIR,
+
+ /*
+ * It is generally not useful to back up the contents of this directory
+ * even if the intention is to restore to another master. See backup.sgml
+ * for a more detailed description.
+ */
+ "pg_replslot",
+
+ /* Contents removed on startup, see dsm_cleanup_for_mmap(). */
+ PG_DYNSHMEM_DIR,
+
+ /* Contents removed on startup, see AsyncShmemInit(). */
+ "pg_notify",
+
+ /*
+ * Old contents are loaded for possible debugging but are not required for
+ * normal operation, see SerialInit().
+ */
+ "pg_serial",
+
+ /* Contents removed on startup, see DeleteAllExportedSnapshotFiles(). */
+ "pg_snapshots",
+
+ /* Contents zeroed on startup, see StartupSUBTRANS(). */
+ "pg_subtrans",
+
+ /* end of list */
+ NULL
+};
+
+/*
+ * List of files excluded from backups.
+ */
+static const struct exclude_list_item excludeFiles[] =
+{
+ /* Skip auto conf temporary file. */
+ {PG_AUTOCONF_FILENAME ".tmp", false},
+
+ /* Skip current log file temporary file */
+ {LOG_METAINFO_DATAFILE_TMP, false},
+
+ /*
+ * Skip relation cache because it is rebuilt on startup. This includes
+ * temporary files.
+ */
+ {RELCACHE_INIT_FILENAME, true},
+
+ /*
+ * If there's a backup_label or tablespace_map file, it belongs to a
+ * backup started by the user with pg_start_backup(). It is *not* correct
+ * for this backup. Our backup_label/tablespace_map is injected into the
+ * tar separately.
+ */
+ {BACKUP_LABEL_FILE, false},
+ {TABLESPACE_MAP, false},
+
+ /*
+ * If there's a backup_manifest, it belongs to a backup that was used to
+ * start this server. It is *not* correct for this backup. Our
+ * backup_manifest is injected into the backup separately if users want
+ * it.
+ */
+ {"backup_manifest", false},
+
+ {"postmaster.pid", false},
+ {"postmaster.opts", false},
+
+ /* end of list */
+ {NULL, false}
+};
+
+/*
+ * List of files excluded from checksum validation.
+ *
+ * Note: this list should be kept in sync with what pg_checksums.c
+ * includes.
+ */
+static const struct exclude_list_item noChecksumFiles[] = {
+ {"pg_control", false},
+ {"pg_filenode.map", false},
+ {"pg_internal.init", true},
+ {"PG_VERSION", false},
+#ifdef EXEC_BACKEND
+ {"config_exec_params", true},
+#endif
+ {NULL, false}
+};
+
+/*
+ * Actually do a base backup for the specified tablespaces.
+ *
+ * This is split out mainly to avoid complaints about "variable might be
+ * clobbered by longjmp" from stupider versions of gcc.
+ */
+static void
+perform_base_backup(basebackup_options *opt)
+{
+ TimeLineID starttli;
+ XLogRecPtr endptr;
+ TimeLineID endtli;
+ StringInfo labelfile;
+ StringInfo tblspc_map_file = NULL;
+ backup_manifest_info manifest;
+ int datadirpathlen;
+ List *tablespaces = NIL;
+
+ backup_total = 0;
+ backup_streamed = 0;
+ pgstat_progress_start_command(PROGRESS_COMMAND_BASEBACKUP, InvalidOid);
+
+ /*
+ * If the estimation of the total backup size is disabled, make the
+ * backup_total column in the view return NULL by setting the parameter to
+ * -1.
+ */
+ if (!opt->progress)
+ {
+ backup_total = -1;
+ pgstat_progress_update_param(PROGRESS_BASEBACKUP_BACKUP_TOTAL,
+ backup_total);
+ }
+
+ /* we're going to use a BufFile, so we need a ResourceOwner */
+ Assert(CurrentResourceOwner == NULL);
+ CurrentResourceOwner = ResourceOwnerCreate(NULL, "base backup");
+
+ datadirpathlen = strlen(DataDir);
+
+ backup_started_in_recovery = RecoveryInProgress();
+
+ labelfile = makeStringInfo();
+ tblspc_map_file = makeStringInfo();
+ InitializeBackupManifest(&manifest, opt->manifest,
+ opt->manifest_checksum_type);
+
+ total_checksum_failures = 0;
+
+ pgstat_progress_update_param(PROGRESS_BASEBACKUP_PHASE,
+ PROGRESS_BASEBACKUP_PHASE_WAIT_CHECKPOINT);
+ startptr = do_pg_start_backup(opt->label, opt->fastcheckpoint, &starttli,
+ labelfile, &tablespaces,
+ tblspc_map_file,
+ opt->progress, opt->sendtblspcmapfile);
+
+ /*
+ * Once do_pg_start_backup has been called, ensure that any failure causes
+ * us to abort the backup so we don't "leak" a backup counter. For this
+ * reason, *all* functionality between do_pg_start_backup() and the end of
+ * do_pg_stop_backup() should be inside the error cleanup block!
+ */
+
+ PG_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, BoolGetDatum(false));
+ {
+ ListCell *lc;
+ tablespaceinfo *ti;
+ int tblspc_streamed = 0;
+
+ /*
+ * Calculate the relative path of temporary statistics directory in
+ * order to skip the files which are located in that directory later.
+ */
+ if (is_absolute_path(pgstat_stat_directory) &&
+ strncmp(pgstat_stat_directory, DataDir, datadirpathlen) == 0)
+ statrelpath = psprintf("./%s", pgstat_stat_directory + datadirpathlen + 1);
+ else if (strncmp(pgstat_stat_directory, "./", 2) != 0)
+ statrelpath = psprintf("./%s", pgstat_stat_directory);
+ else
+ statrelpath = pgstat_stat_directory;
+
+ /* Add a node for the base directory at the end */
+ ti = palloc0(sizeof(tablespaceinfo));
+ if (opt->progress)
+ ti->size = sendDir(".", 1, true, tablespaces, true, NULL, NULL);
+ else
+ ti->size = -1;
+ tablespaces = lappend(tablespaces, ti);
+
+ /*
+ * Calculate the total backup size by summing up the size of each
+ * tablespace
+ */
+ if (opt->progress)
+ {
+ foreach(lc, tablespaces)
+ {
+ tablespaceinfo *tmp = (tablespaceinfo *) lfirst(lc);
+
+ backup_total += tmp->size;
+ }
+ }
+
+ /* Report that we are now streaming database files as a base backup */
+ {
+ const int index[] = {
+ PROGRESS_BASEBACKUP_PHASE,
+ PROGRESS_BASEBACKUP_BACKUP_TOTAL,
+ PROGRESS_BASEBACKUP_TBLSPC_TOTAL
+ };
+ const int64 val[] = {
+ PROGRESS_BASEBACKUP_PHASE_STREAM_BACKUP,
+ backup_total, list_length(tablespaces)
+ };
+
+ pgstat_progress_update_multi_param(3, index, val);
+ }
+
+ /* Send the starting position of the backup */
+ SendXlogRecPtrResult(startptr, starttli);
+
+ /* Send tablespace header */
+ SendBackupHeader(tablespaces);
+
+ /* Setup and activate network throttling, if client requested it */
+ if (opt->maxrate > 0)
+ {
+ throttling_sample =
+ (int64) opt->maxrate * (int64) 1024 / THROTTLING_FREQUENCY;
+
+ /*
+ * The minimum amount of time for throttling_sample bytes to be
+ * transferred.
+ */
+ elapsed_min_unit = USECS_PER_SEC / THROTTLING_FREQUENCY;
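+
+ /*
+ * For example, MAX_RATE 1024 (kB/s) yields a throttling_sample of
+ * 1024 * 1024 / 8 = 131072 bytes, which may take no less than
+ * 1000000 / 8 = 125000 microseconds to send.
+ */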
+
+ /* Enable throttling. */
+ throttling_counter = 0;
+
+ /* The 'real data' starts now (header was ignored). */
+ throttled_last = GetCurrentTimestamp();
+ }
+ else
+ {
+ /* Disable throttling. */
+ throttling_counter = -1;
+ }
+
+ /* Send off our tablespaces one by one */
+ foreach(lc, tablespaces)
+ {
+ tablespaceinfo *ti = (tablespaceinfo *) lfirst(lc);
+ StringInfoData buf;
+
+ /* Send CopyOutResponse message */
+ pq_beginmessage(&buf, 'H');
+ pq_sendbyte(&buf, 0); /* overall format */
+ pq_sendint16(&buf, 0); /* natts */
+ pq_endmessage(&buf);
+
+ if (ti->path == NULL)
+ {
+ struct stat statbuf;
+
+ /* In the main tar, include the backup_label first... */
+ sendFileWithContent(BACKUP_LABEL_FILE, labelfile->data,
+ &manifest);
+
+ /*
+ * Send tablespace_map file if required and then the bulk of
+ * the files.
+ */
+ if (tblspc_map_file && opt->sendtblspcmapfile)
+ {
+ sendFileWithContent(TABLESPACE_MAP, tblspc_map_file->data,
+ &manifest);
+ sendDir(".", 1, false, tablespaces, false,
+ &manifest, NULL);
+ }
+ else
+ sendDir(".", 1, false, tablespaces, true,
+ &manifest, NULL);
+
+ /* ... and pg_control after everything else. */
+ if (lstat(XLOG_CONTROL_FILE, &statbuf) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not stat file \"%s\": %m",
+ XLOG_CONTROL_FILE)));
+ sendFile(XLOG_CONTROL_FILE, XLOG_CONTROL_FILE, &statbuf,
+ false, InvalidOid, &manifest, NULL);
+ }
+ else
+ sendTablespace(ti->path, ti->oid, false, &manifest);
+
+ /*
+ * If we're including WAL, and this is the main data directory, we
+ * don't terminate the tar stream here. Instead, we will append
+ * the xlog files below and terminate it then. This is safe since
+ * the main data directory is always sent *last*.
+ */
+ if (opt->includewal && ti->path == NULL)
+ {
+ Assert(lnext(tablespaces, lc) == NULL);
+ }
+ else
+ pq_putemptymessage('c'); /* CopyDone */
+
+ tblspc_streamed++;
+ pgstat_progress_update_param(PROGRESS_BASEBACKUP_TBLSPC_STREAMED,
+ tblspc_streamed);
+ }
+
+ pgstat_progress_update_param(PROGRESS_BASEBACKUP_PHASE,
+ PROGRESS_BASEBACKUP_PHASE_WAIT_WAL_ARCHIVE);
+ endptr = do_pg_stop_backup(labelfile->data, !opt->nowait, &endtli);
+ }
+ PG_END_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, BoolGetDatum(false));
+
+
+ if (opt->includewal)
+ {
+ /*
+ * We've left the last tar file "open", so we can now append the
+ * required WAL files to it.
+ */
+ char pathbuf[MAXPGPATH];
+ XLogSegNo segno;
+ XLogSegNo startsegno;
+ XLogSegNo endsegno;
+ struct stat statbuf;
+ List *historyFileList = NIL;
+ List *walFileList = NIL;
+ char firstoff[MAXFNAMELEN];
+ char lastoff[MAXFNAMELEN];
+ DIR *dir;
+ struct dirent *de;
+ ListCell *lc;
+ TimeLineID tli;
+
+ pgstat_progress_update_param(PROGRESS_BASEBACKUP_PHASE,
+ PROGRESS_BASEBACKUP_PHASE_TRANSFER_WAL);
+
+ /*
+ * I'd rather not worry about timelines here, so scan pg_wal and
+ * include all WAL files in the range between 'startptr' and 'endptr',
+ * regardless of the timeline the file is stamped with. If there are
+ * some spurious WAL files belonging to timelines that don't belong in
+ * this server's history, they will be included too. Normally there
+ * shouldn't be such files, but if there are, there's little harm in
+ * including them.
+ */
+ XLByteToSeg(startptr, startsegno, wal_segment_size);
+ XLogFileName(firstoff, ThisTimeLineID, startsegno, wal_segment_size);
+ XLByteToPrevSeg(endptr, endsegno, wal_segment_size);
+ XLogFileName(lastoff, ThisTimeLineID, endsegno, wal_segment_size);
+
+ dir = AllocateDir("pg_wal");
+ while ((de = ReadDir(dir, "pg_wal")) != NULL)
+ {
+ /* Does it look like a WAL segment, and is it in the range? */
+ if (IsXLogFileName(de->d_name) &&
+ strcmp(de->d_name + 8, firstoff + 8) >= 0 &&
+ strcmp(de->d_name + 8, lastoff + 8) <= 0)
+ {
+ walFileList = lappend(walFileList, pstrdup(de->d_name));
+ }
+ /* Does it look like a timeline history file? */
+ else if (IsTLHistoryFileName(de->d_name))
+ {
+ historyFileList = lappend(historyFileList, pstrdup(de->d_name));
+ }
+ }
+ FreeDir(dir);
+
+ /*
+ * Before we go any further, check that none of the WAL segments we
+ * need were removed.
+ */
+ CheckXLogRemoved(startsegno, ThisTimeLineID);
+
+ /*
+ * Sort the WAL filenames. We want to send the files in order from
+ * oldest to newest, to reduce the chance that a file is recycled
+ * before we get a chance to send it over.
+ */
+ list_sort(walFileList, compareWalFileNames);
+
+ /*
+ * There must be at least one xlog file in the pg_wal directory, since
+ * we are doing backup-including-xlog.
+ */
+ if (walFileList == NIL)
+ ereport(ERROR,
+ (errmsg("could not find any WAL files")));
+
+ /*
+ * Sanity check: the first and last segment should cover startptr and
+ * endptr, with no gaps in between.
+ */
+ XLogFromFileName((char *) linitial(walFileList),
+ &tli, &segno, wal_segment_size);
+ if (segno != startsegno)
+ {
+ char startfname[MAXFNAMELEN];
+
+ XLogFileName(startfname, ThisTimeLineID, startsegno,
+ wal_segment_size);
+ ereport(ERROR,
+ (errmsg("could not find WAL file \"%s\"", startfname)));
+ }
+ foreach(lc, walFileList)
+ {
+ char *walFileName = (char *) lfirst(lc);
+ XLogSegNo currsegno = segno;
+ XLogSegNo nextsegno = segno + 1;
+
+ XLogFromFileName(walFileName, &tli, &segno, wal_segment_size);
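+
+ /*
+ * Because the sort ignored the timeline portion, the same segment
+ * may appear twice under different timelines; each entry must
+ * therefore repeat the previous segment number or immediately
+ * follow it, else there is a gap in the WAL.
+ */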
+ if (!(nextsegno == segno || currsegno == segno))
+ {
+ char nextfname[MAXFNAMELEN];
+
+ XLogFileName(nextfname, ThisTimeLineID, nextsegno,
+ wal_segment_size);
+ ereport(ERROR,
+ (errmsg("could not find WAL file \"%s\"", nextfname)));
+ }
+ }
+ if (segno != endsegno)
+ {
+ char endfname[MAXFNAMELEN];
+
+ XLogFileName(endfname, ThisTimeLineID, endsegno, wal_segment_size);
+ ereport(ERROR,
+ (errmsg("could not find WAL file \"%s\"", endfname)));
+ }
+
+ /* Ok, we have everything we need. Send the WAL files. */
+ foreach(lc, walFileList)
+ {
+ char *walFileName = (char *) lfirst(lc);
+ FILE *fp;
+ char buf[TAR_SEND_SIZE];
+ size_t cnt;
+ pgoff_t len = 0;
+
+ snprintf(pathbuf, MAXPGPATH, XLOGDIR "/%s", walFileName);
+ XLogFromFileName(walFileName, &tli, &segno, wal_segment_size);
+
+ fp = AllocateFile(pathbuf, "rb");
+ if (fp == NULL)
+ {
+ int save_errno = errno;
+
+ /*
+ * Most likely reason for this is that the file was already
+ * removed by a checkpoint, so check for that to get a better
+ * error message.
+ */
+ CheckXLogRemoved(segno, tli);
+
+ errno = save_errno;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m", pathbuf)));
+ }
+
+ if (fstat(fileno(fp), &statbuf) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not stat file \"%s\": %m",
+ pathbuf)));
+ if (statbuf.st_size != wal_segment_size)
+ {
+ CheckXLogRemoved(segno, tli);
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("unexpected WAL file size \"%s\"", walFileName)));
+ }
+
+ /* send the WAL file itself */
+ _tarWriteHeader(pathbuf, NULL, &statbuf, false);
+
+ while ((cnt = fread(buf, 1,
+ Min(sizeof(buf), wal_segment_size - len),
+ fp)) > 0)
+ {
+ CheckXLogRemoved(segno, tli);
+ /* Send the chunk as a CopyData message */
+ if (pq_putmessage('d', buf, cnt))
+ ereport(ERROR,
+ (errmsg("base backup could not send data, aborting backup")));
+ update_basebackup_progress(cnt);
+
+ len += cnt;
+ throttle(cnt);
+
+ if (len == wal_segment_size)
+ break;
+ }
+
+ CHECK_FREAD_ERROR(fp, pathbuf);
+
+ if (len != wal_segment_size)
+ {
+ CheckXLogRemoved(segno, tli);
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("unexpected WAL file size \"%s\"", walFileName)));
+ }
+
+ /* wal_segment_size is a multiple of 512, so no need for padding */
+
+ FreeFile(fp);
+
+ /*
+ * Mark file as archived, otherwise files can get archived again
+ * after promotion of a new node. This is in line with
+ * walreceiver.c always doing an XLogArchiveForceDone() after a
+ * complete segment.
+ */
+ StatusFilePath(pathbuf, walFileName, ".done");
+ sendFileWithContent(pathbuf, "", &manifest);
+ }
+
+ /*
+ * Send timeline history files too. Only the latest timeline history
+ * file is required for recovery, and even that only if there happens
+ * to be a timeline switch in the first WAL segment that contains the
+ * checkpoint record, or if we're taking a base backup from a standby
+ * server and the target timeline changes while the backup is taken.
+ * But they are small and highly useful for debugging purposes, so
+ * better include them all, always.
+ */
+ foreach(lc, historyFileList)
+ {
+ char *fname = lfirst(lc);
+
+ snprintf(pathbuf, MAXPGPATH, XLOGDIR "/%s", fname);
+
+ if (lstat(pathbuf, &statbuf) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not stat file \"%s\": %m", pathbuf)));
+
+ sendFile(pathbuf, pathbuf, &statbuf, false, InvalidOid,
+ &manifest, NULL);
+
+ /* unconditionally mark file as archived */
+ StatusFilePath(pathbuf, fname, ".done");
+ sendFileWithContent(pathbuf, "", &manifest);
+ }
+
+ /* Send CopyDone message for the last tar file */
+ pq_putemptymessage('c');
+ }
+
+ AddWALInfoToBackupManifest(&manifest, startptr, starttli, endptr, endtli);
+
+ SendBackupManifest(&manifest);
+
+ SendXlogRecPtrResult(endptr, endtli);
+
+ if (total_checksum_failures)
+ {
+ if (total_checksum_failures > 1)
+ ereport(WARNING,
+ (errmsg_plural("%lld total checksum verification failure",
+ "%lld total checksum verification failures",
+ total_checksum_failures,
+ total_checksum_failures)));
+
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("checksum verification failure during base backup")));
+ }
+
+ /* clean up the resource owner we created */
+ WalSndResourceCleanup(true);
+
+ pgstat_progress_end_command();
+}
+
+/*
+ * list_sort comparison function, to compare log/seg portion of WAL segment
+ * filenames, ignoring the timeline portion.
+ */
+static int
+compareWalFileNames(const ListCell *a, const ListCell *b)
+{
+ char *fna = (char *) lfirst(a);
+ char *fnb = (char *) lfirst(b);
+
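+ /* Skip the first 8 characters, which hold the timeline ID. */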
+ return strcmp(fna + 8, fnb + 8);
+}
+
+/*
+ * Parse the base backup options passed down by the parser
+ */
+static void
+parse_basebackup_options(List *options, basebackup_options *opt)
+{
+ ListCell *lopt;
+ bool o_label = false;
+ bool o_progress = false;
+ bool o_fast = false;
+ bool o_nowait = false;
+ bool o_wal = false;
+ bool o_maxrate = false;
+ bool o_tablespace_map = false;
+ bool o_noverify_checksums = false;
+ bool o_manifest = false;
+ bool o_manifest_checksums = false;
+
+ MemSet(opt, 0, sizeof(*opt));
+ opt->manifest = MANIFEST_OPTION_NO;
+ opt->manifest_checksum_type = CHECKSUM_TYPE_CRC32C;
+
+ foreach(lopt, options)
+ {
+ DefElem *defel = (DefElem *) lfirst(lopt);
+
+ if (strcmp(defel->defname, "label") == 0)
+ {
+ if (o_label)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("duplicate option \"%s\"", defel->defname)));
+ opt->label = strVal(defel->arg);
+ o_label = true;
+ }
+ else if (strcmp(defel->defname, "progress") == 0)
+ {
+ if (o_progress)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("duplicate option \"%s\"", defel->defname)));
+ opt->progress = true;
+ o_progress = true;
+ }
+ else if (strcmp(defel->defname, "fast") == 0)
+ {
+ if (o_fast)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("duplicate option \"%s\"", defel->defname)));
+ opt->fastcheckpoint = true;
+ o_fast = true;
+ }
+ else if (strcmp(defel->defname, "nowait") == 0)
+ {
+ if (o_nowait)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("duplicate option \"%s\"", defel->defname)));
+ opt->nowait = true;
+ o_nowait = true;
+ }
+ else if (strcmp(defel->defname, "wal") == 0)
+ {
+ if (o_wal)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("duplicate option \"%s\"", defel->defname)));
+ opt->includewal = true;
+ o_wal = true;
+ }
+ else if (strcmp(defel->defname, "max_rate") == 0)
+ {
+ long maxrate;
+
+ if (o_maxrate)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("duplicate option \"%s\"", defel->defname)));
+
+ maxrate = intVal(defel->arg);
+ if (maxrate < MAX_RATE_LOWER || maxrate > MAX_RATE_UPPER)
+ ereport(ERROR,
+ (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ errmsg("%d is outside the valid range for parameter \"%s\" (%d .. %d)",
+ (int) maxrate, "MAX_RATE", MAX_RATE_LOWER, MAX_RATE_UPPER)));
+
+ opt->maxrate = (uint32) maxrate;
+ o_maxrate = true;
+ }
+ else if (strcmp(defel->defname, "tablespace_map") == 0)
+ {
+ if (o_tablespace_map)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("duplicate option \"%s\"", defel->defname)));
+ opt->sendtblspcmapfile = true;
+ o_tablespace_map = true;
+ }
+ else if (strcmp(defel->defname, "noverify_checksums") == 0)
+ {
+ if (o_noverify_checksums)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("duplicate option \"%s\"", defel->defname)));
+ noverify_checksums = true;
+ o_noverify_checksums = true;
+ }
+ else if (strcmp(defel->defname, "manifest") == 0)
+ {
+ char *optval = strVal(defel->arg);
+ bool manifest_bool;
+
+ if (o_manifest)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("duplicate option \"%s\"", defel->defname)));
+ if (parse_bool(optval, &manifest_bool))
+ {
+ if (manifest_bool)
+ opt->manifest = MANIFEST_OPTION_YES;
+ else
+ opt->manifest = MANIFEST_OPTION_NO;
+ }
+ else if (pg_strcasecmp(optval, "force-encode") == 0)
+ opt->manifest = MANIFEST_OPTION_FORCE_ENCODE;
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("unrecognized manifest option: \"%s\"",
+ optval)));
+ o_manifest = true;
+ }
+ else if (strcmp(defel->defname, "manifest_checksums") == 0)
+ {
+ char *optval = strVal(defel->arg);
+
+ if (o_manifest_checksums)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("duplicate option \"%s\"", defel->defname)));
+ if (!pg_checksum_parse_type(optval,
+ &opt->manifest_checksum_type))
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("unrecognized checksum algorithm: \"%s\"",
+ optval)));
+ o_manifest_checksums = true;
+ }
+ else
+ elog(ERROR, "option \"%s\" not recognized",
+ defel->defname);
+ }
+ if (opt->label == NULL)
+ opt->label = "base backup";
+ if (opt->manifest == MANIFEST_OPTION_NO)
+ {
+ if (o_manifest_checksums)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("manifest checksums require a backup manifest")));
+ opt->manifest_checksum_type = CHECKSUM_TYPE_NONE;
+ }
+}
+
+
+/*
+ * SendBaseBackup() - send a complete base backup.
+ *
+ * The function will put the system into backup mode like pg_start_backup()
+ * does, so that the backup is consistent even though we read directly from
+ * the filesystem, bypassing the buffer cache.
+ */
+void
+SendBaseBackup(BaseBackupCmd *cmd)
+{
+ basebackup_options opt;
+
+ parse_basebackup_options(cmd->options, &opt);
+
+ WalSndSetState(WALSNDSTATE_BACKUP);
+
+ if (update_process_title)
+ {
+ char activitymsg[50];
+
+ snprintf(activitymsg, sizeof(activitymsg), "sending backup \"%s\"",
+ opt.label);
+ set_ps_display(activitymsg);
+ }
+
+ perform_base_backup(&opt);
+}
+
+static void
+send_int8_string(StringInfoData *buf, int64 intval)
+{
+ char is[32];
+
+ sprintf(is, INT64_FORMAT, intval);
+ pq_sendint32(buf, strlen(is));
+ pq_sendbytes(buf, is, strlen(is));
+}
+
+static void
+SendBackupHeader(List *tablespaces)
+{
+ StringInfoData buf;
+ ListCell *lc;
+
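+ /*
+ * The result set has one row per tablespace, with columns (spcoid oid,
+ * spclocation text, size int8). spcoid and spclocation are NULL for the
+ * base directory, and size (in kB) is NULL unless the client requested
+ * PROGRESS.
+ */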
+ /* Construct and send the directory information */
+ pq_beginmessage(&buf, 'T'); /* RowDescription */
+ pq_sendint16(&buf, 3); /* 3 fields */
+
+ /* First field - spcoid */
+ pq_sendstring(&buf, "spcoid");
+ pq_sendint32(&buf, 0); /* table oid */
+ pq_sendint16(&buf, 0); /* attnum */
+ pq_sendint32(&buf, OIDOID); /* type oid */
+ pq_sendint16(&buf, 4); /* typlen */
+ pq_sendint32(&buf, 0); /* typmod */
+ pq_sendint16(&buf, 0); /* format code */
+
+ /* Second field - spclocation */
+ pq_sendstring(&buf, "spclocation");
+ pq_sendint32(&buf, 0);
+ pq_sendint16(&buf, 0);
+ pq_sendint32(&buf, TEXTOID);
+ pq_sendint16(&buf, -1);
+ pq_sendint32(&buf, 0);
+ pq_sendint16(&buf, 0);
+
+ /* Third field - size */
+ pq_sendstring(&buf, "size");
+ pq_sendint32(&buf, 0);
+ pq_sendint16(&buf, 0);
+ pq_sendint32(&buf, INT8OID);
+ pq_sendint16(&buf, 8);
+ pq_sendint32(&buf, 0);
+ pq_sendint16(&buf, 0);
+ pq_endmessage(&buf);
+
+ foreach(lc, tablespaces)
+ {
+ tablespaceinfo *ti = lfirst(lc);
+
+ /* Send one datarow message */
+ pq_beginmessage(&buf, 'D');
+ pq_sendint16(&buf, 3); /* number of columns */
+ if (ti->path == NULL)
+ {
+ pq_sendint32(&buf, -1); /* Length = -1 ==> NULL */
+ pq_sendint32(&buf, -1);
+ }
+ else
+ {
+ Size len;
+
+ len = strlen(ti->oid);
+ pq_sendint32(&buf, len);
+ pq_sendbytes(&buf, ti->oid, len);
+
+ len = strlen(ti->path);
+ pq_sendint32(&buf, len);
+ pq_sendbytes(&buf, ti->path, len);
+ }
+ if (ti->size >= 0)
+ send_int8_string(&buf, ti->size / 1024);
+ else
+ pq_sendint32(&buf, -1); /* NULL */
+
+ pq_endmessage(&buf);
+ }
+
+ /* Send a CommandComplete message */
+ pq_puttextmessage('C', "SELECT");
+}
+
+/*
+ * Send a single resultset containing just a single
+ * XLogRecPtr record (in text format)
+ */
+static void
+SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
+{
+ StringInfoData buf;
+ char str[MAXFNAMELEN];
+ Size len;
+
+ pq_beginmessage(&buf, 'T'); /* RowDescription */
+ pq_sendint16(&buf, 2); /* 2 fields */
+
+ /* Field headers */
+ pq_sendstring(&buf, "recptr");
+ pq_sendint32(&buf, 0); /* table oid */
+ pq_sendint16(&buf, 0); /* attnum */
+ pq_sendint32(&buf, TEXTOID); /* type oid */
+ pq_sendint16(&buf, -1);
+ pq_sendint32(&buf, 0);
+ pq_sendint16(&buf, 0);
+
+ pq_sendstring(&buf, "tli");
+ pq_sendint32(&buf, 0); /* table oid */
+ pq_sendint16(&buf, 0); /* attnum */
+
+ /*
+ * int8 may seem like a surprising data type for this, but in theory int4
+ * would not be wide enough, as TimeLineID is unsigned.
+ */
+ pq_sendint32(&buf, INT8OID); /* type oid */
+ pq_sendint16(&buf, -1);
+ pq_sendint32(&buf, 0);
+ pq_sendint16(&buf, 0);
+ pq_endmessage(&buf);
+
+ /* Data row */
+ pq_beginmessage(&buf, 'D');
+ pq_sendint16(&buf, 2); /* number of columns */
+
+ len = snprintf(str, sizeof(str),
+ "%X/%X", (uint32) (ptr >> 32), (uint32) ptr);
+ pq_sendint32(&buf, len);
+ pq_sendbytes(&buf, str, len);
+
+ len = snprintf(str, sizeof(str), "%u", tli);
+ pq_sendint32(&buf, len);
+ pq_sendbytes(&buf, str, len);
+
+ pq_endmessage(&buf);
+
+ /* Send a CommandComplete message */
+ pq_puttextmessage('C', "SELECT");
+}
+
+/*
+ * Inject a file with given name and content in the output tar stream.
+ */
+static void
+sendFileWithContent(const char *filename, const char *content,
+ backup_manifest_info *manifest)
+{
+ struct stat statbuf;
+ int pad,
+ len;
+ pg_checksum_context checksum_ctx;
+
+ pg_checksum_init(&checksum_ctx, manifest->checksum_type);
+
+ len = strlen(content);
+
+ /*
+ * Construct a stat struct for the file we're injecting in the tar.
+ */
+ /* Windows doesn't have the concept of uid and gid */
+#ifdef WIN32
+ statbuf.st_uid = 0;
+ statbuf.st_gid = 0;
+#else
+ statbuf.st_uid = geteuid();
+ statbuf.st_gid = getegid();
+#endif
+ statbuf.st_mtime = time(NULL);
+ statbuf.st_mode = pg_file_create_mode;
+ statbuf.st_size = len;
+
+ _tarWriteHeader(filename, NULL, &statbuf, false);
+ /* Send the contents as a CopyData message */
+ pq_putmessage('d', content, len);
+ update_basebackup_progress(len);
+
+ /* Pad to 512 byte boundary, per tar format requirements */
+ pad = ((len + 511) & ~511) - len;
+ if (pad > 0)
+ {
+ char buf[512];
+
+ MemSet(buf, 0, pad);
+ pq_putmessage('d', buf, pad);
+ update_basebackup_progress(pad);
+ }
+
+ pg_checksum_update(&checksum_ctx, (uint8 *) content, len);
+ AddFileToBackupManifest(manifest, NULL, filename, len,
+ (pg_time_t) statbuf.st_mtime, &checksum_ctx);
+}
+
+/*
+ * Include the tablespace directory pointed to by 'path' in the output tar
+ * stream. If 'sizeonly' is true, we just calculate a total length and return
+ * it, without actually sending anything.
+ *
+ * Only used to send auxiliary tablespaces, not PGDATA.
+ */
+int64
+sendTablespace(char *path, char *spcoid, bool sizeonly,
+ backup_manifest_info *manifest)
+{
+ int64 size;
+ char pathbuf[MAXPGPATH];
+ struct stat statbuf;
+
+ /*
+ * 'path' points to the tablespace location, but we only want to include
+ * the version directory in it that belongs to us.
+ */
+ snprintf(pathbuf, sizeof(pathbuf), "%s/%s", path,
+ TABLESPACE_VERSION_DIRECTORY);
+
+ /*
+ * Store a directory entry in the tar file so we get the permissions
+ * right.
+ */
+ if (lstat(pathbuf, &statbuf) != 0)
+ {
+ if (errno != ENOENT)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not stat file or directory \"%s\": %m",
+ pathbuf)));
+
+ /* If the tablespace went away while scanning, it's no error. */
+ return 0;
+ }
+
+ size = _tarWriteHeader(TABLESPACE_VERSION_DIRECTORY, NULL, &statbuf,
+ sizeonly);
+
+ /* Send all the files in the tablespace version directory */
+ size += sendDir(pathbuf, strlen(path), sizeonly, NIL, true, manifest,
+ spcoid);
+
+ return size;
+}
+
+/*
+ * Include all files from the given directory in the output tar stream. If
+ * 'sizeonly' is true, we just calculate a total length and return it, without
+ * actually sending anything.
+ *
+ * Omit any directory in the tablespaces list, to avoid backing up
+ * tablespaces twice when they were created inside PGDATA.
+ *
+ * If sendtblspclinks is true, we need to include symlink
+ * information in the tar file. If not, we can skip that
+ * as it will be sent separately in the tablespace_map file.
+ */
+static int64
+sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces,
+ bool sendtblspclinks, backup_manifest_info *manifest,
+ const char *spcoid)
+{
+ DIR *dir;
+ struct dirent *de;
+ char pathbuf[MAXPGPATH * 2];
+ struct stat statbuf;
+ int64 size = 0;
+ const char *lastDir; /* Split last dir from parent path. */
+ bool isDbDir = false; /* Does this directory contain relations? */
+
+ /*
+ * Determine if the current path is a database directory that can contain
+ * relations.
+ *
+ * Start by finding the location of the delimiter between the parent path
+ * and the current path.
+ */
+ lastDir = last_dir_separator(path);
+
+ /* Does this path look like a database path (i.e. all digits)? */
+ if (lastDir != NULL &&
+ strspn(lastDir + 1, "0123456789") == strlen(lastDir + 1))
+ {
+ /* Part of path that contains the parent directory. */
+ int parentPathLen = lastDir - path;
+
+ /*
+ * Mark path as a database directory if the parent path is either
+ * $PGDATA/base or a tablespace version path.
+ */
+ if (strncmp(path, "./base", parentPathLen) == 0 ||
+ (parentPathLen >= (sizeof(TABLESPACE_VERSION_DIRECTORY) - 1) &&
+ strncmp(lastDir - (sizeof(TABLESPACE_VERSION_DIRECTORY) - 1),
+ TABLESPACE_VERSION_DIRECTORY,
+ sizeof(TABLESPACE_VERSION_DIRECTORY) - 1) == 0))
+ isDbDir = true;
+ }
+
+ dir = AllocateDir(path);
+ while ((de = ReadDir(dir, path)) != NULL)
+ {
+ int excludeIdx;
+ bool excludeFound;
+ ForkNumber relForkNum; /* Type of fork if file is a relation */
+ int relOidChars; /* Chars in filename that are the rel oid */
+
+ /* Skip special stuff */
+ if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
+ continue;
+
+ /* Skip temporary files */
+ if (strncmp(de->d_name,
+ PG_TEMP_FILE_PREFIX,
+ strlen(PG_TEMP_FILE_PREFIX)) == 0)
+ continue;
+
+ /*
+ * Check if the postmaster has signaled us to exit, and abort with an
+ * error in that case. The error handler further up will call
+ * do_pg_abort_backup() for us. Also check that if the backup was
+ * started while still in recovery, the server wasn't promoted.
+ * do_pg_stop_backup() will check that too, but it's better to stop
+ * the backup early than continue to the end and fail there.
+ */
+ CHECK_FOR_INTERRUPTS();
+ if (RecoveryInProgress() != backup_started_in_recovery)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("the standby was promoted during online backup"),
+ errhint("This means that the backup being taken is corrupt "
+ "and should not be used. "
+ "Try taking another online backup.")));
+
+ /* Scan for files that should be excluded */
+ excludeFound = false;
+ for (excludeIdx = 0; excludeFiles[excludeIdx].name != NULL; excludeIdx++)
+ {
+ int cmplen = strlen(excludeFiles[excludeIdx].name);
+
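+ /*
+ * For an exact-name entry, extend the comparison length by one so
+ * that strncmp() also compares the terminating NUL byte; then a mere
+ * prefix of the excluded name does not match.
+ */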
+ if (!excludeFiles[excludeIdx].match_prefix)
+ cmplen++;
+ if (strncmp(de->d_name, excludeFiles[excludeIdx].name, cmplen) == 0)
+ {
+ elog(DEBUG1, "file \"%s\" excluded from backup", de->d_name);
+ excludeFound = true;
+ break;
+ }
+ }
+
+ if (excludeFound)
+ continue;
+
+ /* Exclude all forks for unlogged tables except the init fork */
+ if (isDbDir &&
+ parse_filename_for_nontemp_relation(de->d_name, &relOidChars,
+ &relForkNum))
+ {
+ /* Never exclude init forks */
+ if (relForkNum != INIT_FORKNUM)
+ {
+ char initForkFile[MAXPGPATH];
+ char relOid[OIDCHARS + 1];
+
+ /*
+ * If any other type of fork, check if there is an init fork
+ * with the same OID. If so, the file can be excluded.
+ */
+ memcpy(relOid, de->d_name, relOidChars);
+ relOid[relOidChars] = '\0';
+ snprintf(initForkFile, sizeof(initForkFile), "%s/%s_init",
+ path, relOid);
+
+ if (lstat(initForkFile, &statbuf) == 0)
+ {
+ elog(DEBUG2,
+ "unlogged relation file \"%s\" excluded from backup",
+ de->d_name);
+
+ continue;
+ }
+ }
+ }
+
+ /* Exclude temporary relations */
+ if (isDbDir && looks_like_temp_rel_name(de->d_name))
+ {
+ elog(DEBUG2,
+ "temporary relation file \"%s\" excluded from backup",
+ de->d_name);
+
+ continue;
+ }
+
+ snprintf(pathbuf, sizeof(pathbuf), "%s/%s", path, de->d_name);
+
+		/* Skip pg_control here so that we can back it up last */
+ if (strcmp(pathbuf, "./global/pg_control") == 0)
+ continue;
+
+ if (lstat(pathbuf, &statbuf) != 0)
+ {
+ if (errno != ENOENT)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not stat file or directory \"%s\": %m",
+ pathbuf)));
+
+ /* If the file went away while scanning, it's not an error. */
+ continue;
+ }
+
+ /* Scan for directories whose contents should be excluded */
+ excludeFound = false;
+ for (excludeIdx = 0; excludeDirContents[excludeIdx] != NULL; excludeIdx++)
+ {
+ if (strcmp(de->d_name, excludeDirContents[excludeIdx]) == 0)
+ {
+ elog(DEBUG1, "contents of directory \"%s\" excluded from backup", de->d_name);
+ size += _tarWriteDir(pathbuf, basepathlen, &statbuf, sizeonly);
+ excludeFound = true;
+ break;
+ }
+ }
+
+ if (excludeFound)
+ continue;
+
+ /*
+		 * Exclude the contents of the directory specified by statrelpath,
+		 * if it is not set to the default (pg_stat_tmp), which is caught
+		 * in the loop above.
+ */
+ if (statrelpath != NULL && strcmp(pathbuf, statrelpath) == 0)
+ {
+ elog(DEBUG1, "contents of directory \"%s\" excluded from backup", statrelpath);
+ size += _tarWriteDir(pathbuf, basepathlen, &statbuf, sizeonly);
+ continue;
+ }
+
+ /*
+		 * We can skip pg_wal; the WAL segments need to be fetched from the
+		 * WAL archive anyway. But include it as an empty directory, so that
+		 * we get the permissions right.
+ */
+ if (strcmp(pathbuf, "./pg_wal") == 0)
+ {
+ /* If pg_wal is a symlink, write it as a directory anyway */
+ size += _tarWriteDir(pathbuf, basepathlen, &statbuf, sizeonly);
+
+ /*
+ * Also send archive_status directory (by hackishly reusing
+ * statbuf from above ...).
+ */
+ size += _tarWriteHeader("./pg_wal/archive_status", NULL, &statbuf,
+ sizeonly);
+
+ continue; /* don't recurse into pg_wal */
+ }
+
+ /* Allow symbolic links in pg_tblspc only */
+ if (strcmp(path, "./pg_tblspc") == 0 &&
+#ifndef WIN32
+ S_ISLNK(statbuf.st_mode)
+#else
+ pgwin32_is_junction(pathbuf)
+#endif
+ )
+ {
+#if defined(HAVE_READLINK) || defined(WIN32)
+ char linkpath[MAXPGPATH];
+ int rllen;
+
+ rllen = readlink(pathbuf, linkpath, sizeof(linkpath));
+ if (rllen < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read symbolic link \"%s\": %m",
+ pathbuf)));
+ if (rllen >= sizeof(linkpath))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("symbolic link \"%s\" target is too long",
+ pathbuf)));
+ linkpath[rllen] = '\0';
+
+ size += _tarWriteHeader(pathbuf + basepathlen + 1, linkpath,
+ &statbuf, sizeonly);
+#else
+
+ /*
+ * If the platform does not have symbolic links, it should not be
+ * possible to have tablespaces - clearly somebody else created
+ * them. Warn about it and ignore.
+ */
+ ereport(WARNING,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("tablespaces are not supported on this platform")));
+ continue;
+#endif /* HAVE_READLINK */
+ }
+ else if (S_ISDIR(statbuf.st_mode))
+ {
+ bool skip_this_dir = false;
+ ListCell *lc;
+
+ /*
+ * Store a directory entry in the tar file so we can get the
+ * permissions right.
+ */
+ size += _tarWriteHeader(pathbuf + basepathlen + 1, NULL, &statbuf,
+ sizeonly);
+
+ /*
+ * Call ourselves recursively for a directory, unless it happens
+ * to be a separate tablespace located within PGDATA.
+ */
+ foreach(lc, tablespaces)
+ {
+ tablespaceinfo *ti = (tablespaceinfo *) lfirst(lc);
+
+ /*
+ * ti->rpath is the tablespace relative path within PGDATA, or
+ * NULL if the tablespace has been properly located somewhere
+ * else.
+ *
+ * Skip past the leading "./" in pathbuf when comparing.
+ */
+ if (ti->rpath && strcmp(ti->rpath, pathbuf + 2) == 0)
+ {
+ skip_this_dir = true;
+ break;
+ }
+ }
+
+ /*
+			 * Skip sending directories inside pg_tblspc, if not required.
+ */
+ if (strcmp(pathbuf, "./pg_tblspc") == 0 && !sendtblspclinks)
+ skip_this_dir = true;
+
+ if (!skip_this_dir)
+ size += sendDir(pathbuf, basepathlen, sizeonly, tablespaces,
+ sendtblspclinks, manifest, spcoid);
+ }
+ else if (S_ISREG(statbuf.st_mode))
+ {
+ bool sent = false;
+
+ if (!sizeonly)
+ sent = sendFile(pathbuf, pathbuf + basepathlen + 1, &statbuf,
+ true, isDbDir ? atooid(lastDir + 1) : InvalidOid,
+ manifest, spcoid);
+
+ if (sent || sizeonly)
+ {
+				/* Add size, rounded up to a full 512-byte block */
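+				/*
+				 * Illustrative arithmetic: a 600-byte file occupies
+				 * (600 + 511) & ~511 = 1024 data bytes plus one 512-byte
+				 * header, 1536 bytes in total.
+				 */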
+ size += ((statbuf.st_size + 511) & ~511);
+ size += 512; /* Size of the header of the file */
+ }
+ }
+ else
+ ereport(WARNING,
+ (errmsg("skipping special file \"%s\"", pathbuf)));
+ }
+ FreeDir(dir);
+ return size;
+}
+
+/*
+ * Check if a file should have its checksum validated.
+ * We validate checksums on files in regular tablespaces
+ * (including global and default) only, and in those there
+ * are some files that are explicitly excluded.
+ */
+static bool
+is_checksummed_file(const char *fullpath, const char *filename)
+{
+ /* Check that the file is in a tablespace */
+ if (strncmp(fullpath, "./global/", 9) == 0 ||
+ strncmp(fullpath, "./base/", 7) == 0 ||
+ strncmp(fullpath, "/", 1) == 0)
+ {
+ int excludeIdx;
+
+ /* Compare file against noChecksumFiles skip list */
+ for (excludeIdx = 0; noChecksumFiles[excludeIdx].name != NULL; excludeIdx++)
+ {
+ int cmplen = strlen(noChecksumFiles[excludeIdx].name);
+
+ if (!noChecksumFiles[excludeIdx].match_prefix)
+ cmplen++;
+ if (strncmp(filename, noChecksumFiles[excludeIdx].name,
+ cmplen) == 0)
+ return false;
+ }
+
+ return true;
+ }
+ else
+ return false;
+}
+
+/*****
+ * Functions for handling tar file format
+ *
+ * Copied from pg_dump, but modified to work with libpq for sending
+ */
+
+
+/*
+ * Given the member, write the TAR header & send the file.
+ *
+ * If 'missing_ok' is true, will not throw an error if the file is not found.
+ *
+ * If dboid is anything other than InvalidOid then any checksum failures detected
+ * will get reported to the stats collector.
+ *
+ * Returns true if the file was successfully sent, false if 'missing_ok'
+ * is true and the file did not exist.
+ */
+static bool
+sendFile(const char *readfilename, const char *tarfilename,
+ struct stat *statbuf, bool missing_ok, Oid dboid,
+ backup_manifest_info *manifest, const char *spcoid)
+{
+ FILE *fp;
+ BlockNumber blkno = 0;
+ bool block_retry = false;
+ char buf[TAR_SEND_SIZE];
+ uint16 checksum;
+ int checksum_failures = 0;
+ off_t cnt;
+ int i;
+ pgoff_t len = 0;
+ char *page;
+ size_t pad;
+ PageHeader phdr;
+ int segmentno = 0;
+ char *segmentpath;
+ bool verify_checksum = false;
+ pg_checksum_context checksum_ctx;
+
+ pg_checksum_init(&checksum_ctx, manifest->checksum_type);
+
+ fp = AllocateFile(readfilename, "rb");
+ if (fp == NULL)
+ {
+ if (errno == ENOENT && missing_ok)
+ return false;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m", readfilename)));
+ }
+
+ _tarWriteHeader(tarfilename, NULL, statbuf, false);
+
+ if (!noverify_checksums && DataChecksumsEnabled())
+ {
+ char *filename;
+
+ /*
+ * Get the filename (excluding path). As last_dir_separator()
+ * includes the last directory separator, we chop that off by
+ * incrementing the pointer.
+ */
+ filename = last_dir_separator(readfilename) + 1;
+
+ if (is_checksummed_file(readfilename, filename))
+ {
+ verify_checksum = true;
+
+ /*
+ * Cut off at the segment boundary (".") to get the segment number
+ * in order to mix it into the checksum.
+ */
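+			/*
+			 * For example (illustrative file names), "16384.2" yields
+			 * segment number 2, while "16384" leaves segmentno at 0.
+			 */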
+ segmentpath = strstr(filename, ".");
+ if (segmentpath != NULL)
+ {
+ segmentno = atoi(segmentpath + 1);
+ if (segmentno == 0)
+ ereport(ERROR,
+ (errmsg("invalid segment number %d in file \"%s\"",
+ segmentno, filename)));
+ }
+ }
+ }
+
+ while ((cnt = fread(buf, 1, Min(sizeof(buf), statbuf->st_size - len), fp)) > 0)
+ {
+ /*
+ * The checksums are verified at block level, so we iterate over the
+		 * buffer in chunks of BLCKSZ, after making sure that the buffer
+		 * size (TAR_SEND_SIZE) is divisible by BLCKSZ and that we read a
+		 * multiple of BLCKSZ bytes.
+ */
+ Assert(TAR_SEND_SIZE % BLCKSZ == 0);
+
+ if (verify_checksum && (cnt % BLCKSZ != 0))
+ {
+ ereport(WARNING,
+ (errmsg("could not verify checksum in file \"%s\", block "
+ "%d: read buffer size %d and page size %d "
+ "differ",
+ readfilename, blkno, (int) cnt, BLCKSZ)));
+ verify_checksum = false;
+ }
+
+ if (verify_checksum)
+ {
+ for (i = 0; i < cnt / BLCKSZ; i++)
+ {
+ page = buf + BLCKSZ * i;
+
+ /*
+ * Only check pages which have not been modified since the
+ * start of the base backup. Otherwise, they might have been
+ * written only halfway and the checksum would not be valid.
+ * However, replaying WAL would reinstate the correct page in
+ * this case. We also skip completely new pages, since they
+ * don't have a checksum yet.
+ */
+ if (!PageIsNew(page) && PageGetLSN(page) < startptr)
+ {
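+					/*
+					 * The checksum mixes in the block's position within the
+					 * whole relation; e.g. with the default RELSEG_SIZE of
+					 * 131072 blocks, block 10 of segment 2 is block 262154
+					 * overall (illustrative numbers).
+					 */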
+ checksum = pg_checksum_page((char *) page, blkno + segmentno * RELSEG_SIZE);
+ phdr = (PageHeader) page;
+ if (phdr->pd_checksum != checksum)
+ {
+ /*
+ * Retry the block on the first failure. It's
+ * possible that we read the first 4K page of the
+ * block just before postgres updated the entire block
+ * so it ends up looking torn to us. We only need to
+ * retry once because the LSN should be updated to
+ * something we can ignore on the next pass. If the
+ * error happens again then it is a true validation
+ * failure.
+ */
+ if (block_retry == false)
+ {
+ /* Reread the failed block */
+ if (fseek(fp, -(cnt - BLCKSZ * i), SEEK_CUR) == -1)
+ {
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not fseek in file \"%s\": %m",
+ readfilename)));
+ }
+
+ if (fread(buf + BLCKSZ * i, 1, BLCKSZ, fp) != BLCKSZ)
+ {
+ /*
+ * If we hit end-of-file, a concurrent
+ * truncation must have occurred, so break out
+ * of this loop just as if the initial fread()
+ * returned 0. We'll drop through to the same
+ * code that handles that case. (We must fix
+ * up cnt first, though.)
+ */
+ if (feof(fp))
+ {
+ cnt = BLCKSZ * i;
+ break;
+ }
+
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not reread block %d of file \"%s\": %m",
+ blkno, readfilename)));
+ }
+
+ if (fseek(fp, cnt - BLCKSZ * i - BLCKSZ, SEEK_CUR) == -1)
+ {
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not fseek in file \"%s\": %m",
+ readfilename)));
+ }
+
+ /* Set flag so we know a retry was attempted */
+ block_retry = true;
+
+ /* Reset loop to validate the block again */
+ i--;
+ continue;
+ }
+
+ checksum_failures++;
+
+ if (checksum_failures <= 5)
+ ereport(WARNING,
+ (errmsg("checksum verification failed in "
+ "file \"%s\", block %d: calculated "
+ "%X but expected %X",
+ readfilename, blkno, checksum,
+ phdr->pd_checksum)));
+ if (checksum_failures == 5)
+ ereport(WARNING,
+ (errmsg("further checksum verification "
+ "failures in file \"%s\" will not "
+ "be reported", readfilename)));
+ }
+ }
+ block_retry = false;
+ blkno++;
+ }
+ }
+
+ /* Send the chunk as a CopyData message */
+ if (pq_putmessage('d', buf, cnt))
+ ereport(ERROR,
+ (errmsg("base backup could not send data, aborting backup")));
+ update_basebackup_progress(cnt);
+
+ /* Also feed it to the checksum machinery. */
+ pg_checksum_update(&checksum_ctx, (uint8 *) buf, cnt);
+
+ len += cnt;
+ throttle(cnt);
+
+ if (feof(fp) || len >= statbuf->st_size)
+ {
+ /*
+ * Reached end of file. The file could be longer, if it was
+ * extended while we were sending it, but for a base backup we can
+ * ignore such extended data. It will be restored from WAL.
+ */
+ break;
+ }
+ }
+
+ CHECK_FREAD_ERROR(fp, readfilename);
+
+ /* If the file was truncated while we were sending it, pad it with zeros */
+ if (len < statbuf->st_size)
+ {
+ MemSet(buf, 0, sizeof(buf));
+ while (len < statbuf->st_size)
+ {
+ cnt = Min(sizeof(buf), statbuf->st_size - len);
+ pq_putmessage('d', buf, cnt);
+ pg_checksum_update(&checksum_ctx, (uint8 *) buf, cnt);
+ update_basebackup_progress(cnt);
+ len += cnt;
+ throttle(cnt);
+ }
+ }
+
+ /*
+ * Pad to 512 byte boundary, per tar format requirements. (This small
+ * piece of data is probably not worth throttling, and is not checksummed
+ * because it's not actually part of the file.)
+ */
+ pad = ((len + 511) & ~511) - len;
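+	/* E.g. (illustrative): len = 600 yields pad = 1024 - 600 = 424 zero bytes. */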
+ if (pad > 0)
+ {
+ MemSet(buf, 0, pad);
+ pq_putmessage('d', buf, pad);
+ update_basebackup_progress(pad);
+ }
+
+ FreeFile(fp);
+
+ if (checksum_failures > 1)
+ {
+ ereport(WARNING,
+ (errmsg_plural("file \"%s\" has a total of %d checksum verification failure",
+ "file \"%s\" has a total of %d checksum verification failures",
+ checksum_failures,
+ readfilename, checksum_failures)));
+
+ pgstat_report_checksum_failures_in_db(dboid, checksum_failures);
+ }
+
+ total_checksum_failures += checksum_failures;
+
+ AddFileToBackupManifest(manifest, spcoid, tarfilename, statbuf->st_size,
+ (pg_time_t) statbuf->st_mtime, &checksum_ctx);
+
+ return true;
+}
+
+
+static int64
+_tarWriteHeader(const char *filename, const char *linktarget,
+ struct stat *statbuf, bool sizeonly)
+{
+ char h[512];
+ enum tarError rc;
+
+ if (!sizeonly)
+ {
+ rc = tarCreateHeader(h, filename, linktarget, statbuf->st_size,
+ statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
+ statbuf->st_mtime);
+
+ switch (rc)
+ {
+ case TAR_OK:
+ break;
+ case TAR_NAME_TOO_LONG:
+ ereport(ERROR,
+ (errmsg("file name too long for tar format: \"%s\"",
+ filename)));
+ break;
+ case TAR_SYMLINK_TOO_LONG:
+ ereport(ERROR,
+ (errmsg("symbolic link target too long for tar format: "
+ "file name \"%s\", target \"%s\"",
+ filename, linktarget)));
+ break;
+ default:
+ elog(ERROR, "unrecognized tar error: %d", rc);
+ }
+
+ pq_putmessage('d', h, sizeof(h));
+ update_basebackup_progress(sizeof(h));
+ }
+
+ return sizeof(h);
+}
+
+/*
+ * Write tar header for a directory. If the entry in statbuf is a link then
+ * write it as a directory anyway.
+ */
+static int64
+_tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf,
+ bool sizeonly)
+{
+ /* If symlink, write it as a directory anyway */
+#ifndef WIN32
+ if (S_ISLNK(statbuf->st_mode))
+#else
+ if (pgwin32_is_junction(pathbuf))
+#endif
+ statbuf->st_mode = S_IFDIR | pg_dir_create_mode;
+
+ return _tarWriteHeader(pathbuf + basepathlen + 1, NULL, statbuf, sizeonly);
+}
+
+/*
+ * Increment the network transfer counter by the given number of bytes,
+ * and sleep if necessary to comply with the requested network transfer
+ * rate.
+ */
+static void
+throttle(size_t increment)
+{
+ TimeOffset elapsed_min;
+
+ if (throttling_counter < 0)
+ return;
+
+ throttling_counter += increment;
+ if (throttling_counter < throttling_sample)
+ return;
+
+ /* How much time should have elapsed at minimum? */
+ elapsed_min = elapsed_min_unit *
+ (throttling_counter / throttling_sample);
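+
+	/*
+	 * Illustrative numbers: if throttling_sample is 131072 bytes and
+	 * elapsed_min_unit is 125000 us, then after two full samples at least
+	 * 250000 us must have elapsed before we proceed without sleeping.
+	 */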
+
+ /*
+	 * Since the latch could be set repeatedly because of concurrent WAL
+ * activity, sleep in a loop to ensure enough time has passed.
+ */
+ for (;;)
+ {
+ TimeOffset elapsed,
+ sleep;
+ int wait_result;
+
+ /* Time elapsed since the last measurement (and possible wake up). */
+ elapsed = GetCurrentTimestamp() - throttled_last;
+
+ /* sleep if the transfer is faster than it should be */
+ sleep = elapsed_min - elapsed;
+ if (sleep <= 0)
+ break;
+
+ ResetLatch(MyLatch);
+
+ /* We're eating a potentially set latch, so check for interrupts */
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * (TAR_SEND_SIZE / throttling_sample * elapsed_min_unit) should be
+ * the maximum time to sleep. Thus the cast to long is safe.
+ */
+ wait_result = WaitLatch(MyLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ (long) (sleep / 1000),
+ WAIT_EVENT_BASE_BACKUP_THROTTLE);
+
+ if (wait_result & WL_LATCH_SET)
+ CHECK_FOR_INTERRUPTS();
+
+ /* Done waiting? */
+ if (wait_result & WL_TIMEOUT)
+ break;
+ }
+
+ /*
+	 * As we work with integers, only a whole multiple of throttling_sample
+	 * has been processed. The rest will be done during the next call of
+	 * this function.
+ */
+ throttling_counter %= throttling_sample;
+
+ /*
+ * Time interval for the remaining amount and possible next increments
+ * starts now.
+ */
+ throttled_last = GetCurrentTimestamp();
+}
+
+/*
+ * Increment the counter for the amount of data already streamed
+ * by the given number of bytes, and update the progress report for
+ * pg_stat_progress_basebackup.
+ */
+static void
+update_basebackup_progress(int64 delta)
+{
+ const int index[] = {
+ PROGRESS_BASEBACKUP_BACKUP_STREAMED,
+ PROGRESS_BASEBACKUP_BACKUP_TOTAL
+ };
+ int64 val[2];
+ int nparam = 0;
+
+ backup_streamed += delta;
+ val[nparam++] = backup_streamed;
+
+ /*
+ * Avoid overflowing past 100% or the full size. This may make the total
+ * size number change as we approach the end of the backup (the estimate
+ * will always be wrong if WAL is included), but that's better than having
+ * the done column be bigger than the total.
+ */
+ if (backup_total > -1 && backup_streamed > backup_total)
+ {
+ backup_total = backup_streamed;
+ val[nparam++] = backup_total;
+ }
+
+ pgstat_progress_update_multi_param(nparam, index, val);
+}
diff --git a/src/backend/replication/libpqwalreceiver/Makefile b/src/backend/replication/libpqwalreceiver/Makefile
new file mode 100644
index 0000000..f26daa1
--- /dev/null
+++ b/src/backend/replication/libpqwalreceiver/Makefile
@@ -0,0 +1,37 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for src/backend/replication/libpqwalreceiver
+#
+# IDENTIFICATION
+# src/backend/replication/libpqwalreceiver/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/replication/libpqwalreceiver
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+override CPPFLAGS := -I$(srcdir) -I$(libpq_srcdir) $(CPPFLAGS)
+
+OBJS = \
+ $(WIN32RES) \
+ libpqwalreceiver.o
+SHLIB_LINK_INTERNAL = $(libpq)
+SHLIB_LINK = $(filter -lintl, $(LIBS))
+SHLIB_PREREQS = submake-libpq
+PGFILEDESC = "libpqwalreceiver - receive WAL during streaming replication"
+NAME = libpqwalreceiver
+
+all: all-shared-lib
+
+include $(top_srcdir)/src/Makefile.shlib
+
+install: all installdirs install-lib
+
+installdirs: installdirs-lib
+
+uninstall: uninstall-lib
+
+clean distclean maintainer-clean: clean-lib
+ rm -f $(OBJS)
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
new file mode 100644
index 0000000..98b0164
--- /dev/null
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -0,0 +1,1072 @@
+/*-------------------------------------------------------------------------
+ *
+ * libpqwalreceiver.c
+ *
+ * This file contains the libpq-specific parts of walreceiver. It's
+ * loaded as a dynamic module to avoid linking the main server binary with
+ * libpq.
+ *
+ * Portions Copyright (c) 2010-2020, PostgreSQL Global Development Group
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <unistd.h>
+#include <sys/time.h>
+
+#include "access/xlog.h"
+#include "catalog/pg_type.h"
+#include "common/connect.h"
+#include "funcapi.h"
+#include "libpq-fe.h"
+#include "mb/pg_wchar.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "pqexpbuffer.h"
+#include "replication/walreceiver.h"
+#include "utils/builtins.h"
+#include "utils/memutils.h"
+#include "utils/pg_lsn.h"
+#include "utils/tuplestore.h"
+
+PG_MODULE_MAGIC;
+
+void _PG_init(void);
+
+struct WalReceiverConn
+{
+ /* Current connection to the primary, if any */
+ PGconn *streamConn;
+ /* Used to remember if the connection is logical or physical */
+ bool logical;
+ /* Buffer for currently read records */
+ char *recvBuf;
+};
+
+/* Prototypes for interface functions */
+static WalReceiverConn *libpqrcv_connect(const char *conninfo,
+ bool logical, const char *appname,
+ char **err);
+static void libpqrcv_check_conninfo(const char *conninfo);
+static char *libpqrcv_get_conninfo(WalReceiverConn *conn);
+static void libpqrcv_get_senderinfo(WalReceiverConn *conn,
+ char **sender_host, int *sender_port);
+static char *libpqrcv_identify_system(WalReceiverConn *conn,
+ TimeLineID *primary_tli);
+static int libpqrcv_server_version(WalReceiverConn *conn);
+static void libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
+ TimeLineID tli, char **filename,
+ char **content, int *len);
+static bool libpqrcv_startstreaming(WalReceiverConn *conn,
+ const WalRcvStreamOptions *options);
+static void libpqrcv_endstreaming(WalReceiverConn *conn,
+ TimeLineID *next_tli);
+static int libpqrcv_receive(WalReceiverConn *conn, char **buffer,
+ pgsocket *wait_fd);
+static void libpqrcv_send(WalReceiverConn *conn, const char *buffer,
+ int nbytes);
+static char *libpqrcv_create_slot(WalReceiverConn *conn,
+ const char *slotname,
+ bool temporary,
+ CRSSnapshotAction snapshot_action,
+ XLogRecPtr *lsn);
+static pid_t libpqrcv_get_backend_pid(WalReceiverConn *conn);
+static WalRcvExecResult *libpqrcv_exec(WalReceiverConn *conn,
+ const char *query,
+ const int nRetTypes,
+ const Oid *retTypes);
+static void libpqrcv_disconnect(WalReceiverConn *conn);
+
+static WalReceiverFunctionsType PQWalReceiverFunctions = {
+ libpqrcv_connect,
+ libpqrcv_check_conninfo,
+ libpqrcv_get_conninfo,
+ libpqrcv_get_senderinfo,
+ libpqrcv_identify_system,
+ libpqrcv_server_version,
+ libpqrcv_readtimelinehistoryfile,
+ libpqrcv_startstreaming,
+ libpqrcv_endstreaming,
+ libpqrcv_receive,
+ libpqrcv_send,
+ libpqrcv_create_slot,
+ libpqrcv_get_backend_pid,
+ libpqrcv_exec,
+ libpqrcv_disconnect
+};
+
+/* Prototypes for private functions */
+static PGresult *libpqrcv_PQexec(PGconn *streamConn, const char *query);
+static PGresult *libpqrcv_PQgetResult(PGconn *streamConn);
+static char *stringlist_to_identifierstr(PGconn *conn, List *strings);
+
+/*
+ * Module initialization function
+ */
+void
+_PG_init(void)
+{
+ if (WalReceiverFunctions != NULL)
+ elog(ERROR, "libpqwalreceiver already loaded");
+ WalReceiverFunctions = &PQWalReceiverFunctions;
+}
+
+/*
+ * Establish the connection to the primary server for XLOG streaming
+ *
+ * Returns NULL on error and fills *err with a palloc'd error message.
+ */
+static WalReceiverConn *
+libpqrcv_connect(const char *conninfo, bool logical, const char *appname,
+ char **err)
+{
+ WalReceiverConn *conn;
+ PostgresPollingStatusType status;
+ const char *keys[5];
+ const char *vals[5];
+ int i = 0;
+
+ /*
+ * We use the expand_dbname parameter to process the connection string (or
+ * URI), and pass some extra options.
+ */
+ keys[i] = "dbname";
+ vals[i] = conninfo;
+ keys[++i] = "replication";
+ vals[i] = logical ? "database" : "true";
+ if (!logical)
+ {
+ /*
+		 * The database name is ignored by the server in replication mode,
+		 * but specify "replication" anyway for the .pgpass lookup.
+ */
+ keys[++i] = "dbname";
+ vals[i] = "replication";
+ }
+ keys[++i] = "fallback_application_name";
+ vals[i] = appname;
+ if (logical)
+ {
+ keys[++i] = "client_encoding";
+ vals[i] = GetDatabaseEncodingName();
+ }
+ keys[++i] = NULL;
+ vals[i] = NULL;
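+
+	/*
+	 * Purely illustrative example: for a physical connection, conninfo
+	 * "host=primary port=5432" expands (via expand_dbname) to the
+	 * equivalent of:
+	 *   host=primary port=5432 replication=true dbname=replication
+	 *   fallback_application_name=<appname>
+	 */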
+
+	Assert(i < lengthof(keys));
+
+ conn = palloc0(sizeof(WalReceiverConn));
+ conn->streamConn = PQconnectStartParams(keys, vals,
+ /* expand_dbname = */ true);
+ if (PQstatus(conn->streamConn) == CONNECTION_BAD)
+ {
+ *err = pchomp(PQerrorMessage(conn->streamConn));
+ return NULL;
+ }
+
+ /*
+ * Poll connection until we have OK or FAILED status.
+ *
+ * Per spec for PQconnectPoll, first wait till socket is write-ready.
+ */
+ status = PGRES_POLLING_WRITING;
+ do
+ {
+ int io_flag;
+ int rc;
+
+ if (status == PGRES_POLLING_READING)
+ io_flag = WL_SOCKET_READABLE;
+#ifdef WIN32
+ /* Windows needs a different test while waiting for connection-made */
+ else if (PQstatus(conn->streamConn) == CONNECTION_STARTED)
+ io_flag = WL_SOCKET_CONNECTED;
+#endif
+ else
+ io_flag = WL_SOCKET_WRITEABLE;
+
+ rc = WaitLatchOrSocket(MyLatch,
+ WL_EXIT_ON_PM_DEATH | WL_LATCH_SET | io_flag,
+ PQsocket(conn->streamConn),
+ 0,
+ WAIT_EVENT_LIBPQWALRECEIVER_CONNECT);
+
+ /* Interrupted? */
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ ProcessWalRcvInterrupts();
+ }
+
+ /* If socket is ready, advance the libpq state machine */
+ if (rc & io_flag)
+ status = PQconnectPoll(conn->streamConn);
+ } while (status != PGRES_POLLING_OK && status != PGRES_POLLING_FAILED);
+
+ if (PQstatus(conn->streamConn) != CONNECTION_OK)
+ {
+ *err = pchomp(PQerrorMessage(conn->streamConn));
+ return NULL;
+ }
+
+ if (logical)
+ {
+ PGresult *res;
+
+ res = libpqrcv_PQexec(conn->streamConn,
+ ALWAYS_SECURE_SEARCH_PATH_SQL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ PQclear(res);
+ ereport(ERROR,
+ (errmsg("could not clear search path: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+ }
+ PQclear(res);
+ }
+
+ conn->logical = logical;
+
+ return conn;
+}
+
+/*
+ * Validate connection info string (just try to parse it)
+ */
+static void
+libpqrcv_check_conninfo(const char *conninfo)
+{
+ PQconninfoOption *opts = NULL;
+ char *err = NULL;
+
+ opts = PQconninfoParse(conninfo, &err);
+ if (opts == NULL)
+ {
+ /* The error string is malloc'd, so we must free it explicitly */
+ char *errcopy = err ? pstrdup(err) : "out of memory";
+
+ PQfreemem(err);
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("invalid connection string syntax: %s", errcopy)));
+ }
+
+ PQconninfoFree(opts);
+}
+
+/*
+ * Return a user-displayable conninfo string. Any security-sensitive fields
+ * are obfuscated.
+ */
+static char *
+libpqrcv_get_conninfo(WalReceiverConn *conn)
+{
+ PQconninfoOption *conn_opts;
+ PQconninfoOption *conn_opt;
+ PQExpBufferData buf;
+ char *retval;
+
+ Assert(conn->streamConn != NULL);
+
+ initPQExpBuffer(&buf);
+ conn_opts = PQconninfo(conn->streamConn);
+
+ if (conn_opts == NULL)
+ ereport(ERROR,
+ (errmsg("could not parse connection string: %s",
+ _("out of memory"))));
+
+ /* build a clean connection string from pieces */
+ for (conn_opt = conn_opts; conn_opt->keyword != NULL; conn_opt++)
+ {
+ bool obfuscate;
+
+ /* Skip debug and empty options */
+ if (strchr(conn_opt->dispchar, 'D') ||
+ conn_opt->val == NULL ||
+ conn_opt->val[0] == '\0')
+ continue;
+
+ /* Obfuscate security-sensitive options */
+ obfuscate = strchr(conn_opt->dispchar, '*') != NULL;
+
+ appendPQExpBuffer(&buf, "%s%s=%s",
+ buf.len == 0 ? "" : " ",
+ conn_opt->keyword,
+ obfuscate ? "********" : conn_opt->val);
+ }
+
+ PQconninfoFree(conn_opts);
+
+ retval = PQExpBufferDataBroken(buf) ? NULL : pstrdup(buf.data);
+ termPQExpBuffer(&buf);
+ return retval;
+}
+
+/*
+ * Provides information about the sender this WAL receiver is connected to.
+ */
+static void
+libpqrcv_get_senderinfo(WalReceiverConn *conn, char **sender_host,
+ int *sender_port)
+{
+ char *ret = NULL;
+
+ *sender_host = NULL;
+ *sender_port = 0;
+
+ Assert(conn->streamConn != NULL);
+
+ ret = PQhost(conn->streamConn);
+ if (ret && strlen(ret) != 0)
+ *sender_host = pstrdup(ret);
+
+ ret = PQport(conn->streamConn);
+ if (ret && strlen(ret) != 0)
+ *sender_port = atoi(ret);
+}
+
+/*
+ * Check that primary's system identifier matches ours, and fetch the current
+ * timeline ID of the primary.
+ */
+static char *
+libpqrcv_identify_system(WalReceiverConn *conn, TimeLineID *primary_tli)
+{
+ PGresult *res;
+ char *primary_sysid;
+
+ /*
+ * Get the system identifier and timeline ID as a DataRow message from the
+ * primary server.
+ */
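+	/*
+	 * A typical (purely illustrative) reply is a single row such as:
+	 *   systemid = 7000000000000000001, timeline = 1, xlogpos = 0/3000060
+	 */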
+ res = libpqrcv_PQexec(conn->streamConn, "IDENTIFY_SYSTEM");
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ PQclear(res);
+ ereport(ERROR,
+ (errmsg("could not receive database system identifier and timeline ID from "
+ "the primary server: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+ }
+ if (PQnfields(res) < 3 || PQntuples(res) != 1)
+ {
+ int ntuples = PQntuples(res);
+ int nfields = PQnfields(res);
+
+ PQclear(res);
+ ereport(ERROR,
+ (errmsg("invalid response from primary server"),
+ errdetail("Could not identify system: got %d rows and %d fields, expected %d rows and %d or more fields.",
+						   ntuples, nfields, 1, 3)));
+ }
+ primary_sysid = pstrdup(PQgetvalue(res, 0, 0));
+ *primary_tli = pg_strtoint32(PQgetvalue(res, 0, 1));
+ PQclear(res);
+
+ return primary_sysid;
+}
+
+/*
+ * Thin wrapper around libpq to obtain server version.
+ */
+static int
+libpqrcv_server_version(WalReceiverConn *conn)
+{
+ return PQserverVersion(conn->streamConn);
+}
+
+/*
+ * Start streaming WAL data from the given streaming options.
+ *
+ * Returns true if we switched successfully to copy-both mode. False
+ * means that the server received the command and executed it
+ * successfully, but didn't switch to copy-both mode. That happens when
+ * there was no WAL on the requested timeline and starting point, because
+ * the server switched to another timeline at or before the requested
+ * starting point. On failure, throws an ERROR.
+ */
+static bool
+libpqrcv_startstreaming(WalReceiverConn *conn,
+ const WalRcvStreamOptions *options)
+{
+ StringInfoData cmd;
+ PGresult *res;
+
+ Assert(options->logical == conn->logical);
+ Assert(options->slotname || !options->logical);
+
+ initStringInfo(&cmd);
+
+ /* Build the command. */
+ appendStringInfoString(&cmd, "START_REPLICATION");
+ if (options->slotname != NULL)
+ appendStringInfo(&cmd, " SLOT \"%s\"",
+ options->slotname);
+
+ if (options->logical)
+ appendStringInfoString(&cmd, " LOGICAL");
+
+ appendStringInfo(&cmd, " %X/%X",
+ (uint32) (options->startpoint >> 32),
+ (uint32) options->startpoint);
+
+ /*
+ * Additional options are different depending on if we are doing logical
+ * or physical replication.
+ */
+ if (options->logical)
+ {
+ char *pubnames_str;
+ List *pubnames;
+ char *pubnames_literal;
+
+ appendStringInfoString(&cmd, " (");
+
+ appendStringInfo(&cmd, "proto_version '%u'",
+ options->proto.logical.proto_version);
+
+ pubnames = options->proto.logical.publication_names;
+ pubnames_str = stringlist_to_identifierstr(conn->streamConn, pubnames);
+ if (!pubnames_str)
+ ereport(ERROR,
+ (errmsg("could not start WAL streaming: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+ pubnames_literal = PQescapeLiteral(conn->streamConn, pubnames_str,
+ strlen(pubnames_str));
+ if (!pubnames_literal)
+ ereport(ERROR,
+ (errmsg("could not start WAL streaming: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+ appendStringInfo(&cmd, ", publication_names %s", pubnames_literal);
+ PQfreemem(pubnames_literal);
+ pfree(pubnames_str);
+
+ appendStringInfoChar(&cmd, ')');
+ }
+ else
+ appendStringInfo(&cmd, " TIMELINE %u",
+ options->proto.physical.startpointTLI);
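+
+	/*
+	 * At this point cmd holds something like (illustrative values):
+	 *   START_REPLICATION SLOT "sub1" LOGICAL 0/1A2B3C4
+	 *       (proto_version '1', publication_names '"mypub"')
+	 * for logical replication, or
+	 *   START_REPLICATION 0/1A2B3C4 TIMELINE 1
+	 * for physical replication.
+	 */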
+
+ /* Start streaming. */
+ res = libpqrcv_PQexec(conn->streamConn, cmd.data);
+ pfree(cmd.data);
+
+ if (PQresultStatus(res) == PGRES_COMMAND_OK)
+ {
+ PQclear(res);
+ return false;
+ }
+ else if (PQresultStatus(res) != PGRES_COPY_BOTH)
+ {
+ PQclear(res);
+ ereport(ERROR,
+ (errmsg("could not start WAL streaming: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+ }
+ PQclear(res);
+ return true;
+}
+
+/*
+ * Stop streaming WAL data. Returns the next timeline's ID in *next_tli, as
+ * reported by the server, or 0 if it did not report it.
+ */
+static void
+libpqrcv_endstreaming(WalReceiverConn *conn, TimeLineID *next_tli)
+{
+ PGresult *res;
+
+ /*
+ * Send copy-end message. As in libpqrcv_PQexec, this could theoretically
+ * block, but the risk seems small.
+ */
+ if (PQputCopyEnd(conn->streamConn, NULL) <= 0 ||
+ PQflush(conn->streamConn))
+ ereport(ERROR,
+ (errmsg("could not send end-of-streaming message to primary: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+
+ *next_tli = 0;
+
+ /*
+ * After COPY is finished, we should receive a result set indicating the
+ * next timeline's ID, or just CommandComplete if the server was shut
+ * down.
+ *
+ * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT is
+ * also possible in case we aborted the copy in mid-stream.
+ */
+ res = libpqrcv_PQgetResult(conn->streamConn);
+ if (PQresultStatus(res) == PGRES_TUPLES_OK)
+ {
+ /*
+ * Read the next timeline's ID. The server also sends the timeline's
+ * starting point, but it is ignored.
+ */
+ if (PQnfields(res) < 2 || PQntuples(res) != 1)
+ ereport(ERROR,
+ (errmsg("unexpected result set after end-of-streaming")));
+ *next_tli = pg_strtoint32(PQgetvalue(res, 0, 0));
+ PQclear(res);
+
+ /* the result set should be followed by CommandComplete */
+ res = libpqrcv_PQgetResult(conn->streamConn);
+ }
+ else if (PQresultStatus(res) == PGRES_COPY_OUT)
+ {
+ PQclear(res);
+
+ /* End the copy */
+ if (PQendcopy(conn->streamConn))
+ ereport(ERROR,
+ (errmsg("error while shutting down streaming COPY: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+
+ /* CommandComplete should follow */
+ res = libpqrcv_PQgetResult(conn->streamConn);
+ }
+
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ ereport(ERROR,
+ (errmsg("error reading result of streaming command: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+ PQclear(res);
+
+ /* Verify that there are no more results */
+ res = libpqrcv_PQgetResult(conn->streamConn);
+ if (res != NULL)
+ ereport(ERROR,
+ (errmsg("unexpected result after CommandComplete: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+}
+
+/*
+ * Fetch the timeline history file for 'tli' from primary.
+ */
+static void
+libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
+ TimeLineID tli, char **filename,
+ char **content, int *len)
+{
+ PGresult *res;
+ char cmd[64];
+
+ Assert(!conn->logical);
+
+ /*
+	 * Request the primary to send over the history file for the given
+	 * timeline.
+ */
+ snprintf(cmd, sizeof(cmd), "TIMELINE_HISTORY %u", tli);
+ res = libpqrcv_PQexec(conn->streamConn, cmd);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ PQclear(res);
+ ereport(ERROR,
+ (errmsg("could not receive timeline history file from "
+ "the primary server: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+ }
+ if (PQnfields(res) != 2 || PQntuples(res) != 1)
+ {
+ int ntuples = PQntuples(res);
+ int nfields = PQnfields(res);
+
+ PQclear(res);
+ ereport(ERROR,
+ (errmsg("invalid response from primary server"),
+ errdetail("Expected 1 tuple with 2 fields, got %d tuples with %d fields.",
+ ntuples, nfields)));
+ }
+ *filename = pstrdup(PQgetvalue(res, 0, 0));
+
+ *len = PQgetlength(res, 0, 1);
+ *content = palloc(*len);
+ memcpy(*content, PQgetvalue(res, 0, 1), *len);
+ PQclear(res);
+}
+
+/*
+ * Send a query and wait for the results by using the asynchronous libpq
+ * functions and socket readiness events.
+ *
+ * We must not use the regular blocking libpq functions like PQexec()
+ * since they are uninterruptible by signals on some platforms, such as
+ * Windows.
+ *
+ * The function is modeled on PQexec() in libpq, but only implements
+ * those parts that are in use in the walreceiver API.
+ *
+ * May return NULL, rather than an error result, on failure.
+ */
+static PGresult *
+libpqrcv_PQexec(PGconn *streamConn, const char *query)
+{
+ PGresult *lastResult = NULL;
+
+ /*
+ * PQexec() silently discards any prior query results on the connection.
+ * This is not required for this function as it's expected that the caller
+ * (which is this library in all cases) will behave correctly and we don't
+ * have to be backwards compatible with old libpq.
+ */
+
+ /*
+ * Submit the query. Since we don't use non-blocking mode, this could
+ * theoretically block. In practice, since we don't send very long query
+ * strings, the risk seems negligible.
+ */
+ if (!PQsendQuery(streamConn, query))
+ return NULL;
+
+ for (;;)
+ {
+ /* Wait for, and collect, the next PGresult. */
+ PGresult *result;
+
+ result = libpqrcv_PQgetResult(streamConn);
+ if (result == NULL)
+ break; /* query is complete, or failure */
+
+ /*
+ * Emulate PQexec()'s behavior of returning the last result when there
+		 * are many. We are fine with returning just the last error message.
+ */
+ PQclear(lastResult);
+ lastResult = result;
+
+ if (PQresultStatus(lastResult) == PGRES_COPY_IN ||
+ PQresultStatus(lastResult) == PGRES_COPY_OUT ||
+ PQresultStatus(lastResult) == PGRES_COPY_BOTH ||
+ PQstatus(streamConn) == CONNECTION_BAD)
+ break;
+ }
+
+ return lastResult;
+}
+
+/*
+ * Perform the equivalent of PQgetResult(), but watch for interrupts.
+ */
+static PGresult *
+libpqrcv_PQgetResult(PGconn *streamConn)
+{
+ /*
+ * Collect data until PQgetResult is ready to get the result without
+ * blocking.
+ */
+ while (PQisBusy(streamConn))
+ {
+ int rc;
+
+ /*
+ * We don't need to break down the sleep into smaller increments,
+ * since we'll get interrupted by signals and can handle any
+ * interrupts here.
+ */
+ rc = WaitLatchOrSocket(MyLatch,
+ WL_EXIT_ON_PM_DEATH | WL_SOCKET_READABLE |
+ WL_LATCH_SET,
+ PQsocket(streamConn),
+ 0,
+ WAIT_EVENT_LIBPQWALRECEIVER_RECEIVE);
+
+ /* Interrupted? */
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ ProcessWalRcvInterrupts();
+ }
+
+ /* Consume whatever data is available from the socket */
+ if (PQconsumeInput(streamConn) == 0)
+ {
+ /* trouble; return NULL */
+ return NULL;
+ }
+ }
+
+ /* Now we can collect and return the next PGresult */
+ return PQgetResult(streamConn);
+}
+
+/*
+ * Disconnect connection to primary, if any.
+ */
+static void
+libpqrcv_disconnect(WalReceiverConn *conn)
+{
+ PQfinish(conn->streamConn);
+ if (conn->recvBuf != NULL)
+ PQfreemem(conn->recvBuf);
+ pfree(conn);
+}
+
+/*
+ * Receive a message available from XLOG stream.
+ *
+ * Returns:
+ *
+ * If data was received, returns the length of the data. *buffer is set to
+ * point to a buffer holding the received message. The buffer is only valid
+ * until the next libpqrcv_* call.
+ *
+ * If no data was available immediately, returns 0, and *wait_fd is set to a
+ * socket descriptor which can be waited on before trying again.
+ *
+ * -1 if the server ended the COPY.
+ *
+ * ereports on error.
+ */
+static int
+libpqrcv_receive(WalReceiverConn *conn, char **buffer,
+ pgsocket *wait_fd)
+{
+ int rawlen;
+
+ if (conn->recvBuf != NULL)
+ PQfreemem(conn->recvBuf);
+ conn->recvBuf = NULL;
+
+ /* Try to receive a CopyData message */
+ rawlen = PQgetCopyData(conn->streamConn, &conn->recvBuf, 1);
+ if (rawlen == 0)
+ {
+ /* Try consuming some data. */
+ if (PQconsumeInput(conn->streamConn) == 0)
+ ereport(ERROR,
+ (errmsg("could not receive data from WAL stream: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+
+ /* Now that we've consumed some input, try again */
+ rawlen = PQgetCopyData(conn->streamConn, &conn->recvBuf, 1);
+ if (rawlen == 0)
+ {
+ /* Tell caller to try again when our socket is ready. */
+ *wait_fd = PQsocket(conn->streamConn);
+ return 0;
+ }
+ }
+ if (rawlen == -1) /* end-of-streaming or error */
+ {
+ PGresult *res;
+
+ res = libpqrcv_PQgetResult(conn->streamConn);
+ if (PQresultStatus(res) == PGRES_COMMAND_OK)
+ {
+ PQclear(res);
+
+ /* Verify that there are no more results. */
+ res = libpqrcv_PQgetResult(conn->streamConn);
+ if (res != NULL)
+ {
+ PQclear(res);
+
+ /*
+				 * If the other side closed the connection in an orderly
+				 * manner (otherwise we'd have seen an error, or
+				 * PGRES_COPY_IN), don't report an error here, but let
+				 * callers deal with it.
+ */
+ if (PQstatus(conn->streamConn) == CONNECTION_BAD)
+ return -1;
+
+ ereport(ERROR,
+ (errmsg("unexpected result after CommandComplete: %s",
+ PQerrorMessage(conn->streamConn))));
+ }
+
+ return -1;
+ }
+ else if (PQresultStatus(res) == PGRES_COPY_IN)
+ {
+ PQclear(res);
+ return -1;
+ }
+ else
+ {
+ PQclear(res);
+ ereport(ERROR,
+ (errmsg("could not receive data from WAL stream: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+ }
+ }
+ if (rawlen < -1)
+ ereport(ERROR,
+ (errmsg("could not receive data from WAL stream: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+
+ /* Return received messages to caller */
+ *buffer = conn->recvBuf;
+ return rawlen;
+}
+
+/*
+ * Send a message to XLOG stream.
+ *
+ * ereports on error.
+ */
+static void
+libpqrcv_send(WalReceiverConn *conn, const char *buffer, int nbytes)
+{
+ if (PQputCopyData(conn->streamConn, buffer, nbytes) <= 0 ||
+ PQflush(conn->streamConn))
+ ereport(ERROR,
+ (errmsg("could not send data to WAL stream: %s",
+ pchomp(PQerrorMessage(conn->streamConn)))));
+}
+
+/*
+ * Create new replication slot.
+ * Returns the name of the exported snapshot for a logical slot, or NULL
+ * for a physical slot.
+ */
+static char *
+libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname,
+ bool temporary, CRSSnapshotAction snapshot_action,
+ XLogRecPtr *lsn)
+{
+ PGresult *res;
+ StringInfoData cmd;
+ char *snapshot;
+
+ initStringInfo(&cmd);
+
+ appendStringInfo(&cmd, "CREATE_REPLICATION_SLOT \"%s\"", slotname);
+
+ if (temporary)
+ appendStringInfoString(&cmd, " TEMPORARY");
+
+ if (conn->logical)
+ {
+ appendStringInfoString(&cmd, " LOGICAL pgoutput");
+ switch (snapshot_action)
+ {
+ case CRS_EXPORT_SNAPSHOT:
+ appendStringInfoString(&cmd, " EXPORT_SNAPSHOT");
+ break;
+ case CRS_NOEXPORT_SNAPSHOT:
+ appendStringInfoString(&cmd, " NOEXPORT_SNAPSHOT");
+ break;
+ case CRS_USE_SNAPSHOT:
+ appendStringInfoString(&cmd, " USE_SNAPSHOT");
+ break;
+ }
+ }
+ else
+ {
+ appendStringInfoString(&cmd, " PHYSICAL RESERVE_WAL");
+ }
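+
+	/*
+	 * Example commands produced above (illustrative slot names):
+	 *   CREATE_REPLICATION_SLOT "sub1" TEMPORARY LOGICAL pgoutput USE_SNAPSHOT
+	 *   CREATE_REPLICATION_SLOT "wr_1" PHYSICAL RESERVE_WAL
+	 */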
+
+ res = libpqrcv_PQexec(conn->streamConn, cmd.data);
+ pfree(cmd.data);
+
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ PQclear(res);
+ ereport(ERROR,
+ (errmsg("could not create replication slot \"%s\": %s",
+ slotname, pchomp(PQerrorMessage(conn->streamConn)))));
+ }
+
+ if (lsn)
+ *lsn = DatumGetLSN(DirectFunctionCall1Coll(pg_lsn_in, InvalidOid,
+ CStringGetDatum(PQgetvalue(res, 0, 1))));
+
+ if (!PQgetisnull(res, 0, 2))
+ snapshot = pstrdup(PQgetvalue(res, 0, 2));
+ else
+ snapshot = NULL;
+
+ PQclear(res);
+
+ return snapshot;
+}
+
+/*
+ * Return PID of remote backend process.
+ */
+static pid_t
+libpqrcv_get_backend_pid(WalReceiverConn *conn)
+{
+ return PQbackendPID(conn->streamConn);
+}
+
+/*
+ * Convert tuple query result to tuplestore.
+ */
+static void
+libpqrcv_processTuples(PGresult *pgres, WalRcvExecResult *walres,
+ const int nRetTypes, const Oid *retTypes)
+{
+ int tupn;
+ int coln;
+ int nfields = PQnfields(pgres);
+ HeapTuple tuple;
+ AttInMetadata *attinmeta;
+ MemoryContext rowcontext;
+ MemoryContext oldcontext;
+
+	/* Make sure we got the expected number of fields. */
+ if (nfields != nRetTypes)
+ ereport(ERROR,
+ (errmsg("invalid query response"),
+ errdetail("Expected %d fields, got %d fields.",
+ nRetTypes, nfields)));
+
+ walres->tuplestore = tuplestore_begin_heap(true, false, work_mem);
+
+ /* Create tuple descriptor corresponding to expected result. */
+ walres->tupledesc = CreateTemplateTupleDesc(nRetTypes);
+ for (coln = 0; coln < nRetTypes; coln++)
+ TupleDescInitEntry(walres->tupledesc, (AttrNumber) coln + 1,
+ PQfname(pgres, coln), retTypes[coln], -1, 0);
+ attinmeta = TupleDescGetAttInMetadata(walres->tupledesc);
+
+ /* No point in doing more here if there were no tuples returned. */
+ if (PQntuples(pgres) == 0)
+ return;
+
+ /* Create temporary context for local allocations. */
+ rowcontext = AllocSetContextCreate(CurrentMemoryContext,
+ "libpqrcv query result context",
+ ALLOCSET_DEFAULT_SIZES);
+
+ /* Process returned rows. */
+ for (tupn = 0; tupn < PQntuples(pgres); tupn++)
+ {
+ char *cstrs[MaxTupleAttributeNumber];
+
+ ProcessWalRcvInterrupts();
+
+ /* Do the allocations in temporary context. */
+ oldcontext = MemoryContextSwitchTo(rowcontext);
+
+ /*
+ * Fill cstrs with null-terminated strings of column values.
+ */
+ for (coln = 0; coln < nfields; coln++)
+ {
+ if (PQgetisnull(pgres, tupn, coln))
+ cstrs[coln] = NULL;
+ else
+ cstrs[coln] = PQgetvalue(pgres, tupn, coln);
+ }
+
+ /* Convert row to a tuple, and add it to the tuplestore */
+ tuple = BuildTupleFromCStrings(attinmeta, cstrs);
+ tuplestore_puttuple(walres->tuplestore, tuple);
+
+ /* Clean up */
+ MemoryContextSwitchTo(oldcontext);
+ MemoryContextReset(rowcontext);
+ }
+
+ MemoryContextDelete(rowcontext);
+}
+
+/*
+ * Public interface for sending generic queries (and commands).
+ *
+ * This can only be called from a process connected to a database.
+ */
+static WalRcvExecResult *
+libpqrcv_exec(WalReceiverConn *conn, const char *query,
+ const int nRetTypes, const Oid *retTypes)
+{
+ PGresult *pgres = NULL;
+ WalRcvExecResult *walres = palloc0(sizeof(WalRcvExecResult));
+
+ if (MyDatabaseId == InvalidOid)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("the query interface requires a database connection")));
+
+ pgres = libpqrcv_PQexec(conn->streamConn, query);
+
+ switch (PQresultStatus(pgres))
+ {
+ case PGRES_SINGLE_TUPLE:
+ case PGRES_TUPLES_OK:
+ walres->status = WALRCV_OK_TUPLES;
+ libpqrcv_processTuples(pgres, walres, nRetTypes, retTypes);
+ break;
+
+ case PGRES_COPY_IN:
+ walres->status = WALRCV_OK_COPY_IN;
+ break;
+
+ case PGRES_COPY_OUT:
+ walres->status = WALRCV_OK_COPY_OUT;
+ break;
+
+ case PGRES_COPY_BOTH:
+ walres->status = WALRCV_OK_COPY_BOTH;
+ break;
+
+ case PGRES_COMMAND_OK:
+ walres->status = WALRCV_OK_COMMAND;
+ break;
+
+			/* An empty query is considered an error. */
+ case PGRES_EMPTY_QUERY:
+ walres->status = WALRCV_ERROR;
+ walres->err = _("empty query");
+ break;
+
+ case PGRES_NONFATAL_ERROR:
+ case PGRES_FATAL_ERROR:
+ case PGRES_BAD_RESPONSE:
+ walres->status = WALRCV_ERROR;
+ walres->err = pchomp(PQerrorMessage(conn->streamConn));
+ break;
+ }
+
+ PQclear(pgres);
+
+ return walres;
+}
+
+/*
+ * Given a List of strings, return it as a single comma-separated
+ * string, quoting identifiers as needed.
+ *
+ * This is essentially the reverse of SplitIdentifierString.
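+ *
+ * For example (hypothetical input), the list ("mypub", "other pub") is
+ * rendered as: "mypub","other pub", since PQescapeIdentifier quotes each
+ * element.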
+ *
+ * The caller should free the result.
+ */
+static char *
+stringlist_to_identifierstr(PGconn *conn, List *strings)
+{
+ ListCell *lc;
+ StringInfoData res;
+ bool first = true;
+
+ initStringInfo(&res);
+
+ foreach(lc, strings)
+ {
+ char *val = strVal(lfirst(lc));
+ char *val_escaped;
+
+ if (first)
+ first = false;
+ else
+ appendStringInfoChar(&res, ',');
+
+ val_escaped = PQescapeIdentifier(conn, val, strlen(val));
+ if (!val_escaped)
+ {
+			pfree(res.data);
+ return NULL;
+ }
+ appendStringInfoString(&res, val_escaped);
+ PQfreemem(val_escaped);
+ }
+
+ return res.data;
+}
diff --git a/src/backend/replication/logical/Makefile b/src/backend/replication/logical/Makefile
new file mode 100644
index 0000000..c4e2fde
--- /dev/null
+++ b/src/backend/replication/logical/Makefile
@@ -0,0 +1,31 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for src/backend/replication/logical
+#
+# IDENTIFICATION
+# src/backend/replication/logical/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/replication/logical
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+override CPPFLAGS := -I$(srcdir) $(CPPFLAGS)
+
+OBJS = \
+ decode.o \
+ launcher.o \
+ logical.o \
+ logicalfuncs.o \
+ message.o \
+ origin.o \
+ proto.o \
+ relation.o \
+ reorderbuffer.o \
+ snapbuild.o \
+ tablesync.o \
+ worker.o
+
+include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
new file mode 100644
index 0000000..4985c2a
--- /dev/null
+++ b/src/backend/replication/logical/decode.c
@@ -0,0 +1,1052 @@
+/* -------------------------------------------------------------------------
+ *
+ * decode.c
+ * This module decodes WAL records read using xlogreader.h's APIs for the
+ * purpose of logical decoding by passing information to the
+ * reorderbuffer module (containing the actual changes) and to the
+ * snapbuild module to build a fitting catalog snapshot (to be able to
+ * properly decode the changes in the reorderbuffer).
+ *
+ * NOTE:
+ * This basically tries to handle all low level xlog stuff for
+ * reorderbuffer.c and snapbuild.c. There's some minor leakage where a
+ * specific record's struct is used to pass data along, but those just
+ * happen to contain the right amount of data in a convenient
+ * format. There isn't and shouldn't be much intelligence about the
+ * contents of records in here except turning them into a more usable
+ * format.
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/replication/logical/decode.c
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/heapam.h"
+#include "access/heapam_xlog.h"
+#include "access/transam.h"
+#include "access/xact.h"
+#include "access/xlog_internal.h"
+#include "access/xlogreader.h"
+#include "access/xlogrecord.h"
+#include "access/xlogutils.h"
+#include "catalog/pg_control.h"
+#include "replication/decode.h"
+#include "replication/logical.h"
+#include "replication/message.h"
+#include "replication/origin.h"
+#include "replication/reorderbuffer.h"
+#include "replication/snapbuild.h"
+#include "storage/standby.h"
+
+typedef struct XLogRecordBuffer
+{
+	XLogRecPtr	origptr;		/* start LSN of the record being decoded */
+	XLogRecPtr	endptr;			/* first LSN past the record */
+	XLogReaderState *record;	/* the record itself */
+} XLogRecordBuffer;
+
+/* RMGR Handlers */
+static void DecodeXLogOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeHeap2Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeStandbyOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+
+/* individual record(group)'s handlers */
+static void DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeTruncate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeSpecConfirm(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+
+static void DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
+ xl_xact_parsed_commit *parsed, TransactionId xid);
+static void DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
+ xl_xact_parsed_abort *parsed, TransactionId xid);
+
+/* common function to decode tuples */
+static void DecodeXLogTuple(char *data, Size len, ReorderBufferTupleBuf *tup);
+
+/*
+ * Take every XLogReadRecord()ed record and perform the actions required to
+ * decode it using the output plugin already setup in the logical decoding
+ * context.
+ *
+ * NB: Note that every record's xid needs to be processed by reorderbuffer
+ * (xids contained in the content of records are not relevant for this rule).
+ * That means that for records which would otherwise not go through the
+ * reorderbuffer, ReorderBufferProcessXid() has to be called. We don't want to
+ * call ReorderBufferProcessXid for each record type by default, because
+ * e.g. empty xacts can be handled more efficiently if there's no previous
+ * state for them.
+ *
+ * We also support the ability to fast forward through records, skipping some
+ * record types completely - see individual record types for details.
+ */
+void
+LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogReaderState *record)
+{
+ XLogRecordBuffer buf;
+
+ buf.origptr = ctx->reader->ReadRecPtr;
+ buf.endptr = ctx->reader->EndRecPtr;
+ buf.record = record;
+
+ /* cast so we get a warning when new rmgrs are added */
+ switch ((RmgrId) XLogRecGetRmid(record))
+ {
+ /*
+ * Rmgrs we care about for logical decoding. Add new rmgrs in
+ * rmgrlist.h's order.
+ */
+ case RM_XLOG_ID:
+ DecodeXLogOp(ctx, &buf);
+ break;
+
+ case RM_XACT_ID:
+ DecodeXactOp(ctx, &buf);
+ break;
+
+ case RM_STANDBY_ID:
+ DecodeStandbyOp(ctx, &buf);
+ break;
+
+ case RM_HEAP2_ID:
+ DecodeHeap2Op(ctx, &buf);
+ break;
+
+ case RM_HEAP_ID:
+ DecodeHeapOp(ctx, &buf);
+ break;
+
+ case RM_LOGICALMSG_ID:
+ DecodeLogicalMsgOp(ctx, &buf);
+ break;
+
+ /*
+ * Rmgrs irrelevant for logical decoding; they describe stuff not
+ * represented in logical decoding. Add new rmgrs in rmgrlist.h's
+ * order.
+ */
+ case RM_SMGR_ID:
+ case RM_CLOG_ID:
+ case RM_DBASE_ID:
+ case RM_TBLSPC_ID:
+ case RM_MULTIXACT_ID:
+ case RM_RELMAP_ID:
+ case RM_BTREE_ID:
+ case RM_HASH_ID:
+ case RM_GIN_ID:
+ case RM_GIST_ID:
+ case RM_SEQ_ID:
+ case RM_SPGIST_ID:
+ case RM_BRIN_ID:
+ case RM_COMMIT_TS_ID:
+ case RM_REPLORIGIN_ID:
+ case RM_GENERIC_ID:
+ /* just deal with xid, and done */
+ ReorderBufferProcessXid(ctx->reorder, XLogRecGetXid(record),
+ buf.origptr);
+ break;
+ case RM_NEXT_ID:
+ elog(ERROR, "unexpected RM_NEXT_ID rmgr_id: %u", (RmgrIds) XLogRecGetRmid(buf.record));
+ }
+}
+
+/*
+ * Handle rmgr XLOG_ID records for LogicalDecodingProcessRecord().
+ */
+static void
+DecodeXLogOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ SnapBuild *builder = ctx->snapshot_builder;
+ uint8 info = XLogRecGetInfo(buf->record) & ~XLR_INFO_MASK;
+
+ ReorderBufferProcessXid(ctx->reorder, XLogRecGetXid(buf->record),
+ buf->origptr);
+
+ switch (info)
+ {
+ /* this is also used in END_OF_RECOVERY checkpoints */
+ case XLOG_CHECKPOINT_SHUTDOWN:
+ case XLOG_END_OF_RECOVERY:
+ SnapBuildSerializationPoint(builder, buf->origptr);
+
+ break;
+ case XLOG_CHECKPOINT_ONLINE:
+
+ /*
+			 * A RUNNING_XACTS record will have been logged near to this
+			 * point, so we can restart from there.
+ */
+ break;
+ case XLOG_NOOP:
+ case XLOG_NEXTOID:
+ case XLOG_SWITCH:
+ case XLOG_BACKUP_END:
+ case XLOG_PARAMETER_CHANGE:
+ case XLOG_RESTORE_POINT:
+ case XLOG_FPW_CHANGE:
+ case XLOG_FPI_FOR_HINT:
+ case XLOG_FPI:
+ break;
+ default:
+ elog(ERROR, "unexpected RM_XLOG_ID record type: %u", info);
+ }
+}
+
+/*
+ * Handle rmgr XACT_ID records for LogicalDecodingProcessRecord().
+ */
+static void
+DecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ SnapBuild *builder = ctx->snapshot_builder;
+ ReorderBuffer *reorder = ctx->reorder;
+ XLogReaderState *r = buf->record;
+ uint8 info = XLogRecGetInfo(r) & XLOG_XACT_OPMASK;
+
+ /*
+ * If the snapshot isn't yet fully built, we cannot decode anything, so
+ * bail out.
+ *
+ * However, it's critical to process XLOG_XACT_ASSIGNMENT records even
+ * when the snapshot is being built: it is possible to get later records
+ * that require subxids to be properly assigned.
+ */
+ if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT &&
+ info != XLOG_XACT_ASSIGNMENT)
+ return;
+
+ switch (info)
+ {
+ case XLOG_XACT_COMMIT:
+ case XLOG_XACT_COMMIT_PREPARED:
+ {
+ xl_xact_commit *xlrec;
+ xl_xact_parsed_commit parsed;
+ TransactionId xid;
+
+ xlrec = (xl_xact_commit *) XLogRecGetData(r);
+ ParseCommitRecord(XLogRecGetInfo(buf->record), xlrec, &parsed);
+
+ if (!TransactionIdIsValid(parsed.twophase_xid))
+ xid = XLogRecGetXid(r);
+ else
+ xid = parsed.twophase_xid;
+
+ DecodeCommit(ctx, buf, &parsed, xid);
+ break;
+ }
+ case XLOG_XACT_ABORT:
+ case XLOG_XACT_ABORT_PREPARED:
+ {
+ xl_xact_abort *xlrec;
+ xl_xact_parsed_abort parsed;
+ TransactionId xid;
+
+ xlrec = (xl_xact_abort *) XLogRecGetData(r);
+ ParseAbortRecord(XLogRecGetInfo(buf->record), xlrec, &parsed);
+
+ if (!TransactionIdIsValid(parsed.twophase_xid))
+ xid = XLogRecGetXid(r);
+ else
+ xid = parsed.twophase_xid;
+
+ DecodeAbort(ctx, buf, &parsed, xid);
+ break;
+ }
+ case XLOG_XACT_ASSIGNMENT:
+ {
+ xl_xact_assignment *xlrec;
+ int i;
+ TransactionId *sub_xid;
+
+ xlrec = (xl_xact_assignment *) XLogRecGetData(r);
+
+ sub_xid = &xlrec->xsub[0];
+
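+				/*
+				 * E.g. (illustrative xids): an assignment record with xtop
+				 * 1000 and xsub {1001, 1002} makes the reorderbuffer treat
+				 * 1001 and 1002 as subtransactions of 1000.
+				 */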
+ for (i = 0; i < xlrec->nsubxacts; i++)
+ {
+ ReorderBufferAssignChild(reorder, xlrec->xtop,
+ *(sub_xid++), buf->origptr);
+ }
+ break;
+ }
+ case XLOG_XACT_PREPARE:
+
+ /*
+ * Currently decoding ignores PREPARE TRANSACTION and will just
+ * decode the transaction when the COMMIT PREPARED is sent or
+ * throw away the transaction's contents when a ROLLBACK PREPARED
+ * is received. In the future we could add code to expose prepared
+ * transactions in the changestream allowing for a kind of
+ * distributed 2PC.
+ */
+ ReorderBufferProcessXid(reorder, XLogRecGetXid(r), buf->origptr);
+ break;
+ default:
+ elog(ERROR, "unexpected RM_XACT_ID record type: %u", info);
+ }
+}
+
+/*
+ * Handle rmgr STANDBY_ID records for LogicalDecodingProcessRecord().
+ */
+static void
+DecodeStandbyOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ SnapBuild *builder = ctx->snapshot_builder;
+ XLogReaderState *r = buf->record;
+ uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK;
+
+ ReorderBufferProcessXid(ctx->reorder, XLogRecGetXid(r), buf->origptr);
+
+ switch (info)
+ {
+ case XLOG_RUNNING_XACTS:
+ {
+ xl_running_xacts *running = (xl_running_xacts *) XLogRecGetData(r);
+
+ SnapBuildProcessRunningXacts(builder, buf->origptr, running);
+
+ /*
+				 * Abort all transactions that we keep track of and that are
+				 * older than the record's oldestRunningXid.
+ * convenient spot for doing so since, in contrast to shutdown
+ * or end-of-recovery checkpoints, we have information about
+ * all running transactions which includes prepared ones,
+ * while shutdown checkpoints just know that no non-prepared
+ * transactions are in progress.
+ */
+ ReorderBufferAbortOld(ctx->reorder, running->oldestRunningXid);
+ }
+ break;
+ case XLOG_STANDBY_LOCK:
+ break;
+ case XLOG_INVALIDATIONS:
+ {
+ xl_invalidations *invalidations =
+ (xl_invalidations *) XLogRecGetData(r);
+
+ if (!ctx->fast_forward)
+ ReorderBufferImmediateInvalidation(ctx->reorder,
+ invalidations->nmsgs,
+ invalidations->msgs);
+ }
+ break;
+ default:
+ elog(ERROR, "unexpected RM_STANDBY_ID record type: %u", info);
+ }
+}
+
+/*
+ * Handle rmgr HEAP2_ID records for DecodeRecordIntoReorderBuffer().
+ */
+static void
+DecodeHeap2Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ uint8 info = XLogRecGetInfo(buf->record) & XLOG_HEAP_OPMASK;
+ TransactionId xid = XLogRecGetXid(buf->record);
+ SnapBuild *builder = ctx->snapshot_builder;
+
+ ReorderBufferProcessXid(ctx->reorder, xid, buf->origptr);
+
+ /*
+	 * If we don't have a snapshot or we are just fast-forwarding, there is no
+ * point in decoding changes.
+ */
+ if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT ||
+ ctx->fast_forward)
+ return;
+
+ switch (info)
+ {
+ case XLOG_HEAP2_MULTI_INSERT:
+ if (!ctx->fast_forward &&
+ SnapBuildProcessChange(builder, xid, buf->origptr))
+ DecodeMultiInsert(ctx, buf);
+ break;
+ case XLOG_HEAP2_NEW_CID:
+ {
+ xl_heap_new_cid *xlrec;
+
+ xlrec = (xl_heap_new_cid *) XLogRecGetData(buf->record);
+ SnapBuildProcessNewCid(builder, xid, buf->origptr, xlrec);
+
+ break;
+ }
+ case XLOG_HEAP2_REWRITE:
+
+ /*
+ * Although these records only exist to serve the needs of logical
+ * decoding, all the work happens as part of crash or archive
+ * recovery, so we don't need to do anything here.
+ */
+ break;
+
+ /*
+		 * Everything else here is just low-level physical stuff we're not
+ * interested in.
+ */
+ case XLOG_HEAP2_FREEZE_PAGE:
+ case XLOG_HEAP2_CLEAN:
+ case XLOG_HEAP2_CLEANUP_INFO:
+ case XLOG_HEAP2_VISIBLE:
+ case XLOG_HEAP2_LOCK_UPDATED:
+ break;
+ default:
+ elog(ERROR, "unexpected RM_HEAP2_ID record type: %u", info);
+ }
+}
+
+/*
+ * Handle rmgr HEAP_ID records for DecodeRecordIntoReorderBuffer().
+ */
+static void
+DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ uint8 info = XLogRecGetInfo(buf->record) & XLOG_HEAP_OPMASK;
+ TransactionId xid = XLogRecGetXid(buf->record);
+ SnapBuild *builder = ctx->snapshot_builder;
+
+ ReorderBufferProcessXid(ctx->reorder, xid, buf->origptr);
+
+ /*
+	 * If we don't have a snapshot or we are just fast-forwarding, there is no
+ * point in decoding data changes.
+ */
+ if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT ||
+ ctx->fast_forward)
+ return;
+
+ switch (info)
+ {
+ case XLOG_HEAP_INSERT:
+ if (SnapBuildProcessChange(builder, xid, buf->origptr))
+ DecodeInsert(ctx, buf);
+ break;
+
+ /*
+			 * Treat HOT updates as normal updates. There is no useful
+ * information in the fact that we could make it a HOT update
+ * locally and the WAL layout is compatible.
+ */
+ case XLOG_HEAP_HOT_UPDATE:
+ case XLOG_HEAP_UPDATE:
+ if (SnapBuildProcessChange(builder, xid, buf->origptr))
+ DecodeUpdate(ctx, buf);
+ break;
+
+ case XLOG_HEAP_DELETE:
+ if (SnapBuildProcessChange(builder, xid, buf->origptr))
+ DecodeDelete(ctx, buf);
+ break;
+
+ case XLOG_HEAP_TRUNCATE:
+ if (SnapBuildProcessChange(builder, xid, buf->origptr))
+ DecodeTruncate(ctx, buf);
+ break;
+
+ case XLOG_HEAP_INPLACE:
+
+ /*
+ * Inplace updates are only ever performed on catalog tuples and
+			 * cannot, by definition, change tuple visibility. Since we
+ * don't decode catalog tuples, we're not interested in the
+ * record's contents.
+ *
+ * In-place updates can be used either by XID-bearing transactions
+ * (e.g. in CREATE INDEX CONCURRENTLY) or by XID-less
+ * transactions (e.g. VACUUM). In the former case, the commit
+ * record will include cache invalidations, so we mark the
+ * transaction as catalog modifying here. Currently that's
+ * redundant because the commit will do that as well, but once we
+			 * support decoding in-progress transactions, this will be important.
+ */
+ if (!TransactionIdIsValid(xid))
+ break;
+
+ SnapBuildProcessChange(builder, xid, buf->origptr);
+ ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr);
+ break;
+
+ case XLOG_HEAP_CONFIRM:
+ if (SnapBuildProcessChange(builder, xid, buf->origptr))
+ DecodeSpecConfirm(ctx, buf);
+ break;
+
+ case XLOG_HEAP_LOCK:
+ /* we don't care about row level locks for now */
+ break;
+
+ default:
+ elog(ERROR, "unexpected RM_HEAP_ID record type: %u", info);
+ break;
+ }
+}
+
+static inline bool
+FilterByOrigin(LogicalDecodingContext *ctx, RepOriginId origin_id)
+{
+ if (ctx->callbacks.filter_by_origin_cb == NULL)
+ return false;
+
+ return filter_by_origin_cb_wrapper(ctx, origin_id);
+}
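+
+/*
+ * Illustration only: an output plugin that wants to receive just
+ * locally-originated changes could install a filter callback along these
+ * lines (the callback name is hypothetical; the signature follows
+ * LogicalDecodeFilterByOriginCB, and returning true filters the change
+ * out):
+ *
+ *		static bool
+ *		my_origin_filter(LogicalDecodingContext *ctx, RepOriginId origin_id)
+ *		{
+ *			return origin_id != InvalidRepOriginId;
+ *		}
+ */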
+
+/*
+ * Handle rmgr LOGICALMSG_ID records for DecodeRecordIntoReorderBuffer().
+ */
+static void
+DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ SnapBuild *builder = ctx->snapshot_builder;
+ XLogReaderState *r = buf->record;
+ TransactionId xid = XLogRecGetXid(r);
+ uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK;
+ RepOriginId origin_id = XLogRecGetOrigin(r);
+ Snapshot snapshot;
+ xl_logical_message *message;
+
+ if (info != XLOG_LOGICAL_MESSAGE)
+ elog(ERROR, "unexpected RM_LOGICALMSG_ID record type: %u", info);
+
+ ReorderBufferProcessXid(ctx->reorder, XLogRecGetXid(r), buf->origptr);
+
+ /*
+	 * If we don't have a snapshot or we are just fast-forwarding, there is no
+ * point in decoding messages.
+ */
+ if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT ||
+ ctx->fast_forward)
+ return;
+
+ message = (xl_logical_message *) XLogRecGetData(r);
+
+ if (message->dbId != ctx->slot->data.database ||
+ FilterByOrigin(ctx, origin_id))
+ return;
+
+ if (message->transactional &&
+ !SnapBuildProcessChange(builder, xid, buf->origptr))
+ return;
+ else if (!message->transactional &&
+ (SnapBuildCurrentState(builder) != SNAPBUILD_CONSISTENT ||
+ SnapBuildXactNeedsSkip(builder, buf->origptr)))
+ return;
+
+ snapshot = SnapBuildGetOrBuildSnapshot(builder, xid);
+ ReorderBufferQueueMessage(ctx->reorder, xid, snapshot, buf->endptr,
+ message->transactional,
+ message->message, /* first part of message is
+ * prefix */
+ message->message_size,
+ message->message + message->prefix_size);
+}
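+
+/*
+ * For reference, these records are generated on the primary by
+ * LogLogicalMessage() in message.c; a minimal sketch (the prefix and
+ * payload are made up):
+ *
+ *		XLogRecPtr	lsn;
+ *
+ *		lsn = LogLogicalMessage("my_prefix", "payload", 7, true);
+ *
+ * The message then reaches the output plugin via the message callback
+ * queued above by ReorderBufferQueueMessage().
+ */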
+
+/*
+ * Consolidated commit record handling between the different forms of commit
+ * records.
+ */
+static void
+DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
+ xl_xact_parsed_commit *parsed, TransactionId xid)
+{
+ XLogRecPtr origin_lsn = InvalidXLogRecPtr;
+ TimestampTz commit_time = parsed->xact_time;
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ int i;
+
+ if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN)
+ {
+ origin_lsn = parsed->origin_lsn;
+ commit_time = parsed->origin_timestamp;
+ }
+
+ /*
+ * Process invalidation messages, even if we're not interested in the
+ * transaction's contents, since the various caches need to always be
+ * consistent.
+ */
+ if (parsed->nmsgs > 0)
+ {
+ if (!ctx->fast_forward)
+ ReorderBufferAddInvalidations(ctx->reorder, xid, buf->origptr,
+ parsed->nmsgs, parsed->msgs);
+ ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr);
+ }
+
+ SnapBuildCommitTxn(ctx->snapshot_builder, buf->origptr, xid,
+ parsed->nsubxacts, parsed->subxacts);
+
+ /* ----
+ * Check whether we are interested in this specific transaction, and tell
+ * the reorderbuffer to forget the content of the (sub-)transactions
+ * if not.
+ *
+ * There can be several reasons we might not be interested in this
+ * transaction:
+ * 1) We might not be interested in decoding transactions up to this
+	 *	  LSN. This can happen because we previously decoded it and are now
+	 *	  just restarting, or because we haven't assembled a consistent
+	 *	  snapshot yet.
+	 * 2) The transaction happened in another database.
+	 * 3) The output plugin is not interested in the origin.
+	 * 4) We are doing fast-forwarding.
+ *
+ * We can't just use ReorderBufferAbort() here, because we need to execute
+ * the transaction's invalidations. This currently won't be needed if
+ * we're just skipping over the transaction because currently we only do
+ * so during startup, to get to the first transaction the client needs. As
+ * we have reset the catalog caches before starting to read WAL, and we
+ * haven't yet touched any catalogs, there can't be anything to invalidate.
+	 * But if we're "forgetting" this commit because it happened in
+	 * another database, the invalidations might be important, because they
+	 * could be for shared catalogs and we might have loaded data into the
+	 * relevant syscaches.
+	 * ----
+ */
+ if (SnapBuildXactNeedsSkip(ctx->snapshot_builder, buf->origptr) ||
+ (parsed->dbId != InvalidOid && parsed->dbId != ctx->slot->data.database) ||
+ ctx->fast_forward || FilterByOrigin(ctx, origin_id))
+ {
+ for (i = 0; i < parsed->nsubxacts; i++)
+ {
+ ReorderBufferForget(ctx->reorder, parsed->subxacts[i], buf->origptr);
+ }
+ ReorderBufferForget(ctx->reorder, xid, buf->origptr);
+
+ return;
+ }
+
+ /* tell the reorderbuffer about the surviving subtransactions */
+ for (i = 0; i < parsed->nsubxacts; i++)
+ {
+ ReorderBufferCommitChild(ctx->reorder, xid, parsed->subxacts[i],
+ buf->origptr, buf->endptr);
+ }
+
+	/* replay actions of the transaction and all its subtransactions in order */
+ ReorderBufferCommit(ctx->reorder, xid, buf->origptr, buf->endptr,
+ commit_time, origin_id, origin_lsn);
+}
+
+/*
+ * Get the data from the various forms of abort records and pass it on to
+ * snapbuild.c and reorderbuffer.c
+ */
+static void
+DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
+ xl_xact_parsed_abort *parsed, TransactionId xid)
+{
+ int i;
+
+ for (i = 0; i < parsed->nsubxacts; i++)
+ {
+ ReorderBufferAbort(ctx->reorder, parsed->subxacts[i],
+ buf->record->EndRecPtr);
+ }
+
+ ReorderBufferAbort(ctx->reorder, xid, buf->record->EndRecPtr);
+}
+
+/*
+ * Parse XLOG_HEAP_INSERT (not MULTI_INSERT!) records into tuplebufs.
+ *
+ * Inserts can contain the new tuple.
+ */
+static void
+DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ Size datalen;
+ char *tupledata;
+ Size tuplelen;
+ XLogReaderState *r = buf->record;
+ xl_heap_insert *xlrec;
+ ReorderBufferChange *change;
+ RelFileNode target_node;
+
+ xlrec = (xl_heap_insert *) XLogRecGetData(r);
+
+ /*
+ * Ignore insert records without new tuples (this does happen when
+ * raw_heap_insert marks the TOAST record as HEAP_INSERT_NO_LOGICAL).
+ */
+ if (!(xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE))
+ return;
+
+ /* only interested in our database */
+ XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
+ if (target_node.dbNode != ctx->slot->data.database)
+ return;
+
+ /* output plugin doesn't look for this origin, no need to queue */
+ if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
+ return;
+
+ change = ReorderBufferGetChange(ctx->reorder);
+ if (!(xlrec->flags & XLH_INSERT_IS_SPECULATIVE))
+ change->action = REORDER_BUFFER_CHANGE_INSERT;
+ else
+ change->action = REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT;
+ change->origin_id = XLogRecGetOrigin(r);
+
+ memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
+
+ tupledata = XLogRecGetBlockData(r, 0, &datalen);
+ tuplelen = datalen - SizeOfHeapHeader;
+
+ change->data.tp.newtuple =
+ ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
+
+ DecodeXLogTuple(tupledata, datalen, change->data.tp.newtuple);
+
+ change->data.tp.clear_toast_afterwards = true;
+
+ ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r), buf->origptr, change);
+}
+
+/*
+ * Parse XLOG_HEAP_UPDATE and XLOG_HEAP_HOT_UPDATE, which have the same layout
+ * in the record, from WAL into proper tuplebufs.
+ *
+ * Updates can possibly contain a new tuple and the old primary key.
+ */
+static void
+DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ XLogReaderState *r = buf->record;
+ xl_heap_update *xlrec;
+ ReorderBufferChange *change;
+ char *data;
+ RelFileNode target_node;
+
+ xlrec = (xl_heap_update *) XLogRecGetData(r);
+
+ /* only interested in our database */
+ XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
+ if (target_node.dbNode != ctx->slot->data.database)
+ return;
+
+ /* output plugin doesn't look for this origin, no need to queue */
+ if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
+ return;
+
+ change = ReorderBufferGetChange(ctx->reorder);
+ change->action = REORDER_BUFFER_CHANGE_UPDATE;
+ change->origin_id = XLogRecGetOrigin(r);
+ memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
+
+ if (xlrec->flags & XLH_UPDATE_CONTAINS_NEW_TUPLE)
+ {
+ Size datalen;
+ Size tuplelen;
+
+ data = XLogRecGetBlockData(r, 0, &datalen);
+
+ tuplelen = datalen - SizeOfHeapHeader;
+
+ change->data.tp.newtuple =
+ ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
+
+ DecodeXLogTuple(data, datalen, change->data.tp.newtuple);
+ }
+
+ if (xlrec->flags & XLH_UPDATE_CONTAINS_OLD)
+ {
+ Size datalen;
+ Size tuplelen;
+
+ /* caution, remaining data in record is not aligned */
+ data = XLogRecGetData(r) + SizeOfHeapUpdate;
+ datalen = XLogRecGetDataLen(r) - SizeOfHeapUpdate;
+ tuplelen = datalen - SizeOfHeapHeader;
+
+ change->data.tp.oldtuple =
+ ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
+
+ DecodeXLogTuple(data, datalen, change->data.tp.oldtuple);
+ }
+
+ change->data.tp.clear_toast_afterwards = true;
+
+ ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r), buf->origptr, change);
+}
+
+/*
+ * Parse XLOG_HEAP_DELETE from WAL into proper tuplebufs.
+ *
+ * Deletes can possibly contain the old primary key.
+ */
+static void
+DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ XLogReaderState *r = buf->record;
+ xl_heap_delete *xlrec;
+ ReorderBufferChange *change;
+ RelFileNode target_node;
+
+ xlrec = (xl_heap_delete *) XLogRecGetData(r);
+
+ /* only interested in our database */
+ XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
+ if (target_node.dbNode != ctx->slot->data.database)
+ return;
+
+ /* output plugin doesn't look for this origin, no need to queue */
+ if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
+ return;
+
+ change = ReorderBufferGetChange(ctx->reorder);
+
+ if (xlrec->flags & XLH_DELETE_IS_SUPER)
+ change->action = REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT;
+ else
+ change->action = REORDER_BUFFER_CHANGE_DELETE;
+
+ change->origin_id = XLogRecGetOrigin(r);
+
+ memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
+
+ /* old primary key stored */
+ if (xlrec->flags & XLH_DELETE_CONTAINS_OLD)
+ {
+ Size datalen = XLogRecGetDataLen(r) - SizeOfHeapDelete;
+ Size tuplelen = datalen - SizeOfHeapHeader;
+
+ Assert(XLogRecGetDataLen(r) > (SizeOfHeapDelete + SizeOfHeapHeader));
+
+ change->data.tp.oldtuple =
+ ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);
+
+ DecodeXLogTuple((char *) xlrec + SizeOfHeapDelete,
+ datalen, change->data.tp.oldtuple);
+ }
+
+ change->data.tp.clear_toast_afterwards = true;
+
+ ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r), buf->origptr, change);
+}
+
+/*
+ * Parse XLOG_HEAP_TRUNCATE from WAL.
+ */
+static void
+DecodeTruncate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ XLogReaderState *r = buf->record;
+ xl_heap_truncate *xlrec;
+ ReorderBufferChange *change;
+
+ xlrec = (xl_heap_truncate *) XLogRecGetData(r);
+
+ /* only interested in our database */
+ if (xlrec->dbId != ctx->slot->data.database)
+ return;
+
+ /* output plugin doesn't look for this origin, no need to queue */
+ if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
+ return;
+
+ change = ReorderBufferGetChange(ctx->reorder);
+ change->action = REORDER_BUFFER_CHANGE_TRUNCATE;
+ change->origin_id = XLogRecGetOrigin(r);
+ if (xlrec->flags & XLH_TRUNCATE_CASCADE)
+ change->data.truncate.cascade = true;
+ if (xlrec->flags & XLH_TRUNCATE_RESTART_SEQS)
+ change->data.truncate.restart_seqs = true;
+ change->data.truncate.nrelids = xlrec->nrelids;
+ change->data.truncate.relids = ReorderBufferGetRelids(ctx->reorder,
+ xlrec->nrelids);
+ memcpy(change->data.truncate.relids, xlrec->relids,
+ xlrec->nrelids * sizeof(Oid));
+ ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r),
+ buf->origptr, change);
+}
+
+/*
+ * Decode XLOG_HEAP2_MULTI_INSERT records into multiple tuplebufs.
+ *
+ * Currently MULTI_INSERT will always contain the full tuples.
+ */
+static void
+DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ XLogReaderState *r = buf->record;
+ xl_heap_multi_insert *xlrec;
+ int i;
+ char *data;
+ char *tupledata;
+ Size tuplelen;
+ RelFileNode rnode;
+
+ xlrec = (xl_heap_multi_insert *) XLogRecGetData(r);
+
+ /*
+ * Ignore insert records without new tuples. This happens when a
+ * multi_insert is done on a catalog or on a non-persistent relation.
+ */
+ if (!(xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE))
+ return;
+
+ /* only interested in our database */
+ XLogRecGetBlockTag(r, 0, &rnode, NULL, NULL);
+ if (rnode.dbNode != ctx->slot->data.database)
+ return;
+
+ /* output plugin doesn't look for this origin, no need to queue */
+ if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
+ return;
+
+ /*
+ * We know that this multi_insert isn't for a catalog, so the block should
+ * always have data even if a full-page write of it is taken.
+ */
+ tupledata = XLogRecGetBlockData(r, 0, &tuplelen);
+ Assert(tupledata != NULL);
+
+ data = tupledata;
+ for (i = 0; i < xlrec->ntuples; i++)
+ {
+ ReorderBufferChange *change;
+ xl_multi_insert_tuple *xlhdr;
+ int datalen;
+ ReorderBufferTupleBuf *tuple;
+ HeapTupleHeader header;
+
+ change = ReorderBufferGetChange(ctx->reorder);
+ change->action = REORDER_BUFFER_CHANGE_INSERT;
+ change->origin_id = XLogRecGetOrigin(r);
+
+ memcpy(&change->data.tp.relnode, &rnode, sizeof(RelFileNode));
+
+ xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(data);
+ data = ((char *) xlhdr) + SizeOfMultiInsertTuple;
+ datalen = xlhdr->datalen;
+
+ change->data.tp.newtuple =
+ ReorderBufferGetTupleBuf(ctx->reorder, datalen);
+
+ tuple = change->data.tp.newtuple;
+ header = tuple->tuple.t_data;
+
+ /* not a disk based tuple */
+ ItemPointerSetInvalid(&tuple->tuple.t_self);
+
+ /*
+ * We can only figure this out after reassembling the transactions.
+ */
+ tuple->tuple.t_tableOid = InvalidOid;
+
+ tuple->tuple.t_len = datalen + SizeofHeapTupleHeader;
+
+ memset(header, 0, SizeofHeapTupleHeader);
+
+ memcpy((char *) tuple->tuple.t_data + SizeofHeapTupleHeader,
+ (char *) data,
+ datalen);
+ header->t_infomask = xlhdr->t_infomask;
+ header->t_infomask2 = xlhdr->t_infomask2;
+ header->t_hoff = xlhdr->t_hoff;
+
+ /*
+ * Reset toast reassembly state only after the last row in the last
+ * xl_multi_insert_tuple record emitted by one heap_multi_insert()
+ * call.
+ */
+ if (xlrec->flags & XLH_INSERT_LAST_IN_MULTI &&
+ (i + 1) == xlrec->ntuples)
+ change->data.tp.clear_toast_afterwards = true;
+ else
+ change->data.tp.clear_toast_afterwards = false;
+
+ ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r),
+ buf->origptr, change);
+
+ /* move to the next xl_multi_insert_tuple entry */
+ data += datalen;
+ }
+ Assert(data == tupledata + tuplelen);
+}
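+
+/*
+ * For orientation, the block data iterated over above is a sequence of
+ * entries, each a SHORTALIGN'd xl_multi_insert_tuple header immediately
+ * followed by its tuple data (alignment padding omitted):
+ *
+ *		[xl_multi_insert_tuple][data][xl_multi_insert_tuple][data]...
+ */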
+
+/*
+ * Parse XLOG_HEAP_CONFIRM from wal into a confirmation change.
+ *
+ * This is pretty trivial; all the state was essentially already set up by
+ * the speculative insertion.
+ */
+static void
+DecodeSpecConfirm(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ XLogReaderState *r = buf->record;
+ ReorderBufferChange *change;
+ RelFileNode target_node;
+
+ /* only interested in our database */
+ XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
+ if (target_node.dbNode != ctx->slot->data.database)
+ return;
+
+ /* output plugin doesn't look for this origin, no need to queue */
+ if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
+ return;
+
+ change = ReorderBufferGetChange(ctx->reorder);
+ change->action = REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM;
+ change->origin_id = XLogRecGetOrigin(r);
+
+ memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
+
+ change->data.tp.clear_toast_afterwards = true;
+
+ ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r), buf->origptr, change);
+}
+
+
+/*
+ * Read a HeapTuple as WAL logged by heap_insert, heap_update and heap_delete
+ * (but not by heap_multi_insert) into a tuplebuf.
+ *
+ * The size 'len' and the pointer 'data' in the record need to be
+ * computed outside as they are record-specific.
+ */
+static void
+DecodeXLogTuple(char *data, Size len, ReorderBufferTupleBuf *tuple)
+{
+ xl_heap_header xlhdr;
+ int datalen = len - SizeOfHeapHeader;
+ HeapTupleHeader header;
+
+ Assert(datalen >= 0);
+
+ tuple->tuple.t_len = datalen + SizeofHeapTupleHeader;
+ header = tuple->tuple.t_data;
+
+ /* not a disk based tuple */
+ ItemPointerSetInvalid(&tuple->tuple.t_self);
+
+ /* we can only figure this out after reassembling the transactions */
+ tuple->tuple.t_tableOid = InvalidOid;
+
+ /* data is not stored aligned, copy to aligned storage */
+ memcpy((char *) &xlhdr,
+ data,
+ SizeOfHeapHeader);
+
+ memset(header, 0, SizeofHeapTupleHeader);
+
+ memcpy(((char *) tuple->tuple.t_data) + SizeofHeapTupleHeader,
+ data + SizeOfHeapHeader,
+ datalen);
+
+ header->t_infomask = xlhdr.t_infomask;
+ header->t_infomask2 = xlhdr.t_infomask2;
+ header->t_hoff = xlhdr.t_hoff;
+}
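+
+/*
+ * Layout sketch of the conversion above (alignment padding omitted): the
+ * WAL representation [xl_heap_header][tuple data] is rebuilt into an
+ * in-memory [HeapTupleHeaderData][tuple data], with t_infomask, t_infomask2
+ * and t_hoff copied over from the WAL header.
+ */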
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
new file mode 100644
index 0000000..0a34d26
--- /dev/null
+++ b/src/backend/replication/logical/launcher.c
@@ -0,0 +1,1173 @@
+/*-------------------------------------------------------------------------
+ * launcher.c
+ * PostgreSQL logical replication worker launcher process
+ *
+ * Copyright (c) 2016-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/logical/launcher.c
+ *
+ * NOTES
+ * This module contains the logical replication worker launcher which
+ * uses the background worker infrastructure to start the logical
+ * replication workers for every enabled subscription.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/heapam.h"
+#include "access/htup.h"
+#include "access/htup_details.h"
+#include "access/tableam.h"
+#include "access/xact.h"
+#include "catalog/pg_subscription.h"
+#include "catalog/pg_subscription_rel.h"
+#include "funcapi.h"
+#include "libpq/pqsignal.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "postmaster/bgworker.h"
+#include "postmaster/fork_process.h"
+#include "postmaster/interrupt.h"
+#include "postmaster/postmaster.h"
+#include "replication/logicallauncher.h"
+#include "replication/logicalworker.h"
+#include "replication/slot.h"
+#include "replication/walreceiver.h"
+#include "replication/worker_internal.h"
+#include "storage/ipc.h"
+#include "storage/proc.h"
+#include "storage/procarray.h"
+#include "storage/procsignal.h"
+#include "tcop/tcopprot.h"
+#include "utils/memutils.h"
+#include "utils/pg_lsn.h"
+#include "utils/ps_status.h"
+#include "utils/snapmgr.h"
+#include "utils/timeout.h"
+
+/* max sleep time between cycles (3min) */
+#define DEFAULT_NAPTIME_PER_CYCLE 180000L
+
+int max_logical_replication_workers = 4;
+int max_sync_workers_per_subscription = 2;
+
+LogicalRepWorker *MyLogicalRepWorker = NULL;
+
+typedef struct LogicalRepCtxStruct
+{
+ /* Supervisor process. */
+ pid_t launcher_pid;
+
+ /* Background workers. */
+ LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER];
+} LogicalRepCtxStruct;
+
+LogicalRepCtxStruct *LogicalRepCtx;
+
+typedef struct LogicalRepWorkerId
+{
+ Oid subid;
+ Oid relid;
+} LogicalRepWorkerId;
+
+typedef struct StopWorkersData
+{
+ int nestDepth; /* Sub-transaction nest level */
+ List *workers; /* List of LogicalRepWorkerId */
+ struct StopWorkersData *parent; /* This need not be an immediate
+ * subtransaction parent */
+} StopWorkersData;
+
+/*
+ * Stack of StopWorkersData elements. Each stack element contains the workers
+ * to be stopped for that subtransaction.
+ */
+static StopWorkersData *on_commit_stop_workers = NULL;
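+
+/*
+ * For example, when a command such as ALTER SUBSCRIPTION ... REFRESH
+ * PUBLICATION requests worker stops inside a subtransaction, the workers
+ * are recorded in the stack element for that nest depth; subcommit merges
+ * them into the parent's list, rollback discards them, and only a
+ * successful top-level commit actually stops them (see
+ * AtEOSubXact_ApplyLauncher and AtEOXact_ApplyLauncher below).
+ */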
+
+static void ApplyLauncherWakeup(void);
+static void logicalrep_launcher_onexit(int code, Datum arg);
+static void logicalrep_worker_onexit(int code, Datum arg);
+static void logicalrep_worker_detach(void);
+static void logicalrep_worker_cleanup(LogicalRepWorker *worker);
+
+static bool on_commit_launcher_wakeup = false;
+
+Datum pg_stat_get_subscription(PG_FUNCTION_ARGS);
+
+
+/*
+ * Load the list of subscriptions.
+ *
+ * Only the fields interesting for worker start/stop functions are filled for
+ * each subscription.
+ */
+static List *
+get_subscription_list(void)
+{
+ List *res = NIL;
+ Relation rel;
+ TableScanDesc scan;
+ HeapTuple tup;
+ MemoryContext resultcxt;
+
+ /* This is the context that we will allocate our output data in */
+ resultcxt = CurrentMemoryContext;
+
+ /*
+	 * Start a transaction so we can access pg_subscription, and get a snapshot.
+ * We don't have a use for the snapshot itself, but we're interested in
+ * the secondary effect that it sets RecentGlobalXmin. (This is critical
+ * for anything that reads heap pages, because HOT may decide to prune
+ * them even if the process doesn't attempt to modify any tuples.)
+ */
+ StartTransactionCommand();
+ (void) GetTransactionSnapshot();
+
+ rel = table_open(SubscriptionRelationId, AccessShareLock);
+ scan = table_beginscan_catalog(rel, 0, NULL);
+
+ while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection)))
+ {
+ Form_pg_subscription subform = (Form_pg_subscription) GETSTRUCT(tup);
+ Subscription *sub;
+ MemoryContext oldcxt;
+
+ /*
+ * Allocate our results in the caller's context, not the
+ * transaction's. We do this inside the loop, and restore the original
+ * context at the end, so that leaky things like heap_getnext() are
+ * not called in a potentially long-lived context.
+ */
+ oldcxt = MemoryContextSwitchTo(resultcxt);
+
+ sub = (Subscription *) palloc0(sizeof(Subscription));
+ sub->oid = subform->oid;
+ sub->dbid = subform->subdbid;
+ sub->owner = subform->subowner;
+ sub->enabled = subform->subenabled;
+ sub->name = pstrdup(NameStr(subform->subname));
+ /* We don't fill fields we are not interested in. */
+
+ res = lappend(res, sub);
+ MemoryContextSwitchTo(oldcxt);
+ }
+
+ table_endscan(scan);
+ table_close(rel, AccessShareLock);
+
+ CommitTransactionCommand();
+
+ return res;
+}
+
+/*
+ * Wait for a background worker to start up and attach to the shmem context.
+ *
+ * This is only needed for cleaning up the shared memory in case the worker
+ * fails to attach.
+ */
+static void
+WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
+ uint16 generation,
+ BackgroundWorkerHandle *handle)
+{
+ BgwHandleStatus status;
+ int rc;
+
+ for (;;)
+ {
+ pid_t pid;
+
+ CHECK_FOR_INTERRUPTS();
+
+ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+
+ /* Worker either died or has started; no need to do anything. */
+ if (!worker->in_use || worker->proc)
+ {
+ LWLockRelease(LogicalRepWorkerLock);
+ return;
+ }
+
+ LWLockRelease(LogicalRepWorkerLock);
+
+ /* Check if worker has died before attaching, and clean up after it. */
+ status = GetBackgroundWorkerPid(handle, &pid);
+
+ if (status == BGWH_STOPPED)
+ {
+ LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
+ /* Ensure that this was indeed the worker we waited for. */
+ if (generation == worker->generation)
+ logicalrep_worker_cleanup(worker);
+ LWLockRelease(LogicalRepWorkerLock);
+ return;
+ }
+
+ /*
+		 * We need a timeout because we generally don't get notified via the
+		 * latch about the worker attaching. But we don't expect to have to
+		 * wait long.
+ */
+ rc = WaitLatch(MyLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ 10L, WAIT_EVENT_BGWORKER_STARTUP);
+
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
+ }
+}
+
+/*
+ * Walks the workers array and searches for one that matches given
+ * subscription id and relid.
+ */
+LogicalRepWorker *
+logicalrep_worker_find(Oid subid, Oid relid, bool only_running)
+{
+ int i;
+ LogicalRepWorker *res = NULL;
+
+ Assert(LWLockHeldByMe(LogicalRepWorkerLock));
+
+ /* Search for attached worker for a given subscription id. */
+ for (i = 0; i < max_logical_replication_workers; i++)
+ {
+ LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
+ if (w->in_use && w->subid == subid && w->relid == relid &&
+ (!only_running || w->proc))
+ {
+ res = w;
+ break;
+ }
+ }
+
+ return res;
+}
+
+/*
+ * Similar to logicalrep_worker_find(), but returns a list of all workers
+ * for the subscription, instead of just one.
+ */
+List *
+logicalrep_workers_find(Oid subid, bool only_running)
+{
+ int i;
+ List *res = NIL;
+
+ Assert(LWLockHeldByMe(LogicalRepWorkerLock));
+
+ /* Search for attached worker for a given subscription id. */
+ for (i = 0; i < max_logical_replication_workers; i++)
+ {
+ LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
+ if (w->in_use && w->subid == subid && (!only_running || w->proc))
+ res = lappend(res, w);
+ }
+
+ return res;
+}
+
+/*
+ * Start a new apply background worker, if possible.
+ */
+void
+logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid,
+ Oid relid)
+{
+ BackgroundWorker bgw;
+ BackgroundWorkerHandle *bgw_handle;
+ uint16 generation;
+ int i;
+ int slot = 0;
+ LogicalRepWorker *worker = NULL;
+ int nsyncworkers;
+ TimestampTz now;
+
+ ereport(DEBUG1,
+ (errmsg("starting logical replication worker for subscription \"%s\"",
+ subname)));
+
+ /* Report this after the initial starting message for consistency. */
+ if (max_replication_slots == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
+ errmsg("cannot start logical replication workers when max_replication_slots = 0")));
+
+ /*
+	 * We need to modify the shared memory under the lock so that we have a
+	 * consistent view.
+ */
+ LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
+
+retry:
+ /* Find unused worker slot. */
+ for (i = 0; i < max_logical_replication_workers; i++)
+ {
+ LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
+ if (!w->in_use)
+ {
+ worker = w;
+ slot = i;
+ break;
+ }
+ }
+
+ nsyncworkers = logicalrep_sync_worker_count(subid);
+
+ now = GetCurrentTimestamp();
+
+ /*
+	 * If we didn't find a free slot, try to do garbage collection. We do
+	 * this because if some worker failed to start up and its parent crashed
+	 * while waiting, the in_use state was never cleared.
+ */
+ if (worker == NULL || nsyncworkers >= max_sync_workers_per_subscription)
+ {
+ bool did_cleanup = false;
+
+ for (i = 0; i < max_logical_replication_workers; i++)
+ {
+ LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
+ /*
+ * If the worker was marked in use but didn't manage to attach in
+ * time, clean it up.
+ */
+ if (w->in_use && !w->proc &&
+ TimestampDifferenceExceeds(w->launch_time, now,
+ wal_receiver_timeout))
+ {
+ elog(WARNING,
+ "logical replication worker for subscription %u took too long to start; canceled",
+ w->subid);
+
+ logicalrep_worker_cleanup(w);
+ did_cleanup = true;
+ }
+ }
+
+ if (did_cleanup)
+ goto retry;
+ }
+
+ /*
+ * If we reached the sync worker limit per subscription, just exit
+ * silently as we might get here because of an otherwise harmless race
+ * condition.
+ */
+ if (nsyncworkers >= max_sync_workers_per_subscription)
+ {
+ LWLockRelease(LogicalRepWorkerLock);
+ return;
+ }
+
+ /*
+	 * However, if there are no more free worker slots, inform the user about
+	 * it before exiting.
+ */
+ if (worker == NULL)
+ {
+ LWLockRelease(LogicalRepWorkerLock);
+ ereport(WARNING,
+ (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
+ errmsg("out of logical replication worker slots"),
+ errhint("You might need to increase max_logical_replication_workers.")));
+ return;
+ }
+
+ /* Prepare the worker slot. */
+ worker->launch_time = now;
+ worker->in_use = true;
+ worker->generation++;
+ worker->proc = NULL;
+ worker->dbid = dbid;
+ worker->userid = userid;
+ worker->subid = subid;
+ worker->relid = relid;
+ worker->relstate = SUBREL_STATE_UNKNOWN;
+ worker->relstate_lsn = InvalidXLogRecPtr;
+ worker->last_lsn = InvalidXLogRecPtr;
+ TIMESTAMP_NOBEGIN(worker->last_send_time);
+ TIMESTAMP_NOBEGIN(worker->last_recv_time);
+ worker->reply_lsn = InvalidXLogRecPtr;
+ TIMESTAMP_NOBEGIN(worker->reply_time);
+
+ /* Before releasing lock, remember generation for future identification. */
+ generation = worker->generation;
+
+ LWLockRelease(LogicalRepWorkerLock);
+
+ /* Register the new dynamic worker. */
+ memset(&bgw, 0, sizeof(bgw));
+ bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
+ BGWORKER_BACKEND_DATABASE_CONNECTION;
+ bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
+ snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
+ snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyWorkerMain");
+ if (OidIsValid(relid))
+ snprintf(bgw.bgw_name, BGW_MAXLEN,
+ "logical replication worker for subscription %u sync %u", subid, relid);
+ else
+ snprintf(bgw.bgw_name, BGW_MAXLEN,
+ "logical replication worker for subscription %u", subid);
+ snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication worker");
+
+ bgw.bgw_restart_time = BGW_NEVER_RESTART;
+ bgw.bgw_notify_pid = MyProcPid;
+ bgw.bgw_main_arg = Int32GetDatum(slot);
+
+ if (!RegisterDynamicBackgroundWorker(&bgw, &bgw_handle))
+ {
+ /* Failed to start worker, so clean up the worker slot. */
+ LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
+ Assert(generation == worker->generation);
+ logicalrep_worker_cleanup(worker);
+ LWLockRelease(LogicalRepWorkerLock);
+
+ ereport(WARNING,
+ (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
+ errmsg("out of background worker slots"),
+ errhint("You might need to increase max_worker_processes.")));
+ return;
+ }
+
+ /* Now wait until it attaches. */
+ WaitForReplicationWorkerAttach(worker, generation, bgw_handle);
+}
+
+/*
+ * Stop the logical replication worker for subid/relid, if any, and wait until
+ * it detaches from the slot.
+ */
+void
+logicalrep_worker_stop(Oid subid, Oid relid)
+{
+ LogicalRepWorker *worker;
+ uint16 generation;
+
+ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+
+ worker = logicalrep_worker_find(subid, relid, false);
+
+ /* No worker, nothing to do. */
+ if (!worker)
+ {
+ LWLockRelease(LogicalRepWorkerLock);
+ return;
+ }
+
+ /*
+	 * Remember the worker's generation so we can check whether what we see
+	 * later is still the same worker.
+ */
+ generation = worker->generation;
+
+ /*
+ * If we found a worker but it does not have proc set then it is still
+ * starting up; wait for it to finish starting and then kill it.
+ */
+ while (worker->in_use && !worker->proc)
+ {
+ int rc;
+
+ LWLockRelease(LogicalRepWorkerLock);
+
+ /* Wait a bit --- we don't expect to have to wait long. */
+ rc = WaitLatch(MyLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ 10L, WAIT_EVENT_BGWORKER_STARTUP);
+
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
+
+ /* Recheck worker status. */
+ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+
+ /*
+ * Check whether the worker slot is no longer used, which would mean
+ * that the worker has exited, or whether the worker generation is
+ * different, meaning that a different worker has taken the slot.
+ */
+ if (!worker->in_use || worker->generation != generation)
+ {
+ LWLockRelease(LogicalRepWorkerLock);
+ return;
+ }
+
+ /* Worker has assigned proc, so it has started. */
+ if (worker->proc)
+ break;
+ }
+
+ /* Now terminate the worker ... */
+ kill(worker->proc->pid, SIGTERM);
+
+ /* ... and wait for it to die. */
+ for (;;)
+ {
+ int rc;
+
+ /* is it gone? */
+ if (!worker->proc || worker->generation != generation)
+ break;
+
+ LWLockRelease(LogicalRepWorkerLock);
+
+ /* Wait a bit --- we don't expect to have to wait long. */
+ rc = WaitLatch(MyLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ 10L, WAIT_EVENT_BGWORKER_SHUTDOWN);
+
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
+
+ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+ }
+
+ LWLockRelease(LogicalRepWorkerLock);
+}
+
+/*
+ * Request worker for specified sub/rel to be stopped on commit.
+ */
+void
+logicalrep_worker_stop_at_commit(Oid subid, Oid relid)
+{
+ int nestDepth = GetCurrentTransactionNestLevel();
+ LogicalRepWorkerId *wid;
+ MemoryContext oldctx;
+
+ /* Make sure we store the info in context that survives until commit. */
+ oldctx = MemoryContextSwitchTo(TopTransactionContext);
+
+ /* Check that previous transactions were properly cleaned up. */
+ Assert(on_commit_stop_workers == NULL ||
+ nestDepth >= on_commit_stop_workers->nestDepth);
+
+ /*
+ * Push a new stack element if we don't already have one for the current
+ * nestDepth.
+ */
+ if (on_commit_stop_workers == NULL ||
+ nestDepth > on_commit_stop_workers->nestDepth)
+ {
+ StopWorkersData *newdata = palloc(sizeof(StopWorkersData));
+
+ newdata->nestDepth = nestDepth;
+ newdata->workers = NIL;
+ newdata->parent = on_commit_stop_workers;
+ on_commit_stop_workers = newdata;
+ }
+
+ /*
+ * Finally add a new worker into the worker list of the current
+ * subtransaction.
+ */
+ wid = palloc(sizeof(LogicalRepWorkerId));
+ wid->subid = subid;
+ wid->relid = relid;
+ on_commit_stop_workers->workers =
+ lappend(on_commit_stop_workers->workers, wid);
+
+ MemoryContextSwitchTo(oldctx);
+}
+
+/*
+ * Wake up (using the latch) any logical replication worker for the given sub/rel.
+ */
+void
+logicalrep_worker_wakeup(Oid subid, Oid relid)
+{
+ LogicalRepWorker *worker;
+
+ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+
+ worker = logicalrep_worker_find(subid, relid, true);
+
+ if (worker)
+ logicalrep_worker_wakeup_ptr(worker);
+
+ LWLockRelease(LogicalRepWorkerLock);
+}
+
+/*
+ * Wake up (using latch) the specified logical replication worker.
+ *
+ * Caller must hold lock, else worker->proc could change under us.
+ */
+void
+logicalrep_worker_wakeup_ptr(LogicalRepWorker *worker)
+{
+ Assert(LWLockHeldByMe(LogicalRepWorkerLock));
+
+ SetLatch(&worker->proc->procLatch);
+}
+
+/*
+ * Attach to a slot.
+ */
+void
+logicalrep_worker_attach(int slot)
+{
+ /* Block concurrent access. */
+ LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
+
+ Assert(slot >= 0 && slot < max_logical_replication_workers);
+ MyLogicalRepWorker = &LogicalRepCtx->workers[slot];
+
+ if (!MyLogicalRepWorker->in_use)
+ {
+ LWLockRelease(LogicalRepWorkerLock);
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical replication worker slot %d is empty, cannot attach",
+ slot)));
+ }
+
+ if (MyLogicalRepWorker->proc)
+ {
+ LWLockRelease(LogicalRepWorkerLock);
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical replication worker slot %d is already used by "
+ "another worker, cannot attach", slot)));
+ }
+
+ MyLogicalRepWorker->proc = MyProc;
+ before_shmem_exit(logicalrep_worker_onexit, (Datum) 0);
+
+ LWLockRelease(LogicalRepWorkerLock);
+}
+
+/*
+ * Detach the worker (cleans up the worker info).
+ */
+static void
+logicalrep_worker_detach(void)
+{
+ /* Block concurrent access. */
+ LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE);
+
+ logicalrep_worker_cleanup(MyLogicalRepWorker);
+
+ LWLockRelease(LogicalRepWorkerLock);
+}
+
+/*
+ * Clean up worker info.
+ */
+static void
+logicalrep_worker_cleanup(LogicalRepWorker *worker)
+{
+ Assert(LWLockHeldByMeInMode(LogicalRepWorkerLock, LW_EXCLUSIVE));
+
+ worker->in_use = false;
+ worker->proc = NULL;
+ worker->dbid = InvalidOid;
+ worker->userid = InvalidOid;
+ worker->subid = InvalidOid;
+ worker->relid = InvalidOid;
+}
+
+/*
+ * Cleanup function for logical replication launcher.
+ *
+ * Called on logical replication launcher exit.
+ */
+static void
+logicalrep_launcher_onexit(int code, Datum arg)
+{
+ LogicalRepCtx->launcher_pid = 0;
+}
+
+/*
+ * Cleanup function.
+ *
+ * Called on logical replication worker exit.
+ */
+static void
+logicalrep_worker_onexit(int code, Datum arg)
+{
+ /* Disconnect gracefully from the remote side. */
+ if (LogRepWorkerWalRcvConn)
+ walrcv_disconnect(LogRepWorkerWalRcvConn);
+
+ logicalrep_worker_detach();
+
+ ApplyLauncherWakeup();
+}
+
+/*
+ * Count the number of registered (not necessarily running) sync workers
+ * for a subscription.
+ */
+int
+logicalrep_sync_worker_count(Oid subid)
+{
+ int i;
+ int res = 0;
+
+ Assert(LWLockHeldByMe(LogicalRepWorkerLock));
+
+ /* Search for attached worker for a given subscription id. */
+ for (i = 0; i < max_logical_replication_workers; i++)
+ {
+ LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
+ if (w->subid == subid && OidIsValid(w->relid))
+ res++;
+ }
+
+ return res;
+}
+
+/*
+ * ApplyLauncherShmemSize
+ * Compute space needed for replication launcher shared memory
+ */
+Size
+ApplyLauncherShmemSize(void)
+{
+ Size size;
+
+ /*
+ * Need the fixed struct and the array of LogicalRepWorker.
+ */
+ size = sizeof(LogicalRepCtxStruct);
+ size = MAXALIGN(size);
+ size = add_size(size, mul_size(max_logical_replication_workers,
+ sizeof(LogicalRepWorker)));
+ return size;
+}
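+
+/*
+ * As a rough worked example, with max_logical_replication_workers = 4 this
+ * is MAXALIGN(sizeof(LogicalRepCtxStruct)) + 4 * sizeof(LogicalRepWorker);
+ * the exact byte count depends on the platform's struct layout.
+ */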
+
+/*
+ * ApplyLauncherRegister
+ * Register a background worker running the logical replication launcher.
+ */
+void
+ApplyLauncherRegister(void)
+{
+ BackgroundWorker bgw;
+
+ if (max_logical_replication_workers == 0)
+ return;
+
+ memset(&bgw, 0, sizeof(bgw));
+ bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
+ BGWORKER_BACKEND_DATABASE_CONNECTION;
+ bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
+ snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
+ snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyLauncherMain");
+ snprintf(bgw.bgw_name, BGW_MAXLEN,
+ "logical replication launcher");
+ snprintf(bgw.bgw_type, BGW_MAXLEN,
+ "logical replication launcher");
+ bgw.bgw_restart_time = 5;
+ bgw.bgw_notify_pid = 0;
+ bgw.bgw_main_arg = (Datum) 0;
+
+ RegisterBackgroundWorker(&bgw);
+}
+
+/*
+ * ApplyLauncherShmemInit
+ * Allocate and initialize replication launcher shared memory
+ */
+void
+ApplyLauncherShmemInit(void)
+{
+ bool found;
+
+ LogicalRepCtx = (LogicalRepCtxStruct *)
+ ShmemInitStruct("Logical Replication Launcher Data",
+ ApplyLauncherShmemSize(),
+ &found);
+
+ if (!found)
+ {
+ int slot;
+
+ memset(LogicalRepCtx, 0, ApplyLauncherShmemSize());
+
+ /* Initialize memory and spin locks for each worker slot. */
+ for (slot = 0; slot < max_logical_replication_workers; slot++)
+ {
+ LogicalRepWorker *worker = &LogicalRepCtx->workers[slot];
+
+ memset(worker, 0, sizeof(LogicalRepWorker));
+ SpinLockInit(&worker->relmutex);
+ }
+ }
+}
+
+/*
+ * Check whether the current transaction has manipulated logical replication
+ * workers.
+ */
+bool
+XactManipulatesLogicalReplicationWorkers(void)
+{
+ return (on_commit_stop_workers != NULL);
+}
+
+/*
+ * Wakeup the launcher on commit if requested.
+ */
+void
+AtEOXact_ApplyLauncher(bool isCommit)
+{
+ Assert(on_commit_stop_workers == NULL ||
+ (on_commit_stop_workers->nestDepth == 1 &&
+ on_commit_stop_workers->parent == NULL));
+
+ if (isCommit)
+ {
+ ListCell *lc;
+
+ if (on_commit_stop_workers != NULL)
+ {
+ List *workers = on_commit_stop_workers->workers;
+
+ foreach(lc, workers)
+ {
+ LogicalRepWorkerId *wid = lfirst(lc);
+
+ logicalrep_worker_stop(wid->subid, wid->relid);
+ }
+ }
+
+ if (on_commit_launcher_wakeup)
+ ApplyLauncherWakeup();
+ }
+
+ /*
+ * No need to pfree on_commit_stop_workers. It was allocated in
+ * transaction memory context, which is going to be cleaned soon.
+ */
+ on_commit_stop_workers = NULL;
+ on_commit_launcher_wakeup = false;
+}
+
+/*
+ * On commit, merge the current on_commit_stop_workers list into the
+ * immediate parent, if present.
+ * On rollback, discard the current on_commit_stop_workers list.
+ * Either way, pop the stack.
+ */
+void
+AtEOSubXact_ApplyLauncher(bool isCommit, int nestDepth)
+{
+ StopWorkersData *parent;
+
+ /* Exit immediately if there's no work to do at this level. */
+ if (on_commit_stop_workers == NULL ||
+ on_commit_stop_workers->nestDepth < nestDepth)
+ return;
+
+ Assert(on_commit_stop_workers->nestDepth == nestDepth);
+
+ parent = on_commit_stop_workers->parent;
+
+ if (isCommit)
+ {
+ /*
+ * If the upper stack element is not an immediate parent
+ * subtransaction, just decrement the notional nesting depth without
+ * doing any real work. Else, we need to merge the current workers
+ * list into the parent.
+ */
+ if (!parent || parent->nestDepth < nestDepth - 1)
+ {
+ on_commit_stop_workers->nestDepth--;
+ return;
+ }
+
+ parent->workers =
+ list_concat(parent->workers, on_commit_stop_workers->workers);
+ }
+ else
+ {
+ /*
+ * Abandon everything that was done at this nesting level. Explicitly
+ * free memory to avoid a transaction-lifespan leak.
+ */
+ list_free_deep(on_commit_stop_workers->workers);
+ }
+
+ /*
+	 * We have taken care of the current subtransaction's workers list for
+	 * both abort and commit, so we are ready to pop the stack.
+ */
+ pfree(on_commit_stop_workers);
+ on_commit_stop_workers = parent;
+}
+
+/*
+ * Request wakeup of the launcher on commit of the transaction.
+ *
+ * This is used to signal the launcher to stop sleeping and process the
+ * subscriptions when the current transaction commits. It should be used
+ * when a new tuple was added to the pg_subscription catalog.
+ */
+void
+ApplyLauncherWakeupAtCommit(void)
+{
+ if (!on_commit_launcher_wakeup)
+ on_commit_launcher_wakeup = true;
+}
+
+static void
+ApplyLauncherWakeup(void)
+{
+ if (LogicalRepCtx->launcher_pid != 0)
+ kill(LogicalRepCtx->launcher_pid, SIGUSR1);
+}
+
+/*
+ * Main loop for the apply launcher process.
+ */
+void
+ApplyLauncherMain(Datum main_arg)
+{
+ TimestampTz last_start_time = 0;
+
+ ereport(DEBUG1,
+ (errmsg("logical replication launcher started")));
+
+ before_shmem_exit(logicalrep_launcher_onexit, (Datum) 0);
+
+ Assert(LogicalRepCtx->launcher_pid == 0);
+ LogicalRepCtx->launcher_pid = MyProcPid;
+
+ /* Establish signal handlers. */
+ pqsignal(SIGHUP, SignalHandlerForConfigReload);
+ pqsignal(SIGTERM, die);
+ BackgroundWorkerUnblockSignals();
+
+ /*
+ * Establish connection to nailed catalogs (we only ever access
+ * pg_subscription).
+ */
+ BackgroundWorkerInitializeConnection(NULL, NULL, 0);
+
+ /* Enter main loop */
+ for (;;)
+ {
+ int rc;
+ List *sublist;
+ ListCell *lc;
+ MemoryContext subctx;
+ MemoryContext oldctx;
+ TimestampTz now;
+ long wait_time = DEFAULT_NAPTIME_PER_CYCLE;
+
+ CHECK_FOR_INTERRUPTS();
+
+ now = GetCurrentTimestamp();
+
+		/* Limit the start retry to once per wal_retrieve_retry_interval */
+ if (TimestampDifferenceExceeds(last_start_time, now,
+ wal_retrieve_retry_interval))
+ {
+ /* Use temporary context for the database list and worker info. */
+ subctx = AllocSetContextCreate(TopMemoryContext,
+ "Logical Replication Launcher sublist",
+ ALLOCSET_DEFAULT_SIZES);
+ oldctx = MemoryContextSwitchTo(subctx);
+
+			/* Search for subscriptions to start or stop. */
+ sublist = get_subscription_list();
+
+ /* Start the missing workers for enabled subscriptions. */
+ foreach(lc, sublist)
+ {
+ Subscription *sub = (Subscription *) lfirst(lc);
+ LogicalRepWorker *w;
+
+ if (!sub->enabled)
+ continue;
+
+ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+ w = logicalrep_worker_find(sub->oid, InvalidOid, false);
+ LWLockRelease(LogicalRepWorkerLock);
+
+ if (w == NULL)
+ {
+ last_start_time = now;
+ wait_time = wal_retrieve_retry_interval;
+
+ logicalrep_worker_launch(sub->dbid, sub->oid, sub->name,
+ sub->owner, InvalidOid);
+ }
+ }
+
+ /* Switch back to original memory context. */
+ MemoryContextSwitchTo(oldctx);
+ /* Clean the temporary memory. */
+ MemoryContextDelete(subctx);
+ }
+ else
+ {
+ /*
+			 * The wait in the previous cycle was interrupted less than
+			 * wal_retrieve_retry_interval after the last worker was started;
+			 * this usually means the worker crashed, so we should retry
+			 * after wal_retrieve_retry_interval again.
+ */
+ wait_time = wal_retrieve_retry_interval;
+ }
+
+ /* Wait for more work. */
+ rc = WaitLatch(MyLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ wait_time,
+ WAIT_EVENT_LOGICAL_LAUNCHER_MAIN);
+
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
+
+ if (ConfigReloadPending)
+ {
+ ConfigReloadPending = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ }
+ }
+
+ /* Not reachable */
+}
+
+/*
+ * Is current process the logical replication launcher?
+ */
+bool
+IsLogicalLauncher(void)
+{
+ return LogicalRepCtx->launcher_pid == MyProcPid;
+}
+
+/*
+ * Returns the state of the subscription workers.
+ */
+Datum
+pg_stat_get_subscription(PG_FUNCTION_ARGS)
+{
+#define PG_STAT_GET_SUBSCRIPTION_COLS 8
+ Oid subid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
+ int i;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+
+ /* check to see if caller supports us returning a tuplestore */
+ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-valued function called in context that cannot accept a set")));
+ if (!(rsinfo->allowedModes & SFRM_Materialize))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("materialize mode required, but it is not allowed in this context")));
+
+ /* Build a tuple descriptor for our result type */
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+
+ tupstore = tuplestore_begin_heap(true, false, work_mem);
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setResult = tupstore;
+ rsinfo->setDesc = tupdesc;
+
+ MemoryContextSwitchTo(oldcontext);
+
+	/* Make sure we get a consistent view of the workers. */
+ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+
+	for (i = 0; i < max_logical_replication_workers; i++)
+ {
+ /* for each row */
+ Datum values[PG_STAT_GET_SUBSCRIPTION_COLS];
+ bool nulls[PG_STAT_GET_SUBSCRIPTION_COLS];
+ int worker_pid;
+ LogicalRepWorker worker;
+
+ memcpy(&worker, &LogicalRepCtx->workers[i],
+ sizeof(LogicalRepWorker));
+ if (!worker.proc || !IsBackendPid(worker.proc->pid))
+ continue;
+
+ if (OidIsValid(subid) && worker.subid != subid)
+ continue;
+
+ worker_pid = worker.proc->pid;
+
+ MemSet(values, 0, sizeof(values));
+ MemSet(nulls, 0, sizeof(nulls));
+
+ values[0] = ObjectIdGetDatum(worker.subid);
+ if (OidIsValid(worker.relid))
+ values[1] = ObjectIdGetDatum(worker.relid);
+ else
+ nulls[1] = true;
+ values[2] = Int32GetDatum(worker_pid);
+ if (XLogRecPtrIsInvalid(worker.last_lsn))
+ nulls[3] = true;
+ else
+ values[3] = LSNGetDatum(worker.last_lsn);
+ if (worker.last_send_time == 0)
+ nulls[4] = true;
+ else
+ values[4] = TimestampTzGetDatum(worker.last_send_time);
+ if (worker.last_recv_time == 0)
+ nulls[5] = true;
+ else
+ values[5] = TimestampTzGetDatum(worker.last_recv_time);
+ if (XLogRecPtrIsInvalid(worker.reply_lsn))
+ nulls[6] = true;
+ else
+ values[6] = LSNGetDatum(worker.reply_lsn);
+ if (worker.reply_time == 0)
+ nulls[7] = true;
+ else
+ values[7] = TimestampTzGetDatum(worker.reply_time);
+
+ tuplestore_putvalues(tupstore, tupdesc, values, nulls);
+
+ /*
+ * If only a single subscription was requested, and we found it,
+ * break.
+ */
+ if (OidIsValid(subid))
+ break;
+ }
+
+ LWLockRelease(LogicalRepWorkerLock);
+
+ /* clean up and return the tuplestore */
+ tuplestore_donestoring(tupstore);
+
+ return (Datum) 0;
+}
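+
+/*
+ * Note: the pg_stat_subscription system view is defined over
+ * pg_stat_get_subscription(NULL) and is the usual interface to the data
+ * assembled above.
+ */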
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
new file mode 100644
index 0000000..61902be
--- /dev/null
+++ b/src/backend/replication/logical/logical.c
@@ -0,0 +1,1093 @@
+/*-------------------------------------------------------------------------
+ * logical.c
+ * PostgreSQL logical decoding coordination
+ *
+ * Copyright (c) 2012-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/logical/logical.c
+ *
+ * NOTES
+ * This file coordinates interaction between the various modules that
+ * together provide logical decoding, primarily by providing so
+ * called LogicalDecodingContexts. The goal is to encapsulate most of the
+ * internal complexity for consumers of logical decoding, so they can
+ * create and consume a changestream with little code. Built-in
+ * consumers are the walsender and SQL SRF interface, but it's possible to
+ * add further ones without changing core code, e.g. to consume changes in
+ * a bgworker.
+ *
+ * The idea is that a consumer provides three callbacks, one to read WAL,
+ * one to prepare a data write, and a final one for actually writing since
+ * their implementation depends on the type of consumer. Check
+ * logicalfuncs.c for an example implementation of a fairly simple consumer
+ * and an implementation of a WAL reading callback that's suitable for
+ * simple consumers.
+ *-------------------------------------------------------------------------
+ */
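+
+/*
+ * Illustration only: the write-path callbacks a consumer passes in could
+ * look like this sketch (the names and the my_file destination, a FILE *
+ * the consumer opened, are hypothetical; the signatures follow the
+ * LogicalOutputPluginWriterPrepareWrite and LogicalOutputPluginWriterWrite
+ * typedefs in logical.h):
+ *
+ *		static void
+ *		my_prepare_write(LogicalDecodingContext *ctx, XLogRecPtr lsn,
+ *						 TransactionId xid, bool last_write)
+ *		{
+ *			resetStringInfo(ctx->out);
+ *		}
+ *
+ *		static void
+ *		my_write(LogicalDecodingContext *ctx, XLogRecPtr lsn,
+ *				 TransactionId xid, bool last_write)
+ *		{
+ *			fwrite(ctx->out->data, 1, ctx->out->len, my_file);
+ *		}
+ */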
+
+#include "postgres.h"
+
+#include "access/xact.h"
+#include "access/xlog_internal.h"
+#include "fmgr.h"
+#include "miscadmin.h"
+#include "replication/decode.h"
+#include "replication/logical.h"
+#include "replication/origin.h"
+#include "replication/reorderbuffer.h"
+#include "replication/snapbuild.h"
+#include "storage/proc.h"
+#include "storage/procarray.h"
+#include "utils/memutils.h"
+
+/* data for errcontext callback */
+typedef struct LogicalErrorCallbackState
+{
+ LogicalDecodingContext *ctx;
+ const char *callback_name;
+ XLogRecPtr report_location;
+} LogicalErrorCallbackState;
+
+/* wrappers around output plugin callbacks */
+static void output_plugin_error_callback(void *arg);
+static void startup_cb_wrapper(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
+ bool is_init);
+static void shutdown_cb_wrapper(LogicalDecodingContext *ctx);
+static void begin_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn);
+static void commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
+ XLogRecPtr commit_lsn);
+static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
+ Relation relation, ReorderBufferChange *change);
+static void truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
+ int nrelations, Relation relations[], ReorderBufferChange *change);
+static void message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
+ XLogRecPtr message_lsn, bool transactional,
+ const char *prefix, Size message_size, const char *message);
+
+static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, char *plugin);
+
+/*
+ * Make sure the current settings & environment are capable of doing logical
+ * decoding.
+ */
+void
+CheckLogicalDecodingRequirements(void)
+{
+ CheckSlotRequirements();
+
+ /*
+ * NB: Adding a new requirement likely means that RestoreSlotFromDisk()
+ * needs the same check.
+ */
+
+ if (wal_level < WAL_LEVEL_LOGICAL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical decoding requires wal_level >= logical")));
+
+ if (MyDatabaseId == InvalidOid)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical decoding requires a database connection")));
+
+ /* ----
+	 * TODO: We've got to change that someday soon...
+ *
+	 * There are basically three things missing to allow this:
+ * 1) We need to be able to correctly and quickly identify the timeline a
+ * LSN belongs to
+ * 2) We need to force hot_standby_feedback to be enabled at all times so
+ * the primary cannot remove rows we need.
+ * 3) support dropping replication slots referring to a database, in
+ * dbase_redo. There can't be any active ones due to HS recovery
+ * conflicts, so that should be relatively easy.
+ * ----
+ */
+ if (RecoveryInProgress())
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("logical decoding cannot be used while in recovery")));
+}
+
+/*
+ * Helper function for CreateInitDecodingContext() and
+ * CreateDecodingContext() performing common tasks.
+ */
+static LogicalDecodingContext *
+StartupDecodingContext(List *output_plugin_options,
+ XLogRecPtr start_lsn,
+ TransactionId xmin_horizon,
+ bool need_full_snapshot,
+ bool fast_forward,
+ XLogReaderRoutine *xl_routine,
+ LogicalOutputPluginWriterPrepareWrite prepare_write,
+ LogicalOutputPluginWriterWrite do_write,
+ LogicalOutputPluginWriterUpdateProgress update_progress)
+{
+ ReplicationSlot *slot;
+ MemoryContext context,
+ old_context;
+ LogicalDecodingContext *ctx;
+
+ /* shorter lines... */
+ slot = MyReplicationSlot;
+
+ context = AllocSetContextCreate(CurrentMemoryContext,
+ "Logical decoding context",
+ ALLOCSET_DEFAULT_SIZES);
+ old_context = MemoryContextSwitchTo(context);
+ ctx = palloc0(sizeof(LogicalDecodingContext));
+
+ ctx->context = context;
+
+ /*
+ * (re-)load output plugins, so we detect a bad (removed) output plugin
+ * now.
+ */
+ if (!fast_forward)
+ LoadOutputPlugin(&ctx->callbacks, NameStr(slot->data.plugin));
+
+	/*
+	 * Now that the slot's xmin has been set, we can announce ourselves as a
+	 * logical decoding backend which doesn't need to be checked individually
+	 * when computing the xmin horizon because the xmin is enforced via
+	 * replication slots.
+	 *
+	 * We can only do so if we're outside of a transaction (i.e. the case
+	 * when streaming changes via walsender); otherwise an already set up
+	 * snapshot/xid would end up being ignored. That's not a particularly
+	 * bothersome restriction since the SQL interface can't be used for
+	 * streaming anyway.
+	 */
+ if (!IsTransactionOrTransactionBlock())
+ {
+ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ MyPgXact->vacuumFlags |= PROC_IN_LOGICAL_DECODING;
+ LWLockRelease(ProcArrayLock);
+ }
+
+ ctx->slot = slot;
+
+ ctx->reader = XLogReaderAllocate(wal_segment_size, NULL, xl_routine, ctx);
+ if (!ctx->reader)
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
+
+ ctx->reorder = ReorderBufferAllocate();
+ ctx->snapshot_builder =
+ AllocateSnapshotBuilder(ctx->reorder, xmin_horizon, start_lsn,
+ need_full_snapshot);
+
+ ctx->reorder->private_data = ctx;
+
+ /* wrap output plugin callbacks, so we can add error context information */
+ ctx->reorder->begin = begin_cb_wrapper;
+ ctx->reorder->apply_change = change_cb_wrapper;
+ ctx->reorder->apply_truncate = truncate_cb_wrapper;
+ ctx->reorder->commit = commit_cb_wrapper;
+ ctx->reorder->message = message_cb_wrapper;
+
+ ctx->out = makeStringInfo();
+ ctx->prepare_write = prepare_write;
+ ctx->write = do_write;
+ ctx->update_progress = update_progress;
+
+ ctx->output_plugin_options = output_plugin_options;
+
+ ctx->fast_forward = fast_forward;
+
+ MemoryContextSwitchTo(old_context);
+
+ return ctx;
+}
+
+/*
+ * Create a new decoding context, for a new logical slot.
+ *
+ * plugin -- contains the name of the output plugin
+ * output_plugin_options -- contains options passed to the output plugin
+ * need_full_snapshot -- if true, must obtain a snapshot able to read all
+ * tables; if false, one that can read only catalogs is acceptable.
+ * restart_lsn -- if given as invalid, it's this routine's responsibility to
+ *	mark WAL as reserved by setting a convenient restart_lsn for the slot.
+ *	Otherwise, decoding is set up to start from the given LSN without
+ *	marking WAL as reserved beforehand. In that scenario, it's up to the
+ *	caller to guarantee that WAL remains available.
+ * xl_routine -- XLogReaderRoutine for underlying XLogReader
+ * prepare_write, do_write, update_progress --
+ * callbacks that perform the use-case dependent, actual, work.
+ *
+ * Needs to be called while in a memory context that's at least as long lived
+ * as the decoding context because further memory contexts will be created
+ * inside it.
+ *
+ * Returns an initialized decoding context after calling the output plugin's
+ * startup function.
+ */
+LogicalDecodingContext *
+CreateInitDecodingContext(char *plugin,
+ List *output_plugin_options,
+ bool need_full_snapshot,
+ XLogRecPtr restart_lsn,
+ XLogReaderRoutine *xl_routine,
+ LogicalOutputPluginWriterPrepareWrite prepare_write,
+ LogicalOutputPluginWriterWrite do_write,
+ LogicalOutputPluginWriterUpdateProgress update_progress)
+{
+ TransactionId xmin_horizon = InvalidTransactionId;
+ ReplicationSlot *slot;
+ LogicalDecodingContext *ctx;
+ MemoryContext old_context;
+
+ /* shorter lines... */
+ slot = MyReplicationSlot;
+
+ /* first some sanity checks that are unlikely to be violated */
+ if (slot == NULL)
+ elog(ERROR, "cannot perform logical decoding without an acquired slot");
+
+ if (plugin == NULL)
+ elog(ERROR, "cannot initialize logical decoding without a specified plugin");
+
+ /* Make sure the passed slot is suitable. These are user facing errors. */
+ if (SlotIsPhysical(slot))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot use physical replication slot for logical decoding")));
+
+ if (slot->data.database != MyDatabaseId)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("replication slot \"%s\" was not created in this database",
+ NameStr(slot->data.name))));
+
+ if (IsTransactionState() &&
+ GetTopTransactionIdIfAny() != InvalidTransactionId)
+ ereport(ERROR,
+ (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+ errmsg("cannot create logical replication slot in transaction that has performed writes")));
+
+ /* register output plugin name with slot */
+ SpinLockAcquire(&slot->mutex);
+ StrNCpy(NameStr(slot->data.plugin), plugin, NAMEDATALEN);
+ SpinLockRelease(&slot->mutex);
+
+ if (XLogRecPtrIsInvalid(restart_lsn))
+ ReplicationSlotReserveWal();
+ else
+ {
+ SpinLockAcquire(&slot->mutex);
+ slot->data.restart_lsn = restart_lsn;
+ SpinLockRelease(&slot->mutex);
+ }
+
+ /* ----
+ * This is a bit tricky: We need to determine a safe xmin horizon to start
+ * decoding from, to avoid starting from a running xacts record referring
+ * to xids whose rows have been vacuumed or pruned
+ * already. GetOldestSafeDecodingTransactionId() returns such a value, but
+ * without further interlock its return value might immediately be out of
+ * date.
+ *
+ * So we have to acquire the ProcArrayLock to prevent computation of new
+ * xmin horizons by other backends, get the safe decoding xid, and inform
+ * the slot machinery about the new limit. Once that's done the
+ * ProcArrayLock can be released as the slot machinery now is
+ * protecting against vacuum.
+ *
+ * Note that, temporarily, the data, not just the catalog, xmin has to be
+ * reserved if a data snapshot is to be exported. Otherwise the initial
+ * data snapshot created here is not guaranteed to be valid. After that
+ * the data xmin doesn't need to be managed anymore and the global xmin
+ * should be recomputed. As we are fine with losing the pegged data xmin
+ * after crash - no chance a snapshot would get exported anymore - we can
+ * get away with just setting the slot's
+ * effective_xmin. ReplicationSlotRelease will reset it again.
+ *
+ * ----
+ */
+ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+
+ xmin_horizon = GetOldestSafeDecodingTransactionId(!need_full_snapshot);
+
+ SpinLockAcquire(&slot->mutex);
+ slot->effective_catalog_xmin = xmin_horizon;
+ slot->data.catalog_xmin = xmin_horizon;
+ if (need_full_snapshot)
+ slot->effective_xmin = xmin_horizon;
+ SpinLockRelease(&slot->mutex);
+
+ ReplicationSlotsComputeRequiredXmin(true);
+
+ LWLockRelease(ProcArrayLock);
+
+ ReplicationSlotMarkDirty();
+ ReplicationSlotSave();
+
+ ctx = StartupDecodingContext(NIL, restart_lsn, xmin_horizon,
+ need_full_snapshot, false,
+ xl_routine, prepare_write, do_write,
+ update_progress);
+
+ /* call output plugin initialization callback */
+ old_context = MemoryContextSwitchTo(ctx->context);
+ if (ctx->callbacks.startup_cb != NULL)
+ startup_cb_wrapper(ctx, &ctx->options, true);
+ MemoryContextSwitchTo(old_context);
+
+ ctx->reorder->output_rewrites = ctx->options.receive_rewrites;
+
+ return ctx;
+}
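+
+/*
+ * Illustrative call sequence for creating a slot's decoding context and
+ * finding its start point, roughly as the walsender and the SQL slot
+ * functions do it (a sketch only; error handling and the concrete writer
+ * callbacks are elided):
+ *
+ *	ctx = CreateInitDecodingContext("test_decoding", NIL, false,
+ *									InvalidXLogRecPtr, XL_ROUTINE(...),
+ *									prepare_write, do_write,
+ *									update_progress);
+ *	DecodingContextFindStartpoint(ctx);
+ *	FreeDecodingContext(ctx);
+ */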
+
+/*
+ * Create a new decoding context for a logical slot that has previously been
+ * used.
+ *
+ * start_lsn
+ * The LSN at which to start decoding. If InvalidXLogRecPtr, restart
+ * from the slot's confirmed_flush; otherwise, start from the specified
+ * location (but move it forwards to confirmed_flush if it's older than
+ * that, see below).
+ *
+ * output_plugin_options
+ * options passed to the output plugin.
+ *
+ * fast_forward
+ * bypass the generation of logical changes.
+ *
+ * xl_routine
+ * XLogReaderRoutine used by underlying xlogreader
+ *
+ * prepare_write, do_write, update_progress
+ * callbacks that have to be filled to perform the use-case dependent,
+ * actual work.
+ *
+ * Needs to be called while in a memory context that's at least as long lived
+ * as the decoding context because further memory contexts will be created
+ * inside it.
+ *
+ * Returns an initialized decoding context after calling the output plugin's
+ * startup function.
+ */
+LogicalDecodingContext *
+CreateDecodingContext(XLogRecPtr start_lsn,
+ List *output_plugin_options,
+ bool fast_forward,
+ XLogReaderRoutine *xl_routine,
+ LogicalOutputPluginWriterPrepareWrite prepare_write,
+ LogicalOutputPluginWriterWrite do_write,
+ LogicalOutputPluginWriterUpdateProgress update_progress)
+{
+ LogicalDecodingContext *ctx;
+ ReplicationSlot *slot;
+ MemoryContext old_context;
+
+ /* shorter lines... */
+ slot = MyReplicationSlot;
+
+ /* first some sanity checks that are unlikely to be violated */
+ if (slot == NULL)
+ elog(ERROR, "cannot perform logical decoding without an acquired slot");
+
+ /* make sure the passed slot is suitable, these are user facing errors */
+ if (SlotIsPhysical(slot))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot use physical replication slot for logical decoding")));
+
+ if (slot->data.database != MyDatabaseId)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("replication slot \"%s\" was not created in this database",
+ NameStr(slot->data.name))));
+
+ if (start_lsn == InvalidXLogRecPtr)
+ {
+ /* continue from last position */
+ start_lsn = slot->data.confirmed_flush;
+ }
+ else if (start_lsn < slot->data.confirmed_flush)
+ {
+		/*
+		 * It might seem like we should error out in this case, but it's
+		 * pretty common for a client to acknowledge an LSN it doesn't have
+		 * to do anything for, and thus didn't store persistently, because
+		 * the xlog records didn't result in anything relevant for logical
+		 * decoding. Clients have to be able to do that to support
+		 * synchronous replication.
+		 */
+ elog(DEBUG1, "cannot stream from %X/%X, minimum is %X/%X, forwarding",
+ (uint32) (start_lsn >> 32), (uint32) start_lsn,
+ (uint32) (slot->data.confirmed_flush >> 32),
+ (uint32) slot->data.confirmed_flush);
+
+ start_lsn = slot->data.confirmed_flush;
+ }
+
+ ctx = StartupDecodingContext(output_plugin_options,
+ start_lsn, InvalidTransactionId, false,
+ fast_forward, xl_routine, prepare_write,
+ do_write, update_progress);
+
+ /* call output plugin initialization callback */
+ old_context = MemoryContextSwitchTo(ctx->context);
+ if (ctx->callbacks.startup_cb != NULL)
+ startup_cb_wrapper(ctx, &ctx->options, false);
+ MemoryContextSwitchTo(old_context);
+
+ ctx->reorder->output_rewrites = ctx->options.receive_rewrites;
+
+ ereport(LOG,
+ (errmsg("starting logical decoding for slot \"%s\"",
+ NameStr(slot->data.name)),
+ errdetail("Streaming transactions committing after %X/%X, reading WAL from %X/%X.",
+ (uint32) (slot->data.confirmed_flush >> 32),
+ (uint32) slot->data.confirmed_flush,
+ (uint32) (slot->data.restart_lsn >> 32),
+ (uint32) slot->data.restart_lsn)));
+
+ return ctx;
+}
+
+/*
+ * Returns true if a consistent initial decoding snapshot has been built.
+ */
+bool
+DecodingContextReady(LogicalDecodingContext *ctx)
+{
+ return SnapBuildCurrentState(ctx->snapshot_builder) == SNAPBUILD_CONSISTENT;
+}
+
+/*
+ * Read from the decoding slot until it is ready to start extracting changes.
+ */
+void
+DecodingContextFindStartpoint(LogicalDecodingContext *ctx)
+{
+ ReplicationSlot *slot = ctx->slot;
+
+ /* Initialize from where to start reading WAL. */
+ XLogBeginRead(ctx->reader, slot->data.restart_lsn);
+
+ elog(DEBUG1, "searching for logical decoding starting point, starting at %X/%X",
+ (uint32) (slot->data.restart_lsn >> 32),
+ (uint32) slot->data.restart_lsn);
+
+ /* Wait for a consistent starting point */
+ for (;;)
+ {
+ XLogRecord *record;
+ char *err = NULL;
+
+ /* the read_page callback waits for new WAL */
+ record = XLogReadRecord(ctx->reader, &err);
+ if (err)
+ elog(ERROR, "%s", err);
+ if (!record)
+ elog(ERROR, "no record found"); /* shouldn't happen */
+
+ LogicalDecodingProcessRecord(ctx, ctx->reader);
+
+		/* only continue until we have found a consistent spot */
+ if (DecodingContextReady(ctx))
+ break;
+
+ CHECK_FOR_INTERRUPTS();
+ }
+
+ SpinLockAcquire(&slot->mutex);
+ slot->data.confirmed_flush = ctx->reader->EndRecPtr;
+ SpinLockRelease(&slot->mutex);
+}
+
+/*
+ * Free a previously allocated decoding context, invoking the shutdown
+ * callback if necessary.
+ */
+void
+FreeDecodingContext(LogicalDecodingContext *ctx)
+{
+ if (ctx->callbacks.shutdown_cb != NULL)
+ shutdown_cb_wrapper(ctx);
+
+ ReorderBufferFree(ctx->reorder);
+ FreeSnapshotBuilder(ctx->snapshot_builder);
+ XLogReaderFree(ctx->reader);
+ MemoryContextDelete(ctx->context);
+}
+
+/*
+ * Prepare a write using the context's output routine.
+ */
+void
+OutputPluginPrepareWrite(struct LogicalDecodingContext *ctx, bool last_write)
+{
+ if (!ctx->accept_writes)
+ elog(ERROR, "writes are only accepted in commit, begin and change callbacks");
+
+ ctx->prepare_write(ctx, ctx->write_location, ctx->write_xid, last_write);
+ ctx->prepared_write = true;
+}
+
+/*
+ * Perform a write using the context's output routine.
+ */
+void
+OutputPluginWrite(struct LogicalDecodingContext *ctx, bool last_write)
+{
+ if (!ctx->prepared_write)
+ elog(ERROR, "OutputPluginPrepareWrite needs to be called before OutputPluginWrite");
+
+ ctx->write(ctx, ctx->write_location, ctx->write_xid, last_write);
+ ctx->prepared_write = false;
+}
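+
+/*
+ * Sketch of how an output plugin typically uses the two functions above
+ * from inside one of its callbacks, in the style of contrib/test_decoding
+ * (the string written here is merely an example payload):
+ *
+ *	OutputPluginPrepareWrite(ctx, true);
+ *	appendStringInfoString(ctx->out, "BEGIN");
+ *	OutputPluginWrite(ctx, true);
+ */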
+
+/*
+ * Update progress tracking (if supported).
+ */
+void
+OutputPluginUpdateProgress(struct LogicalDecodingContext *ctx)
+{
+ if (!ctx->update_progress)
+ return;
+
+ ctx->update_progress(ctx, ctx->write_location, ctx->write_xid);
+}
+
+/*
+ * Load the output plugin, lookup its output plugin init function, and check
+ * that it provides the required callbacks.
+ */
+static void
+LoadOutputPlugin(OutputPluginCallbacks *callbacks, char *plugin)
+{
+ LogicalOutputPluginInit plugin_init;
+
+ plugin_init = (LogicalOutputPluginInit)
+ load_external_function(plugin, "_PG_output_plugin_init", false, NULL);
+
+ if (plugin_init == NULL)
+ elog(ERROR, "output plugins have to declare the _PG_output_plugin_init symbol");
+
+ /* ask the output plugin to fill the callback struct */
+ plugin_init(callbacks);
+
+ if (callbacks->begin_cb == NULL)
+ elog(ERROR, "output plugins have to register a begin callback");
+ if (callbacks->change_cb == NULL)
+ elog(ERROR, "output plugins have to register a change callback");
+ if (callbacks->commit_cb == NULL)
+ elog(ERROR, "output plugins have to register a commit callback");
+}
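+
+/*
+ * For reference, a minimal init function satisfying the checks above might
+ * look like this (my_begin/my_change/my_commit are hypothetical callback
+ * names; a sketch, not a complete plugin):
+ *
+ *	void
+ *	_PG_output_plugin_init(OutputPluginCallbacks *cb)
+ *	{
+ *		cb->begin_cb = my_begin;
+ *		cb->change_cb = my_change;
+ *		cb->commit_cb = my_commit;
+ *	}
+ */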
+
+static void
+output_plugin_error_callback(void *arg)
+{
+ LogicalErrorCallbackState *state = (LogicalErrorCallbackState *) arg;
+
+ /* not all callbacks have an associated LSN */
+ if (state->report_location != InvalidXLogRecPtr)
+ errcontext("slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%X",
+ NameStr(state->ctx->slot->data.name),
+ NameStr(state->ctx->slot->data.plugin),
+ state->callback_name,
+ (uint32) (state->report_location >> 32),
+ (uint32) state->report_location);
+ else
+ errcontext("slot \"%s\", output plugin \"%s\", in the %s callback",
+ NameStr(state->ctx->slot->data.name),
+ NameStr(state->ctx->slot->data.plugin),
+ state->callback_name);
+}
+
+static void
+startup_cb_wrapper(LogicalDecodingContext *ctx, OutputPluginOptions *opt, bool is_init)
+{
+ LogicalErrorCallbackState state;
+ ErrorContextCallback errcallback;
+
+ Assert(!ctx->fast_forward);
+
+ /* Push callback + info on the error context stack */
+ state.ctx = ctx;
+ state.callback_name = "startup";
+ state.report_location = InvalidXLogRecPtr;
+ errcallback.callback = output_plugin_error_callback;
+ errcallback.arg = (void *) &state;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* set output state */
+ ctx->accept_writes = false;
+
+ /* do the actual work: call callback */
+ ctx->callbacks.startup_cb(ctx, opt, is_init);
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+}
+
+static void
+shutdown_cb_wrapper(LogicalDecodingContext *ctx)
+{
+ LogicalErrorCallbackState state;
+ ErrorContextCallback errcallback;
+
+ Assert(!ctx->fast_forward);
+
+ /* Push callback + info on the error context stack */
+ state.ctx = ctx;
+ state.callback_name = "shutdown";
+ state.report_location = InvalidXLogRecPtr;
+ errcallback.callback = output_plugin_error_callback;
+ errcallback.arg = (void *) &state;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* set output state */
+ ctx->accept_writes = false;
+
+ /* do the actual work: call callback */
+ ctx->callbacks.shutdown_cb(ctx);
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+}
+
+
+/*
+ * Callbacks for the ReorderBuffer that add some more information and then
+ * call the corresponding output_plugin.h callbacks.
+ */
+static void
+begin_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn)
+{
+ LogicalDecodingContext *ctx = cache->private_data;
+ LogicalErrorCallbackState state;
+ ErrorContextCallback errcallback;
+
+ Assert(!ctx->fast_forward);
+
+ /* Push callback + info on the error context stack */
+ state.ctx = ctx;
+ state.callback_name = "begin";
+ state.report_location = txn->first_lsn;
+ errcallback.callback = output_plugin_error_callback;
+ errcallback.arg = (void *) &state;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* set output state */
+ ctx->accept_writes = true;
+ ctx->write_xid = txn->xid;
+ ctx->write_location = txn->first_lsn;
+
+ /* do the actual work: call callback */
+ ctx->callbacks.begin_cb(ctx, txn);
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+}
+
+static void
+commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
+ XLogRecPtr commit_lsn)
+{
+ LogicalDecodingContext *ctx = cache->private_data;
+ LogicalErrorCallbackState state;
+ ErrorContextCallback errcallback;
+
+ Assert(!ctx->fast_forward);
+
+ /* Push callback + info on the error context stack */
+ state.ctx = ctx;
+ state.callback_name = "commit";
+ state.report_location = txn->final_lsn; /* beginning of commit record */
+ errcallback.callback = output_plugin_error_callback;
+ errcallback.arg = (void *) &state;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* set output state */
+ ctx->accept_writes = true;
+ ctx->write_xid = txn->xid;
+ ctx->write_location = txn->end_lsn; /* points to the end of the record */
+
+ /* do the actual work: call callback */
+ ctx->callbacks.commit_cb(ctx, txn, commit_lsn);
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+}
+
+static void
+change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
+ Relation relation, ReorderBufferChange *change)
+{
+ LogicalDecodingContext *ctx = cache->private_data;
+ LogicalErrorCallbackState state;
+ ErrorContextCallback errcallback;
+
+ Assert(!ctx->fast_forward);
+
+ /* Push callback + info on the error context stack */
+ state.ctx = ctx;
+ state.callback_name = "change";
+ state.report_location = change->lsn;
+ errcallback.callback = output_plugin_error_callback;
+ errcallback.arg = (void *) &state;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* set output state */
+ ctx->accept_writes = true;
+ ctx->write_xid = txn->xid;
+
+	/*
+	 * Report this change's lsn so replies from clients can give an
+	 * up-to-date answer. This won't ever be enough (and shouldn't be!) to
+	 * confirm receipt of this transaction, but it might allow another
+	 * transaction's commit to be confirmed with one message.
+	 */
+ ctx->write_location = change->lsn;
+
+ ctx->callbacks.change_cb(ctx, txn, relation, change);
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+}
+
+static void
+truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
+ int nrelations, Relation relations[], ReorderBufferChange *change)
+{
+ LogicalDecodingContext *ctx = cache->private_data;
+ LogicalErrorCallbackState state;
+ ErrorContextCallback errcallback;
+
+ Assert(!ctx->fast_forward);
+
+ if (!ctx->callbacks.truncate_cb)
+ return;
+
+ /* Push callback + info on the error context stack */
+ state.ctx = ctx;
+ state.callback_name = "truncate";
+ state.report_location = change->lsn;
+ errcallback.callback = output_plugin_error_callback;
+ errcallback.arg = (void *) &state;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* set output state */
+ ctx->accept_writes = true;
+ ctx->write_xid = txn->xid;
+
+	/*
+	 * Report this change's lsn so replies from clients can give an
+	 * up-to-date answer. This won't ever be enough (and shouldn't be!) to
+	 * confirm receipt of this transaction, but it might allow another
+	 * transaction's commit to be confirmed with one message.
+	 */
+ ctx->write_location = change->lsn;
+
+ ctx->callbacks.truncate_cb(ctx, txn, nrelations, relations, change);
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+}
+
+bool
+filter_by_origin_cb_wrapper(LogicalDecodingContext *ctx, RepOriginId origin_id)
+{
+ LogicalErrorCallbackState state;
+ ErrorContextCallback errcallback;
+ bool ret;
+
+ Assert(!ctx->fast_forward);
+
+ /* Push callback + info on the error context stack */
+ state.ctx = ctx;
+ state.callback_name = "filter_by_origin";
+ state.report_location = InvalidXLogRecPtr;
+ errcallback.callback = output_plugin_error_callback;
+ errcallback.arg = (void *) &state;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* set output state */
+ ctx->accept_writes = false;
+
+ /* do the actual work: call callback */
+ ret = ctx->callbacks.filter_by_origin_cb(ctx, origin_id);
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+
+ return ret;
+}
+
+static void
+message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
+ XLogRecPtr message_lsn, bool transactional,
+ const char *prefix, Size message_size, const char *message)
+{
+ LogicalDecodingContext *ctx = cache->private_data;
+ LogicalErrorCallbackState state;
+ ErrorContextCallback errcallback;
+
+ Assert(!ctx->fast_forward);
+
+ if (ctx->callbacks.message_cb == NULL)
+ return;
+
+ /* Push callback + info on the error context stack */
+ state.ctx = ctx;
+ state.callback_name = "message";
+ state.report_location = message_lsn;
+ errcallback.callback = output_plugin_error_callback;
+ errcallback.arg = (void *) &state;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* set output state */
+ ctx->accept_writes = true;
+ ctx->write_xid = txn != NULL ? txn->xid : InvalidTransactionId;
+ ctx->write_location = message_lsn;
+
+ /* do the actual work: call callback */
+ ctx->callbacks.message_cb(ctx, txn, message_lsn, transactional, prefix,
+ message_size, message);
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+}
+
+/*
+ * Set the required catalog xmin horizon for historic snapshots in the current
+ * replication slot.
+ *
+ * Note that in most cases, we won't be able to immediately use the xmin to
+ * increase the xmin horizon: we need to wait until the client has confirmed
+ * receiving current_lsn with LogicalConfirmReceivedLocation().
+ */
+void
+LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin)
+{
+ bool updated_xmin = false;
+ ReplicationSlot *slot;
+
+ slot = MyReplicationSlot;
+
+ Assert(slot != NULL);
+
+ SpinLockAcquire(&slot->mutex);
+
+ /*
+ * don't overwrite if we already have a newer xmin. This can happen if we
+ * restart decoding in a slot.
+ */
+ if (TransactionIdPrecedesOrEquals(xmin, slot->data.catalog_xmin))
+ {
+ }
+
+	/*
+	 * If the client has already confirmed up to this lsn, we can directly
+	 * mark this as accepted. This can happen if we restart decoding in a
+	 * slot.
+	 */
+ else if (current_lsn <= slot->data.confirmed_flush)
+ {
+ slot->candidate_catalog_xmin = xmin;
+ slot->candidate_xmin_lsn = current_lsn;
+
+ /* our candidate can directly be used */
+ updated_xmin = true;
+ }
+
+ /*
+ * Only increase if the previous values have been applied, otherwise we
+ * might never end up updating if the receiver acks too slowly.
+ */
+ else if (slot->candidate_xmin_lsn == InvalidXLogRecPtr)
+ {
+ slot->candidate_catalog_xmin = xmin;
+ slot->candidate_xmin_lsn = current_lsn;
+ }
+ SpinLockRelease(&slot->mutex);
+
+ /* candidate already valid with the current flush position, apply */
+ if (updated_xmin)
+ LogicalConfirmReceivedLocation(slot->data.confirmed_flush);
+}
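+
+/*
+ * Worked example with hypothetical values: if xmin 1000 is proposed at
+ * current_lsn 0/2000000 while confirmed_flush is only 0/1000000, the pair is
+ * merely remembered as a candidate; catalog_xmin advances to 1000 once the
+ * client confirms 0/2000000 or later via LogicalConfirmReceivedLocation().
+ */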
+
+/*
+ * Mark the minimal LSN (restart_lsn) we need to read to replay all
+ * transactions that have not yet committed at current_lsn.
+ *
+ * Just like LogicalIncreaseXminForSlot this only takes effect when the
+ * client has confirmed to have received current_lsn.
+ */
+void
+LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn, XLogRecPtr restart_lsn)
+{
+ bool updated_lsn = false;
+ ReplicationSlot *slot;
+
+ slot = MyReplicationSlot;
+
+ Assert(slot != NULL);
+ Assert(restart_lsn != InvalidXLogRecPtr);
+ Assert(current_lsn != InvalidXLogRecPtr);
+
+ SpinLockAcquire(&slot->mutex);
+
+	/* don't overwrite if we already have a newer restart lsn */
+	if (restart_lsn <= slot->data.restart_lsn)
+	{
+		SpinLockRelease(&slot->mutex);
+	}
+
+	/*
+	 * We might have already flushed far enough to directly accept this lsn,
+	 * in which case there is no need to check for existing candidate LSNs.
+	 */
+	else if (current_lsn <= slot->data.confirmed_flush)
+	{
+		slot->candidate_restart_valid = current_lsn;
+		slot->candidate_restart_lsn = restart_lsn;
+		SpinLockRelease(&slot->mutex);
+
+		/* our candidate can directly be used */
+		updated_lsn = true;
+	}
+
+	/*
+	 * Only increase if the previous values have been applied, otherwise we
+	 * might never end up updating if the receiver acks too slowly. A missed
+	 * value here will just cause some extra effort after reconnecting. This
+	 * has to be an "else" branch: otherwise a proposed restart_lsn that is
+	 * older than the slot's current restart_lsn could be installed as a
+	 * candidate.
+	 */
+	else if (slot->candidate_restart_valid == InvalidXLogRecPtr)
+	{
+		slot->candidate_restart_valid = current_lsn;
+		slot->candidate_restart_lsn = restart_lsn;
+		SpinLockRelease(&slot->mutex);
+
+		elog(DEBUG1, "got new restart lsn %X/%X at %X/%X",
+			 (uint32) (restart_lsn >> 32), (uint32) restart_lsn,
+			 (uint32) (current_lsn >> 32), (uint32) current_lsn);
+	}
+ else
+ {
+ XLogRecPtr candidate_restart_lsn;
+ XLogRecPtr candidate_restart_valid;
+ XLogRecPtr confirmed_flush;
+
+ candidate_restart_lsn = slot->candidate_restart_lsn;
+ candidate_restart_valid = slot->candidate_restart_valid;
+ confirmed_flush = slot->data.confirmed_flush;
+ SpinLockRelease(&slot->mutex);
+
+ elog(DEBUG1, "failed to increase restart lsn: proposed %X/%X, after %X/%X, current candidate %X/%X, current after %X/%X, flushed up to %X/%X",
+ (uint32) (restart_lsn >> 32), (uint32) restart_lsn,
+ (uint32) (current_lsn >> 32), (uint32) current_lsn,
+ (uint32) (candidate_restart_lsn >> 32),
+ (uint32) candidate_restart_lsn,
+ (uint32) (candidate_restart_valid >> 32),
+ (uint32) candidate_restart_valid,
+ (uint32) (confirmed_flush >> 32),
+ (uint32) confirmed_flush);
+ }
+
+ /* candidates are already valid with the current flush position, apply */
+ if (updated_lsn)
+ LogicalConfirmReceivedLocation(slot->data.confirmed_flush);
+}
+
+/*
+ * Handle a consumer's confirmation having received all changes up to lsn.
+ */
+void
+LogicalConfirmReceivedLocation(XLogRecPtr lsn)
+{
+ Assert(lsn != InvalidXLogRecPtr);
+
+ /* Do an unlocked check for candidate_lsn first. */
+ if (MyReplicationSlot->candidate_xmin_lsn != InvalidXLogRecPtr ||
+ MyReplicationSlot->candidate_restart_valid != InvalidXLogRecPtr)
+ {
+ bool updated_xmin = false;
+ bool updated_restart = false;
+
+ SpinLockAcquire(&MyReplicationSlot->mutex);
+
+ MyReplicationSlot->data.confirmed_flush = lsn;
+
+ /* if we're past the location required for bumping xmin, do so */
+ if (MyReplicationSlot->candidate_xmin_lsn != InvalidXLogRecPtr &&
+ MyReplicationSlot->candidate_xmin_lsn <= lsn)
+ {
+ /*
+ * We have to write the changed xmin to disk *before* we change
+ * the in-memory value, otherwise after a crash we wouldn't know
+ * that some catalog tuples might have been removed already.
+ *
+ * Ensure that by first writing to ->xmin and only update
+ * ->effective_xmin once the new state is synced to disk. After a
+ * crash ->effective_xmin is set to ->xmin.
+ */
+ if (TransactionIdIsValid(MyReplicationSlot->candidate_catalog_xmin) &&
+ MyReplicationSlot->data.catalog_xmin != MyReplicationSlot->candidate_catalog_xmin)
+ {
+ MyReplicationSlot->data.catalog_xmin = MyReplicationSlot->candidate_catalog_xmin;
+ MyReplicationSlot->candidate_catalog_xmin = InvalidTransactionId;
+ MyReplicationSlot->candidate_xmin_lsn = InvalidXLogRecPtr;
+ updated_xmin = true;
+ }
+ }
+
+ if (MyReplicationSlot->candidate_restart_valid != InvalidXLogRecPtr &&
+ MyReplicationSlot->candidate_restart_valid <= lsn)
+ {
+ Assert(MyReplicationSlot->candidate_restart_lsn != InvalidXLogRecPtr);
+
+ MyReplicationSlot->data.restart_lsn = MyReplicationSlot->candidate_restart_lsn;
+ MyReplicationSlot->candidate_restart_lsn = InvalidXLogRecPtr;
+ MyReplicationSlot->candidate_restart_valid = InvalidXLogRecPtr;
+ updated_restart = true;
+ }
+
+ SpinLockRelease(&MyReplicationSlot->mutex);
+
+ /* first write new xmin to disk, so we know what's up after a crash */
+ if (updated_xmin || updated_restart)
+ {
+ ReplicationSlotMarkDirty();
+ ReplicationSlotSave();
+ elog(DEBUG1, "updated xmin: %u restart: %u", updated_xmin, updated_restart);
+ }
+
+ /*
+ * Now the new xmin is safely on disk, we can let the global value
+ * advance. We do not take ProcArrayLock or similar since we only
+ * advance xmin here and there's not much harm done by a concurrent
+ * computation missing that.
+ */
+ if (updated_xmin)
+ {
+ SpinLockAcquire(&MyReplicationSlot->mutex);
+ MyReplicationSlot->effective_catalog_xmin = MyReplicationSlot->data.catalog_xmin;
+ SpinLockRelease(&MyReplicationSlot->mutex);
+
+ ReplicationSlotsComputeRequiredXmin(false);
+ ReplicationSlotsComputeRequiredLSN();
+ }
+ }
+ else
+ {
+ SpinLockAcquire(&MyReplicationSlot->mutex);
+ MyReplicationSlot->data.confirmed_flush = lsn;
+ SpinLockRelease(&MyReplicationSlot->mutex);
+ }
+}
diff --git a/src/backend/replication/logical/logicalfuncs.c b/src/backend/replication/logical/logicalfuncs.c
new file mode 100644
index 0000000..b99c94e
--- /dev/null
+++ b/src/backend/replication/logical/logicalfuncs.c
@@ -0,0 +1,417 @@
+/*-------------------------------------------------------------------------
+ *
+ * logicalfuncs.c
+ *
+ * Support functions for using logical decoding and management of
+ * logical replication slots via SQL.
+ *
+ *
+ * Copyright (c) 2012-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/logicalfuncs.c
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include <unistd.h>
+
+#include "access/xact.h"
+#include "access/xlog_internal.h"
+#include "access/xlogutils.h"
+#include "catalog/pg_type.h"
+#include "fmgr.h"
+#include "funcapi.h"
+#include "mb/pg_wchar.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "replication/decode.h"
+#include "replication/logical.h"
+#include "replication/message.h"
+#include "storage/fd.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/inval.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/pg_lsn.h"
+#include "utils/regproc.h"
+#include "utils/resowner.h"
+
+/* private data for writing out data */
+typedef struct DecodingOutputState
+{
+ Tuplestorestate *tupstore;
+ TupleDesc tupdesc;
+ bool binary_output;
+ int64 returned_rows;
+} DecodingOutputState;
+
+/*
+ * Prepare for an output plugin write.
+ */
+static void
+LogicalOutputPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
+ bool last_write)
+{
+ resetStringInfo(ctx->out);
+}
+
+/*
+ * Perform output plugin write into tuplestore.
+ */
+static void
+LogicalOutputWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
+ bool last_write)
+{
+ Datum values[3];
+ bool nulls[3];
+ DecodingOutputState *p;
+
+ /* SQL Datums can only be of a limited length... */
+ if (ctx->out->len > MaxAllocSize - VARHDRSZ)
+ elog(ERROR, "too much output for sql interface");
+
+ p = (DecodingOutputState *) ctx->output_writer_private;
+
+ memset(nulls, 0, sizeof(nulls));
+ values[0] = LSNGetDatum(lsn);
+ values[1] = TransactionIdGetDatum(xid);
+
+ /*
+ * Assert ctx->out is in database encoding when we're writing textual
+ * output.
+ */
+ if (!p->binary_output)
+ Assert(pg_verify_mbstr(GetDatabaseEncoding(),
+ ctx->out->data, ctx->out->len,
+ false));
+
+ /* ick, but cstring_to_text_with_len works for bytea perfectly fine */
+ values[2] = PointerGetDatum(cstring_to_text_with_len(ctx->out->data, ctx->out->len));
+
+ tuplestore_putvalues(p->tupstore, p->tupdesc, values, nulls);
+ p->returned_rows++;
+}
+
+static void
+check_permissions(void)
+{
+ if (!superuser() && !has_rolreplication(GetUserId()))
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("must be superuser or replication role to use replication slots")));
+}
+
+/*
+ * Helper function for the various SQL callable logical decoding functions.
+ */
+static Datum
+pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool binary)
+{
+ Name name;
+ XLogRecPtr upto_lsn;
+ int32 upto_nchanges;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ XLogRecPtr end_of_wal;
+ LogicalDecodingContext *ctx;
+ ResourceOwner old_resowner = CurrentResourceOwner;
+ ArrayType *arr;
+ Size ndim;
+ List *options = NIL;
+ DecodingOutputState *p;
+
+ check_permissions();
+
+ CheckLogicalDecodingRequirements();
+
+ if (PG_ARGISNULL(0))
+ ereport(ERROR,
+ (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
+ errmsg("slot name must not be null")));
+ name = PG_GETARG_NAME(0);
+
+ if (PG_ARGISNULL(1))
+ upto_lsn = InvalidXLogRecPtr;
+ else
+ upto_lsn = PG_GETARG_LSN(1);
+
+	if (PG_ARGISNULL(2))
+		upto_nchanges = 0;		/* 0 means no limit, see the loop below */
+	else
+		upto_nchanges = PG_GETARG_INT32(2);
+
+ if (PG_ARGISNULL(3))
+ ereport(ERROR,
+ (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
+ errmsg("options array must not be null")));
+ arr = PG_GETARG_ARRAYTYPE_P(3);
+
+ /* check to see if caller supports us returning a tuplestore */
+ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-valued function called in context that cannot accept a set")));
+ if (!(rsinfo->allowedModes & SFRM_Materialize))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("materialize mode required, but it is not allowed in this context")));
+
+ /* state to write output to */
+ p = palloc0(sizeof(DecodingOutputState));
+
+ p->binary_output = binary;
+
+ /* Build a tuple descriptor for our result type */
+ if (get_call_result_type(fcinfo, NULL, &p->tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+
+ /* Deconstruct options array */
+ ndim = ARR_NDIM(arr);
+ if (ndim > 1)
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("array must be one-dimensional")));
+ }
+ else if (array_contains_nulls(arr))
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("array must not contain nulls")));
+ }
+ else if (ndim == 1)
+ {
+ int nelems;
+ Datum *datum_opts;
+ int i;
+
+ Assert(ARR_ELEMTYPE(arr) == TEXTOID);
+
+ deconstruct_array(arr, TEXTOID, -1, false, TYPALIGN_INT,
+ &datum_opts, NULL, &nelems);
+
+ if (nelems % 2 != 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("array must have even number of elements")));
+
+ for (i = 0; i < nelems; i += 2)
+ {
+ char *name = TextDatumGetCString(datum_opts[i]);
+ char *opt = TextDatumGetCString(datum_opts[i + 1]);
+
+ options = lappend(options, makeDefElem(name, (Node *) makeString(opt), -1));
+ }
+ }
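+
+	/*
+	 * For example, an options array such as
+	 * ARRAY['include-xids', '0', 'skip-empty-xacts', '1'] (option names the
+	 * test_decoding plugin happens to understand) is turned into two
+	 * DefElems by the loop above; which names are valid is entirely up to
+	 * the output plugin.
+	 */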
+
+ p->tupstore = tuplestore_begin_heap(true, false, work_mem);
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setResult = p->tupstore;
+ rsinfo->setDesc = p->tupdesc;
+
+ /*
+ * Compute the current end-of-wal and maintain ThisTimeLineID.
+ * RecoveryInProgress() will update ThisTimeLineID on promotion.
+ */
+ if (!RecoveryInProgress())
+ end_of_wal = GetFlushRecPtr();
+ else
+ end_of_wal = GetXLogReplayRecPtr(&ThisTimeLineID);
+
+ (void) ReplicationSlotAcquire(NameStr(*name), SAB_Error);
+
+ PG_TRY();
+ {
+ /* restart at slot's confirmed_flush */
+ ctx = CreateDecodingContext(InvalidXLogRecPtr,
+ options,
+ false,
+ XL_ROUTINE(.page_read = read_local_xlog_page,
+ .segment_open = wal_segment_open,
+ .segment_close = wal_segment_close),
+ LogicalOutputPrepareWrite,
+ LogicalOutputWrite, NULL);
+
+ /*
+ * After the sanity checks in CreateDecodingContext, make sure the
+ * restart_lsn is valid. Avoid "cannot get changes" wording in this
+ * errmsg because that'd be confusingly ambiguous about no changes
+ * being available.
+ */
+ if (XLogRecPtrIsInvalid(MyReplicationSlot->data.restart_lsn))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("can no longer get changes from replication slot \"%s\"",
+ NameStr(*name)),
+ errdetail("This slot has never previously reserved WAL, or has been invalidated.")));
+
+ MemoryContextSwitchTo(oldcontext);
+
+ /*
+ * Check whether the output plugin writes textual output if that's
+ * what we need.
+ */
+		if (!binary &&
+			ctx->options.output_type != OUTPUT_PLUGIN_TEXTUAL_OUTPUT)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("logical decoding output plugin \"%s\" produces binary output, but function \"%s\" expects textual data",
+ NameStr(MyReplicationSlot->data.plugin),
+ format_procedure(fcinfo->flinfo->fn_oid))));
+
+ ctx->output_writer_private = p;
+
+ /*
+ * Decoding of WAL must start at restart_lsn so that the entirety of
+ * xacts that committed after the slot's confirmed_flush can be
+ * accumulated into reorder buffers.
+ */
+ XLogBeginRead(ctx->reader, MyReplicationSlot->data.restart_lsn);
+
+ /* invalidate non-timetravel entries */
+ InvalidateSystemCaches();
+
+ /* Decode until we run out of records */
+ while (ctx->reader->EndRecPtr < end_of_wal)
+ {
+ XLogRecord *record;
+ char *errm = NULL;
+
+ record = XLogReadRecord(ctx->reader, &errm);
+ if (errm)
+ elog(ERROR, "%s", errm);
+
+ /*
+ * The {begin_txn,change,commit_txn}_wrapper callbacks above will
+ * store the description into our tuplestore.
+ */
+ if (record != NULL)
+ LogicalDecodingProcessRecord(ctx, ctx->reader);
+
+ /* check limits */
+ if (upto_lsn != InvalidXLogRecPtr &&
+ upto_lsn <= ctx->reader->EndRecPtr)
+ break;
+ if (upto_nchanges != 0 &&
+ upto_nchanges <= p->returned_rows)
+ break;
+ CHECK_FOR_INTERRUPTS();
+ }
+
+		tuplestore_donestoring(p->tupstore);
+
+ /*
+ * Logical decoding could have clobbered CurrentResourceOwner during
+ * transaction management, so restore the executor's value. (This is
+ * a kluge, but it's not worth cleaning up right now.)
+ */
+ CurrentResourceOwner = old_resowner;
+
+ /*
+ * Next time, start where we left off. (Hunting things, the family
+ * business..)
+ */
+ if (ctx->reader->EndRecPtr != InvalidXLogRecPtr && confirm)
+ {
+ LogicalConfirmReceivedLocation(ctx->reader->EndRecPtr);
+
+ /*
+			 * If only the confirmed_flush_lsn has changed, the slot won't
+			 * get marked as dirty by the above. Callers on the walsender
+ * interface are expected to keep track of their own progress and
+ * don't need it written out. But SQL-interface users cannot
+ * specify their own start positions and it's harder for them to
+ * keep track of their progress, so we should make more of an
+ * effort to save it for them.
+ *
+ * Dirty the slot so it's written out at the next checkpoint.
+ * We'll still lose its position on crash, as documented, but it's
+ * better than always losing the position even on clean restart.
+ */
+ ReplicationSlotMarkDirty();
+ }
+
+ /* free context, call shutdown callback */
+ FreeDecodingContext(ctx);
+
+ ReplicationSlotRelease();
+ InvalidateSystemCaches();
+ }
+ PG_CATCH();
+ {
+ /* clear all timetravel entries */
+ InvalidateSystemCaches();
+
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ return (Datum) 0;
+}
+
+/*
+ * SQL function returning the changestream as text, consuming the data.
+ */
+Datum
+pg_logical_slot_get_changes(PG_FUNCTION_ARGS)
+{
+ return pg_logical_slot_get_changes_guts(fcinfo, true, false);
+}
+
+/*
+ * SQL function returning the changestream as text, only peeking ahead.
+ */
+Datum
+pg_logical_slot_peek_changes(PG_FUNCTION_ARGS)
+{
+ return pg_logical_slot_get_changes_guts(fcinfo, false, false);
+}
+
+/*
+ * SQL function returning the changestream in binary, consuming the data.
+ */
+Datum
+pg_logical_slot_get_binary_changes(PG_FUNCTION_ARGS)
+{
+ return pg_logical_slot_get_changes_guts(fcinfo, true, true);
+}
+
+/*
+ * SQL function returning the changestream in binary, only peeking ahead.
+ */
+Datum
+pg_logical_slot_peek_binary_changes(PG_FUNCTION_ARGS)
+{
+ return pg_logical_slot_get_changes_guts(fcinfo, false, true);
+}
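+
+/*
+ * Illustrative SQL usage of the functions above ('myslot' and the option
+ * names are assumed to exist; options are passed as trailing name/value
+ * pairs):
+ *
+ *	SELECT * FROM pg_logical_slot_peek_changes('myslot', NULL, NULL);
+ *	SELECT * FROM pg_logical_slot_get_changes('myslot', NULL, NULL,
+ *											  'include-xids', '0');
+ */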
+
+
+/*
+ * SQL function for writing logical decoding message into WAL.
+ */
+Datum
+pg_logical_emit_message_bytea(PG_FUNCTION_ARGS)
+{
+ bool transactional = PG_GETARG_BOOL(0);
+ char *prefix = text_to_cstring(PG_GETARG_TEXT_PP(1));
+ bytea *data = PG_GETARG_BYTEA_PP(2);
+ XLogRecPtr lsn;
+
+ lsn = LogLogicalMessage(prefix, VARDATA_ANY(data), VARSIZE_ANY_EXHDR(data),
+ transactional);
+ PG_RETURN_LSN(lsn);
+}
+
+Datum
+pg_logical_emit_message_text(PG_FUNCTION_ARGS)
+{
+ /* bytea and text are compatible */
+ return pg_logical_emit_message_bytea(fcinfo);
+}
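+
+/*
+ * Both variants back the SQL function pg_logical_emit_message(); e.g.
+ * (illustrative)
+ *
+ *	SELECT pg_logical_emit_message(true, 'myprefix', 'payload');
+ *
+ * returns the LSN of the message's WAL record.
+ */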
diff --git a/src/backend/replication/logical/message.c b/src/backend/replication/logical/message.c
new file mode 100644
index 0000000..db33cbe
--- /dev/null
+++ b/src/backend/replication/logical/message.c
@@ -0,0 +1,88 @@
+/*-------------------------------------------------------------------------
+ *
+ * message.c
+ * Generic logical messages.
+ *
+ * Copyright (c) 2013-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/logical/message.c
+ *
+ * NOTES
+ *
+ * Generic logical messages allow XLOG logging of arbitrary binary blobs that
+ * get passed to the logical decoding plugin. In normal XLOG processing they
+ * are the same as NOOPs.
+ *
+ * These messages can be either transactional or non-transactional.
+ * Transactional messages are part of the current transaction and will be
+ * sent to the decoding plugin in the same way as DML operations.
+ * Non-transactional messages are sent to the plugin at the time the logical
+ * decoding reads them from XLOG. This also means that transactional messages
+ * won't be delivered if the transaction was rolled back, while
+ * non-transactional ones will always be delivered.
+ *
+ * Every message carries a prefix to avoid conflicts between different
+ * decoding plugins. Plugin authors must take extra care to use a unique
+ * prefix; a good option is, for example, the name of the extension.
+ *
+ * ---------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/xact.h"
+#include "catalog/indexing.h"
+#include "miscadmin.h"
+#include "nodes/execnodes.h"
+#include "replication/logical.h"
+#include "replication/message.h"
+#include "utils/memutils.h"
+
+/*
+ * Write logical decoding message into XLog.
+ */
+XLogRecPtr
+LogLogicalMessage(const char *prefix, const char *message, size_t size,
+ bool transactional)
+{
+ xl_logical_message xlrec;
+
+ /*
+ * Force xid to be allocated if we're emitting a transactional message.
+ */
+ if (transactional)
+ {
+ Assert(IsTransactionState());
+ GetCurrentTransactionId();
+ }
+
+ xlrec.dbId = MyDatabaseId;
+ xlrec.transactional = transactional;
+ xlrec.prefix_size = strlen(prefix) + 1;
+ xlrec.message_size = size;
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfLogicalMessage);
+ XLogRegisterData(unconstify(char *, prefix), xlrec.prefix_size);
+ XLogRegisterData(unconstify(char *, message), size);
+
+ /* allow origin filtering */
+ XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
+
+ return XLogInsert(RM_LOGICALMSG_ID, XLOG_LOGICAL_MESSAGE);
+}
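+
+/*
+ * For example, an extension might WAL-log a transactional marker with
+ * LogLogicalMessage("my_ext", payload, payload_len, true); the record is
+ * ignored by redo and only surfaces via the message callback during logical
+ * decoding ("my_ext" and the payload variables are hypothetical).
+ */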
+
+/*
+ * Redo is basically just noop for logical decoding messages.
+ */
+void
+logicalmsg_redo(XLogReaderState *record)
+{
+ uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
+
+ if (info != XLOG_LOGICAL_MESSAGE)
+ elog(PANIC, "logicalmsg_redo: unknown op code %u", info);
+
+ /* This is only interesting for logical decoding, see decode.c. */
+}
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
new file mode 100644
index 0000000..1ca4479
--- /dev/null
+++ b/src/backend/replication/logical/origin.c
@@ -0,0 +1,1566 @@
+/*-------------------------------------------------------------------------
+ *
+ * origin.c
+ * Logical replication progress tracking support.
+ *
+ * Copyright (c) 2013-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/logical/origin.c
+ *
+ * NOTES
+ *
+ * This file provides the following:
+ * * An infrastructure to name nodes in a replication setup
+ * * A facility to store and persist replication progress in an efficient
+ *	 and durable manner
+ *
+ * A replication origin consists of a descriptive, user-defined, external
+ * name and a short, thus space-efficient, internal 2-byte one. This split
+ * exists because replication origins have to be stored in WAL and in shared
+ * memory, and long descriptors would be inefficient there. For now we use
+ * only two bytes for the internal id of a replication origin, as it seems
+ * unlikely that there will soon be more than 65k nodes in one replication
+ * setup; and using only two bytes allows us to be more space efficient.
+ *
+ * Replication progress is tracked in a shared memory table
+ * (ReplicationState) that's dumped to disk every checkpoint. Entries
+ * ('slots') in this table are identified by the internal id, because that
+ * allows replication progress to be advanced during crash recovery. To allow
+ * doing so we store the original LSN (from the originating system) of a
+ * transaction in the commit record. That allows us to recover the precise
+ * replayed state after crash recovery, without requiring synchronous
+ * commits. Allowing logical replication to use asynchronous commit is
+ * generally good for performance, but especially important as it allows a
+ * single-threaded replay process to keep up with a source that has multiple
+ * backends generating changes concurrently. For efficiency and simplicity, a
+ * backend can set up one replication origin that is, from then on, used as
+ * the source of changes produced by that backend, until reset again.
+ *
+ * This infrastructure is intended to be used in cooperation with logical
+ * decoding. When replaying from a remote system the configured origin is
+ * provided to output plugins, allowing prevention of replication loops and
+ * other filtering.
+ *
+ * There are several levels of locking at work:
+ *
+ * * To create and drop replication origins, an exclusive lock on
+ *	 pg_replication_origin is required for the duration. That allows us to
+ *	 safely and conflict-free assign new origins using a dirty snapshot.
+ *
+ * * When creating an in-memory replication progress slot, the
+ *	 ReplicationOrigin LWLock has to be held exclusively; when iterating over
+ *	 the replication progress a shared lock has to be held, and the same when
+ *	 advancing the replication progress of an individual backend that has not
+ *	 been set up as the session's replication origin.
+ *
+ * * When manipulating or looking at the remote_lsn and local_lsn fields of a
+ *	 replication progress slot, that slot's lwlock has to be held. That's
+ *	 primarily because we do not assume that 8-byte writes (the LSN) are
+ *	 atomic on all our platforms, but it also simplifies memory ordering
+ *	 concerns between the remote and local lsn. We use an lwlock instead of a
+ *	 spinlock so it's less harmful to hold the lock over a WAL write
+ *	 (cf. AdvanceReplicationProgress).
+ *
+ * ---------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include <unistd.h>
+#include <sys/stat.h>
+
+#include "access/genam.h"
+#include "access/htup_details.h"
+#include "access/table.h"
+#include "access/xact.h"
+#include "catalog/catalog.h"
+#include "catalog/indexing.h"
+#include "funcapi.h"
+#include "miscadmin.h"
+#include "nodes/execnodes.h"
+#include "pgstat.h"
+#include "replication/logical.h"
+#include "replication/origin.h"
+#include "storage/condition_variable.h"
+#include "storage/copydir.h"
+#include "storage/fd.h"
+#include "storage/ipc.h"
+#include "storage/lmgr.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/pg_lsn.h"
+#include "utils/rel.h"
+#include "utils/snapmgr.h"
+#include "utils/syscache.h"
+
+/*
+ * Replay progress of a single remote node.
+ */
+typedef struct ReplicationState
+{
+ /*
+ * Local identifier for the remote node.
+ */
+ RepOriginId roident;
+
+ /*
+ * Location of the latest commit from the remote side.
+ */
+ XLogRecPtr remote_lsn;
+
+ /*
+ * Remember the local lsn of the commit record so we can XLogFlush() to it
+ * during a checkpoint so we know the commit record actually is safe on
+ * disk.
+ */
+ XLogRecPtr local_lsn;
+
+ /*
+ * PID of backend that's acquired slot, or 0 if none.
+ */
+ int acquired_by;
+
+ /*
+ * Condition variable that's signaled when acquired_by changes.
+ */
+ ConditionVariable origin_cv;
+
+ /*
+ * Lock protecting remote_lsn and local_lsn.
+ */
+ LWLock lock;
+} ReplicationState;
+
+/*
+ * On disk version of ReplicationState.
+ */
+typedef struct ReplicationStateOnDisk
+{
+ RepOriginId roident;
+ XLogRecPtr remote_lsn;
+} ReplicationStateOnDisk;
+
+
+typedef struct ReplicationStateCtl
+{
+ /* Tranche to use for per-origin LWLocks */
+ int tranche_id;
+ /* Array of length max_replication_slots */
+ ReplicationState states[FLEXIBLE_ARRAY_MEMBER];
+} ReplicationStateCtl;
+
+/* external variables */
+RepOriginId replorigin_session_origin = InvalidRepOriginId; /* assumed identity */
+XLogRecPtr replorigin_session_origin_lsn = InvalidXLogRecPtr;
+TimestampTz replorigin_session_origin_timestamp = 0;
+
+/*
+ * Base address into a shared memory array of replication states of size
+ * max_replication_slots.
+ *
+ * XXX: Should we use a separate variable to size this rather than
+ * max_replication_slots?
+ */
+static ReplicationState *replication_states;
+
+/*
+ * Actual shared memory block (replication_states[] is now part of this).
+ */
+static ReplicationStateCtl *replication_states_ctl;
+
+/*
+ * Backend-local, cached element from ReplicationState for use in a backend
+ * replaying remote commits, so we don't have to search ReplicationState for
+ * the backend's current RepOriginId.
+ */
+static ReplicationState *session_replication_state = NULL;
+
+/* Magic for on disk files. */
+#define REPLICATION_STATE_MAGIC ((uint32) 0x1257DADE)
+
+static void
+replorigin_check_prerequisites(bool check_slots, bool recoveryOK)
+{
+ if (!superuser())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("only superusers can query or manipulate replication origins")));
+
+ if (check_slots && max_replication_slots == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot query or manipulate replication origin when max_replication_slots = 0")));
+
+	if (!recoveryOK && RecoveryInProgress())
+		ereport(ERROR,
+				(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
+				 errmsg("cannot manipulate replication origins during recovery")));
+}
+
+
+/* ---------------------------------------------------------------------------
+ * Functions for working with replication origins themselves.
+ * ---------------------------------------------------------------------------
+ */
+
+/*
+ * Check for a persistent replication origin identified by name.
+ *
+ * Returns InvalidOid if the node isn't known yet and missing_ok is true.
+ */
+RepOriginId
+replorigin_by_name(char *roname, bool missing_ok)
+{
+ Form_pg_replication_origin ident;
+ Oid roident = InvalidOid;
+ HeapTuple tuple;
+ Datum roname_d;
+
+ roname_d = CStringGetTextDatum(roname);
+
+ tuple = SearchSysCache1(REPLORIGNAME, roname_d);
+ if (HeapTupleIsValid(tuple))
+ {
+ ident = (Form_pg_replication_origin) GETSTRUCT(tuple);
+ roident = ident->roident;
+ ReleaseSysCache(tuple);
+ }
+ else if (!missing_ok)
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("replication origin \"%s\" does not exist",
+ roname)));
+
+ return roident;
+}
+
+/*
+ * Create a replication origin.
+ *
+ * Needs to be called in a transaction.
+ */
+RepOriginId
+replorigin_create(char *roname)
+{
+ Oid roident;
+ HeapTuple tuple = NULL;
+ Relation rel;
+ Datum roname_d;
+ SnapshotData SnapshotDirty;
+ SysScanDesc scan;
+ ScanKeyData key;
+
+ roname_d = CStringGetTextDatum(roname);
+
+ Assert(IsTransactionState());
+
+ /*
+	 * We need the numeric replication origin to be 16 bits wide, so we
+	 * cannot rely on normal oid allocation. Instead we simply scan
+	 * pg_replication_origin for the first unused id. That's not particularly
+	 * efficient, but this should be a fairly infrequent operation - we can
+	 * easily spend a bit more code on this when it turns out it needs to be
+	 * faster.
+ *
+ * We handle concurrency by taking an exclusive lock (allowing reads!)
+ * over the table for the duration of the search. Because we use a "dirty
+ * snapshot" we can read rows that other in-progress sessions have
+ * written, even though they would be invisible with normal snapshots. Due
+ * to the exclusive lock there's no danger that new rows can appear while
+ * we're checking.
+ */
+ InitDirtySnapshot(SnapshotDirty);
+
+ rel = table_open(ReplicationOriginRelationId, ExclusiveLock);
+
+ for (roident = InvalidOid + 1; roident < PG_UINT16_MAX; roident++)
+ {
+ bool nulls[Natts_pg_replication_origin];
+ Datum values[Natts_pg_replication_origin];
+ bool collides;
+
+ CHECK_FOR_INTERRUPTS();
+
+ ScanKeyInit(&key,
+ Anum_pg_replication_origin_roident,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(roident));
+
+ scan = systable_beginscan(rel, ReplicationOriginIdentIndex,
+ true /* indexOK */ ,
+ &SnapshotDirty,
+ 1, &key);
+
+ collides = HeapTupleIsValid(systable_getnext(scan));
+
+ systable_endscan(scan);
+
+ if (!collides)
+ {
+ /*
+ * Ok, found an unused roident, insert the new row and do a CCI,
+ * so our callers can look it up if they want to.
+ */
+ memset(&nulls, 0, sizeof(nulls));
+
+ values[Anum_pg_replication_origin_roident - 1] = ObjectIdGetDatum(roident);
+ values[Anum_pg_replication_origin_roname - 1] = roname_d;
+
+ tuple = heap_form_tuple(RelationGetDescr(rel), values, nulls);
+ CatalogTupleInsert(rel, tuple);
+ CommandCounterIncrement();
+ break;
+ }
+ }
+
+	/* now release the lock again */
+ table_close(rel, ExclusiveLock);
+
+ if (tuple == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("could not find free replication origin OID")));
+
+ heap_freetuple(tuple);
+ return roident;
+}
+
+
+/*
+ * Drop replication origin.
+ *
+ * Needs to be called in a transaction.
+ */
+void
+replorigin_drop(RepOriginId roident, bool nowait)
+{
+ HeapTuple tuple;
+ Relation rel;
+ int i;
+
+ Assert(IsTransactionState());
+
+ /*
+ * To interlock against concurrent drops, we hold ExclusiveLock on
+ * pg_replication_origin throughout this function.
+ */
+ rel = table_open(ReplicationOriginRelationId, ExclusiveLock);
+
+ /*
+ * First, clean up the slot state info, if there is any matching slot.
+ */
+restart:
+ tuple = NULL;
+ LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
+
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationState *state = &replication_states[i];
+
+ if (state->roident == roident)
+ {
+ /* found our slot, is it busy? */
+ if (state->acquired_by != 0)
+ {
+ ConditionVariable *cv;
+
+ if (nowait)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_IN_USE),
+ errmsg("could not drop replication origin with OID %d, in use by PID %d",
+ state->roident,
+ state->acquired_by)));
+
+ /*
+ * We must wait and then retry. Since we don't know which CV
+ * to wait on until here, we can't readily use
+ * ConditionVariablePrepareToSleep (calling it here would be
+ * wrong, since we could miss the signal if we did so); just
+ * use ConditionVariableSleep directly.
+ */
+ cv = &state->origin_cv;
+
+ LWLockRelease(ReplicationOriginLock);
+
+ ConditionVariableSleep(cv, WAIT_EVENT_REPLICATION_ORIGIN_DROP);
+ goto restart;
+ }
+
+ /* first make a WAL log entry */
+ {
+ xl_replorigin_drop xlrec;
+
+ xlrec.node_id = roident;
+ XLogBeginInsert();
+ XLogRegisterData((char *) (&xlrec), sizeof(xlrec));
+ XLogInsert(RM_REPLORIGIN_ID, XLOG_REPLORIGIN_DROP);
+ }
+
+ /* then clear the in-memory slot */
+ state->roident = InvalidRepOriginId;
+ state->remote_lsn = InvalidXLogRecPtr;
+ state->local_lsn = InvalidXLogRecPtr;
+ break;
+ }
+ }
+ LWLockRelease(ReplicationOriginLock);
+ ConditionVariableCancelSleep();
+
+ /*
+ * Now, we can delete the catalog entry.
+ */
+ tuple = SearchSysCache1(REPLORIGIDENT, ObjectIdGetDatum(roident));
+ if (!HeapTupleIsValid(tuple))
+ elog(ERROR, "cache lookup failed for replication origin with oid %u",
+ roident);
+
+ CatalogTupleDelete(rel, &tuple->t_self);
+ ReleaseSysCache(tuple);
+
+ CommandCounterIncrement();
+
+ /* now release lock again */
+ table_close(rel, ExclusiveLock);
+}
+
+
+/*
+ * Look up a replication origin by its OID and return its name.
+ *
+ * The external name is palloc'd in the calling context.
+ *
+ * Returns true if the origin is known, false otherwise.
+ */
+bool
+replorigin_by_oid(RepOriginId roident, bool missing_ok, char **roname)
+{
+ HeapTuple tuple;
+ Form_pg_replication_origin ric;
+
+ Assert(OidIsValid((Oid) roident));
+ Assert(roident != InvalidRepOriginId);
+ Assert(roident != DoNotReplicateId);
+
+ tuple = SearchSysCache1(REPLORIGIDENT,
+ ObjectIdGetDatum((Oid) roident));
+
+ if (HeapTupleIsValid(tuple))
+ {
+ ric = (Form_pg_replication_origin) GETSTRUCT(tuple);
+ *roname = text_to_cstring(&ric->roname);
+ ReleaseSysCache(tuple);
+
+ return true;
+ }
+ else
+ {
+ *roname = NULL;
+
+ if (!missing_ok)
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("replication origin with OID %u does not exist",
+ roident)));
+
+ return false;
+ }
+}
+
+
+/* ---------------------------------------------------------------------------
+ * Functions for handling replication progress.
+ * ---------------------------------------------------------------------------
+ */
+
+Size
+ReplicationOriginShmemSize(void)
+{
+ Size size = 0;
+
+ /*
+ * XXX: max_replication_slots is arguably the wrong thing to use, as here
+ * we keep the replay state of *remote* transactions. But for now it seems
+ * sufficient to reuse it, rather than introduce a separate GUC.
+ */
+ if (max_replication_slots == 0)
+ return size;
+
+ size = add_size(size, offsetof(ReplicationStateCtl, states));
+
+ size = add_size(size,
+ mul_size(max_replication_slots, sizeof(ReplicationState)));
+ return size;
+}
+
+void
+ReplicationOriginShmemInit(void)
+{
+ bool found;
+
+ if (max_replication_slots == 0)
+ return;
+
+ replication_states_ctl = (ReplicationStateCtl *)
+ ShmemInitStruct("ReplicationOriginState",
+ ReplicationOriginShmemSize(),
+ &found);
+ replication_states = replication_states_ctl->states;
+
+ if (!found)
+ {
+ int i;
+
+ MemSet(replication_states_ctl, 0, ReplicationOriginShmemSize());
+
+ replication_states_ctl->tranche_id = LWTRANCHE_REPLICATION_ORIGIN_STATE;
+
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ LWLockInitialize(&replication_states[i].lock,
+ replication_states_ctl->tranche_id);
+ ConditionVariableInit(&replication_states[i].origin_cv);
+ }
+ }
+}
+
+/* ---------------------------------------------------------------------------
+ * Perform a checkpoint of each replication origin's progress with respect to
+ * the replayed remote_lsn. Make sure that all transactions we refer to in the
+ * checkpoint (local_lsn) are actually on-disk. This might not yet be the case
+ * if the transactions were originally committed asynchronously.
+ *
+ * We store checkpoints in the following format:
+ * +-------+------------------------+------------------+-----+--------+
+ * | MAGIC | ReplicationStateOnDisk | struct Replic... | ... | CRC32C | EOF
+ * +-------+------------------------+------------------+-----+--------+
+ *
+ * So it's just the magic, followed by the statically sized
+ * ReplicationStateOnDisk structs. Note that the maximum number of
+ * ReplicationState is determined by max_replication_slots.
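+ *
+ * For illustration only (derived from the code below, not a separate format
+ * definition): a checkpoint containing two active origins is laid out as
+ *
+ *   uint32 magic = REPLICATION_STATE_MAGIC    4 bytes
+ *   ReplicationStateOnDisk states[2]          2 * sizeof(ReplicationStateOnDisk)
+ *   pg_crc32c crc                             4 bytes
+ *
+ * where the CRC covers everything that precedes it (the magic and all
+ * written states).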
+ * ---------------------------------------------------------------------------
+ */
+void
+CheckPointReplicationOrigin(void)
+{
+ const char *tmppath = "pg_logical/replorigin_checkpoint.tmp";
+ const char *path = "pg_logical/replorigin_checkpoint";
+ int tmpfd;
+ int i;
+ uint32 magic = REPLICATION_STATE_MAGIC;
+ pg_crc32c crc;
+
+ if (max_replication_slots == 0)
+ return;
+
+ INIT_CRC32C(crc);
+
+	/* make sure no old temp file remains */
+ if (unlink(tmppath) < 0 && errno != ENOENT)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not remove file \"%s\": %m",
+ tmppath)));
+
+ /*
+	 * No other backend can perform this at the same time; we're protected by
+	 * CheckpointLock.
+ */
+ tmpfd = OpenTransientFile(tmppath,
+ O_CREAT | O_EXCL | O_WRONLY | PG_BINARY);
+ if (tmpfd < 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not create file \"%s\": %m",
+ tmppath)));
+
+ /* write magic */
+ errno = 0;
+ if ((write(tmpfd, &magic, sizeof(magic))) != sizeof(magic))
+ {
+ /* if write didn't set errno, assume problem is no disk space */
+ if (errno == 0)
+ errno = ENOSPC;
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not write to file \"%s\": %m",
+ tmppath)));
+ }
+ COMP_CRC32C(crc, &magic, sizeof(magic));
+
+ /* prevent concurrent creations/drops */
+ LWLockAcquire(ReplicationOriginLock, LW_SHARED);
+
+ /* write actual data */
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationStateOnDisk disk_state;
+ ReplicationState *curstate = &replication_states[i];
+ XLogRecPtr local_lsn;
+
+ if (curstate->roident == InvalidRepOriginId)
+ continue;
+
+ /* zero, to avoid uninitialized padding bytes */
+ memset(&disk_state, 0, sizeof(disk_state));
+
+ LWLockAcquire(&curstate->lock, LW_SHARED);
+
+ disk_state.roident = curstate->roident;
+
+ disk_state.remote_lsn = curstate->remote_lsn;
+ local_lsn = curstate->local_lsn;
+
+ LWLockRelease(&curstate->lock);
+
+ /* make sure we only write out a commit that's persistent */
+ XLogFlush(local_lsn);
+
+ errno = 0;
+ if ((write(tmpfd, &disk_state, sizeof(disk_state))) !=
+ sizeof(disk_state))
+ {
+ /* if write didn't set errno, assume problem is no disk space */
+ if (errno == 0)
+ errno = ENOSPC;
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not write to file \"%s\": %m",
+ tmppath)));
+ }
+
+ COMP_CRC32C(crc, &disk_state, sizeof(disk_state));
+ }
+
+ LWLockRelease(ReplicationOriginLock);
+
+ /* write out the CRC */
+ FIN_CRC32C(crc);
+ errno = 0;
+ if ((write(tmpfd, &crc, sizeof(crc))) != sizeof(crc))
+ {
+ /* if write didn't set errno, assume problem is no disk space */
+ if (errno == 0)
+ errno = ENOSPC;
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not write to file \"%s\": %m",
+ tmppath)));
+ }
+
+ if (CloseTransientFile(tmpfd) != 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m",
+ tmppath)));
+
+ /* fsync, rename to permanent file, fsync file and directory */
+ durable_rename(tmppath, path, PANIC);
+}
+
+/*
+ * Recover replication replay status from checkpoint data saved earlier by
+ * CheckPointReplicationOrigin.
+ *
+ * This only needs to be called at startup, *not* after every checkpoint read
+ * during recovery (e.g., in hot standby or PITR from a base backup); all
+ * state thereafter can be recovered by looking at commit records.
+ */
+void
+StartupReplicationOrigin(void)
+{
+ const char *path = "pg_logical/replorigin_checkpoint";
+ int fd;
+ int readBytes;
+ uint32 magic = REPLICATION_STATE_MAGIC;
+ int last_state = 0;
+ pg_crc32c file_crc;
+ pg_crc32c crc;
+
+ /* don't want to overwrite already existing state */
+#ifdef USE_ASSERT_CHECKING
+ static bool already_started = false;
+
+ Assert(!already_started);
+ already_started = true;
+#endif
+
+ if (max_replication_slots == 0)
+ return;
+
+ INIT_CRC32C(crc);
+
+ elog(DEBUG2, "starting up replication origin progress state");
+
+ fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
+
+ /*
+ * might have had max_replication_slots == 0 last run, or we just brought
+ * up a standby.
+ */
+ if (fd < 0 && errno == ENOENT)
+ return;
+ else if (fd < 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m",
+ path)));
+
+	/* verify the magic, which is written even if nothing was active */
+ readBytes = read(fd, &magic, sizeof(magic));
+ if (readBytes != sizeof(magic))
+ {
+ if (readBytes < 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m",
+ path)));
+ else
+ ereport(PANIC,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("could not read file \"%s\": read %d of %zu",
+ path, readBytes, sizeof(magic))));
+ }
+ COMP_CRC32C(crc, &magic, sizeof(magic));
+
+ if (magic != REPLICATION_STATE_MAGIC)
+ ereport(PANIC,
+ (errmsg("replication checkpoint has wrong magic %u instead of %u",
+ magic, REPLICATION_STATE_MAGIC)));
+
+ /* we can skip locking here, no other access is possible */
+
+ /* recover individual states, until there are no more to be found */
+ while (true)
+ {
+ ReplicationStateOnDisk disk_state;
+
+ readBytes = read(fd, &disk_state, sizeof(disk_state));
+
+ /* no further data */
+ if (readBytes == sizeof(crc))
+ {
+ /* not pretty, but simple ... */
+ file_crc = *(pg_crc32c *) &disk_state;
+ break;
+ }
+
+ if (readBytes < 0)
+ {
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m",
+ path)));
+ }
+
+ if (readBytes != sizeof(disk_state))
+ {
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": read %d of %zu",
+ path, readBytes, sizeof(disk_state))));
+ }
+
+ COMP_CRC32C(crc, &disk_state, sizeof(disk_state));
+
+ if (last_state == max_replication_slots)
+ ereport(PANIC,
+ (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
+ errmsg("could not find free replication state, increase max_replication_slots")));
+
+ /* copy data to shared memory */
+ replication_states[last_state].roident = disk_state.roident;
+ replication_states[last_state].remote_lsn = disk_state.remote_lsn;
+ last_state++;
+
+ elog(LOG, "recovered replication state of node %u to %X/%X",
+ disk_state.roident,
+ (uint32) (disk_state.remote_lsn >> 32),
+ (uint32) disk_state.remote_lsn);
+ }
+
+ /* now check checksum */
+ FIN_CRC32C(crc);
+ if (file_crc != crc)
+ ereport(PANIC,
+				(errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("replication slot checkpoint has wrong checksum %u, expected %u",
+ crc, file_crc)));
+
+ if (CloseTransientFile(fd) != 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m",
+ path)));
+}
+
+void
+replorigin_redo(XLogReaderState *record)
+{
+ uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
+
+ switch (info)
+ {
+ case XLOG_REPLORIGIN_SET:
+ {
+ xl_replorigin_set *xlrec =
+ (xl_replorigin_set *) XLogRecGetData(record);
+
+ replorigin_advance(xlrec->node_id,
+ xlrec->remote_lsn, record->EndRecPtr,
+ xlrec->force /* backward */ ,
+ false /* WAL log */ );
+ break;
+ }
+ case XLOG_REPLORIGIN_DROP:
+ {
+ xl_replorigin_drop *xlrec;
+ int i;
+
+ xlrec = (xl_replorigin_drop *) XLogRecGetData(record);
+
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationState *state = &replication_states[i];
+
+ /* found our slot */
+ if (state->roident == xlrec->node_id)
+ {
+ /* reset entry */
+ state->roident = InvalidRepOriginId;
+ state->remote_lsn = InvalidXLogRecPtr;
+ state->local_lsn = InvalidXLogRecPtr;
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ elog(PANIC, "replorigin_redo: unknown op code %u", info);
+ }
+}
+
+
+/*
+ * Tell the replication origin progress machinery that a commit from 'node'
+ * that originated at the LSN remote_commit on the remote node was replayed
+ * successfully and that we don't need to do so again. In combination with
+ * setting up replorigin_session_origin_lsn and replorigin_session_origin
+ * that ensures we won't lose knowledge about that after a crash if the
+ * transaction had a persistent effect (think of asynchronous commits).
+ *
+ * local_commit needs to be a local LSN of the commit so that we can make sure
+ * upon a checkpoint that enough WAL has been persisted to disk.
+ *
+ * Needs to be called with a RowExclusiveLock on pg_replication_origin,
+ * unless running in recovery.
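+ *
+ * Parameter illustration (hypothetical values): when replaying a remote
+ * commit with remote end LSN X that produced a local commit at LSN Y, redo
+ * calls
+ *     replorigin_advance(node, X, Y, false, false)
+ * with wal_log = false, since the caller is itself replaying WAL; the SQL
+ * wrapper later in this file instead passes wal_log = true so the change
+ * is durable.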
+ */
+void
+replorigin_advance(RepOriginId node,
+ XLogRecPtr remote_commit, XLogRecPtr local_commit,
+ bool go_backward, bool wal_log)
+{
+ int i;
+ ReplicationState *replication_state = NULL;
+ ReplicationState *free_state = NULL;
+
+ Assert(node != InvalidRepOriginId);
+
+ /* we don't track DoNotReplicateId */
+ if (node == DoNotReplicateId)
+ return;
+
+ /*
+ * XXX: For the case where this is called by WAL replay, it'd be more
+ * efficient to restore into a backend local hashtable and only dump into
+	 * shmem after recovery is finished. Let's wait to implement that until
+	 * it's shown to be a measurable expense.
+ */
+
+ /* Lock exclusively, as we may have to create a new table entry. */
+ LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
+
+ /*
+ * Search for either an existing slot for the origin, or a free one we can
+ * use.
+ */
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationState *curstate = &replication_states[i];
+
+ /* remember where to insert if necessary */
+ if (curstate->roident == InvalidRepOriginId &&
+ free_state == NULL)
+ {
+ free_state = curstate;
+ continue;
+ }
+
+ /* not our slot */
+ if (curstate->roident != node)
+ {
+ continue;
+ }
+
+ /* ok, found slot */
+ replication_state = curstate;
+
+ LWLockAcquire(&replication_state->lock, LW_EXCLUSIVE);
+
+ /* Make sure it's not used by somebody else */
+ if (replication_state->acquired_by != 0)
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_IN_USE),
+ errmsg("replication origin with OID %d is already active for PID %d",
+ replication_state->roident,
+ replication_state->acquired_by)));
+ }
+
+ break;
+ }
+
+ if (replication_state == NULL && free_state == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
+ errmsg("could not find free replication state slot for replication origin with OID %u",
+ node),
+ errhint("Increase max_replication_slots and try again.")));
+
+ if (replication_state == NULL)
+ {
+ /* initialize new slot */
+ LWLockAcquire(&free_state->lock, LW_EXCLUSIVE);
+ replication_state = free_state;
+ Assert(replication_state->remote_lsn == InvalidXLogRecPtr);
+ Assert(replication_state->local_lsn == InvalidXLogRecPtr);
+ replication_state->roident = node;
+ }
+
+ Assert(replication_state->roident != InvalidRepOriginId);
+
+ /*
+ * If somebody "forcefully" sets this slot, WAL log it, so it's durable
+ * and the standby gets the message. Primarily this will be called during
+ * WAL replay (of commit records) where no WAL logging is necessary.
+ */
+ if (wal_log)
+ {
+ xl_replorigin_set xlrec;
+
+ xlrec.remote_lsn = remote_commit;
+ xlrec.node_id = node;
+ xlrec.force = go_backward;
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) (&xlrec), sizeof(xlrec));
+
+ XLogInsert(RM_REPLORIGIN_ID, XLOG_REPLORIGIN_SET);
+ }
+
+ /*
+ * Due to - harmless - race conditions during a checkpoint we could see
+ * values here that are older than the ones we already have in memory.
+ * Don't overwrite those.
+ */
+ if (go_backward || replication_state->remote_lsn < remote_commit)
+ replication_state->remote_lsn = remote_commit;
+ if (local_commit != InvalidXLogRecPtr &&
+ (go_backward || replication_state->local_lsn < local_commit))
+ replication_state->local_lsn = local_commit;
+ LWLockRelease(&replication_state->lock);
+
+ /*
+	 * Release *after* changing the LSNs; the slot isn't acquired and could
+	 * otherwise be dropped at any time.
+ */
+ LWLockRelease(ReplicationOriginLock);
+}
+
+
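+/*
+ * Return the replication progress (remote_lsn) for the given origin, or
+ * InvalidXLogRecPtr if the origin has no in-memory state. If 'flush' is
+ * true, first make sure the corresponding local commit is flushed to disk.
+ */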
+XLogRecPtr
+replorigin_get_progress(RepOriginId node, bool flush)
+{
+ int i;
+ XLogRecPtr local_lsn = InvalidXLogRecPtr;
+ XLogRecPtr remote_lsn = InvalidXLogRecPtr;
+
+ /* prevent slots from being concurrently dropped */
+ LWLockAcquire(ReplicationOriginLock, LW_SHARED);
+
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationState *state;
+
+ state = &replication_states[i];
+
+ if (state->roident == node)
+ {
+ LWLockAcquire(&state->lock, LW_SHARED);
+
+ remote_lsn = state->remote_lsn;
+ local_lsn = state->local_lsn;
+
+ LWLockRelease(&state->lock);
+
+ break;
+ }
+ }
+
+ LWLockRelease(ReplicationOriginLock);
+
+ if (flush && local_lsn != InvalidXLogRecPtr)
+ XLogFlush(local_lsn);
+
+ return remote_lsn;
+}
+
+/*
+ * Tear down a (possibly) configured session replication origin during process
+ * exit.
+ */
+static void
+ReplicationOriginExitCleanup(int code, Datum arg)
+{
+ ConditionVariable *cv = NULL;
+
+ LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
+
+ if (session_replication_state != NULL &&
+ session_replication_state->acquired_by == MyProcPid)
+ {
+ cv = &session_replication_state->origin_cv;
+
+ session_replication_state->acquired_by = 0;
+ session_replication_state = NULL;
+ }
+
+ LWLockRelease(ReplicationOriginLock);
+
+ if (cv)
+ ConditionVariableBroadcast(cv);
+}
+
+/*
+ * Set up a replication origin in the shared memory struct if it doesn't
+ * already exist, and cache a pointer to the specific ReplicationState so the
+ * array doesn't have to be searched when calling
+ * replorigin_session_advance().
+ *
+ * Obviously only one such cached origin can exist per process and the current
+ * cached value can only be set again after the previous value is torn down
+ * with replorigin_session_reset().
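+ *
+ * Illustrative call sequence for a session-level consumer (simplified from
+ * the SQL wrappers later in this file):
+ *
+ *     replorigin_session_setup(node);
+ *     replorigin_session_origin = node;
+ *     ... apply changes, calling replorigin_session_advance() ...
+ *     replorigin_session_reset();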
+ */
+void
+replorigin_session_setup(RepOriginId node)
+{
+ static bool registered_cleanup;
+ int i;
+ int free_slot = -1;
+
+ if (!registered_cleanup)
+ {
+ on_shmem_exit(ReplicationOriginExitCleanup, 0);
+ registered_cleanup = true;
+ }
+
+ Assert(max_replication_slots > 0);
+
+ if (session_replication_state != NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot setup replication origin when one is already setup")));
+
+ /* Lock exclusively, as we may have to create a new table entry. */
+ LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
+
+ /*
+ * Search for either an existing slot for the origin, or a free one we can
+ * use.
+ */
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationState *curstate = &replication_states[i];
+
+ /* remember where to insert if necessary */
+ if (curstate->roident == InvalidRepOriginId &&
+ free_slot == -1)
+ {
+ free_slot = i;
+ continue;
+ }
+
+		/* not our slot */
+		if (curstate->roident != node)
+			continue;
+
+		if (curstate->acquired_by != 0)
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_IN_USE),
+ errmsg("replication origin with OID %d is already active for PID %d",
+ curstate->roident, curstate->acquired_by)));
+ }
+
+ /* ok, found slot */
+ session_replication_state = curstate;
+ }
+
+
+ if (session_replication_state == NULL && free_slot == -1)
+ ereport(ERROR,
+ (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
+ errmsg("could not find free replication state slot for replication origin with OID %u",
+ node),
+ errhint("Increase max_replication_slots and try again.")));
+ else if (session_replication_state == NULL)
+ {
+ /* initialize new slot */
+ session_replication_state = &replication_states[free_slot];
+ Assert(session_replication_state->remote_lsn == InvalidXLogRecPtr);
+ Assert(session_replication_state->local_lsn == InvalidXLogRecPtr);
+ session_replication_state->roident = node;
+ }
+
+
+ Assert(session_replication_state->roident != InvalidRepOriginId);
+
+ session_replication_state->acquired_by = MyProcPid;
+
+ LWLockRelease(ReplicationOriginLock);
+
+	/* wake anyone waiting on this origin's CV; probably pointless here */
+ ConditionVariableBroadcast(&session_replication_state->origin_cv);
+}
+
+/*
+ * Reset replay state previously setup in this session.
+ *
+ * This function may only be called if an origin was setup with
+ * replorigin_session_setup().
+ */
+void
+replorigin_session_reset(void)
+{
+ ConditionVariable *cv;
+
+ Assert(max_replication_slots != 0);
+
+ if (session_replication_state == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("no replication origin is configured")));
+
+ LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
+
+ session_replication_state->acquired_by = 0;
+ cv = &session_replication_state->origin_cv;
+ session_replication_state = NULL;
+
+ LWLockRelease(ReplicationOriginLock);
+
+ ConditionVariableBroadcast(cv);
+}
+
+/*
+ * Do the same work replorigin_advance() does, just on the session's
+ * configured origin.
+ *
+ * This is noticeably cheaper than using replorigin_advance().
+ */
+void
+replorigin_session_advance(XLogRecPtr remote_commit, XLogRecPtr local_commit)
+{
+ Assert(session_replication_state != NULL);
+ Assert(session_replication_state->roident != InvalidRepOriginId);
+
+ LWLockAcquire(&session_replication_state->lock, LW_EXCLUSIVE);
+ if (session_replication_state->local_lsn < local_commit)
+ session_replication_state->local_lsn = local_commit;
+ if (session_replication_state->remote_lsn < remote_commit)
+ session_replication_state->remote_lsn = remote_commit;
+ LWLockRelease(&session_replication_state->lock);
+}
+
+/*
+ * Ask the machinery about the point up to which we successfully replayed
+ * changes from an already setup replication origin.
+ */
+XLogRecPtr
+replorigin_session_get_progress(bool flush)
+{
+ XLogRecPtr remote_lsn;
+ XLogRecPtr local_lsn;
+
+ Assert(session_replication_state != NULL);
+
+ LWLockAcquire(&session_replication_state->lock, LW_SHARED);
+ remote_lsn = session_replication_state->remote_lsn;
+ local_lsn = session_replication_state->local_lsn;
+ LWLockRelease(&session_replication_state->lock);
+
+ if (flush && local_lsn != InvalidXLogRecPtr)
+ XLogFlush(local_lsn);
+
+ return remote_lsn;
+}
+
+
+
+/* ---------------------------------------------------------------------------
+ * SQL functions for working with replication origin.
+ *
+ * These mostly should be fairly short wrappers around more generic functions.
+ * ---------------------------------------------------------------------------
+ */
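+
+/*
+ * Example SQL-level usage (illustrative; 'test_origin' is a made-up name):
+ *
+ *     SELECT pg_replication_origin_create('test_origin');
+ *     SELECT pg_replication_origin_advance('test_origin', '0/12345678');
+ *     SELECT pg_replication_origin_progress('test_origin', false);
+ *     SELECT pg_replication_origin_drop('test_origin');
+ */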
+
+/*
+ * Create a replication origin for the passed-in name, and return the
+ * assigned OID.
+ */
+Datum
+pg_replication_origin_create(PG_FUNCTION_ARGS)
+{
+ char *name;
+ RepOriginId roident;
+
+ replorigin_check_prerequisites(false, false);
+
+ name = text_to_cstring((text *) DatumGetPointer(PG_GETARG_DATUM(0)));
+
+ /* Replication origins "pg_xxx" are reserved for internal use */
+ if (IsReservedName(name))
+ ereport(ERROR,
+ (errcode(ERRCODE_RESERVED_NAME),
+ errmsg("replication origin name \"%s\" is reserved",
+ name),
+ errdetail("Origin names starting with \"pg_\" are reserved.")));
+
+ /*
+ * If built with appropriate switch, whine when regression-testing
+ * conventions for replication origin names are violated.
+ */
+#ifdef ENFORCE_REGRESSION_TEST_NAME_RESTRICTIONS
+ if (strncmp(name, "regress_", 8) != 0)
+ elog(WARNING, "replication origins created by regression test cases should have names starting with \"regress_\"");
+#endif
+
+ roident = replorigin_create(name);
+
+ pfree(name);
+
+ PG_RETURN_OID(roident);
+}
+
+/*
+ * Drop replication origin.
+ */
+Datum
+pg_replication_origin_drop(PG_FUNCTION_ARGS)
+{
+ char *name;
+ RepOriginId roident;
+
+ replorigin_check_prerequisites(false, false);
+
+ name = text_to_cstring((text *) DatumGetPointer(PG_GETARG_DATUM(0)));
+
+ roident = replorigin_by_name(name, false);
+ Assert(OidIsValid(roident));
+
+ replorigin_drop(roident, true);
+
+ pfree(name);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Return oid of a replication origin.
+ */
+Datum
+pg_replication_origin_oid(PG_FUNCTION_ARGS)
+{
+ char *name;
+ RepOriginId roident;
+
+ replorigin_check_prerequisites(false, false);
+
+ name = text_to_cstring((text *) DatumGetPointer(PG_GETARG_DATUM(0)));
+ roident = replorigin_by_name(name, true);
+
+ pfree(name);
+
+ if (OidIsValid(roident))
+ PG_RETURN_OID(roident);
+ PG_RETURN_NULL();
+}
+
+/*
+ * Set up a replication origin for this session.
+ */
+Datum
+pg_replication_origin_session_setup(PG_FUNCTION_ARGS)
+{
+ char *name;
+ RepOriginId origin;
+
+ replorigin_check_prerequisites(true, false);
+
+ name = text_to_cstring((text *) DatumGetPointer(PG_GETARG_DATUM(0)));
+ origin = replorigin_by_name(name, false);
+ replorigin_session_setup(origin);
+
+ replorigin_session_origin = origin;
+
+ pfree(name);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Reset the origin previously set up in this session.
+ */
+Datum
+pg_replication_origin_session_reset(PG_FUNCTION_ARGS)
+{
+ replorigin_check_prerequisites(true, false);
+
+ replorigin_session_reset();
+
+ replorigin_session_origin = InvalidRepOriginId;
+ replorigin_session_origin_lsn = InvalidXLogRecPtr;
+ replorigin_session_origin_timestamp = 0;
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Has a replication origin been set up for this session?
+ */
+Datum
+pg_replication_origin_session_is_setup(PG_FUNCTION_ARGS)
+{
+ replorigin_check_prerequisites(false, false);
+
+ PG_RETURN_BOOL(replorigin_session_origin != InvalidRepOriginId);
+}
+
+
+/*
+ * Return the replication progress for origin setup in the current session.
+ *
+ * If 'flush' is set to true it is ensured that the returned value corresponds
+ * to a local transaction that has been flushed. This is useful if asynchronous
+ * commits are used when replaying replicated transactions.
+ */
+Datum
+pg_replication_origin_session_progress(PG_FUNCTION_ARGS)
+{
+ XLogRecPtr remote_lsn = InvalidXLogRecPtr;
+ bool flush = PG_GETARG_BOOL(0);
+
+ replorigin_check_prerequisites(true, false);
+
+ if (session_replication_state == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("no replication origin is configured")));
+
+ remote_lsn = replorigin_session_get_progress(flush);
+
+ if (remote_lsn == InvalidXLogRecPtr)
+ PG_RETURN_NULL();
+
+ PG_RETURN_LSN(remote_lsn);
+}
+
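+/*
+ * Associate the current transaction with the given origin LSN and commit
+ * timestamp, so that they can be included in the transaction's commit
+ * record.
+ */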
+Datum
+pg_replication_origin_xact_setup(PG_FUNCTION_ARGS)
+{
+ XLogRecPtr location = PG_GETARG_LSN(0);
+
+ replorigin_check_prerequisites(true, false);
+
+ if (session_replication_state == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("no replication origin is configured")));
+
+ replorigin_session_origin_lsn = location;
+ replorigin_session_origin_timestamp = PG_GETARG_TIMESTAMPTZ(1);
+
+ PG_RETURN_VOID();
+}
+
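+/*
+ * Reset the transaction-level origin LSN and timestamp set up by
+ * pg_replication_origin_xact_setup().
+ */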
+Datum
+pg_replication_origin_xact_reset(PG_FUNCTION_ARGS)
+{
+ replorigin_check_prerequisites(true, false);
+
+ replorigin_session_origin_lsn = InvalidXLogRecPtr;
+ replorigin_session_origin_timestamp = 0;
+
+ PG_RETURN_VOID();
+}
+
+
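+/*
+ * Advance the progress of the named replication origin to the given remote
+ * LSN, WAL-logging the new value.
+ */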
+Datum
+pg_replication_origin_advance(PG_FUNCTION_ARGS)
+{
+ text *name = PG_GETARG_TEXT_PP(0);
+ XLogRecPtr remote_commit = PG_GETARG_LSN(1);
+ RepOriginId node;
+
+ replorigin_check_prerequisites(true, false);
+
+ /* lock to prevent the replication origin from vanishing */
+ LockRelationOid(ReplicationOriginRelationId, RowExclusiveLock);
+
+ node = replorigin_by_name(text_to_cstring(name), false);
+
+ /*
+ * Can't sensibly pass a local commit to be flushed at checkpoint - this
+ * xact hasn't committed yet. This is why this function should be used to
+ * set up the initial replication state, but not for replay.
+ */
+ replorigin_advance(node, remote_commit, InvalidXLogRecPtr,
+ true /* go backward */ , true /* WAL log */ );
+
+ UnlockRelationOid(ReplicationOriginRelationId, RowExclusiveLock);
+
+ PG_RETURN_VOID();
+}
+
+
+/*
+ * Return the replication progress for an individual replication origin.
+ *
+ * If 'flush' is set to true it is ensured that the returned value corresponds
+ * to a local transaction that has been flushed. This is useful if asynchronous
+ * commits are used when replaying replicated transactions.
+ */
+Datum
+pg_replication_origin_progress(PG_FUNCTION_ARGS)
+{
+ char *name;
+ bool flush;
+ RepOriginId roident;
+ XLogRecPtr remote_lsn = InvalidXLogRecPtr;
+
+ replorigin_check_prerequisites(true, true);
+
+ name = text_to_cstring((text *) DatumGetPointer(PG_GETARG_DATUM(0)));
+ flush = PG_GETARG_BOOL(1);
+
+ roident = replorigin_by_name(name, false);
+ Assert(OidIsValid(roident));
+
+ remote_lsn = replorigin_get_progress(roident, flush);
+
+ if (remote_lsn == InvalidXLogRecPtr)
+ PG_RETURN_NULL();
+
+ PG_RETURN_LSN(remote_lsn);
+}
+
+
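+/*
+ * Set-returning function reporting the in-memory progress of all configured
+ * replication origins; this backs the pg_replication_origin_status view.
+ */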
+Datum
+pg_show_replication_origin_status(PG_FUNCTION_ARGS)
+{
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ int i;
+#define REPLICATION_ORIGIN_PROGRESS_COLS 4
+
+	/* we want to return 0 rows if max_replication_slots is set to zero */
+ replorigin_check_prerequisites(false, true);
+
+ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-valued function called in context that cannot accept a set")));
+ if (!(rsinfo->allowedModes & SFRM_Materialize))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("materialize mode required, but it is not allowed in this context")));
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ if (tupdesc->natts != REPLICATION_ORIGIN_PROGRESS_COLS)
+ elog(ERROR, "wrong function definition");
+
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+
+ tupstore = tuplestore_begin_heap(true, false, work_mem);
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setResult = tupstore;
+ rsinfo->setDesc = tupdesc;
+
+ MemoryContextSwitchTo(oldcontext);
+
+
+ /* prevent slots from being concurrently dropped */
+ LWLockAcquire(ReplicationOriginLock, LW_SHARED);
+
+ /*
+	 * Iterate through all possible replication_states and display those that
+	 * are in use. Note that each state's lock is held only while its values
+	 * are copied, so they may already be out of date by the time they are
+	 * returned.
+ */
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationState *state;
+ Datum values[REPLICATION_ORIGIN_PROGRESS_COLS];
+ bool nulls[REPLICATION_ORIGIN_PROGRESS_COLS];
+ char *roname;
+
+ state = &replication_states[i];
+
+ /* unused slot, nothing to display */
+ if (state->roident == InvalidRepOriginId)
+ continue;
+
+ memset(values, 0, sizeof(values));
+ memset(nulls, 1, sizeof(nulls));
+
+ values[0] = ObjectIdGetDatum(state->roident);
+ nulls[0] = false;
+
+ /*
+		 * We're not preventing the origin from being dropped concurrently, so
+ * silently accept that it might be gone.
+ */
+ if (replorigin_by_oid(state->roident, true,
+ &roname))
+ {
+ values[1] = CStringGetTextDatum(roname);
+ nulls[1] = false;
+ }
+
+ LWLockAcquire(&state->lock, LW_SHARED);
+
+ values[2] = LSNGetDatum(state->remote_lsn);
+ nulls[2] = false;
+
+ values[3] = LSNGetDatum(state->local_lsn);
+ nulls[3] = false;
+
+ LWLockRelease(&state->lock);
+
+ tuplestore_putvalues(tupstore, tupdesc, values, nulls);
+ }
+
+ tuplestore_donestoring(tupstore);
+
+ LWLockRelease(ReplicationOriginLock);
+
+#undef REPLICATION_ORIGIN_PROGRESS_COLS
+
+ return (Datum) 0;
+}
diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c
new file mode 100644
index 0000000..3c6d0cd
--- /dev/null
+++ b/src/backend/replication/logical/proto.c
@@ -0,0 +1,687 @@
+/*-------------------------------------------------------------------------
+ *
+ * proto.c
+ * logical replication protocol functions
+ *
+ * Copyright (c) 2015-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/logical/proto.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/sysattr.h"
+#include "catalog/pg_namespace.h"
+#include "catalog/pg_type.h"
+#include "libpq/pqformat.h"
+#include "replication/logicalproto.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+
+/*
+ * Protocol message flags.
+ */
+#define LOGICALREP_IS_REPLICA_IDENTITY 1
+
+#define TRUNCATE_CASCADE (1<<0)
+#define TRUNCATE_RESTART_SEQS (1<<1)
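+
+/*
+ * Overview of the single-byte message tags used by this protocol (each is
+ * the first byte written by the corresponding function below):
+ *   'B' begin, 'C' commit, 'O' origin, 'I' insert, 'U' update,
+ *   'D' delete, 'T' truncate, 'R' relation, 'Y' type
+ */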
+
+static void logicalrep_write_attrs(StringInfo out, Relation rel);
+static void logicalrep_write_tuple(StringInfo out, Relation rel,
+ HeapTuple tuple);
+
+static void logicalrep_read_attrs(StringInfo in, LogicalRepRelation *rel);
+static void logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple);
+
+static void logicalrep_write_namespace(StringInfo out, Oid nspid);
+static const char *logicalrep_read_namespace(StringInfo in);
+
+/*
+ * Write BEGIN to the output stream.
+ */
+void
+logicalrep_write_begin(StringInfo out, ReorderBufferTXN *txn)
+{
+ pq_sendbyte(out, 'B'); /* BEGIN */
+
+ /* fixed fields */
+ pq_sendint64(out, txn->final_lsn);
+ pq_sendint64(out, txn->commit_time);
+ pq_sendint32(out, txn->xid);
+}
+
+/*
+ * Read transaction BEGIN from the stream.
+ */
+void
+logicalrep_read_begin(StringInfo in, LogicalRepBeginData *begin_data)
+{
+ /* read fields */
+ begin_data->final_lsn = pq_getmsgint64(in);
+ if (begin_data->final_lsn == InvalidXLogRecPtr)
+ elog(ERROR, "final_lsn not set in begin message");
+ begin_data->committime = pq_getmsgint64(in);
+ begin_data->xid = pq_getmsgint(in, 4);
+}
+
+
+/*
+ * Write COMMIT to the output stream.
+ */
+void
+logicalrep_write_commit(StringInfo out, ReorderBufferTXN *txn,
+ XLogRecPtr commit_lsn)
+{
+ uint8 flags = 0;
+
+ pq_sendbyte(out, 'C'); /* sending COMMIT */
+
+ /* send the flags field (unused for now) */
+ pq_sendbyte(out, flags);
+
+ /* send fields */
+ pq_sendint64(out, commit_lsn);
+ pq_sendint64(out, txn->end_lsn);
+ pq_sendint64(out, txn->commit_time);
+}
+
+/*
+ * Read transaction COMMIT from the stream.
+ */
+void
+logicalrep_read_commit(StringInfo in, LogicalRepCommitData *commit_data)
+{
+ /* read flags (unused for now) */
+ uint8 flags = pq_getmsgbyte(in);
+
+ if (flags != 0)
+ elog(ERROR, "unrecognized flags %u in commit message", flags);
+
+ /* read fields */
+ commit_data->commit_lsn = pq_getmsgint64(in);
+ commit_data->end_lsn = pq_getmsgint64(in);
+ commit_data->committime = pq_getmsgint64(in);
+}
+
+/*
+ * Write ORIGIN to the output stream.
+ */
+void
+logicalrep_write_origin(StringInfo out, const char *origin,
+ XLogRecPtr origin_lsn)
+{
+ pq_sendbyte(out, 'O'); /* ORIGIN */
+
+ /* fixed fields */
+ pq_sendint64(out, origin_lsn);
+
+ /* origin string */
+ pq_sendstring(out, origin);
+}
+
+/*
+ * Read ORIGIN from the stream.
+ */
+char *
+logicalrep_read_origin(StringInfo in, XLogRecPtr *origin_lsn)
+{
+ /* fixed fields */
+ *origin_lsn = pq_getmsgint64(in);
+
+ /* return origin */
+ return pstrdup(pq_getmsgstring(in));
+}
+
+/*
+ * Write INSERT to the output stream.
+ */
+void
+logicalrep_write_insert(StringInfo out, Relation rel, HeapTuple newtuple)
+{
+ pq_sendbyte(out, 'I'); /* action INSERT */
+
+ /* use Oid as relation identifier */
+ pq_sendint32(out, RelationGetRelid(rel));
+
+ pq_sendbyte(out, 'N'); /* new tuple follows */
+ logicalrep_write_tuple(out, rel, newtuple);
+}
+
+/*
+ * Read INSERT from stream.
+ *
+ * Fills the new tuple.
+ */
+LogicalRepRelId
+logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup)
+{
+ char action;
+ LogicalRepRelId relid;
+
+ /* read the relation id */
+ relid = pq_getmsgint(in, 4);
+
+ action = pq_getmsgbyte(in);
+ if (action != 'N')
+ elog(ERROR, "expected new tuple but got %d",
+ action);
+
+ logicalrep_read_tuple(in, newtup);
+
+ return relid;
+}
+
+/*
+ * Write UPDATE to the output stream.
+ */
+void
+logicalrep_write_update(StringInfo out, Relation rel, HeapTuple oldtuple,
+ HeapTuple newtuple)
+{
+ pq_sendbyte(out, 'U'); /* action UPDATE */
+
+ Assert(rel->rd_rel->relreplident == REPLICA_IDENTITY_DEFAULT ||
+ rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL ||
+ rel->rd_rel->relreplident == REPLICA_IDENTITY_INDEX);
+
+ /* use Oid as relation identifier */
+ pq_sendint32(out, RelationGetRelid(rel));
+
+ if (oldtuple != NULL)
+ {
+ if (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
+ pq_sendbyte(out, 'O'); /* old tuple follows */
+ else
+ pq_sendbyte(out, 'K'); /* old key follows */
+ logicalrep_write_tuple(out, rel, oldtuple);
+ }
+
+ pq_sendbyte(out, 'N'); /* new tuple follows */
+ logicalrep_write_tuple(out, rel, newtuple);
+}
+
+/*
+ * Read UPDATE from stream.
+ */
+LogicalRepRelId
+logicalrep_read_update(StringInfo in, bool *has_oldtuple,
+ LogicalRepTupleData *oldtup,
+ LogicalRepTupleData *newtup)
+{
+ char action;
+ LogicalRepRelId relid;
+
+ /* read the relation id */
+ relid = pq_getmsgint(in, 4);
+
+ /* read and verify action */
+ action = pq_getmsgbyte(in);
+ if (action != 'K' && action != 'O' && action != 'N')
+ elog(ERROR, "expected action 'N', 'O' or 'K', got %c",
+ action);
+
+ /* check for old tuple */
+ if (action == 'K' || action == 'O')
+ {
+ logicalrep_read_tuple(in, oldtup);
+ *has_oldtuple = true;
+
+ action = pq_getmsgbyte(in);
+ }
+ else
+ *has_oldtuple = false;
+
+ /* check for new tuple */
+ if (action != 'N')
+ elog(ERROR, "expected action 'N', got %c",
+ action);
+
+ logicalrep_read_tuple(in, newtup);
+
+ return relid;
+}
+
+/*
+ * Write DELETE to the output stream.
+ */
+void
+logicalrep_write_delete(StringInfo out, Relation rel, HeapTuple oldtuple)
+{
+ Assert(rel->rd_rel->relreplident == REPLICA_IDENTITY_DEFAULT ||
+ rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL ||
+ rel->rd_rel->relreplident == REPLICA_IDENTITY_INDEX);
+
+ pq_sendbyte(out, 'D'); /* action DELETE */
+
+ /* use Oid as relation identifier */
+ pq_sendint32(out, RelationGetRelid(rel));
+
+ if (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
+ pq_sendbyte(out, 'O'); /* old tuple follows */
+ else
+ pq_sendbyte(out, 'K'); /* old key follows */
+
+ logicalrep_write_tuple(out, rel, oldtuple);
+}
+
+/*
+ * Read DELETE from stream.
+ *
+ * Fills the old tuple.
+ */
+LogicalRepRelId
+logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtup)
+{
+ char action;
+ LogicalRepRelId relid;
+
+ /* read the relation id */
+ relid = pq_getmsgint(in, 4);
+
+ /* read and verify action */
+ action = pq_getmsgbyte(in);
+ if (action != 'K' && action != 'O')
+ elog(ERROR, "expected action 'O' or 'K', got %c", action);
+
+ logicalrep_read_tuple(in, oldtup);
+
+ return relid;
+}
+
+/*
+ * Write TRUNCATE to the output stream.
+ */
+void
+logicalrep_write_truncate(StringInfo out,
+ int nrelids,
+ Oid relids[],
+ bool cascade, bool restart_seqs)
+{
+ int i;
+ uint8 flags = 0;
+
+ pq_sendbyte(out, 'T'); /* action TRUNCATE */
+
+ pq_sendint32(out, nrelids);
+
+ /* encode and send truncate flags */
+ if (cascade)
+ flags |= TRUNCATE_CASCADE;
+ if (restart_seqs)
+ flags |= TRUNCATE_RESTART_SEQS;
+ pq_sendint8(out, flags);
+
+ for (i = 0; i < nrelids; i++)
+ pq_sendint32(out, relids[i]);
+}
+
+/*
+ * Read TRUNCATE from stream.
+ */
+List *
+logicalrep_read_truncate(StringInfo in,
+ bool *cascade, bool *restart_seqs)
+{
+ int i;
+ int nrelids;
+ List *relids = NIL;
+ uint8 flags;
+
+ nrelids = pq_getmsgint(in, 4);
+
+ /* read and decode truncate flags */
+ flags = pq_getmsgint(in, 1);
+ *cascade = (flags & TRUNCATE_CASCADE) > 0;
+ *restart_seqs = (flags & TRUNCATE_RESTART_SEQS) > 0;
+
+ for (i = 0; i < nrelids; i++)
+ relids = lappend_oid(relids, pq_getmsgint(in, 4));
+
+ return relids;
+}
+
+/*
+ * Write relation description to the output stream.
+ */
+void
+logicalrep_write_rel(StringInfo out, Relation rel)
+{
+ char *relname;
+
+ pq_sendbyte(out, 'R'); /* sending RELATION */
+
+ /* use Oid as relation identifier */
+ pq_sendint32(out, RelationGetRelid(rel));
+
+ /* send qualified relation name */
+ logicalrep_write_namespace(out, RelationGetNamespace(rel));
+ relname = RelationGetRelationName(rel);
+ pq_sendstring(out, relname);
+
+ /* send replica identity */
+ pq_sendbyte(out, rel->rd_rel->relreplident);
+
+ /* send the attribute info */
+ logicalrep_write_attrs(out, rel);
+}
+
+/*
+ * Read the relation info from stream and return as LogicalRepRelation.
+ */
+LogicalRepRelation *
+logicalrep_read_rel(StringInfo in)
+{
+ LogicalRepRelation *rel = palloc(sizeof(LogicalRepRelation));
+
+ rel->remoteid = pq_getmsgint(in, 4);
+
+ /* Read relation name from stream */
+ rel->nspname = pstrdup(logicalrep_read_namespace(in));
+ rel->relname = pstrdup(pq_getmsgstring(in));
+
+ /* Read the replica identity. */
+ rel->replident = pq_getmsgbyte(in);
+
+ /* Get attribute description */
+ logicalrep_read_attrs(in, rel);
+
+ return rel;
+}
+
+/*
+ * Write type info to the output stream.
+ *
+ * This function will always write base type info.
+ */
+void
+logicalrep_write_typ(StringInfo out, Oid typoid)
+{
+ Oid basetypoid = getBaseType(typoid);
+ HeapTuple tup;
+ Form_pg_type typtup;
+
+ pq_sendbyte(out, 'Y'); /* sending TYPE */
+
+ tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(basetypoid));
+ if (!HeapTupleIsValid(tup))
+ elog(ERROR, "cache lookup failed for type %u", basetypoid);
+ typtup = (Form_pg_type) GETSTRUCT(tup);
+
+ /* use Oid as relation identifier */
+ pq_sendint32(out, typoid);
+
+ /* send qualified type name */
+ logicalrep_write_namespace(out, typtup->typnamespace);
+ pq_sendstring(out, NameStr(typtup->typname));
+
+ ReleaseSysCache(tup);
+}
+
+/*
+ * Read type info from the stream.
+ */
+void
+logicalrep_read_typ(StringInfo in, LogicalRepTyp *ltyp)
+{
+ ltyp->remoteid = pq_getmsgint(in, 4);
+
+ /* Read type name from stream */
+ ltyp->nspname = pstrdup(logicalrep_read_namespace(in));
+ ltyp->typname = pstrdup(pq_getmsgstring(in));
+}
+
+/*
+ * Write a tuple to the output stream, in the most efficient format possible.
+ */
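+/*
+ * Wire format, as produced by logicalrep_write_tuple below: an int16 count
+ * of live (non-dropped, non-generated) columns, then per live column one of
+ *   'n'                                null value
+ *   'u'                                unchanged TOASTed value (data not sent)
+ *   't', int32 length, length bytes    text-format value
+ */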
+static void
+logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple)
+{
+ TupleDesc desc;
+ Datum values[MaxTupleAttributeNumber];
+ bool isnull[MaxTupleAttributeNumber];
+ int i;
+ uint16 nliveatts = 0;
+
+ desc = RelationGetDescr(rel);
+
+ for (i = 0; i < desc->natts; i++)
+ {
+ if (TupleDescAttr(desc, i)->attisdropped || TupleDescAttr(desc, i)->attgenerated)
+ continue;
+ nliveatts++;
+ }
+ pq_sendint16(out, nliveatts);
+
+ /* try to allocate enough memory from the get-go */
+ enlargeStringInfo(out, tuple->t_len +
+ nliveatts * (1 + 4));
+
+ heap_deform_tuple(tuple, desc, values, isnull);
+
+ /* Write the values */
+ for (i = 0; i < desc->natts; i++)
+ {
+ HeapTuple typtup;
+ Form_pg_type typclass;
+ Form_pg_attribute att = TupleDescAttr(desc, i);
+ char *outputstr;
+
+ if (att->attisdropped || att->attgenerated)
+ continue;
+
+ if (isnull[i])
+ {
+ pq_sendbyte(out, 'n'); /* null column */
+ continue;
+ }
+ else if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i]))
+ {
+ pq_sendbyte(out, 'u'); /* unchanged toast column */
+ continue;
+ }
+
+ typtup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(att->atttypid));
+ if (!HeapTupleIsValid(typtup))
+ elog(ERROR, "cache lookup failed for type %u", att->atttypid);
+ typclass = (Form_pg_type) GETSTRUCT(typtup);
+
+ pq_sendbyte(out, 't'); /* 'text' data follows */
+
+ outputstr = OidOutputFunctionCall(typclass->typoutput, values[i]);
+ pq_sendcountedtext(out, outputstr, strlen(outputstr), false);
+ pfree(outputstr);
+
+ ReleaseSysCache(typtup);
+ }
+}
+
+/*
+ * Read a tuple in remote format from the stream.
+ *
+ * The tuple's values are palloc'd, NUL-terminated copies of the data read
+ * from the input stringinfo.
+ */
+static void
+logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple)
+{
+ int i;
+ int natts;
+
+ /* Get number of attributes */
+ natts = pq_getmsgint(in, 2);
+
+ memset(tuple->changed, 0, sizeof(tuple->changed));
+
+ /* Read the data */
+ for (i = 0; i < natts; i++)
+ {
+ char kind;
+
+ kind = pq_getmsgbyte(in);
+
+ switch (kind)
+ {
+ case 'n': /* null */
+ tuple->values[i] = NULL;
+ tuple->changed[i] = true;
+ break;
+ case 'u': /* unchanged column */
+ /* we don't receive the value of an unchanged column */
+ tuple->values[i] = NULL;
+ break;
+ case 't': /* text formatted value */
+ {
+ int len;
+
+ tuple->changed[i] = true;
+
+ len = pq_getmsgint(in, 4); /* read length */
+
+ /* and data */
+ tuple->values[i] = palloc(len + 1);
+ pq_copymsgbytes(in, tuple->values[i], len);
+ tuple->values[i][len] = '\0';
+ }
+ break;
+ default:
+ elog(ERROR, "unrecognized data representation type '%c'", kind);
+ }
+ }
+}
+
+/*
+ * Write relation attributes to the stream.
+ */
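+/*
+ * Wire format, as produced by logicalrep_write_attrs below: an int16 count
+ * of live columns, then per column a uint8 flags byte, the null-terminated
+ * attribute name, the int32 type OID, and the int32 typmod.
+ */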
+static void
+logicalrep_write_attrs(StringInfo out, Relation rel)
+{
+ TupleDesc desc;
+ int i;
+ uint16 nliveatts = 0;
+ Bitmapset *idattrs = NULL;
+ bool replidentfull;
+
+ desc = RelationGetDescr(rel);
+
+ /* send number of live attributes */
+ for (i = 0; i < desc->natts; i++)
+ {
+ if (TupleDescAttr(desc, i)->attisdropped || TupleDescAttr(desc, i)->attgenerated)
+ continue;
+ nliveatts++;
+ }
+ pq_sendint16(out, nliveatts);
+
+ /* fetch bitmap of REPLICATION IDENTITY attributes */
+ replidentfull = (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL);
+ if (!replidentfull)
+ idattrs = RelationGetIndexAttrBitmap(rel,
+ INDEX_ATTR_BITMAP_IDENTITY_KEY);
+
+ /* send the attributes */
+ for (i = 0; i < desc->natts; i++)
+ {
+ Form_pg_attribute att = TupleDescAttr(desc, i);
+ uint8 flags = 0;
+
+ if (att->attisdropped || att->attgenerated)
+ continue;
+
+ /* REPLICA IDENTITY FULL means all columns are sent as part of key. */
+ if (replidentfull ||
+ bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
+ idattrs))
+ flags |= LOGICALREP_IS_REPLICA_IDENTITY;
+
+ pq_sendbyte(out, flags);
+
+ /* attribute name */
+ pq_sendstring(out, NameStr(att->attname));
+
+ /* attribute type id */
+ pq_sendint32(out, (int) att->atttypid);
+
+ /* attribute mode */
+ pq_sendint32(out, att->atttypmod);
+ }
+
+ bms_free(idattrs);
+}
+
+/*
+ * Read relation attributes (flags, names, and types) from the stream.
+ */
+static void
+logicalrep_read_attrs(StringInfo in, LogicalRepRelation *rel)
+{
+ int i;
+ int natts;
+ char **attnames;
+ Oid *atttyps;
+ Bitmapset *attkeys = NULL;
+
+ natts = pq_getmsgint(in, 2);
+ attnames = palloc(natts * sizeof(char *));
+ atttyps = palloc(natts * sizeof(Oid));
+
+ /* read the attributes */
+ for (i = 0; i < natts; i++)
+ {
+ uint8 flags;
+
+ /* Check for replica identity column */
+ flags = pq_getmsgbyte(in);
+ if (flags & LOGICALREP_IS_REPLICA_IDENTITY)
+ attkeys = bms_add_member(attkeys, i);
+
+ /* attribute name */
+ attnames[i] = pstrdup(pq_getmsgstring(in));
+
+ /* attribute type id */
+ atttyps[i] = (Oid) pq_getmsgint(in, 4);
+
+ /* we ignore attribute mode for now */
+ (void) pq_getmsgint(in, 4);
+ }
+
+ rel->attnames = attnames;
+ rel->atttyps = atttyps;
+ rel->attkeys = attkeys;
+ rel->natts = natts;
+}
+
+/*
+ * Write the namespace name or empty string for pg_catalog (to save space).
+ */
+static void
+logicalrep_write_namespace(StringInfo out, Oid nspid)
+{
+ if (nspid == PG_CATALOG_NAMESPACE)
+ pq_sendbyte(out, '\0');
+ else
+ {
+ char *nspname = get_namespace_name(nspid);
+
+ if (nspname == NULL)
+ elog(ERROR, "cache lookup failed for namespace %u",
+ nspid);
+
+ pq_sendstring(out, nspname);
+ }
+}
+
+/*
+ * Read the namespace name while treating empty string as pg_catalog.
+ */
+static const char *
+logicalrep_read_namespace(StringInfo in)
+{
+ const char *nspname = pq_getmsgstring(in);
+
+ if (nspname[0] == '\0')
+ nspname = "pg_catalog";
+
+ return nspname;
+}
diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c
new file mode 100644
index 0000000..901bff9
--- /dev/null
+++ b/src/backend/replication/logical/relation.c
@@ -0,0 +1,584 @@
+/*-------------------------------------------------------------------------
+ * relation.c
+ * PostgreSQL logical replication
+ *
+ * Copyright (c) 2016-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/logical/relation.c
+ *
+ * NOTES
+ * This file contains helper functions for logical replication relation
+ * mapping cache.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/relation.h"
+#include "access/table.h"
+#include "catalog/namespace.h"
+#include "catalog/pg_subscription_rel.h"
+#include "executor/executor.h"
+#include "nodes/makefuncs.h"
+#include "replication/logicalrelation.h"
+#include "replication/worker_internal.h"
+#include "utils/inval.h"
+
+
+static MemoryContext LogicalRepRelMapContext = NULL;
+
+static HTAB *LogicalRepRelMap = NULL;
+
+/*
+ * Partition map (LogicalRepPartMap)
+ *
+ * When a partitioned table is used as a replication target, replicated
+ * operations are actually performed on its leaf partitions, which requires
+ * the partitions to also be mapped to the remote relation. The parent's
+ * entry (LogicalRepRelMapEntry) cannot be used as-is for all partitions, because
+ * individual partitions may have different attribute numbers, which means
+ * attribute mappings to remote relation's attributes must be maintained
+ * separately for each partition.
+ */
+static MemoryContext LogicalRepPartMapContext = NULL;
+static HTAB *LogicalRepPartMap = NULL;
+typedef struct LogicalRepPartMapEntry
+{
+ Oid partoid; /* LogicalRepPartMap's key */
+ LogicalRepRelMapEntry relmapentry;
+} LogicalRepPartMapEntry;
+
+/*
+ * Relcache invalidation callback for our relation map cache.
+ */
+static void
+logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
+{
+ LogicalRepRelMapEntry *entry;
+
+ /* Just to be sure. */
+ if (LogicalRepRelMap == NULL)
+ return;
+
+ if (reloid != InvalidOid)
+ {
+ HASH_SEQ_STATUS status;
+
+ hash_seq_init(&status, LogicalRepRelMap);
+
+ /* TODO, use inverse lookup hashtable? */
+ while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
+ {
+ if (entry->localreloid == reloid)
+ {
+ entry->localrelvalid = false;
+ hash_seq_term(&status);
+ break;
+ }
+ }
+ }
+ else
+ {
+ /* invalidate all cache entries */
+ HASH_SEQ_STATUS status;
+
+ hash_seq_init(&status, LogicalRepRelMap);
+
+ while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
+ entry->localrelvalid = false;
+ }
+}
+
+/*
+ * Initialize the relation map cache.
+ */
+static void
+logicalrep_relmap_init(void)
+{
+ HASHCTL ctl;
+
+ if (!LogicalRepRelMapContext)
+ LogicalRepRelMapContext =
+ AllocSetContextCreate(CacheMemoryContext,
+ "LogicalRepRelMapContext",
+ ALLOCSET_DEFAULT_SIZES);
+
+ /* Initialize the relation hash table. */
+ MemSet(&ctl, 0, sizeof(ctl));
+ ctl.keysize = sizeof(LogicalRepRelId);
+ ctl.entrysize = sizeof(LogicalRepRelMapEntry);
+ ctl.hcxt = LogicalRepRelMapContext;
+
+ LogicalRepRelMap = hash_create("logicalrep relation map cache", 128, &ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+
+ /* Watch for invalidation events. */
+ CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb,
+ (Datum) 0);
+}
+
+/*
+ * Free the entry of a relation map cache.
+ */
+static void
+logicalrep_relmap_free_entry(LogicalRepRelMapEntry *entry)
+{
+ LogicalRepRelation *remoterel;
+
+ remoterel = &entry->remoterel;
+
+ pfree(remoterel->nspname);
+ pfree(remoterel->relname);
+
+ if (remoterel->natts > 0)
+ {
+ int i;
+
+ for (i = 0; i < remoterel->natts; i++)
+ pfree(remoterel->attnames[i]);
+
+ pfree(remoterel->attnames);
+ pfree(remoterel->atttyps);
+ }
+ bms_free(remoterel->attkeys);
+
+ if (entry->attrmap)
+ pfree(entry->attrmap);
+}
+
+/*
+ * Add new entry or update existing entry in the relation map cache.
+ *
+ * Called when new relation mapping is sent by the publisher to update
+ * our expected view of incoming data from said publisher.
+ */
+void
+logicalrep_relmap_update(LogicalRepRelation *remoterel)
+{
+ MemoryContext oldctx;
+ LogicalRepRelMapEntry *entry;
+ bool found;
+ int i;
+
+ if (LogicalRepRelMap == NULL)
+ logicalrep_relmap_init();
+
+ /*
+ * HASH_ENTER returns the existing entry if present or creates a new one.
+ */
+ entry = hash_search(LogicalRepRelMap, (void *) &remoterel->remoteid,
+ HASH_ENTER, &found);
+
+ if (found)
+ logicalrep_relmap_free_entry(entry);
+
+ memset(entry, 0, sizeof(LogicalRepRelMapEntry));
+
+ /* Make cached copy of the data */
+ oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext);
+ entry->remoterel.remoteid = remoterel->remoteid;
+ entry->remoterel.nspname = pstrdup(remoterel->nspname);
+ entry->remoterel.relname = pstrdup(remoterel->relname);
+ entry->remoterel.natts = remoterel->natts;
+ entry->remoterel.attnames = palloc(remoterel->natts * sizeof(char *));
+ entry->remoterel.atttyps = palloc(remoterel->natts * sizeof(Oid));
+ for (i = 0; i < remoterel->natts; i++)
+ {
+ entry->remoterel.attnames[i] = pstrdup(remoterel->attnames[i]);
+ entry->remoterel.atttyps[i] = remoterel->atttyps[i];
+ }
+ entry->remoterel.replident = remoterel->replident;
+ entry->remoterel.attkeys = bms_copy(remoterel->attkeys);
+ MemoryContextSwitchTo(oldctx);
+}
+
+/*
+ * Find attribute index in TupleDesc struct by attribute name.
+ *
+ * Returns -1 if not found.
+ */
+static int
+logicalrep_rel_att_by_name(LogicalRepRelation *remoterel, const char *attname)
+{
+ int i;
+
+ for (i = 0; i < remoterel->natts; i++)
+ {
+ if (strcmp(remoterel->attnames[i], attname) == 0)
+ return i;
+ }
+
+ return -1;
+}
+
+/*
+ * Open the local relation associated with the remote one.
+ *
+ * Rebuilds the cached relation mapping if it was invalidated by local DDL.
+ */
+LogicalRepRelMapEntry *
+logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
+{
+ LogicalRepRelMapEntry *entry;
+ bool found;
+ LogicalRepRelation *remoterel;
+
+ if (LogicalRepRelMap == NULL)
+ logicalrep_relmap_init();
+
+ /* Search for existing entry. */
+ entry = hash_search(LogicalRepRelMap, (void *) &remoteid,
+ HASH_FIND, &found);
+
+ if (!found)
+ elog(ERROR, "no relation map entry for remote relation ID %u",
+ remoteid);
+
+ remoterel = &entry->remoterel;
+
+ /* Ensure we don't leak a relcache refcount. */
+ if (entry->localrel)
+ elog(ERROR, "remote relation ID %u is already open", remoteid);
+
+ /*
+ * When opening and locking a relation, pending invalidation messages are
+ * processed which can invalidate the relation. Hence, if the entry is
+ * currently considered valid, try to open the local relation by OID and
+ * see if invalidation ensues.
+ */
+ if (entry->localrelvalid)
+ {
+ entry->localrel = try_relation_open(entry->localreloid, lockmode);
+ if (!entry->localrel)
+ {
+ /* Table was renamed or dropped. */
+ entry->localrelvalid = false;
+ }
+ else if (!entry->localrelvalid)
+ {
+ /* Note we release the no-longer-useful lock here. */
+ table_close(entry->localrel, lockmode);
+ entry->localrel = NULL;
+ }
+ }
+
+ /*
+ * If the entry has been marked invalid since we last had lock on it,
+ * re-open the local relation by name and rebuild all derived data.
+ */
+ if (!entry->localrelvalid)
+ {
+ Oid relid;
+ int found;
+ Bitmapset *idkey;
+ TupleDesc desc;
+ MemoryContext oldctx;
+ int i;
+
+ /* Try to find and lock the relation by name. */
+ relid = RangeVarGetRelid(makeRangeVar(remoterel->nspname,
+ remoterel->relname, -1),
+ lockmode, true);
+ if (!OidIsValid(relid))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical replication target relation \"%s.%s\" does not exist",
+ remoterel->nspname, remoterel->relname)));
+ entry->localrel = table_open(relid, NoLock);
+ entry->localreloid = relid;
+
+ /* Check for supported relkind. */
+ CheckSubscriptionRelkind(entry->localrel->rd_rel->relkind,
+ remoterel->nspname, remoterel->relname);
+
+ /*
+ * Build the mapping of local attribute numbers to remote attribute
+ * numbers and validate that we don't miss any replicated columns as
+ * that would result in potentially unwanted data loss.
+ */
+ desc = RelationGetDescr(entry->localrel);
+ oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext);
+ entry->attrmap = make_attrmap(desc->natts);
+ MemoryContextSwitchTo(oldctx);
+
+ found = 0;
+ for (i = 0; i < desc->natts; i++)
+ {
+ int attnum;
+ Form_pg_attribute attr = TupleDescAttr(desc, i);
+
+ if (attr->attisdropped || attr->attgenerated)
+ {
+ entry->attrmap->attnums[i] = -1;
+ continue;
+ }
+
+ attnum = logicalrep_rel_att_by_name(remoterel,
+ NameStr(attr->attname));
+
+ entry->attrmap->attnums[i] = attnum;
+ if (attnum >= 0)
+ found++;
+ }
+
+ /* TODO, detail message with names of missing columns */
+ if (found < remoterel->natts)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical replication target relation \"%s.%s\" is missing "
+ "some replicated columns",
+ remoterel->nspname, remoterel->relname)));
+
+ /*
+		 * Check that the replica identity matches. We allow a stricter
+		 * replica identity (fewer columns) on the subscriber, as that will
+		 * not stop us from finding a unique tuple. I.e., if the publisher
+		 * has identity (id, timestamp) and the subscriber just (id), this
+		 * will not be a problem, but the opposite scenario will.
+		 *
+		 * Don't throw an error here; just mark the relation entry as not
+		 * updatable, as replica identity is only needed for updates and
+		 * deletes, while inserts can be replicated even without it.
+ */
+ entry->updatable = true;
+ idkey = RelationGetIndexAttrBitmap(entry->localrel,
+ INDEX_ATTR_BITMAP_IDENTITY_KEY);
+ /* fallback to PK if no replica identity */
+ if (idkey == NULL)
+ {
+ idkey = RelationGetIndexAttrBitmap(entry->localrel,
+ INDEX_ATTR_BITMAP_PRIMARY_KEY);
+
+ /*
+ * If no replica identity index and no PK, the published table
+ * must have replica identity FULL.
+ */
+ if (idkey == NULL && remoterel->replident != REPLICA_IDENTITY_FULL)
+ entry->updatable = false;
+ }
+
+ i = -1;
+ while ((i = bms_next_member(idkey, i)) >= 0)
+ {
+ int attnum = i + FirstLowInvalidHeapAttributeNumber;
+
+ if (!AttrNumberIsForUserDefinedAttr(attnum))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical replication target relation \"%s.%s\" uses "
+ "system columns in REPLICA IDENTITY index",
+ remoterel->nspname, remoterel->relname)));
+
+ attnum = AttrNumberGetAttrOffset(attnum);
+
+ if (entry->attrmap->attnums[attnum] < 0 ||
+ !bms_is_member(entry->attrmap->attnums[attnum], remoterel->attkeys))
+ {
+ entry->updatable = false;
+ break;
+ }
+ }
+
+ entry->localrelvalid = true;
+ }
+
+ if (entry->state != SUBREL_STATE_READY)
+ entry->state = GetSubscriptionRelState(MySubscription->oid,
+ entry->localreloid,
+ &entry->statelsn,
+ true);
+
+ return entry;
+}
+
+/*
+ * Close the previously opened logical relation.
+ */
+void
+logicalrep_rel_close(LogicalRepRelMapEntry *rel, LOCKMODE lockmode)
+{
+ table_close(rel->localrel, lockmode);
+ rel->localrel = NULL;
+}
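+
+/*
+ * Illustrative sketch (not part of this file): how an apply-side caller
+ * might pair logicalrep_rel_open() with the close function above. The
+ * helper name and the elided apply step are assumptions for the example;
+ * the real callers live in worker.c.
+ */
+#ifdef LOGICALREP_RELMAP_USAGE_EXAMPLE
+static void
+example_apply_change(LogicalRepRelId remoteid)
+{
+	LogicalRepRelMapEntry *rel;
+
+	/* Open and lock the mapped local relation; revalidates the entry. */
+	rel = logicalrep_rel_open(remoteid, RowExclusiveLock);
+
+	/* Updates and deletes additionally need a usable replica identity. */
+	if (!rel->updatable)
+		elog(ERROR, "logical replication target relation is not updatable");
+
+	/* ... apply the change using rel->localrel and rel->attrmap ... */
+
+	/* Keep the lock until commit, as the apply worker does. */
+	logicalrep_rel_close(rel, NoLock);
+}
+#endif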
+
+/*
+ * Partition cache: look up partition LogicalRepRelMapEntry's
+ *
+ * Unlike the relation map cache, this is keyed by partition OID, not remote
+ * relation OID, because we only need this cache in the case where partitions
+ * are not directly mapped to any remote relation, such as when replication
+ * is occurring with one of their ancestors as the target.
+ */
+
+/*
+ * Relcache invalidation callback
+ */
+static void
+logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
+{
+ LogicalRepRelMapEntry *entry;
+
+ /* Just to be sure. */
+ if (LogicalRepPartMap == NULL)
+ return;
+
+ if (reloid != InvalidOid)
+ {
+ HASH_SEQ_STATUS status;
+
+ hash_seq_init(&status, LogicalRepPartMap);
+
+ /* TODO, use inverse lookup hashtable? */
+ while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
+ {
+ if (entry->localreloid == reloid)
+ {
+ entry->localrelvalid = false;
+ hash_seq_term(&status);
+ break;
+ }
+ }
+ }
+ else
+ {
+ /* invalidate all cache entries */
+ HASH_SEQ_STATUS status;
+
+ hash_seq_init(&status, LogicalRepPartMap);
+
+ while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
+ entry->localrelvalid = false;
+ }
+}
+
+/*
+ * Initialize the partition map cache.
+ */
+static void
+logicalrep_partmap_init(void)
+{
+ HASHCTL ctl;
+
+ if (!LogicalRepPartMapContext)
+ LogicalRepPartMapContext =
+ AllocSetContextCreate(CacheMemoryContext,
+ "LogicalRepPartMapContext",
+ ALLOCSET_DEFAULT_SIZES);
+
+ /* Initialize the relation hash table. */
+ MemSet(&ctl, 0, sizeof(ctl));
+ ctl.keysize = sizeof(Oid); /* partition OID */
+ ctl.entrysize = sizeof(LogicalRepPartMapEntry);
+ ctl.hcxt = LogicalRepPartMapContext;
+
+ LogicalRepPartMap = hash_create("logicalrep partition map cache", 64, &ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+
+ /* Watch for invalidation events. */
+ CacheRegisterRelcacheCallback(logicalrep_partmap_invalidate_cb,
+ (Datum) 0);
+}
+
+/*
+ * logicalrep_partition_open
+ *
+ * Returned entry reuses most of the values of the root table's entry, save
+ * the attribute map, which can be different for the partition. However,
+ * we must physically copy all the data, in case the root table's entry
+ * gets freed/rebuilt.
+ *
+ * Note there's no logicalrep_partition_close, because the caller closes
+ * the component relation.
+ */
+LogicalRepRelMapEntry *
+logicalrep_partition_open(LogicalRepRelMapEntry *root,
+ Relation partrel, AttrMap *map)
+{
+ LogicalRepRelMapEntry *entry;
+ LogicalRepPartMapEntry *part_entry;
+ LogicalRepRelation *remoterel = &root->remoterel;
+ Oid partOid = RelationGetRelid(partrel);
+ AttrMap *attrmap = root->attrmap;
+ bool found;
+ int i;
+ MemoryContext oldctx;
+
+ if (LogicalRepPartMap == NULL)
+ logicalrep_partmap_init();
+
+ /* Search for existing entry. */
+ part_entry = (LogicalRepPartMapEntry *) hash_search(LogicalRepPartMap,
+ (void *) &partOid,
+ HASH_ENTER, &found);
+
+ if (found)
+ return &part_entry->relmapentry;
+
+ memset(part_entry, 0, sizeof(LogicalRepPartMapEntry));
+
+ /* Switch to longer-lived context. */
+ oldctx = MemoryContextSwitchTo(LogicalRepPartMapContext);
+
+ part_entry->partoid = partOid;
+
+ /* Remote relation is copied as-is from the root entry. */
+ entry = &part_entry->relmapentry;
+ entry->remoterel.remoteid = remoterel->remoteid;
+ entry->remoterel.nspname = pstrdup(remoterel->nspname);
+ entry->remoterel.relname = pstrdup(remoterel->relname);
+ entry->remoterel.natts = remoterel->natts;
+ entry->remoterel.attnames = palloc(remoterel->natts * sizeof(char *));
+ entry->remoterel.atttyps = palloc(remoterel->natts * sizeof(Oid));
+ for (i = 0; i < remoterel->natts; i++)
+ {
+ entry->remoterel.attnames[i] = pstrdup(remoterel->attnames[i]);
+ entry->remoterel.atttyps[i] = remoterel->atttyps[i];
+ }
+ entry->remoterel.replident = remoterel->replident;
+ entry->remoterel.attkeys = bms_copy(remoterel->attkeys);
+
+ entry->localrel = partrel;
+ entry->localreloid = partOid;
+
+ /*
+ * If the partition's attributes don't match the root relation's, we'll
+ * need to make a new attrmap which maps partition attribute numbers to
+ * remoterel's, instead of the original which maps root relation's
+ * attribute numbers to remoterel's.
+ *
+ * Note that 'map' which comes from the tuple routing data structure
+ * contains 1-based attribute numbers (of the parent relation). However,
+ * the map in 'entry', a logical replication data structure, contains
+ * 0-based attribute numbers (of the remote relation).
+ */
+ if (map)
+ {
+ AttrNumber attno;
+
+ entry->attrmap = make_attrmap(map->maplen);
+ for (attno = 0; attno < entry->attrmap->maplen; attno++)
+ {
+ AttrNumber root_attno = map->attnums[attno];
+
+ entry->attrmap->attnums[attno] = attrmap->attnums[root_attno - 1];
+ }
+ }
+ else
+ {
+ /* Lacking copy_attmap, do this the hard way. */
+ entry->attrmap = make_attrmap(attrmap->maplen);
+ memcpy(entry->attrmap->attnums, attrmap->attnums,
+ attrmap->maplen * sizeof(AttrNumber));
+ }
+
+ entry->updatable = root->updatable;
+
+ entry->localrelvalid = true;
+
+ /* state and statelsn are left set to 0. */
+ MemoryContextSwitchTo(oldctx);
+
+ return entry;
+}
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
new file mode 100644
index 0000000..b904697
--- /dev/null
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -0,0 +1,3852 @@
+/*-------------------------------------------------------------------------
+ *
+ * reorderbuffer.c
+ * PostgreSQL logical replay/reorder buffer management
+ *
+ *
+ * Copyright (c) 2012-2020, PostgreSQL Global Development Group
+ *
+ *
+ * IDENTIFICATION
+ *	  src/backend/replication/logical/reorderbuffer.c
+ *
+ * NOTES
+ *	  This module gets handed individual pieces of transactions in the order
+ *	  they are written to the WAL and is responsible for reassembling them
+ *	  into toplevel-transaction-sized pieces. When a transaction is completely
+ * reassembled - signaled by reading the transaction commit record - it
+ * will then call the output plugin (cf. ReorderBufferCommit()) with the
+ * individual changes. The output plugins rely on snapshots built by
+ * snapbuild.c which hands them to us.
+ *
+ * Transactions and subtransactions/savepoints in postgres are not
+ * immediately linked to each other from outside the performing
+ * backend. Only at commit/abort (or special xact_assignment records) they
+ * are linked together. Which means that we will have to splice together a
+ * toplevel transaction from its subtransactions. To do that efficiently we
+ * build a binary heap indexed by the smallest current lsn of the individual
+ * subtransactions' changestreams. As the individual streams are inherently
+ * ordered by LSN - since that is where we build them from - the transaction
+ * can easily be reassembled by always using the subtransaction with the
+ * smallest current LSN from the heap.
+ *
+ * In order to cope with large transactions - which can be several times as
+ * big as the available memory - this module supports spooling the contents
+ *	  of large transactions to disk. When the transaction is replayed, the
+ * contents of individual (sub-)transactions will be read from disk in
+ * chunks.
+ *
+ * This module also has to deal with reassembling toast records from the
+ * individual chunks stored in WAL. When a new (or initial) version of a
+ * tuple is stored in WAL it will always be preceded by the toast chunks
+ * emitted for the columns stored out of line. Within a single toplevel
+ * transaction there will be no other data carrying records between a row's
+ * toast chunks and the row data itself. See ReorderBufferToast* for
+ * details.
+ *
+ * ReorderBuffer uses two special memory context types - SlabContext for
+ * allocations of fixed-length structures (changes and transactions), and
+ * GenerationContext for the variable-length transaction data (allocated
+ * and freed in groups with similar lifespan).
+ *
+ * To limit the amount of memory used by decoded changes, we track memory
+ * used at the reorder buffer level (i.e. total amount of memory), and for
+ * each transaction. When the total amount of used memory exceeds the
+ * limit, the transaction consuming the most memory is then serialized to
+ * disk.
+ *
+ * Only decoded changes are evicted from memory (spilled to disk), not the
+ * transaction records. The number of toplevel transactions is limited,
+ * but a transaction with many subtransactions may still consume significant
+ * amounts of memory. The transaction records are fairly small, though, and
+ * are not included in the memory limit.
+ *
+ * The current eviction algorithm is very simple - the transaction is
+ *	  picked merely by size, though it might be useful to also consider the
+ *	  age (LSN) of the changes, for example. With the new Generational memory
+ *	  allocator, evicting the oldest changes would make it more likely that
+ *	  the memory actually gets freed.
+ *
+ * We still rely on max_changes_in_memory when loading serialized changes
+ * back into memory. At that point we can't use the memory limit directly
+ *	  as we load the subxacts independently. One option to deal with this
+ *	  would be to count the subxacts, and allow each to allocate 1/N of the
+ *	  memory limit. That however does not seem very appealing, because with
+ *	  many subtransactions it may easily cause thrashing (short cycles of
+ * deserializing and applying very few changes). We probably should give
+ * a bit more memory to the oldest subtransactions, because it's likely
+ * the source for the next sequence of changes.
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <unistd.h>
+#include <sys/stat.h>
+
+#include "access/detoast.h"
+#include "access/heapam.h"
+#include "access/rewriteheap.h"
+#include "access/transam.h"
+#include "access/xact.h"
+#include "access/xlog_internal.h"
+#include "catalog/catalog.h"
+#include "lib/binaryheap.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "replication/logical.h"
+#include "replication/reorderbuffer.h"
+#include "replication/slot.h"
+#include "replication/snapbuild.h" /* just for SnapBuildSnapDecRefcount */
+#include "storage/bufmgr.h"
+#include "storage/fd.h"
+#include "storage/sinval.h"
+#include "utils/builtins.h"
+#include "utils/combocid.h"
+#include "utils/memdebug.h"
+#include "utils/memutils.h"
+#include "utils/rel.h"
+#include "utils/relfilenodemap.h"
+
+
+/* entry for a hash table we use to map from xid to our transaction state */
+typedef struct ReorderBufferTXNByIdEnt
+{
+ TransactionId xid;
+ ReorderBufferTXN *txn;
+} ReorderBufferTXNByIdEnt;
+
+/* data structures for (relfilenode, ctid) => (cmin, cmax) mapping */
+typedef struct ReorderBufferTupleCidKey
+{
+ RelFileNode relnode;
+ ItemPointerData tid;
+} ReorderBufferTupleCidKey;
+
+typedef struct ReorderBufferTupleCidEnt
+{
+ ReorderBufferTupleCidKey key;
+ CommandId cmin;
+ CommandId cmax;
+ CommandId combocid; /* just for debugging */
+} ReorderBufferTupleCidEnt;
+
+/* Virtual file descriptor with file offset tracking */
+typedef struct TXNEntryFile
+{
+ File vfd; /* -1 when the file is closed */
+ off_t curOffset; /* offset for next write or read. Reset to 0
+ * when vfd is opened. */
+} TXNEntryFile;
+
+/* k-way in-order change iteration support structures */
+typedef struct ReorderBufferIterTXNEntry
+{
+ XLogRecPtr lsn;
+ ReorderBufferChange *change;
+ ReorderBufferTXN *txn;
+ TXNEntryFile file;
+ XLogSegNo segno;
+} ReorderBufferIterTXNEntry;
+
+typedef struct ReorderBufferIterTXNState
+{
+ binaryheap *heap;
+ Size nr_txns;
+ dlist_head old_change;
+ ReorderBufferIterTXNEntry entries[FLEXIBLE_ARRAY_MEMBER];
+} ReorderBufferIterTXNState;
+
+/* toast datastructures */
+typedef struct ReorderBufferToastEnt
+{
+ Oid chunk_id; /* toast_table.chunk_id */
+ int32 last_chunk_seq; /* toast_table.chunk_seq of the last chunk we
+ * have seen */
+ Size num_chunks; /* number of chunks we've already seen */
+ Size size; /* combined size of chunks seen */
+ dlist_head chunks; /* linked list of chunks */
+ struct varlena *reconstructed; /* reconstructed varlena now pointed to in
+ * main tup */
+} ReorderBufferToastEnt;
+
+/* Disk serialization support datastructures */
+typedef struct ReorderBufferDiskChange
+{
+ Size size;
+ ReorderBufferChange change;
+ /* data follows */
+} ReorderBufferDiskChange;
+
+/*
+ * Maximum number of changes kept in memory, per transaction. After that,
+ * changes are spooled to disk.
+ *
+ * The current value should be sufficient to decode the entire transaction
+ * without hitting disk in OLTP workloads, while starting to spool to disk
+ * reasonably fast in other workloads.
+ *
+ * At some point in the future it probably makes sense to have a more elaborate
+ * resource management here, but it's not entirely clear what that would look
+ * like.
+ */
+int logical_decoding_work_mem;
+static const Size max_changes_in_memory = 4096; /* XXX for restore only */
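+
+/*
+ * Illustrative sketch (not part of this file) of the eviction policy
+ * described in the file header: once the total memory used by decoded
+ * changes exceeds the limit, keep serializing the biggest transaction
+ * until we are below it again. The helper picking that transaction is an
+ * assumption here; the real logic is in ReorderBufferCheckMemoryLimit().
+ */
+#ifdef REORDER_BUFFER_EVICTION_EXAMPLE
+static void
+example_enforce_memory_limit(ReorderBuffer *rb)
+{
+	/* logical_decoding_work_mem is in kilobytes, rb->size in bytes */
+	while (rb->size >= logical_decoding_work_mem * 1024L)
+	{
+		/* assumed helper: returns the largest txn by accounted size */
+		ReorderBufferTXN *txn = example_pick_largest_txn(rb);
+
+		/* spill its changes to disk, freeing the accounted memory */
+		ReorderBufferSerializeTXN(rb, txn);
+	}
+}
+#endif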
+
+/* ---------------------------------------
+ * primary reorderbuffer support routines
+ * ---------------------------------------
+ */
+static ReorderBufferTXN *ReorderBufferGetTXN(ReorderBuffer *rb);
+static void ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn);
+static ReorderBufferTXN *ReorderBufferTXNByXid(ReorderBuffer *rb,
+ TransactionId xid, bool create, bool *is_new,
+ XLogRecPtr lsn, bool create_as_top);
+static void ReorderBufferTransferSnapToParent(ReorderBufferTXN *txn,
+ ReorderBufferTXN *subtxn);
+
+static void AssertTXNLsnOrder(ReorderBuffer *rb);
+
+/* ---------------------------------------
+ * support functions for lsn-order iterating over the ->changes of a
+ * transaction and its subtransactions
+ *
+ * used for iteration over the k-way heap merge of a transaction and its
+ * subtransactions
+ * ---------------------------------------
+ */
+static void ReorderBufferIterTXNInit(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ ReorderBufferIterTXNState *volatile *iter_state);
+static ReorderBufferChange *ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state);
+static void ReorderBufferIterTXNFinish(ReorderBuffer *rb,
+ ReorderBufferIterTXNState *state);
+static void ReorderBufferExecuteInvalidations(ReorderBuffer *rb, ReorderBufferTXN *txn);
+
+/*
+ * ---------------------------------------
+ * Disk serialization support functions
+ * ---------------------------------------
+ */
+static void ReorderBufferCheckMemoryLimit(ReorderBuffer *rb);
+static void ReorderBufferSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn);
+static void ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ int fd, ReorderBufferChange *change);
+static Size ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ TXNEntryFile *file, XLogSegNo *segno);
+static void ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ char *change);
+static void ReorderBufferRestoreCleanup(ReorderBuffer *rb, ReorderBufferTXN *txn);
+static void ReorderBufferCleanupSerializedTXNs(const char *slotname);
+static void ReorderBufferSerializedPath(char *path, ReplicationSlot *slot,
+ TransactionId xid, XLogSegNo segno);
+
+static void ReorderBufferFreeSnap(ReorderBuffer *rb, Snapshot snap);
+static Snapshot ReorderBufferCopySnap(ReorderBuffer *rb, Snapshot orig_snap,
+ ReorderBufferTXN *txn, CommandId cid);
+
+/* ---------------------------------------
+ * toast reassembly support
+ * ---------------------------------------
+ */
+static void ReorderBufferToastInitHash(ReorderBuffer *rb, ReorderBufferTXN *txn);
+static void ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn);
+static void ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ Relation relation, ReorderBufferChange *change);
+static void ReorderBufferToastAppendChunk(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ Relation relation, ReorderBufferChange *change);
+
+/*
+ * ---------------------------------------
+ * memory accounting
+ * ---------------------------------------
+ */
+static Size ReorderBufferChangeSize(ReorderBufferChange *change);
+static void ReorderBufferChangeMemoryUpdate(ReorderBuffer *rb,
+ ReorderBufferChange *change, bool addition);
+
+/*
+ * Allocate a new ReorderBuffer and clean out any old serialized state from
+ * prior ReorderBuffer instances for the same slot.
+ */
+ReorderBuffer *
+ReorderBufferAllocate(void)
+{
+ ReorderBuffer *buffer;
+ HASHCTL hash_ctl;
+ MemoryContext new_ctx;
+
+ Assert(MyReplicationSlot != NULL);
+
+	/* allocate memory in its own context, for better accountability */
+ new_ctx = AllocSetContextCreate(CurrentMemoryContext,
+ "ReorderBuffer",
+ ALLOCSET_DEFAULT_SIZES);
+
+ buffer =
+ (ReorderBuffer *) MemoryContextAlloc(new_ctx, sizeof(ReorderBuffer));
+
+ memset(&hash_ctl, 0, sizeof(hash_ctl));
+
+ buffer->context = new_ctx;
+
+ buffer->change_context = SlabContextCreate(new_ctx,
+ "Change",
+ SLAB_DEFAULT_BLOCK_SIZE,
+ sizeof(ReorderBufferChange));
+
+ buffer->txn_context = SlabContextCreate(new_ctx,
+ "TXN",
+ SLAB_DEFAULT_BLOCK_SIZE,
+ sizeof(ReorderBufferTXN));
+
+ buffer->tup_context = GenerationContextCreate(new_ctx,
+ "Tuples",
+ SLAB_LARGE_BLOCK_SIZE);
+
+ hash_ctl.keysize = sizeof(TransactionId);
+ hash_ctl.entrysize = sizeof(ReorderBufferTXNByIdEnt);
+ hash_ctl.hcxt = buffer->context;
+
+ buffer->by_txn = hash_create("ReorderBufferByXid", 1000, &hash_ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+
+ buffer->by_txn_last_xid = InvalidTransactionId;
+ buffer->by_txn_last_txn = NULL;
+
+ buffer->outbuf = NULL;
+ buffer->outbufsize = 0;
+ buffer->size = 0;
+
+ buffer->current_restart_decoding_lsn = InvalidXLogRecPtr;
+
+ dlist_init(&buffer->toplevel_by_lsn);
+ dlist_init(&buffer->txns_by_base_snapshot_lsn);
+
+ /*
+ * Ensure there's no stale data from prior uses of this slot, in case some
+ * prior exit avoided calling ReorderBufferFree. Failure to do this can
+ * produce duplicated txns, and it's very cheap if there's nothing there.
+ */
+ ReorderBufferCleanupSerializedTXNs(NameStr(MyReplicationSlot->data.name));
+
+ return buffer;
+}
+
+/*
+ * Free a ReorderBuffer
+ */
+void
+ReorderBufferFree(ReorderBuffer *rb)
+{
+ MemoryContext context = rb->context;
+
+ /*
+ * We free separately allocated data by entirely scrapping reorderbuffer's
+ * memory context.
+ */
+ MemoryContextDelete(context);
+
+ /* Free disk space used by unconsumed reorder buffers */
+ ReorderBufferCleanupSerializedTXNs(NameStr(MyReplicationSlot->data.name));
+}
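+
+/*
+ * Lifecycle sketch (illustrative only): one ReorderBuffer per decoding
+ * session, torn down with ReorderBufferFree() above, which also removes
+ * any state spilled to disk. The callback names are assumptions; the real
+ * wiring is done by logical.c.
+ */
+#ifdef REORDER_BUFFER_LIFECYCLE_EXAMPLE
+static void
+example_decoding_session(void)
+{
+	ReorderBuffer *rb = ReorderBufferAllocate();
+
+	rb->begin = example_begin_cb;	/* assumed callback */
+	rb->apply_change = example_change_cb;	/* assumed callback */
+	rb->commit = example_commit_cb; /* assumed callback */
+
+	/* ... queue decoded changes, then replay them at commit ... */
+
+	ReorderBufferFree(rb);
+}
+#endif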
+
+/*
+ * Get an unused, possibly preallocated, ReorderBufferTXN.
+ */
+static ReorderBufferTXN *
+ReorderBufferGetTXN(ReorderBuffer *rb)
+{
+ ReorderBufferTXN *txn;
+
+ txn = (ReorderBufferTXN *)
+ MemoryContextAlloc(rb->txn_context, sizeof(ReorderBufferTXN));
+
+ memset(txn, 0, sizeof(ReorderBufferTXN));
+
+ dlist_init(&txn->changes);
+ dlist_init(&txn->tuplecids);
+ dlist_init(&txn->subtxns);
+
+ return txn;
+}
+
+/*
+ * Free a ReorderBufferTXN.
+ */
+static void
+ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
+{
+ /* clean the lookup cache if we were cached (quite likely) */
+ if (rb->by_txn_last_xid == txn->xid)
+ {
+ rb->by_txn_last_xid = InvalidTransactionId;
+ rb->by_txn_last_txn = NULL;
+ }
+
+ /* free data that's contained */
+
+ if (txn->tuplecid_hash != NULL)
+ {
+ hash_destroy(txn->tuplecid_hash);
+ txn->tuplecid_hash = NULL;
+ }
+
+ if (txn->invalidations)
+ {
+ pfree(txn->invalidations);
+ txn->invalidations = NULL;
+ }
+
+ /* Reset the toast hash */
+ ReorderBufferToastReset(rb, txn);
+
+ pfree(txn);
+}
+
+/*
+ * Get a fresh ReorderBufferChange.
+ */
+ReorderBufferChange *
+ReorderBufferGetChange(ReorderBuffer *rb)
+{
+ ReorderBufferChange *change;
+
+ change = (ReorderBufferChange *)
+ MemoryContextAlloc(rb->change_context, sizeof(ReorderBufferChange));
+
+ memset(change, 0, sizeof(ReorderBufferChange));
+ return change;
+}
+
+/*
+ * Free a ReorderBufferChange.
+ */
+void
+ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change)
+{
+ /* update memory accounting info */
+ ReorderBufferChangeMemoryUpdate(rb, change, false);
+
+ /* free contained data */
+ switch (change->action)
+ {
+ case REORDER_BUFFER_CHANGE_INSERT:
+ case REORDER_BUFFER_CHANGE_UPDATE:
+ case REORDER_BUFFER_CHANGE_DELETE:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
+ if (change->data.tp.newtuple)
+ {
+ ReorderBufferReturnTupleBuf(rb, change->data.tp.newtuple);
+ change->data.tp.newtuple = NULL;
+ }
+
+ if (change->data.tp.oldtuple)
+ {
+ ReorderBufferReturnTupleBuf(rb, change->data.tp.oldtuple);
+ change->data.tp.oldtuple = NULL;
+ }
+ break;
+ case REORDER_BUFFER_CHANGE_MESSAGE:
+ if (change->data.msg.prefix != NULL)
+ pfree(change->data.msg.prefix);
+ change->data.msg.prefix = NULL;
+ if (change->data.msg.message != NULL)
+ pfree(change->data.msg.message);
+ change->data.msg.message = NULL;
+ break;
+ case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT:
+ if (change->data.snapshot)
+ {
+ ReorderBufferFreeSnap(rb, change->data.snapshot);
+ change->data.snapshot = NULL;
+ }
+ break;
+ /* no data in addition to the struct itself */
+ case REORDER_BUFFER_CHANGE_TRUNCATE:
+ if (change->data.truncate.relids != NULL)
+ {
+ ReorderBufferReturnRelids(rb, change->data.truncate.relids);
+ change->data.truncate.relids = NULL;
+ }
+ break;
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT:
+ case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
+ case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
+ break;
+ }
+
+ pfree(change);
+}
+
+/*
+ * Get a fresh ReorderBufferTupleBuf fitting at least a tuple of size
+ * tuple_len (excluding header overhead).
+ */
+ReorderBufferTupleBuf *
+ReorderBufferGetTupleBuf(ReorderBuffer *rb, Size tuple_len)
+{
+ ReorderBufferTupleBuf *tuple;
+ Size alloc_len;
+
+ alloc_len = tuple_len + SizeofHeapTupleHeader;
+
+ tuple = (ReorderBufferTupleBuf *)
+ MemoryContextAlloc(rb->tup_context,
+ sizeof(ReorderBufferTupleBuf) +
+ MAXIMUM_ALIGNOF + alloc_len);
+ tuple->alloc_tuple_size = alloc_len;
+ tuple->tuple.t_data = ReorderBufferTupleBufData(tuple);
+
+ return tuple;
+}
+
+/*
+ * Free a ReorderBufferTupleBuf.
+ */
+void
+ReorderBufferReturnTupleBuf(ReorderBuffer *rb, ReorderBufferTupleBuf *tuple)
+{
+ pfree(tuple);
+}
+
+/*
+ * Get an array for relids of truncated relations.
+ *
+ * We use the global memory context (for the whole reorder buffer), because
+ * none of the existing ones seems like a good match (some are SLAB, so we
+ * can't use those, and tup_context is meant for tuple data, not relids). We
+ * could add yet another context, but it seems like overkill - TRUNCATE is
+ * not a particularly common operation, so it does not seem worth it.
+ */
+Oid *
+ReorderBufferGetRelids(ReorderBuffer *rb, int nrelids)
+{
+ Oid *relids;
+ Size alloc_len;
+
+ alloc_len = sizeof(Oid) * nrelids;
+
+ relids = (Oid *) MemoryContextAlloc(rb->context, alloc_len);
+
+ return relids;
+}
+
+/*
+ * Free an array of relids.
+ */
+void
+ReorderBufferReturnRelids(ReorderBuffer *rb, Oid *relids)
+{
+ pfree(relids);
+}
+
+/*
+ * Return the ReorderBufferTXN from the given buffer, specified by Xid.
+ * If create is true, and a transaction doesn't already exist, create it
+ * (with the given LSN, and as top transaction if that's specified);
+ * when this happens, is_new is set to true.
+ */
+static ReorderBufferTXN *
+ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create,
+ bool *is_new, XLogRecPtr lsn, bool create_as_top)
+{
+ ReorderBufferTXN *txn;
+ ReorderBufferTXNByIdEnt *ent;
+ bool found;
+
+ Assert(TransactionIdIsValid(xid));
+
+ /*
+ * Check the one-entry lookup cache first
+ */
+ if (TransactionIdIsValid(rb->by_txn_last_xid) &&
+ rb->by_txn_last_xid == xid)
+ {
+ txn = rb->by_txn_last_txn;
+
+ if (txn != NULL)
+ {
+ /* found it, and it's valid */
+ if (is_new)
+ *is_new = false;
+ return txn;
+ }
+
+ /*
+ * cached as non-existent, and asked not to create? Then nothing else
+ * to do.
+ */
+ if (!create)
+ return NULL;
+ /* otherwise fall through to create it */
+ }
+
+ /*
+	 * Either the cache wasn't hit, or it yielded a "does-not-exist" while
+	 * we were asked to create an entry.
+ */
+
+ /* search the lookup table */
+ ent = (ReorderBufferTXNByIdEnt *)
+ hash_search(rb->by_txn,
+ (void *) &xid,
+ create ? HASH_ENTER : HASH_FIND,
+ &found);
+ if (found)
+ txn = ent->txn;
+ else if (create)
+ {
+ /* initialize the new entry, if creation was requested */
+ Assert(ent != NULL);
+ Assert(lsn != InvalidXLogRecPtr);
+
+ ent->txn = ReorderBufferGetTXN(rb);
+ ent->txn->xid = xid;
+ txn = ent->txn;
+ txn->first_lsn = lsn;
+ txn->restart_decoding_lsn = rb->current_restart_decoding_lsn;
+
+ if (create_as_top)
+ {
+ dlist_push_tail(&rb->toplevel_by_lsn, &txn->node);
+ AssertTXNLsnOrder(rb);
+ }
+ }
+ else
+ txn = NULL; /* not found and not asked to create */
+
+ /* update cache */
+ rb->by_txn_last_xid = xid;
+ rb->by_txn_last_txn = txn;
+
+ if (is_new)
+ *is_new = !found;
+
+ Assert(!create || txn != NULL);
+ return txn;
+}
+
+/*
+ * Queue a change into a transaction so it can be replayed upon commit.
+ */
+void
+ReorderBufferQueueChange(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn,
+ ReorderBufferChange *change)
+{
+ ReorderBufferTXN *txn;
+
+ txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
+
+ change->lsn = lsn;
+ change->txn = txn;
+
+ Assert(InvalidXLogRecPtr != lsn);
+ dlist_push_tail(&txn->changes, &change->node);
+ txn->nentries++;
+ txn->nentries_mem++;
+
+ /* update memory accounting information */
+ ReorderBufferChangeMemoryUpdate(rb, change, true);
+
+ /* check the memory limits and evict something if needed */
+ ReorderBufferCheckMemoryLimit(rb);
+}
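+
+/*
+ * Illustrative sketch (not part of this file): how a decoder builds and
+ * queues an INSERT change. This mirrors what decode.c does; the function
+ * name and the elided tuple copy are assumptions for the example.
+ */
+#ifdef REORDER_BUFFER_QUEUE_EXAMPLE
+static void
+example_queue_insert(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn,
+					 RelFileNode node, Size datalen)
+{
+	ReorderBufferChange *change = ReorderBufferGetChange(rb);
+
+	change->action = REORDER_BUFFER_CHANGE_INSERT;
+	change->data.tp.relnode = node;
+	change->data.tp.clear_toast_afterwards = true;
+	change->data.tp.newtuple = ReorderBufferGetTupleBuf(rb, datalen);
+
+	/* ... copy the decoded tuple into change->data.tp.newtuple ... */
+
+	/* hands ownership of "change" over; may trigger spilling to disk */
+	ReorderBufferQueueChange(rb, xid, lsn, change);
+}
+#endif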
+
+/*
+ * Queue message into a transaction so it can be processed upon commit.
+ */
+void
+ReorderBufferQueueMessage(ReorderBuffer *rb, TransactionId xid,
+ Snapshot snapshot, XLogRecPtr lsn,
+ bool transactional, const char *prefix,
+ Size message_size, const char *message)
+{
+ if (transactional)
+ {
+ MemoryContext oldcontext;
+ ReorderBufferChange *change;
+
+ Assert(xid != InvalidTransactionId);
+
+ oldcontext = MemoryContextSwitchTo(rb->context);
+
+ change = ReorderBufferGetChange(rb);
+ change->action = REORDER_BUFFER_CHANGE_MESSAGE;
+ change->data.msg.prefix = pstrdup(prefix);
+ change->data.msg.message_size = message_size;
+ change->data.msg.message = palloc(message_size);
+ memcpy(change->data.msg.message, message, message_size);
+
+ ReorderBufferQueueChange(rb, xid, lsn, change);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+ else
+ {
+ ReorderBufferTXN *txn = NULL;
+ volatile Snapshot snapshot_now = snapshot;
+
+ if (xid != InvalidTransactionId)
+ txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
+
+ /* setup snapshot to allow catalog access */
+ SetupHistoricSnapshot(snapshot_now, NULL);
+ PG_TRY();
+ {
+ rb->message(rb, txn, lsn, false, prefix, message_size, message);
+
+ TeardownHistoricSnapshot(false);
+ }
+ PG_CATCH();
+ {
+ TeardownHistoricSnapshot(true);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+ }
+}
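+
+/*
+ * For context (illustrative, not part of this file): messages reach the
+ * function above via decode.c after being WAL-logged with
+ * LogLogicalMessage() from message.c. A minimal emitting sketch, assuming
+ * that API:
+ */
+#ifdef REORDER_BUFFER_MESSAGE_EXAMPLE
+static void
+example_emit_message(void)
+{
+	const char *payload = "example-payload";
+
+	/* transactional: will be decoded at commit via the queued change */
+	(void) LogLogicalMessage("example-prefix", payload,
+							 strlen(payload) + 1, true);
+}
+#endif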
+
+/*
+ * AssertTXNLsnOrder
+ * Verify LSN ordering of transaction lists in the reorderbuffer
+ *
+ * Other LSN-related invariants are checked too.
+ *
+ * No-op if assertions are not in use.
+ */
+static void
+AssertTXNLsnOrder(ReorderBuffer *rb)
+{
+#ifdef USE_ASSERT_CHECKING
+ dlist_iter iter;
+ XLogRecPtr prev_first_lsn = InvalidXLogRecPtr;
+ XLogRecPtr prev_base_snap_lsn = InvalidXLogRecPtr;
+
+ dlist_foreach(iter, &rb->toplevel_by_lsn)
+ {
+ ReorderBufferTXN *cur_txn = dlist_container(ReorderBufferTXN, node,
+ iter.cur);
+
+ /* start LSN must be set */
+ Assert(cur_txn->first_lsn != InvalidXLogRecPtr);
+
+ /* If there is an end LSN, it must be higher than start LSN */
+ if (cur_txn->end_lsn != InvalidXLogRecPtr)
+ Assert(cur_txn->first_lsn <= cur_txn->end_lsn);
+
+ /* Current initial LSN must be strictly higher than previous */
+ if (prev_first_lsn != InvalidXLogRecPtr)
+ Assert(prev_first_lsn < cur_txn->first_lsn);
+
+ /* known-as-subtxn txns must not be listed */
+ Assert(!rbtxn_is_known_subxact(cur_txn));
+
+ prev_first_lsn = cur_txn->first_lsn;
+ }
+
+ dlist_foreach(iter, &rb->txns_by_base_snapshot_lsn)
+ {
+ ReorderBufferTXN *cur_txn = dlist_container(ReorderBufferTXN,
+ base_snapshot_node,
+ iter.cur);
+
+ /* base snapshot (and its LSN) must be set */
+ Assert(cur_txn->base_snapshot != NULL);
+ Assert(cur_txn->base_snapshot_lsn != InvalidXLogRecPtr);
+
+ /* current LSN must be strictly higher than previous */
+ if (prev_base_snap_lsn != InvalidXLogRecPtr)
+ Assert(prev_base_snap_lsn < cur_txn->base_snapshot_lsn);
+
+ /* known-as-subtxn txns must not be listed */
+ Assert(!rbtxn_is_known_subxact(cur_txn));
+
+ prev_base_snap_lsn = cur_txn->base_snapshot_lsn;
+ }
+#endif
+}
+
+/*
+ * ReorderBufferGetOldestTXN
+ * Return oldest transaction in reorderbuffer
+ */
+ReorderBufferTXN *
+ReorderBufferGetOldestTXN(ReorderBuffer *rb)
+{
+ ReorderBufferTXN *txn;
+
+ AssertTXNLsnOrder(rb);
+
+ if (dlist_is_empty(&rb->toplevel_by_lsn))
+ return NULL;
+
+ txn = dlist_head_element(ReorderBufferTXN, node, &rb->toplevel_by_lsn);
+
+ Assert(!rbtxn_is_known_subxact(txn));
+ Assert(txn->first_lsn != InvalidXLogRecPtr);
+ return txn;
+}
+
+/*
+ * ReorderBufferGetOldestXmin
+ * Return oldest Xmin in reorderbuffer
+ *
+ * Returns oldest possibly running Xid from the point of view of snapshots
+ * used in the transactions kept by reorderbuffer, or InvalidTransactionId if
+ * there are none.
+ *
+ * Since snapshots are assigned monotonically, this equals the Xmin of the
+ * base snapshot with minimal base_snapshot_lsn.
+ */
+TransactionId
+ReorderBufferGetOldestXmin(ReorderBuffer *rb)
+{
+ ReorderBufferTXN *txn;
+
+ AssertTXNLsnOrder(rb);
+
+ if (dlist_is_empty(&rb->txns_by_base_snapshot_lsn))
+ return InvalidTransactionId;
+
+ txn = dlist_head_element(ReorderBufferTXN, base_snapshot_node,
+ &rb->txns_by_base_snapshot_lsn);
+ return txn->base_snapshot->xmin;
+}
+
+void
+ReorderBufferSetRestartPoint(ReorderBuffer *rb, XLogRecPtr ptr)
+{
+ rb->current_restart_decoding_lsn = ptr;
+}
+
+/*
+ * ReorderBufferAssignChild
+ *
+ * Make note that we know that subxid is a subtransaction of xid, seen as of
+ * the given lsn.
+ */
+void
+ReorderBufferAssignChild(ReorderBuffer *rb, TransactionId xid,
+ TransactionId subxid, XLogRecPtr lsn)
+{
+ ReorderBufferTXN *txn;
+ ReorderBufferTXN *subtxn;
+ bool new_top;
+ bool new_sub;
+
+ txn = ReorderBufferTXNByXid(rb, xid, true, &new_top, lsn, true);
+ subtxn = ReorderBufferTXNByXid(rb, subxid, true, &new_sub, lsn, false);
+
+ if (!new_sub)
+ {
+ if (rbtxn_is_known_subxact(subtxn))
+ {
+ /* already associated, nothing to do */
+ return;
+ }
+ else
+ {
+ /*
+ * We already saw this transaction, but initially added it to the
+ * list of top-level txns. Now that we know it's not top-level,
+ * remove it from there.
+ */
+ dlist_delete(&subtxn->node);
+ }
+ }
+
+ subtxn->txn_flags |= RBTXN_IS_SUBXACT;
+ subtxn->toplevel_xid = xid;
+ Assert(subtxn->nsubtxns == 0);
+
+ /* add to subtransaction list */
+ dlist_push_tail(&txn->subtxns, &subtxn->node);
+ txn->nsubtxns++;
+
+ /* Possibly transfer the subtxn's snapshot to its top-level txn. */
+ ReorderBufferTransferSnapToParent(txn, subtxn);
+
+ /* Verify LSN-ordering invariant */
+ AssertTXNLsnOrder(rb);
+}
+
+/*
+ * ReorderBufferTransferSnapToParent
+ * Transfer base snapshot from subtxn to top-level txn, if needed
+ *
+ * This is done if the top-level txn doesn't have a base snapshot, or if the
+ * subtxn's base snapshot has an earlier LSN than the top-level txn's base
+ * snapshot's LSN. This can happen if there are no changes in the toplevel
+ * txn but there are some in the subtxn, or the first change in subtxn has
+ * earlier LSN than first change in the top-level txn and we learned about
+ * their kinship only now.
+ *
+ * The subtransaction's snapshot is cleared regardless of the transfer
+ * happening, since it's not needed anymore in either case.
+ *
+ * We do this as soon as we become aware of their kinship, to avoid queueing
+ * extra snapshots to txns known-as-subtxns -- only top-level txns will
+ * receive further snapshots.
+ */
+static void
+ReorderBufferTransferSnapToParent(ReorderBufferTXN *txn,
+ ReorderBufferTXN *subtxn)
+{
+ Assert(subtxn->toplevel_xid == txn->xid);
+
+ if (subtxn->base_snapshot != NULL)
+ {
+ if (txn->base_snapshot == NULL ||
+ subtxn->base_snapshot_lsn < txn->base_snapshot_lsn)
+ {
+ /*
+ * If the toplevel transaction already has a base snapshot but
+ * it's newer than the subxact's, purge it.
+ */
+ if (txn->base_snapshot != NULL)
+ {
+ SnapBuildSnapDecRefcount(txn->base_snapshot);
+ dlist_delete(&txn->base_snapshot_node);
+ }
+
+ /*
+ * The snapshot is now the top transaction's; transfer it, and
+ * adjust the list position of the top transaction in the list by
+ * moving it to where the subtransaction is.
+ */
+ txn->base_snapshot = subtxn->base_snapshot;
+ txn->base_snapshot_lsn = subtxn->base_snapshot_lsn;
+ dlist_insert_before(&subtxn->base_snapshot_node,
+ &txn->base_snapshot_node);
+
+ /*
+ * The subtransaction doesn't have a snapshot anymore (so it
+ * mustn't be in the list.)
+ */
+ subtxn->base_snapshot = NULL;
+ subtxn->base_snapshot_lsn = InvalidXLogRecPtr;
+ dlist_delete(&subtxn->base_snapshot_node);
+ }
+ else
+ {
+ /* Base snap of toplevel is fine, so subxact's is not needed */
+ SnapBuildSnapDecRefcount(subtxn->base_snapshot);
+ dlist_delete(&subtxn->base_snapshot_node);
+ subtxn->base_snapshot = NULL;
+ subtxn->base_snapshot_lsn = InvalidXLogRecPtr;
+ }
+ }
+}
+
+/*
+ * Associate a subtransaction with its toplevel transaction at commit
+ * time. There may be no further changes added after this.
+ */
+void
+ReorderBufferCommitChild(ReorderBuffer *rb, TransactionId xid,
+ TransactionId subxid, XLogRecPtr commit_lsn,
+ XLogRecPtr end_lsn)
+{
+ ReorderBufferTXN *subtxn;
+
+ subtxn = ReorderBufferTXNByXid(rb, subxid, false, NULL,
+ InvalidXLogRecPtr, false);
+
+ /*
+ * No need to do anything if that subtxn didn't contain any changes
+ */
+ if (!subtxn)
+ return;
+
+ subtxn->final_lsn = commit_lsn;
+ subtxn->end_lsn = end_lsn;
+
+ /*
+ * Assign this subxact as a child of the toplevel xact (no-op if already
+ * done.)
+ */
+ ReorderBufferAssignChild(rb, xid, subxid, InvalidXLogRecPtr);
+}
+
+
+/*
+ * Support for efficiently iterating over a transaction's and its
+ * subtransactions' changes.
+ *
+ * We do this by doing a k-way merge between transactions/subtransactions. For that
+ * we model the current heads of the different transactions as a binary heap
+ * so we easily know which (sub-)transaction has the change with the smallest
+ * lsn next.
+ *
+ * We assume the changes in individual transactions are already sorted by LSN.
+ */
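+
+/*
+ * The resulting iteration pattern, condensed (illustrative only; the real
+ * caller is ReorderBufferCommit() below, which wraps this in error
+ * handling and snapshot management):
+ *
+ *     ReorderBufferIterTXNInit(rb, txn, &iterstate);
+ *     while ((change = ReorderBufferIterTXNNext(rb, iterstate)) != NULL)
+ *         ... changes arrive here in ascending LSN order ...
+ *     ReorderBufferIterTXNFinish(rb, iterstate);
+ */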
+
+/*
+ * Binary heap comparison function.
+ */
+static int
+ReorderBufferIterCompare(Datum a, Datum b, void *arg)
+{
+ ReorderBufferIterTXNState *state = (ReorderBufferIterTXNState *) arg;
+ XLogRecPtr pos_a = state->entries[DatumGetInt32(a)].lsn;
+ XLogRecPtr pos_b = state->entries[DatumGetInt32(b)].lsn;
+
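+	/*
+	 * Note the inverted sense: binaryheap keeps the element comparing
+	 * greatest at the root, so reporting smaller LSNs as "greater" turns
+	 * this into a min-heap on LSN, which is what the merge needs.
+	 */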
+ if (pos_a < pos_b)
+ return 1;
+ else if (pos_a == pos_b)
+ return 0;
+ return -1;
+}
+
+/*
+ * Allocate & initialize an iterator which iterates in lsn order over a
+ * transaction and all its subtransactions.
+ *
+ * Note: The iterator state is returned through iter_state parameter rather
+ * than the function's return value. This is because the state gets cleaned up
+ * in a PG_CATCH block in the caller, so we want to make sure the caller gets
+ * back the state even if this function throws an exception.
+ */
+static void
+ReorderBufferIterTXNInit(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ ReorderBufferIterTXNState *volatile *iter_state)
+{
+ Size nr_txns = 0;
+ ReorderBufferIterTXNState *state;
+ dlist_iter cur_txn_i;
+ int32 off;
+
+ *iter_state = NULL;
+
+ /*
+ * Calculate the size of our heap: one element for every transaction that
+ * contains changes. (Besides the transactions already in the reorder
+ * buffer, we count the one we were directly passed.)
+ */
+ if (txn->nentries > 0)
+ nr_txns++;
+
+ dlist_foreach(cur_txn_i, &txn->subtxns)
+ {
+ ReorderBufferTXN *cur_txn;
+
+ cur_txn = dlist_container(ReorderBufferTXN, node, cur_txn_i.cur);
+
+ if (cur_txn->nentries > 0)
+ nr_txns++;
+ }
+
+ /* allocate iteration state */
+ state = (ReorderBufferIterTXNState *)
+ MemoryContextAllocZero(rb->context,
+ sizeof(ReorderBufferIterTXNState) +
+ sizeof(ReorderBufferIterTXNEntry) * nr_txns);
+
+ state->nr_txns = nr_txns;
+ dlist_init(&state->old_change);
+
+ for (off = 0; off < state->nr_txns; off++)
+ {
+ state->entries[off].file.vfd = -1;
+ state->entries[off].segno = 0;
+ }
+
+ /* allocate heap */
+ state->heap = binaryheap_allocate(state->nr_txns,
+ ReorderBufferIterCompare,
+ state);
+
+ /* Now that the state fields are initialized, it is safe to return it. */
+ *iter_state = state;
+
+ /*
+ * Now insert items into the binary heap, in an unordered fashion. (We
+ * will run a heap assembly step at the end; this is more efficient.)
+ */
+
+ off = 0;
+
+ /* add toplevel transaction if it contains changes */
+ if (txn->nentries > 0)
+ {
+ ReorderBufferChange *cur_change;
+
+ if (rbtxn_is_serialized(txn))
+ {
+ /* serialize remaining changes */
+ ReorderBufferSerializeTXN(rb, txn);
+ ReorderBufferRestoreChanges(rb, txn, &state->entries[off].file,
+ &state->entries[off].segno);
+ }
+
+ cur_change = dlist_head_element(ReorderBufferChange, node,
+ &txn->changes);
+
+ state->entries[off].lsn = cur_change->lsn;
+ state->entries[off].change = cur_change;
+ state->entries[off].txn = txn;
+
+ binaryheap_add_unordered(state->heap, Int32GetDatum(off++));
+ }
+
+ /* add subtransactions if they contain changes */
+ dlist_foreach(cur_txn_i, &txn->subtxns)
+ {
+ ReorderBufferTXN *cur_txn;
+
+ cur_txn = dlist_container(ReorderBufferTXN, node, cur_txn_i.cur);
+
+ if (cur_txn->nentries > 0)
+ {
+ ReorderBufferChange *cur_change;
+
+ if (rbtxn_is_serialized(cur_txn))
+ {
+ /* serialize remaining changes */
+ ReorderBufferSerializeTXN(rb, cur_txn);
+ ReorderBufferRestoreChanges(rb, cur_txn,
+ &state->entries[off].file,
+ &state->entries[off].segno);
+ }
+ cur_change = dlist_head_element(ReorderBufferChange, node,
+ &cur_txn->changes);
+
+ state->entries[off].lsn = cur_change->lsn;
+ state->entries[off].change = cur_change;
+ state->entries[off].txn = cur_txn;
+
+ binaryheap_add_unordered(state->heap, Int32GetDatum(off++));
+ }
+ }
+
+ /* assemble a valid binary heap */
+ binaryheap_build(state->heap);
+}
+
+/*
+ * Return the next change when iterating over a transaction and its
+ * subtransactions.
+ *
+ * Returns NULL when no further changes exist.
+ */
+static ReorderBufferChange *
+ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
+{
+ ReorderBufferChange *change;
+ ReorderBufferIterTXNEntry *entry;
+ int32 off;
+
+ /* nothing there anymore */
+ if (state->heap->bh_size == 0)
+ return NULL;
+
+ off = DatumGetInt32(binaryheap_first(state->heap));
+ entry = &state->entries[off];
+
+ /* free memory we might have "leaked" in the previous *Next call */
+ if (!dlist_is_empty(&state->old_change))
+ {
+ change = dlist_container(ReorderBufferChange, node,
+ dlist_pop_head_node(&state->old_change));
+ ReorderBufferReturnChange(rb, change);
+ Assert(dlist_is_empty(&state->old_change));
+ }
+
+ change = entry->change;
+
+ /*
+ * update heap with information about which transaction has the next
+ * relevant change in LSN order
+ */
+
+ /* there are in-memory changes */
+ if (dlist_has_next(&entry->txn->changes, &entry->change->node))
+ {
+ dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node);
+ ReorderBufferChange *next_change =
+ dlist_container(ReorderBufferChange, node, next);
+
+ /* txn stays the same */
+ state->entries[off].lsn = next_change->lsn;
+ state->entries[off].change = next_change;
+
+ binaryheap_replace_first(state->heap, Int32GetDatum(off));
+ return change;
+ }
+
+ /* try to load changes from disk */
+ if (entry->txn->nentries != entry->txn->nentries_mem)
+ {
+ /*
+ * Ugly: restoring changes will reuse *Change records, thus delete the
+		 * current one from the per-tx list and only free it in the next call.
+ */
+ dlist_delete(&change->node);
+ dlist_push_tail(&state->old_change, &change->node);
+
+ if (ReorderBufferRestoreChanges(rb, entry->txn, &entry->file,
+ &state->entries[off].segno))
+ {
+ /* successfully restored changes from disk */
+ ReorderBufferChange *next_change =
+ dlist_head_element(ReorderBufferChange, node,
+ &entry->txn->changes);
+
+ elog(DEBUG2, "restored %u/%u changes from disk",
+ (uint32) entry->txn->nentries_mem,
+ (uint32) entry->txn->nentries);
+
+ Assert(entry->txn->nentries_mem);
+ /* txn stays the same */
+ state->entries[off].lsn = next_change->lsn;
+ state->entries[off].change = next_change;
+ binaryheap_replace_first(state->heap, Int32GetDatum(off));
+
+ return change;
+ }
+ }
+
+ /* ok, no changes there anymore, remove */
+ binaryheap_remove_first(state->heap);
+
+ return change;
+}
+
+/*
+ * Deallocate the iterator
+ */
+static void
+ReorderBufferIterTXNFinish(ReorderBuffer *rb,
+ ReorderBufferIterTXNState *state)
+{
+ int32 off;
+
+ for (off = 0; off < state->nr_txns; off++)
+ {
+ if (state->entries[off].file.vfd != -1)
+ FileClose(state->entries[off].file.vfd);
+ }
+
+ /* free memory we might have "leaked" in the last *Next call */
+ if (!dlist_is_empty(&state->old_change))
+ {
+ ReorderBufferChange *change;
+
+ change = dlist_container(ReorderBufferChange, node,
+ dlist_pop_head_node(&state->old_change));
+ ReorderBufferReturnChange(rb, change);
+ Assert(dlist_is_empty(&state->old_change));
+ }
+
+ binaryheap_free(state->heap);
+ pfree(state);
+}
+
+/*
+ * Clean up the contents of a transaction, usually after the transaction
+ * has committed or aborted.
+ */
+static void
+ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
+{
+ bool found;
+ dlist_mutable_iter iter;
+
+ /* cleanup subtransactions & their changes */
+ dlist_foreach_modify(iter, &txn->subtxns)
+ {
+ ReorderBufferTXN *subtxn;
+
+ subtxn = dlist_container(ReorderBufferTXN, node, iter.cur);
+
+ /*
+		 * Subtransactions are always associated with the toplevel TXN, even
+		 * if they originally happened inside another subtxn, so we won't
+ * ever recurse more than one level deep here.
+ */
+ Assert(rbtxn_is_known_subxact(subtxn));
+ Assert(subtxn->nsubtxns == 0);
+
+ ReorderBufferCleanupTXN(rb, subtxn);
+ }
+
+ /* cleanup changes in the toplevel txn */
+ dlist_foreach_modify(iter, &txn->changes)
+ {
+ ReorderBufferChange *change;
+
+ change = dlist_container(ReorderBufferChange, node, iter.cur);
+
+ /* Check we're not mixing changes from different transactions. */
+ Assert(change->txn == txn);
+
+ ReorderBufferReturnChange(rb, change);
+ }
+
+ /*
+	 * Clean up the tuplecids we stored for decoding catalog snapshot access.
+ * They are always stored in the toplevel transaction.
+ */
+ dlist_foreach_modify(iter, &txn->tuplecids)
+ {
+ ReorderBufferChange *change;
+
+ change = dlist_container(ReorderBufferChange, node, iter.cur);
+
+ /* Check we're not mixing changes from different transactions. */
+ Assert(change->txn == txn);
+ Assert(change->action == REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID);
+
+ ReorderBufferReturnChange(rb, change);
+ }
+
+ /*
+	 * Clean up the base snapshot, if set.
+ */
+ if (txn->base_snapshot != NULL)
+ {
+ SnapBuildSnapDecRefcount(txn->base_snapshot);
+ dlist_delete(&txn->base_snapshot_node);
+ }
+
+ /*
+ * Remove TXN from its containing list.
+ *
+ * Note: if txn is known as subxact, we are deleting the TXN from its
+ * parent's list of known subxacts; this leaves the parent's nsubxacts
+ * count too high, but we don't care. Otherwise, we are deleting the TXN
+ * from the LSN-ordered list of toplevel TXNs.
+ */
+ dlist_delete(&txn->node);
+
+ /* now remove reference from buffer */
+ hash_search(rb->by_txn,
+ (void *) &txn->xid,
+ HASH_REMOVE,
+ &found);
+ Assert(found);
+
+ /* remove entries spilled to disk */
+ if (rbtxn_is_serialized(txn))
+ ReorderBufferRestoreCleanup(rb, txn);
+
+ /* deallocate */
+ ReorderBufferReturnTXN(rb, txn);
+}
+
+/*
+ * Build a hash with a (relfilenode, ctid) -> (cmin, cmax) mapping for use by
+ * HeapTupleSatisfiesHistoricMVCC.
+ */
+static void
+ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
+{
+ dlist_iter iter;
+ HASHCTL hash_ctl;
+
+ if (!rbtxn_has_catalog_changes(txn) || dlist_is_empty(&txn->tuplecids))
+ return;
+
+ memset(&hash_ctl, 0, sizeof(hash_ctl));
+
+ hash_ctl.keysize = sizeof(ReorderBufferTupleCidKey);
+ hash_ctl.entrysize = sizeof(ReorderBufferTupleCidEnt);
+ hash_ctl.hcxt = rb->context;
+
+ /*
+ * create the hash with the exact number of to-be-stored tuplecids from
+ * the start
+ */
+ txn->tuplecid_hash =
+ hash_create("ReorderBufferTupleCid", txn->ntuplecids, &hash_ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+
+ dlist_foreach(iter, &txn->tuplecids)
+ {
+ ReorderBufferTupleCidKey key;
+ ReorderBufferTupleCidEnt *ent;
+ bool found;
+ ReorderBufferChange *change;
+
+ change = dlist_container(ReorderBufferChange, node, iter.cur);
+
+ Assert(change->action == REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID);
+
+ /* be careful about padding */
+ memset(&key, 0, sizeof(ReorderBufferTupleCidKey));
+
+ key.relnode = change->data.tuplecid.node;
+
+ ItemPointerCopy(&change->data.tuplecid.tid,
+ &key.tid);
+
+ ent = (ReorderBufferTupleCidEnt *)
+ hash_search(txn->tuplecid_hash,
+ (void *) &key,
+ HASH_ENTER,
+ &found);
+ if (!found)
+ {
+ ent->cmin = change->data.tuplecid.cmin;
+ ent->cmax = change->data.tuplecid.cmax;
+ ent->combocid = change->data.tuplecid.combocid;
+ }
+ else
+ {
+ /*
+ * Maybe we already saw this tuple before in this transaction, but
+ * if so it must have the same cmin.
+ */
+ Assert(ent->cmin == change->data.tuplecid.cmin);
+
+ /*
+ * cmax may be initially invalid, but once set it can only grow,
+ * and never become invalid again.
+ */
+ Assert((ent->cmax == InvalidCommandId) ||
+ ((change->data.tuplecid.cmax != InvalidCommandId) &&
+ (change->data.tuplecid.cmax > ent->cmax)));
+ ent->cmax = change->data.tuplecid.cmax;
+ }
+ }
+}
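+
+/*
+ * Illustrative sketch (not part of this file) of how the hash built above
+ * is consumed: HeapTupleSatisfiesHistoricMVCC probes it with a tuple's
+ * (relfilenode, ctid) to recover cmin/cmax. Names are assumptions here.
+ */
+#ifdef REORDER_BUFFER_TUPLECID_EXAMPLE
+static bool
+example_lookup_cids(HTAB *tuplecid_hash, RelFileNode node, ItemPointer tid,
+					CommandId *cmin, CommandId *cmax)
+{
+	ReorderBufferTupleCidKey key;
+	ReorderBufferTupleCidEnt *ent;
+
+	/* zero first: the key is hashed as raw bytes, padding included */
+	memset(&key, 0, sizeof(key));
+	key.relnode = node;
+	ItemPointerCopy(tid, &key.tid);
+
+	ent = (ReorderBufferTupleCidEnt *)
+		hash_search(tuplecid_hash, (void *) &key, HASH_FIND, NULL);
+	if (ent == NULL)
+		return false;			/* tuple not touched by this txn */
+
+	*cmin = ent->cmin;
+	*cmax = ent->cmax;
+	return true;
+}
+#endif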
+
+/*
+ * Copy a provided snapshot so we can modify it privately. This is needed so
+ * that catalog modifying transactions can look into intermediate catalog
+ * states.
+ */
+static Snapshot
+ReorderBufferCopySnap(ReorderBuffer *rb, Snapshot orig_snap,
+ ReorderBufferTXN *txn, CommandId cid)
+{
+ Snapshot snap;
+ dlist_iter iter;
+ int i = 0;
+ Size size;
+
+ size = sizeof(SnapshotData) +
+ sizeof(TransactionId) * orig_snap->xcnt +
+ sizeof(TransactionId) * (txn->nsubtxns + 1);
+
+ snap = MemoryContextAllocZero(rb->context, size);
+ memcpy(snap, orig_snap, sizeof(SnapshotData));
+
+ snap->copied = true;
+ snap->active_count = 1; /* mark as active so nobody frees it */
+ snap->regd_count = 0;
+ snap->xip = (TransactionId *) (snap + 1);
+
+ memcpy(snap->xip, orig_snap->xip, sizeof(TransactionId) * snap->xcnt);
+
+ /*
+ * snap->subxip contains all txids that belong to our transaction which we
+ * need to check via cmin/cmax. That's why we store the toplevel
+ * transaction in there as well.
+ */
+ snap->subxip = snap->xip + snap->xcnt;
+ snap->subxip[i++] = txn->xid;
+
+ /*
+ * subxcnt isn't decreased when subtransactions abort, so count manually.
+ * Since it's an upper boundary it is safe to use it for the allocation
+ * above.
+ */
+ snap->subxcnt = 1;
+
+ dlist_foreach(iter, &txn->subtxns)
+ {
+ ReorderBufferTXN *sub_txn;
+
+ sub_txn = dlist_container(ReorderBufferTXN, node, iter.cur);
+ snap->subxip[i++] = sub_txn->xid;
+ snap->subxcnt++;
+ }
+
+ /* sort so we can bsearch() later */
+ qsort(snap->subxip, snap->subxcnt, sizeof(TransactionId), xidComparator);
+
+ /* store the specified current CommandId */
+ snap->curcid = cid;
+
+ return snap;
+}
+
+/*
+ * Free a previously ReorderBufferCopySnap'ed snapshot
+ */
+static void
+ReorderBufferFreeSnap(ReorderBuffer *rb, Snapshot snap)
+{
+ if (snap->copied)
+ pfree(snap);
+ else
+ SnapBuildSnapDecRefcount(snap);
+}
+
+/*
+ * Perform the replay of a transaction and its non-aborted subtransactions.
+ *
+ * Subtransactions must previously have been processed by
+ * ReorderBufferCommitChild(), even if already assigned to the toplevel
+ * transaction with ReorderBufferAssignChild.
+ *
+ * We currently can only decode a transaction's contents when its commit
+ * record is read because that's the only place where we know about cache
+ * invalidations. Thus, once a toplevel commit is read, we iterate over the top
+ * and subtransactions (using a k-way merge) and replay the changes in lsn
+ * order.
+ */
+void
+ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr commit_lsn, XLogRecPtr end_lsn,
+ TimestampTz commit_time,
+ RepOriginId origin_id, XLogRecPtr origin_lsn)
+{
+ ReorderBufferTXN *txn;
+ volatile Snapshot snapshot_now;
+ volatile CommandId command_id = FirstCommandId;
+ bool using_subtxn;
+ ReorderBufferIterTXNState *volatile iterstate = NULL;
+
+ txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr,
+ false);
+
+ /* unknown transaction, nothing to replay */
+ if (txn == NULL)
+ return;
+
+ txn->final_lsn = commit_lsn;
+ txn->end_lsn = end_lsn;
+ txn->commit_time = commit_time;
+ txn->origin_id = origin_id;
+ txn->origin_lsn = origin_lsn;
+
+ /*
+ * If this transaction has no snapshot, it didn't make any changes to the
+ * database, so there's nothing to decode. Note that
+ * ReorderBufferCommitChild will have transferred any snapshots from
+ * subtransactions if there were any.
+ */
+ if (txn->base_snapshot == NULL)
+ {
+ Assert(txn->ninvalidations == 0);
+ ReorderBufferCleanupTXN(rb, txn);
+ return;
+ }
+
+ snapshot_now = txn->base_snapshot;
+
+ /* build data to be able to lookup the CommandIds of catalog tuples */
+ ReorderBufferBuildTupleCidHash(rb, txn);
+
+ /* setup the initial snapshot */
+ SetupHistoricSnapshot(snapshot_now, txn->tuplecid_hash);
+
+ /*
+ * Decoding needs access to syscaches et al., which in turn use
+ * heavyweight locks and such. Thus we need to have enough state around to
+ * keep track of those. The easiest way is to simply use a transaction
+ * internally. That also allows us to easily enforce that nothing writes
+ * to the database by checking for xid assignments.
+ *
+ * When we're called via the SQL SRF there's already a transaction
+ * started, so start an explicit subtransaction there.
+ */
+ using_subtxn = IsTransactionOrTransactionBlock();
+
+ PG_TRY();
+ {
+ ReorderBufferChange *change;
+ ReorderBufferChange *specinsert = NULL;
+
+ if (using_subtxn)
+ BeginInternalSubTransaction("replay");
+ else
+ StartTransactionCommand();
+
+ rb->begin(rb, txn);
+
+ ReorderBufferIterTXNInit(rb, txn, &iterstate);
+ while ((change = ReorderBufferIterTXNNext(rb, iterstate)) != NULL)
+ {
+ Relation relation = NULL;
+ Oid reloid;
+
+ switch (change->action)
+ {
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
+
+ /*
+ * Confirmation for speculative insertion arrived. Simply
+					 * use it as a normal record. It'll be cleaned up at the end
+ * of INSERT processing.
+ */
+ if (specinsert == NULL)
+ elog(ERROR, "invalid ordering of speculative insertion changes");
+ Assert(specinsert->data.tp.oldtuple == NULL);
+ change = specinsert;
+ change->action = REORDER_BUFFER_CHANGE_INSERT;
+
+ /* intentionally fall through */
+ case REORDER_BUFFER_CHANGE_INSERT:
+ case REORDER_BUFFER_CHANGE_UPDATE:
+ case REORDER_BUFFER_CHANGE_DELETE:
+ Assert(snapshot_now);
+
+ reloid = RelidByRelfilenode(change->data.tp.relnode.spcNode,
+ change->data.tp.relnode.relNode);
+
+ /*
+					 * Mapped catalog tuple without data, emitted while the
+					 * catalog table was in the process of being rewritten. We
+					 * can fail to look up the relfilenode, because the
+					 * relmapper has no "historic" view, in contrast to the
+					 * normal catalog during decoding. Thus repeated
+ * rewrites can cause a lookup failure. That's OK because
+ * we do not decode catalog changes anyway. Normally such
+ * tuples would be skipped over below, but we can't
+ * identify whether the table should be logically logged
+ * without mapping the relfilenode to the oid.
+ */
+ if (reloid == InvalidOid &&
+ change->data.tp.newtuple == NULL &&
+ change->data.tp.oldtuple == NULL)
+ goto change_done;
+ else if (reloid == InvalidOid)
+ elog(ERROR, "could not map filenode \"%s\" to relation OID",
+ relpathperm(change->data.tp.relnode,
+ MAIN_FORKNUM));
+
+ relation = RelationIdGetRelation(reloid);
+
+ if (!RelationIsValid(relation))
+ elog(ERROR, "could not open relation with OID %u (for filenode \"%s\")",
+ reloid,
+ relpathperm(change->data.tp.relnode,
+ MAIN_FORKNUM));
+
+ if (!RelationIsLogicallyLogged(relation))
+ goto change_done;
+
+ /*
+ * Ignore temporary heaps created during DDL unless the
+ * plugin has asked for them.
+ */
+ if (relation->rd_rel->relrewrite && !rb->output_rewrites)
+ goto change_done;
+
+ /*
+ * For now ignore sequence changes entirely. Most of the
+ * time they don't log changes using records we
+ * understand, so it doesn't make sense to handle the few
+ * cases we do.
+ */
+ if (relation->rd_rel->relkind == RELKIND_SEQUENCE)
+ goto change_done;
+
+ /* user-triggered change */
+ if (!IsToastRelation(relation))
+ {
+ ReorderBufferToastReplace(rb, txn, relation, change);
+ rb->apply_change(rb, txn, relation, change);
+
+ /*
+ * Only clear reassembled toast chunks if we're sure
+ * they're not required anymore. The creator of the
+ * tuple tells us.
+ */
+ if (change->data.tp.clear_toast_afterwards)
+ ReorderBufferToastReset(rb, txn);
+ }
+ /* we're not interested in toast deletions */
+ else if (change->action == REORDER_BUFFER_CHANGE_INSERT)
+ {
+ /*
+ * Need to reassemble the full toasted Datum in
+						 * memory, to ensure the chunks don't get reused till
+						 * we're done. Remove it from the list of this
+ * transaction's changes. Otherwise it will get
+ * freed/reused while restoring spooled data from
+ * disk.
+ */
+ Assert(change->data.tp.newtuple != NULL);
+
+ dlist_delete(&change->node);
+ ReorderBufferToastAppendChunk(rb, txn, relation,
+ change);
+ }
+
+ change_done:
+
+ /*
+ * If speculative insertion was confirmed, the record isn't
+ * needed anymore.
+ */
+ if (specinsert != NULL)
+ {
+ ReorderBufferReturnChange(rb, specinsert);
+ specinsert = NULL;
+ }
+
+ if (relation != NULL)
+ {
+ RelationClose(relation);
+ relation = NULL;
+ }
+ break;
+
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
+
+ /*
+ * Speculative insertions are dealt with by delaying the
+ * processing of the insert until the confirmation record
+ * arrives. For that we simply unlink the record from the
+ * chain, so it does not get freed/reused while restoring
+ * spooled data from disk.
+ *
+ * This is safe in the face of concurrent catalog changes
+ * because the relevant relation can't be changed between
+ * speculative insertion and confirmation due to
+ * CheckTableNotInUse() and locking.
+ */
+
+ /* clear out a pending (and thus failed) speculation */
+ if (specinsert != NULL)
+ {
+ ReorderBufferReturnChange(rb, specinsert);
+ specinsert = NULL;
+ }
+
+ /* and memorize the pending insertion */
+ dlist_delete(&change->node);
+ specinsert = change;
+ break;
+
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT:
+
+ /*
+					 * Abort for speculative insertion arrived. So clean up the
+ * specinsert tuple and toast hash.
+ *
+ * Note that we get the spec abort change for each toast
+ * entry but we need to perform the cleanup only the first
+ * time we get it for the main table.
+ */
+ if (specinsert != NULL)
+ {
+ /*
+ * We must clean the toast hash before processing a
+ * completely new tuple to avoid confusion about the
+ * previous tuple's toast chunks.
+ */
+ Assert(change->data.tp.clear_toast_afterwards);
+ ReorderBufferToastReset(rb, txn);
+
+ /* We don't need this record anymore. */
+ ReorderBufferReturnChange(rb, specinsert);
+ specinsert = NULL;
+ }
+ break;
+
+ case REORDER_BUFFER_CHANGE_TRUNCATE:
+ {
+ int i;
+ int nrelids = change->data.truncate.nrelids;
+ int nrelations = 0;
+ Relation *relations;
+
+ relations = palloc0(nrelids * sizeof(Relation));
+ for (i = 0; i < nrelids; i++)
+ {
+ Oid relid = change->data.truncate.relids[i];
+ Relation relation;
+
+ relation = RelationIdGetRelation(relid);
+
+ if (!RelationIsValid(relation))
+ elog(ERROR, "could not open relation with OID %u", relid);
+
+ if (!RelationIsLogicallyLogged(relation))
+ continue;
+
+ relations[nrelations++] = relation;
+ }
+
+ rb->apply_truncate(rb, txn, nrelations, relations, change);
+
+ for (i = 0; i < nrelations; i++)
+ RelationClose(relations[i]);
+
+ break;
+ }
+
+ case REORDER_BUFFER_CHANGE_MESSAGE:
+ rb->message(rb, txn, change->lsn, true,
+ change->data.msg.prefix,
+ change->data.msg.message_size,
+ change->data.msg.message);
+ break;
+
+ case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT:
+ /* get rid of the old */
+ TeardownHistoricSnapshot(false);
+
+ if (snapshot_now->copied)
+ {
+ ReorderBufferFreeSnap(rb, snapshot_now);
+ snapshot_now =
+ ReorderBufferCopySnap(rb, change->data.snapshot,
+ txn, command_id);
+ }
+
+ /*
+ * Restored from disk, need to be careful not to double
+ * free. We could introduce refcounting for that, but for
+ * now this seems infrequent enough not to care.
+ */
+ else if (change->data.snapshot->copied)
+ {
+ snapshot_now =
+ ReorderBufferCopySnap(rb, change->data.snapshot,
+ txn, command_id);
+ }
+ else
+ {
+ snapshot_now = change->data.snapshot;
+ }
+
+ /* and continue with the new one */
+ SetupHistoricSnapshot(snapshot_now, txn->tuplecid_hash);
+ break;
+
+ case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
+ Assert(change->data.command_id != InvalidCommandId);
+
+ if (command_id < change->data.command_id)
+ {
+ command_id = change->data.command_id;
+
+ if (!snapshot_now->copied)
+ {
+ /* we don't use the global one anymore */
+ snapshot_now = ReorderBufferCopySnap(rb, snapshot_now,
+ txn, command_id);
+ }
+
+ snapshot_now->curcid = command_id;
+
+ TeardownHistoricSnapshot(false);
+ SetupHistoricSnapshot(snapshot_now, txn->tuplecid_hash);
+
+ /*
+ * Every time the CommandId is incremented, we could
+ * see new catalog contents, so execute all
+ * invalidations.
+ */
+ ReorderBufferExecuteInvalidations(rb, txn);
+ }
+
+ break;
+
+ case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
+ elog(ERROR, "tuplecid value in changequeue");
+ break;
+ }
+ }
+
+ /* speculative insertion record must be freed by now */
+ Assert(!specinsert);
+
+ /* clean up the iterator */
+ ReorderBufferIterTXNFinish(rb, iterstate);
+ iterstate = NULL;
+
+ /* call commit callback */
+ rb->commit(rb, txn, commit_lsn);
+
+ /* this is just a sanity check against bad output plugin behaviour */
+ if (GetCurrentTransactionIdIfAny() != InvalidTransactionId)
+ elog(ERROR, "output plugin used XID %u",
+ GetCurrentTransactionId());
+
+ /* cleanup */
+ TeardownHistoricSnapshot(false);
+
+ /*
+ * Aborting the current (sub-)transaction as a whole has the right
+ * semantics. We want all locks acquired in here to be released, not
+ * reassigned to the parent, and we do not want any database access
+ * to have persistent effects.
+ */
+ AbortCurrentTransaction();
+
+ /* make sure there's no cache pollution */
+ ReorderBufferExecuteInvalidations(rb, txn);
+
+ if (using_subtxn)
+ RollbackAndReleaseCurrentSubTransaction();
+
+ if (snapshot_now->copied)
+ ReorderBufferFreeSnap(rb, snapshot_now);
+
+ /* remove potential on-disk data, and deallocate */
+ ReorderBufferCleanupTXN(rb, txn);
+ }
+ PG_CATCH();
+ {
+ /* TODO: Encapsulate cleanup from the PG_TRY and PG_CATCH blocks */
+ if (iterstate)
+ ReorderBufferIterTXNFinish(rb, iterstate);
+
+ TeardownHistoricSnapshot(true);
+
+ /*
+ * Force cache invalidation to happen outside of a valid transaction
+ * to prevent catalog access as we just caught an error.
+ */
+ AbortCurrentTransaction();
+
+ /* make sure there's no cache pollution */
+ ReorderBufferExecuteInvalidations(rb, txn);
+
+ if (using_subtxn)
+ RollbackAndReleaseCurrentSubTransaction();
+
+ if (snapshot_now->copied)
+ ReorderBufferFreeSnap(rb, snapshot_now);
+
+ /* remove potential on-disk data, and deallocate */
+ ReorderBufferCleanupTXN(rb, txn);
+
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+}
+
+/*
+ * Abort a transaction that possibly has previous changes. Needs to be first
+ * called for subtransactions and then for the toplevel xid.
+ *
+ * NB: Transactions handled here have to have actively aborted (i.e. have
+ * produced an abort record). Implicitly aborted transactions are handled via
+ * ReorderBufferAbortOld(); transactions we're just not interested in, but
+ * which have committed are handled in ReorderBufferForget().
+ *
+ * This function purges this transaction and its contents from memory and
+ * disk.
+ */
+void
+ReorderBufferAbort(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
+{
+ ReorderBufferTXN *txn;
+
+ txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr,
+ false);
+
+ /* unknown, nothing to remove */
+ if (txn == NULL)
+ return;
+
+ /* cosmetic... */
+ txn->final_lsn = lsn;
+
+ /* remove potential on-disk data, and deallocate */
+ ReorderBufferCleanupTXN(rb, txn);
+}
+
+/*
+ * Abort all transactions that aren't actually running anymore because the
+ * server restarted.
+ *
+ * NB: These really have to be transactions that have aborted due to a server
+ * crash/immediate restart, as we don't deal with invalidations here.
+ */
+void
+ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid)
+{
+ dlist_mutable_iter it;
+
+ /*
+ * Iterate through all (potential) toplevel TXNs and abort all that are
+ * older than what possibly can be running. Once we've found the first
+ * one that is still alive we stop; there might be some that acquired an
+ * xid earlier but started writing later, but that's unlikely and they
+ * will be cleaned up in a later call to this function.
+ */
+ dlist_foreach_modify(it, &rb->toplevel_by_lsn)
+ {
+ ReorderBufferTXN *txn;
+
+ txn = dlist_container(ReorderBufferTXN, node, it.cur);
+
+ if (TransactionIdPrecedes(txn->xid, oldestRunningXid))
+ {
+ elog(DEBUG2, "aborting old transaction %u", txn->xid);
+
+ /* remove potential on-disk data, and deallocate this tx */
+ ReorderBufferCleanupTXN(rb, txn);
+ }
+ else
+ return;
+ }
+}
+
+/*
+ * Forget the contents of a transaction if we aren't interested in its
+ * contents. Needs to be first called for subtransactions and then for the
+ * toplevel xid.
+ *
+ * This is significantly different from ReorderBufferAbort() because
+ * transactions that have committed need to be treated differently from aborted
+ * ones since they may have modified the catalog.
+ *
+ * Note that this is only allowed to be called in the moment a transaction
+ * commit has just been read, not earlier; otherwise later records referring
+ * to this xid might re-create the transaction incompletely.
+ */
+void
+ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
+{
+ ReorderBufferTXN *txn;
+
+ txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr,
+ false);
+
+ /* unknown, nothing to forget */
+ if (txn == NULL)
+ return;
+
+ /* cosmetic... */
+ txn->final_lsn = lsn;
+
+ /*
+ * Process cache invalidation messages if there are any. Even if we're not
+ * interested in the transaction's contents, it could have manipulated the
+ * catalog, and we need to update the caches accordingly.
+ */
+ if (txn->base_snapshot != NULL && txn->ninvalidations > 0)
+ ReorderBufferImmediateInvalidation(rb, txn->ninvalidations,
+ txn->invalidations);
+ else
+ Assert(txn->ninvalidations == 0);
+
+ /* remove potential on-disk data, and deallocate */
+ ReorderBufferCleanupTXN(rb, txn);
+}
+
+/*
+ * Execute invalidations happening outside the context of a decoded
+ * transaction. That currently happens either for xid-less commits
+ * (cf. RecordTransactionCommit()) or for invalidations in uninteresting
+ * transactions (via ReorderBufferForget()).
+ */
+void
+ReorderBufferImmediateInvalidation(ReorderBuffer *rb, uint32 ninvalidations,
+ SharedInvalidationMessage *invalidations)
+{
+ bool use_subtxn = IsTransactionOrTransactionBlock();
+ int i;
+
+ if (use_subtxn)
+ BeginInternalSubTransaction("replay");
+
+ /*
+ * Force invalidations to happen outside of a valid transaction - that way
+ * entries will just be marked as invalid without accessing the catalog.
+ * That's advantageous because we don't need to setup the full state
+ * necessary for catalog access.
+ */
+ if (use_subtxn)
+ AbortCurrentTransaction();
+
+ for (i = 0; i < ninvalidations; i++)
+ LocalExecuteInvalidationMessage(&invalidations[i]);
+
+ if (use_subtxn)
+ RollbackAndReleaseCurrentSubTransaction();
+}
+
+/*
+ * Tell reorderbuffer about an xid seen in the WAL stream. Has to be called at
+ * least once for every xid in XLogRecord->xl_xid (xids in other places of
+ * records may, but do not have to, be passed through here).
+ *
+ * Reorderbuffer keeps some data structures about transactions in LSN order,
+ * for efficiency. To do that it has to know when transactions are first
+ * seen in the WAL. As many types of records are not actually interesting for
+ * logical decoding, they do not necessarily pass through here.
+ */
+void
+ReorderBufferProcessXid(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
+{
+ /* many records won't have an xid assigned, centralize check here */
+ if (xid != InvalidTransactionId)
+ ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
+}
+
+/*
+ * Add a new snapshot to this transaction that may only be used after lsn 'lsn'
+ * because the previous snapshot doesn't describe the catalog correctly for
+ * following rows.
+ */
+void
+ReorderBufferAddSnapshot(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn, Snapshot snap)
+{
+ ReorderBufferChange *change = ReorderBufferGetChange(rb);
+
+ change->data.snapshot = snap;
+ change->action = REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT;
+
+ ReorderBufferQueueChange(rb, xid, lsn, change);
+}
+
+/*
+ * Set up the transaction's base snapshot.
+ *
+ * If we know that xid is a subtransaction, set the base snapshot on the
+ * top-level transaction instead.
+ */
+void
+ReorderBufferSetBaseSnapshot(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn, Snapshot snap)
+{
+ ReorderBufferTXN *txn;
+ bool is_new;
+
+ AssertArg(snap != NULL);
+
+ /*
+ * Fetch the transaction to operate on. If we know it's a subtransaction,
+ * operate on its top-level transaction instead.
+ */
+ txn = ReorderBufferTXNByXid(rb, xid, true, &is_new, lsn, true);
+ if (rbtxn_is_known_subxact(txn))
+ txn = ReorderBufferTXNByXid(rb, txn->toplevel_xid, false,
+ NULL, InvalidXLogRecPtr, false);
+ Assert(txn->base_snapshot == NULL);
+
+ txn->base_snapshot = snap;
+ txn->base_snapshot_lsn = lsn;
+ dlist_push_tail(&rb->txns_by_base_snapshot_lsn, &txn->base_snapshot_node);
+
+ AssertTXNLsnOrder(rb);
+}
+
+/*
+ * Access the catalog with this CommandId at this point in the changestream.
+ *
+ * May only be called for command ids > 1
+ */
+void
+ReorderBufferAddNewCommandId(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn, CommandId cid)
+{
+ ReorderBufferChange *change = ReorderBufferGetChange(rb);
+
+ change->data.command_id = cid;
+ change->action = REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID;
+
+ ReorderBufferQueueChange(rb, xid, lsn, change);
+}
+
+/*
+ * Update the memory accounting info. We track memory used by the whole
+ * reorder buffer and the transaction containing the change.
+ */
+static void
+ReorderBufferChangeMemoryUpdate(ReorderBuffer *rb,
+ ReorderBufferChange *change,
+ bool addition)
+{
+ Size sz;
+
+ Assert(change->txn);
+
+ /*
+ * Ignore tuple CID changes, because those are not evicted when reaching
+ * the memory limit. So we just don't count them, because counting them
+ * might easily trigger a pointless attempt to spill.
+ */
+ if (change->action == REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID)
+ return;
+
+ sz = ReorderBufferChangeSize(change);
+
+ if (addition)
+ {
+ change->txn->size += sz;
+ rb->size += sz;
+ }
+ else
+ {
+ Assert((rb->size >= sz) && (change->txn->size >= sz));
+ change->txn->size -= sz;
+ rb->size -= sz;
+ }
+}
+
+/*
+ * Add new (relfilenode, tid) -> (cmin, cmax) mappings.
+ *
+ * We do not include this change type in memory accounting, because we
+ * keep CIDs in a separate list and do not evict them when reaching
+ * the memory limit.
+ */
+void
+ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn, RelFileNode node,
+ ItemPointerData tid, CommandId cmin,
+ CommandId cmax, CommandId combocid)
+{
+ ReorderBufferChange *change = ReorderBufferGetChange(rb);
+ ReorderBufferTXN *txn;
+
+ txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
+
+ change->data.tuplecid.node = node;
+ change->data.tuplecid.tid = tid;
+ change->data.tuplecid.cmin = cmin;
+ change->data.tuplecid.cmax = cmax;
+ change->data.tuplecid.combocid = combocid;
+ change->lsn = lsn;
+ change->txn = txn;
+ change->action = REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID;
+
+ dlist_push_tail(&txn->tuplecids, &change->node);
+ txn->ntuplecids++;
+}
+
+/*
+ * Set up the invalidations of the toplevel transaction.
+ *
+ * This needs to be done before ReorderBufferCommit is called!
+ */
+void
+ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn, Size nmsgs,
+ SharedInvalidationMessage *msgs)
+{
+ ReorderBufferTXN *txn;
+
+ txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
+
+ if (txn->ninvalidations != 0)
+ elog(ERROR, "only ever add one set of invalidations");
+
+ Assert(nmsgs > 0);
+
+ txn->ninvalidations = nmsgs;
+ txn->invalidations = (SharedInvalidationMessage *)
+ MemoryContextAlloc(rb->context,
+ sizeof(SharedInvalidationMessage) * nmsgs);
+ memcpy(txn->invalidations, msgs,
+ sizeof(SharedInvalidationMessage) * nmsgs);
+}
+
+/*
+ * Apply all invalidations we know about. Possibly we only need a subset at
+ * this point in the changestream, but we don't know which ones those are.
+ */
+static void
+ReorderBufferExecuteInvalidations(ReorderBuffer *rb, ReorderBufferTXN *txn)
+{
+ int i;
+
+ for (i = 0; i < txn->ninvalidations; i++)
+ LocalExecuteInvalidationMessage(&txn->invalidations[i]);
+}
+
+/*
+ * Mark a transaction as containing catalog changes
+ */
+void
+ReorderBufferXidSetCatalogChanges(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn)
+{
+ ReorderBufferTXN *txn;
+
+ txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
+
+ txn->txn_flags |= RBTXN_HAS_CATALOG_CHANGES;
+}
+
+/*
+ * Query whether a transaction is already *known* to contain catalog
+ * changes. This can be wrong until directly before the commit!
+ */
+bool
+ReorderBufferXidHasCatalogChanges(ReorderBuffer *rb, TransactionId xid)
+{
+ ReorderBufferTXN *txn;
+
+ txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr,
+ false);
+ if (txn == NULL)
+ return false;
+
+ return rbtxn_has_catalog_changes(txn);
+}
+
+/*
+ * ReorderBufferXidHasBaseSnapshot
+ * Have we already set the base snapshot for the given txn/subtxn?
+ */
+bool
+ReorderBufferXidHasBaseSnapshot(ReorderBuffer *rb, TransactionId xid)
+{
+ ReorderBufferTXN *txn;
+
+ txn = ReorderBufferTXNByXid(rb, xid, false,
+ NULL, InvalidXLogRecPtr, false);
+
+ /* transaction isn't known yet, ergo no snapshot */
+ if (txn == NULL)
+ return false;
+
+ /* a known subtxn? operate on top-level txn instead */
+ if (rbtxn_is_known_subxact(txn))
+ txn = ReorderBufferTXNByXid(rb, txn->toplevel_xid, false,
+ NULL, InvalidXLogRecPtr, false);
+
+ return txn->base_snapshot != NULL;
+}
+
+
+/*
+ * ---------------------------------------
+ * Disk serialization support
+ * ---------------------------------------
+ */
+
+/*
+ * Ensure the IO buffer is >= sz.
+ */
+static void
+ReorderBufferSerializeReserve(ReorderBuffer *rb, Size sz)
+{
+ if (!rb->outbufsize)
+ {
+ rb->outbuf = MemoryContextAlloc(rb->context, sz);
+ rb->outbufsize = sz;
+ }
+ else if (rb->outbufsize < sz)
+ {
+ rb->outbuf = repalloc(rb->outbuf, sz);
+ rb->outbufsize = sz;
+ }
+}
+
+/*
+ * Find the largest transaction (toplevel or subxact) to evict (spill to disk).
+ *
+ * XXX With many subtransactions this might be quite slow, because we'll have
+ * to walk through all of them. There are some options for improving that:
+ * (a) maintain a secondary structure with transactions sorted by amount of
+ * changes, (b) don't look for the single largest transaction, but e.g. for
+ * one using at least some fraction of the memory limit, and (c) evict
+ * multiple transactions at once, e.g. to free a given portion of the memory
+ * limit (e.g. 50%).
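+ *
+ * A minimal sketch of option (a), purely illustrative (nothing below is
+ * implemented; "size_node" and "txn_heap" would be hypothetical additions
+ * to ReorderBufferTXN and ReorderBuffer): keep the transactions in a
+ * pairing heap ordered by size, e.g. with a comparator like
+ *
+ *	static int
+ *	txn_size_cmp(const pairingheap_node *a, const pairingheap_node *b,
+ *				 void *arg)
+ *	{
+ *		const ReorderBufferTXN *ta =
+ *			pairingheap_const_container(ReorderBufferTXN, size_node, a);
+ *		const ReorderBufferTXN *tb =
+ *			pairingheap_const_container(ReorderBufferTXN, size_node, b);
+ *
+ *		return (ta->size < tb->size) ? -1 : (ta->size == tb->size) ? 0 : 1;
+ *	}
+ *
+ * With the heap kept up to date by ReorderBufferChangeMemoryUpdate(), the
+ * largest transaction would simply be pairingheap_first(rb->txn_heap),
+ * turning this linear scan into an O(1) lookup.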
+ */
+static ReorderBufferTXN *
+ReorderBufferLargestTXN(ReorderBuffer *rb)
+{
+ HASH_SEQ_STATUS hash_seq;
+ ReorderBufferTXNByIdEnt *ent;
+ ReorderBufferTXN *largest = NULL;
+
+ hash_seq_init(&hash_seq, rb->by_txn);
+ while ((ent = hash_seq_search(&hash_seq)) != NULL)
+ {
+ ReorderBufferTXN *txn = ent->txn;
+
+ /* if the current transaction is larger, remember it */
+ if ((!largest) || (txn->size > largest->size))
+ largest = txn;
+ }
+
+ Assert(largest);
+ Assert(largest->size > 0);
+ Assert(largest->size <= rb->size);
+
+ return largest;
+}
+
+/*
+ * Check whether the logical_decoding_work_mem limit was reached, and if so
+ * pick the largest (sub)transaction, one at a time, to evict and spill its
+ * changes to disk until we get under the memory limit.
+ *
+ * XXX At this point we select transactions until we get under the memory
+ * limit, but we might also adopt a more elaborate eviction strategy - for
+ * example evicting enough transactions to free a certain fraction (e.g.
+ * 50%) of the memory limit.
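+ *
+ * As a worked example (numbers made up for illustration): with the default
+ * logical_decoding_work_mem of 64MB, eviction starts once rb->size reaches
+ * 64MB (65536kB * 1024 bytes). If the largest transaction alone accounts
+ * for, say, 50MB of changes, serializing it drops rb->size by 50MB, and the
+ * loop below re-checks the limit, possibly evicting further transactions.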
+ */
+static void
+ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
+{
+ ReorderBufferTXN *txn;
+
+ /* bail out if we haven't exceeded the memory limit */
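+ /* (logical_decoding_work_mem is in kilobytes, rb->size in bytes) */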
+ if (rb->size < logical_decoding_work_mem * 1024L)
+ return;
+
+ /*
+ * Loop until we get under the memory limit. One might think that just
+ * by evicting the largest (sub)transaction we will come under the memory
+ * limit, based on the assumption that the selected transaction is at least
+ * as large as the most recent change (which caused us to go over the memory
+ * limit). However, that is not true because a user can reduce the
+ * logical_decoding_work_mem to a smaller value before the most recent
+ * change.
+ */
+ while (rb->size >= logical_decoding_work_mem * 1024L)
+ {
+ /*
+ * Pick the largest transaction (or subtransaction) and evict it from
+ * memory by serializing it to disk.
+ */
+ txn = ReorderBufferLargestTXN(rb);
+
+ ReorderBufferSerializeTXN(rb, txn);
+
+ /*
+ * After eviction, the transaction should have no entries in memory,
+ * and should use 0 bytes for changes.
+ */
+ Assert(txn->size == 0);
+ Assert(txn->nentries_mem == 0);
+ }
+
+ /* We must be under the memory limit now. */
+ Assert(rb->size < logical_decoding_work_mem * 1024L);
+}
+
+/*
+ * Spill data of a large transaction (and its subtransactions) to disk.
+ */
+static void
+ReorderBufferSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
+{
+ dlist_iter subtxn_i;
+ dlist_mutable_iter change_i;
+ int fd = -1;
+ XLogSegNo curOpenSegNo = 0;
+ Size spilled = 0;
+
+ elog(DEBUG2, "spill %u changes in XID %u to disk",
+ (uint32) txn->nentries_mem, txn->xid);
+
+ /* do the same to all child TXs */
+ dlist_foreach(subtxn_i, &txn->subtxns)
+ {
+ ReorderBufferTXN *subtxn;
+
+ subtxn = dlist_container(ReorderBufferTXN, node, subtxn_i.cur);
+ ReorderBufferSerializeTXN(rb, subtxn);
+ }
+
+ /* serialize changestream */
+ dlist_foreach_modify(change_i, &txn->changes)
+ {
+ ReorderBufferChange *change;
+
+ change = dlist_container(ReorderBufferChange, node, change_i.cur);
+
+ /*
+ * Store the change in the segment to which it belongs by start LSN, but
+ * don't split a change over multiple segments.
+ */
+ if (fd == -1 ||
+ !XLByteInSeg(change->lsn, curOpenSegNo, wal_segment_size))
+ {
+ char path[MAXPGPATH];
+
+ if (fd != -1)
+ CloseTransientFile(fd);
+
+ XLByteToSeg(change->lsn, curOpenSegNo, wal_segment_size);
+
+ /*
+ * No need to care about TLIs here, only used during a single run,
+ * so each LSN only maps to a specific WAL record.
+ */
+ ReorderBufferSerializedPath(path, MyReplicationSlot, txn->xid,
+ curOpenSegNo);
+
+ /* open segment, create it if necessary */
+ fd = OpenTransientFile(path,
+ O_CREAT | O_WRONLY | O_APPEND | PG_BINARY);
+
+ if (fd < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m", path)));
+ }
+
+ ReorderBufferSerializeChange(rb, txn, fd, change);
+ dlist_delete(&change->node);
+ ReorderBufferReturnChange(rb, change);
+
+ spilled++;
+ }
+
+ Assert(spilled == txn->nentries_mem);
+ Assert(dlist_is_empty(&txn->changes));
+ txn->nentries_mem = 0;
+ txn->txn_flags |= RBTXN_IS_SERIALIZED;
+
+ if (fd != -1)
+ CloseTransientFile(fd);
+}
+
+/*
+ * Serialize individual change to disk.
+ */
+static void
+ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ int fd, ReorderBufferChange *change)
+{
+ ReorderBufferDiskChange *ondisk;
+ Size sz = sizeof(ReorderBufferDiskChange);
+
+ ReorderBufferSerializeReserve(rb, sz);
+
+ ondisk = (ReorderBufferDiskChange *) rb->outbuf;
+ memcpy(&ondisk->change, change, sizeof(ReorderBufferChange));
+
+ switch (change->action)
+ {
+ /* fall through these, they're all similar enough */
+ case REORDER_BUFFER_CHANGE_INSERT:
+ case REORDER_BUFFER_CHANGE_UPDATE:
+ case REORDER_BUFFER_CHANGE_DELETE:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
+ {
+ char *data;
+ ReorderBufferTupleBuf *oldtup,
+ *newtup;
+ Size oldlen = 0;
+ Size newlen = 0;
+
+ oldtup = change->data.tp.oldtuple;
+ newtup = change->data.tp.newtuple;
+
+ if (oldtup)
+ {
+ sz += sizeof(HeapTupleData);
+ oldlen = oldtup->tuple.t_len;
+ sz += oldlen;
+ }
+
+ if (newtup)
+ {
+ sz += sizeof(HeapTupleData);
+ newlen = newtup->tuple.t_len;
+ sz += newlen;
+ }
+
+ /* make sure we have enough space */
+ ReorderBufferSerializeReserve(rb, sz);
+
+ data = ((char *) rb->outbuf) + sizeof(ReorderBufferDiskChange);
+ /* might have been reallocated above */
+ ondisk = (ReorderBufferDiskChange *) rb->outbuf;
+
+ if (oldlen)
+ {
+ memcpy(data, &oldtup->tuple, sizeof(HeapTupleData));
+ data += sizeof(HeapTupleData);
+
+ memcpy(data, oldtup->tuple.t_data, oldlen);
+ data += oldlen;
+ }
+
+ if (newlen)
+ {
+ memcpy(data, &newtup->tuple, sizeof(HeapTupleData));
+ data += sizeof(HeapTupleData);
+
+ memcpy(data, newtup->tuple.t_data, newlen);
+ data += newlen;
+ }
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_MESSAGE:
+ {
+ char *data;
+ Size prefix_size = strlen(change->data.msg.prefix) + 1;
+
+ sz += prefix_size + change->data.msg.message_size +
+ sizeof(Size) + sizeof(Size);
+ ReorderBufferSerializeReserve(rb, sz);
+
+ data = ((char *) rb->outbuf) + sizeof(ReorderBufferDiskChange);
+
+ /* might have been reallocated above */
+ ondisk = (ReorderBufferDiskChange *) rb->outbuf;
+
+ /* write the prefix including the size */
+ memcpy(data, &prefix_size, sizeof(Size));
+ data += sizeof(Size);
+ memcpy(data, change->data.msg.prefix,
+ prefix_size);
+ data += prefix_size;
+
+ /* write the message including the size */
+ memcpy(data, &change->data.msg.message_size, sizeof(Size));
+ data += sizeof(Size);
+ memcpy(data, change->data.msg.message,
+ change->data.msg.message_size);
+ data += change->data.msg.message_size;
+
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT:
+ {
+ Snapshot snap;
+ char *data;
+
+ snap = change->data.snapshot;
+
+ sz += sizeof(SnapshotData) +
+ sizeof(TransactionId) * snap->xcnt +
+ sizeof(TransactionId) * snap->subxcnt;
+
+ /* make sure we have enough space */
+ ReorderBufferSerializeReserve(rb, sz);
+ data = ((char *) rb->outbuf) + sizeof(ReorderBufferDiskChange);
+ /* might have been reallocated above */
+ ondisk = (ReorderBufferDiskChange *) rb->outbuf;
+
+ memcpy(data, snap, sizeof(SnapshotData));
+ data += sizeof(SnapshotData);
+
+ if (snap->xcnt)
+ {
+ memcpy(data, snap->xip,
+ sizeof(TransactionId) * snap->xcnt);
+ data += sizeof(TransactionId) * snap->xcnt;
+ }
+
+ if (snap->subxcnt)
+ {
+ memcpy(data, snap->subxip,
+ sizeof(TransactionId) * snap->subxcnt);
+ data += sizeof(TransactionId) * snap->subxcnt;
+ }
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_TRUNCATE:
+ {
+ Size size;
+ char *data;
+
+ /* account for the OIDs of truncated relations */
+ size = sizeof(Oid) * change->data.truncate.nrelids;
+ sz += size;
+
+ /* make sure we have enough space */
+ ReorderBufferSerializeReserve(rb, sz);
+
+ data = ((char *) rb->outbuf) + sizeof(ReorderBufferDiskChange);
+ /* might have been reallocated above */
+ ondisk = (ReorderBufferDiskChange *) rb->outbuf;
+
+ memcpy(data, change->data.truncate.relids, size);
+ data += size;
+
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT:
+ case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
+ case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
+ /* ReorderBufferChange contains everything important */
+ break;
+ }
+
+ ondisk->size = sz;
+
+ errno = 0;
+ pgstat_report_wait_start(WAIT_EVENT_REORDER_BUFFER_WRITE);
+ if (write(fd, rb->outbuf, ondisk->size) != ondisk->size)
+ {
+ int save_errno = errno;
+
+ CloseTransientFile(fd);
+
+ /* if write didn't set errno, assume problem is no disk space */
+ errno = save_errno ? save_errno : ENOSPC;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not write to data file for XID %u: %m",
+ txn->xid)));
+ }
+ pgstat_report_wait_end();
+
+ /*
+ * Keep the transaction's final_lsn up to date with each change we send to
+ * disk, so that ReorderBufferRestoreCleanup works correctly. (We used to
+ * only do this on commit and abort records, but that doesn't work if a
+ * system crash leaves a transaction without its abort record).
+ *
+ * Make sure not to move it backwards.
+ */
+ if (txn->final_lsn < change->lsn)
+ txn->final_lsn = change->lsn;
+
+ Assert(ondisk->change.action == change->action);
+}
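+
+/*
+ * For illustration, the on-disk layout produced above for a change carrying
+ * both an old and a new tuple (and read back by ReorderBufferRestoreChange()
+ * further below) is:
+ *
+ *	ReorderBufferDiskChange		(fixed header; ->size is the total size)
+ *	HeapTupleData			(copy of the old tuple's header)
+ *	old tuple data			(oldtup->tuple.t_len bytes)
+ *	HeapTupleData			(copy of the new tuple's header)
+ *	new tuple data			(newtup->tuple.t_len bytes)
+ *
+ * Messages, snapshots and truncates use the same fixed header followed by
+ * their own variable-length payload, as can be seen in the cases above.
+ */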
+
+/*
+ * Size of a change in memory.
+ */
+static Size
+ReorderBufferChangeSize(ReorderBufferChange *change)
+{
+ Size sz = sizeof(ReorderBufferChange);
+
+ switch (change->action)
+ {
+ /* fall through these, they're all similar enough */
+ case REORDER_BUFFER_CHANGE_INSERT:
+ case REORDER_BUFFER_CHANGE_UPDATE:
+ case REORDER_BUFFER_CHANGE_DELETE:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
+ {
+ ReorderBufferTupleBuf *oldtup,
+ *newtup;
+ Size oldlen = 0;
+ Size newlen = 0;
+
+ oldtup = change->data.tp.oldtuple;
+ newtup = change->data.tp.newtuple;
+
+ if (oldtup)
+ {
+ sz += sizeof(HeapTupleData);
+ oldlen = oldtup->tuple.t_len;
+ sz += oldlen;
+ }
+
+ if (newtup)
+ {
+ sz += sizeof(HeapTupleData);
+ newlen = newtup->tuple.t_len;
+ sz += newlen;
+ }
+
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_MESSAGE:
+ {
+ Size prefix_size = strlen(change->data.msg.prefix) + 1;
+
+ sz += prefix_size + change->data.msg.message_size +
+ sizeof(Size) + sizeof(Size);
+
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT:
+ {
+ Snapshot snap;
+
+ snap = change->data.snapshot;
+
+ sz += sizeof(SnapshotData) +
+ sizeof(TransactionId) * snap->xcnt +
+ sizeof(TransactionId) * snap->subxcnt;
+
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_TRUNCATE:
+ {
+ sz += sizeof(Oid) * change->data.truncate.nrelids;
+
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT:
+ case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
+ case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
+ /* ReorderBufferChange contains everything important */
+ break;
+ }
+
+ return sz;
+}
+
+
+/*
+ * Restore a number of changes spilled to disk back into memory.
+ */
+static Size
+ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ TXNEntryFile *file, XLogSegNo *segno)
+{
+ Size restored = 0;
+ XLogSegNo last_segno;
+ dlist_mutable_iter cleanup_iter;
+ File *fd = &file->vfd;
+
+ Assert(txn->first_lsn != InvalidXLogRecPtr);
+ Assert(txn->final_lsn != InvalidXLogRecPtr);
+
+ /* free current entries, so we have memory for more */
+ dlist_foreach_modify(cleanup_iter, &txn->changes)
+ {
+ ReorderBufferChange *cleanup =
+ dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
+
+ dlist_delete(&cleanup->node);
+ ReorderBufferReturnChange(rb, cleanup);
+ }
+ txn->nentries_mem = 0;
+ Assert(dlist_is_empty(&txn->changes));
+
+ XLByteToSeg(txn->final_lsn, last_segno, wal_segment_size);
+
+ while (restored < max_changes_in_memory && *segno <= last_segno)
+ {
+ int readBytes;
+ ReorderBufferDiskChange *ondisk;
+
+ if (*fd == -1)
+ {
+ char path[MAXPGPATH];
+
+ /* first time in */
+ if (*segno == 0)
+ XLByteToSeg(txn->first_lsn, *segno, wal_segment_size);
+
+ Assert(*segno != 0 || dlist_is_empty(&txn->changes));
+
+ /*
+ * No need to care about TLIs here, only used during a single run,
+ * so each LSN only maps to a specific WAL record.
+ */
+ ReorderBufferSerializedPath(path, MyReplicationSlot, txn->xid,
+ *segno);
+
+ *fd = PathNameOpenFile(path, O_RDONLY | PG_BINARY);
+
+ /* No harm in resetting the offset even in case of failure */
+ file->curOffset = 0;
+
+ if (*fd < 0 && errno == ENOENT)
+ {
+ *fd = -1;
+ (*segno)++;
+ continue;
+ }
+ else if (*fd < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m",
+ path)));
+ }
+
+ /*
+ * Read the statically sized part of a change which has information
+ * about the total size. If we couldn't read a record, we're at the
+ * end of this file.
+ */
+ ReorderBufferSerializeReserve(rb, sizeof(ReorderBufferDiskChange));
+ readBytes = FileRead(file->vfd, rb->outbuf,
+ sizeof(ReorderBufferDiskChange),
+ file->curOffset, WAIT_EVENT_REORDER_BUFFER_READ);
+
+ /* eof */
+ if (readBytes == 0)
+ {
+ FileClose(*fd);
+ *fd = -1;
+ (*segno)++;
+ continue;
+ }
+ else if (readBytes < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read from reorderbuffer spill file: %m")));
+ else if (readBytes != sizeof(ReorderBufferDiskChange))
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read from reorderbuffer spill file: read %d instead of %u bytes",
+ readBytes,
+ (uint32) sizeof(ReorderBufferDiskChange))));
+
+ file->curOffset += readBytes;
+
+ ondisk = (ReorderBufferDiskChange *) rb->outbuf;
+
+ ReorderBufferSerializeReserve(rb,
+ sizeof(ReorderBufferDiskChange) + ondisk->size);
+ ondisk = (ReorderBufferDiskChange *) rb->outbuf;
+
+ readBytes = FileRead(file->vfd,
+ rb->outbuf + sizeof(ReorderBufferDiskChange),
+ ondisk->size - sizeof(ReorderBufferDiskChange),
+ file->curOffset,
+ WAIT_EVENT_REORDER_BUFFER_READ);
+
+ if (readBytes < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read from reorderbuffer spill file: %m")));
+ else if (readBytes != ondisk->size - sizeof(ReorderBufferDiskChange))
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read from reorderbuffer spill file: read %d instead of %u bytes",
+ readBytes,
+ (uint32) (ondisk->size - sizeof(ReorderBufferDiskChange)))));
+
+ file->curOffset += readBytes;
+
+ /*
+ * ok, read a full change from disk, now restore it into proper
+ * in-memory format
+ */
+ ReorderBufferRestoreChange(rb, txn, rb->outbuf);
+ restored++;
+ }
+
+ return restored;
+}
+
+/*
+ * Convert change from its on-disk format to in-memory format and queue it onto
+ * the TXN's ->changes list.
+ *
+ * Note: although "data" is declared char*, at entry it points to a
+ * maxalign'd buffer, making it safe in most of this function to assume
+ * that the pointed-to data is suitably aligned for direct access.
+ */
+static void
+ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ char *data)
+{
+ ReorderBufferDiskChange *ondisk;
+ ReorderBufferChange *change;
+
+ ondisk = (ReorderBufferDiskChange *) data;
+
+ change = ReorderBufferGetChange(rb);
+
+ /* copy static part */
+ memcpy(change, &ondisk->change, sizeof(ReorderBufferChange));
+
+ data += sizeof(ReorderBufferDiskChange);
+
+ /* restore individual stuff */
+ switch (change->action)
+ {
+ /* fall through these, they're all similar enough */
+ case REORDER_BUFFER_CHANGE_INSERT:
+ case REORDER_BUFFER_CHANGE_UPDATE:
+ case REORDER_BUFFER_CHANGE_DELETE:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
+ if (change->data.tp.oldtuple)
+ {
+ uint32 tuplelen = ((HeapTuple) data)->t_len;
+
+ change->data.tp.oldtuple =
+ ReorderBufferGetTupleBuf(rb, tuplelen - SizeofHeapTupleHeader);
+
+ /* restore ->tuple */
+ memcpy(&change->data.tp.oldtuple->tuple, data,
+ sizeof(HeapTupleData));
+ data += sizeof(HeapTupleData);
+
+ /* reset t_data pointer into the new tuplebuf */
+ change->data.tp.oldtuple->tuple.t_data =
+ ReorderBufferTupleBufData(change->data.tp.oldtuple);
+
+ /* restore tuple data itself */
+ memcpy(change->data.tp.oldtuple->tuple.t_data, data, tuplelen);
+ data += tuplelen;
+ }
+
+ if (change->data.tp.newtuple)
+ {
+ /* here, data might not be suitably aligned! */
+ uint32 tuplelen;
+
+ memcpy(&tuplelen, data + offsetof(HeapTupleData, t_len),
+ sizeof(uint32));
+
+ change->data.tp.newtuple =
+ ReorderBufferGetTupleBuf(rb, tuplelen - SizeofHeapTupleHeader);
+
+ /* restore ->tuple */
+ memcpy(&change->data.tp.newtuple->tuple, data,
+ sizeof(HeapTupleData));
+ data += sizeof(HeapTupleData);
+
+ /* reset t_data pointer into the new tuplebuf */
+ change->data.tp.newtuple->tuple.t_data =
+ ReorderBufferTupleBufData(change->data.tp.newtuple);
+
+ /* restore tuple data itself */
+ memcpy(change->data.tp.newtuple->tuple.t_data, data, tuplelen);
+ data += tuplelen;
+ }
+
+ break;
+ case REORDER_BUFFER_CHANGE_MESSAGE:
+ {
+ Size prefix_size;
+
+ /* read prefix */
+ memcpy(&prefix_size, data, sizeof(Size));
+ data += sizeof(Size);
+ change->data.msg.prefix = MemoryContextAlloc(rb->context,
+ prefix_size);
+ memcpy(change->data.msg.prefix, data, prefix_size);
+ Assert(change->data.msg.prefix[prefix_size - 1] == '\0');
+ data += prefix_size;
+
+ /* read the message */
+ memcpy(&change->data.msg.message_size, data, sizeof(Size));
+ data += sizeof(Size);
+ change->data.msg.message = MemoryContextAlloc(rb->context,
+ change->data.msg.message_size);
+ memcpy(change->data.msg.message, data,
+ change->data.msg.message_size);
+ data += change->data.msg.message_size;
+
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT:
+ {
+ Snapshot oldsnap;
+ Snapshot newsnap;
+ Size size;
+
+ oldsnap = (Snapshot) data;
+
+ size = sizeof(SnapshotData) +
+ sizeof(TransactionId) * oldsnap->xcnt +
+ sizeof(TransactionId) * (oldsnap->subxcnt + 0);
+
+ change->data.snapshot = MemoryContextAllocZero(rb->context, size);
+
+ newsnap = change->data.snapshot;
+
+ memcpy(newsnap, data, size);
+ newsnap->xip = (TransactionId *)
+ (((char *) newsnap) + sizeof(SnapshotData));
+ newsnap->subxip = newsnap->xip + newsnap->xcnt;
+ newsnap->copied = true;
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_TRUNCATE:
+ {
+ Oid *relids;
+
+ relids = ReorderBufferGetRelids(rb,
+ change->data.truncate.nrelids);
+ memcpy(relids, data, change->data.truncate.nrelids * sizeof(Oid));
+ change->data.truncate.relids = relids;
+
+ break;
+ }
+ /* the base struct contains all the data, easy peasy */
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_ABORT:
+ case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
+ case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
+ break;
+ }
+
+ dlist_push_tail(&txn->changes, &change->node);
+ txn->nentries_mem++;
+
+ /*
+ * Update memory accounting for the restored change. We need to do this
+ * although we don't check the memory limit when restoring the changes in
+ * this branch (we only do that when initially queueing the changes after
+ * decoding), because we will release the changes later, and that will
+ * update the accounting too (subtracting the size from the counters). And
+ * we don't want to underflow there.
+ */
+ ReorderBufferChangeMemoryUpdate(rb, change, true);
+}
+
+/*
+ * Remove all on-disk data stored for the passed-in transaction.
+ */
+static void
+ReorderBufferRestoreCleanup(ReorderBuffer *rb, ReorderBufferTXN *txn)
+{
+ XLogSegNo first;
+ XLogSegNo cur;
+ XLogSegNo last;
+
+ Assert(txn->first_lsn != InvalidXLogRecPtr);
+ Assert(txn->final_lsn != InvalidXLogRecPtr);
+
+ XLByteToSeg(txn->first_lsn, first, wal_segment_size);
+ XLByteToSeg(txn->final_lsn, last, wal_segment_size);
+
+ /* iterate over all possible filenames, and delete them */
+ for (cur = first; cur <= last; cur++)
+ {
+ char path[MAXPGPATH];
+
+ ReorderBufferSerializedPath(path, MyReplicationSlot, txn->xid, cur);
+ if (unlink(path) != 0 && errno != ENOENT)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not remove file \"%s\": %m", path)));
+ }
+}
+
+/*
+ * Remove any leftover serialized reorder buffers from a slot directory after a
+ * prior crash or decoding session exit.
+ */
+static void
+ReorderBufferCleanupSerializedTXNs(const char *slotname)
+{
+ DIR *spill_dir;
+ struct dirent *spill_de;
+ struct stat statbuf;
+ char path[MAXPGPATH * 2 + 12];
+
+ sprintf(path, "pg_replslot/%s", slotname);
+
+ /* we're only handling directories here, skip if it's not ours */
+ if (lstat(path, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode))
+ return;
+
+ spill_dir = AllocateDir(path);
+ while ((spill_de = ReadDirExtended(spill_dir, path, INFO)) != NULL)
+ {
+ /* only look at names that can be ours */
+ if (strncmp(spill_de->d_name, "xid", 3) == 0)
+ {
+ snprintf(path, sizeof(path),
+ "pg_replslot/%s/%s", slotname,
+ spill_de->d_name);
+
+ if (unlink(path) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not remove file \"%s\" during removal of pg_replslot/%s/xid*: %m",
+ path, slotname)));
+ }
+ }
+ FreeDir(spill_dir);
+}
+
+/*
+ * Given a replication slot, transaction ID and segment number, fill 'path'
+ * (a caller-owned buffer of size at least MAXPGPATH) with the name of the
+ * corresponding spill file.
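+ *
+ * For example, with the default 16MB WAL segment size, segno 1 corresponds
+ * to LSN 0/1000000, so the changes of transaction 1234 in a (hypothetical)
+ * slot named "myslot" would be spilled to
+ * "pg_replslot/myslot/xid-1234-lsn-0-1000000.spill".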
+ */
+static void
+ReorderBufferSerializedPath(char *path, ReplicationSlot *slot, TransactionId xid,
+ XLogSegNo segno)
+{
+ XLogRecPtr recptr;
+
+ XLogSegNoOffsetToRecPtr(segno, 0, wal_segment_size, recptr);
+
+ snprintf(path, MAXPGPATH, "pg_replslot/%s/xid-%u-lsn-%X-%X.spill",
+ NameStr(slot->data.name),
+ xid,
+ (uint32) (recptr >> 32), (uint32) recptr);
+}
+
+/*
+ * Delete all data spilled to disk after we've restarted/crashed. It will be
+ * recreated when the respective slots are reused.
+ */
+void
+StartupReorderBuffer(void)
+{
+ DIR *logical_dir;
+ struct dirent *logical_de;
+
+ logical_dir = AllocateDir("pg_replslot");
+ while ((logical_de = ReadDir(logical_dir, "pg_replslot")) != NULL)
+ {
+ if (strcmp(logical_de->d_name, ".") == 0 ||
+ strcmp(logical_de->d_name, "..") == 0)
+ continue;
+
+ /* if it cannot be a slot, skip the directory */
+ if (!ReplicationSlotValidateName(logical_de->d_name, DEBUG2))
+ continue;
+
+ /*
+ * ok, has to be a surviving logical slot, iterate and delete
+ * everything starting with xid-*
+ */
+ ReorderBufferCleanupSerializedTXNs(logical_de->d_name);
+ }
+ FreeDir(logical_dir);
+}
+
+/* ---------------------------------------
+ * toast reassembly support
+ * ---------------------------------------
+ */
+
+/*
+ * Initialize per-tuple toast reconstruction support.
+ */
+static void
+ReorderBufferToastInitHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
+{
+ HASHCTL hash_ctl;
+
+ Assert(txn->toast_hash == NULL);
+
+ memset(&hash_ctl, 0, sizeof(hash_ctl));
+ hash_ctl.keysize = sizeof(Oid);
+ hash_ctl.entrysize = sizeof(ReorderBufferToastEnt);
+ hash_ctl.hcxt = rb->context;
+ txn->toast_hash = hash_create("ReorderBufferToastHash", 5, &hash_ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+}
+
+/*
+ * Per toast-chunk handling for toast reconstruction
+ *
+ * Appends a toast chunk so we can reconstruct it when the tuple "owning" the
+ * toasted Datum comes along.
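+ *
+ * Chunks of a given chunk_id must arrive in order (chunk_seq 0, 1, 2, ...);
+ * the checks below error out on any gap or reordering.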
+ */
+static void
+ReorderBufferToastAppendChunk(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ Relation relation, ReorderBufferChange *change)
+{
+ ReorderBufferToastEnt *ent;
+ ReorderBufferTupleBuf *newtup;
+ bool found;
+ int32 chunksize;
+ bool isnull;
+ Pointer chunk;
+ TupleDesc desc = RelationGetDescr(relation);
+ Oid chunk_id;
+ int32 chunk_seq;
+
+ if (txn->toast_hash == NULL)
+ ReorderBufferToastInitHash(rb, txn);
+
+ Assert(IsToastRelation(relation));
+
+ newtup = change->data.tp.newtuple;
+ chunk_id = DatumGetObjectId(fastgetattr(&newtup->tuple, 1, desc, &isnull));
+ Assert(!isnull);
+ chunk_seq = DatumGetInt32(fastgetattr(&newtup->tuple, 2, desc, &isnull));
+ Assert(!isnull);
+
+ ent = (ReorderBufferToastEnt *)
+ hash_search(txn->toast_hash,
+ (void *) &chunk_id,
+ HASH_ENTER,
+ &found);
+
+ if (!found)
+ {
+ Assert(ent->chunk_id == chunk_id);
+ ent->num_chunks = 0;
+ ent->last_chunk_seq = 0;
+ ent->size = 0;
+ ent->reconstructed = NULL;
+ dlist_init(&ent->chunks);
+
+ if (chunk_seq != 0)
+ elog(ERROR, "got sequence entry %d for toast chunk %u instead of seq 0",
+ chunk_seq, chunk_id);
+ }
+ else if (found && chunk_seq != ent->last_chunk_seq + 1)
+ elog(ERROR, "got sequence entry %d for toast chunk %u instead of seq %d",
+ chunk_seq, chunk_id, ent->last_chunk_seq + 1);
+
+ chunk = DatumGetPointer(fastgetattr(&newtup->tuple, 3, desc, &isnull));
+ Assert(!isnull);
+
+ /* calculate size so we can allocate the right size at once later */
+ if (!VARATT_IS_EXTENDED(chunk))
+ chunksize = VARSIZE(chunk) - VARHDRSZ;
+ else if (VARATT_IS_SHORT(chunk))
+ /* could happen due to heap_form_tuple doing its thing */
+ chunksize = VARSIZE_SHORT(chunk) - VARHDRSZ_SHORT;
+ else
+ elog(ERROR, "unexpected type of toast chunk");
+
+ ent->size += chunksize;
+ ent->last_chunk_seq = chunk_seq;
+ ent->num_chunks++;
+ dlist_push_tail(&ent->chunks, &change->node);
+}
+
+/*
+ * Rejigger change->newtuple to point to in-memory toast tuples instead of
+ * on-disk toast tuples that may no longer exist (think DROP TABLE or VACUUM).
+ *
+ * We cannot replace unchanged toast tuples though, so those will still point
+ * to on-disk toast data.
+ *
+ * While updating the existing change with detoasted tuple data, we need to
+ * update the memory accounting info, because the change size will differ.
+ * Otherwise the accounting may get out of sync, triggering serialization
+ * at unexpected times.
+ *
+ * We simply subtract the size of the change before rejiggering the tuple,
+ * and then add the new size. This makes it look like the change was removed
+ * and then added back, except it only tweaks the accounting info.
+ *
+ * In particular it can't trigger serialization, which would be pointless
+ * anyway as it happens during commit processing right before handing
+ * the change to the output plugin.
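+ *
+ * The replacement itself relies on indirect toast pointers: each
+ * reconstructed datum gets wrapped along the lines of
+ *
+ *	redirect_pointer.pointer = reconstructed;
+ *	SET_VARTAG_EXTERNAL(new_datum, VARTAG_INDIRECT);
+ *	memcpy(VARDATA_EXTERNAL(new_datum), &redirect_pointer,
+ *		   sizeof(redirect_pointer));
+ *
+ * (see the loop below), so the output plugin can detoast it like any other
+ * external datum.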
+ */
+static void
+ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
+ Relation relation, ReorderBufferChange *change)
+{
+ TupleDesc desc;
+ int natt;
+ Datum *attrs;
+ bool *isnull;
+ bool *free;
+ HeapTuple tmphtup;
+ Relation toast_rel;
+ TupleDesc toast_desc;
+ MemoryContext oldcontext;
+ ReorderBufferTupleBuf *newtup;
+
+ /* no toast tuples changed */
+ if (txn->toast_hash == NULL)
+ return;
+
+ /*
+ * We're going to modify the size of the change, so to make sure the
+ * accounting is correct we'll make it look like we're removing the change
+ * now (with the old size), and then re-add it at the end.
+ */
+ ReorderBufferChangeMemoryUpdate(rb, change, false);
+
+ oldcontext = MemoryContextSwitchTo(rb->context);
+
+ /* we should only have toast tuples in an INSERT or UPDATE */
+ Assert(change->data.tp.newtuple);
+
+ desc = RelationGetDescr(relation);
+
+ toast_rel = RelationIdGetRelation(relation->rd_rel->reltoastrelid);
+ if (!RelationIsValid(toast_rel))
+ elog(ERROR, "could not open relation with OID %u",
+ relation->rd_rel->reltoastrelid);
+
+ toast_desc = RelationGetDescr(toast_rel);
+
+ /* should we allocate from stack instead? */
+ attrs = palloc0(sizeof(Datum) * desc->natts);
+ isnull = palloc0(sizeof(bool) * desc->natts);
+ free = palloc0(sizeof(bool) * desc->natts);
+
+ newtup = change->data.tp.newtuple;
+
+ heap_deform_tuple(&newtup->tuple, desc, attrs, isnull);
+
+ for (natt = 0; natt < desc->natts; natt++)
+ {
+ Form_pg_attribute attr = TupleDescAttr(desc, natt);
+ ReorderBufferToastEnt *ent;
+ struct varlena *varlena;
+
+ /* va_rawsize is the size of the original datum -- including header */
+ struct varatt_external toast_pointer;
+ struct varatt_indirect redirect_pointer;
+ struct varlena *new_datum = NULL;
+ struct varlena *reconstructed;
+ dlist_iter it;
+ Size data_done = 0;
+
+ /* system columns aren't toasted */
+ if (attr->attnum < 0)
+ continue;
+
+ if (attr->attisdropped)
+ continue;
+
+ /* not a varlena datatype */
+ if (attr->attlen != -1)
+ continue;
+
+ /* no data */
+ if (isnull[natt])
+ continue;
+
+ /* ok, we know we have a toast datum */
+ varlena = (struct varlena *) DatumGetPointer(attrs[natt]);
+
+ /* no need to do anything if the tuple isn't external */
+ if (!VARATT_IS_EXTERNAL(varlena))
+ continue;
+
+ VARATT_EXTERNAL_GET_POINTER(toast_pointer, varlena);
+
+ /*
+ * Check whether the toast tuple changed, replace if so.
+ */
+ ent = (ReorderBufferToastEnt *)
+ hash_search(txn->toast_hash,
+ (void *) &toast_pointer.va_valueid,
+ HASH_FIND,
+ NULL);
+ if (ent == NULL)
+ continue;
+
+ new_datum =
+ (struct varlena *) palloc0(INDIRECT_POINTER_SIZE);
+
+ free[natt] = true;
+
+ reconstructed = palloc0(toast_pointer.va_rawsize);
+
+ ent->reconstructed = reconstructed;
+
+ /* stitch toast tuple back together from its parts */
+ dlist_foreach(it, &ent->chunks)
+ {
+ bool isnull;
+ ReorderBufferChange *cchange;
+ ReorderBufferTupleBuf *ctup;
+ Pointer chunk;
+
+ cchange = dlist_container(ReorderBufferChange, node, it.cur);
+ ctup = cchange->data.tp.newtuple;
+ chunk = DatumGetPointer(fastgetattr(&ctup->tuple, 3, toast_desc, &isnull));
+
+ Assert(!isnull);
+ Assert(!VARATT_IS_EXTERNAL(chunk));
+ Assert(!VARATT_IS_SHORT(chunk));
+
+ memcpy(VARDATA(reconstructed) + data_done,
+ VARDATA(chunk),
+ VARSIZE(chunk) - VARHDRSZ);
+ data_done += VARSIZE(chunk) - VARHDRSZ;
+ }
+ Assert(data_done == toast_pointer.va_extsize);
+
+ /* make sure it's marked as compressed or not */
+ if (VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer))
+ SET_VARSIZE_COMPRESSED(reconstructed, data_done + VARHDRSZ);
+ else
+ SET_VARSIZE(reconstructed, data_done + VARHDRSZ);
+
+ memset(&redirect_pointer, 0, sizeof(redirect_pointer));
+ redirect_pointer.pointer = reconstructed;
+
+ SET_VARTAG_EXTERNAL(new_datum, VARTAG_INDIRECT);
+ memcpy(VARDATA_EXTERNAL(new_datum), &redirect_pointer,
+ sizeof(redirect_pointer));
+
+ attrs[natt] = PointerGetDatum(new_datum);
+ }
+
+ /*
+ * Build tuple in separate memory & copy tuple back into the tuplebuf
+ * passed to the output plugin. We can't directly heap_fill_tuple() into
+ * the tuplebuf because attrs[] will point back into the current content.
+ */
+ tmphtup = heap_form_tuple(desc, attrs, isnull);
+ Assert(newtup->tuple.t_len <= MaxHeapTupleSize);
+ Assert(ReorderBufferTupleBufData(newtup) == newtup->tuple.t_data);
+
+ memcpy(newtup->tuple.t_data, tmphtup->t_data, tmphtup->t_len);
+ newtup->tuple.t_len = tmphtup->t_len;
+
+ /*
+ * free resources we won't need anymore; more persistent stuff will be
+ * free'd in ReorderBufferToastReset().
+ */
+ RelationClose(toast_rel);
+ pfree(tmphtup);
+ for (natt = 0; natt < desc->natts; natt++)
+ {
+ if (free[natt])
+ pfree(DatumGetPointer(attrs[natt]));
+ }
+ pfree(attrs);
+ pfree(free);
+ pfree(isnull);
+
+ MemoryContextSwitchTo(oldcontext);
+
+ /* now add the change back, with the correct size */
+ ReorderBufferChangeMemoryUpdate(rb, change, true);
+}
+
+/*
+ * Free all resources allocated for toast reconstruction.
+ */
+static void
+ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn)
+{
+ HASH_SEQ_STATUS hstat;
+ ReorderBufferToastEnt *ent;
+
+ if (txn->toast_hash == NULL)
+ return;
+
+ /* sequentially walk over the hash and free everything */
+ hash_seq_init(&hstat, txn->toast_hash);
+ while ((ent = (ReorderBufferToastEnt *) hash_seq_search(&hstat)) != NULL)
+ {
+ dlist_mutable_iter it;
+
+ if (ent->reconstructed != NULL)
+ pfree(ent->reconstructed);
+
+ dlist_foreach_modify(it, &ent->chunks)
+ {
+ ReorderBufferChange *change =
+ dlist_container(ReorderBufferChange, node, it.cur);
+
+ dlist_delete(&change->node);
+ ReorderBufferReturnChange(rb, change);
+ }
+ }
+
+ hash_destroy(txn->toast_hash);
+ txn->toast_hash = NULL;
+}
+
+
+/* ---------------------------------------
+ * Visibility support for logical decoding
+ *
+ *
+ * Look up actual cmin/cmax values when using a decoding snapshot. We can't
+ * always rely on stored cmin/cmax values because of two scenarios:
+ *
+ * * A tuple got changed multiple times during a single transaction and thus
+ * has a combocid. Combocids are only valid for the duration of a single
+ * transaction.
+ * * A tuple with a cmin but no cmax (and thus no combocid) got
+ * deleted/updated in another transaction than the one which created it,
+ * which is the transaction we are looking at right now. As only one of
+ * cmin, cmax or combocid is actually stored in the heap, we don't have
+ * access to the value we need anymore.
+ *
+ * To resolve those problems we have a per-transaction hash of (cmin,
+ * cmax) tuples keyed by (relfilenode, ctid) which contains the actual
+ * (cmin, cmax) values. That also takes care of combocids by simply
+ * not caring about them at all. As we have the real cmin/cmax values
+ * combocids aren't interesting.
+ *
+ * As we only care about catalog tuples here the overhead of this
+ * hashtable should be acceptable.
+ *
+ * Heap rewrites complicate this a bit, check rewriteheap.c for
+ * details.
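+ *
+ * As a concrete (made-up) example of the first scenario: a transaction that
+ * inserts a catalog row at CommandId 0 and updates it at CommandId 2 leaves
+ * the old row version needing both cmin = 0 and cmax = 2, but the heap has
+ * room for only one of them, so a combocid gets used and only our hash still
+ * knows the original pair.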
+ * -------------------------------------------------------------------------
+ */
+
+/* struct for sorting mapping files by LSN efficiently */
+typedef struct RewriteMappingFile
+{
+ XLogRecPtr lsn;
+ char fname[MAXPGPATH];
+} RewriteMappingFile;
+
+#ifdef NOT_USED
+static void
+DisplayMapping(HTAB *tuplecid_data)
+{
+ HASH_SEQ_STATUS hstat;
+ ReorderBufferTupleCidEnt *ent;
+
+ hash_seq_init(&hstat, tuplecid_data);
+ while ((ent = (ReorderBufferTupleCidEnt *) hash_seq_search(&hstat)) != NULL)
+ {
+ elog(DEBUG3, "mapping: node: %u/%u/%u tid: %u/%u cmin: %u, cmax: %u",
+ ent->key.relnode.dbNode,
+ ent->key.relnode.spcNode,
+ ent->key.relnode.relNode,
+ ItemPointerGetBlockNumber(&ent->key.tid),
+ ItemPointerGetOffsetNumber(&ent->key.tid),
+ ent->cmin,
+ ent->cmax
+ );
+ }
+}
+#endif
+
+/*
+ * Apply a single mapping file to tuplecid_data.
+ *
+ * The mapping file has to have been verified to be a) committed b) for our
+ * transaction c) applied in LSN order.
+ */
+static void
+ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname)
+{
+ char path[MAXPGPATH];
+ int fd;
+ int readBytes;
+ LogicalRewriteMappingData map;
+
+ sprintf(path, "pg_logical/mappings/%s", fname);
+ fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
+ if (fd < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m", path)));
+
+ while (true)
+ {
+ ReorderBufferTupleCidKey key;
+ ReorderBufferTupleCidEnt *ent;
+ ReorderBufferTupleCidEnt *new_ent;
+ bool found;
+
+ /* be careful about padding */
+ memset(&key, 0, sizeof(ReorderBufferTupleCidKey));
+
+ /* read all mappings till the end of the file */
+ pgstat_report_wait_start(WAIT_EVENT_REORDER_LOGICAL_MAPPING_READ);
+ readBytes = read(fd, &map, sizeof(LogicalRewriteMappingData));
+ pgstat_report_wait_end();
+
+ if (readBytes < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m",
+ path)));
+ else if (readBytes == 0) /* EOF */
+ break;
+ else if (readBytes != sizeof(LogicalRewriteMappingData))
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read from file \"%s\": read %d instead of %d bytes",
+ path, readBytes,
+ (int32) sizeof(LogicalRewriteMappingData))));
+
+ key.relnode = map.old_node;
+ ItemPointerCopy(&map.old_tid,
+ &key.tid);
+
+
+ ent = (ReorderBufferTupleCidEnt *)
+ hash_search(tuplecid_data,
+ (void *) &key,
+ HASH_FIND,
+ NULL);
+
+ /* no existing mapping, no need to update */
+ if (!ent)
+ continue;
+
+ key.relnode = map.new_node;
+ ItemPointerCopy(&map.new_tid,
+ &key.tid);
+
+ new_ent = (ReorderBufferTupleCidEnt *)
+ hash_search(tuplecid_data,
+ (void *) &key,
+ HASH_ENTER,
+ &found);
+
+ if (found)
+ {
+ /*
+ * Make sure the existing mapping makes sense. We sometimes update
+ * old records that did not yet have a cmax (e.g. pg_class' own
+ * entry while rewriting it) during rewrites, so allow that.
+ */
+ Assert(ent->cmin == InvalidCommandId || ent->cmin == new_ent->cmin);
+ Assert(ent->cmax == InvalidCommandId || ent->cmax == new_ent->cmax);
+ }
+ else
+ {
+ /* update mapping */
+ new_ent->cmin = ent->cmin;
+ new_ent->cmax = ent->cmax;
+ new_ent->combocid = ent->combocid;
+ }
+ }
+
+ if (CloseTransientFile(fd) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m", path)));
+}
+
+
+/*
+ * Check whether the TransactionId 'xid' is in the pre-sorted array 'xip'.
+ */
+static bool
+TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
+{
+ return bsearch(&xid, xip, num,
+ sizeof(TransactionId), xidComparator) != NULL;
+}
+
+/*
+ * list_sort() comparator for sorting RewriteMappingFiles in LSN order.
+ */
+static int
+file_sort_by_lsn(const ListCell *a_p, const ListCell *b_p)
+{
+ RewriteMappingFile *a = (RewriteMappingFile *) lfirst(a_p);
+ RewriteMappingFile *b = (RewriteMappingFile *) lfirst(b_p);
+
+ if (a->lsn < b->lsn)
+ return -1;
+ else if (a->lsn > b->lsn)
+ return 1;
+ return 0;
+}
+
+/*
+ * Apply any existing logical remapping files that are targeted at our
+ * transaction for relid.
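+ *
+ * For reference, mapping file names follow LOGICAL_REWRITE_FORMAT, roughly
+ * "map-<dboid>-<relid>-<lsn_hi>_<lsn_lo>-<mapped_xid>-<create_xid>" with
+ * all fields in hex; the sscanf() below parses exactly these six fields.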
+ */
+static void
+UpdateLogicalMappings(HTAB *tuplecid_data, Oid relid, Snapshot snapshot)
+{
+ DIR *mapping_dir;
+ struct dirent *mapping_de;
+ List *files = NIL;
+ ListCell *file;
+ Oid dboid = IsSharedRelation(relid) ? InvalidOid : MyDatabaseId;
+
+ mapping_dir = AllocateDir("pg_logical/mappings");
+ while ((mapping_de = ReadDir(mapping_dir, "pg_logical/mappings")) != NULL)
+ {
+ Oid f_dboid;
+ Oid f_relid;
+ TransactionId f_mapped_xid;
+ TransactionId f_create_xid;
+ XLogRecPtr f_lsn;
+ uint32 f_hi,
+ f_lo;
+ RewriteMappingFile *f;
+
+ if (strcmp(mapping_de->d_name, ".") == 0 ||
+ strcmp(mapping_de->d_name, "..") == 0)
+ continue;
+
+ /* Ignore files that aren't ours */
+ if (strncmp(mapping_de->d_name, "map-", 4) != 0)
+ continue;
+
+ if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT,
+ &f_dboid, &f_relid, &f_hi, &f_lo,
+ &f_mapped_xid, &f_create_xid) != 6)
+ elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name);
+
+ f_lsn = ((uint64) f_hi) << 32 | f_lo;
+
+ /* mapping for another database */
+ if (f_dboid != dboid)
+ continue;
+
+ /* mapping for another relation */
+ if (f_relid != relid)
+ continue;
+
+ /* did the creating transaction abort? */
+ if (!TransactionIdDidCommit(f_create_xid))
+ continue;
+
+ /* not for our transaction */
+ if (!TransactionIdInArray(f_mapped_xid, snapshot->subxip, snapshot->subxcnt))
+ continue;
+
+ /* ok, relevant, queue for apply */
+ f = palloc(sizeof(RewriteMappingFile));
+ f->lsn = f_lsn;
+ strcpy(f->fname, mapping_de->d_name);
+ files = lappend(files, f);
+ }
+ FreeDir(mapping_dir);
+
+ /* sort files so we apply them in LSN order */
+ list_sort(files, file_sort_by_lsn);
+
+ foreach(file, files)
+ {
+ RewriteMappingFile *f = (RewriteMappingFile *) lfirst(file);
+
+ elog(DEBUG1, "applying mapping: \"%s\" in %u", f->fname,
+ snapshot->subxip[0]);
+ ApplyLogicalMappingFile(tuplecid_data, relid, f->fname);
+ pfree(f);
+ }
+}
+
+/*
+ * Look up the cmin/cmax of a tuple during logical decoding, where we can't rely on
+ * combocids.
+ */
+bool
+ResolveCminCmaxDuringDecoding(HTAB *tuplecid_data,
+ Snapshot snapshot,
+ HeapTuple htup, Buffer buffer,
+ CommandId *cmin, CommandId *cmax)
+{
+ ReorderBufferTupleCidKey key;
+ ReorderBufferTupleCidEnt *ent;
+ ForkNumber forkno;
+ BlockNumber blockno;
+ bool updated_mapping = false;
+
+ /* be careful about padding */
+ memset(&key, 0, sizeof(key));
+
+ Assert(!BufferIsLocal(buffer));
+
+ /*
+ * get relfilenode from the buffer, no convenient way to access it other
+ * than that.
+ */
+ BufferGetTag(buffer, &key.relnode, &forkno, &blockno);
+
+ /* tuples can only be in the main fork */
+ Assert(forkno == MAIN_FORKNUM);
+ Assert(blockno == ItemPointerGetBlockNumber(&htup->t_self));
+
+ ItemPointerCopy(&htup->t_self,
+ &key.tid);
+
+restart:
+ ent = (ReorderBufferTupleCidEnt *)
+ hash_search(tuplecid_data,
+ (void *) &key,
+ HASH_FIND,
+ NULL);
+
+ /*
+ * failed to find a mapping, check whether the table was rewritten and
+ * apply mapping if so, but only do that once - there can be no new
+ * mappings while we are in here since we have to hold a lock on the
+ * relation.
+ */
+ if (ent == NULL && !updated_mapping)
+ {
+ UpdateLogicalMappings(tuplecid_data, htup->t_tableOid, snapshot);
+ /* now check but don't update for a mapping again */
+ updated_mapping = true;
+ goto restart;
+ }
+ else if (ent == NULL)
+ return false;
+
+ if (cmin)
+ *cmin = ent->cmin;
+ if (cmax)
+ *cmax = ent->cmax;
+ return true;
+}
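+
+/*
+ * A sketch of the expected call pattern (simplified; the actual caller is
+ * the historic MVCC visibility code in heapam_visibility.c):
+ *
+ *	CommandId	cmin,
+ *				cmax;
+ *
+ *	if (ResolveCminCmaxDuringDecoding(tuplecid_data, snapshot,
+ *									  htup, buffer, &cmin, &cmax))
+ *		... use cmin/cmax rather than any combocid stored in the tuple ...
+ */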
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
new file mode 100644
index 0000000..76c674b
--- /dev/null
+++ b/src/backend/replication/logical/snapbuild.c
@@ -0,0 +1,2016 @@
+/*-------------------------------------------------------------------------
+ *
+ * snapbuild.c
+ *
+ * Infrastructure for building historic catalog snapshots based on contents
+ * of the WAL, for the purpose of decoding heapam.c style values in the
+ * WAL.
+ *
+ * NOTES:
+ *
+ * We build snapshots which can *only* be used to read catalog contents and we
+ * do so by reading and interpreting the WAL stream. The aim is to build a
+ * snapshot that behaves the same as a freshly taken MVCC snapshot would have
+ * at the time the XLogRecord was generated.
+ *
+ * To build the snapshots we reuse the infrastructure built for Hot
+ * Standby. The in-memory snapshots we build look different than HS' because
+ * we have different needs. To successfully decode data from the WAL we only
+ * need to access catalog tables and (sys|rel|cat)cache, not the actual user
+ * tables since the data we decode is wholly contained in the WAL
+ * records. Also, our snapshots need to be different in comparison to normal
+ * MVCC ones because in contrast to those we cannot fully rely on the clog and
+ * pg_subtrans for information about committed transactions because they might
+ * commit in the future from the POV of the WAL entry we're currently
+ * decoding. This definition has the advantage that we only need to prevent
+ * removal of catalog rows, while normal table's rows can still be
+ * removed. This is achieved by using the replication slot mechanism.
+ *
+ * As the percentage of transactions modifying the catalog normally is fairly
+ * small in comparison to ones only manipulating user data, we keep track of
+ * the committed catalog modifying ones inside [xmin, xmax) instead of keeping
+ * track of all running transactions like it's done in a normal snapshot. Note
+ * that we're generally only looking at transactions that have acquired an
+ * xid. That is we keep a list of transactions between snapshot->(xmin, xmax)
+ * that we consider committed, everything else is considered aborted/in
+ * progress. That also allows us not to care about subtransactions before they
+ * have committed which means this module, in contrast to HS, doesn't have to
+ * care about suboverflowed subtransactions and similar.
+ *
+ * One complexity of doing this is that, to handle e.g. mixed DDL/DML
+ * transactions, we need Snapshots that see intermediate versions of the
+ * catalog in a transaction. During normal operation this is achieved by using
+ * CommandIds/cmin/cmax. The problem with that however is that for space
+ * efficiency reasons only one value of that is stored
+ * (cf. combocid.c). Since ComboCids are only available in memory we log
+ * additional information which allows us to get the original (cmin, cmax)
+ * pair during visibility checks. Check the reorderbuffer.c's comment above
+ * ResolveCminCmaxDuringDecoding() for details.
+ *
+ * To facilitate all this we need our own visibility routine, as the normal
+ * ones are optimized for different use cases.
+ *
+ * To replace the normal catalog snapshots with decoding ones use the
+ * SetupHistoricSnapshot() and TeardownHistoricSnapshot() functions.
+ *
+ * The snapbuild machinery starts up in several stages, as illustrated
+ * by the following graph describing the SnapBuild->state transitions:
+ *
+ * +-------------------------+
+ * +----| START |-------------+
+ * | +-------------------------+ |
+ * | | |
+ * | | |
+ * | running_xacts #1 |
+ * | | |
+ * | | |
+ * | v |
+ * | +-------------------------+ v
+ * | | BUILDING_SNAPSHOT |------------>|
+ * | +-------------------------+ |
+ * | | |
+ * | | |
+ * | running_xacts #2, xacts from #1 finished |
+ * | | |
+ * | | |
+ * | v |
+ * | +-------------------------+ v
+ * | | FULL_SNAPSHOT |------------>|
+ * | +-------------------------+ |
+ * | | |
+ * running_xacts | saved snapshot
+ * with zero xacts | at running_xacts's lsn
+ * | | |
+ * | running_xacts with xacts from #2 finished |
+ * | | |
+ * | v |
+ * | +-------------------------+ |
+ * +--->|SNAPBUILD_CONSISTENT |<------------+
+ * +-------------------------+
+ *
+ * Initially the machinery is in the START stage. When an xl_running_xacts
+ * record is read that is sufficiently new (above the safe xmin horizon),
+ * there's a state transition. If there were no running xacts when the
+ * running_xacts record was generated, we'll directly go into CONSISTENT
+ * state, otherwise we'll switch to the BUILDING_SNAPSHOT state. Having a full
+ * snapshot means that all transactions that start henceforth can be decoded
+ * in their entirety, but transactions that started previously can't. In
+ * FULL_SNAPSHOT we'll switch into CONSISTENT once all those previously
+ * running transactions have committed or aborted.
+ *
+ * Only transactions that commit after CONSISTENT state has been reached will
+ * be replayed, even though they might have started while still in
+ * FULL_SNAPSHOT. That ensures that we'll reach a point where no previous
+ * changes have been exported, but all the following ones will be. That point
+ * is a convenient point to initialize replication from, which is why we
+ * export a snapshot at that point, which *can* be used to read normal data.
+ *
+ * Copyright (c) 2012-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *	  src/backend/replication/logical/snapbuild.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "access/heapam_xlog.h"
+#include "access/transam.h"
+#include "access/xact.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "replication/logical.h"
+#include "replication/reorderbuffer.h"
+#include "replication/snapbuild.h"
+#include "storage/block.h" /* debugging output */
+#include "storage/fd.h"
+#include "storage/lmgr.h"
+#include "storage/proc.h"
+#include "storage/procarray.h"
+#include "storage/standby.h"
+#include "utils/builtins.h"
+#include "utils/memutils.h"
+#include "utils/snapmgr.h"
+#include "utils/snapshot.h"
+
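+/*
+ * Rough lifecycle of this module from a caller's point of view; a
+ * simplified sketch of what logical.c and decode.c do, with names
+ * abbreviated and error handling and the reorderbuffer interaction
+ * omitted:
+ *
+ *	SnapBuild  *builder = AllocateSnapshotBuilder(reorder, xmin_horizon,
+ *												  start_lsn, need_full);
+ *
+ *	for each interesting WAL record at 'lsn':
+ *		SnapBuildProcessRunningXacts(builder, lsn, running);
+ *		SnapBuildCommitTxn(builder, lsn, xid, nsubxacts, subxacts);
+ *		if (SnapBuildProcessChange(builder, xid, lsn))
+ *			queue the change into the reorderbuffer;
+ *
+ *	FreeSnapshotBuilder(builder);
+ */
+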
+/*
+ * This struct contains the current state of the snapshot building
+ * machinery. Besides a forward declaration in the header, it is not exposed
+ * to the public, so we can easily change its contents.
+ */
+struct SnapBuild
+{
+ /* how far are we along building our first full snapshot */
+ SnapBuildState state;
+
+ /* private memory context used to allocate memory for this module. */
+ MemoryContext context;
+
+	/* all transactions < this have committed/aborted */
+ TransactionId xmin;
+
+	/* all transactions >= this are uncommitted */
+ TransactionId xmax;
+
+ /*
+ * Don't replay commits from an LSN < this LSN. This can be set externally
+ * but it will also be advanced (never retreat) from within snapbuild.c.
+ */
+ XLogRecPtr start_decoding_at;
+
+ /*
+ * Don't start decoding WAL until the "xl_running_xacts" information
+ * indicates there are no running xids with an xid smaller than this.
+ */
+ TransactionId initial_xmin_horizon;
+
+	/* Indicates whether we're building a full snapshot or just a catalog one. */
+ bool building_full_snapshot;
+
+ /*
+	 * Snapshot that's valid to see the catalog state as of this moment.
+ */
+ Snapshot snapshot;
+
+ /*
+ * LSN of the last location we are sure a snapshot has been serialized to.
+ */
+ XLogRecPtr last_serialized_snapshot;
+
+ /*
+ * The reorderbuffer we need to update with usable snapshots et al.
+ */
+ ReorderBuffer *reorder;
+
+ /*
+ * Outdated: This struct isn't used for its original purpose anymore, but
+ * can't be removed / changed in a minor version, because it's stored
+ * on-disk.
+ */
+ struct
+ {
+ /*
+ * NB: This field is misused, until a major version can break on-disk
+ * compatibility. See SnapBuildNextPhaseAt() /
+ * SnapBuildStartNextPhaseAt().
+ */
+ TransactionId was_xmin;
+ TransactionId was_xmax;
+
+ size_t was_xcnt; /* number of used xip entries */
+ size_t was_xcnt_space; /* allocated size of xip */
+ TransactionId *was_xip; /* running xacts array, xidComparator-sorted */
+ } was_running;
+
+ /*
+ * Array of transactions which could have catalog changes that committed
+ * between xmin and xmax.
+ */
+ struct
+ {
+ /* number of committed transactions */
+ size_t xcnt;
+
+ /* available space for committed transactions */
+ size_t xcnt_space;
+
+ /*
+ * Until we reach a CONSISTENT state, we record commits of all
+ * transactions, not just the catalog changing ones. Record when that
+ * changes so we know we cannot export a snapshot safely anymore.
+ */
+ bool includes_all_transactions;
+
+ /*
+ * Array of committed transactions that have modified the catalog.
+ *
+ * As this array is frequently modified we do *not* keep it in
+ * xidComparator order. Instead we sort the array when building &
+ * distributing a snapshot.
+ *
+ * TODO: It's unclear whether that reasoning has much merit. Every
+ * time we add something here after becoming consistent will also
+ * require distributing a snapshot. Storing them sorted would
+ * potentially also make it easier to purge (but more complicated wrt
+ * wraparound?). Should be improved if sorting while building the
+ * snapshot shows up in profiles.
+ */
+ TransactionId *xip;
+ } committed;
+};
+
+/*
+ * Starting a transaction -- which we need to do while exporting a snapshot --
+ * removes knowledge about the previously used resowner, so we save it here.
+ */
+static ResourceOwner SavedResourceOwnerDuringExport = NULL;
+static bool ExportInProgress = false;
+
+/* ->committed manipulation */
+static void SnapBuildPurgeCommittedTxn(SnapBuild *builder);
+
+/* snapshot building/manipulation/distribution functions */
+static Snapshot SnapBuildBuildSnapshot(SnapBuild *builder);
+
+static void SnapBuildFreeSnapshot(Snapshot snap);
+
+static void SnapBuildSnapIncRefcount(Snapshot snap);
+
+static void SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn);
+
+/* xlog reading helper functions for SnapBuildProcessRunningXacts */
+static bool SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *running);
+static void SnapBuildWaitSnapshot(xl_running_xacts *running, TransactionId cutoff);
+
+/* serialization functions */
+static void SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn);
+static bool SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn);
+
+/*
+ * Return TransactionId after which the next phase of initial snapshot
+ * building will happen.
+ */
+static inline TransactionId
+SnapBuildNextPhaseAt(SnapBuild *builder)
+{
+ /*
+ * For backward compatibility reasons this has to be stored in the wrongly
+ * named field. Will be fixed in next major version.
+ */
+ return builder->was_running.was_xmax;
+}
+
+/*
+ * Set TransactionId after which the next phase of initial snapshot building
+ * will happen.
+ */
+static inline void
+SnapBuildStartNextPhaseAt(SnapBuild *builder, TransactionId at)
+{
+ /*
+ * For backward compatibility reasons this has to be stored in the wrongly
+ * named field. Will be fixed in next major version.
+ */
+ builder->was_running.was_xmax = at;
+}
+
+/*
+ * Allocate a new snapshot builder.
+ *
+ * xmin_horizon is the xid at or above which we can be sure no catalog rows
+ * have been removed; start_lsn is the LSN at or after which we want to
+ * replay commits.
+ */
+SnapBuild *
+AllocateSnapshotBuilder(ReorderBuffer *reorder,
+ TransactionId xmin_horizon,
+ XLogRecPtr start_lsn,
+ bool need_full_snapshot)
+{
+ MemoryContext context;
+ MemoryContext oldcontext;
+ SnapBuild *builder;
+
+ /* allocate memory in own context, to have better accountability */
+ context = AllocSetContextCreate(CurrentMemoryContext,
+ "snapshot builder context",
+ ALLOCSET_DEFAULT_SIZES);
+ oldcontext = MemoryContextSwitchTo(context);
+
+ builder = palloc0(sizeof(SnapBuild));
+
+ builder->state = SNAPBUILD_START;
+ builder->context = context;
+ builder->reorder = reorder;
+ /* Other struct members initialized by zeroing via palloc0 above */
+
+ builder->committed.xcnt = 0;
+ builder->committed.xcnt_space = 128; /* arbitrary number */
+ builder->committed.xip =
+ palloc0(builder->committed.xcnt_space * sizeof(TransactionId));
+ builder->committed.includes_all_transactions = true;
+
+ builder->initial_xmin_horizon = xmin_horizon;
+ builder->start_decoding_at = start_lsn;
+ builder->building_full_snapshot = need_full_snapshot;
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return builder;
+}
+
+/*
+ * Free a snapshot builder.
+ */
+void
+FreeSnapshotBuilder(SnapBuild *builder)
+{
+ MemoryContext context = builder->context;
+
+ /* free snapshot explicitly, that contains some error checking */
+ if (builder->snapshot != NULL)
+ {
+ SnapBuildSnapDecRefcount(builder->snapshot);
+ builder->snapshot = NULL;
+ }
+
+ /* other resources are deallocated via memory context reset */
+ MemoryContextDelete(context);
+}
+
+/*
+ * Free an unreferenced snapshot that has previously been built by us.
+ */
+static void
+SnapBuildFreeSnapshot(Snapshot snap)
+{
+ /* make sure we don't get passed an external snapshot */
+ Assert(snap->snapshot_type == SNAPSHOT_HISTORIC_MVCC);
+
+ /* make sure nobody modified our snapshot */
+ Assert(snap->curcid == FirstCommandId);
+ Assert(!snap->suboverflowed);
+ Assert(!snap->takenDuringRecovery);
+ Assert(snap->regd_count == 0);
+
+ /* slightly more likely, so it's checked even without c-asserts */
+ if (snap->copied)
+ elog(ERROR, "cannot free a copied snapshot");
+
+ if (snap->active_count)
+ elog(ERROR, "cannot free an active snapshot");
+
+ pfree(snap);
+}
+
+/*
+ * In which state of snapshot building are we?
+ */
+SnapBuildState
+SnapBuildCurrentState(SnapBuild *builder)
+{
+ return builder->state;
+}
+
+/*
+ * Should the contents of the transaction ending at 'ptr' be skipped rather
+ * than decoded?  Returns true if 'ptr' precedes the point we may decode from.
+ */
+bool
+SnapBuildXactNeedsSkip(SnapBuild *builder, XLogRecPtr ptr)
+{
+ return ptr < builder->start_decoding_at;
+}
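+
+/*
+ * As an illustration of the intended use (a sketch of the pattern on the
+ * commit decoding path): when this returns true, the transaction's changes
+ * are discarded rather than replayed, e.g.
+ *
+ *	if (SnapBuildXactNeedsSkip(builder, commit_lsn))
+ *		ReorderBufferForget(reorder, xid, commit_lsn);
+ */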
+
+/*
+ * Increase refcount of a snapshot.
+ *
+ * This is used when handing out a snapshot to some external resource or when
+ * adding a Snapshot as builder->snapshot.
+ */
+static void
+SnapBuildSnapIncRefcount(Snapshot snap)
+{
+ snap->active_count++;
+}
+
+/*
+ * Decrease refcount of a snapshot and free if the refcount reaches zero.
+ *
+ * Externally visible, so that external resources that have been handed an
+ * IncRef'ed Snapshot can adjust its refcount easily.
+ */
+void
+SnapBuildSnapDecRefcount(Snapshot snap)
+{
+ /* make sure we don't get passed an external snapshot */
+ Assert(snap->snapshot_type == SNAPSHOT_HISTORIC_MVCC);
+
+ /* make sure nobody modified our snapshot */
+ Assert(snap->curcid == FirstCommandId);
+ Assert(!snap->suboverflowed);
+ Assert(!snap->takenDuringRecovery);
+
+ Assert(snap->regd_count == 0);
+
+ Assert(snap->active_count > 0);
+
+	/* slightly more likely, so it's checked even without c-asserts */
+ if (snap->copied)
+ elog(ERROR, "cannot free a copied snapshot");
+
+ snap->active_count--;
+ if (snap->active_count == 0)
+ SnapBuildFreeSnapshot(snap);
+}
+
+/*
+ * Build a new snapshot, based on currently committed catalog-modifying
+ * transactions.
+ *
+ * In-progress transactions with catalog access are *not* allowed to modify
+ * these snapshots; they have to copy them and fill in appropriate ->curcid
+ * and ->subxip/subxcnt values.
+ */
+static Snapshot
+SnapBuildBuildSnapshot(SnapBuild *builder)
+{
+ Snapshot snapshot;
+ Size ssize;
+
+ Assert(builder->state >= SNAPBUILD_FULL_SNAPSHOT);
+
+ ssize = sizeof(SnapshotData)
+ + sizeof(TransactionId) * builder->committed.xcnt
+ + sizeof(TransactionId) * 1 /* toplevel xid */ ;
+
+ snapshot = MemoryContextAllocZero(builder->context, ssize);
+
+ snapshot->snapshot_type = SNAPSHOT_HISTORIC_MVCC;
+
+ /*
+ * We misuse the original meaning of SnapshotData's xip and subxip fields
+	 * to make them more fitting for our needs.
+ *
+ * In the 'xip' array we store transactions that have to be treated as
+ * committed. Since we will only ever look at tuples from transactions
+ * that have modified the catalog it's more efficient to store those few
+ * that exist between xmin and xmax (frequently there are none).
+ *
+ * Snapshots that are used in transactions that have modified the catalog
+ * also use the 'subxip' array to store their toplevel xid and all the
+ * subtransaction xids so we can recognize when we need to treat rows as
+ * visible that are not in xip but still need to be visible. Subxip only
+ * gets filled when the transaction is copied into the context of a
+ * catalog modifying transaction since we otherwise share a snapshot
+ * between transactions. As long as a txn hasn't modified the catalog it
+ * doesn't need to treat any uncommitted rows as visible, so there is no
+ * need for those xids.
+ *
+ * Both arrays are qsort'ed so that we can use bsearch() on them.
+ */
+ Assert(TransactionIdIsNormal(builder->xmin));
+ Assert(TransactionIdIsNormal(builder->xmax));
+
+ snapshot->xmin = builder->xmin;
+ snapshot->xmax = builder->xmax;
+
+ /* store all transactions to be treated as committed by this snapshot */
+ snapshot->xip =
+ (TransactionId *) ((char *) snapshot + sizeof(SnapshotData));
+ snapshot->xcnt = builder->committed.xcnt;
+ memcpy(snapshot->xip,
+ builder->committed.xip,
+ builder->committed.xcnt * sizeof(TransactionId));
+
+ /* sort so we can bsearch() */
+ qsort(snapshot->xip, snapshot->xcnt, sizeof(TransactionId), xidComparator);
+
+ /*
+ * Initially, subxip is empty, i.e. it's a snapshot to be used by
+ * transactions that don't modify the catalog. Will be filled by
+ * ReorderBufferCopySnap() if necessary.
+ */
+ snapshot->subxcnt = 0;
+ snapshot->subxip = NULL;
+
+ snapshot->suboverflowed = false;
+ snapshot->takenDuringRecovery = false;
+ snapshot->copied = false;
+ snapshot->curcid = FirstCommandId;
+ snapshot->active_count = 0;
+ snapshot->regd_count = 0;
+
+ return snapshot;
+}
+
+/*
+ * Build the initial slot snapshot and convert it to a normal snapshot that
+ * is understood by HeapTupleSatisfiesMVCC.
+ *
+ * The snapshot will be usable directly in the current transaction or can be
+ * exported for loading in a different transaction.
+ */
+Snapshot
+SnapBuildInitialSnapshot(SnapBuild *builder)
+{
+ Snapshot snap;
+ TransactionId xid;
+ TransactionId *newxip;
+ int newxcnt = 0;
+
+ Assert(!FirstSnapshotSet);
+ Assert(XactIsoLevel == XACT_REPEATABLE_READ);
+
+ if (builder->state != SNAPBUILD_CONSISTENT)
+ elog(ERROR, "cannot build an initial slot snapshot before reaching a consistent state");
+
+ if (!builder->committed.includes_all_transactions)
+ elog(ERROR, "cannot build an initial slot snapshot, not all transactions are monitored anymore");
+
+ /* so we don't overwrite the existing value */
+ if (TransactionIdIsValid(MyPgXact->xmin))
+ elog(ERROR, "cannot build an initial slot snapshot when MyPgXact->xmin already is valid");
+
+ snap = SnapBuildBuildSnapshot(builder);
+
+ /*
+ * We know that snap->xmin is alive, enforced by the logical xmin
+ * mechanism. Due to that we can do this without locks, we're only
+ * changing our own value.
+ */
+#ifdef USE_ASSERT_CHECKING
+ {
+ TransactionId safeXid;
+
+ LWLockAcquire(ProcArrayLock, LW_SHARED);
+ safeXid = GetOldestSafeDecodingTransactionId(false);
+ LWLockRelease(ProcArrayLock);
+
+ Assert(TransactionIdPrecedesOrEquals(safeXid, snap->xmin));
+ }
+#endif
+
+ MyPgXact->xmin = snap->xmin;
+
+ /* allocate in transaction context */
+ newxip = (TransactionId *)
+ palloc(sizeof(TransactionId) * GetMaxSnapshotXidCount());
+
+ /*
+	 * snapbuild.c builds snapshots in an "inverted" manner, which means it
+ * stores committed transactions in ->xip, not ones in progress. Build a
+ * classical snapshot by marking all non-committed transactions as
+ * in-progress. This can be expensive.
+ */
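+
+	/*
+	 * For example (illustrative numbers): with xmin = 10, xmax = 15 and
+	 * snap->xip = {12} in decoding terms ("12 committed"), the loop below
+	 * yields a classical snapshot with xip = {10, 11, 13, 14}, i.e.
+	 * everything in [xmin, xmax) that is not known committed is treated as
+	 * in progress.
+	 */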
+ for (xid = snap->xmin; NormalTransactionIdPrecedes(xid, snap->xmax);)
+ {
+ void *test;
+
+ /*
+ * Check whether transaction committed using the decoding snapshot
+ * meaning of ->xip.
+ */
+ test = bsearch(&xid, snap->xip, snap->xcnt,
+ sizeof(TransactionId), xidComparator);
+
+ if (test == NULL)
+ {
+ if (newxcnt >= GetMaxSnapshotXidCount())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("initial slot snapshot too large")));
+
+ newxip[newxcnt++] = xid;
+ }
+
+ TransactionIdAdvance(xid);
+ }
+
+ /* adjust remaining snapshot fields as needed */
+ snap->snapshot_type = SNAPSHOT_MVCC;
+ snap->xcnt = newxcnt;
+ snap->xip = newxip;
+
+ return snap;
+}
+
+/*
+ * Export a snapshot so it can be set in another session with SET TRANSACTION
+ * SNAPSHOT.
+ *
+ * For that we need to start a transaction in the current backend as the
+ * importing side checks whether the source transaction is still open to make
+ * sure the xmin horizon hasn't advanced since then.
+ */
+const char *
+SnapBuildExportSnapshot(SnapBuild *builder)
+{
+ Snapshot snap;
+ char *snapname;
+
+ if (IsTransactionOrTransactionBlock())
+ elog(ERROR, "cannot export a snapshot from within a transaction");
+
+ if (SavedResourceOwnerDuringExport)
+ elog(ERROR, "can only export one snapshot at a time");
+
+ SavedResourceOwnerDuringExport = CurrentResourceOwner;
+ ExportInProgress = true;
+
+ StartTransactionCommand();
+
+	/* There doesn't seem to be a nice API to set these */
+ XactIsoLevel = XACT_REPEATABLE_READ;
+ XactReadOnly = true;
+
+ snap = SnapBuildInitialSnapshot(builder);
+
+ /*
+ * now that we've built a plain snapshot, make it active and use the
+ * normal mechanisms for exporting it
+ */
+ snapname = ExportSnapshot(snap);
+
+ ereport(LOG,
+ (errmsg_plural("exported logical decoding snapshot: \"%s\" with %u transaction ID",
+ "exported logical decoding snapshot: \"%s\" with %u transaction IDs",
+ snap->xcnt,
+ snapname, snap->xcnt)));
+ return snapname;
+}
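+
+/*
+ * The returned name is meant to be consumed from another session, roughly
+ * like this (a sketch; the identifier is whatever ExportSnapshot() handed
+ * back):
+ *
+ *	BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ *	SET TRANSACTION SNAPSHOT '00000003-00000002-1';
+ *	-- ... read from the exported snapshot's point of view ...
+ */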
+
+/*
+ * Ensure there is a snapshot; if there isn't one yet, build one for the
+ * current transaction.
+ */
+Snapshot
+SnapBuildGetOrBuildSnapshot(SnapBuild *builder, TransactionId xid)
+{
+ Assert(builder->state == SNAPBUILD_CONSISTENT);
+
+ /* only build a new snapshot if we don't have a prebuilt one */
+ if (builder->snapshot == NULL)
+ {
+ builder->snapshot = SnapBuildBuildSnapshot(builder);
+ /* increase refcount for the snapshot builder */
+ SnapBuildSnapIncRefcount(builder->snapshot);
+ }
+
+ return builder->snapshot;
+}
+
+/*
+ * Reset a previously SnapBuildExportSnapshot()'ed snapshot if there is
+ * any. Aborts the previously started transaction and resets the resource
+ * owner back to its original value.
+ */
+void
+SnapBuildClearExportedSnapshot(void)
+{
+ /* nothing exported, that is the usual case */
+ if (!ExportInProgress)
+ return;
+
+ if (!IsTransactionState())
+ elog(ERROR, "clearing exported snapshot in wrong transaction state");
+
+	/* abort, so that nothing done in the transaction can take effect */
+ AbortCurrentTransaction();
+
+ CurrentResourceOwner = SavedResourceOwnerDuringExport;
+ SavedResourceOwnerDuringExport = NULL;
+ ExportInProgress = false;
+}
+
+/*
+ * Handle the effects of a single heap change, as appropriate to the current
+ * state of the snapshot builder, and return whether changes made at
+ * (xid, lsn) can be decoded.
+ */
+bool
+SnapBuildProcessChange(SnapBuild *builder, TransactionId xid, XLogRecPtr lsn)
+{
+ /*
+ * We can't handle data in transactions if we haven't built a snapshot
+ * yet, so don't store them.
+ */
+ if (builder->state < SNAPBUILD_FULL_SNAPSHOT)
+ return false;
+
+ /*
+ * No point in keeping track of changes in transactions that we don't have
+ * enough information about to decode. This means that they started before
+ * we got into the SNAPBUILD_FULL_SNAPSHOT state.
+ */
+ if (builder->state < SNAPBUILD_CONSISTENT &&
+ TransactionIdPrecedes(xid, SnapBuildNextPhaseAt(builder)))
+ return false;
+
+ /*
+ * If the reorderbuffer doesn't yet have a snapshot, add one now, it will
+ * be needed to decode the change we're currently processing.
+ */
+ if (!ReorderBufferXidHasBaseSnapshot(builder->reorder, xid))
+ {
+ /* only build a new snapshot if we don't have a prebuilt one */
+ if (builder->snapshot == NULL)
+ {
+ builder->snapshot = SnapBuildBuildSnapshot(builder);
+ /* increase refcount for the snapshot builder */
+ SnapBuildSnapIncRefcount(builder->snapshot);
+ }
+
+ /*
+ * Increase refcount for the transaction we're handing the snapshot
+ * out to.
+ */
+ SnapBuildSnapIncRefcount(builder->snapshot);
+ ReorderBufferSetBaseSnapshot(builder->reorder, xid, lsn,
+ builder->snapshot);
+ }
+
+ return true;
+}
+
+/*
+ * Do CommandId/ComboCid handling after reading an xl_heap_new_cid record.
+ * This implies that a transaction has done some form of write to system
+ * catalogs.
+ */
+void
+SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid,
+ XLogRecPtr lsn, xl_heap_new_cid *xlrec)
+{
+ CommandId cid;
+
+ /*
+ * we only log new_cid's if a catalog tuple was modified, so mark the
+ * transaction as containing catalog modifications
+ */
+ ReorderBufferXidSetCatalogChanges(builder->reorder, xid, lsn);
+
+ ReorderBufferAddNewTupleCids(builder->reorder, xlrec->top_xid, lsn,
+ xlrec->target_node, xlrec->target_tid,
+ xlrec->cmin, xlrec->cmax,
+ xlrec->combocid);
+
+ /* figure out new command id */
+ if (xlrec->cmin != InvalidCommandId &&
+ xlrec->cmax != InvalidCommandId)
+ cid = Max(xlrec->cmin, xlrec->cmax);
+ else if (xlrec->cmax != InvalidCommandId)
+ cid = xlrec->cmax;
+ else if (xlrec->cmin != InvalidCommandId)
+ cid = xlrec->cmin;
+ else
+ {
+ cid = InvalidCommandId; /* silence compiler */
+ elog(ERROR, "xl_heap_new_cid record without a valid CommandId");
+ }
+
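+	/*
+	 * This record used 'cid' itself, so the next CommandId to be used in
+	 * the transaction must be at least cid + 1.
+	 */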
+ ReorderBufferAddNewCommandId(builder->reorder, xid, lsn, cid + 1);
+}
+
+/*
+ * Add a new Snapshot to all transactions we're decoding that currently are
+ * in-progress so they can see new catalog contents made by the transaction
+ * that just committed. This is necessary because those in-progress
+ * transactions will use the new catalog's contents from here on (at the very
+ * least everything they do needs to be compatible with newer catalog
+ * contents).
+ */
+static void
+SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn)
+{
+ dlist_iter txn_i;
+ ReorderBufferTXN *txn;
+
+ /*
+ * Iterate through all toplevel transactions. This can include
+	 * subtransactions that we don't yet know to be subtransactions; that's
+ * fine, they will just get an unnecessary snapshot queued.
+ */
+ dlist_foreach(txn_i, &builder->reorder->toplevel_by_lsn)
+ {
+ txn = dlist_container(ReorderBufferTXN, node, txn_i.cur);
+
+ Assert(TransactionIdIsValid(txn->xid));
+
+ /*
+ * If we don't have a base snapshot yet, there are no changes in this
+ * transaction which in turn implies we don't yet need a snapshot at
+ * all. We'll add a snapshot when the first change gets queued.
+ *
+ * NB: This works correctly even for subtransactions because
+ * ReorderBufferAssignChild() takes care to transfer the base snapshot
+ * to the top-level transaction, and while iterating the changequeue
+ * we'll get the change from the subtxn.
+ */
+ if (!ReorderBufferXidHasBaseSnapshot(builder->reorder, txn->xid))
+ continue;
+
+ elog(DEBUG2, "adding a new snapshot to %u at %X/%X",
+ txn->xid, (uint32) (lsn >> 32), (uint32) lsn);
+
+ /*
+ * increase the snapshot's refcount for the transaction we are handing
+ * it out to
+ */
+ SnapBuildSnapIncRefcount(builder->snapshot);
+ ReorderBufferAddSnapshot(builder->reorder, txn->xid, lsn,
+ builder->snapshot);
+ }
+}
+
+/*
+ * Keep track of a new catalog changing transaction that has committed.
+ */
+static void
+SnapBuildAddCommittedTxn(SnapBuild *builder, TransactionId xid)
+{
+ Assert(TransactionIdIsValid(xid));
+
+ if (builder->committed.xcnt == builder->committed.xcnt_space)
+ {
+ builder->committed.xcnt_space = builder->committed.xcnt_space * 2 + 1;
+
+ elog(DEBUG1, "increasing space for committed transactions to %u",
+ (uint32) builder->committed.xcnt_space);
+
+ builder->committed.xip = repalloc(builder->committed.xip,
+ builder->committed.xcnt_space * sizeof(TransactionId));
+ }
+
+ /*
+ * TODO: It might make sense to keep the array sorted here instead of
+ * doing it every time we build a new snapshot. On the other hand this
+ * gets called repeatedly when a transaction with subtransactions commits.
+ */
+ builder->committed.xip[builder->committed.xcnt++] = xid;
+}
+
+/*
+ * Remove knowledge about transactions we treat as committed that are smaller
+ * than ->xmin. Those won't ever get checked via the ->committed array but via
+ * the clog machinery, so we don't need to waste memory on them.
+ */
+static void
+SnapBuildPurgeCommittedTxn(SnapBuild *builder)
+{
+ int off;
+ TransactionId *workspace;
+ int surviving_xids = 0;
+
+ /* not ready yet */
+ if (!TransactionIdIsNormal(builder->xmin))
+ return;
+
+ /* TODO: Neater algorithm than just copying and iterating? */
+ workspace =
+ MemoryContextAlloc(builder->context,
+ builder->committed.xcnt * sizeof(TransactionId));
+
+ /* copy xids that still are interesting to workspace */
+ for (off = 0; off < builder->committed.xcnt; off++)
+ {
+ if (NormalTransactionIdPrecedes(builder->committed.xip[off],
+ builder->xmin))
+ ; /* remove */
+ else
+ workspace[surviving_xids++] = builder->committed.xip[off];
+ }
+
+ /* copy workspace back to persistent state */
+ memcpy(builder->committed.xip, workspace,
+ surviving_xids * sizeof(TransactionId));
+
+ elog(DEBUG3, "purged committed transactions from %u to %u, xmin: %u, xmax: %u",
+ (uint32) builder->committed.xcnt, (uint32) surviving_xids,
+ builder->xmin, builder->xmax);
+ builder->committed.xcnt = surviving_xids;
+
+ pfree(workspace);
+}
+
+/*
+ * Handle everything that needs to be done when a transaction commits
+ */
+void
+SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
+ int nsubxacts, TransactionId *subxacts)
+{
+ int nxact;
+
+ bool needs_snapshot = false;
+ bool needs_timetravel = false;
+ bool sub_needs_timetravel = false;
+
+ TransactionId xmax = xid;
+
+ /*
+ * Transactions preceding BUILDING_SNAPSHOT will neither be decoded, nor
+ * will they be part of a snapshot. So we don't need to record anything.
+ */
+ if (builder->state == SNAPBUILD_START ||
+ (builder->state == SNAPBUILD_BUILDING_SNAPSHOT &&
+ TransactionIdPrecedes(xid, SnapBuildNextPhaseAt(builder))))
+ {
+ /* ensure that only commits after this are getting replayed */
+ if (builder->start_decoding_at <= lsn)
+ builder->start_decoding_at = lsn + 1;
+ return;
+ }
+
+ if (builder->state < SNAPBUILD_CONSISTENT)
+ {
+ /* ensure that only commits after this are getting replayed */
+ if (builder->start_decoding_at <= lsn)
+ builder->start_decoding_at = lsn + 1;
+
+ /*
+ * If building an exportable snapshot, force xid to be tracked, even
+ * if the transaction didn't modify the catalog.
+ */
+ if (builder->building_full_snapshot)
+ {
+ needs_timetravel = true;
+ }
+ }
+
+ for (nxact = 0; nxact < nsubxacts; nxact++)
+ {
+ TransactionId subxid = subxacts[nxact];
+
+ /*
+		 * Add the subtransaction to the base snapshot if it is catalog
+		 * modifying; we don't distinguish it from toplevel transactions
+		 * there.
+ */
+ if (ReorderBufferXidHasCatalogChanges(builder->reorder, subxid))
+ {
+ sub_needs_timetravel = true;
+ needs_snapshot = true;
+
+ elog(DEBUG1, "found subtransaction %u:%u with catalog changes",
+ xid, subxid);
+
+ SnapBuildAddCommittedTxn(builder, subxid);
+
+ if (NormalTransactionIdFollows(subxid, xmax))
+ xmax = subxid;
+ }
+
+ /*
+ * If we're forcing timetravel we also need visibility information
+		 * about subtransactions, so keep track of each subtransaction's
+		 * state, even if it is not catalog modifying. There's no need to
+		 * distribute a snapshot in that case.
+ */
+ else if (needs_timetravel)
+ {
+ SnapBuildAddCommittedTxn(builder, subxid);
+ if (NormalTransactionIdFollows(subxid, xmax))
+ xmax = subxid;
+ }
+ }
+
+ /* if top-level modified catalog, it'll need a snapshot */
+ if (ReorderBufferXidHasCatalogChanges(builder->reorder, xid))
+ {
+		elog(DEBUG2, "found top level transaction %u with catalog changes",
+ xid);
+ needs_snapshot = true;
+ needs_timetravel = true;
+ SnapBuildAddCommittedTxn(builder, xid);
+ }
+ else if (sub_needs_timetravel)
+ {
+ /* track toplevel txn as well, subxact alone isn't meaningful */
+ SnapBuildAddCommittedTxn(builder, xid);
+ }
+ else if (needs_timetravel)
+ {
+ elog(DEBUG2, "forced transaction %u to do timetravel", xid);
+
+ SnapBuildAddCommittedTxn(builder, xid);
+ }
+
+ if (!needs_timetravel)
+ {
+ /* record that we cannot export a general snapshot anymore */
+ builder->committed.includes_all_transactions = false;
+ }
+
+ Assert(!needs_snapshot || needs_timetravel);
+
+ /*
+	 * Adjust xmax of the snapshot builder. We only do that for committed,
+	 * catalog modifying transactions; everything else isn't interesting
+	 * for us since we'll never look at the respective rows.
+ */
+ if (needs_timetravel &&
+ (!TransactionIdIsValid(builder->xmax) ||
+ TransactionIdFollowsOrEquals(xmax, builder->xmax)))
+ {
+ builder->xmax = xmax;
+ TransactionIdAdvance(builder->xmax);
+ }
+
+ /* if there's any reason to build a historic snapshot, do so now */
+ if (needs_snapshot)
+ {
+ /*
+ * If we haven't built a complete snapshot yet there's no need to hand
+ * it out, it wouldn't (and couldn't) be used anyway.
+ */
+ if (builder->state < SNAPBUILD_FULL_SNAPSHOT)
+ return;
+
+ /*
+ * Decrease the snapshot builder's refcount of the old snapshot, note
+ * that it still will be used if it has been handed out to the
+ * reorderbuffer earlier.
+ */
+ if (builder->snapshot)
+ SnapBuildSnapDecRefcount(builder->snapshot);
+
+ builder->snapshot = SnapBuildBuildSnapshot(builder);
+
+ /* we might need to execute invalidations, add snapshot */
+ if (!ReorderBufferXidHasBaseSnapshot(builder->reorder, xid))
+ {
+ SnapBuildSnapIncRefcount(builder->snapshot);
+ ReorderBufferSetBaseSnapshot(builder->reorder, xid, lsn,
+ builder->snapshot);
+ }
+
+ /* refcount of the snapshot builder for the new snapshot */
+ SnapBuildSnapIncRefcount(builder->snapshot);
+
+ /* add a new catalog snapshot to all currently running transactions */
+ SnapBuildDistributeNewCatalogSnapshot(builder, lsn);
+ }
+}
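+
+/*
+ * In summary, the flags above work out roughly as follows (a condensed
+ * restatement of the logic above, not additional behavior):
+ *
+ * - top-level xact modified the catalog:    timetravel + new snapshot
+ * - only a subxact modified the catalog:    timetravel + new snapshot
+ * - building_full_snapshot, pre-CONSISTENT: timetravel only
+ * - neither:                                nothing tracked; the builder's
+ *   ->committed no longer includes all transactions
+ */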
+
+
+/* -----------------------------------
+ * Snapshot building functions dealing with xlog records
+ * -----------------------------------
+ */
+
+/*
+ * Process a running xacts record, and use its information to first build a
+ * historic snapshot and later to release resources that aren't needed
+ * anymore.
+ */
+void
+SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *running)
+{
+ ReorderBufferTXN *txn;
+ TransactionId xmin;
+
+ /*
+ * If we're not consistent yet, inspect the record to see whether it
+	 * allows us to get closer to being consistent. If we are consistent,
+	 * dump our snapshot so that other backends, or we ourselves after a
+	 * restart, can use it.
+ */
+ if (builder->state < SNAPBUILD_CONSISTENT)
+ {
+ /* returns false if there's no point in performing cleanup just yet */
+ if (!SnapBuildFindSnapshot(builder, lsn, running))
+ return;
+ }
+ else
+ SnapBuildSerialize(builder, lsn);
+
+ /*
+ * Update range of interesting xids based on the running xacts
+	 * information. We don't increase ->xmax using it, because once we are in
+	 * a consistent state we can do that ourselves, and much more
+	 * efficiently: we only need to do it for catalog transactions, since we
+	 * only ever look at those.
+ *
+ * NB: We only increase xmax when a catalog modifying transaction commits
+ * (see SnapBuildCommitTxn). Because of this, xmax can be lower than
+ * xmin, which looks odd but is correct and actually more efficient, since
+ * we hit fast paths in heapam_visibility.c.
+ */
+ builder->xmin = running->oldestRunningXid;
+
+	/* Remove transactions we don't need to keep track of anymore */
+ SnapBuildPurgeCommittedTxn(builder);
+
+ /*
+ * Advance the xmin limit for the current replication slot, to allow
+ * vacuum to clean up the tuples this slot has been protecting.
+ *
+ * The reorderbuffer might have an xmin among the currently running
+ * snapshots; use it if so. If not, we need only consider the snapshots
+ * we'll produce later, which can't be less than the oldest running xid in
+ * the record we're reading now.
+ */
+ xmin = ReorderBufferGetOldestXmin(builder->reorder);
+ if (xmin == InvalidTransactionId)
+ xmin = running->oldestRunningXid;
+ elog(DEBUG3, "xmin: %u, xmax: %u, oldest running: %u, oldest xmin: %u",
+ builder->xmin, builder->xmax, running->oldestRunningXid, xmin);
+ LogicalIncreaseXminForSlot(lsn, xmin);
+
+ /*
+ * Also tell the slot where we can restart decoding from. We don't want to
+ * do that after every commit because changing that implies an fsync of
+ * the logical slot's state file, so we only do it every time we see a
+ * running xacts record.
+ *
+ * Do so by looking for the oldest in progress transaction (determined by
+ * the first LSN of any of its relevant records). Every transaction
+ * remembers the last location we stored the snapshot to disk before its
+ * beginning. That point is where we can restart from.
+ */
+
+ /*
+ * Can't know about a serialized snapshot's location if we're not
+ * consistent.
+ */
+ if (builder->state < SNAPBUILD_CONSISTENT)
+ return;
+
+ txn = ReorderBufferGetOldestTXN(builder->reorder);
+
+ /*
+ * oldest ongoing txn might have started when we didn't yet serialize
+ * anything because we hadn't reached a consistent state yet.
+ */
+ if (txn != NULL && txn->restart_decoding_lsn != InvalidXLogRecPtr)
+ LogicalIncreaseRestartDecodingForSlot(lsn, txn->restart_decoding_lsn);
+
+ /*
+ * No in-progress transaction, can reuse the last serialized snapshot if
+ * we have one.
+ */
+ else if (txn == NULL &&
+ builder->reorder->current_restart_decoding_lsn != InvalidXLogRecPtr &&
+ builder->last_serialized_snapshot != InvalidXLogRecPtr)
+ LogicalIncreaseRestartDecodingForSlot(lsn,
+ builder->last_serialized_snapshot);
+}
+
+
+/*
+ * Build the start of a snapshot that's capable of decoding the catalog.
+ *
+ * Helper function for SnapBuildProcessRunningXacts() while we're not yet
+ * consistent.
+ *
+ * Returns true if there is a point in performing internal maintenance/cleanup
+ * using the xl_running_xacts record.
+ */
+static bool
+SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *running)
+{
+ /* ---
+ * Build catalog decoding snapshot incrementally using information about
+ * the currently running transactions. There are several ways to do that:
+ *
+ * a) There were no running transactions when the xl_running_xacts record
+ * was inserted, jump to CONSISTENT immediately. We might find such a
+ * state while waiting on c)'s sub-states.
+ *
+ * b) This (in a previous run) or another decoding slot serialized a
+ * snapshot to disk that we can use. Can't use this method for the
+ * initial snapshot when slot is being created and needs full snapshot
+	 * initial snapshot when a slot is being created and needs a full
+	 * snapshot for export or direct use, as that snapshot will only contain
+	 * catalog modifying transactions.
+ * c) First incrementally build a snapshot for catalog tuples
+ * (BUILDING_SNAPSHOT), that requires all, already in-progress,
+ * transactions to finish. Every transaction starting after that
+ * (FULL_SNAPSHOT state), has enough information to be decoded. But
+ * for older running transactions no viable snapshot exists yet, so
+ * CONSISTENT will only be reached once all of those have finished.
+ * ---
+ */
+
+ /*
+	 * If the xl_running_xacts record is older than what we can use, we might
+	 * not have all necessary catalog rows anymore.
+ */
+ if (TransactionIdIsNormal(builder->initial_xmin_horizon) &&
+ NormalTransactionIdPrecedes(running->oldestRunningXid,
+ builder->initial_xmin_horizon))
+ {
+ ereport(DEBUG1,
+ (errmsg_internal("skipping snapshot at %X/%X while building logical decoding snapshot, xmin horizon too low",
+ (uint32) (lsn >> 32), (uint32) lsn),
+ errdetail_internal("initial xmin horizon of %u vs the snapshot's %u",
+ builder->initial_xmin_horizon, running->oldestRunningXid)));
+
+ SnapBuildWaitSnapshot(running, builder->initial_xmin_horizon);
+
+ return true;
+ }
+
+ /*
+	 * a) No transactions were running, so we can jump to consistent.
+ *
+ * This is not affected by races around xl_running_xacts, because we can
+ * miss transaction commits, but currently not transactions starting.
+ *
+ * NB: We might have already started to incrementally assemble a snapshot,
+ * so we need to be careful to deal with that.
+ */
+ if (running->oldestRunningXid == running->nextXid)
+ {
+ if (builder->start_decoding_at == InvalidXLogRecPtr ||
+ builder->start_decoding_at <= lsn)
+ /* can decode everything after this */
+ builder->start_decoding_at = lsn + 1;
+
+ /* As no transactions were running xmin/xmax can be trivially set. */
+ builder->xmin = running->nextXid; /* < are finished */
+ builder->xmax = running->nextXid; /* >= are running */
+
+ /* so we can safely use the faster comparisons */
+ Assert(TransactionIdIsNormal(builder->xmin));
+ Assert(TransactionIdIsNormal(builder->xmax));
+
+ builder->state = SNAPBUILD_CONSISTENT;
+ SnapBuildStartNextPhaseAt(builder, InvalidTransactionId);
+
+ ereport(LOG,
+ (errmsg("logical decoding found consistent point at %X/%X",
+ (uint32) (lsn >> 32), (uint32) lsn),
+ errdetail("There are no running transactions.")));
+
+ return false;
+ }
+ /* b) valid on disk state and not building full snapshot */
+ else if (!builder->building_full_snapshot &&
+ SnapBuildRestore(builder, lsn))
+ {
+ /* there won't be any state to cleanup */
+ return false;
+ }
+
+ /*
+ * c) transition from START to BUILDING_SNAPSHOT.
+ *
+	 * In START state, an xl_running_xacts record with running xacts is
+ * encountered. In that case, switch to BUILDING_SNAPSHOT state, and
+ * record xl_running_xacts->nextXid. Once all running xacts have finished
+ * (i.e. they're all >= nextXid), we have a complete catalog snapshot. It
+	 * might look like we could use xl_running_xacts' ->xids information to
+	 * get there quicker, but that is problematic because transactions marked
+	 * as running might already have inserted their commit record - it's
+ * infeasible to change that with locking.
+ */
+ else if (builder->state == SNAPBUILD_START)
+ {
+ builder->state = SNAPBUILD_BUILDING_SNAPSHOT;
+ SnapBuildStartNextPhaseAt(builder, running->nextXid);
+
+ /*
+ * Start with an xmin/xmax that's correct for future, when all the
+ * currently running transactions have finished. We'll update both
+ * while waiting for the pending transactions to finish.
+ */
+ builder->xmin = running->nextXid; /* < are finished */
+ builder->xmax = running->nextXid; /* >= are running */
+
+ /* so we can safely use the faster comparisons */
+ Assert(TransactionIdIsNormal(builder->xmin));
+ Assert(TransactionIdIsNormal(builder->xmax));
+
+ ereport(LOG,
+ (errmsg("logical decoding found initial starting point at %X/%X",
+ (uint32) (lsn >> 32), (uint32) lsn),
+ errdetail("Waiting for transactions (approximately %d) older than %u to end.",
+ running->xcnt, running->nextXid)));
+
+ SnapBuildWaitSnapshot(running, running->nextXid);
+ }
+
+ /*
+ * c) transition from BUILDING_SNAPSHOT to FULL_SNAPSHOT.
+ *
+ * In BUILDING_SNAPSHOT state, and this xl_running_xacts' oldestRunningXid
+	 * is >= nextXid from when we switched to BUILDING_SNAPSHOT. This
+ * means all transactions starting afterwards have enough information to
+ * be decoded. Switch to FULL_SNAPSHOT.
+ */
+ else if (builder->state == SNAPBUILD_BUILDING_SNAPSHOT &&
+ TransactionIdPrecedesOrEquals(SnapBuildNextPhaseAt(builder),
+ running->oldestRunningXid))
+ {
+ builder->state = SNAPBUILD_FULL_SNAPSHOT;
+ SnapBuildStartNextPhaseAt(builder, running->nextXid);
+
+ ereport(LOG,
+ (errmsg("logical decoding found initial consistent point at %X/%X",
+ (uint32) (lsn >> 32), (uint32) lsn),
+ errdetail("Waiting for transactions (approximately %d) older than %u to end.",
+ running->xcnt, running->nextXid)));
+
+ SnapBuildWaitSnapshot(running, running->nextXid);
+ }
+
+ /*
+ * c) transition from FULL_SNAPSHOT to CONSISTENT.
+ *
+	 * In FULL_SNAPSHOT state, and this xl_running_xacts'
+	 * oldestRunningXid is >= nextXid from when we switched to
+ * FULL_SNAPSHOT. This means all transactions that are currently in
+ * progress have a catalog snapshot, and all their changes have been
+ * collected. Switch to CONSISTENT.
+ */
+ else if (builder->state == SNAPBUILD_FULL_SNAPSHOT &&
+ TransactionIdPrecedesOrEquals(SnapBuildNextPhaseAt(builder),
+ running->oldestRunningXid))
+ {
+ builder->state = SNAPBUILD_CONSISTENT;
+ SnapBuildStartNextPhaseAt(builder, InvalidTransactionId);
+
+ ereport(LOG,
+ (errmsg("logical decoding found consistent point at %X/%X",
+ (uint32) (lsn >> 32), (uint32) lsn),
+ errdetail("There are no old transactions anymore.")));
+ }
+
+ /*
+ * We already started to track running xacts and need to wait for all
+ * in-progress ones to finish. We fall through to the normal processing of
+ * records so incremental cleanup can be performed.
+ */
+	return true;
+}
+
+/* ---
+ * Iterate through xids in record, wait for all older than the cutoff to
+ * finish. Then, if possible, log a new xl_running_xacts record.
+ *
+ * This isn't required for the correctness of decoding, but to:
+ * a) allow isolationtester to notice that we're currently waiting for
+ * something.
+ * b) log a new xl_running_xacts record where it'd be helpful, without having
+ *	  to wait for bgwriter or checkpointer.
+ * ---
+ */
+static void
+SnapBuildWaitSnapshot(xl_running_xacts *running, TransactionId cutoff)
+{
+ int off;
+
+ for (off = 0; off < running->xcnt; off++)
+ {
+ TransactionId xid = running->xids[off];
+
+ /*
+		 * Upper layers should prevent us from ever waiting on ourselves.
+ * Check anyway, since failing to do so would either result in an
+ * endless wait or an Assert() failure.
+ */
+ if (TransactionIdIsCurrentTransactionId(xid))
+ elog(ERROR, "waiting for ourselves");
+
+ if (TransactionIdFollows(xid, cutoff))
+ continue;
+
+ XactLockTableWait(xid, NULL, NULL, XLTW_None);
+ }
+
+ /*
+ * All transactions we needed to finish finished - try to ensure there is
+ * another xl_running_xacts record in a timely manner, without having to
+	 * wait for bgwriter or checkpointer to log one.  During recovery we
+ * can't enforce that, so we'll have to wait.
+ */
+ if (!RecoveryInProgress())
+ {
+ LogStandbySnapshot();
+ }
+}
+
+/* -----------------------------------
+ * Snapshot serialization support
+ * -----------------------------------
+ */
+
+/*
+ * We store the current state of struct SnapBuild on disk in the following
+ * manner:
+ *
+ * struct SnapBuildOnDisk;
+ * TransactionId * was_running.was_xcnt_space;
+ * TransactionId * committed.xcnt; (*not xcnt_space*)
+ *
+ */
+typedef struct SnapBuildOnDisk
+{
+ /* first part of this struct needs to be version independent */
+
+ /* data not covered by checksum */
+ uint32 magic;
+ pg_crc32c checksum;
+
+ /* data covered by checksum */
+
+ /* version, in case we want to support pg_upgrade */
+ uint32 version;
+ /* how large is the on disk data, excluding the constant sized part */
+ uint32 length;
+
+ /* version dependent part */
+ SnapBuild builder;
+
+ /* variable amount of TransactionIds follows */
+} SnapBuildOnDisk;
+
+#define SnapBuildOnDiskConstantSize \
+ offsetof(SnapBuildOnDisk, builder)
+#define SnapBuildOnDiskNotChecksummedSize \
+ offsetof(SnapBuildOnDisk, version)
+
+#define SNAPBUILD_MAGIC 0x51A1E001
+#define SNAPBUILD_VERSION 2
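+
+/*
+ * Concretely, the checksum covers everything from 'version' onward: the
+ * tail of the header (bytes [SnapBuildOnDiskNotChecksummedSize,
+ * SnapBuildOnDiskConstantSize)), the serialized SnapBuild itself, and the
+ * TransactionId arrays that follow on disk.
+ */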
+
+/*
+ * Store/Load a snapshot from disk, depending on the snapshot builder's state.
+ *
+ * Intended to be used by external (i.e. not snapbuild.c) code that has just
+ * read a record that is a potential location for a serialized snapshot.
+ */
+void
+SnapBuildSerializationPoint(SnapBuild *builder, XLogRecPtr lsn)
+{
+ if (builder->state < SNAPBUILD_CONSISTENT)
+ SnapBuildRestore(builder, lsn);
+ else
+ SnapBuildSerialize(builder, lsn);
+}
+
+/*
+ * Serialize the snapshot 'builder' at the location 'lsn' if it hasn't already
+ * been done by another decoding process.
+ */
+static void
+SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn)
+{
+ Size needed_length;
+ SnapBuildOnDisk *ondisk = NULL;
+ char *ondisk_c;
+ int fd;
+ char tmppath[MAXPGPATH];
+ char path[MAXPGPATH];
+ int ret;
+ struct stat stat_buf;
+ Size sz;
+
+ Assert(lsn != InvalidXLogRecPtr);
+ Assert(builder->last_serialized_snapshot == InvalidXLogRecPtr ||
+ builder->last_serialized_snapshot <= lsn);
+
+ /*
+ * no point in serializing if we cannot continue to work immediately after
+ * restoring the snapshot
+ */
+ if (builder->state < SNAPBUILD_CONSISTENT)
+ return;
+
+ /*
+ * We identify snapshots by the LSN they are valid for. We don't need to
+ * include timelines in the name as each LSN maps to exactly one timeline
+ * unless the user used pg_resetwal or similar. If a user did so, there's
+ * no hope continuing to decode anyway.
+ */
+ sprintf(path, "pg_logical/snapshots/%X-%X.snap",
+ (uint32) (lsn >> 32), (uint32) lsn);
+
+ /*
+ * first check whether some other backend already has written the snapshot
+ * for this LSN. It's perfectly fine if there's none, so we accept ENOENT
+ * as a valid state. Everything else is an unexpected error.
+ */
+ ret = stat(path, &stat_buf);
+
+ if (ret != 0 && errno != ENOENT)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not stat file \"%s\": %m", path)));
+
+ else if (ret == 0)
+ {
+ /*
+ * somebody else has already serialized to this point, don't overwrite
+ * but remember location, so we don't need to read old data again.
+ *
+ * To be sure it has been synced to disk after the rename() from the
+ * tempfile filename to the real filename, we just repeat the fsync.
+ * That ought to be cheap because in most scenarios it should already
+ * be safely on disk.
+ */
+ fsync_fname(path, false);
+ fsync_fname("pg_logical/snapshots", true);
+
+ builder->last_serialized_snapshot = lsn;
+ goto out;
+ }
+
+ /*
+ * there is an obvious race condition here between the time we stat(2) the
+	 * file and the time we write it. But we rename the file into place
+ * atomically and all files created need to contain the same data anyway,
+ * so this is perfectly fine, although a bit of a resource waste. Locking
+ * seems like pointless complication.
+ */
+ elog(DEBUG1, "serializing snapshot to %s", path);
+
+ /* to make sure only we will write to this tempfile, include pid */
+ sprintf(tmppath, "pg_logical/snapshots/%X-%X.snap.%u.tmp",
+ (uint32) (lsn >> 32), (uint32) lsn, MyProcPid);
+
+ /*
+	 * Unlink the temporary file if it already exists; it must be left over
+	 * from before a crash/error, since we won't enter this function twice
+	 * from within a single decoding slot/backend and the temporary file
+	 * contains the pid of the current process.
+ */
+ if (unlink(tmppath) != 0 && errno != ENOENT)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not remove file \"%s\": %m", tmppath)));
+
+ needed_length = sizeof(SnapBuildOnDisk) +
+ sizeof(TransactionId) * builder->committed.xcnt;
+
+ ondisk_c = MemoryContextAllocZero(builder->context, needed_length);
+ ondisk = (SnapBuildOnDisk *) ondisk_c;
+ ondisk->magic = SNAPBUILD_MAGIC;
+ ondisk->version = SNAPBUILD_VERSION;
+ ondisk->length = needed_length;
+ INIT_CRC32C(ondisk->checksum);
+ COMP_CRC32C(ondisk->checksum,
+ ((char *) ondisk) + SnapBuildOnDiskNotChecksummedSize,
+ SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize);
+ ondisk_c += sizeof(SnapBuildOnDisk);
+
+ memcpy(&ondisk->builder, builder, sizeof(SnapBuild));
+ /* NULL-ify memory-only data */
+ ondisk->builder.context = NULL;
+ ondisk->builder.snapshot = NULL;
+ ondisk->builder.reorder = NULL;
+ ondisk->builder.committed.xip = NULL;
+
+ COMP_CRC32C(ondisk->checksum,
+ &ondisk->builder,
+ sizeof(SnapBuild));
+
+ /* there shouldn't be any running xacts */
+ Assert(builder->was_running.was_xcnt == 0);
+
+ /* copy committed xacts */
+ sz = sizeof(TransactionId) * builder->committed.xcnt;
+ memcpy(ondisk_c, builder->committed.xip, sz);
+ COMP_CRC32C(ondisk->checksum, ondisk_c, sz);
+ ondisk_c += sz;
+
+ FIN_CRC32C(ondisk->checksum);
+
+ /* we have valid data now, open tempfile and write it there */
+ fd = OpenTransientFile(tmppath,
+ O_CREAT | O_EXCL | O_WRONLY | PG_BINARY);
+ if (fd < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m", tmppath)));
+
+ errno = 0;
+ pgstat_report_wait_start(WAIT_EVENT_SNAPBUILD_WRITE);
+ if ((write(fd, ondisk, needed_length)) != needed_length)
+ {
+ int save_errno = errno;
+
+ CloseTransientFile(fd);
+
+ /* if write didn't set errno, assume problem is no disk space */
+ errno = save_errno ? save_errno : ENOSPC;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not write to file \"%s\": %m", tmppath)));
+ }
+ pgstat_report_wait_end();
+
+ /*
+ * fsync the file before renaming so that even if we crash after this we
+ * have either a fully valid file or nothing.
+ *
+ * It's safe to just ERROR on fsync() here because we'll retry the whole
+ * operation including the writes.
+ *
+ * TODO: Do the fsync() via checkpoints/restartpoints, doing it here has
+ * some noticeable overhead since it's performed synchronously during
+ * decoding?
+ */
+ pgstat_report_wait_start(WAIT_EVENT_SNAPBUILD_SYNC);
+ if (pg_fsync(fd) != 0)
+ {
+ int save_errno = errno;
+
+ CloseTransientFile(fd);
+ errno = save_errno;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not fsync file \"%s\": %m", tmppath)));
+ }
+ pgstat_report_wait_end();
+
+ if (CloseTransientFile(fd) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m", tmppath)));
+
+ fsync_fname("pg_logical/snapshots", true);
+
+ /*
+ * We may overwrite the work from some other backend, but that's ok, our
+ * snapshot is valid as well, we'll just have done some superfluous work.
+ */
+ if (rename(tmppath, path) != 0)
+ {
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not rename file \"%s\" to \"%s\": %m",
+ tmppath, path)));
+ }
+
+ /* make sure we persist */
+ fsync_fname(path, false);
+ fsync_fname("pg_logical/snapshots", true);
+
+ /*
+	 * Now there's no way we can lose the dumped state anymore, so remember
+	 * this as a serialization point.
+ */
+ builder->last_serialized_snapshot = lsn;
+
+out:
+ ReorderBufferSetRestartPoint(builder->reorder,
+ builder->last_serialized_snapshot);
+ /* be tidy */
+ if (ondisk)
+ pfree(ondisk);
+}
+
+/*
+ * Restore a snapshot into 'builder' if previously one has been stored at the
+ * location indicated by 'lsn'. Returns true if successful, false otherwise.
+ */
+static bool
+SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn)
+{
+ SnapBuildOnDisk ondisk;
+ int fd;
+ char path[MAXPGPATH];
+ Size sz;
+ int readBytes;
+ pg_crc32c checksum;
+
+ /* no point in loading a snapshot if we're already there */
+ if (builder->state == SNAPBUILD_CONSISTENT)
+ return false;
+
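+	/* the file name must match what SnapBuildSerialize() produces */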
+ sprintf(path, "pg_logical/snapshots/%X-%X.snap",
+ (uint32) (lsn >> 32), (uint32) lsn);
+
+ fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
+
+ if (fd < 0 && errno == ENOENT)
+ return false;
+ else if (fd < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m", path)));
+
+ /* ----
+	 * Make sure the snapshot has been stored safely to disk; that's normally
+	 * cheap.
+ * Note that we do not need PANIC here, nobody will be able to use the
+ * slot without fsyncing, and saving it won't succeed without an fsync()
+ * either...
+ * ----
+ */
+ fsync_fname(path, false);
+ fsync_fname("pg_logical/snapshots", true);
+
+ /* read statically sized portion of snapshot */
+ pgstat_report_wait_start(WAIT_EVENT_SNAPBUILD_READ);
+ readBytes = read(fd, &ondisk, SnapBuildOnDiskConstantSize);
+ pgstat_report_wait_end();
+ if (readBytes != SnapBuildOnDiskConstantSize)
+ {
+ int save_errno = errno;
+
+ CloseTransientFile(fd);
+
+ if (readBytes < 0)
+ {
+ errno = save_errno;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m", path)));
+ }
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("could not read file \"%s\": read %d of %zu",
+ path, readBytes,
+ (Size) SnapBuildOnDiskConstantSize)));
+ }
+
+ if (ondisk.magic != SNAPBUILD_MAGIC)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("snapbuild state file \"%s\" has wrong magic number: %u instead of %u",
+ path, ondisk.magic, SNAPBUILD_MAGIC)));
+
+ if (ondisk.version != SNAPBUILD_VERSION)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("snapbuild state file \"%s\" has unsupported version: %u instead of %u",
+ path, ondisk.version, SNAPBUILD_VERSION)));
+
+ INIT_CRC32C(checksum);
+ COMP_CRC32C(checksum,
+ ((char *) &ondisk) + SnapBuildOnDiskNotChecksummedSize,
+ SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize);
+
+ /* read SnapBuild */
+ pgstat_report_wait_start(WAIT_EVENT_SNAPBUILD_READ);
+ readBytes = read(fd, &ondisk.builder, sizeof(SnapBuild));
+ pgstat_report_wait_end();
+ if (readBytes != sizeof(SnapBuild))
+ {
+ int save_errno = errno;
+
+ CloseTransientFile(fd);
+
+ if (readBytes < 0)
+ {
+ errno = save_errno;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m", path)));
+ }
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("could not read file \"%s\": read %d of %zu",
+ path, readBytes, sizeof(SnapBuild))));
+ }
+ COMP_CRC32C(checksum, &ondisk.builder, sizeof(SnapBuild));
+
+ /* restore running xacts (dead, but kept for backward compat) */
+ sz = sizeof(TransactionId) * ondisk.builder.was_running.was_xcnt_space;
+ ondisk.builder.was_running.was_xip =
+ MemoryContextAllocZero(builder->context, sz);
+ pgstat_report_wait_start(WAIT_EVENT_SNAPBUILD_READ);
+ readBytes = read(fd, ondisk.builder.was_running.was_xip, sz);
+ pgstat_report_wait_end();
+ if (readBytes != sz)
+ {
+ int save_errno = errno;
+
+ CloseTransientFile(fd);
+
+ if (readBytes < 0)
+ {
+ errno = save_errno;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m", path)));
+ }
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("could not read file \"%s\": read %d of %zu",
+ path, readBytes, sz)));
+ }
+ COMP_CRC32C(checksum, ondisk.builder.was_running.was_xip, sz);
+
+ /* restore committed xacts information */
+ sz = sizeof(TransactionId) * ondisk.builder.committed.xcnt;
+ ondisk.builder.committed.xip = MemoryContextAllocZero(builder->context, sz);
+ pgstat_report_wait_start(WAIT_EVENT_SNAPBUILD_READ);
+ readBytes = read(fd, ondisk.builder.committed.xip, sz);
+ pgstat_report_wait_end();
+ if (readBytes != sz)
+ {
+ int save_errno = errno;
+
+ CloseTransientFile(fd);
+
+ if (readBytes < 0)
+ {
+ errno = save_errno;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m", path)));
+ }
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("could not read file \"%s\": read %d of %zu",
+ path, readBytes, sz)));
+ }
+ COMP_CRC32C(checksum, ondisk.builder.committed.xip, sz);
+
+ if (CloseTransientFile(fd) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m", path)));
+
+ FIN_CRC32C(checksum);
+
+ /* verify checksum of what we've read */
+ if (!EQ_CRC32C(checksum, ondisk.checksum))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("checksum mismatch for snapbuild state file \"%s\": is %u, should be %u",
+ path, checksum, ondisk.checksum)));
+
+	/*
+	 * OK, we now have a sensible snapshot; figure out whether it has more
+	 * information than we do.
+	 */
+
+	/*
+	 * We are only interested in consistent snapshots for now; comparing
+	 * whether one incomplete snapshot is more "advanced" than another seems
+	 * unnecessarily complex.
+	 */
+ if (ondisk.builder.state < SNAPBUILD_CONSISTENT)
+ goto snapshot_not_interesting;
+
+ /*
+ * Don't use a snapshot that requires an xmin that we cannot guarantee to
+ * be available.
+ */
+ if (TransactionIdPrecedes(ondisk.builder.xmin, builder->initial_xmin_horizon))
+ goto snapshot_not_interesting;
+
+	/* OK, the snapshot looks sensible; copy over everything important */
+ builder->xmin = ondisk.builder.xmin;
+ builder->xmax = ondisk.builder.xmax;
+ builder->state = ondisk.builder.state;
+
+ builder->committed.xcnt = ondisk.builder.committed.xcnt;
+	/* We only allocated/stored xcnt, not xcnt_space xids! */
+	/* Don't overwrite the preallocated xip if we don't have anything here. */
+ if (builder->committed.xcnt > 0)
+ {
+ pfree(builder->committed.xip);
+ builder->committed.xcnt_space = ondisk.builder.committed.xcnt;
+ builder->committed.xip = ondisk.builder.committed.xip;
+ }
+ ondisk.builder.committed.xip = NULL;
+
+	/* our snapshot is no longer interesting; build a new one */
+ if (builder->snapshot != NULL)
+ {
+ SnapBuildSnapDecRefcount(builder->snapshot);
+ }
+ builder->snapshot = SnapBuildBuildSnapshot(builder);
+ SnapBuildSnapIncRefcount(builder->snapshot);
+
+ ReorderBufferSetRestartPoint(builder->reorder, lsn);
+
+ Assert(builder->state == SNAPBUILD_CONSISTENT);
+
+ ereport(LOG,
+ (errmsg("logical decoding found consistent point at %X/%X",
+ (uint32) (lsn >> 32), (uint32) lsn),
+ errdetail("Logical decoding will begin using saved snapshot.")));
+ return true;
+
+snapshot_not_interesting:
+ if (ondisk.builder.committed.xip != NULL)
+ pfree(ondisk.builder.committed.xip);
+ return false;
+}
+
+/*
+ * Remove all serialized snapshots that are no longer required because no
+ * slot can need them. This doesn't actually have to run during a checkpoint,
+ * but a checkpoint is a convenient point to schedule it.
+ *
+ * NB: We run this during checkpoints even if logical decoding is disabled,
+ * so that old snapshots get cleaned up at some point after it was disabled.
+ */
+void
+CheckPointSnapBuild(void)
+{
+ XLogRecPtr cutoff;
+ XLogRecPtr redo;
+ DIR *snap_dir;
+ struct dirent *snap_de;
+ char path[MAXPGPATH + 21];
+
+	/*
+	 * Start with the last redo pointer. No new replication slot will start
+	 * before that, so it's a safe upper bound for removal.
+	 */
+ redo = GetRedoRecPtr();
+
+ /* now check for the restart ptrs from existing slots */
+ cutoff = ReplicationSlotsComputeLogicalRestartLSN();
+
+ /* don't start earlier than the restart lsn */
+ if (redo < cutoff)
+ cutoff = redo;
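+
+	/*
+	 * In other words, cutoff = Min(redo, logical restart LSN). For
+	 * illustration, with redo = 0/5000000 and a logical restart LSN of
+	 * 0/3000000 (made-up values), the cutoff becomes 0/3000000 and only
+	 * snapshot files serialized before that LSN are removed below.
+	 */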
+
+ snap_dir = AllocateDir("pg_logical/snapshots");
+ while ((snap_de = ReadDir(snap_dir, "pg_logical/snapshots")) != NULL)
+ {
+ uint32 hi;
+ uint32 lo;
+ XLogRecPtr lsn;
+ struct stat statbuf;
+
+ if (strcmp(snap_de->d_name, ".") == 0 ||
+ strcmp(snap_de->d_name, "..") == 0)
+ continue;
+
+ snprintf(path, sizeof(path), "pg_logical/snapshots/%s", snap_de->d_name);
+
+ if (lstat(path, &statbuf) == 0 && !S_ISREG(statbuf.st_mode))
+ {
+ elog(DEBUG1, "only regular files expected: %s", path);
+ continue;
+ }
+
+		/*
+		 * Temporary file names from SnapBuildSerialize() include the LSN and
+		 * everything else, but are suffixed with .$pid.tmp. We can remove
+		 * them just like other files, because no file still being written
+		 * can be older than the cutoff.
+		 *
+		 * We just log a message if a file doesn't fit the pattern; it's
+		 * probably some editor's lock/state file or similar...
+		 */
+ if (sscanf(snap_de->d_name, "%X-%X.snap", &hi, &lo) != 2)
+ {
+ ereport(LOG,
+ (errmsg("could not parse file name \"%s\"", path)));
+ continue;
+ }
+
+ lsn = ((uint64) hi) << 32 | lo;
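+
+		/*
+		 * For example, a (hypothetical) file "1-16D8C2A0.snap" parses as
+		 * hi = 0x1 and lo = 0x16D8C2A0, reconstructing LSN 1/16D8C2A0, the
+		 * LSN that SnapBuildSerialize() embedded in the name.
+		 */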
+
+ /* check whether we still need it */
+ if (lsn < cutoff || cutoff == InvalidXLogRecPtr)
+ {
+ elog(DEBUG1, "removing snapbuild snapshot %s", path);
+
+ /*
+ * It's not particularly harmful, though strange, if we can't
+ * remove the file here. Don't prevent the checkpoint from
+ * completing, that'd be a cure worse than the disease.
+ */
+ if (unlink(path) < 0)
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not remove file \"%s\": %m",
+ path)));
+ continue;
+ }
+ }
+ }
+ FreeDir(snap_dir);
+}
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
new file mode 100644
index 0000000..a3989d4
--- /dev/null
+++ b/src/backend/replication/logical/tablesync.c
@@ -0,0 +1,993 @@
+/*-------------------------------------------------------------------------
+ * tablesync.c
+ * PostgreSQL logical replication
+ *
+ * Copyright (c) 2012-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/logical/tablesync.c
+ *
+ * NOTES
+ * This file contains code for initial table data synchronization for
+ * logical replication.
+ *
+ * The initial data synchronization is done separately for each table,
+ * in a separate apply worker that only fetches the initial snapshot data
+ * from the publisher and then synchronizes the position in the stream with
+ * the main apply worker.
+ *
+ * There are several reasons for doing the synchronization this way:
+ * - It allows us to parallelize the initial data synchronization
+ * which lowers the time needed for it to happen.
+ *	  - The initial synchronization does not have to hold the xid and LSN
+ *	    for the time it takes to copy the data of all tables, causing less
+ *	    bloat and lower disk consumption compared to doing the
+ *	    synchronization in a single process for the whole database.
+ * - It allows us to synchronize any tables added after the initial
+ * synchronization has finished.
+ *
+ * The stream position synchronization works in multiple steps.
+ *	  - Sync finishes the copy, sets the worker state to SYNCWAIT, and
+ *	    waits in a loop for the state to change.
+ *	  - Apply periodically checks the tables that are being synchronized
+ *	    for SYNCWAIT. When that state appears, it sets the worker state to
+ *	    CATCHUP and loop-waits until either the table state is set to
+ *	    SYNCDONE or the sync worker exits.
+ * - After the sync worker has seen the state change to CATCHUP, it will
+ * read the stream and apply changes (acting like an apply worker) until
+ * it catches up to the specified stream position. Then it sets the
+ * state to SYNCDONE. There might be zero changes applied between
+ * CATCHUP and SYNCDONE, because the sync worker might be ahead of the
+ * apply worker.
+ *	  - Once the state is set to SYNCDONE, the apply worker will continue
+ *	    tracking the table until it reaches the SYNCDONE stream position,
+ *	    at which point it sets the state to READY and stops tracking.
+ *	    Again, there might be zero changes in between.
+ *
+ * So the state progression is always: INIT -> DATASYNC -> SYNCWAIT -> CATCHUP ->
+ * SYNCDONE -> READY.
+ *
+ * The catalog pg_subscription_rel is used to keep information about
+ * subscribed tables and their state. Some transient state during data
+ * synchronization is kept in shared memory. The states SYNCWAIT and
+ * CATCHUP only appear in memory.
+ *
+ * Example flows look like this:
+ * - Apply is in front:
+ * sync:8
+ * -> set in memory SYNCWAIT
+ * apply:10
+ * -> set in memory CATCHUP
+ * -> enter wait-loop
+ * sync:10
+ * -> set in catalog SYNCDONE
+ * -> exit
+ * apply:10
+ * -> exit wait-loop
+ * -> continue rep
+ * apply:11
+ * -> set in catalog READY
+ * - Sync in front:
+ * sync:10
+ * -> set in memory SYNCWAIT
+ * apply:8
+ * -> set in memory CATCHUP
+ * -> continue per-table filtering
+ * sync:10
+ * -> set in catalog SYNCDONE
+ * -> exit
+ * apply:10
+ * -> set in catalog READY
+ * -> stop per-table filtering
+ * -> continue rep
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/table.h"
+#include "access/xact.h"
+#include "catalog/pg_subscription_rel.h"
+#include "catalog/pg_type.h"
+#include "commands/copy.h"
+#include "miscadmin.h"
+#include "parser/parse_relation.h"
+#include "pgstat.h"
+#include "replication/logicallauncher.h"
+#include "replication/logicalrelation.h"
+#include "replication/walreceiver.h"
+#include "replication/worker_internal.h"
+#include "storage/ipc.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/snapmgr.h"
+
+static bool table_states_valid = false;
+
+StringInfo copybuf = NULL;
+
+/*
+ * Exit routine for synchronization worker.
+ */
+static void
+pg_attribute_noreturn()
+finish_sync_worker(void)
+{
+ /*
+ * Commit any outstanding transaction. This is the usual case, unless
+ * there was nothing to do for the table.
+ */
+ if (IsTransactionState())
+ {
+ CommitTransactionCommand();
+ pgstat_report_stat(false);
+ }
+
+ /* And flush all writes. */
+ XLogFlush(GetXLogWriteRecPtr());
+
+ StartTransactionCommand();
+ ereport(LOG,
+ (errmsg("logical replication table synchronization worker for subscription \"%s\", table \"%s\" has finished",
+ MySubscription->name,
+ get_rel_name(MyLogicalRepWorker->relid))));
+ CommitTransactionCommand();
+
+ /* Find the main apply worker and signal it. */
+ logicalrep_worker_wakeup(MyLogicalRepWorker->subid, InvalidOid);
+
+ /* Stop gracefully */
+ proc_exit(0);
+}
+
+/*
+ * Wait until the relation synchronization state is set in the catalog to the
+ * expected one.
+ *
+ * Used when transitioning from CATCHUP state to SYNCDONE.
+ *
+ * Returns false if the synchronization worker has disappeared or the table state
+ * has been reset.
+ */
+static bool
+wait_for_relation_state_change(Oid relid, char expected_state)
+{
+ char state;
+
+ for (;;)
+ {
+ LogicalRepWorker *worker;
+ XLogRecPtr statelsn;
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* XXX use cache invalidation here to improve performance? */
+ PushActiveSnapshot(GetLatestSnapshot());
+ state = GetSubscriptionRelState(MyLogicalRepWorker->subid,
+ relid, &statelsn, true);
+ PopActiveSnapshot();
+
+ if (state == SUBREL_STATE_UNKNOWN)
+ return false;
+
+ if (state == expected_state)
+ return true;
+
+		/* Check if the opposite worker is still running and bail if not. */
+		LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+ worker = logicalrep_worker_find(MyLogicalRepWorker->subid,
+ am_tablesync_worker() ? InvalidOid : relid,
+ false);
+ LWLockRelease(LogicalRepWorkerLock);
+ if (!worker)
+ return false;
+
+ (void) WaitLatch(MyLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ 1000L, WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE);
+
+ ResetLatch(MyLatch);
+ }
+
+ return false;
+}
+
+/*
+ * Wait until the apply worker changes the state of our synchronization
+ * worker to the expected one.
+ *
+ * Used when transitioning from SYNCWAIT state to CATCHUP.
+ *
+ * Returns false if the apply worker has disappeared.
+ */
+static bool
+wait_for_worker_state_change(char expected_state)
+{
+ int rc;
+
+ for (;;)
+ {
+ LogicalRepWorker *worker;
+
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * Done if already in correct state. (We assume this fetch is atomic
+ * enough to not give a misleading answer if we do it with no lock.)
+ */
+ if (MyLogicalRepWorker->relstate == expected_state)
+ return true;
+
+ /*
+ * Bail out if the apply worker has died, else signal it we're
+ * waiting.
+ */
+ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+ worker = logicalrep_worker_find(MyLogicalRepWorker->subid,
+ InvalidOid, false);
+ if (worker && worker->proc)
+ logicalrep_worker_wakeup_ptr(worker);
+ LWLockRelease(LogicalRepWorkerLock);
+ if (!worker)
+ break;
+
+ /*
+ * Wait. We expect to get a latch signal back from the apply worker,
+ * but use a timeout in case it dies without sending one.
+ */
+ rc = WaitLatch(MyLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ 1000L, WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE);
+
+ if (rc & WL_LATCH_SET)
+ ResetLatch(MyLatch);
+ }
+
+ return false;
+}
+
+/*
+ * Callback from syscache invalidation.
+ */
+void
+invalidate_syncing_table_states(Datum arg, int cacheid, uint32 hashvalue)
+{
+ table_states_valid = false;
+}
+
+/*
+ * Handle table synchronization cooperation from the synchronization
+ * worker.
+ *
+ * If the sync worker is in CATCHUP state and reached (or passed) the
+ * predetermined synchronization point in the WAL stream, mark the table as
+ * SYNCDONE and finish.
+ */
+static void
+process_syncing_tables_for_sync(XLogRecPtr current_lsn)
+{
+ Assert(IsTransactionState());
+
+ SpinLockAcquire(&MyLogicalRepWorker->relmutex);
+
+ if (MyLogicalRepWorker->relstate == SUBREL_STATE_CATCHUP &&
+ current_lsn >= MyLogicalRepWorker->relstate_lsn)
+ {
+ TimeLineID tli;
+
+ MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCDONE;
+ MyLogicalRepWorker->relstate_lsn = current_lsn;
+
+ SpinLockRelease(&MyLogicalRepWorker->relmutex);
+
+ UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
+ MyLogicalRepWorker->relid,
+ MyLogicalRepWorker->relstate,
+ MyLogicalRepWorker->relstate_lsn);
+
+ walrcv_endstreaming(LogRepWorkerWalRcvConn, &tli);
+ finish_sync_worker();
+ }
+ else
+ SpinLockRelease(&MyLogicalRepWorker->relmutex);
+}
+
+/*
+ * Handle table synchronization cooperation from the apply worker.
+ *
+ * Walk over all subscription tables that are individually tracked by the
+ * apply process (currently, all that have state other than
+ * SUBREL_STATE_READY) and manage synchronization for them.
+ *
+ * If there are tables that need synchronizing and are not being synchronized
+ * yet, start sync workers for them (if there are free slots for sync
+ * workers). To prevent starting the sync worker for the same relation at a
+ * high frequency after a failure, we store its last start time with each sync
+ * state info. We start the sync worker for the same relation after waiting
+ * at least wal_retrieve_retry_interval.
+ *
+ * For tables that are being synchronized already, check if sync workers
+ * either need action from the apply worker or have finished. This is the
+ * SYNCWAIT to CATCHUP transition.
+ *
+ * If the synchronization position is reached (SYNCDONE), then the table can
+ * be marked as READY and is no longer tracked.
+ */
+static void
+process_syncing_tables_for_apply(XLogRecPtr current_lsn)
+{
+ struct tablesync_start_time_mapping
+ {
+ Oid relid;
+ TimestampTz last_start_time;
+ };
+ static List *table_states = NIL;
+ static HTAB *last_start_times = NULL;
+ ListCell *lc;
+ bool started_tx = false;
+
+ Assert(!IsTransactionState());
+
+ /* We need up-to-date sync state info for subscription tables here. */
+ if (!table_states_valid)
+ {
+ MemoryContext oldctx;
+ List *rstates;
+ ListCell *lc;
+ SubscriptionRelState *rstate;
+
+ /* Clean the old list. */
+ list_free_deep(table_states);
+ table_states = NIL;
+
+ StartTransactionCommand();
+ started_tx = true;
+
+ /* Fetch all non-ready tables. */
+ rstates = GetSubscriptionNotReadyRelations(MySubscription->oid);
+
+ /* Allocate the tracking info in a permanent memory context. */
+ oldctx = MemoryContextSwitchTo(CacheMemoryContext);
+ foreach(lc, rstates)
+ {
+ rstate = palloc(sizeof(SubscriptionRelState));
+ memcpy(rstate, lfirst(lc), sizeof(SubscriptionRelState));
+ table_states = lappend(table_states, rstate);
+ }
+ MemoryContextSwitchTo(oldctx);
+
+ table_states_valid = true;
+ }
+
+ /*
+ * Prepare a hash table for tracking last start times of workers, to avoid
+ * immediate restarts. We don't need it if there are no tables that need
+ * syncing.
+ */
+ if (table_states && !last_start_times)
+ {
+ HASHCTL ctl;
+
+ memset(&ctl, 0, sizeof(ctl));
+ ctl.keysize = sizeof(Oid);
+ ctl.entrysize = sizeof(struct tablesync_start_time_mapping);
+ last_start_times = hash_create("Logical replication table sync worker start times",
+ 256, &ctl, HASH_ELEM | HASH_BLOBS);
+ }
+
+ /*
+ * Clean up the hash table when we're done with all tables (just to
+ * release the bit of memory).
+ */
+ else if (!table_states && last_start_times)
+ {
+ hash_destroy(last_start_times);
+ last_start_times = NULL;
+ }
+
+ /*
+ * Process all tables that are being synchronized.
+ */
+ foreach(lc, table_states)
+ {
+ SubscriptionRelState *rstate = (SubscriptionRelState *) lfirst(lc);
+
+ if (rstate->state == SUBREL_STATE_SYNCDONE)
+ {
+ /*
+ * Apply has caught up to the position where the table sync has
+ * finished. Mark the table as ready so that the apply will just
+ * continue to replicate it normally.
+ */
+ if (current_lsn >= rstate->lsn)
+ {
+ rstate->state = SUBREL_STATE_READY;
+ rstate->lsn = current_lsn;
+ if (!started_tx)
+ {
+ StartTransactionCommand();
+ started_tx = true;
+ }
+
+ UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
+ rstate->relid, rstate->state,
+ rstate->lsn);
+ }
+ }
+ else
+ {
+ LogicalRepWorker *syncworker;
+
+ /*
+ * Look for a sync worker for this relation.
+ */
+ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
+
+ syncworker = logicalrep_worker_find(MyLogicalRepWorker->subid,
+ rstate->relid, false);
+
+ if (syncworker)
+ {
+ /* Found one, update our copy of its state */
+ SpinLockAcquire(&syncworker->relmutex);
+ rstate->state = syncworker->relstate;
+ rstate->lsn = syncworker->relstate_lsn;
+ if (rstate->state == SUBREL_STATE_SYNCWAIT)
+ {
+ /*
+ * Sync worker is waiting for apply. Tell sync worker it
+ * can catchup now.
+ */
+ syncworker->relstate = SUBREL_STATE_CATCHUP;
+ syncworker->relstate_lsn =
+ Max(syncworker->relstate_lsn, current_lsn);
+ }
+ SpinLockRelease(&syncworker->relmutex);
+
+ /* If we told worker to catch up, wait for it. */
+ if (rstate->state == SUBREL_STATE_SYNCWAIT)
+ {
+ /* Signal the sync worker, as it may be waiting for us. */
+ if (syncworker->proc)
+ logicalrep_worker_wakeup_ptr(syncworker);
+
+ /* Now safe to release the LWLock */
+ LWLockRelease(LogicalRepWorkerLock);
+
+ /*
+ * Enter busy loop and wait for synchronization worker to
+ * reach expected state (or die trying).
+ */
+ if (!started_tx)
+ {
+ StartTransactionCommand();
+ started_tx = true;
+ }
+
+ wait_for_relation_state_change(rstate->relid,
+ SUBREL_STATE_SYNCDONE);
+ }
+ else
+ LWLockRelease(LogicalRepWorkerLock);
+ }
+ else
+ {
+ /*
+ * If there is no sync worker for this table yet, count
+ * running sync workers for this subscription, while we have
+ * the lock.
+ */
+ int nsyncworkers =
+ logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
+
+ /* Now safe to release the LWLock */
+ LWLockRelease(LogicalRepWorkerLock);
+
+ /*
+ * If there are free sync worker slot(s), start a new sync
+ * worker for the table.
+ */
+ if (nsyncworkers < max_sync_workers_per_subscription)
+ {
+ TimestampTz now = GetCurrentTimestamp();
+ struct tablesync_start_time_mapping *hentry;
+ bool found;
+
+ hentry = hash_search(last_start_times, &rstate->relid,
+ HASH_ENTER, &found);
+
+ if (!found ||
+ TimestampDifferenceExceeds(hentry->last_start_time, now,
+ wal_retrieve_retry_interval))
+ {
+ logicalrep_worker_launch(MyLogicalRepWorker->dbid,
+ MySubscription->oid,
+ MySubscription->name,
+ MyLogicalRepWorker->userid,
+ rstate->relid);
+ hentry->last_start_time = now;
+ }
+ }
+ }
+ }
+ }
+
+ if (started_tx)
+ {
+ CommitTransactionCommand();
+ pgstat_report_stat(false);
+ }
+}
+
+/*
+ * Process possible state change(s) of tables that are being synchronized.
+ */
+void
+process_syncing_tables(XLogRecPtr current_lsn)
+{
+ if (am_tablesync_worker())
+ process_syncing_tables_for_sync(current_lsn);
+ else
+ process_syncing_tables_for_apply(current_lsn);
+}
+
+/*
+ * Create list of columns for COPY based on logical relation mapping.
+ */
+static List *
+make_copy_attnamelist(LogicalRepRelMapEntry *rel)
+{
+ List *attnamelist = NIL;
+ int i;
+
+ for (i = 0; i < rel->remoterel.natts; i++)
+ {
+ attnamelist = lappend(attnamelist,
+ makeString(rel->remoterel.attnames[i]));
+ }
+
+ return attnamelist;
+}
+
+/*
+ * Data source callback for the COPY FROM, which reads from the remote
+ * connection and passes the data back to our local COPY.
+ */
+static int
+copy_read_data(void *outbuf, int minread, int maxread)
+{
+ int bytesread = 0;
+ int avail;
+
+	/* If there is leftover data from a previous read, use it. */
+ avail = copybuf->len - copybuf->cursor;
+ if (avail)
+ {
+ if (avail > maxread)
+ avail = maxread;
+ memcpy(outbuf, &copybuf->data[copybuf->cursor], avail);
+ copybuf->cursor += avail;
+ maxread -= avail;
+ bytesread += avail;
+ }
+
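+	/*
+	 * Loop until at least the minimum requested amount of data has been
+	 * gathered. To illustrate the contract implemented here: with
+	 * minread = 1 we return as soon as any bytes are available (or the
+	 * stream ends), while a larger minread keeps us receiving, up to
+	 * maxread bytes in total.
+	 */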
+ while (maxread > 0 && bytesread < minread)
+ {
+ pgsocket fd = PGINVALID_SOCKET;
+ int len;
+ char *buf = NULL;
+
+ for (;;)
+ {
+			/* Try to read the data. */
+ len = walrcv_receive(LogRepWorkerWalRcvConn, &buf, &fd);
+
+ CHECK_FOR_INTERRUPTS();
+
+ if (len == 0)
+ break;
+ else if (len < 0)
+ return bytesread;
+ else
+ {
+ /* Process the data */
+ copybuf->data = buf;
+ copybuf->len = len;
+ copybuf->cursor = 0;
+
+ avail = copybuf->len - copybuf->cursor;
+ if (avail > maxread)
+ avail = maxread;
+ memcpy(outbuf, &copybuf->data[copybuf->cursor], avail);
+ outbuf = (void *) ((char *) outbuf + avail);
+ copybuf->cursor += avail;
+ maxread -= avail;
+ bytesread += avail;
+ }
+
+ if (maxread <= 0 || bytesread >= minread)
+ return bytesread;
+ }
+
+		/*
+		 * Wait for more data to arrive or for the latch to be set.
+		 */
+ (void) WaitLatchOrSocket(MyLatch,
+ WL_SOCKET_READABLE | WL_LATCH_SET |
+ WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ fd, 1000L, WAIT_EVENT_LOGICAL_SYNC_DATA);
+
+ ResetLatch(MyLatch);
+ }
+
+ return bytesread;
+}
+
+
+/*
+ * Get information about a remote relation, in a fashion similar to what the
+ * RELATION message provides during replication.
+ */
+static void
+fetch_remote_table_info(char *nspname, char *relname,
+ LogicalRepRelation *lrel)
+{
+ WalRcvExecResult *res;
+ StringInfoData cmd;
+ TupleTableSlot *slot;
+ Oid tableRow[] = {OIDOID, CHAROID, CHAROID};
+ Oid attrRow[] = {TEXTOID, OIDOID, INT4OID, BOOLOID};
+ bool isnull;
+ int natt;
+
+ lrel->nspname = nspname;
+ lrel->relname = relname;
+
+ /* First fetch Oid and replica identity. */
+ initStringInfo(&cmd);
+ appendStringInfo(&cmd, "SELECT c.oid, c.relreplident, c.relkind"
+ " FROM pg_catalog.pg_class c"
+ " INNER JOIN pg_catalog.pg_namespace n"
+ " ON (c.relnamespace = n.oid)"
+ " WHERE n.nspname = %s"
+ " AND c.relname = %s",
+ quote_literal_cstr(nspname),
+ quote_literal_cstr(relname));
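+
+	/*
+	 * For example, for a hypothetical table public.tab, the command built
+	 * above reads:
+	 *
+	 *	SELECT c.oid, c.relreplident, c.relkind
+	 *	  FROM pg_catalog.pg_class c
+	 *	  INNER JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid)
+	 *	  WHERE n.nspname = 'public' AND c.relname = 'tab'
+	 */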
+ res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data,
+ lengthof(tableRow), tableRow);
+
+ if (res->status != WALRCV_OK_TUPLES)
+ ereport(ERROR,
+ (errmsg("could not fetch table info for table \"%s.%s\" from publisher: %s",
+ nspname, relname, res->err)));
+
+ slot = MakeSingleTupleTableSlot(res->tupledesc, &TTSOpsMinimalTuple);
+ if (!tuplestore_gettupleslot(res->tuplestore, true, false, slot))
+ ereport(ERROR,
+ (errmsg("table \"%s.%s\" not found on publisher",
+ nspname, relname)));
+
+ lrel->remoteid = DatumGetObjectId(slot_getattr(slot, 1, &isnull));
+ Assert(!isnull);
+ lrel->replident = DatumGetChar(slot_getattr(slot, 2, &isnull));
+ Assert(!isnull);
+ lrel->relkind = DatumGetChar(slot_getattr(slot, 3, &isnull));
+ Assert(!isnull);
+
+ ExecDropSingleTupleTableSlot(slot);
+ walrcv_clear_result(res);
+
+ /* Now fetch columns. */
+ resetStringInfo(&cmd);
+ appendStringInfo(&cmd,
+ "SELECT a.attname,"
+ " a.atttypid,"
+ " a.atttypmod,"
+ " a.attnum = ANY(i.indkey)"
+ " FROM pg_catalog.pg_attribute a"
+ " LEFT JOIN pg_catalog.pg_index i"
+ " ON (i.indexrelid = pg_get_replica_identity_index(%u))"
+ " WHERE a.attnum > 0::pg_catalog.int2"
+ " AND NOT a.attisdropped %s"
+ " AND a.attrelid = %u"
+ " ORDER BY a.attnum",
+ lrel->remoteid,
+ (walrcv_server_version(LogRepWorkerWalRcvConn) >= 120000 ?
+ "AND a.attgenerated = ''" : ""),
+ lrel->remoteid);
+ res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data,
+ lengthof(attrRow), attrRow);
+
+ if (res->status != WALRCV_OK_TUPLES)
+ ereport(ERROR,
+ (errmsg("could not fetch table info for table \"%s.%s\": %s",
+ nspname, relname, res->err)));
+
+ /* We don't know the number of rows coming, so allocate enough space. */
+ lrel->attnames = palloc0(MaxTupleAttributeNumber * sizeof(char *));
+ lrel->atttyps = palloc0(MaxTupleAttributeNumber * sizeof(Oid));
+ lrel->attkeys = NULL;
+
+ natt = 0;
+ slot = MakeSingleTupleTableSlot(res->tupledesc, &TTSOpsMinimalTuple);
+ while (tuplestore_gettupleslot(res->tuplestore, true, false, slot))
+ {
+ lrel->attnames[natt] =
+ TextDatumGetCString(slot_getattr(slot, 1, &isnull));
+ Assert(!isnull);
+ lrel->atttyps[natt] = DatumGetObjectId(slot_getattr(slot, 2, &isnull));
+ Assert(!isnull);
+ if (DatumGetBool(slot_getattr(slot, 4, &isnull)))
+ lrel->attkeys = bms_add_member(lrel->attkeys, natt);
+
+ /* Should never happen. */
+ if (++natt >= MaxTupleAttributeNumber)
+ elog(ERROR, "too many columns in remote table \"%s.%s\"",
+ nspname, relname);
+
+ ExecClearTuple(slot);
+ }
+ ExecDropSingleTupleTableSlot(slot);
+
+ lrel->natts = natt;
+
+ walrcv_clear_result(res);
+ pfree(cmd.data);
+}
+
+/*
+ * Copy existing data of a table from publisher.
+ *
+ * Caller is responsible for locking the local relation.
+ */
+static void
+copy_table(Relation rel)
+{
+ LogicalRepRelMapEntry *relmapentry;
+ LogicalRepRelation lrel;
+ WalRcvExecResult *res;
+ StringInfoData cmd;
+ CopyState cstate;
+ List *attnamelist;
+ ParseState *pstate;
+
+ /* Get the publisher relation info. */
+ fetch_remote_table_info(get_namespace_name(RelationGetNamespace(rel)),
+ RelationGetRelationName(rel), &lrel);
+
+ /* Put the relation into relmap. */
+ logicalrep_relmap_update(&lrel);
+
+	/* Map the publisher relation to the local one. */
+ relmapentry = logicalrep_rel_open(lrel.remoteid, NoLock);
+ Assert(rel == relmapentry->localrel);
+
+ /* Start copy on the publisher. */
+ initStringInfo(&cmd);
+ if (lrel.relkind == RELKIND_RELATION)
+ appendStringInfo(&cmd, "COPY %s TO STDOUT",
+ quote_qualified_identifier(lrel.nspname, lrel.relname));
+ else
+ {
+		/*
+		 * For non-tables, we need to do COPY (SELECT ...), but we can't just
+		 * do SELECT * because we must not copy generated columns.
+		 */
+ appendStringInfo(&cmd, "COPY (SELECT ");
+ for (int i = 0; i < lrel.natts; i++)
+ {
+ appendStringInfoString(&cmd, quote_identifier(lrel.attnames[i]));
+ if (i < lrel.natts - 1)
+ appendStringInfoString(&cmd, ", ");
+ }
+ appendStringInfo(&cmd, " FROM %s) TO STDOUT",
+ quote_qualified_identifier(lrel.nspname, lrel.relname));
+ }
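+
+	/*
+	 * To illustrate with hypothetical names: a plain table yields
+	 *
+	 *	COPY public.tab TO STDOUT
+	 *
+	 * while a non-table relation with columns a and b yields
+	 *
+	 *	COPY (SELECT a, b FROM public.tab) TO STDOUT
+	 *
+	 * so that generated columns are never copied.
+	 */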
+ res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data, 0, NULL);
+ pfree(cmd.data);
+ if (res->status != WALRCV_OK_COPY_OUT)
+ ereport(ERROR,
+ (errmsg("could not start initial contents copy for table \"%s.%s\": %s",
+ lrel.nspname, lrel.relname, res->err)));
+ walrcv_clear_result(res);
+
+ copybuf = makeStringInfo();
+
+ pstate = make_parsestate(NULL);
+ (void) addRangeTableEntryForRelation(pstate, rel, AccessShareLock,
+ NULL, false, false);
+
+ attnamelist = make_copy_attnamelist(relmapentry);
+ cstate = BeginCopyFrom(pstate, rel, NULL, false, copy_read_data, attnamelist, NIL);
+
+ /* Do the copy */
+ (void) CopyFrom(cstate);
+
+ logicalrep_rel_close(relmapentry, NoLock);
+}
+
+/*
+ * Start syncing the table in the sync worker.
+ *
+ * The returned slot name is palloc'ed in the current memory context.
+ */
+char *
+LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
+{
+ char *slotname;
+ char *err;
+ char relstate;
+ XLogRecPtr relstate_lsn;
+
+ /* Check the state of the table synchronization. */
+ StartTransactionCommand();
+ relstate = GetSubscriptionRelState(MyLogicalRepWorker->subid,
+ MyLogicalRepWorker->relid,
+ &relstate_lsn, true);
+ CommitTransactionCommand();
+
+ SpinLockAcquire(&MyLogicalRepWorker->relmutex);
+ MyLogicalRepWorker->relstate = relstate;
+ MyLogicalRepWorker->relstate_lsn = relstate_lsn;
+ SpinLockRelease(&MyLogicalRepWorker->relmutex);
+
+ /*
+ * To build a slot name for the sync work, we are limited to NAMEDATALEN -
+ * 1 characters. We cut the original slot name to NAMEDATALEN - 28 chars
+ * and append _%u_sync_%u (1 + 10 + 6 + 10 + '\0'). (It's actually the
+ * NAMEDATALEN on the remote that matters, but this scheme will also work
+ * reasonably if that is different.)
+ */
+ StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small"); /* for sanity */
+ slotname = psprintf("%.*s_%u_sync_%u",
+ NAMEDATALEN - 28,
+ MySubscription->slotname,
+ MySubscription->oid,
+ MyLogicalRepWorker->relid);
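+
+	/*
+	 * For example (made-up OIDs): with a subscription slot name of "sub1",
+	 * subscription OID 16394 and table OID 16401, this produces the slot
+	 * name "sub1_16394_sync_16401".
+	 */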
+
+	/*
+	 * Here we use the slot name instead of the subscription name as the
+	 * application_name, so that it is different from the main apply
+	 * worker's and synchronous replication can distinguish them.
+	 */
+ LogRepWorkerWalRcvConn = walrcv_connect(MySubscription->conninfo, true,
+ slotname, &err);
+ if (LogRepWorkerWalRcvConn == NULL)
+ ereport(ERROR,
+ (errmsg("could not connect to the publisher: %s", err)));
+
+ switch (MyLogicalRepWorker->relstate)
+ {
+ case SUBREL_STATE_INIT:
+ case SUBREL_STATE_DATASYNC:
+ {
+ Relation rel;
+ WalRcvExecResult *res;
+
+ SpinLockAcquire(&MyLogicalRepWorker->relmutex);
+ MyLogicalRepWorker->relstate = SUBREL_STATE_DATASYNC;
+ MyLogicalRepWorker->relstate_lsn = InvalidXLogRecPtr;
+ SpinLockRelease(&MyLogicalRepWorker->relmutex);
+
+ /* Update the state and make it visible to others. */
+ StartTransactionCommand();
+ UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
+ MyLogicalRepWorker->relid,
+ MyLogicalRepWorker->relstate,
+ MyLogicalRepWorker->relstate_lsn);
+ CommitTransactionCommand();
+ pgstat_report_stat(false);
+
+ /*
+ * We want to do the table data sync in a single transaction.
+ */
+ StartTransactionCommand();
+
+				/*
+				 * Use a standard write lock here. It might be better to
+				 * disallow access to the table while it's being
+				 * synchronized. But we don't want to block the main apply
+				 * process from working, and it has to open the relation in
+				 * RowExclusiveLock mode when remapping the remote relation
+				 * id to the local one.
+				 */
+ rel = table_open(MyLogicalRepWorker->relid, RowExclusiveLock);
+
+ /*
+ * Create a temporary slot for the sync process. We do this
+ * inside the transaction so that we can use the snapshot made
+ * by the slot to get existing data.
+ */
+ res = walrcv_exec(LogRepWorkerWalRcvConn,
+ "BEGIN READ ONLY ISOLATION LEVEL "
+ "REPEATABLE READ", 0, NULL);
+ if (res->status != WALRCV_OK_COMMAND)
+ ereport(ERROR,
+ (errmsg("table copy could not start transaction on publisher"),
+ errdetail("The error was: %s", res->err)));
+ walrcv_clear_result(res);
+
+				/*
+				 * Create a new temporary logical decoding slot.
+				 *
+				 * We'll use the slot for the data copy, so make sure the
+				 * snapshot is used for the transaction; that way the COPY
+				 * will get data that is consistent with the LSN used by
+				 * the slot to start decoding.
+				 */
+ walrcv_create_slot(LogRepWorkerWalRcvConn, slotname, true,
+ CRS_USE_SNAPSHOT, origin_startpos);
+
+ PushActiveSnapshot(GetTransactionSnapshot());
+ copy_table(rel);
+ PopActiveSnapshot();
+
+ res = walrcv_exec(LogRepWorkerWalRcvConn, "COMMIT", 0, NULL);
+ if (res->status != WALRCV_OK_COMMAND)
+ ereport(ERROR,
+ (errmsg("table copy could not finish transaction on publisher"),
+ errdetail("The error was: %s", res->err)));
+ walrcv_clear_result(res);
+
+ table_close(rel, NoLock);
+
+ /* Make the copy visible. */
+ CommandCounterIncrement();
+
+ /*
+ * We are done with the initial data synchronization, update
+ * the state.
+ */
+ SpinLockAcquire(&MyLogicalRepWorker->relmutex);
+ MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCWAIT;
+ MyLogicalRepWorker->relstate_lsn = *origin_startpos;
+ SpinLockRelease(&MyLogicalRepWorker->relmutex);
+
+ /* Wait for main apply worker to tell us to catchup. */
+ wait_for_worker_state_change(SUBREL_STATE_CATCHUP);
+
+ /*----------
+ * There are now two possible states here:
+ * a) Sync is behind the apply. If that's the case we need to
+ * catch up with it by consuming the logical replication
+ * stream up to the relstate_lsn. For that, we exit this
+ * function and continue in ApplyWorkerMain().
+ * b) Sync is caught up with the apply. So it can just set
+ * the state to SYNCDONE and finish.
+ *----------
+ */
+ if (*origin_startpos >= MyLogicalRepWorker->relstate_lsn)
+ {
+ /*
+ * Update the new state in catalog. No need to bother
+ * with the shmem state as we are exiting for good.
+ */
+ UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
+ MyLogicalRepWorker->relid,
+ SUBREL_STATE_SYNCDONE,
+ *origin_startpos);
+ finish_sync_worker();
+ }
+ break;
+ }
+ case SUBREL_STATE_SYNCDONE:
+ case SUBREL_STATE_READY:
+ case SUBREL_STATE_UNKNOWN:
+
+ /*
+ * Nothing to do here but finish. (UNKNOWN means the relation was
+ * removed from pg_subscription_rel before the sync worker could
+ * start.)
+ */
+ finish_sync_worker();
+ break;
+ default:
+ elog(ERROR, "unknown relation state \"%c\"",
+ MyLogicalRepWorker->relstate);
+ }
+
+ return slotname;
+}
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
new file mode 100644
index 0000000..f67d4ce
--- /dev/null
+++ b/src/backend/replication/logical/worker.c
@@ -0,0 +1,2175 @@
+/*-------------------------------------------------------------------------
+ * worker.c
+ * PostgreSQL logical replication worker (apply)
+ *
+ * Copyright (c) 2016-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/logical/worker.c
+ *
+ * NOTES
+ *	  This file contains the worker which applies logical changes as they come
+ *	  from the remote logical replication stream.
+ *
+ *	  The main worker (apply) is started by the logical replication worker
+ *	  launcher for every enabled subscription in a database. It uses the
+ *	  walsender protocol to communicate with the publisher.
+ *
+ *	  This module includes server-facing code and shares the libpqwalreceiver
+ *	  module with walreceiver for providing the libpq-specific functionality.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/table.h"
+#include "access/tableam.h"
+#include "access/xact.h"
+#include "access/xlog_internal.h"
+#include "catalog/catalog.h"
+#include "catalog/namespace.h"
+#include "catalog/partition.h"
+#include "catalog/pg_inherits.h"
+#include "catalog/pg_subscription.h"
+#include "catalog/pg_subscription_rel.h"
+#include "commands/tablecmds.h"
+#include "commands/trigger.h"
+#include "executor/executor.h"
+#include "executor/execPartition.h"
+#include "executor/nodeModifyTable.h"
+#include "funcapi.h"
+#include "libpq/pqformat.h"
+#include "libpq/pqsignal.h"
+#include "mb/pg_wchar.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "optimizer/optimizer.h"
+#include "pgstat.h"
+#include "postmaster/bgworker.h"
+#include "postmaster/interrupt.h"
+#include "postmaster/postmaster.h"
+#include "postmaster/walwriter.h"
+#include "replication/decode.h"
+#include "replication/logical.h"
+#include "replication/logicalproto.h"
+#include "replication/logicalrelation.h"
+#include "replication/logicalworker.h"
+#include "replication/origin.h"
+#include "replication/reorderbuffer.h"
+#include "replication/snapbuild.h"
+#include "replication/walreceiver.h"
+#include "replication/worker_internal.h"
+#include "rewrite/rewriteHandler.h"
+#include "storage/bufmgr.h"
+#include "storage/ipc.h"
+#include "storage/lmgr.h"
+#include "storage/proc.h"
+#include "storage/procarray.h"
+#include "tcop/tcopprot.h"
+#include "utils/builtins.h"
+#include "utils/catcache.h"
+#include "utils/datum.h"
+#include "utils/fmgroids.h"
+#include "utils/guc.h"
+#include "utils/inval.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/rel.h"
+#include "utils/syscache.h"
+#include "utils/timeout.h"
+
+#define NAPTIME_PER_CYCLE 1000 /* max sleep time between cycles (1s) */
+
+typedef struct FlushPosition
+{
+ dlist_node node;
+ XLogRecPtr local_end;
+ XLogRecPtr remote_end;
+} FlushPosition;
+
+static dlist_head lsn_mapping = DLIST_STATIC_INIT(lsn_mapping);
+
+typedef struct SlotErrCallbackArg
+{
+ LogicalRepRelMapEntry *rel;
+ int remote_attnum;
+} SlotErrCallbackArg;
+
+typedef struct ApplyExecutionData
+{
+ EState *estate; /* executor state, used to track resources */
+
+ LogicalRepRelMapEntry *targetRel; /* replication target rel */
+ ResultRelInfo *targetRelInfo; /* ResultRelInfo for same */
+
+ /* These fields are used when the target relation is partitioned: */
+ ModifyTableState *mtstate; /* dummy ModifyTable state */
+ PartitionTupleRouting *proute; /* partition routing info */
+} ApplyExecutionData;
+
+static MemoryContext ApplyMessageContext = NULL;
+MemoryContext ApplyContext = NULL;
+
+WalReceiverConn *LogRepWorkerWalRcvConn = NULL;
+
+Subscription *MySubscription = NULL;
+bool MySubscriptionValid = false;
+
+bool in_remote_transaction = false;
+static XLogRecPtr remote_final_lsn = InvalidXLogRecPtr;
+
+static void send_feedback(XLogRecPtr recvpos, bool force, bool requestReply);
+
+static void store_flush_position(XLogRecPtr remote_lsn);
+
+static void maybe_reread_subscription(void);
+
+static void apply_handle_insert_internal(ResultRelInfo *relinfo,
+ EState *estate, TupleTableSlot *remoteslot);
+static void apply_handle_update_internal(ResultRelInfo *relinfo,
+ EState *estate, TupleTableSlot *remoteslot,
+ LogicalRepTupleData *newtup,
+ LogicalRepRelMapEntry *relmapentry);
+static void apply_handle_delete_internal(ResultRelInfo *relinfo, EState *estate,
+ TupleTableSlot *remoteslot,
+ LogicalRepRelation *remoterel);
+static bool FindReplTupleInLocalRel(EState *estate, Relation localrel,
+ LogicalRepRelation *remoterel,
+ TupleTableSlot *remoteslot,
+ TupleTableSlot **localslot);
+static void apply_handle_tuple_routing(ApplyExecutionData *edata,
+ TupleTableSlot *remoteslot,
+ LogicalRepTupleData *newtup,
+ CmdType operation);
+
+/*
+ * Should this worker apply changes for the given relation?
+ *
+ * This is mainly needed for the initial relation data sync, as that runs in
+ * a separate worker process in parallel, and we need some way to skip
+ * changes arriving at the main apply worker while a table is being
+ * synchronized.
+ *
+ * Note we need to do a less-than-or-equal comparison for the SYNCDONE state
+ * because it might hold the position of the end of the initial slot
+ * consistent point WAL record + 1 (i.e., the start of the next record), and
+ * that next record can be the COMMIT of the transaction we are now
+ * processing (which is what we set remote_final_lsn to in
+ * apply_handle_begin).
+ */
+static bool
+should_apply_changes_for_rel(LogicalRepRelMapEntry *rel)
+{
+ if (am_tablesync_worker())
+ return MyLogicalRepWorker->relid == rel->localreloid;
+ else
+ return (rel->state == SUBREL_STATE_READY ||
+ (rel->state == SUBREL_STATE_SYNCDONE &&
+ rel->statelsn <= remote_final_lsn));
+}
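+
+/*
+ * Example of the boundary case above (made-up LSNs): if a table's SYNCDONE
+ * statelsn is 0/1500D40, i.e. the start of the record right after the sync
+ * worker's final position, the commit record of the transaction we are
+ * applying may itself begin at 0/1500D40; the change then still belongs to
+ * the apply worker, hence the <= comparison.
+ */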
+
+/*
+ * Begin one step (one INSERT, UPDATE, etc.) of a replication transaction.
+ *
+ * Start a transaction, if this is the first step (else we keep using the
+ * existing transaction).
+ * Also provide a global snapshot and ensure we run in ApplyMessageContext.
+ */
+static void
+begin_replication_step(void)
+{
+ SetCurrentStatementStartTimestamp();
+
+ if (!IsTransactionState())
+ {
+ StartTransactionCommand();
+ maybe_reread_subscription();
+ }
+
+ PushActiveSnapshot(GetTransactionSnapshot());
+
+ MemoryContextSwitchTo(ApplyMessageContext);
+}
+
+/*
+ * Finish up one step of a replication transaction.
+ * Callers of begin_replication_step() must also call this.
+ *
+ * We don't close out the transaction here, but we should increment
+ * the command counter to make the effects of this step visible.
+ */
+static void
+end_replication_step(void)
+{
+ PopActiveSnapshot();
+
+ CommandCounterIncrement();
+}
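+
+/*
+ * A minimal usage sketch of the two helpers above (mirroring the message
+ * handlers below): each change handler brackets its work as
+ *
+ *	begin_replication_step();
+ *	relid = logicalrep_read_insert(s, &newtup);
+ *	... apply the change ...
+ *	end_replication_step();
+ *
+ * so every step runs with a fresh snapshot inside the (possibly still open)
+ * transaction.
+ */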
+
+
+/*
+ * Executor state preparation for evaluation of constraint expressions,
+ * indexes and triggers for the specified relation.
+ *
+ * Note that the caller must open and close any indexes to be updated.
+ */
+static ApplyExecutionData *
+create_edata_for_relation(LogicalRepRelMapEntry *rel)
+{
+ ApplyExecutionData *edata;
+ EState *estate;
+ ResultRelInfo *resultRelInfo;
+ RangeTblEntry *rte;
+
+ edata = (ApplyExecutionData *) palloc0(sizeof(ApplyExecutionData));
+ edata->targetRel = rel;
+
+ edata->estate = estate = CreateExecutorState();
+
+ rte = makeNode(RangeTblEntry);
+ rte->rtekind = RTE_RELATION;
+ rte->relid = RelationGetRelid(rel->localrel);
+ rte->relkind = rel->localrel->rd_rel->relkind;
+ rte->rellockmode = AccessShareLock;
+ ExecInitRangeTable(estate, list_make1(rte));
+
+ edata->targetRelInfo = resultRelInfo = makeNode(ResultRelInfo);
+
+	/*
+	 * Use the Relation opened by logicalrep_rel_open() instead of opening
+	 * it again.
+	 */
+ InitResultRelInfo(resultRelInfo, rel->localrel, 1, NULL, 0);
+
+ estate->es_result_relations = resultRelInfo;
+ estate->es_num_result_relations = 1;
+ estate->es_result_relation_info = resultRelInfo;
+
+ estate->es_output_cid = GetCurrentCommandId(true);
+
+ /* Prepare to catch AFTER triggers. */
+ AfterTriggerBeginQuery();
+
+ /* other fields of edata remain NULL for now */
+
+ return edata;
+}
+
+/*
+ * Finish any operations related to the executor state created by
+ * create_edata_for_relation().
+ */
+static void
+finish_edata(ApplyExecutionData *edata)
+{
+ EState *estate = edata->estate;
+
+ /* Handle any queued AFTER triggers. */
+ AfterTriggerEndQuery(estate);
+
+ /* Shut down tuple routing, if any was done. */
+ if (edata->proute)
+ ExecCleanupTupleRouting(edata->mtstate, edata->proute);
+
+ /*
+ * Cleanup. It might seem that we should call ExecCloseResultRelations()
+ * here, but we intentionally don't. It would close the rel we added to
+ * the estate above, which is wrong because we took no corresponding
+ * refcount. We rely on ExecCleanupTupleRouting() to close any other
+ * relations opened during execution.
+ */
+ ExecResetTupleTable(estate->es_tupleTable, false);
+ FreeExecutorState(estate);
+ pfree(edata);
+}
+
+/*
+ * Evaluate default values for columns that we cannot map to remote relation
+ * columns.
+ *
+ * This allows us to support tables that have more columns on the downstream
+ * than on the upstream.
+ */
+static void
+slot_fill_defaults(LogicalRepRelMapEntry *rel, EState *estate,
+ TupleTableSlot *slot)
+{
+ TupleDesc desc = RelationGetDescr(rel->localrel);
+ int num_phys_attrs = desc->natts;
+ int i;
+ int attnum,
+ num_defaults = 0;
+ int *defmap;
+ ExprState **defexprs;
+ ExprContext *econtext;
+
+ econtext = GetPerTupleExprContext(estate);
+
+	/* We got all the data via replication; no need to evaluate anything. */
+ if (num_phys_attrs == rel->remoterel.natts)
+ return;
+
+ defmap = (int *) palloc(num_phys_attrs * sizeof(int));
+ defexprs = (ExprState **) palloc(num_phys_attrs * sizeof(ExprState *));
+
+ Assert(rel->attrmap->maplen == num_phys_attrs);
+ for (attnum = 0; attnum < num_phys_attrs; attnum++)
+ {
+ Expr *defexpr;
+
+ if (TupleDescAttr(desc, attnum)->attisdropped || TupleDescAttr(desc, attnum)->attgenerated)
+ continue;
+
+ if (rel->attrmap->attnums[attnum] >= 0)
+ continue;
+
+ defexpr = (Expr *) build_column_default(rel->localrel, attnum + 1);
+
+ if (defexpr != NULL)
+ {
+ /* Run the expression through planner */
+ defexpr = expression_planner(defexpr);
+
+ /* Initialize executable expression in copycontext */
+ defexprs[num_defaults] = ExecInitExpr(defexpr, NULL);
+ defmap[num_defaults] = attnum;
+ num_defaults++;
+ }
+ }
+
+ for (i = 0; i < num_defaults; i++)
+ slot->tts_values[defmap[i]] =
+ ExecEvalExpr(defexprs[i], econtext, &slot->tts_isnull[defmap[i]]);
+}
+
+/*
+ * Error callback to give more context info about data conversion failures
+ * while reading data from the remote server.
+ */
+static void
+slot_store_error_callback(void *arg)
+{
+ SlotErrCallbackArg *errarg = (SlotErrCallbackArg *) arg;
+ LogicalRepRelMapEntry *rel;
+
+ /* Nothing to do if remote attribute number is not set */
+ if (errarg->remote_attnum < 0)
+ return;
+
+ rel = errarg->rel;
+ errcontext("processing remote data for replication target relation \"%s.%s\" column \"%s\"",
+ rel->remoterel.nspname, rel->remoterel.relname,
+ rel->remoterel.attnames[errarg->remote_attnum]);
+}
+
+/*
+ * Store data in C string form into slot.
+ * This is similar to BuildTupleFromCStrings but TupleTableSlot fits our
+ * use better.
+ */
+static void
+slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
+ char **values)
+{
+ int natts = slot->tts_tupleDescriptor->natts;
+ int i;
+ SlotErrCallbackArg errarg;
+ ErrorContextCallback errcallback;
+
+ ExecClearTuple(slot);
+
+ /* Push callback + info on the error context stack */
+ errarg.rel = rel;
+ errarg.remote_attnum = -1;
+ errcallback.callback = slot_store_error_callback;
+ errcallback.arg = (void *) &errarg;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* Call the "in" function for each non-dropped attribute */
+ Assert(natts == rel->attrmap->maplen);
+ for (i = 0; i < natts; i++)
+ {
+ Form_pg_attribute att = TupleDescAttr(slot->tts_tupleDescriptor, i);
+ int remoteattnum = rel->attrmap->attnums[i];
+
+ if (!att->attisdropped && remoteattnum >= 0 &&
+ values[remoteattnum] != NULL)
+ {
+ Oid typinput;
+ Oid typioparam;
+
+ errarg.remote_attnum = remoteattnum;
+
+ getTypeInputInfo(att->atttypid, &typinput, &typioparam);
+ slot->tts_values[i] =
+ OidInputFunctionCall(typinput, values[remoteattnum],
+ typioparam, att->atttypmod);
+ slot->tts_isnull[i] = false;
+
+ errarg.remote_attnum = -1;
+ }
+ else
+ {
+			/*
+			 * We assign NULL to dropped attributes, NULL values, and missing
+			 * values (missing values should later be filled in using
+			 * slot_fill_defaults).
+			 */
+ slot->tts_values[i] = (Datum) 0;
+ slot->tts_isnull[i] = true;
+ }
+ }
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+
+ ExecStoreVirtualTuple(slot);
+}
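+
+/*
+ * Note on the mapping used above: rel->attrmap->attnums[i] holds the
+ * zero-based remote column number for local column i, or a negative value
+ * when the local column has no remote counterpart. For a hypothetical
+ * subscriber-only third column, attnums would be {0, 1, -1}; that column is
+ * set to NULL here and later filled in by slot_fill_defaults().
+ */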
+
+/*
+ * Replace selected columns with user data provided as C strings.
+ * This is somewhat similar to heap_modify_tuple but also calls the type
+ * input functions on the user data.
+ * "slot" is filled with a copy of the tuple in "srcslot", with
+ * columns selected by the "replaces" array replaced with data values
+ * from "values".
+ * Caution: unreplaced pass-by-ref columns in "slot" will point into the
+ * storage for "srcslot". This is OK for current usage, but someday we may
+ * need to materialize "slot" at the end to make it independent of "srcslot".
+ */
+static void
+slot_modify_cstrings(TupleTableSlot *slot, TupleTableSlot *srcslot,
+ LogicalRepRelMapEntry *rel,
+ char **values, bool *replaces)
+{
+ int natts = slot->tts_tupleDescriptor->natts;
+ int i;
+ SlotErrCallbackArg errarg;
+ ErrorContextCallback errcallback;
+
+ /* We'll fill "slot" with a virtual tuple, so we must start with ... */
+ ExecClearTuple(slot);
+
+ /*
+ * Copy all the column data from srcslot, so that we'll have valid values
+ * for unreplaced columns.
+ */
+ Assert(natts == srcslot->tts_tupleDescriptor->natts);
+ slot_getallattrs(srcslot);
+ memcpy(slot->tts_values, srcslot->tts_values, natts * sizeof(Datum));
+ memcpy(slot->tts_isnull, srcslot->tts_isnull, natts * sizeof(bool));
+
+ /* For error reporting, push callback + info on the error context stack */
+ errarg.rel = rel;
+ errarg.remote_attnum = -1;
+ errcallback.callback = slot_store_error_callback;
+ errcallback.arg = (void *) &errarg;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* Call the "in" function for each replaced attribute */
+ Assert(natts == rel->attrmap->maplen);
+ for (i = 0; i < natts; i++)
+ {
+ Form_pg_attribute att = TupleDescAttr(slot->tts_tupleDescriptor, i);
+ int remoteattnum = rel->attrmap->attnums[i];
+
+ if (remoteattnum < 0)
+ continue;
+
+ if (!replaces[remoteattnum])
+ continue;
+
+ if (values[remoteattnum] != NULL)
+ {
+ Oid typinput;
+ Oid typioparam;
+
+ errarg.remote_attnum = remoteattnum;
+
+ getTypeInputInfo(att->atttypid, &typinput, &typioparam);
+ slot->tts_values[i] =
+ OidInputFunctionCall(typinput, values[remoteattnum],
+ typioparam, att->atttypmod);
+ slot->tts_isnull[i] = false;
+
+ errarg.remote_attnum = -1;
+ }
+ else
+ {
+ slot->tts_values[i] = (Datum) 0;
+ slot->tts_isnull[i] = true;
+ }
+ }
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+
+ /* And finally, declare that "slot" contains a valid virtual tuple */
+ ExecStoreVirtualTuple(slot);
+}
+
+/*
+ * Handle BEGIN message.
+ */
+static void
+apply_handle_begin(StringInfo s)
+{
+ LogicalRepBeginData begin_data;
+
+ logicalrep_read_begin(s, &begin_data);
+
+ remote_final_lsn = begin_data.final_lsn;
+
+ in_remote_transaction = true;
+
+ pgstat_report_activity(STATE_RUNNING, NULL);
+}
+
+/*
+ * Handle COMMIT message.
+ *
+ * TODO: support tracking of multiple origins
+ */
+static void
+apply_handle_commit(StringInfo s)
+{
+ LogicalRepCommitData commit_data;
+
+ logicalrep_read_commit(s, &commit_data);
+
+ if (commit_data.commit_lsn != remote_final_lsn)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg_internal("incorrect commit LSN %X/%X in commit message (expected %X/%X)",
+ (uint32) (commit_data.commit_lsn >> 32),
+ (uint32) commit_data.commit_lsn,
+ (uint32) (remote_final_lsn >> 32),
+ (uint32) remote_final_lsn)));
+
+	/* The synchronization worker runs in a single transaction. */
+ if (IsTransactionState() && !am_tablesync_worker())
+ {
+ /*
+ * Update origin state so we can restart streaming from correct
+ * position in case of crash.
+ */
+ replorigin_session_origin_lsn = commit_data.end_lsn;
+ replorigin_session_origin_timestamp = commit_data.committime;
+
+ CommitTransactionCommand();
+ pgstat_report_stat(false);
+
+ store_flush_position(commit_data.end_lsn);
+ }
+ else
+ {
+ /* Process any invalidation messages that might have accumulated. */
+ AcceptInvalidationMessages();
+ maybe_reread_subscription();
+ }
+
+ in_remote_transaction = false;
+
+ /* Process any tables that are being synchronized in parallel. */
+ process_syncing_tables(commit_data.end_lsn);
+
+ pgstat_report_activity(STATE_IDLE, NULL);
+}
+
+/*
+ * Handle ORIGIN message.
+ *
+ * TODO: support tracking of multiple origins
+ */
+static void
+apply_handle_origin(StringInfo s)
+{
+	/*
+	 * The ORIGIN message can only come inside a remote transaction and
+	 * before any actual writes.
+	 */
+ if (!in_remote_transaction ||
+ (IsTransactionState() && !am_tablesync_worker()))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("ORIGIN message sent out of order")));
+}
+
+/*
+ * Handle RELATION message.
+ *
+ * Note we don't do validation against the local schema here. That
+ * validation is postponed until the first change for the relation arrives,
+ * since we only care about the schema when applying changes for it anyway,
+ * and this way we do less locking.
+ */
+static void
+apply_handle_relation(StringInfo s)
+{
+ LogicalRepRelation *rel;
+
+ rel = logicalrep_read_rel(s);
+ logicalrep_relmap_update(rel);
+}
+
+/*
+ * Handle TYPE message.
+ *
+ * This is now vestigial; we read the info and discard it.
+ */
+static void
+apply_handle_type(StringInfo s)
+{
+ LogicalRepTyp typ;
+
+ logicalrep_read_typ(s, &typ);
+}
+
+/*
+ * Get the replica identity index, or if that is not defined, the primary
+ * key.
+ *
+ * If neither is defined, returns InvalidOid.
+ */
+static Oid
+GetRelationIdentityOrPK(Relation rel)
+{
+ Oid idxoid;
+
+ idxoid = RelationGetReplicaIndex(rel);
+
+ if (!OidIsValid(idxoid))
+ idxoid = RelationGetPrimaryKeyIndex(rel);
+
+ return idxoid;
+}
+
+/*
+ * Handle INSERT message.
+ */
+static void
+apply_handle_insert(StringInfo s)
+{
+ LogicalRepRelMapEntry *rel;
+ LogicalRepTupleData newtup;
+ LogicalRepRelId relid;
+ ApplyExecutionData *edata;
+ EState *estate;
+ TupleTableSlot *remoteslot;
+ MemoryContext oldctx;
+
+ begin_replication_step();
+
+ relid = logicalrep_read_insert(s, &newtup);
+ rel = logicalrep_rel_open(relid, RowExclusiveLock);
+ if (!should_apply_changes_for_rel(rel))
+ {
+		/*
+		 * The relation can't become interesting in the middle of the
+		 * transaction, so it's safe to unlock it.
+		 */
+ logicalrep_rel_close(rel, RowExclusiveLock);
+ end_replication_step();
+ return;
+ }
+
+ /* Initialize the executor state. */
+ edata = create_edata_for_relation(rel);
+ estate = edata->estate;
+ remoteslot = ExecInitExtraTupleSlot(estate,
+ RelationGetDescr(rel->localrel),
+ &TTSOpsVirtual);
+
+ /* Process and store remote tuple in the slot */
+ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
+ slot_store_cstrings(remoteslot, rel, newtup.values);
+ slot_fill_defaults(rel, estate, remoteslot);
+ MemoryContextSwitchTo(oldctx);
+
+ /* For a partitioned table, insert the tuple into a partition. */
+ if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ apply_handle_tuple_routing(edata,
+ remoteslot, NULL, CMD_INSERT);
+ else
+ apply_handle_insert_internal(estate->es_result_relation_info, estate,
+ remoteslot);
+
+ finish_edata(edata);
+
+ logicalrep_rel_close(rel, NoLock);
+
+ end_replication_step();
+}
+
+/* Workhorse for apply_handle_insert() */
+static void
+apply_handle_insert_internal(ResultRelInfo *relinfo,
+ EState *estate, TupleTableSlot *remoteslot)
+{
+ ExecOpenIndices(relinfo, false);
+
+ /* Do the insert. */
+ ExecSimpleRelationInsert(estate, remoteslot);
+
+ /* Cleanup. */
+ ExecCloseIndices(relinfo);
+}
+
+/*
+ * Check if the logical replication relation is updatable and throw
+ * appropriate error if it isn't.
+ */
+static void
+check_relation_updatable(LogicalRepRelMapEntry *rel)
+{
+ /* Updatable, no error. */
+ if (rel->updatable)
+ return;
+
+	/*
+	 * We are in error mode, so it's fine that this is somewhat slow; it's
+	 * better to give the user a correct error.
+	 */
+ if (OidIsValid(GetRelationIdentityOrPK(rel->localrel)))
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("publisher did not send replica identity column "
+ "expected by the logical replication target relation \"%s.%s\"",
+ rel->remoterel.nspname, rel->remoterel.relname)));
+ }
+
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical replication target relation \"%s.%s\" has "
+ "neither REPLICA IDENTITY index nor PRIMARY "
+ "KEY and published relation does not have "
+ "REPLICA IDENTITY FULL",
+ rel->remoterel.nspname, rel->remoterel.relname)));
+}
+
+/*
+ * Handle UPDATE message.
+ *
+ * TODO: FDW support
+ */
+static void
+apply_handle_update(StringInfo s)
+{
+ LogicalRepRelMapEntry *rel;
+ LogicalRepRelId relid;
+ ApplyExecutionData *edata;
+ EState *estate;
+ LogicalRepTupleData oldtup;
+ LogicalRepTupleData newtup;
+ bool has_oldtup;
+ TupleTableSlot *remoteslot;
+ RangeTblEntry *target_rte;
+ MemoryContext oldctx;
+
+ begin_replication_step();
+
+ relid = logicalrep_read_update(s, &has_oldtup, &oldtup,
+ &newtup);
+ rel = logicalrep_rel_open(relid, RowExclusiveLock);
+ if (!should_apply_changes_for_rel(rel))
+ {
+ /*
+ * The relation can't become interesting in the middle of the
+ * transaction so it's safe to unlock it.
+ */
+ logicalrep_rel_close(rel, RowExclusiveLock);
+ end_replication_step();
+ return;
+ }
+
+ /* Check if we can do the update. */
+ check_relation_updatable(rel);
+
+ /* Initialize the executor state. */
+ edata = create_edata_for_relation(rel);
+ estate = edata->estate;
+ remoteslot = ExecInitExtraTupleSlot(estate,
+ RelationGetDescr(rel->localrel),
+ &TTSOpsVirtual);
+
+ /*
+ * Populate updatedCols so that per-column triggers can fire. This could
+ * include more columns than were actually changed on the publisher
+ * because the logical replication protocol doesn't contain that
+ * information. But it would for example exclude columns that only exist
+ * on the subscriber, since we are not touching those.
+ */
+ target_rte = list_nth(estate->es_range_table, 0);
+ for (int i = 0; i < remoteslot->tts_tupleDescriptor->natts; i++)
+ {
+ Form_pg_attribute att = TupleDescAttr(remoteslot->tts_tupleDescriptor, i);
+ int remoteattnum = rel->attrmap->attnums[i];
+
+ if (!att->attisdropped && remoteattnum >= 0)
+ {
+ if (newtup.changed[remoteattnum])
+ target_rte->updatedCols =
+ bms_add_member(target_rte->updatedCols,
+ i + 1 - FirstLowInvalidHeapAttributeNumber);
+ }
+ }
+
+ /* Also populate extraUpdatedCols, in case we have generated columns */
+ fill_extraUpdatedCols(target_rte, rel->localrel);
+
+ /* Build the search tuple. */
+ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
+ slot_store_cstrings(remoteslot, rel,
+ has_oldtup ? oldtup.values : newtup.values);
+ MemoryContextSwitchTo(oldctx);
+
+ /* For a partitioned table, apply update to correct partition. */
+ if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ apply_handle_tuple_routing(edata,
+ remoteslot, &newtup, CMD_UPDATE);
+ else
+ apply_handle_update_internal(estate->es_result_relation_info, estate,
+ remoteslot, &newtup, rel);
+
+ finish_edata(edata);
+
+ logicalrep_rel_close(rel, NoLock);
+
+ end_replication_step();
+}
+
+/* Workhorse for apply_handle_update() */
+static void
+apply_handle_update_internal(ResultRelInfo *relinfo,
+ EState *estate, TupleTableSlot *remoteslot,
+ LogicalRepTupleData *newtup,
+ LogicalRepRelMapEntry *relmapentry)
+{
+ Relation localrel = relinfo->ri_RelationDesc;
+ EPQState epqstate;
+ TupleTableSlot *localslot;
+ bool found;
+ MemoryContext oldctx;
+
+ EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1);
+ ExecOpenIndices(relinfo, false);
+
+ found = FindReplTupleInLocalRel(estate, localrel,
+ &relmapentry->remoterel,
+ remoteslot, &localslot);
+ ExecClearTuple(remoteslot);
+
+ /*
+ * Tuple found.
+ *
+ * Note this will fail if there are other conflicting unique indexes.
+ */
+ if (found)
+ {
+ /* Process and store remote tuple in the slot */
+ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
+ slot_modify_cstrings(remoteslot, localslot, relmapentry,
+ newtup->values, newtup->changed);
+ MemoryContextSwitchTo(oldctx);
+
+ EvalPlanQualSetSlot(&epqstate, remoteslot);
+
+ /* Do the actual update. */
+ ExecSimpleRelationUpdate(estate, &epqstate, localslot, remoteslot);
+ }
+ else
+ {
+ /*
+ * The tuple to be updated could not be found. Do nothing except for
+ * emitting a log message.
+ *
+ * XXX should this be promoted to ereport(LOG) perhaps?
+ */
+ elog(DEBUG1,
+ "logical replication did not find row to be updated "
+ "in replication target relation \"%s\"",
+ RelationGetRelationName(localrel));
+ }
+
+ /* Cleanup. */
+ ExecCloseIndices(relinfo);
+ EvalPlanQualEnd(&epqstate);
+}
+
+/*
+ * Handle DELETE message.
+ *
+ * TODO: FDW support
+ */
+static void
+apply_handle_delete(StringInfo s)
+{
+ LogicalRepRelMapEntry *rel;
+ LogicalRepTupleData oldtup;
+ LogicalRepRelId relid;
+ ApplyExecutionData *edata;
+ EState *estate;
+ TupleTableSlot *remoteslot;
+ MemoryContext oldctx;
+
+ begin_replication_step();
+
+ relid = logicalrep_read_delete(s, &oldtup);
+ rel = logicalrep_rel_open(relid, RowExclusiveLock);
+ if (!should_apply_changes_for_rel(rel))
+ {
+ /*
+ * The relation can't become interesting in the middle of the
+ * transaction so it's safe to unlock it.
+ */
+ logicalrep_rel_close(rel, RowExclusiveLock);
+ end_replication_step();
+ return;
+ }
+
+ /* Check if we can do the delete. */
+ check_relation_updatable(rel);
+
+ /* Initialize the executor state. */
+ edata = create_edata_for_relation(rel);
+ estate = edata->estate;
+ remoteslot = ExecInitExtraTupleSlot(estate,
+ RelationGetDescr(rel->localrel),
+ &TTSOpsVirtual);
+
+ /* Build the search tuple. */
+ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
+ slot_store_cstrings(remoteslot, rel, oldtup.values);
+ MemoryContextSwitchTo(oldctx);
+
+ /* For a partitioned table, apply delete to correct partition. */
+ if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ apply_handle_tuple_routing(edata,
+ remoteslot, NULL, CMD_DELETE);
+ else
+ apply_handle_delete_internal(estate->es_result_relation_info, estate,
+ remoteslot, &rel->remoterel);
+
+ finish_edata(edata);
+
+ logicalrep_rel_close(rel, NoLock);
+
+ end_replication_step();
+}
+
+/* Workhorse for apply_handle_delete() */
+static void
+apply_handle_delete_internal(ResultRelInfo *relinfo, EState *estate,
+ TupleTableSlot *remoteslot,
+ LogicalRepRelation *remoterel)
+{
+ Relation localrel = relinfo->ri_RelationDesc;
+ EPQState epqstate;
+ TupleTableSlot *localslot;
+ bool found;
+
+ EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1);
+ ExecOpenIndices(relinfo, false);
+
+ found = FindReplTupleInLocalRel(estate, localrel, remoterel,
+ remoteslot, &localslot);
+
+ /* If found delete it. */
+ if (found)
+ {
+ EvalPlanQualSetSlot(&epqstate, localslot);
+
+ /* Do the actual delete. */
+ ExecSimpleRelationDelete(estate, &epqstate, localslot);
+ }
+ else
+ {
+ /*
+ * The tuple to be deleted could not be found. Do nothing except for
+ * emitting a log message.
+ *
+ * XXX should this be promoted to ereport(LOG) perhaps?
+ */
+ elog(DEBUG1,
+ "logical replication did not find row to be deleted "
+ "in replication target relation \"%s\"",
+ RelationGetRelationName(localrel));
+ }
+
+ /* Cleanup. */
+ ExecCloseIndices(relinfo);
+ EvalPlanQualEnd(&epqstate);
+}
+
+/*
+ * Try to find the tuple received from the publication side (in 'remoteslot')
+ * in the corresponding local relation, using either the replica identity
+ * index, the primary key, or, if needed, a sequential scan.
+ *
+ * Local tuple, if found, is returned in '*localslot'.
+ */
+static bool
+FindReplTupleInLocalRel(EState *estate, Relation localrel,
+ LogicalRepRelation *remoterel,
+ TupleTableSlot *remoteslot,
+ TupleTableSlot **localslot)
+{
+ Oid idxoid;
+ bool found;
+
+ *localslot = table_slot_create(localrel, &estate->es_tupleTable);
+
+ idxoid = GetRelationIdentityOrPK(localrel);
+ Assert(OidIsValid(idxoid) ||
+ (remoterel->replident == REPLICA_IDENTITY_FULL));
+
+ if (OidIsValid(idxoid))
+ found = RelationFindReplTupleByIndex(localrel, idxoid,
+ LockTupleExclusive,
+ remoteslot, *localslot);
+ else
+ found = RelationFindReplTupleSeq(localrel, LockTupleExclusive,
+ remoteslot, *localslot);
+
+ return found;
+}
+
+/*
+ * This handles insert, update, delete on a partitioned table.
+ */
+static void
+apply_handle_tuple_routing(ApplyExecutionData *edata,
+ TupleTableSlot *remoteslot,
+ LogicalRepTupleData *newtup,
+ CmdType operation)
+{
+ EState *estate = edata->estate;
+ LogicalRepRelMapEntry *relmapentry = edata->targetRel;
+ ResultRelInfo *relinfo = edata->targetRelInfo;
+ Relation parentrel = relinfo->ri_RelationDesc;
+ ModifyTableState *mtstate;
+ PartitionTupleRouting *proute;
+ ResultRelInfo *partrelinfo;
+ Relation partrel;
+ TupleTableSlot *remoteslot_part;
+ PartitionRoutingInfo *partinfo;
+ TupleConversionMap *map;
+ MemoryContext oldctx;
+
+ /* ModifyTableState is needed for ExecFindPartition(). */
+ edata->mtstate = mtstate = makeNode(ModifyTableState);
+ mtstate->ps.plan = NULL;
+ mtstate->ps.state = estate;
+ mtstate->operation = operation;
+ mtstate->resultRelInfo = relinfo;
+
+ /* ... as is PartitionTupleRouting. */
+ edata->proute = proute = ExecSetupPartitionTupleRouting(estate, mtstate,
+ parentrel);
+
+ /*
+ * Find the partition to which the "search tuple" belongs.
+ */
+ Assert(remoteslot != NULL);
+ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
+ partrelinfo = ExecFindPartition(mtstate, relinfo, proute,
+ remoteslot, estate);
+ Assert(partrelinfo != NULL);
+ partrel = partrelinfo->ri_RelationDesc;
+
+ /*
+ * To perform any of the operations below, the tuple must match the
+ * partition's rowtype. Convert if needed or just copy, using a dedicated
+ * slot to store the tuple in any case.
+ */
+ partinfo = partrelinfo->ri_PartitionInfo;
+ remoteslot_part = partinfo->pi_PartitionTupleSlot;
+ if (remoteslot_part == NULL)
+ remoteslot_part = table_slot_create(partrel, &estate->es_tupleTable);
+ map = partinfo->pi_RootToPartitionMap;
+ if (map != NULL)
+ remoteslot_part = execute_attr_map_slot(map->attrMap, remoteslot,
+ remoteslot_part);
+ else
+ {
+ remoteslot_part = ExecCopySlot(remoteslot_part, remoteslot);
+ slot_getallattrs(remoteslot_part);
+ }
+ MemoryContextSwitchTo(oldctx);
+
+ estate->es_result_relation_info = partrelinfo;
+ switch (operation)
+ {
+ case CMD_INSERT:
+ apply_handle_insert_internal(partrelinfo, estate,
+ remoteslot_part);
+ break;
+
+ case CMD_DELETE:
+ apply_handle_delete_internal(partrelinfo, estate,
+ remoteslot_part,
+ &relmapentry->remoterel);
+ break;
+
+ case CMD_UPDATE:
+
+ /*
+ * For UPDATE, depending on whether or not the updated tuple
+ * satisfies the partition's constraint, perform a simple UPDATE
+ * of the partition or move the updated tuple into a different
+ * suitable partition.
+ */
+ {
+ AttrMap *attrmap = map ? map->attrMap : NULL;
+ LogicalRepRelMapEntry *part_entry;
+ TupleTableSlot *localslot;
+ ResultRelInfo *partrelinfo_new;
+ bool found;
+
+ part_entry = logicalrep_partition_open(relmapentry, partrel,
+ attrmap);
+
+ /* Get the matching local tuple from the partition. */
+ found = FindReplTupleInLocalRel(estate, partrel,
+ &part_entry->remoterel,
+ remoteslot_part, &localslot);
+ if (!found)
+ {
+ /*
+ * The tuple to be updated could not be found. Do nothing
+ * except for emitting a log message.
+ *
+ * XXX should this be promoted to ereport(LOG) perhaps?
+ */
+ elog(DEBUG1,
+ "logical replication did not find row to be updated "
+ "in replication target relation's partition \"%s\"",
+ RelationGetRelationName(partrel));
+ return;
+ }
+
+ /*
+ * Apply the update to the local tuple, putting the result in
+ * remoteslot_part.
+ */
+ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
+ slot_modify_cstrings(remoteslot_part, localslot,
+ part_entry,
+ newtup->values, newtup->changed);
+ MemoryContextSwitchTo(oldctx);
+
+ /*
+ * Does the updated tuple still satisfy the current
+ * partition's constraint?
+ */
+ if (partrelinfo->ri_PartitionCheck == NULL ||
+ ExecPartitionCheck(partrelinfo, remoteslot_part, estate,
+ false))
+ {
+ /*
+ * Yes, so simply UPDATE the partition. We don't call
+ * apply_handle_update_internal() here, which would
+ * normally do the following work, to avoid repeating some
+ * work already done above to find the local tuple in the
+ * partition.
+ */
+ EPQState epqstate;
+
+ EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1);
+ ExecOpenIndices(partrelinfo, false);
+
+ EvalPlanQualSetSlot(&epqstate, remoteslot_part);
+ ExecSimpleRelationUpdate(estate, &epqstate, localslot,
+ remoteslot_part);
+ ExecCloseIndices(partrelinfo);
+ EvalPlanQualEnd(&epqstate);
+ }
+ else
+ {
+ /* Move the tuple into the new partition. */
+
+ /*
+ * The new partition will be found using tuple routing, which
+ * can only occur via the parent table, so we might need to
+ * convert the tuple to the parent's rowtype first. Note that
+ * this is the tuple found in the partition, not the original
+ * search tuple received by this function.
+ */
+ if (map)
+ {
+ TupleConversionMap *PartitionToRootMap =
+ convert_tuples_by_name(RelationGetDescr(partrel),
+ RelationGetDescr(parentrel));
+
+ remoteslot =
+ execute_attr_map_slot(PartitionToRootMap->attrMap,
+ remoteslot_part, remoteslot);
+ }
+ else
+ {
+ remoteslot = ExecCopySlot(remoteslot, remoteslot_part);
+ slot_getallattrs(remoteslot);
+ }
+
+ /* Find the new partition. */
+ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
+ partrelinfo_new = ExecFindPartition(mtstate, relinfo,
+ proute, remoteslot,
+ estate);
+ MemoryContextSwitchTo(oldctx);
+ Assert(partrelinfo_new != partrelinfo);
+
+ /* DELETE old tuple found in the old partition. */
+ estate->es_result_relation_info = partrelinfo;
+ apply_handle_delete_internal(partrelinfo, estate,
+ localslot,
+ &relmapentry->remoterel);
+
+ /* INSERT new tuple into the new partition. */
+
+ /*
+ * Convert the replacement tuple to match the destination
+ * partition rowtype.
+ */
+ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
+ partrel = partrelinfo_new->ri_RelationDesc;
+ partinfo = partrelinfo_new->ri_PartitionInfo;
+ remoteslot_part = partinfo->pi_PartitionTupleSlot;
+ if (remoteslot_part == NULL)
+ remoteslot_part = table_slot_create(partrel,
+ &estate->es_tupleTable);
+ map = partinfo->pi_RootToPartitionMap;
+ if (map != NULL)
+ {
+ remoteslot_part = execute_attr_map_slot(map->attrMap,
+ remoteslot,
+ remoteslot_part);
+ }
+ else
+ {
+ remoteslot_part = ExecCopySlot(remoteslot_part,
+ remoteslot);
+ slot_getallattrs(remoteslot_part);
+ }
+ MemoryContextSwitchTo(oldctx);
+ estate->es_result_relation_info = partrelinfo_new;
+ apply_handle_insert_internal(partrelinfo_new, estate,
+ remoteslot_part);
+ }
+ }
+ break;
+
+ default:
+ elog(ERROR, "unrecognized CmdType: %d", (int) operation);
+ break;
+ }
+}
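+
+/*
+ * Worked example for the UPDATE branch above (illustrative table and column
+ * names): given a parent table partitioned by range on column c into p1
+ * (c < 10) and p2 (c >= 10), an incoming UPDATE changing c from 5 to 15 is
+ * first routed to p1; since the updated tuple no longer satisfies p1's
+ * constraint, it is applied as a DELETE from p1 followed by re-routing of
+ * the updated tuple and an INSERT into p2.
+ */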
+
+/*
+ * Handle TRUNCATE message.
+ *
+ * TODO: FDW support
+ */
+static void
+apply_handle_truncate(StringInfo s)
+{
+ bool cascade = false;
+ bool restart_seqs = false;
+ List *remote_relids = NIL;
+ List *remote_rels = NIL;
+ List *rels = NIL;
+ List *part_rels = NIL;
+ List *relids = NIL;
+ List *relids_logged = NIL;
+ ListCell *lc;
+ LOCKMODE lockmode = AccessExclusiveLock;
+
+ begin_replication_step();
+
+ remote_relids = logicalrep_read_truncate(s, &cascade, &restart_seqs);
+
+ foreach(lc, remote_relids)
+ {
+ LogicalRepRelId relid = lfirst_oid(lc);
+ LogicalRepRelMapEntry *rel;
+
+ rel = logicalrep_rel_open(relid, lockmode);
+ if (!should_apply_changes_for_rel(rel))
+ {
+ /*
+ * The relation can't become interesting in the middle of the
+ * transaction so it's safe to unlock it.
+ */
+ logicalrep_rel_close(rel, lockmode);
+ continue;
+ }
+
+ remote_rels = lappend(remote_rels, rel);
+ rels = lappend(rels, rel->localrel);
+ relids = lappend_oid(relids, rel->localreloid);
+ if (RelationIsLogicallyLogged(rel->localrel))
+ relids_logged = lappend_oid(relids_logged, rel->localreloid);
+
+ /*
+ * Truncate partitions if we got a message to truncate a partitioned
+ * table.
+ */
+ if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ {
+ ListCell *child;
+ List *children = find_all_inheritors(rel->localreloid,
+ lockmode,
+ NULL);
+
+ foreach(child, children)
+ {
+ Oid childrelid = lfirst_oid(child);
+ Relation childrel;
+
+ if (list_member_oid(relids, childrelid))
+ continue;
+
+ /* find_all_inheritors already got lock */
+ childrel = table_open(childrelid, NoLock);
+
+ /*
+ * Ignore temp tables of other backends. See similar code in
+ * ExecuteTruncate().
+ */
+ if (RELATION_IS_OTHER_TEMP(childrel))
+ {
+ table_close(childrel, lockmode);
+ continue;
+ }
+
+ rels = lappend(rels, childrel);
+ part_rels = lappend(part_rels, childrel);
+ relids = lappend_oid(relids, childrelid);
+ /* Log this relation only if needed for logical decoding */
+ if (RelationIsLogicallyLogged(childrel))
+ relids_logged = lappend_oid(relids_logged, childrelid);
+ }
+ }
+ }
+
+ /*
+ * Even if CASCADE was used on the upstream master, we explicitly default
+ * to replaying changes without further cascading. This might later be
+ * made configurable with a user-specified option.
+ */
+ ExecuteTruncateGuts(rels, relids, relids_logged, DROP_RESTRICT, restart_seqs);
+
+ foreach(lc, remote_rels)
+ {
+ LogicalRepRelMapEntry *rel = lfirst(lc);
+
+ logicalrep_rel_close(rel, NoLock);
+ }
+ foreach(lc, part_rels)
+ {
+ Relation rel = lfirst(lc);
+
+ table_close(rel, NoLock);
+ }
+
+ end_replication_step();
+}
+
+
+/*
+ * Logical replication protocol message dispatcher.
+ */
+static void
+apply_dispatch(StringInfo s)
+{
+ char action = pq_getmsgbyte(s);
+
+ switch (action)
+ {
+ /* BEGIN */
+ case 'B':
+ apply_handle_begin(s);
+ break;
+ /* COMMIT */
+ case 'C':
+ apply_handle_commit(s);
+ break;
+ /* INSERT */
+ case 'I':
+ apply_handle_insert(s);
+ break;
+ /* UPDATE */
+ case 'U':
+ apply_handle_update(s);
+ break;
+ /* DELETE */
+ case 'D':
+ apply_handle_delete(s);
+ break;
+ /* TRUNCATE */
+ case 'T':
+ apply_handle_truncate(s);
+ break;
+ /* RELATION */
+ case 'R':
+ apply_handle_relation(s);
+ break;
+ /* TYPE */
+ case 'Y':
+ apply_handle_type(s);
+ break;
+ /* ORIGIN */
+ case 'O':
+ apply_handle_origin(s);
+ break;
+ default:
+ ereport(ERROR,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("invalid logical replication message type \"%c\"", action)));
+ }
+}
+
+/*
+ * Figure out which write/flush positions to report to the walsender process.
+ *
+ * We can't simply report back the last LSN the walsender sent us because the
+ * local transaction might not yet be flushed to disk locally. Instead we
+ * build a list that associates local with remote LSNs for every commit. When
+ * reporting back the flush position to the sender we iterate that list and
+ * check which entries on it are already locally flushed. Those we can report
+ * as having been flushed.
+ *
+ * *have_pending_txes is set to true if there are outstanding transactions
+ * that still need to be flushed.
+ */
+static void
+get_flush_position(XLogRecPtr *write, XLogRecPtr *flush,
+ bool *have_pending_txes)
+{
+ dlist_mutable_iter iter;
+ XLogRecPtr local_flush = GetFlushRecPtr();
+
+ *write = InvalidXLogRecPtr;
+ *flush = InvalidXLogRecPtr;
+
+ dlist_foreach_modify(iter, &lsn_mapping)
+ {
+ FlushPosition *pos =
+ dlist_container(FlushPosition, node, iter.cur);
+
+ *write = pos->remote_end;
+
+ if (pos->local_end <= local_flush)
+ {
+ *flush = pos->remote_end;
+ dlist_delete(iter.cur);
+ pfree(pos);
+ }
+ else
+ {
+ /*
+ * We don't want to uselessly iterate over the rest of the list,
+ * which could potentially be long. Instead, get the last element
+ * and grab the write position from there.
+ */
+ pos = dlist_tail_element(FlushPosition, node,
+ &lsn_mapping);
+ *write = pos->remote_end;
+ *have_pending_txes = true;
+ return;
+ }
+ }
+
+ *have_pending_txes = !dlist_is_empty(&lsn_mapping);
+}
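+
+/*
+ * Worked example (illustrative LSNs): suppose lsn_mapping holds the pairs
+ * (remote 1/100, local 0/A) and (remote 1/200, local 0/B), and the local
+ * flush pointer has reached 0/A but not 0/B. The loop above then reports
+ * flush = 1/100, removes the first entry, and takes write = 1/200 from the
+ * tail of the list, leaving the second entry pending.
+ */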
+
+/*
+ * Store current remote/local lsn pair in the tracking list.
+ */
+static void
+store_flush_position(XLogRecPtr remote_lsn)
+{
+ FlushPosition *flushpos;
+
+ /* Need to do this in permanent context */
+ MemoryContextSwitchTo(ApplyContext);
+
+ /* Track commit lsn */
+ flushpos = (FlushPosition *) palloc(sizeof(FlushPosition));
+ flushpos->local_end = XactLastCommitEnd;
+ flushpos->remote_end = remote_lsn;
+
+ dlist_push_tail(&lsn_mapping, &flushpos->node);
+ MemoryContextSwitchTo(ApplyMessageContext);
+}
+
+
+/* Update statistics of the worker. */
+static void
+UpdateWorkerStats(XLogRecPtr last_lsn, TimestampTz send_time, bool reply)
+{
+ MyLogicalRepWorker->last_lsn = last_lsn;
+ MyLogicalRepWorker->last_send_time = send_time;
+ MyLogicalRepWorker->last_recv_time = GetCurrentTimestamp();
+ if (reply)
+ {
+ MyLogicalRepWorker->reply_lsn = last_lsn;
+ MyLogicalRepWorker->reply_time = send_time;
+ }
+}
+
+/*
+ * Apply main loop.
+ */
+static void
+LogicalRepApplyLoop(XLogRecPtr last_received)
+{
+ TimestampTz last_recv_timestamp = GetCurrentTimestamp();
+ bool ping_sent = false;
+
+ /*
+ * Init the ApplyMessageContext which we clean up after each replication
+ * protocol message.
+ */
+ ApplyMessageContext = AllocSetContextCreate(ApplyContext,
+ "ApplyMessageContext",
+ ALLOCSET_DEFAULT_SIZES);
+
+ /* mark as idle, before starting to loop */
+ pgstat_report_activity(STATE_IDLE, NULL);
+
+ /* This outer loop iterates once per wait. */
+ for (;;)
+ {
+ pgsocket fd = PGINVALID_SOCKET;
+ int rc;
+ int len;
+ char *buf = NULL;
+ bool endofstream = false;
+ long wait_time;
+
+ CHECK_FOR_INTERRUPTS();
+
+ MemoryContextSwitchTo(ApplyMessageContext);
+
+ len = walrcv_receive(LogRepWorkerWalRcvConn, &buf, &fd);
+
+ if (len != 0)
+ {
+ /* Loop to process all available data (without blocking). */
+ for (;;)
+ {
+ CHECK_FOR_INTERRUPTS();
+
+ if (len == 0)
+ {
+ break;
+ }
+ else if (len < 0)
+ {
+ ereport(LOG,
+ (errmsg("data stream from publisher has ended")));
+ endofstream = true;
+ break;
+ }
+ else
+ {
+ int c;
+ StringInfoData s;
+
+ /* Reset timeout. */
+ last_recv_timestamp = GetCurrentTimestamp();
+ ping_sent = false;
+
+ /* Ensure we are reading the data into our memory context. */
+ MemoryContextSwitchTo(ApplyMessageContext);
+
+ s.data = buf;
+ s.len = len;
+ s.cursor = 0;
+ s.maxlen = -1;
+
+ c = pq_getmsgbyte(&s);
+
+ if (c == 'w')
+ {
+ XLogRecPtr start_lsn;
+ XLogRecPtr end_lsn;
+ TimestampTz send_time;
+
+ start_lsn = pq_getmsgint64(&s);
+ end_lsn = pq_getmsgint64(&s);
+ send_time = pq_getmsgint64(&s);
+
+ if (last_received < start_lsn)
+ last_received = start_lsn;
+
+ if (last_received < end_lsn)
+ last_received = end_lsn;
+
+ UpdateWorkerStats(last_received, send_time, false);
+
+ apply_dispatch(&s);
+ }
+ else if (c == 'k')
+ {
+ XLogRecPtr end_lsn;
+ TimestampTz timestamp;
+ bool reply_requested;
+
+ end_lsn = pq_getmsgint64(&s);
+ timestamp = pq_getmsgint64(&s);
+ reply_requested = pq_getmsgbyte(&s);
+
+ if (last_received < end_lsn)
+ last_received = end_lsn;
+
+ send_feedback(last_received, reply_requested, false);
+ UpdateWorkerStats(last_received, timestamp, true);
+ }
+ /* other message types are purposefully ignored */
+
+ MemoryContextReset(ApplyMessageContext);
+ }
+
+ len = walrcv_receive(LogRepWorkerWalRcvConn, &buf, &fd);
+ }
+ }
+
+ /* confirm all writes so far */
+ send_feedback(last_received, false, false);
+
+ if (!in_remote_transaction)
+ {
+ /*
+ * If we didn't get any transactions for a while, there might be
+ * unconsumed invalidation messages in the queue; consume them
+ * now.
+ */
+ AcceptInvalidationMessages();
+ maybe_reread_subscription();
+
+ /* Process any table synchronization changes. */
+ process_syncing_tables(last_received);
+ }
+
+ /* Cleanup the memory. */
+ MemoryContextResetAndDeleteChildren(ApplyMessageContext);
+ MemoryContextSwitchTo(TopMemoryContext);
+
+ /* Check if we need to exit the streaming loop. */
+ if (endofstream)
+ {
+ TimeLineID tli;
+
+ walrcv_endstreaming(LogRepWorkerWalRcvConn, &tli);
+ break;
+ }
+
+ /*
+ * Wait for more data or latch. If we have unflushed transactions,
+ * wake up after WalWriterDelay to see if they've been flushed yet (in
+ * which case we should send a feedback message). Otherwise, there's
+ * no particular urgency about waking up unless we get data or a
+ * signal.
+ */
+ if (!dlist_is_empty(&lsn_mapping))
+ wait_time = WalWriterDelay;
+ else
+ wait_time = NAPTIME_PER_CYCLE;
+
+ rc = WaitLatchOrSocket(MyLatch,
+ WL_SOCKET_READABLE | WL_LATCH_SET |
+ WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ fd, wait_time,
+ WAIT_EVENT_LOGICAL_APPLY_MAIN);
+
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(MyLatch);
+ CHECK_FOR_INTERRUPTS();
+ }
+
+ if (ConfigReloadPending)
+ {
+ ConfigReloadPending = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ }
+
+ if (rc & WL_TIMEOUT)
+ {
+ /*
+ * We didn't receive anything new. If we haven't heard anything
+ * from the server for more than wal_receiver_timeout / 2, ping
+ * the server. Also, if it's been longer than
+ * wal_receiver_status_interval since the last update we sent,
+ * send a status update to the master anyway, to report any
+ * progress in applying WAL.
+ */
+ bool requestReply = false;
+
+ /*
+ * Check if the time since the last receive from the server has
+ * reached the configured limit.
+ */
+ if (wal_receiver_timeout > 0)
+ {
+ TimestampTz now = GetCurrentTimestamp();
+ TimestampTz timeout;
+
+ timeout =
+ TimestampTzPlusMilliseconds(last_recv_timestamp,
+ wal_receiver_timeout);
+
+ if (now >= timeout)
+ ereport(ERROR,
+ (errmsg("terminating logical replication worker due to timeout")));
+
+ /* Check to see if it's time for a ping. */
+ if (!ping_sent)
+ {
+ timeout = TimestampTzPlusMilliseconds(last_recv_timestamp,
+ (wal_receiver_timeout / 2));
+ if (now >= timeout)
+ {
+ requestReply = true;
+ ping_sent = true;
+ }
+ }
+ }
+
+ send_feedback(last_received, requestReply, requestReply);
+ }
+ }
+}
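+
+/*
+ * Summary of the framing consumed above, per the streaming replication
+ * protocol: a 'w' (WAL data) message carries the start LSN, end LSN and
+ * send timestamp as int64s, followed by the logical replication message
+ * payload handed to apply_dispatch(); a 'k' (keepalive) message carries
+ * the end LSN, a send timestamp and one byte indicating whether a reply
+ * is requested.
+ */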
+
+/*
+ * Send a Standby Status Update message to the server.
+ *
+ * 'recvpos' is the latest LSN up to which we have received data; 'force' is
+ * set if we need to send a response to avoid timeouts.
+ */
+static void
+send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
+{
+ static StringInfo reply_message = NULL;
+ static TimestampTz send_time = 0;
+
+ static XLogRecPtr last_recvpos = InvalidXLogRecPtr;
+ static XLogRecPtr last_writepos = InvalidXLogRecPtr;
+ static XLogRecPtr last_flushpos = InvalidXLogRecPtr;
+
+ XLogRecPtr writepos;
+ XLogRecPtr flushpos;
+ TimestampTz now;
+ bool have_pending_txes;
+
+ /*
+ * If the user doesn't want status to be reported to the publisher, be
+ * sure to exit before doing anything at all.
+ */
+ if (!force && wal_receiver_status_interval <= 0)
+ return;
+
+ /* It's legal to not pass a recvpos */
+ if (recvpos < last_recvpos)
+ recvpos = last_recvpos;
+
+ get_flush_position(&writepos, &flushpos, &have_pending_txes);
+
+ /*
+ * If there are no outstanding transactions to flush, we can report the
+ * latest received position. This is important for synchronous replication.
+ */
+ if (!have_pending_txes)
+ flushpos = writepos = recvpos;
+
+ if (writepos < last_writepos)
+ writepos = last_writepos;
+
+ if (flushpos < last_flushpos)
+ flushpos = last_flushpos;
+
+ now = GetCurrentTimestamp();
+
+ /* if we've already reported everything we're good */
+ if (!force &&
+ writepos == last_writepos &&
+ flushpos == last_flushpos &&
+ !TimestampDifferenceExceeds(send_time, now,
+ wal_receiver_status_interval * 1000))
+ return;
+ send_time = now;
+
+ if (!reply_message)
+ {
+ MemoryContext oldctx = MemoryContextSwitchTo(ApplyContext);
+
+ reply_message = makeStringInfo();
+ MemoryContextSwitchTo(oldctx);
+ }
+ else
+ resetStringInfo(reply_message);
+
+ pq_sendbyte(reply_message, 'r');
+ pq_sendint64(reply_message, recvpos); /* write */
+ pq_sendint64(reply_message, flushpos); /* flush */
+ pq_sendint64(reply_message, writepos); /* apply */
+ pq_sendint64(reply_message, now); /* sendTime */
+ pq_sendbyte(reply_message, requestReply); /* replyRequested */
+
+ elog(DEBUG2, "sending feedback (force %d) to recv %X/%X, write %X/%X, flush %X/%X",
+ force,
+ (uint32) (recvpos >> 32), (uint32) recvpos,
+ (uint32) (writepos >> 32), (uint32) writepos,
+ (uint32) (flushpos >> 32), (uint32) flushpos
+ );
+
+ walrcv_send(LogRepWorkerWalRcvConn,
+ reply_message->data, reply_message->len);
+
+ if (recvpos > last_recvpos)
+ last_recvpos = recvpos;
+ if (writepos > last_writepos)
+ last_writepos = writepos;
+ if (flushpos > last_flushpos)
+ last_flushpos = flushpos;
+}
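+
+/*
+ * Layout of the reply built above, per the streaming replication protocol:
+ * 'r', then the write, flush and apply positions as int64 LSNs, the send
+ * timestamp, and a final byte set when a reply is requested. Note that the
+ * position received so far is reported as "write" and the remote LSN known
+ * to be locally flushed as "flush".
+ */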
+
+/*
+ * Reread subscription info if needed. Most changes will cause the worker to
+ * exit.
+ */
+static void
+maybe_reread_subscription(void)
+{
+ MemoryContext oldctx;
+ Subscription *newsub;
+ bool started_tx = false;
+
+ /* When cache state is valid there is nothing to do here. */
+ if (MySubscriptionValid)
+ return;
+
+ /* This function might be called inside or outside of a transaction. */
+ if (!IsTransactionState())
+ {
+ StartTransactionCommand();
+ started_tx = true;
+ }
+
+ /* Ensure allocations in permanent context. */
+ oldctx = MemoryContextSwitchTo(ApplyContext);
+
+ newsub = GetSubscription(MyLogicalRepWorker->subid, true);
+
+ /*
+ * Exit if the subscription was removed. This normally should not happen
+ * as the worker gets killed during DROP SUBSCRIPTION.
+ */
+ if (!newsub)
+ {
+ ereport(LOG,
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "stop because the subscription was removed",
+ MySubscription->name)));
+
+ proc_exit(0);
+ }
+
+ /*
+ * Exit if the subscription was disabled. This normally should not happen
+ * as the worker gets killed during ALTER SUBSCRIPTION ... DISABLE.
+ */
+ if (!newsub->enabled)
+ {
+ ereport(LOG,
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "stop because the subscription was disabled",
+ MySubscription->name)));
+
+ proc_exit(0);
+ }
+
+ /*
+ * Exit if the connection string was changed. The launcher will start a
+ * new worker.
+ */
+ if (strcmp(newsub->conninfo, MySubscription->conninfo) != 0)
+ {
+ ereport(LOG,
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "restart because the connection information was changed",
+ MySubscription->name)));
+
+ proc_exit(0);
+ }
+
+ /*
+ * Exit if the subscription name was changed (it's used for
+ * fallback_application_name). The launcher will start a new worker.
+ */
+ if (strcmp(newsub->name, MySubscription->name) != 0)
+ {
+ ereport(LOG,
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "restart because subscription was renamed",
+ MySubscription->name)));
+
+ proc_exit(0);
+ }
+
+ /* !slotname should never happen when enabled is true. */
+ Assert(newsub->slotname);
+
+ /*
+ * We need to make a new connection to the new slot if the slot name has
+ * changed, so exit here as well in that case.
+ */
+ if (strcmp(newsub->slotname, MySubscription->slotname) != 0)
+ {
+ ereport(LOG,
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "restart because the replication slot name was changed",
+ MySubscription->name)));
+
+ proc_exit(0);
+ }
+
+ /*
+ * Exit if the publication list was changed. The launcher will start a
+ * new worker.
+ */
+ if (!equal(newsub->publications, MySubscription->publications))
+ {
+ ereport(LOG,
+ (errmsg("logical replication apply worker for subscription \"%s\" will "
+ "restart because subscription's publications were changed",
+ MySubscription->name)));
+
+ proc_exit(0);
+ }
+
+ /* Check for other changes that should never happen too. */
+ if (newsub->dbid != MySubscription->dbid)
+ {
+ elog(ERROR, "subscription %u changed unexpectedly",
+ MyLogicalRepWorker->subid);
+ }
+
+ /* Clean old subscription info and switch to new one. */
+ FreeSubscription(MySubscription);
+ MySubscription = newsub;
+
+ MemoryContextSwitchTo(oldctx);
+
+ /* Change synchronous commit according to the user's wishes */
+ SetConfigOption("synchronous_commit", MySubscription->synccommit,
+ PGC_BACKEND, PGC_S_OVERRIDE);
+
+ if (started_tx)
+ CommitTransactionCommand();
+
+ MySubscriptionValid = true;
+}
+
+/*
+ * Callback from subscription syscache invalidation.
+ */
+static void
+subscription_change_cb(Datum arg, int cacheid, uint32 hashvalue)
+{
+ MySubscriptionValid = false;
+}
+
+/* Logical Replication Apply worker entry point */
+void
+ApplyWorkerMain(Datum main_arg)
+{
+ int worker_slot = DatumGetInt32(main_arg);
+ MemoryContext oldctx;
+ char originname[NAMEDATALEN];
+ XLogRecPtr origin_startpos;
+ char *myslotname;
+ WalRcvStreamOptions options;
+
+ /* Attach to slot */
+ logicalrep_worker_attach(worker_slot);
+
+ /* Setup signal handling */
+ pqsignal(SIGHUP, SignalHandlerForConfigReload);
+ pqsignal(SIGTERM, die);
+ BackgroundWorkerUnblockSignals();
+
+ /*
+ * We don't currently need any ResourceOwner in a walreceiver process, but
+ * if we did, we could call CreateAuxProcessResourceOwner here.
+ */
+
+ /* Initialise stats to a sane starting value */
+ MyLogicalRepWorker->last_send_time = MyLogicalRepWorker->last_recv_time =
+ MyLogicalRepWorker->reply_time = GetCurrentTimestamp();
+
+ /* Load the libpq-specific functions */
+ load_file("libpqwalreceiver", false);
+
+ /* Run as replica session replication role. */
+ SetConfigOption("session_replication_role", "replica",
+ PGC_SUSET, PGC_S_OVERRIDE);
+
+ /* Connect to our database. */
+ BackgroundWorkerInitializeConnectionByOid(MyLogicalRepWorker->dbid,
+ MyLogicalRepWorker->userid,
+ 0);
+
+ /*
+ * Set always-secure search path, so malicious users can't redirect user
+ * code (e.g. pg_index.indexprs).
+ */
+ SetConfigOption("search_path", "", PGC_SUSET, PGC_S_OVERRIDE);
+
+ /* Load the subscription into persistent memory context. */
+ ApplyContext = AllocSetContextCreate(TopMemoryContext,
+ "ApplyContext",
+ ALLOCSET_DEFAULT_SIZES);
+ StartTransactionCommand();
+ oldctx = MemoryContextSwitchTo(ApplyContext);
+
+ MySubscription = GetSubscription(MyLogicalRepWorker->subid, true);
+ if (!MySubscription)
+ {
+ ereport(LOG,
+ (errmsg("logical replication apply worker for subscription %u will not "
+ "start because the subscription was removed during startup",
+ MyLogicalRepWorker->subid)));
+ proc_exit(0);
+ }
+
+ MySubscriptionValid = true;
+ MemoryContextSwitchTo(oldctx);
+
+ if (!MySubscription->enabled)
+ {
+ ereport(LOG,
+ (errmsg("logical replication apply worker for subscription \"%s\" will not "
+ "start because the subscription was disabled during startup",
+ MySubscription->name)));
+
+ proc_exit(0);
+ }
+
+ /* Setup synchronous commit according to the user's wishes */
+ SetConfigOption("synchronous_commit", MySubscription->synccommit,
+ PGC_BACKEND, PGC_S_OVERRIDE);
+
+ /* Keep us informed about subscription changes. */
+ CacheRegisterSyscacheCallback(SUBSCRIPTIONOID,
+ subscription_change_cb,
+ (Datum) 0);
+
+ if (am_tablesync_worker())
+ ereport(LOG,
+ (errmsg("logical replication table synchronization worker for subscription \"%s\", table \"%s\" has started",
+ MySubscription->name, get_rel_name(MyLogicalRepWorker->relid))));
+ else
+ ereport(LOG,
+ (errmsg("logical replication apply worker for subscription \"%s\" has started",
+ MySubscription->name)));
+
+ CommitTransactionCommand();
+
+ /* Connect to the origin and start the replication. */
+ elog(DEBUG1, "connecting to publisher using connection string \"%s\"",
+ MySubscription->conninfo);
+
+ if (am_tablesync_worker())
+ {
+ char *syncslotname;
+
+ /* This is a table synchronization worker; run the initial sync. */
+ syncslotname = LogicalRepSyncTableStart(&origin_startpos);
+
+ /* The slot name needs to be allocated in permanent memory context. */
+ oldctx = MemoryContextSwitchTo(ApplyContext);
+ myslotname = pstrdup(syncslotname);
+ MemoryContextSwitchTo(oldctx);
+
+ pfree(syncslotname);
+ }
+ else
+ {
+ /* This is the main apply worker. */
+ RepOriginId originid;
+ TimeLineID startpointTLI;
+ char *err;
+
+ myslotname = MySubscription->slotname;
+
+ /*
+ * This shouldn't happen if the subscription is enabled, but guard
+ * against DDL bugs or manual catalog changes. (libpqwalreceiver will
+ * crash if slot is NULL.)
+ */
+ if (!myslotname)
+ ereport(ERROR,
+ (errmsg("subscription has no replication slot set")));
+
+ /* Setup replication origin tracking. */
+ StartTransactionCommand();
+ snprintf(originname, sizeof(originname), "pg_%u", MySubscription->oid);
+ originid = replorigin_by_name(originname, true);
+ if (!OidIsValid(originid))
+ originid = replorigin_create(originname);
+ replorigin_session_setup(originid);
+ replorigin_session_origin = originid;
+ origin_startpos = replorigin_session_get_progress(false);
+ CommitTransactionCommand();
+
+ LogRepWorkerWalRcvConn = walrcv_connect(MySubscription->conninfo, true,
+ MySubscription->name, &err);
+ if (LogRepWorkerWalRcvConn == NULL)
+ ereport(ERROR,
+ (errmsg("could not connect to the publisher: %s", err)));
+
+ /*
+ * We don't really use the output of identify_system for anything, but it
+ * performs some initialization on the upstream, so we still call it.
+ */
+ (void) walrcv_identify_system(LogRepWorkerWalRcvConn, &startpointTLI);
+ }
+
+ /*
+ * Setup callback for syscache so that we know when something changes in
+ * the subscription relation state.
+ */
+ CacheRegisterSyscacheCallback(SUBSCRIPTIONRELMAP,
+ invalidate_syncing_table_states,
+ (Datum) 0);
+
+ /* Build logical replication streaming options. */
+ options.logical = true;
+ options.startpoint = origin_startpos;
+ options.slotname = myslotname;
+ options.proto.logical.proto_version = LOGICALREP_PROTO_VERSION_NUM;
+ options.proto.logical.publication_names = MySubscription->publications;
+
+ /* Start normal logical streaming replication. */
+ walrcv_startstreaming(LogRepWorkerWalRcvConn, &options);
+
+ /* Run the main loop. */
+ LogicalRepApplyLoop(origin_startpos);
+
+ proc_exit(0);
+}
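+
+/*
+ * Illustrative note (the OID below is hypothetical): for a subscription
+ * whose pg_subscription OID is 16394, the main apply worker above tracks
+ * progress under the replication origin "pg_16394" and resumes streaming
+ * at the LSN recorded for that origin, so transactions applied before a
+ * restart are not requested again.
+ */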
+
+/*
+ * Is current process a logical replication worker?
+ */
+bool
+IsLogicalWorker(void)
+{
+ return MyLogicalRepWorker != NULL;
+}
diff --git a/src/backend/replication/pgoutput/Makefile b/src/backend/replication/pgoutput/Makefile
new file mode 100644
index 0000000..3b41fbc
--- /dev/null
+++ b/src/backend/replication/pgoutput/Makefile
@@ -0,0 +1,32 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for src/backend/replication/pgoutput
+#
+# IDENTIFICATION
+# src/backend/replication/pgoutput/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/replication/pgoutput
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+OBJS = \
+ $(WIN32RES) \
+ pgoutput.o
+PGFILEDESC = "pgoutput - standard logical replication output plugin"
+NAME = pgoutput
+
+all: all-shared-lib
+
+include $(top_srcdir)/src/Makefile.shlib
+
+install: all installdirs install-lib
+
+installdirs: installdirs-lib
+
+uninstall: uninstall-lib
+
+clean distclean maintainer-clean: clean-lib
+ rm -f $(OBJS)
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
new file mode 100644
index 0000000..ddc0171
--- /dev/null
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -0,0 +1,880 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgoutput.c
+ * Logical Replication output plugin
+ *
+ * Copyright (c) 2012-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/pgoutput/pgoutput.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/tupconvert.h"
+#include "catalog/partition.h"
+#include "catalog/pg_publication.h"
+#include "fmgr.h"
+#include "replication/logical.h"
+#include "replication/logicalproto.h"
+#include "replication/origin.h"
+#include "replication/pgoutput.h"
+#include "utils/int8.h"
+#include "utils/inval.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/syscache.h"
+#include "utils/varlena.h"
+
+PG_MODULE_MAGIC;
+
+extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);
+
+static void pgoutput_startup(LogicalDecodingContext *ctx,
+ OutputPluginOptions *opt, bool is_init);
+static void pgoutput_shutdown(LogicalDecodingContext *ctx);
+static void pgoutput_begin_txn(LogicalDecodingContext *ctx,
+ ReorderBufferTXN *txn);
+static void pgoutput_commit_txn(LogicalDecodingContext *ctx,
+ ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
+static void pgoutput_change(LogicalDecodingContext *ctx,
+ ReorderBufferTXN *txn, Relation rel,
+ ReorderBufferChange *change);
+static void pgoutput_truncate(LogicalDecodingContext *ctx,
+ ReorderBufferTXN *txn, int nrelations, Relation relations[],
+ ReorderBufferChange *change);
+static bool pgoutput_origin_filter(LogicalDecodingContext *ctx,
+ RepOriginId origin_id);
+
+static bool publications_valid;
+
+static List *LoadPublications(List *pubnames);
+static void publication_invalidation_cb(Datum arg, int cacheid,
+ uint32 hashvalue);
+static void send_relation_and_attrs(Relation relation, LogicalDecodingContext *ctx);
+
+/*
+ * Entry in the map used to remember which relation schemas we sent.
+ *
+ * The schema_sent flag determines if the current schema record for the
+ * relation (and for its ancestor if publish_as_relid is set) was already sent
+ * to the subscriber (in which case we don't need to send it again).
+ *
+ * For partitions, 'pubactions' considers not only the table's own
+ * publications, but also those of all of its ancestors.
+ */
+typedef struct RelationSyncEntry
+{
+ Oid relid; /* relation oid */
+
+ bool schema_sent;
+
+ bool replicate_valid;
+ PublicationActions pubactions;
+
+ /*
+ * OID of the relation to publish changes as. For a partition, this may
+ * be set to one of its ancestors whose schema will be used when
+ * replicating changes, if publish_via_partition_root is set for the
+ * publication.
+ */
+ Oid publish_as_relid;
+
+ /*
+ * Map used when replicating using an ancestor's schema to convert tuples
+ * Map used when replicating using an ancestor's schema to convert tuples
+ * from the partition's type to the ancestor's; NULL if publish_as_relid
+ * is the same as 'relid', or if unnecessary because the partition and
+ * the ancestor have identical TupleDescs.
+ TupleConversionMap *map;
+} RelationSyncEntry;
+
+/* Map used to remember which relation schemas we sent. */
+static HTAB *RelationSyncCache = NULL;
+
+static void init_rel_sync_cache(MemoryContext decoding_context);
+static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data, Oid relid);
+static void rel_sync_cache_relation_cb(Datum arg, Oid relid);
+static void rel_sync_cache_publication_cb(Datum arg, int cacheid,
+ uint32 hashvalue);
+
+/*
+ * Specify output plugin callbacks
+ */
+void
+_PG_output_plugin_init(OutputPluginCallbacks *cb)
+{
+ AssertVariableIsOfType(&_PG_output_plugin_init, LogicalOutputPluginInit);
+
+ cb->startup_cb = pgoutput_startup;
+ cb->begin_cb = pgoutput_begin_txn;
+ cb->change_cb = pgoutput_change;
+ cb->truncate_cb = pgoutput_truncate;
+ cb->commit_cb = pgoutput_commit_txn;
+ cb->filter_by_origin_cb = pgoutput_origin_filter;
+ cb->shutdown_cb = pgoutput_shutdown;
+}
+
+static void
+parse_output_parameters(List *options, uint32 *protocol_version,
+ List **publication_names)
+{
+ ListCell *lc;
+ bool protocol_version_given = false;
+ bool publication_names_given = false;
+
+ foreach(lc, options)
+ {
+ DefElem *defel = (DefElem *) lfirst(lc);
+
+ Assert(defel->arg == NULL || IsA(defel->arg, String));
+
+ /* Check each param, whether or not we recognize it */
+ if (strcmp(defel->defname, "proto_version") == 0)
+ {
+ int64 parsed;
+
+ if (protocol_version_given)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+ protocol_version_given = true;
+
+ if (!scanint8(strVal(defel->arg), true, &parsed))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid proto_version")));
+
+ if (parsed > PG_UINT32_MAX || parsed < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("proto_version \"%s\" out of range",
+ strVal(defel->arg))));
+
+ *protocol_version = (uint32) parsed;
+ }
+ else if (strcmp(defel->defname, "publication_names") == 0)
+ {
+ if (publication_names_given)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+ publication_names_given = true;
+
+ if (!SplitIdentifierString(strVal(defel->arg), ',',
+ publication_names))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_NAME),
+ errmsg("invalid publication_names syntax")));
+ }
+ else
+ elog(ERROR, "unrecognized pgoutput option: %s", defel->defname);
+ }
+}
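+
+/*
+ * For example (slot and publication names below are placeholders), a
+ * subscriber supplies these options via the walsender command
+ *
+ *     START_REPLICATION SLOT "sub1" LOGICAL 0/0
+ *         (proto_version '1', publication_names '"mypub"')
+ */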
+
+/*
+ * Initialize this plugin
+ */
+static void
+pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
+ bool is_init)
+{
+ PGOutputData *data = palloc0(sizeof(PGOutputData));
+
+ /* Create our memory context for private allocations. */
+ data->context = AllocSetContextCreate(ctx->context,
+ "logical replication output context",
+ ALLOCSET_DEFAULT_SIZES);
+
+ ctx->output_plugin_private = data;
+
+ /* This plugin uses a binary protocol. */
+ opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT;
+
+ /*
+ * This is replication start and not slot initialization.
+ *
+ * Parse and validate options passed by the client.
+ */
+ if (!is_init)
+ {
+ /* Parse the params and ERROR if we see any we don't recognize */
+ parse_output_parameters(ctx->output_plugin_options,
+ &data->protocol_version,
+ &data->publication_names);
+
+ /* Check if we support requested protocol */
+ if (data->protocol_version > LOGICALREP_PROTO_VERSION_NUM)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("client sent proto_version=%d but we only support protocol %d or lower",
+ data->protocol_version, LOGICALREP_PROTO_VERSION_NUM)));
+
+ if (data->protocol_version < LOGICALREP_PROTO_MIN_VERSION_NUM)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("client sent proto_version=%d but we only support protocol %d or higher",
+ data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM)));
+
+ if (list_length(data->publication_names) < 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("publication_names parameter missing")));
+
+ /* Init publication state. */
+ data->publications = NIL;
+ publications_valid = false;
+ CacheRegisterSyscacheCallback(PUBLICATIONOID,
+ publication_invalidation_cb,
+ (Datum) 0);
+
+ /* Initialize relation schema cache. */
+ init_rel_sync_cache(CacheMemoryContext);
+ }
+}
+
+/*
+ * BEGIN callback
+ */
+static void
+pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
+{
+ bool send_replication_origin = txn->origin_id != InvalidRepOriginId;
+
+ OutputPluginPrepareWrite(ctx, !send_replication_origin);
+ logicalrep_write_begin(ctx->out, txn);
+
+ if (send_replication_origin)
+ {
+ char *origin;
+
+ /* Message boundary */
+ OutputPluginWrite(ctx, false);
+ OutputPluginPrepareWrite(ctx, true);
+
+ /*----------
+ * XXX: which behaviour do we want here?
+ *
+ * Alternatives:
+ * - don't send origin message if origin name not found
+ * (that's what we do now)
+ * - throw error - that will break replication, not good
+ * - send some special "unknown" origin
+ *----------
+ */
+ if (replorigin_by_oid(txn->origin_id, true, &origin))
+ logicalrep_write_origin(ctx->out, origin, txn->origin_lsn);
+ }
+
+ OutputPluginWrite(ctx, true);
+}
+
+/*
+ * COMMIT callback
+ */
+static void
+pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
+ XLogRecPtr commit_lsn)
+{
+ OutputPluginUpdateProgress(ctx);
+
+ OutputPluginPrepareWrite(ctx, true);
+ logicalrep_write_commit(ctx->out, txn, commit_lsn);
+ OutputPluginWrite(ctx, true);
+}
+
+/*
+ * Write the current schema of the relation and its ancestor (if any) if not
+ * done yet.
+ */
+static void
+maybe_send_schema(LogicalDecodingContext *ctx,
+ Relation relation, RelationSyncEntry *relentry)
+{
+ /* Nothing to do if we already sent the schema. */
+ if (relentry->schema_sent)
+ return;
+
+ /*
+ * Nope, so send the schema. If the changes will be published using an
+ * ancestor's schema, not the relation's own, send that ancestor's schema
+ * before sending the relation's own (XXX - maybe sending only the former
+ * suffices?). This is also a good place to set up the map that will be
+ * used to convert the relation's tuples into the ancestor's format, if
+ * needed.
+ */
+ if (relentry->publish_as_relid != RelationGetRelid(relation))
+ {
+ Relation ancestor = RelationIdGetRelation(relentry->publish_as_relid);
+ TupleDesc indesc = RelationGetDescr(relation);
+ TupleDesc outdesc = RelationGetDescr(ancestor);
+ MemoryContext oldctx;
+
+ /* Map must live as long as the session does. */
+ oldctx = MemoryContextSwitchTo(CacheMemoryContext);
+
+ /*
+ * Make copies of the TupleDescs that will live as long as the map
+ * does before putting into the map.
+ */
+ indesc = CreateTupleDescCopy(indesc);
+ outdesc = CreateTupleDescCopy(outdesc);
+ relentry->map = convert_tuples_by_name(indesc, outdesc);
+ if (relentry->map == NULL)
+ {
+ /* Map not necessary, so free the TupleDescs too. */
+ FreeTupleDesc(indesc);
+ FreeTupleDesc(outdesc);
+ }
+
+ MemoryContextSwitchTo(oldctx);
+ send_relation_and_attrs(ancestor, ctx);
+ RelationClose(ancestor);
+ }
+
+ send_relation_and_attrs(relation, ctx);
+ relentry->schema_sent = true;
+}
+
+/*
+ * Send a relation's schema, preceded by type info for any user-defined
+ * attribute types it uses.
+ */
+static void
+send_relation_and_attrs(Relation relation, LogicalDecodingContext *ctx)
+{
+ TupleDesc desc = RelationGetDescr(relation);
+ int i;
+
+ /*
+ * Write out type info if needed. We do that only for user-created types.
+ * We use FirstGenbkiObjectId as the cutoff, so that we only consider
+ * objects with hand-assigned OIDs to be "built in", not for instance any
+ * function or type defined in the information_schema. This is important
+ * because only hand-assigned OIDs can be expected to remain stable across
+ * major versions.
+ */
+ for (i = 0; i < desc->natts; i++)
+ {
+ Form_pg_attribute att = TupleDescAttr(desc, i);
+
+ if (att->attisdropped || att->attgenerated)
+ continue;
+
+ if (att->atttypid < FirstGenbkiObjectId)
+ continue;
+
+ OutputPluginPrepareWrite(ctx, false);
+ logicalrep_write_typ(ctx->out, att->atttypid);
+ OutputPluginWrite(ctx, false);
+ }
+
+ OutputPluginPrepareWrite(ctx, false);
+ logicalrep_write_rel(ctx->out, relation);
+ OutputPluginWrite(ctx, false);
+}
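+
+/*
+ * Illustrative note: the messages written above arrive at the subscriber as
+ * 'Y' (type) and 'R' (relation) protocol messages, which apply_dispatch()
+ * in worker.c routes to apply_handle_type() and apply_handle_relation().
+ */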
+
+/*
+ * Send the decoded DML change over the wire.
+ */
+static void
+pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
+ Relation relation, ReorderBufferChange *change)
+{
+ PGOutputData *data = (PGOutputData *) ctx->output_plugin_private;
+ MemoryContext old;
+ RelationSyncEntry *relentry;
+ Relation ancestor = NULL;
+
+ if (!is_publishable_relation(relation))
+ return;
+
+ relentry = get_rel_sync_entry(data, RelationGetRelid(relation));
+
+ /* First check the table filter */
+ switch (change->action)
+ {
+ case REORDER_BUFFER_CHANGE_INSERT:
+ if (!relentry->pubactions.pubinsert)
+ return;
+ break;
+ case REORDER_BUFFER_CHANGE_UPDATE:
+ if (!relentry->pubactions.pubupdate)
+ return;
+ break;
+ case REORDER_BUFFER_CHANGE_DELETE:
+ if (!relentry->pubactions.pubdelete)
+ return;
+ break;
+ default:
+ Assert(false);
+ }
+
+ /* Avoid leaking memory by using and resetting our own context */
+ old = MemoryContextSwitchTo(data->context);
+
+ maybe_send_schema(ctx, relation, relentry);
+
+ /* Send the data */
+ switch (change->action)
+ {
+ case REORDER_BUFFER_CHANGE_INSERT:
+ {
+ HeapTuple tuple = &change->data.tp.newtuple->tuple;
+
+ /* Switch relation if publishing via root. */
+ if (relentry->publish_as_relid != RelationGetRelid(relation))
+ {
+ Assert(relation->rd_rel->relispartition);
+ ancestor = RelationIdGetRelation(relentry->publish_as_relid);
+ relation = ancestor;
+ /* Convert tuple if needed. */
+ if (relentry->map)
+ tuple = execute_attr_map_tuple(tuple, relentry->map);
+ }
+
+ OutputPluginPrepareWrite(ctx, true);
+ logicalrep_write_insert(ctx->out, relation, tuple);
+ OutputPluginWrite(ctx, true);
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_UPDATE:
+ {
+ HeapTuple oldtuple = change->data.tp.oldtuple ?
+ &change->data.tp.oldtuple->tuple : NULL;
+ HeapTuple newtuple = &change->data.tp.newtuple->tuple;
+
+ /* Switch relation if publishing via root. */
+ if (relentry->publish_as_relid != RelationGetRelid(relation))
+ {
+ Assert(relation->rd_rel->relispartition);
+ ancestor = RelationIdGetRelation(relentry->publish_as_relid);
+ relation = ancestor;
+ /* Convert tuples if needed. */
+ if (relentry->map)
+ {
+ if (oldtuple)
+ oldtuple = execute_attr_map_tuple(oldtuple,
+ relentry->map);
+ newtuple = execute_attr_map_tuple(newtuple,
+ relentry->map);
+ }
+ }
+
+ OutputPluginPrepareWrite(ctx, true);
+ logicalrep_write_update(ctx->out, relation, oldtuple, newtuple);
+ OutputPluginWrite(ctx, true);
+ break;
+ }
+ case REORDER_BUFFER_CHANGE_DELETE:
+ if (change->data.tp.oldtuple)
+ {
+ HeapTuple oldtuple = &change->data.tp.oldtuple->tuple;
+
+ /* Switch relation if publishing via root. */
+ if (relentry->publish_as_relid != RelationGetRelid(relation))
+ {
+ Assert(relation->rd_rel->relispartition);
+ ancestor = RelationIdGetRelation(relentry->publish_as_relid);
+ relation = ancestor;
+ /* Convert tuple if needed. */
+ if (relentry->map)
+ oldtuple = execute_attr_map_tuple(oldtuple, relentry->map);
+ }
+
+ OutputPluginPrepareWrite(ctx, true);
+ logicalrep_write_delete(ctx->out, relation, oldtuple);
+ OutputPluginWrite(ctx, true);
+ }
+ else
+ elog(DEBUG1, "didn't send DELETE change because of missing oldtuple");
+ break;
+ default:
+ Assert(false);
+ }
+
+ if (RelationIsValid(ancestor))
+ {
+ RelationClose(ancestor);
+ ancestor = NULL;
+ }
+
+ /* Cleanup */
+ MemoryContextSwitchTo(old);
+ MemoryContextReset(data->context);
+}
+
+static void
+pgoutput_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
+ int nrelations, Relation relations[], ReorderBufferChange *change)
+{
+ PGOutputData *data = (PGOutputData *) ctx->output_plugin_private;
+ MemoryContext old;
+ RelationSyncEntry *relentry;
+ int i;
+ int nrelids;
+ Oid *relids;
+
+ old = MemoryContextSwitchTo(data->context);
+
+ relids = palloc0(nrelations * sizeof(Oid));
+ nrelids = 0;
+
+ for (i = 0; i < nrelations; i++)
+ {
+ Relation relation = relations[i];
+ Oid relid = RelationGetRelid(relation);
+
+ if (!is_publishable_relation(relation))
+ continue;
+
+ relentry = get_rel_sync_entry(data, relid);
+
+ if (!relentry->pubactions.pubtruncate)
+ continue;
+
+ /*
+ * Don't send partitions if the publication wants to send only the
+ * root tables through it.
+ */
+ if (relation->rd_rel->relispartition &&
+ relentry->publish_as_relid != relid)
+ continue;
+
+ relids[nrelids++] = relid;
+ maybe_send_schema(ctx, relation, relentry);
+ }
+
+ if (nrelids > 0)
+ {
+ OutputPluginPrepareWrite(ctx, true);
+ logicalrep_write_truncate(ctx->out,
+ nrelids,
+ relids,
+ change->data.truncate.cascade,
+ change->data.truncate.restart_seqs);
+ OutputPluginWrite(ctx, true);
+ }
+
+ MemoryContextSwitchTo(old);
+ MemoryContextReset(data->context);
+}
+
+/*
+ * Currently we always forward.
+ */
+static bool
+pgoutput_origin_filter(LogicalDecodingContext *ctx,
+ RepOriginId origin_id)
+{
+ return false;
+}
+
+/*
+ * Shutdown the output plugin.
+ *
+ * Note that we don't need to clean up data->context, as it's a child
+ * context of ctx->context and so will be cleaned up by the logical
+ * decoding machinery.
+ */
+static void
+pgoutput_shutdown(LogicalDecodingContext *ctx)
+{
+ if (RelationSyncCache)
+ {
+ hash_destroy(RelationSyncCache);
+ RelationSyncCache = NULL;
+ }
+}
+
+/*
+ * Load publications from the list of publication names.
+ */
+static List *
+LoadPublications(List *pubnames)
+{
+ List *result = NIL;
+ ListCell *lc;
+
+ foreach(lc, pubnames)
+ {
+ char *pubname = (char *) lfirst(lc);
+ Publication *pub = GetPublicationByName(pubname, false);
+
+ result = lappend(result, pub);
+ }
+
+ return result;
+}
+
+/*
+ * Publication cache invalidation callback.
+ */
+static void
+publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
+{
+ publications_valid = false;
+
+ /*
+ * Also invalidate per-relation cache so that next time the filtering info
+ * is checked it will be updated with the new publication settings.
+ */
+ rel_sync_cache_publication_cb(arg, cacheid, hashvalue);
+}
+
+/*
+ * Initialize the relation schema sync cache for a decoding session.
+ *
+ * The hash table is destroyed at the end of a decoding session. While
+ * relcache invalidations still exist and will still be invoked, they
+ * will just see the null hash table global and take no action.
+ */
+static void
+init_rel_sync_cache(MemoryContext cachectx)
+{
+ HASHCTL ctl;
+ MemoryContext old_ctxt;
+
+ if (RelationSyncCache != NULL)
+ return;
+
+ /* Make a new hash table for the cache */
+ MemSet(&ctl, 0, sizeof(ctl));
+ ctl.keysize = sizeof(Oid);
+ ctl.entrysize = sizeof(RelationSyncEntry);
+ ctl.hcxt = cachectx;
+
+ old_ctxt = MemoryContextSwitchTo(cachectx);
+ RelationSyncCache = hash_create("logical replication output relation cache",
+ 128, &ctl,
+ HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
+ (void) MemoryContextSwitchTo(old_ctxt);
+
+ Assert(RelationSyncCache != NULL);
+
+ CacheRegisterRelcacheCallback(rel_sync_cache_relation_cb, (Datum) 0);
+ CacheRegisterSyscacheCallback(PUBLICATIONRELMAP,
+ rel_sync_cache_publication_cb,
+ (Datum) 0);
+}
+
+/*
+ * Find or create an entry in the relation schema cache.
+ *
+ * This looks up the publications that the given relation is directly or
+ * indirectly part of (the latter if it's really the relation's ancestor that
+ * is part of a publication) and fills in the found entry with information
+ * about which operations to publish and whether to use an ancestor's schema
+ * when publishing.
+ */
+static RelationSyncEntry *
+get_rel_sync_entry(PGOutputData *data, Oid relid)
+{
+ RelationSyncEntry *entry;
+ bool am_partition = get_rel_relispartition(relid);
+ char relkind = get_rel_relkind(relid);
+ bool found;
+ MemoryContext oldctx;
+
+ Assert(RelationSyncCache != NULL);
+
+	/* Find cached relation-sync info, creating it if not found */
+ oldctx = MemoryContextSwitchTo(CacheMemoryContext);
+ entry = (RelationSyncEntry *) hash_search(RelationSyncCache,
+ (void *) &relid,
+ HASH_ENTER, &found);
+ MemoryContextSwitchTo(oldctx);
+ Assert(entry != NULL);
+
+ /* Not found means schema wasn't sent */
+ if (!found)
+ {
+		/*
+		 * Immediately make the new entry valid enough to satisfy the
+		 * invalidation callbacks.
+		 */
+ entry->schema_sent = false;
+ entry->replicate_valid = false;
+ entry->pubactions.pubinsert = entry->pubactions.pubupdate =
+ entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false;
+ entry->publish_as_relid = InvalidOid;
+ entry->map = NULL; /* will be set by maybe_send_schema() if needed */
+ }
+
+ if (!entry->replicate_valid)
+ {
+ List *pubids = GetRelationPublications(relid);
+ ListCell *lc;
+ Oid publish_as_relid = relid;
+
+ /* Reload publications if needed before use. */
+ if (!publications_valid)
+ {
+ oldctx = MemoryContextSwitchTo(CacheMemoryContext);
+ if (data->publications)
+ list_free_deep(data->publications);
+
+ data->publications = LoadPublications(data->publication_names);
+ MemoryContextSwitchTo(oldctx);
+ publications_valid = true;
+ }
+
+		/*
+		 * Build the publication actions. We can't use the set provided by
+		 * the relcache, because the relcache considers all publications the
+		 * given relation is in, whereas here we only need to consider the
+		 * ones that the subscriber requested.
+		 */
+ entry->pubactions.pubinsert = entry->pubactions.pubupdate =
+ entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false;
+
+ foreach(lc, data->publications)
+ {
+ Publication *pub = lfirst(lc);
+ bool publish = false;
+
+ if (pub->alltables)
+ {
+ publish = true;
+ if (pub->pubviaroot && am_partition)
+ publish_as_relid = llast_oid(get_partition_ancestors(relid));
+ }
+
+ if (!publish)
+ {
+ bool ancestor_published = false;
+
+ /*
+ * For a partition, check if any of the ancestors are
+ * published. If so, note down the topmost ancestor that is
+ * published via this publication, which will be used as the
+ * relation via which to publish the partition's changes.
+ */
+ if (am_partition)
+ {
+ List *ancestors = get_partition_ancestors(relid);
+ ListCell *lc2;
+
+					/*
+					 * Find the "topmost" ancestor that is in this
+					 * publication. (get_partition_ancestors() returns the
+					 * list bottom-up, so the last published ancestor
+					 * assigned below is the topmost one.)
+					 */
+ foreach(lc2, ancestors)
+ {
+ Oid ancestor = lfirst_oid(lc2);
+
+ if (list_member_oid(GetRelationPublications(ancestor),
+ pub->oid))
+ {
+ ancestor_published = true;
+ if (pub->pubviaroot)
+ publish_as_relid = ancestor;
+ }
+ }
+ }
+
+ if (list_member_oid(pubids, pub->oid) || ancestor_published)
+ publish = true;
+ }
+
+			/*
+			 * Don't publish changes for a partitioned table itself, because
+			 * publishing its partitions' changes suffices, except when
+			 * pubviaroot is set, in which case the partitions' changes are
+			 * published under the root, so the root's entry must carry the
+			 * publish actions.
+			 */
+ if (publish &&
+ (relkind != RELKIND_PARTITIONED_TABLE || pub->pubviaroot))
+ {
+ entry->pubactions.pubinsert |= pub->pubactions.pubinsert;
+ entry->pubactions.pubupdate |= pub->pubactions.pubupdate;
+ entry->pubactions.pubdelete |= pub->pubactions.pubdelete;
+ entry->pubactions.pubtruncate |= pub->pubactions.pubtruncate;
+ }
+
+ if (entry->pubactions.pubinsert && entry->pubactions.pubupdate &&
+ entry->pubactions.pubdelete && entry->pubactions.pubtruncate)
+ break;
+ }
+
+ list_free(pubids);
+
+ entry->publish_as_relid = publish_as_relid;
+ entry->replicate_valid = true;
+ }
+
+ return entry;
+}
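+
+/*
+ * Worked example (illustrative, not from the source): for partition "t1"
+ * of partitioned table "t", and a publication created with
+ * CREATE PUBLICATION p FOR TABLE t WITH (publish_via_partition_root = true),
+ * the entry built above for t1 gets publish_as_relid = t's OID, so t1's
+ * changes are published as if they were made to t.
+ */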
+
+/*
+ * Relcache invalidation callback
+ */
+static void
+rel_sync_cache_relation_cb(Datum arg, Oid relid)
+{
+ RelationSyncEntry *entry;
+
+	/*
+	 * We can get here if the plugin was used via the SQL interface: the
+	 * RelationSyncCache is destroyed when the decoding finishes, but there
+	 * is no way to unregister the relcache invalidation callback.
+	 */
+ if (RelationSyncCache == NULL)
+ return;
+
+ /*
+ * Nobody keeps pointers to entries in this hash table around outside
+ * logical decoding callback calls - but invalidation events can come in
+ * *during* a callback if we access the relcache in the callback. Because
+ * of that we must mark the cache entry as invalid but not remove it from
+ * the hash while it could still be referenced, then prune it at a later
+ * safe point.
+ *
+ * Getting invalidations for relations that aren't in the table is
+ * entirely normal, since there's no way to unregister for an invalidation
+ * event. So we don't care if it's found or not.
+ */
+ entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid,
+ HASH_FIND, NULL);
+
+ /*
+ * Reset schema sent status as the relation definition may have changed.
+ * Also, free any objects that depended on the earlier definition.
+ */
+ if (entry != NULL)
+ {
+ entry->schema_sent = false;
+ if (entry->map)
+ {
+ /*
+ * Must free the TupleDescs contained in the map explicitly,
+ * because free_conversion_map() doesn't.
+ */
+ FreeTupleDesc(entry->map->indesc);
+ FreeTupleDesc(entry->map->outdesc);
+ free_conversion_map(entry->map);
+ }
+ entry->map = NULL;
+ }
+}
+
+/*
+ * Publication relation map syscache invalidation callback
+ */
+static void
+rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
+{
+ HASH_SEQ_STATUS status;
+ RelationSyncEntry *entry;
+
+	/*
+	 * We can get here if the plugin was used via the SQL interface: the
+	 * RelationSyncCache is destroyed when the decoding finishes, but there
+	 * is no way to unregister the syscache invalidation callback.
+	 */
+ if (RelationSyncCache == NULL)
+ return;
+
+	/*
+	 * There is no way to find which entry in our cache the hash value
+	 * belongs to, so we have to mark the whole cache as invalid.
+	 */
+ hash_seq_init(&status, RelationSyncCache);
+ while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
+ entry->replicate_valid = false;
+}
diff --git a/src/backend/replication/repl_gram.c b/src/backend/replication/repl_gram.c
new file mode 100644
index 0000000..93f2f8a
--- /dev/null
+++ b/src/backend/replication/repl_gram.c
@@ -0,0 +1,1998 @@
+/* A Bison parser, made by GNU Bison 3.3.2. */
+
+/* Bison implementation for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2019 Free Software Foundation,
+ Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* C LALR(1) parser skeleton written by Richard Stallman, by
+ simplifying the original so-called "semantic" parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Undocumented macros, especially those whose name start with YY_,
+ are private implementation details. Do not rely on them. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Bison version. */
+#define YYBISON_VERSION "3.3.2"
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 0
+
+/* Push parsers. */
+#define YYPUSH 0
+
+/* Pull parsers. */
+#define YYPULL 1
+
+
+/* Substitute the variable and function names. */
+#define yyparse replication_yyparse
+#define yylex replication_yylex
+#define yyerror replication_yyerror
+#define yydebug replication_yydebug
+#define yynerrs replication_yynerrs
+
+#define yylval replication_yylval
+#define yychar replication_yychar
+
+/* First part of user prologue. */
+#line 1 "repl_gram.y" /* yacc.c:337 */
+
+/*-------------------------------------------------------------------------
+ *
+ * repl_gram.y - Parser for the replication commands
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/repl_gram.y
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/xlogdefs.h"
+#include "nodes/makefuncs.h"
+#include "nodes/replnodes.h"
+#include "replication/walsender.h"
+#include "replication/walsender_private.h"
+
+
+/* Result of the parsing is returned here */
+Node *replication_parse_result;
+
+static SQLCmd *make_sqlcmd(void);
+
+
+/*
+ * Bison doesn't allocate anything that needs to live across parser calls,
+ * so we can easily have it use palloc instead of malloc. This prevents
+ * memory leaks if we error out during parsing. Note this only works with
+ * bison >= 2.0. However, in bison 1.875 the default is to use alloca()
+ * if possible, so there's not really much of a problem anyhow, at least
+ * if you're building with gcc.
+ */
+#define YYMALLOC palloc
+#define YYFREE pfree
+
+
+#line 121 "repl_gram.c" /* yacc.c:337 */
+# ifndef YY_NULLPTR
+# if defined __cplusplus
+# if 201103L <= __cplusplus
+# define YY_NULLPTR nullptr
+# else
+# define YY_NULLPTR 0
+# endif
+# else
+# define YY_NULLPTR ((void*)0)
+# endif
+# endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 0
+#endif
+
+
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 0
+#endif
+#if YYDEBUG
+extern int replication_yydebug;
+#endif
+
+/* Token type. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ enum yytokentype
+ {
+ SCONST = 258,
+ IDENT = 259,
+ UCONST = 260,
+ RECPTR = 261,
+ T_WORD = 262,
+ K_BASE_BACKUP = 263,
+ K_IDENTIFY_SYSTEM = 264,
+ K_SHOW = 265,
+ K_START_REPLICATION = 266,
+ K_CREATE_REPLICATION_SLOT = 267,
+ K_DROP_REPLICATION_SLOT = 268,
+ K_TIMELINE_HISTORY = 269,
+ K_LABEL = 270,
+ K_PROGRESS = 271,
+ K_FAST = 272,
+ K_WAIT = 273,
+ K_NOWAIT = 274,
+ K_MAX_RATE = 275,
+ K_WAL = 276,
+ K_TABLESPACE_MAP = 277,
+ K_NOVERIFY_CHECKSUMS = 278,
+ K_TIMELINE = 279,
+ K_PHYSICAL = 280,
+ K_LOGICAL = 281,
+ K_SLOT = 282,
+ K_RESERVE_WAL = 283,
+ K_TEMPORARY = 284,
+ K_EXPORT_SNAPSHOT = 285,
+ K_NOEXPORT_SNAPSHOT = 286,
+ K_USE_SNAPSHOT = 287,
+ K_MANIFEST = 288,
+ K_MANIFEST_CHECKSUMS = 289
+ };
+#endif
+
+/* Value type. */
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+
+union YYSTYPE
+{
+#line 47 "repl_gram.y" /* yacc.c:352 */
+
+ char *str;
+ bool boolval;
+ uint32 uintval;
+
+ XLogRecPtr recptr;
+ Node *node;
+ List *list;
+ DefElem *defelt;
+
+#line 207 "repl_gram.c" /* yacc.c:352 */
+};
+
+typedef union YYSTYPE YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+
+extern YYSTYPE replication_yylval;
+
+int replication_yyparse (void);
+
+
+
+
+
+#ifdef short
+# undef short
+#endif
+
+#ifdef YYTYPE_UINT8
+typedef YYTYPE_UINT8 yytype_uint8;
+#else
+typedef unsigned char yytype_uint8;
+#endif
+
+#ifdef YYTYPE_INT8
+typedef YYTYPE_INT8 yytype_int8;
+#else
+typedef signed char yytype_int8;
+#endif
+
+#ifdef YYTYPE_UINT16
+typedef YYTYPE_UINT16 yytype_uint16;
+#else
+typedef unsigned short yytype_uint16;
+#endif
+
+#ifdef YYTYPE_INT16
+typedef YYTYPE_INT16 yytype_int16;
+#else
+typedef short yytype_int16;
+#endif
+
+#ifndef YYSIZE_T
+# ifdef __SIZE_TYPE__
+# define YYSIZE_T __SIZE_TYPE__
+# elif defined size_t
+# define YYSIZE_T size_t
+# elif ! defined YYSIZE_T
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# else
+# define YYSIZE_T unsigned
+# endif
+#endif
+
+#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
+
+#ifndef YY_
+# if defined YYENABLE_NLS && YYENABLE_NLS
+# if ENABLE_NLS
+# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
+# endif
+# endif
+# ifndef YY_
+# define YY_(Msgid) Msgid
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE
+# if (defined __GNUC__ \
+ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \
+ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C
+# define YY_ATTRIBUTE(Spec) __attribute__(Spec)
+# else
+# define YY_ATTRIBUTE(Spec) /* empty */
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_PURE
+# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__))
+#endif
+
+#ifndef YY_ATTRIBUTE_UNUSED
+# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))
+#endif
+
+/* Suppress unused-variable warnings by "using" E. */
+#if ! defined lint || defined __GNUC__
+# define YYUSE(E) ((void) (E))
+#else
+# define YYUSE(E) /* empty */
+#endif
+
+#if defined __GNUC__ && ! defined __ICC && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
+/* Suppress an incorrect diagnostic about yylval being uninitialized. */
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\
+ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
+ _Pragma ("GCC diagnostic pop")
+#else
+# define YY_INITIAL_VALUE(Value) Value
+#endif
+#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END
+#endif
+#ifndef YY_INITIAL_VALUE
+# define YY_INITIAL_VALUE(Value) /* Nothing. */
+#endif
+
+
+#if ! defined yyoverflow || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# elif defined __BUILTIN_VA_ARG_INCR
+# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
+# elif defined _AIX
+# define YYSTACK_ALLOC __alloca
+# elif defined _MSC_VER
+# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
+# define alloca _alloca
+# else
+# define YYSTACK_ALLOC alloca
+# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+ /* Use EXIT_SUCCESS as a witness for stdlib.h. */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's 'empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
+# endif
+# else
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
+# endif
+# if (defined __cplusplus && ! defined EXIT_SUCCESS \
+ && ! ((defined YYMALLOC || defined malloc) \
+ && (defined YYFREE || defined free)))
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if ! defined malloc && ! defined EXIT_SUCCESS
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if ! defined free && ! defined EXIT_SUCCESS
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# endif
+#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
+
+
+#if (! defined yyoverflow \
+ && (! defined __cplusplus \
+ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ yytype_int16 yyss_alloc;
+ YYSTYPE yyvs_alloc;
+};
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+ N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ + YYSTACK_GAP_MAXIMUM)
+
+# define YYCOPY_NEEDED 1
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
+ Stack = &yyptr->Stack_alloc; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (0)
+
+#endif
+
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from SRC to DST. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined __GNUC__ && 1 < __GNUC__
+# define YYCOPY(Dst, Src, Count) \
+ __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src)))
+# else
+# define YYCOPY(Dst, Src, Count) \
+ do \
+ { \
+ YYSIZE_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (Dst)[yyi] = (Src)[yyi]; \
+ } \
+ while (0)
+# endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 28
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 53
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 40
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 26
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 59
+/* YYNSTATES -- Number of states. */
+#define YYNSTATES 80
+
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 289
+
+/* YYTRANSLATE(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex, with out-of-bounds checking. */
+#define YYTRANSLATE(YYX) \
+ ((unsigned) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
+
+/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex. */
+static const yytype_uint8 yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 37, 38, 2, 2, 39, 2, 36, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 35,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34
+};
+
+#if YYDEBUG
+ /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
+static const yytype_uint16 yyrline[] =
+{
+ 0, 110, 110, 116, 117, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 136, 146, 153, 154, 164, 173,
+ 176, 180, 185, 190, 195, 200, 205, 210, 215, 220,
+ 225, 234, 245, 259, 262, 266, 271, 276, 281, 290,
+ 298, 312, 327, 342, 359, 360, 364, 365, 369, 372,
+ 376, 384, 389, 390, 394, 398, 405, 412, 413, 417
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE || 0
+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "SCONST", "IDENT", "UCONST", "RECPTR",
+ "T_WORD", "K_BASE_BACKUP", "K_IDENTIFY_SYSTEM", "K_SHOW",
+ "K_START_REPLICATION", "K_CREATE_REPLICATION_SLOT",
+ "K_DROP_REPLICATION_SLOT", "K_TIMELINE_HISTORY", "K_LABEL", "K_PROGRESS",
+ "K_FAST", "K_WAIT", "K_NOWAIT", "K_MAX_RATE", "K_WAL",
+ "K_TABLESPACE_MAP", "K_NOVERIFY_CHECKSUMS", "K_TIMELINE", "K_PHYSICAL",
+ "K_LOGICAL", "K_SLOT", "K_RESERVE_WAL", "K_TEMPORARY",
+ "K_EXPORT_SNAPSHOT", "K_NOEXPORT_SNAPSHOT", "K_USE_SNAPSHOT",
+ "K_MANIFEST", "K_MANIFEST_CHECKSUMS", "';'", "'.'", "'('", "')'", "','",
+ "$accept", "firstcmd", "opt_semicolon", "command", "identify_system",
+ "show", "var_name", "base_backup", "base_backup_opt_list",
+ "base_backup_opt", "create_replication_slot", "create_slot_opt_list",
+ "create_slot_opt", "drop_replication_slot", "start_replication",
+ "start_logical_replication", "timeline_history", "opt_physical",
+ "opt_temporary", "opt_slot", "opt_timeline", "plugin_options",
+ "plugin_opt_list", "plugin_opt_elem", "plugin_opt_arg", "sql_cmd", YY_NULLPTR
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[NUM] -- (External) token number corresponding to the
+ (internal) symbol number NUM (which must be that of a token). */
+static const yytype_uint16 yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 59, 46, 40, 41, 44
+};
+# endif
+
+#define YYPACT_NINF -27
+
+#define yypact_value_is_default(Yystate) \
+ (!!((Yystate) == (-27)))
+
+#define YYTABLE_NINF -1
+
+#define yytable_value_is_error(Yytable_value) \
+ 0
+
+ /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+static const yytype_int8 yypact[] =
+{
+ 12, -27, -27, -27, -1, -17, 13, 23, 25, 31,
+ -3, -27, -27, -27, -27, -27, -27, -27, -27, -27,
+ -15, -27, -2, 29, 10, 7, 19, -27, -27, -27,
+ -27, 35, -27, -27, -27, 34, -27, -27, -27, 37,
+ 38, -27, 39, 16, -27, 40, -27, -11, -27, -27,
+ -27, -27, -27, -27, 41, 20, -27, 44, 8, 45,
+ -27, -19, -27, 47, -27, -27, -27, -27, -27, -27,
+ -27, -19, 46, -10, -27, -27, -27, -27, 47, -27
+};
+
+ /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
+ Performed when YYTABLE does not specify something else to do. Zero
+ means the default is an error. */
+static const yytype_uint8 yydefact[] =
+{
+ 0, 59, 20, 14, 0, 49, 0, 0, 0, 0,
+ 4, 5, 12, 6, 9, 10, 7, 8, 11, 13,
+ 18, 16, 15, 0, 45, 47, 39, 43, 1, 3,
+ 2, 0, 22, 23, 25, 0, 24, 27, 28, 0,
+ 0, 19, 0, 48, 44, 0, 46, 0, 40, 21,
+ 26, 29, 30, 17, 0, 51, 34, 0, 53, 0,
+ 41, 31, 34, 0, 42, 50, 38, 35, 36, 37,
+ 33, 32, 58, 0, 54, 57, 56, 52, 0, 55
+};
+
+ /* YYPGOTO[NTERM-NUM]. */
+static const yytype_int8 yypgoto[] =
+{
+ -27, -27, -27, -27, -27, -27, -27, -27, -27, -27,
+ -27, -9, -27, -27, -27, -27, -27, -27, -27, -27,
+ -27, -27, -27, -26, -27, -27
+};
+
+ /* YYDEFGOTO[NTERM-NUM]. */
+static const yytype_int8 yydefgoto[] =
+{
+ -1, 9, 30, 10, 11, 12, 22, 13, 20, 41,
+ 14, 61, 70, 15, 16, 17, 18, 45, 47, 24,
+ 60, 64, 73, 74, 76, 19
+};
+
+ /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule whose
+ number is the opposite. If YYTABLE_NINF, syntax error. */
+static const yytype_uint8 yytable[] =
+{
+ 31, 32, 33, 21, 34, 35, 36, 37, 38, 66,
+ 23, 67, 68, 69, 56, 57, 1, 25, 39, 40,
+ 2, 3, 4, 5, 6, 7, 8, 26, 77, 78,
+ 27, 28, 29, 43, 42, 44, 46, 48, 49, 50,
+ 51, 52, 54, 53, 59, 63, 55, 58, 62, 75,
+ 65, 72, 79, 71
+};
+
+static const yytype_uint8 yycheck[] =
+{
+ 15, 16, 17, 4, 19, 20, 21, 22, 23, 28,
+ 27, 30, 31, 32, 25, 26, 4, 4, 33, 34,
+ 8, 9, 10, 11, 12, 13, 14, 4, 38, 39,
+ 5, 0, 35, 4, 36, 25, 29, 18, 3, 5,
+ 3, 3, 26, 4, 24, 37, 6, 6, 4, 3,
+ 5, 4, 78, 62
+};
+
+ /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const yytype_uint8 yystos[] =
+{
+ 0, 4, 8, 9, 10, 11, 12, 13, 14, 41,
+ 43, 44, 45, 47, 50, 53, 54, 55, 56, 65,
+ 48, 4, 46, 27, 59, 4, 4, 5, 0, 35,
+ 42, 15, 16, 17, 19, 20, 21, 22, 23, 33,
+ 34, 49, 36, 4, 25, 57, 29, 58, 18, 3,
+ 5, 3, 3, 4, 26, 6, 25, 26, 6, 24,
+ 60, 51, 4, 37, 61, 5, 28, 30, 31, 32,
+ 52, 51, 4, 62, 63, 3, 64, 38, 39, 63
+};
+
+ /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const yytype_uint8 yyr1[] =
+{
+ 0, 40, 41, 42, 42, 43, 43, 43, 43, 43,
+ 43, 43, 43, 43, 44, 45, 46, 46, 47, 48,
+ 48, 49, 49, 49, 49, 49, 49, 49, 49, 49,
+ 49, 50, 50, 51, 51, 52, 52, 52, 52, 53,
+ 53, 54, 55, 56, 57, 57, 58, 58, 59, 59,
+ 60, 60, 61, 61, 62, 62, 63, 64, 64, 65
+};
+
+ /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */
+static const yytype_uint8 yyr2[] =
+{
+ 0, 2, 2, 1, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 1, 3, 2, 2,
+ 0, 2, 1, 1, 1, 1, 2, 1, 1, 2,
+ 2, 5, 6, 2, 0, 1, 1, 1, 1, 2,
+ 3, 5, 6, 2, 1, 0, 1, 0, 2, 0,
+ 2, 0, 3, 0, 1, 3, 2, 1, 0, 1
+};
+
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+ do \
+ if (yychar == YYEMPTY) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ YYPOPSTACK (yylen); \
+ yystate = *yyssp; \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror (YY_("syntax error: cannot back up")); \
+ YYERROR; \
+ } \
+ while (0)
+
+/* Error token number */
+#define YYTERROR 1
+#define YYERRCODE 256
+
+
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (0)
+
+/* This macro is provided for backward compatibility. */
+#ifndef YY_LOCATION_PRINT
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+#endif
+
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yy_symbol_print (stderr, \
+ Type, Value); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (0)
+
+
+/*-----------------------------------.
+| Print this symbol's value on YYO. |
+`-----------------------------------*/
+
+static void
+yy_symbol_value_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep)
+{
+ FILE *yyoutput = yyo;
+ YYUSE (yyoutput);
+ if (!yyvaluep)
+ return;
+# ifdef YYPRINT
+ if (yytype < YYNTOKENS)
+ YYPRINT (yyo, yytoknum[yytype], *yyvaluep);
+# endif
+ YYUSE (yytype);
+}
+
+
+/*---------------------------.
+| Print this symbol on YYO. |
+`---------------------------*/
+
+static void
+yy_symbol_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep)
+{
+ YYFPRINTF (yyo, "%s %s (",
+ yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
+
+ yy_symbol_value_print (yyo, yytype, yyvaluep);
+ YYFPRINTF (yyo, ")");
+}
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (included). |
+`------------------------------------------------------------------*/
+
+static void
+yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (; yybottom <= yytop; yybottom++)
+ {
+ int yybot = *yybottom;
+ YYFPRINTF (stderr, " %d", yybot);
+ }
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (0)
+
+
+/*------------------------------------------------.
+| Report that the YYRULE is going to be reduced. |
+`------------------------------------------------*/
+
+static void
+yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule)
+{
+ unsigned long yylno = yyrline[yyrule];
+ int yynrhs = yyr2[yyrule];
+ int yyi;
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
+ yyrule - 1, yylno);
+ /* The symbols being reduced. */
+ for (yyi = 0; yyi < yynrhs; yyi++)
+ {
+ YYFPRINTF (stderr, " $%d = ", yyi + 1);
+ yy_symbol_print (stderr,
+ yystos[yyssp[yyi + 1 - yynrhs]],
+ &yyvsp[(yyi + 1) - (yynrhs)]
+ );
+ YYFPRINTF (stderr, "\n");
+ }
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (yyssp, yyvsp, Rule); \
+} while (0)
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
+ evaluated with infinite-precision integer arithmetic. */
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
+
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined __GLIBC__ && defined _STRING_H
+# define yystrlen strlen
+# else
+/* Return the length of YYSTR. */
+static YYSIZE_T
+yystrlen (const char *yystr)
+{
+ YYSIZE_T yylen;
+ for (yylen = 0; yystr[yylen]; yylen++)
+ continue;
+ return yylen;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST. */
+static char *
+yystpcpy (char *yydest, const char *yysrc)
+{
+ char *yyd = yydest;
+ const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+# ifndef yytnamerr
+/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
+ quotes and backslashes, so that it's suitable for yyerror. The
+ heuristic is that double-quoting is unnecessary unless the string
+ contains an apostrophe, a comma, or backslash (other than
+ backslash-backslash). YYSTR is taken from yytname. If YYRES is
+ null, do not copy; instead, return the length of what the result
+ would have been. */
+static YYSIZE_T
+yytnamerr (char *yyres, const char *yystr)
+{
+ if (*yystr == '"')
+ {
+ YYSIZE_T yyn = 0;
+ char const *yyp = yystr;
+
+ for (;;)
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ else
+ goto append;
+
+ append:
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
+ do_not_strip_quotes: ;
+ }
+
+ if (! yyres)
+ return yystrlen (yystr);
+
+ return (YYSIZE_T) (yystpcpy (yyres, yystr) - yyres);
+}
+# endif
+
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+ about the unexpected token YYTOKEN for the state stack whose top is
+ YYSSP.
+
+ Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
+ not large enough to hold the message. In that case, also set
+ *YYMSG_ALLOC to the required number of bytes. Return 2 if the
+ required number of bytes is too large to store. */
+static int
+yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
+ yytype_int16 *yyssp, int yytoken)
+{
+ YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
+ YYSIZE_T yysize = yysize0;
+ enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+ /* Internationalized format string. */
+ const char *yyformat = YY_NULLPTR;
+ /* Arguments of yyformat. */
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+ /* Number of reported tokens (one for the "unexpected", one per
+ "expected"). */
+ int yycount = 0;
+
+ /* There are many possibilities here to consider:
+ - If this state is a consistent state with a default action, then
+ the only way this function was invoked is if the default action
+ is an error action. In that case, don't check for expected
+ tokens because there are none.
+ - The only way there can be no lookahead present (in yychar) is if
+ this state is a consistent state with a default action. Thus,
+ detecting the absence of a lookahead is sufficient to determine
+ that there is no unexpected or expected token to report. In that
+ case, just report a simple "syntax error".
+ - Don't assume there isn't a lookahead just because this state is a
+ consistent state with a default action. There might have been a
+ previous inconsistent state, consistent state with a non-default
+ action, or user semantic action that manipulated yychar.
+ - Of course, the expected token list depends on states to have
+ correct lookahead information, and it depends on the parser not
+ to perform extra reductions after fetching a lookahead from the
+ scanner and before detecting a syntax error. Thus, state merging
+ (from LALR or IELR) and default reductions corrupt the expected
+ token list. However, the list is correct for canonical LR with
+ one exception: it will still contain any token that will not be
+ accepted due to an error action in a later state.
+ */
+ if (yytoken != YYEMPTY)
+ {
+ int yyn = yypact[*yyssp];
+ yyarg[yycount++] = yytname[yytoken];
+ if (!yypact_value_is_default (yyn))
+ {
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. In other words, skip the first -YYN actions for
+ this state because they are default actions. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn + 1;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yyx;
+
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+ && !yytable_value_is_error (yytable[yyx + yyn]))
+ {
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+ {
+ yycount = 1;
+ yysize = yysize0;
+ break;
+ }
+ yyarg[yycount++] = yytname[yyx];
+ {
+ YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
+ if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
+ yysize = yysize1;
+ else
+ return 2;
+ }
+ }
+ }
+ }
+
+ switch (yycount)
+ {
+# define YYCASE_(N, S) \
+ case N: \
+ yyformat = S; \
+ break
+ default: /* Avoid compiler warnings. */
+ YYCASE_(0, YY_("syntax error"));
+ YYCASE_(1, YY_("syntax error, unexpected %s"));
+ YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+ YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+ YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+ YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+ }
+
+ {
+ YYSIZE_T yysize1 = yysize + yystrlen (yyformat);
+ if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
+ yysize = yysize1;
+ else
+ return 2;
+ }
+
+ if (*yymsg_alloc < yysize)
+ {
+ *yymsg_alloc = 2 * yysize;
+ if (! (yysize <= *yymsg_alloc
+ && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+ *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+ return 1;
+ }
+
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Don't have undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ {
+ char *yyp = *yymsg;
+ int yyi = 0;
+ while ((*yyp = *yyformat) != '\0')
+ if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyformat += 2;
+ }
+ else
+ {
+ yyp++;
+ yyformat++;
+ }
+ }
+ return 0;
+}
+#endif /* YYERROR_VERBOSE */
+
+/*------------------------------------------------.
+| Release the memory associated with this symbol. |
+`------------------------------------------------*/
+
+static void
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
+{
+ YYUSE (yyvaluep);
+ if (!yymsg)
+ yymsg = "Deleting";
+ YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ YYUSE (yytype);
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+}
+
+
+
+
+/* The lookahead symbol. */
+int yychar;
+
+/* The semantic value of the lookahead symbol. */
+YYSTYPE yylval;
+/* Number of syntax errors so far. */
+int yynerrs;
+
+
+/*----------.
+| yyparse. |
+`----------*/
+
+int
+yyparse (void)
+{
+ int yystate;
+ /* Number of tokens to shift before error messages enabled. */
+ int yyerrstatus;
+
+ /* The stacks and their tools:
+ 'yyss': related to states.
+ 'yyvs': related to semantic values.
+
+ Refer to the stacks through separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ yytype_int16 yyssa[YYINITDEPTH];
+ yytype_int16 *yyss;
+ yytype_int16 *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs;
+ YYSTYPE *yyvsp;
+
+ YYSIZE_T yystacksize;
+
+ int yyn;
+ int yyresult;
+ /* Lookahead token as an internal (translated) token number. */
+ int yytoken = 0;
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+
+#if YYERROR_VERBOSE
+ /* Buffer for error messages, and its allocated size. */
+ char yymsgbuf[128];
+ char *yymsg = yymsgbuf;
+ YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
+#endif
+
+#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
+
+ /* The number of symbols on the RHS of the reduced rule.
+ Keep to zero when no symbol should be popped. */
+ int yylen = 0;
+
+ yyssp = yyss = yyssa;
+ yyvsp = yyvs = yyvsa;
+ yystacksize = YYINITDEPTH;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+ goto yysetstate;
+
+
+/*------------------------------------------------------------.
+| yynewstate -- push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. So pushing a state here evens the stacks. */
+ yyssp++;
+
+
+/*--------------------------------------------------------------------.
+| yynewstate -- set current state (the top of the stack) to yystate. |
+`--------------------------------------------------------------------*/
+yysetstate:
+ *yyssp = (yytype_int16) yystate;
+
+ if (yyss + yystacksize - 1 <= yyssp)
+#if !defined yyoverflow && !defined YYSTACK_RELOCATE
+ goto yyexhaustedlab;
+#else
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYSIZE_T yysize = (YYSIZE_T) (yyssp - yyss + 1);
+
+# if defined yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ yytype_int16 *yyss1 = yyss;
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow (YY_("memory exhausted"),
+ &yyss1, yysize * sizeof (*yyssp),
+ &yyvs1, yysize * sizeof (*yyvsp),
+ &yystacksize);
+ yyss = yyss1;
+ yyvs = yyvs1;
+ }
+# else /* defined YYSTACK_RELOCATE */
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyexhaustedlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ yytype_int16 *yyss1 = yyss;
+ union yyalloc *yyptr =
+ (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+ if (! yyptr)
+ goto yyexhaustedlab;
+ YYSTACK_RELOCATE (yyss_alloc, yyss);
+ YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+
+ YYDPRINTF ((stderr, "Stack size increased to %lu\n",
+ (unsigned long) yystacksize));
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
+#endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */
+
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+
+ if (yystate == YYFINAL)
+ YYACCEPT;
+
+ goto yybackup;
+
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+ /* Do appropriate processing given the current state. Read a
+ lookahead token if we need one and don't already have one. */
+
+ /* First try to decide what to do without reference to lookahead token. */
+ yyn = yypact[yystate];
+ if (yypact_value_is_default (yyn))
+ goto yydefault;
+
+ /* Not known => get a lookahead token if don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = yylex ();
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
+ if (yyn <= 0)
+ {
+ if (yytable_value_is_error (yyn))
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ /* Shift the lookahead token. */
+ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
+
+ /* Discard the shifted token. */
+ yychar = YYEMPTY;
+
+ yystate = yyn;
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ '$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
+
+
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 2:
+#line 111 "repl_gram.y" /* yacc.c:1652 */
+ {
+ replication_parse_result = (yyvsp[-1].node);
+ }
+#line 1359 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 14:
+#line 137 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.node) = (Node *) makeNode(IdentifySystemCmd);
+ }
+#line 1367 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 15:
+#line 147 "repl_gram.y" /* yacc.c:1652 */
+ {
+ VariableShowStmt *n = makeNode(VariableShowStmt);
+ n->name = (yyvsp[0].str);
+ (yyval.node) = (Node *) n;
+ }
+#line 1377 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 16:
+#line 153 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.str) = (yyvsp[0].str); }
+#line 1383 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 17:
+#line 155 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.str) = psprintf("%s.%s", (yyvsp[-2].str), (yyvsp[0].str)); }
+#line 1389 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 18:
+#line 165 "repl_gram.y" /* yacc.c:1652 */
+ {
+ BaseBackupCmd *cmd = makeNode(BaseBackupCmd);
+ cmd->options = (yyvsp[0].list);
+ (yyval.node) = (Node *) cmd;
+ }
+#line 1399 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 19:
+#line 174 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.list) = lappend((yyvsp[-1].list), (yyvsp[0].defelt)); }
+#line 1405 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 20:
+#line 176 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.list) = NIL; }
+#line 1411 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 21:
+#line 181 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("label",
+ (Node *)makeString((yyvsp[0].str)), -1);
+ }
+#line 1420 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 22:
+#line 186 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("progress",
+ (Node *)makeInteger(true), -1);
+ }
+#line 1429 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 23:
+#line 191 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("fast",
+ (Node *)makeInteger(true), -1);
+ }
+#line 1438 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 24:
+#line 196 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("wal",
+ (Node *)makeInteger(true), -1);
+ }
+#line 1447 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 25:
+#line 201 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("nowait",
+ (Node *)makeInteger(true), -1);
+ }
+#line 1456 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 26:
+#line 206 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("max_rate",
+ (Node *)makeInteger((yyvsp[0].uintval)), -1);
+ }
+#line 1465 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 27:
+#line 211 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("tablespace_map",
+ (Node *)makeInteger(true), -1);
+ }
+#line 1474 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 28:
+#line 216 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("noverify_checksums",
+ (Node *)makeInteger(true), -1);
+ }
+#line 1483 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 29:
+#line 221 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("manifest",
+ (Node *)makeString((yyvsp[0].str)), -1);
+ }
+#line 1492 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 30:
+#line 226 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("manifest_checksums",
+ (Node *)makeString((yyvsp[0].str)), -1);
+ }
+#line 1501 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 31:
+#line 235 "repl_gram.y" /* yacc.c:1652 */
+ {
+ CreateReplicationSlotCmd *cmd;
+ cmd = makeNode(CreateReplicationSlotCmd);
+ cmd->kind = REPLICATION_KIND_PHYSICAL;
+ cmd->slotname = (yyvsp[-3].str);
+ cmd->temporary = (yyvsp[-2].boolval);
+ cmd->options = (yyvsp[0].list);
+ (yyval.node) = (Node *) cmd;
+ }
+#line 1515 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 32:
+#line 246 "repl_gram.y" /* yacc.c:1652 */
+ {
+ CreateReplicationSlotCmd *cmd;
+ cmd = makeNode(CreateReplicationSlotCmd);
+ cmd->kind = REPLICATION_KIND_LOGICAL;
+ cmd->slotname = (yyvsp[-4].str);
+ cmd->temporary = (yyvsp[-3].boolval);
+ cmd->plugin = (yyvsp[-1].str);
+ cmd->options = (yyvsp[0].list);
+ (yyval.node) = (Node *) cmd;
+ }
+#line 1530 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 33:
+#line 260 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.list) = lappend((yyvsp[-1].list), (yyvsp[0].defelt)); }
+#line 1536 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 34:
+#line 262 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.list) = NIL; }
+#line 1542 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 35:
+#line 267 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("export_snapshot",
+ (Node *)makeInteger(true), -1);
+ }
+#line 1551 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 36:
+#line 272 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("export_snapshot",
+ (Node *)makeInteger(false), -1);
+ }
+#line 1560 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 37:
+#line 277 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("use_snapshot",
+ (Node *)makeInteger(true), -1);
+ }
+#line 1569 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 38:
+#line 282 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem("reserve_wal",
+ (Node *)makeInteger(true), -1);
+ }
+#line 1578 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 39:
+#line 291 "repl_gram.y" /* yacc.c:1652 */
+ {
+ DropReplicationSlotCmd *cmd;
+ cmd = makeNode(DropReplicationSlotCmd);
+ cmd->slotname = (yyvsp[0].str);
+ cmd->wait = false;
+ (yyval.node) = (Node *) cmd;
+ }
+#line 1590 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 40:
+#line 299 "repl_gram.y" /* yacc.c:1652 */
+ {
+ DropReplicationSlotCmd *cmd;
+ cmd = makeNode(DropReplicationSlotCmd);
+ cmd->slotname = (yyvsp[-1].str);
+ cmd->wait = true;
+ (yyval.node) = (Node *) cmd;
+ }
+#line 1602 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 41:
+#line 313 "repl_gram.y" /* yacc.c:1652 */
+ {
+ StartReplicationCmd *cmd;
+
+ cmd = makeNode(StartReplicationCmd);
+ cmd->kind = REPLICATION_KIND_PHYSICAL;
+ cmd->slotname = (yyvsp[-3].str);
+ cmd->startpoint = (yyvsp[-1].recptr);
+ cmd->timeline = (yyvsp[0].uintval);
+ (yyval.node) = (Node *) cmd;
+ }
+#line 1617 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 42:
+#line 328 "repl_gram.y" /* yacc.c:1652 */
+ {
+ StartReplicationCmd *cmd;
+ cmd = makeNode(StartReplicationCmd);
+ cmd->kind = REPLICATION_KIND_LOGICAL;
+ cmd->slotname = (yyvsp[-3].str);
+ cmd->startpoint = (yyvsp[-1].recptr);
+ cmd->options = (yyvsp[0].list);
+ (yyval.node) = (Node *) cmd;
+ }
+#line 1631 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 43:
+#line 343 "repl_gram.y" /* yacc.c:1652 */
+ {
+ TimeLineHistoryCmd *cmd;
+
+ if ((yyvsp[0].uintval) <= 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("invalid timeline %u", (yyvsp[0].uintval))));
+
+ cmd = makeNode(TimeLineHistoryCmd);
+ cmd->timeline = (yyvsp[0].uintval);
+
+ (yyval.node) = (Node *) cmd;
+ }
+#line 1649 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 46:
+#line 364 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.boolval) = true; }
+#line 1655 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 47:
+#line 365 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.boolval) = false; }
+#line 1661 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 48:
+#line 370 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.str) = (yyvsp[0].str); }
+#line 1667 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 49:
+#line 372 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.str) = NULL; }
+#line 1673 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 50:
+#line 377 "repl_gram.y" /* yacc.c:1652 */
+ {
+ if ((yyvsp[0].uintval) <= 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("invalid timeline %u", (yyvsp[0].uintval))));
+ (yyval.uintval) = (yyvsp[0].uintval);
+ }
+#line 1685 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 51:
+#line 384 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.uintval) = 0; }
+#line 1691 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 52:
+#line 389 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.list) = (yyvsp[-1].list); }
+#line 1697 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 53:
+#line 390 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.list) = NIL; }
+#line 1703 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 54:
+#line 395 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.list) = list_make1((yyvsp[0].defelt));
+ }
+#line 1711 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 55:
+#line 399 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.list) = lappend((yyvsp[-2].list), (yyvsp[0].defelt));
+ }
+#line 1719 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 56:
+#line 406 "repl_gram.y" /* yacc.c:1652 */
+ {
+ (yyval.defelt) = makeDefElem((yyvsp[-1].str), (yyvsp[0].node), -1);
+ }
+#line 1727 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 57:
+#line 412 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.node) = (Node *) makeString((yyvsp[0].str)); }
+#line 1733 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 58:
+#line 413 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.node) = NULL; }
+#line 1739 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 59:
+#line 417 "repl_gram.y" /* yacc.c:1652 */
+ { (yyval.node) = (Node *) make_sqlcmd(); }
+#line 1745 "repl_gram.c" /* yacc.c:1652 */
+ break;
+
+
+#line 1749 "repl_gram.c" /* yacc.c:1652 */
+ default: break;
+ }
+ /* User semantic actions sometimes alter yychar, and that requires
+ that yytoken be updated with the new translation. We take the
+ approach of translating immediately before every use of yytoken.
+ One alternative is translating here after every semantic action,
+ but that translation would be missed if the semantic action invokes
+ YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+ if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
+ incorrect destructor might then be invoked immediately. In the
+ case of YYERROR or YYBACKUP, subsequent parser actions might lead
+ to an incorrect destructor call or verbose syntax error message
+ before the lookahead is translated. */
+ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
+
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+
+ /* Now 'shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+ {
+ const int yylhs = yyr1[yyn] - YYNTOKENS;
+ const int yyi = yypgoto[yylhs] + *yyssp;
+ yystate = (0 <= yyi && yyi <= YYLAST && yycheck[yyi] == *yyssp
+ ? yytable[yyi]
+ : yydefgoto[yylhs]);
+ }
+
+ goto yynewstate;
+
+
+/*--------------------------------------.
+| yyerrlab -- here on detecting error. |
+`--------------------------------------*/
+yyerrlab:
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if ! YYERROR_VERBOSE
+ yyerror (YY_("syntax error"));
+#else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+ yyssp, yytoken)
+ {
+ char const *yymsgp = YY_("syntax error");
+ int yysyntax_error_status;
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ if (yysyntax_error_status == 0)
+ yymsgp = yymsg;
+ else if (yysyntax_error_status == 1)
+ {
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+ yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
+ if (!yymsg)
+ {
+ yymsg = yymsgbuf;
+ yymsg_alloc = sizeof yymsgbuf;
+ yysyntax_error_status = 2;
+ }
+ else
+ {
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ yymsgp = yymsg;
+ }
+ }
+ yyerror (yymsgp);
+ if (yysyntax_error_status == 2)
+ goto yyexhaustedlab;
+ }
+# undef YYSYNTAX_ERROR
+#endif
+ }
+
+
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse lookahead token after an
+ error, discard it. */
+
+ if (yychar <= YYEOF)
+ {
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ YYABORT;
+ }
+ else
+ {
+ yydestruct ("Error: discarding",
+ yytoken, &yylval);
+ yychar = YYEMPTY;
+ }
+ }
+
+ /* Else will try to reuse lookahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*---------------------------------------------------.
+| yyerrorlab -- error raised explicitly by YYERROR. |
+`---------------------------------------------------*/
+yyerrorlab:
+ /* Pacify compilers when the user code never invokes YYERROR and the
+ label yyerrorlab therefore never appears in user code. */
+ if (0)
+ YYERROR;
+
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYERROR. */
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+ yystate = *yyssp;
+ goto yyerrlab1;
+
+
+/*-------------------------------------------------------------.
+| yyerrlab1 -- common code for both syntax error and YYERROR. |
+`-------------------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (!yypact_value_is_default (yyn))
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+
+ yydestruct ("Error: popping",
+ yystos[yystate], yyvsp);
+ YYPOPSTACK (1);
+ yystate = *yyssp;
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+
+
+ /* Shift the error token. */
+ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yyresult = 1;
+ goto yyreturn;
+
+
+#if !defined yyoverflow || YYERROR_VERBOSE
+/*-------------------------------------------------.
+| yyexhaustedlab -- memory exhaustion comes here. |
+`-------------------------------------------------*/
+yyexhaustedlab:
+ yyerror (YY_("memory exhausted"));
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+
+/*-----------------------------------------------------.
+| yyreturn -- parsing is finished, return the result. |
+`-----------------------------------------------------*/
+yyreturn:
+ if (yychar != YYEMPTY)
+ {
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = YYTRANSLATE (yychar);
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval);
+ }
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYABORT or YYACCEPT. */
+ YYPOPSTACK (yylen);
+ YY_STACK_PRINT (yyss, yyssp);
+ while (yyssp != yyss)
+ {
+ yydestruct ("Cleanup: popping",
+ yystos[*yyssp], yyvsp);
+ YYPOPSTACK (1);
+ }
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+#if YYERROR_VERBOSE
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+#endif
+ return yyresult;
+}
+#line 419 "repl_gram.y" /* yacc.c:1918 */
+
+
+static SQLCmd *
+make_sqlcmd(void)
+{
+ SQLCmd *cmd = makeNode(SQLCmd);
+ int tok;
+
+ /* Just move lexer to the end of command. */
+ for (;;)
+ {
+ tok = yylex();
+ if (tok == ';' || tok == 0)
+ break;
+ }
+ return cmd;
+}
+
+#include "repl_scanner.c"
diff --git a/src/backend/replication/repl_gram.y b/src/backend/replication/repl_gram.y
new file mode 100644
index 0000000..f93a0de
--- /dev/null
+++ b/src/backend/replication/repl_gram.y
@@ -0,0 +1,437 @@
+%{
+/*-------------------------------------------------------------------------
+ *
+ * repl_gram.y - Parser for the replication commands
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/repl_gram.y
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/xlogdefs.h"
+#include "nodes/makefuncs.h"
+#include "nodes/replnodes.h"
+#include "replication/walsender.h"
+#include "replication/walsender_private.h"
+
+
+/* Result of the parsing is returned here */
+Node *replication_parse_result;
+
+static SQLCmd *make_sqlcmd(void);
+
+
+/*
+ * Bison doesn't allocate anything that needs to live across parser calls,
+ * so we can easily have it use palloc instead of malloc. This prevents
+ * memory leaks if we error out during parsing. Note this only works with
+ * bison >= 2.0. However, in bison 1.875 the default is to use alloca()
+ * if possible, so there's not really much problem anyhow, at least if
+ * you're building with gcc.
+ */
+#define YYMALLOC palloc
+#define YYFREE pfree
+
+%}
+
+%expect 0
+%name-prefix="replication_yy"
+
+%union {
+ char *str;
+ bool boolval;
+ uint32 uintval;
+
+ XLogRecPtr recptr;
+ Node *node;
+ List *list;
+ DefElem *defelt;
+}
+
+/* Non-keyword tokens */
+%token <str> SCONST IDENT
+%token <uintval> UCONST
+%token <recptr> RECPTR
+%token T_WORD
+
+/* Keyword tokens. */
+%token K_BASE_BACKUP
+%token K_IDENTIFY_SYSTEM
+%token K_SHOW
+%token K_START_REPLICATION
+%token K_CREATE_REPLICATION_SLOT
+%token K_DROP_REPLICATION_SLOT
+%token K_TIMELINE_HISTORY
+%token K_LABEL
+%token K_PROGRESS
+%token K_FAST
+%token K_WAIT
+%token K_NOWAIT
+%token K_MAX_RATE
+%token K_WAL
+%token K_TABLESPACE_MAP
+%token K_NOVERIFY_CHECKSUMS
+%token K_TIMELINE
+%token K_PHYSICAL
+%token K_LOGICAL
+%token K_SLOT
+%token K_RESERVE_WAL
+%token K_TEMPORARY
+%token K_EXPORT_SNAPSHOT
+%token K_NOEXPORT_SNAPSHOT
+%token K_USE_SNAPSHOT
+%token K_MANIFEST
+%token K_MANIFEST_CHECKSUMS
+
+%type <node> command
+%type <node> base_backup start_replication start_logical_replication
+ create_replication_slot drop_replication_slot identify_system
+ timeline_history show sql_cmd
+%type <list> base_backup_opt_list
+%type <defelt> base_backup_opt
+%type <uintval> opt_timeline
+%type <list> plugin_options plugin_opt_list
+%type <defelt> plugin_opt_elem
+%type <node> plugin_opt_arg
+%type <str> opt_slot var_name
+%type <boolval> opt_temporary
+%type <list> create_slot_opt_list
+%type <defelt> create_slot_opt
+
+%%
+
+firstcmd: command opt_semicolon
+ {
+ replication_parse_result = $1;
+ }
+ ;
+
+opt_semicolon: ';'
+ | /* EMPTY */
+ ;
+
+command:
+ identify_system
+ | base_backup
+ | start_replication
+ | start_logical_replication
+ | create_replication_slot
+ | drop_replication_slot
+ | timeline_history
+ | show
+ | sql_cmd
+ ;
+
+/*
+ * IDENTIFY_SYSTEM
+ */
+identify_system:
+ K_IDENTIFY_SYSTEM
+ {
+ $$ = (Node *) makeNode(IdentifySystemCmd);
+ }
+ ;
+
+/*
+ * SHOW setting
+ */
+show:
+ K_SHOW var_name
+ {
+ VariableShowStmt *n = makeNode(VariableShowStmt);
+ n->name = $2;
+ $$ = (Node *) n;
+ }
+
+var_name: IDENT { $$ = $1; }
+ | var_name '.' IDENT
+ { $$ = psprintf("%s.%s", $1, $3); }
+ ;
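+
+/*
+ * Illustrative inputs for the rule above: "SHOW data_directory" yields a
+ * single IDENT, while a qualified name such as "SHOW foo.bar" is glued
+ * back together by the psprintf branch before being handed to SHOW
+ * processing.
+ */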
+
+/*
+ * BASE_BACKUP [LABEL '<label>'] [PROGRESS] [FAST] [WAL] [NOWAIT]
+ * [MAX_RATE %d] [TABLESPACE_MAP] [NOVERIFY_CHECKSUMS]
+ * [MANIFEST %s] [MANIFEST_CHECKSUMS %s]
+ */
+base_backup:
+ K_BASE_BACKUP base_backup_opt_list
+ {
+ BaseBackupCmd *cmd = makeNode(BaseBackupCmd);
+ cmd->options = $2;
+ $$ = (Node *) cmd;
+ }
+ ;
+
+base_backup_opt_list:
+ base_backup_opt_list base_backup_opt
+ { $$ = lappend($1, $2); }
+ | /* EMPTY */
+ { $$ = NIL; }
+ ;
+
+base_backup_opt:
+ K_LABEL SCONST
+ {
+ $$ = makeDefElem("label",
+ (Node *)makeString($2), -1);
+ }
+ | K_PROGRESS
+ {
+ $$ = makeDefElem("progress",
+ (Node *)makeInteger(true), -1);
+ }
+ | K_FAST
+ {
+ $$ = makeDefElem("fast",
+ (Node *)makeInteger(true), -1);
+ }
+ | K_WAL
+ {
+ $$ = makeDefElem("wal",
+ (Node *)makeInteger(true), -1);
+ }
+ | K_NOWAIT
+ {
+ $$ = makeDefElem("nowait",
+ (Node *)makeInteger(true), -1);
+ }
+ | K_MAX_RATE UCONST
+ {
+ $$ = makeDefElem("max_rate",
+ (Node *)makeInteger($2), -1);
+ }
+ | K_TABLESPACE_MAP
+ {
+ $$ = makeDefElem("tablespace_map",
+ (Node *)makeInteger(true), -1);
+ }
+ | K_NOVERIFY_CHECKSUMS
+ {
+ $$ = makeDefElem("noverify_checksums",
+ (Node *)makeInteger(true), -1);
+ }
+ | K_MANIFEST SCONST
+ {
+ $$ = makeDefElem("manifest",
+ (Node *)makeString($2), -1);
+ }
+ | K_MANIFEST_CHECKSUMS SCONST
+ {
+ $$ = makeDefElem("manifest_checksums",
+ (Node *)makeString($2), -1);
+ }
+ ;
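+
+/*
+ * As an illustration, a command such as
+ *	BASE_BACKUP LABEL 'nightly' FAST WAL MAX_RATE 32768
+ * parses into a BaseBackupCmd whose options list carries one DefElem per
+ * option given: ("label" = 'nightly', "fast" = true, "wal" = true,
+ * "max_rate" = 32768).
+ */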
+
+create_replication_slot:
+ /* CREATE_REPLICATION_SLOT slot TEMPORARY PHYSICAL RESERVE_WAL */
+ K_CREATE_REPLICATION_SLOT IDENT opt_temporary K_PHYSICAL create_slot_opt_list
+ {
+ CreateReplicationSlotCmd *cmd;
+ cmd = makeNode(CreateReplicationSlotCmd);
+ cmd->kind = REPLICATION_KIND_PHYSICAL;
+ cmd->slotname = $2;
+ cmd->temporary = $3;
+ cmd->options = $5;
+ $$ = (Node *) cmd;
+ }
+ /* CREATE_REPLICATION_SLOT slot TEMPORARY LOGICAL plugin */
+ | K_CREATE_REPLICATION_SLOT IDENT opt_temporary K_LOGICAL IDENT create_slot_opt_list
+ {
+ CreateReplicationSlotCmd *cmd;
+ cmd = makeNode(CreateReplicationSlotCmd);
+ cmd->kind = REPLICATION_KIND_LOGICAL;
+ cmd->slotname = $2;
+ cmd->temporary = $3;
+ cmd->plugin = $5;
+ cmd->options = $6;
+ $$ = (Node *) cmd;
+ }
+ ;
+
+create_slot_opt_list:
+ create_slot_opt_list create_slot_opt
+ { $$ = lappend($1, $2); }
+ | /* EMPTY */
+ { $$ = NIL; }
+ ;
+
+create_slot_opt:
+ K_EXPORT_SNAPSHOT
+ {
+ $$ = makeDefElem("export_snapshot",
+ (Node *)makeInteger(true), -1);
+ }
+ | K_NOEXPORT_SNAPSHOT
+ {
+ $$ = makeDefElem("export_snapshot",
+ (Node *)makeInteger(false), -1);
+ }
+ | K_USE_SNAPSHOT
+ {
+ $$ = makeDefElem("use_snapshot",
+ (Node *)makeInteger(true), -1);
+ }
+ | K_RESERVE_WAL
+ {
+ $$ = makeDefElem("reserve_wal",
+ (Node *)makeInteger(true), -1);
+ }
+ ;
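+
+/*
+ * Illustrative derivations of the two CREATE_REPLICATION_SLOT forms
+ * (the slot names here are hypothetical):
+ *	CREATE_REPLICATION_SLOT s1 TEMPORARY PHYSICAL RESERVE_WAL
+ *	CREATE_REPLICATION_SLOT s2 LOGICAL pgoutput USE_SNAPSHOT
+ * In the logical form the second IDENT names the output plugin.
+ */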
+
+/* DROP_REPLICATION_SLOT slot */
+drop_replication_slot:
+ K_DROP_REPLICATION_SLOT IDENT
+ {
+ DropReplicationSlotCmd *cmd;
+ cmd = makeNode(DropReplicationSlotCmd);
+ cmd->slotname = $2;
+ cmd->wait = false;
+ $$ = (Node *) cmd;
+ }
+ | K_DROP_REPLICATION_SLOT IDENT K_WAIT
+ {
+ DropReplicationSlotCmd *cmd;
+ cmd = makeNode(DropReplicationSlotCmd);
+ cmd->slotname = $2;
+ cmd->wait = true;
+ $$ = (Node *) cmd;
+ }
+ ;
+
+/*
+ * START_REPLICATION [SLOT slot] [PHYSICAL] %X/%X [TIMELINE %d]
+ */
+start_replication:
+ K_START_REPLICATION opt_slot opt_physical RECPTR opt_timeline
+ {
+ StartReplicationCmd *cmd;
+
+ cmd = makeNode(StartReplicationCmd);
+ cmd->kind = REPLICATION_KIND_PHYSICAL;
+ cmd->slotname = $2;
+ cmd->startpoint = $4;
+ cmd->timeline = $5;
+ $$ = (Node *) cmd;
+ }
+ ;
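+
+/*
+ * For instance, "START_REPLICATION SLOT s1 PHYSICAL 0/3000000 TIMELINE 2"
+ * builds a StartReplicationCmd with kind = REPLICATION_KIND_PHYSICAL. The
+ * SLOT, PHYSICAL and TIMELINE parts are all optional; when omitted,
+ * slotname is NULL and timeline is 0.
+ */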
+
+/* START_REPLICATION SLOT slot LOGICAL %X/%X options */
+start_logical_replication:
+ K_START_REPLICATION K_SLOT IDENT K_LOGICAL RECPTR plugin_options
+ {
+ StartReplicationCmd *cmd;
+ cmd = makeNode(StartReplicationCmd);
+ cmd->kind = REPLICATION_KIND_LOGICAL;
+ cmd->slotname = $3;
+ cmd->startpoint = $5;
+ cmd->options = $6;
+ $$ = (Node *) cmd;
+ }
+ ;
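+
+/*
+ * A typical invocation (the option names shown belong to an output plugin
+ * such as pgoutput, not to this grammar) might be:
+ *	START_REPLICATION SLOT s1 LOGICAL 0/3000000
+ *		(proto_version '1', publication_names 'mypub')
+ * The parenthesized pairs arrive as a List of DefElems and are passed
+ * through to the output plugin uninterpreted by the walsender parser.
+ */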
+/*
+ * TIMELINE_HISTORY %d
+ */
+timeline_history:
+ K_TIMELINE_HISTORY UCONST
+ {
+ TimeLineHistoryCmd *cmd;
+
+ if ($2 <= 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("invalid timeline %u", $2)));
+
+ cmd = makeNode(TimeLineHistoryCmd);
+ cmd->timeline = $2;
+
+ $$ = (Node *) cmd;
+ }
+ ;
+
+opt_physical:
+ K_PHYSICAL
+ | /* EMPTY */
+ ;
+
+opt_temporary:
+ K_TEMPORARY { $$ = true; }
+ | /* EMPTY */ { $$ = false; }
+ ;
+
+opt_slot:
+ K_SLOT IDENT
+ { $$ = $2; }
+ | /* EMPTY */
+ { $$ = NULL; }
+ ;
+
+opt_timeline:
+ K_TIMELINE UCONST
+ {
+ if ($2 <= 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("invalid timeline %u", $2)));
+ $$ = $2;
+ }
+ | /* EMPTY */ { $$ = 0; }
+ ;
+
+
+plugin_options:
+ '(' plugin_opt_list ')' { $$ = $2; }
+ | /* EMPTY */ { $$ = NIL; }
+ ;
+
+plugin_opt_list:
+ plugin_opt_elem
+ {
+ $$ = list_make1($1);
+ }
+ | plugin_opt_list ',' plugin_opt_elem
+ {
+ $$ = lappend($1, $3);
+ }
+ ;
+
+plugin_opt_elem:
+ IDENT plugin_opt_arg
+ {
+ $$ = makeDefElem($1, $2, -1);
+ }
+ ;
+
+plugin_opt_arg:
+ SCONST { $$ = (Node *) makeString($1); }
+ | /* EMPTY */ { $$ = NULL; }
+ ;
+
+sql_cmd:
+ IDENT { $$ = (Node *) make_sqlcmd(); }
+ ;
+%%
+
+static SQLCmd *
+make_sqlcmd(void)
+{
+ SQLCmd *cmd = makeNode(SQLCmd);
+ int tok;
+
+ /* Just move lexer to the end of command. */
+ for (;;)
+ {
+ tok = yylex();
+ if (tok == ';' || tok == 0)
+ break;
+ }
+ return cmd;
+}
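+
+/*
+ * Rationale: a replication connection opened to a specific database also
+ * accepts ordinary SQL. Any command that begins with an IDENT the grammar
+ * does not recognize reduces to sql_cmd, and the loop above merely drains
+ * the lexer; the walsender later executes the original query string itself.
+ */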
+
+#include "repl_scanner.c"
diff --git a/src/backend/replication/repl_scanner.c b/src/backend/replication/repl_scanner.c
new file mode 100644
index 0000000..488bad4
--- /dev/null
+++ b/src/backend/replication/repl_scanner.c
@@ -0,0 +1,2586 @@
+#line 2 "repl_scanner.c"
+
+#line 4 "repl_scanner.c"
+
+#define YY_INT_ALIGNED short int
+
+/* A lexical scanner generated by flex */
+
+#define yy_create_buffer replication_yy_create_buffer
+#define yy_delete_buffer replication_yy_delete_buffer
+#define yy_scan_buffer replication_yy_scan_buffer
+#define yy_scan_string replication_yy_scan_string
+#define yy_scan_bytes replication_yy_scan_bytes
+#define yy_init_buffer replication_yy_init_buffer
+#define yy_flush_buffer replication_yy_flush_buffer
+#define yy_load_buffer_state replication_yy_load_buffer_state
+#define yy_switch_to_buffer replication_yy_switch_to_buffer
+#define yypush_buffer_state replication_yypush_buffer_state
+#define yypop_buffer_state replication_yypop_buffer_state
+#define yyensure_buffer_stack replication_yyensure_buffer_stack
+#define yy_flex_debug replication_yy_flex_debug
+#define yyin replication_yyin
+#define yyleng replication_yyleng
+#define yylex replication_yylex
+#define yylineno replication_yylineno
+#define yyout replication_yyout
+#define yyrestart replication_yyrestart
+#define yytext replication_yytext
+#define yywrap replication_yywrap
+#define yyalloc replication_yyalloc
+#define yyrealloc replication_yyrealloc
+#define yyfree replication_yyfree
+
+#define FLEX_SCANNER
+#define YY_FLEX_MAJOR_VERSION 2
+#define YY_FLEX_MINOR_VERSION 6
+#define YY_FLEX_SUBMINOR_VERSION 4
+#if YY_FLEX_SUBMINOR_VERSION > 0
+#define FLEX_BETA
+#endif
+
+#ifdef yy_create_buffer
+#define replication_yy_create_buffer_ALREADY_DEFINED
+#else
+#define yy_create_buffer replication_yy_create_buffer
+#endif
+
+#ifdef yy_delete_buffer
+#define replication_yy_delete_buffer_ALREADY_DEFINED
+#else
+#define yy_delete_buffer replication_yy_delete_buffer
+#endif
+
+#ifdef yy_scan_buffer
+#define replication_yy_scan_buffer_ALREADY_DEFINED
+#else
+#define yy_scan_buffer replication_yy_scan_buffer
+#endif
+
+#ifdef yy_scan_string
+#define replication_yy_scan_string_ALREADY_DEFINED
+#else
+#define yy_scan_string replication_yy_scan_string
+#endif
+
+#ifdef yy_scan_bytes
+#define replication_yy_scan_bytes_ALREADY_DEFINED
+#else
+#define yy_scan_bytes replication_yy_scan_bytes
+#endif
+
+#ifdef yy_init_buffer
+#define replication_yy_init_buffer_ALREADY_DEFINED
+#else
+#define yy_init_buffer replication_yy_init_buffer
+#endif
+
+#ifdef yy_flush_buffer
+#define replication_yy_flush_buffer_ALREADY_DEFINED
+#else
+#define yy_flush_buffer replication_yy_flush_buffer
+#endif
+
+#ifdef yy_load_buffer_state
+#define replication_yy_load_buffer_state_ALREADY_DEFINED
+#else
+#define yy_load_buffer_state replication_yy_load_buffer_state
+#endif
+
+#ifdef yy_switch_to_buffer
+#define replication_yy_switch_to_buffer_ALREADY_DEFINED
+#else
+#define yy_switch_to_buffer replication_yy_switch_to_buffer
+#endif
+
+#ifdef yypush_buffer_state
+#define replication_yypush_buffer_state_ALREADY_DEFINED
+#else
+#define yypush_buffer_state replication_yypush_buffer_state
+#endif
+
+#ifdef yypop_buffer_state
+#define replication_yypop_buffer_state_ALREADY_DEFINED
+#else
+#define yypop_buffer_state replication_yypop_buffer_state
+#endif
+
+#ifdef yyensure_buffer_stack
+#define replication_yyensure_buffer_stack_ALREADY_DEFINED
+#else
+#define yyensure_buffer_stack replication_yyensure_buffer_stack
+#endif
+
+#ifdef yylex
+#define replication_yylex_ALREADY_DEFINED
+#else
+#define yylex replication_yylex
+#endif
+
+#ifdef yyrestart
+#define replication_yyrestart_ALREADY_DEFINED
+#else
+#define yyrestart replication_yyrestart
+#endif
+
+#ifdef yylex_init
+#define replication_yylex_init_ALREADY_DEFINED
+#else
+#define yylex_init replication_yylex_init
+#endif
+
+#ifdef yylex_init_extra
+#define replication_yylex_init_extra_ALREADY_DEFINED
+#else
+#define yylex_init_extra replication_yylex_init_extra
+#endif
+
+#ifdef yylex_destroy
+#define replication_yylex_destroy_ALREADY_DEFINED
+#else
+#define yylex_destroy replication_yylex_destroy
+#endif
+
+#ifdef yyget_debug
+#define replication_yyget_debug_ALREADY_DEFINED
+#else
+#define yyget_debug replication_yyget_debug
+#endif
+
+#ifdef yyset_debug
+#define replication_yyset_debug_ALREADY_DEFINED
+#else
+#define yyset_debug replication_yyset_debug
+#endif
+
+#ifdef yyget_extra
+#define replication_yyget_extra_ALREADY_DEFINED
+#else
+#define yyget_extra replication_yyget_extra
+#endif
+
+#ifdef yyset_extra
+#define replication_yyset_extra_ALREADY_DEFINED
+#else
+#define yyset_extra replication_yyset_extra
+#endif
+
+#ifdef yyget_in
+#define replication_yyget_in_ALREADY_DEFINED
+#else
+#define yyget_in replication_yyget_in
+#endif
+
+#ifdef yyset_in
+#define replication_yyset_in_ALREADY_DEFINED
+#else
+#define yyset_in replication_yyset_in
+#endif
+
+#ifdef yyget_out
+#define replication_yyget_out_ALREADY_DEFINED
+#else
+#define yyget_out replication_yyget_out
+#endif
+
+#ifdef yyset_out
+#define replication_yyset_out_ALREADY_DEFINED
+#else
+#define yyset_out replication_yyset_out
+#endif
+
+#ifdef yyget_leng
+#define replication_yyget_leng_ALREADY_DEFINED
+#else
+#define yyget_leng replication_yyget_leng
+#endif
+
+#ifdef yyget_text
+#define replication_yyget_text_ALREADY_DEFINED
+#else
+#define yyget_text replication_yyget_text
+#endif
+
+#ifdef yyget_lineno
+#define replication_yyget_lineno_ALREADY_DEFINED
+#else
+#define yyget_lineno replication_yyget_lineno
+#endif
+
+#ifdef yyset_lineno
+#define replication_yyset_lineno_ALREADY_DEFINED
+#else
+#define yyset_lineno replication_yyset_lineno
+#endif
+
+#ifdef yywrap
+#define replication_yywrap_ALREADY_DEFINED
+#else
+#define yywrap replication_yywrap
+#endif
+
+#ifdef yyalloc
+#define replication_yyalloc_ALREADY_DEFINED
+#else
+#define yyalloc replication_yyalloc
+#endif
+
+#ifdef yyrealloc
+#define replication_yyrealloc_ALREADY_DEFINED
+#else
+#define yyrealloc replication_yyrealloc
+#endif
+
+#ifdef yyfree
+#define replication_yyfree_ALREADY_DEFINED
+#else
+#define yyfree replication_yyfree
+#endif
+
+#ifdef yytext
+#define replication_yytext_ALREADY_DEFINED
+#else
+#define yytext replication_yytext
+#endif
+
+#ifdef yyleng
+#define replication_yyleng_ALREADY_DEFINED
+#else
+#define yyleng replication_yyleng
+#endif
+
+#ifdef yyin
+#define replication_yyin_ALREADY_DEFINED
+#else
+#define yyin replication_yyin
+#endif
+
+#ifdef yyout
+#define replication_yyout_ALREADY_DEFINED
+#else
+#define yyout replication_yyout
+#endif
+
+#ifdef yy_flex_debug
+#define replication_yy_flex_debug_ALREADY_DEFINED
+#else
+#define yy_flex_debug replication_yy_flex_debug
+#endif
+
+#ifdef yylineno
+#define replication_yylineno_ALREADY_DEFINED
+#else
+#define yylineno replication_yylineno
+#endif
+
+/* First, we deal with platform-specific or compiler-specific issues. */
+
+/* begin standard C headers. */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* end standard C headers. */
+
+/* flex integer type definitions */
+
+#ifndef FLEXINT_H
+#define FLEXINT_H
+
+/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+
+/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
+ * if you want the limit (max/min) macros for int types.
+ */
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS 1
+#endif
+
+#include <inttypes.h>
+typedef int8_t flex_int8_t;
+typedef uint8_t flex_uint8_t;
+typedef int16_t flex_int16_t;
+typedef uint16_t flex_uint16_t;
+typedef int32_t flex_int32_t;
+typedef uint32_t flex_uint32_t;
+#else
+typedef signed char flex_int8_t;
+typedef short int flex_int16_t;
+typedef int flex_int32_t;
+typedef unsigned char flex_uint8_t;
+typedef unsigned short int flex_uint16_t;
+typedef unsigned int flex_uint32_t;
+
+/* Limits of integral types. */
+#ifndef INT8_MIN
+#define INT8_MIN (-128)
+#endif
+#ifndef INT16_MIN
+#define INT16_MIN (-32767-1)
+#endif
+#ifndef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#endif
+#ifndef INT8_MAX
+#define INT8_MAX (127)
+#endif
+#ifndef INT16_MAX
+#define INT16_MAX (32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX (2147483647)
+#endif
+#ifndef UINT8_MAX
+#define UINT8_MAX (255U)
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX (65535U)
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+#ifndef SIZE_MAX
+#define SIZE_MAX (~(size_t)0)
+#endif
+
+#endif /* ! C99 */
+
+#endif /* ! FLEXINT_H */
+
+/* begin standard C++ headers. */
+
+/* TODO: this is always defined, so inline it */
+#define yyconst const
+
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define yynoreturn __attribute__((__noreturn__))
+#else
+#define yynoreturn
+#endif
+
+/* Returned upon end-of-file. */
+#define YY_NULL 0
+
+/* Promotes a possibly negative, possibly signed char to an
+ * integer in range [0..255] for use as an array index.
+ */
+#define YY_SC_TO_UI(c) ((YY_CHAR) (c))
+
+/* Enter a start condition. This macro really ought to take a parameter,
+ * but we do it the disgusting crufty way forced on us by the ()-less
+ * definition of BEGIN.
+ */
+#define BEGIN (yy_start) = 1 + 2 *
+/* Translate the current start state into a value that can be later handed
+ * to BEGIN to return to the state. The YYSTATE alias is for lex
+ * compatibility.
+ */
+#define YY_START (((yy_start) - 1) / 2)
+#define YYSTATE YY_START
+/* Action number for EOF rule of a given start state. */
+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
+/* Special action meaning "start processing a new file". */
+#define YY_NEW_FILE yyrestart( yyin )
+#define YY_END_OF_BUFFER_CHAR 0
+
+/* Size of default input buffer. */
+#ifndef YY_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the default buffer size is 16k rather than 8k.
+ * In the general case YY_BUF_SIZE is 2*YY_READ_BUF_SIZE, and the
+ * __ia64__ value below follows that same rule.
+ */
+#define YY_BUF_SIZE 32768
+#else
+#define YY_BUF_SIZE 16384
+#endif /* __ia64__ */
+#endif
+
+/* The state buf must be large enough to hold one state per character in the main buffer.
+ */
+#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
+
+#ifndef YY_TYPEDEF_YY_BUFFER_STATE
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+#endif
+
+#ifndef YY_TYPEDEF_YY_SIZE_T
+#define YY_TYPEDEF_YY_SIZE_T
+typedef size_t yy_size_t;
+#endif
+
+extern int yyleng;
+
+extern FILE *yyin, *yyout;
+
+#define EOB_ACT_CONTINUE_SCAN 0
+#define EOB_ACT_END_OF_FILE 1
+#define EOB_ACT_LAST_MATCH 2
+
+ #define YY_LESS_LINENO(n)
+ #define YY_LINENO_REWIND_TO(ptr)
+
+/* Return all but the first "n" matched characters back to the input stream. */
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ *yy_cp = (yy_hold_char); \
+ YY_RESTORE_YY_MORE_OFFSET \
+ (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
+ YY_DO_BEFORE_ACTION; /* set up yytext again */ \
+ } \
+ while ( 0 )
+#define unput(c) yyunput( c, (yytext_ptr) )
+
+#ifndef YY_STRUCT_YY_BUFFER_STATE
+#define YY_STRUCT_YY_BUFFER_STATE
+struct yy_buffer_state
+ {
+ FILE *yy_input_file;
+
+ char *yy_ch_buf; /* input buffer */
+ char *yy_buf_pos; /* current position in input buffer */
+
+ /* Size of input buffer in bytes, not including room for EOB
+ * characters.
+ */
+ int yy_buf_size;
+
+ /* Number of characters read into yy_ch_buf, not including EOB
+ * characters.
+ */
+ int yy_n_chars;
+
+ /* Whether we "own" the buffer - i.e., we know we created it,
+ * and can realloc() it to grow it, and should free() it to
+ * delete it.
+ */
+ int yy_is_our_buffer;
+
+ /* Whether this is an "interactive" input source; if so, and
+ * if we're using stdio for input, then we want to use getc()
+ * instead of fread(), to make sure we stop fetching input after
+ * each newline.
+ */
+ int yy_is_interactive;
+
+ /* Whether we're considered to be at the beginning of a line.
+ * If so, '^' rules will be active on the next match, otherwise
+ * not.
+ */
+ int yy_at_bol;
+
+ int yy_bs_lineno; /**< The line count. */
+ int yy_bs_column; /**< The column count. */
+
+ /* Whether to try to fill the input buffer when we reach the
+ * end of it.
+ */
+ int yy_fill_buffer;
+
+ int yy_buffer_status;
+
+#define YY_BUFFER_NEW 0
+#define YY_BUFFER_NORMAL 1
+ /* When an EOF's been seen but there's still some text to process
+ * then we mark the buffer as YY_EOF_PENDING, to indicate that we
+ * shouldn't try reading from the input source any more. We might
+ * still have a bunch of tokens to match, though, because of
+ * possible backing-up.
+ *
+ * When we actually see the EOF, we change the status to "new"
+ * (via yyrestart()), so that the user can continue scanning by
+ * just pointing yyin at a new input file.
+ */
+#define YY_BUFFER_EOF_PENDING 2
+
+ };
+#endif /* !YY_STRUCT_YY_BUFFER_STATE */
+
+/* Stack of input buffers. */
+static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
+static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
+static YY_BUFFER_STATE * yy_buffer_stack = NULL; /**< Stack as an array. */
+
+/* We provide macros for accessing buffer states in case in the
+ * future we want to put the buffer states in a more general
+ * "scanner state".
+ *
+ * Returns the top of the stack, or NULL.
+ */
+#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
+ ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
+ : NULL)
+/* Same as previous macro, but useful when we know that the buffer stack is not
+ * NULL or when we need an lvalue. For internal use only.
+ */
+#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
+
+/* yy_hold_char holds the character lost when yytext is formed. */
+static char yy_hold_char;
+static int yy_n_chars; /* number of characters read into yy_ch_buf */
+int yyleng;
+
+/* Points to current character in buffer. */
+static char *yy_c_buf_p = NULL;
+static int yy_init = 0; /* whether we need to initialize */
+static int yy_start = 0; /* start state number */
+
+/* Flag which is used to allow yywrap()'s to do buffer switches
+ * instead of setting up a fresh yyin. A bit of a hack ...
+ */
+static int yy_did_buffer_switch_on_eof;
+
+void yyrestart ( FILE *input_file );
+void yy_switch_to_buffer ( YY_BUFFER_STATE new_buffer );
+YY_BUFFER_STATE yy_create_buffer ( FILE *file, int size );
+void yy_delete_buffer ( YY_BUFFER_STATE b );
+void yy_flush_buffer ( YY_BUFFER_STATE b );
+void yypush_buffer_state ( YY_BUFFER_STATE new_buffer );
+void yypop_buffer_state ( void );
+
+static void yyensure_buffer_stack ( void );
+static void yy_load_buffer_state ( void );
+static void yy_init_buffer ( YY_BUFFER_STATE b, FILE *file );
+#define YY_FLUSH_BUFFER yy_flush_buffer( YY_CURRENT_BUFFER )
+
+YY_BUFFER_STATE yy_scan_buffer ( char *base, yy_size_t size );
+YY_BUFFER_STATE yy_scan_string ( const char *yy_str );
+YY_BUFFER_STATE yy_scan_bytes ( const char *bytes, int len );
+
+void *yyalloc ( yy_size_t );
+void *yyrealloc ( void *, yy_size_t );
+void yyfree ( void * );
+
+#define yy_new_buffer yy_create_buffer
+#define yy_set_interactive(is_interactive) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){ \
+ yyensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ yy_create_buffer( yyin, YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
+ }
+#define yy_set_bol(at_bol) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){\
+ yyensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ yy_create_buffer( yyin, YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
+ }
+#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
+
+/* Begin user sect3 */
+
+#define replication_yywrap() (/*CONSTCOND*/1)
+#define YY_SKIP_YYWRAP
+typedef flex_uint8_t YY_CHAR;
+
+FILE *yyin = NULL, *yyout = NULL;
+
+typedef int yy_state_type;
+
+extern int yylineno;
+int yylineno = 1;
+
+extern char *yytext;
+#ifdef yytext_ptr
+#undef yytext_ptr
+#endif
+#define yytext_ptr yytext
+
+static yy_state_type yy_get_previous_state ( void );
+static yy_state_type yy_try_NUL_trans ( yy_state_type current_state );
+static int yy_get_next_buffer ( void );
+static void yynoreturn yy_fatal_error ( const char* msg );
+
+/* Done after the current pattern has been matched and before the
+ * corresponding action - sets up yytext.
+ */
+#define YY_DO_BEFORE_ACTION \
+ (yytext_ptr) = yy_bp; \
+ yyleng = (int) (yy_cp - yy_bp); \
+ (yy_hold_char) = *yy_cp; \
+ *yy_cp = '\0'; \
+ (yy_c_buf_p) = yy_cp;
+#define YY_NUM_RULES 46
+#define YY_END_OF_BUFFER 47
+/* This struct is not used in this scanner,
+ but its presence is necessary. */
+struct yy_trans_info
+ {
+ flex_int32_t yy_verify;
+ flex_int32_t yy_nxt;
+ };
+static const flex_int16_t yy_accept[299] =
+ { 0,
+ 0, 0, 0, 0, 0, 0, 47, 45, 33, 32,
+ 34, 41, 37, 30, 31, 28, 35, 29, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 40, 38, 43, 42, 0,
+ 35, 0, 44, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 40, 39, 43, 36, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 9, 44, 44, 44, 44, 2, 44,
+
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 4, 20, 44, 44, 44, 44, 44, 25, 44, 44,
+ 44, 44, 44, 5, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 6, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 19, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 26, 8, 44, 44, 17, 7, 44, 44, 44, 44,
+ 12, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+
+ 44, 44, 44, 21, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 1,
+ 44, 44, 44, 44, 44, 44, 44, 18, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 24, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ 44, 10, 44, 44, 44, 22, 3, 44, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 16, 44,
+ 44, 44, 23, 44, 13, 44, 44, 27, 11, 44,
+ 44, 44, 44, 44, 15, 44, 14, 0
+
+ } ;
+
+static const YY_CHAR yy_ec[256] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 4, 1, 5, 1, 6, 1, 1, 7, 8,
+ 9, 1, 1, 10, 1, 1, 11, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 1, 13, 1,
+ 1, 1, 1, 1, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 23, 30, 31, 32, 33, 34, 35, 36, 37, 23,
+ 1, 1, 1, 1, 38, 1, 23, 23, 23, 23,
+
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 1, 1, 1, 1, 1, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39
+ } ;
+
+static const YY_CHAR yy_meta[40] =
+ { 0,
+ 1, 1, 1, 1, 2, 3, 4, 1, 1, 1,
+ 5, 6, 1, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 3, 3
+ } ;
+
+static const flex_int16_t yy_base[307] =
+ { 0,
+ 0, 0, 596, 595, 596, 595, 599, 604, 604, 604,
+ 604, 604, 604, 604, 604, 604, 29, 604, 36, 38,
+ 40, 42, 48, 44, 50, 84, 29, 57, 78, 79,
+ 88, 82, 87, 77, 0, 0, 591, 0, 604, 0,
+ 51, 586, 0, 89, 97, 112, 105, 107, 110, 125,
+ 117, 128, 123, 140, 132, 133, 135, 143, 148, 150,
+ 158, 151, 156, 167, 166, 0, 604, 0, 0, 169,
+ 175, 176, 178, 179, 182, 184, 186, 199, 187, 202,
+ 204, 203, 205, 206, 221, 209, 214, 215, 226, 227,
+ 234, 233, 236, 239, 244, 245, 246, 251, 252, 255,
+
+ 261, 262, 274, 566, 271, 277, 281, 282, 278, 283,
+ 284, 287, 300, 302, 303, 308, 564, 307, 579, 309,
+ 563, 310, 312, 311, 315, 327, 578, 330, 332, 333,
+ 334, 335, 338, 342, 343, 345, 354, 564, 576, 350,
+ 571, 353, 365, 369, 371, 556, 373, 376, 372, 378,
+ 382, 391, 557, 393, 384, 397, 572, 569, 554, 554,
+ 551, 400, 401, 402, 563, 403, 404, 419, 412, 422,
+ 562, 424, 425, 428, 550, 554, 559, 551, 548, 431,
+ 434, 0, 435, 437, 440, 443, 539, 544, 450, 453,
+ 456, 541, 538, 541, 547, 554, 536, 550, 534, 548,
+
+ 549, 537, 465, 460, 540, 539, 530, 533, 541, 527,
+ 518, 533, 526, 531, 526, 528, 463, 527, 520, 0,
+ 525, 532, 514, 513, 525, 528, 523, 0, 524, 513,
+ 507, 505, 520, 499, 506, 490, 505, 488, 484, 485,
+ 484, 465, 0, 482, 473, 464, 471, 463, 455, 461,
+ 450, 451, 448, 429, 430, 395, 397, 389, 387, 358,
+ 355, 0, 339, 329, 292, 0, 0, 285, 266, 237,
+ 235, 220, 196, 161, 141, 123, 126, 117, 0, 107,
+ 98, 96, 0, 83, 0, 56, 48, 0, 0, 38,
+ 40, 41, 33, 36, 0, 28, 0, 604, 501, 507,
+
+ 509, 513, 517, 523, 529, 39
+ } ;
+
+static const flex_int16_t yy_def[307] =
+ { 0,
+ 298, 1, 299, 299, 300, 300, 298, 298, 298, 298,
+ 298, 298, 298, 298, 298, 298, 301, 298, 302, 302,
+ 302, 302, 302, 302, 302, 302, 26, 26, 26, 26,
+ 26, 26, 26, 26, 303, 304, 298, 305, 298, 306,
+ 301, 301, 303, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 304, 298, 305, 306, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+
+ 26, 26, 26, 303, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 303, 26, 303, 26,
+ 303, 26, 26, 26, 26, 26, 303, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 303, 303, 26,
+ 303, 26, 26, 26, 26, 303, 26, 26, 26, 26,
+ 26, 26, 303, 26, 26, 26, 303, 303, 303, 303,
+ 303, 26, 26, 26, 303, 26, 26, 26, 26, 26,
+ 303, 26, 26, 26, 303, 303, 303, 303, 303, 26,
+ 26, 303, 26, 26, 26, 26, 303, 303, 26, 26,
+ 26, 303, 303, 303, 303, 303, 303, 303, 303, 303,
+
+ 303, 303, 26, 26, 303, 303, 303, 303, 303, 303,
+ 303, 303, 303, 303, 303, 303, 26, 303, 303, 303,
+ 303, 303, 303, 303, 303, 303, 303, 303, 303, 303,
+ 303, 303, 303, 303, 303, 303, 303, 303, 303, 303,
+ 303, 303, 303, 303, 303, 303, 303, 303, 303, 303,
+ 303, 303, 303, 303, 303, 303, 303, 303, 303, 303,
+ 303, 303, 303, 303, 303, 303, 303, 303, 303, 303,
+ 303, 303, 303, 303, 303, 303, 303, 303, 303, 303,
+ 303, 303, 303, 303, 303, 303, 303, 303, 303, 303,
+ 303, 303, 303, 303, 303, 303, 303, 0, 298, 298,
+
+ 298, 298, 298, 298, 298, 298
+ } ;
+
+static const flex_int16_t yy_nxt[644] =
+ { 0,
+ 8, 9, 10, 11, 12, 8, 13, 14, 15, 16,
+ 8, 17, 18, 19, 20, 21, 22, 23, 24, 19,
+ 19, 25, 19, 19, 26, 27, 28, 19, 29, 30,
+ 31, 32, 33, 19, 34, 19, 19, 35, 35, 40,
+ 41, 43, 53, 43, 69, 43, 40, 43, 40, 43,
+ 40, 45, 40, 43, 40, 43, 44, 49, 40, 297,
+ 40, 40, 41, 296, 295, 294, 50, 293, 292, 46,
+ 44, 47, 291, 43, 43, 43, 43, 43, 43, 43,
+ 43, 43, 43, 48, 54, 43, 43, 43, 43, 43,
+ 65, 44, 44, 290, 40, 61, 57, 51, 55, 62,
+
+ 44, 44, 44, 63, 44, 44, 44, 56, 58, 44,
+ 44, 52, 59, 289, 44, 44, 44, 64, 44, 60,
+ 44, 43, 43, 44, 44, 44, 288, 70, 287, 71,
+ 44, 76, 72, 286, 44, 73, 44, 44, 44, 44,
+ 74, 44, 75, 285, 44, 44, 44, 77, 44, 78,
+ 44, 284, 44, 44, 283, 44, 44, 80, 79, 44,
+ 84, 44, 44, 88, 44, 85, 282, 44, 83, 44,
+ 86, 44, 89, 81, 82, 87, 90, 44, 44, 44,
+ 44, 91, 44, 44, 92, 44, 95, 93, 96, 44,
+ 94, 44, 44, 44, 44, 44, 44, 44, 281, 44,
+
+ 44, 101, 44, 44, 97, 98, 44, 102, 100, 44,
+ 99, 44, 44, 44, 44, 44, 107, 44, 44, 44,
+ 103, 106, 44, 280, 104, 109, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 108, 44, 105, 110, 44,
+ 44, 44, 44, 111, 113, 112, 44, 44, 44, 44,
+ 114, 116, 44, 44, 44, 115, 279, 44, 44, 44,
+ 44, 44, 278, 44, 44, 44, 44, 118, 44, 277,
+ 117, 44, 44, 44, 44, 44, 120, 125, 44, 44,
+ 122, 119, 44, 121, 44, 124, 123, 44, 44, 44,
+ 44, 44, 126, 276, 44, 44, 44, 44, 44, 128,
+
+ 44, 44, 130, 131, 44, 44, 129, 132, 44, 44,
+ 44, 44, 133, 44, 44, 44, 44, 275, 274, 135,
+ 44, 44, 44, 44, 44, 44, 140, 44, 144, 44,
+ 136, 134, 137, 143, 44, 44, 44, 44, 44, 44,
+ 44, 142, 44, 44, 145, 44, 44, 44, 44, 150,
+ 273, 44, 151, 148, 44, 44, 44, 147, 44, 44,
+ 44, 44, 44, 44, 149, 44, 44, 44, 272, 44,
+ 44, 152, 44, 154, 155, 156, 271, 44, 44, 153,
+ 44, 44, 44, 162, 44, 44, 44, 159, 270, 44,
+ 161, 168, 44, 163, 167, 44, 44, 173, 44, 44,
+
+ 44, 164, 166, 44, 44, 44, 44, 269, 170, 44,
+ 44, 44, 169, 44, 44, 44, 44, 44, 44, 268,
+ 44, 172, 267, 174, 44, 44, 266, 44, 44, 44,
+ 44, 44, 44, 181, 183, 44, 180, 189, 44, 44,
+ 184, 44, 186, 185, 44, 191, 44, 44, 44, 44,
+ 44, 44, 44, 44, 190, 44, 44, 265, 44, 187,
+ 264, 44, 44, 44, 44, 203, 44, 44, 197, 44,
+ 44, 198, 199, 44, 200, 263, 44, 44, 44, 262,
+ 44, 261, 217, 44, 260, 259, 258, 44, 257, 204,
+ 44, 256, 44, 205, 255, 254, 253, 252, 251, 250,
+
+ 230, 36, 36, 36, 36, 36, 36, 38, 38, 38,
+ 38, 38, 38, 42, 42, 44, 249, 44, 44, 43,
+ 248, 247, 43, 66, 66, 66, 246, 66, 66, 68,
+ 245, 68, 68, 68, 68, 244, 243, 242, 241, 240,
+ 239, 238, 237, 236, 235, 234, 233, 232, 231, 229,
+ 228, 227, 226, 225, 224, 223, 222, 221, 220, 219,
+ 218, 216, 215, 214, 213, 212, 211, 210, 209, 208,
+ 207, 206, 202, 201, 196, 195, 194, 193, 192, 188,
+ 182, 179, 178, 177, 176, 175, 171, 165, 160, 158,
+ 157, 146, 141, 139, 138, 127, 40, 67, 298, 39,
+
+ 39, 37, 37, 7, 298, 298, 298, 298, 298, 298,
+ 298, 298, 298, 298, 298, 298, 298, 298, 298, 298,
+ 298, 298, 298, 298, 298, 298, 298, 298, 298, 298,
+ 298, 298, 298, 298, 298, 298, 298, 298, 298, 298,
+ 298, 298, 298
+ } ;
+
+static const flex_int16_t yy_chk[644] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 17,
+ 17, 19, 27, 20, 306, 21, 19, 22, 20, 24,
+ 21, 20, 22, 23, 24, 25, 27, 24, 23, 296,
+ 25, 41, 41, 294, 293, 292, 25, 291, 290, 21,
+ 28, 22, 287, 19, 19, 20, 20, 21, 21, 22,
+ 22, 24, 24, 23, 28, 23, 23, 25, 25, 26,
+ 34, 29, 30, 286, 26, 32, 30, 26, 29, 32,
+
+ 33, 31, 44, 32, 34, 29, 30, 29, 31, 32,
+ 45, 26, 31, 284, 33, 31, 44, 33, 47, 31,
+ 48, 26, 26, 49, 45, 46, 282, 45, 281, 46,
+ 51, 51, 47, 280, 48, 48, 53, 49, 50, 46,
+ 49, 52, 50, 278, 51, 55, 56, 52, 57, 53,
+ 53, 277, 50, 54, 276, 52, 58, 54, 53, 55,
+ 56, 59, 57, 60, 62, 57, 275, 54, 55, 63,
+ 58, 61, 61, 54, 54, 59, 62, 60, 62, 65,
+ 64, 63, 70, 63, 64, 61, 70, 65, 71, 72,
+ 65, 73, 74, 65, 64, 75, 70, 76, 274, 77,
+
+ 79, 76, 71, 72, 72, 73, 74, 77, 75, 75,
+ 74, 76, 78, 77, 79, 80, 82, 81, 83, 84,
+ 78, 81, 86, 273, 79, 84, 78, 87, 88, 80,
+ 82, 81, 83, 84, 85, 83, 86, 80, 85, 89,
+ 90, 87, 88, 86, 88, 87, 92, 91, 85, 93,
+ 89, 91, 94, 89, 90, 90, 272, 95, 96, 97,
+ 92, 91, 271, 93, 98, 99, 94, 93, 100, 270,
+ 92, 95, 96, 97, 101, 102, 96, 102, 98, 99,
+ 98, 95, 100, 97, 105, 101, 100, 103, 101, 102,
+ 106, 109, 103, 269, 107, 108, 110, 111, 105, 105,
+
+ 112, 103, 107, 108, 106, 109, 106, 109, 107, 108,
+ 110, 111, 110, 113, 112, 114, 115, 268, 265, 114,
+ 118, 116, 120, 122, 124, 123, 120, 113, 125, 114,
+ 115, 113, 116, 123, 118, 116, 120, 122, 124, 123,
+ 126, 122, 125, 128, 126, 129, 130, 131, 132, 131,
+ 264, 133, 132, 129, 126, 134, 135, 128, 136, 129,
+ 130, 131, 132, 140, 130, 133, 142, 137, 263, 134,
+ 135, 133, 136, 135, 136, 137, 261, 140, 143, 134,
+ 142, 137, 144, 143, 145, 149, 147, 140, 260, 148,
+ 142, 150, 143, 144, 148, 151, 144, 155, 145, 149,
+
+ 147, 145, 147, 148, 152, 150, 154, 259, 152, 151,
+ 156, 155, 151, 162, 163, 164, 166, 167, 152, 258,
+ 154, 154, 257, 156, 156, 169, 256, 162, 163, 164,
+ 166, 167, 168, 164, 166, 170, 162, 172, 173, 169,
+ 167, 174, 169, 168, 180, 174, 168, 181, 183, 170,
+ 184, 172, 173, 185, 173, 174, 186, 255, 180, 170,
+ 254, 181, 183, 189, 184, 189, 190, 185, 180, 191,
+ 186, 181, 183, 204, 184, 253, 217, 189, 203, 252,
+ 190, 251, 203, 191, 250, 249, 248, 204, 247, 190,
+ 217, 246, 203, 191, 245, 244, 242, 241, 240, 239,
+
+ 217, 299, 299, 299, 299, 299, 299, 300, 300, 300,
+ 300, 300, 300, 301, 301, 302, 238, 302, 302, 303,
+ 237, 236, 303, 304, 304, 304, 235, 304, 304, 305,
+ 234, 305, 305, 305, 305, 233, 232, 231, 230, 229,
+ 227, 226, 225, 224, 223, 222, 221, 219, 218, 216,
+ 215, 214, 213, 212, 211, 210, 209, 208, 207, 206,
+ 205, 202, 201, 200, 199, 198, 197, 196, 195, 194,
+ 193, 192, 188, 187, 179, 178, 177, 176, 175, 171,
+ 165, 161, 160, 159, 158, 157, 153, 146, 141, 139,
+ 138, 127, 121, 119, 117, 104, 42, 37, 7, 6,
+
+ 5, 4, 3, 298, 298, 298, 298, 298, 298, 298,
+ 298, 298, 298, 298, 298, 298, 298, 298, 298, 298,
+ 298, 298, 298, 298, 298, 298, 298, 298, 298, 298,
+ 298, 298, 298, 298, 298, 298, 298, 298, 298, 298,
+ 298, 298, 298
+ } ;
+
+static yy_state_type yy_last_accepting_state;
+static char *yy_last_accepting_cpos;
+
+extern int yy_flex_debug;
+int yy_flex_debug = 0;
+
+/* The intent behind this definition is that it'll catch
+ * any uses of REJECT which flex missed.
+ */
+#define REJECT reject_used_but_not_detected
+#define yymore() yymore_used_but_not_detected
+#define YY_MORE_ADJ 0
+#define YY_RESTORE_YY_MORE_OFFSET
+char *yytext;
+#line 1 "repl_scanner.l"
+#line 2 "repl_scanner.l"
+/*-------------------------------------------------------------------------
+ *
+ * repl_scanner.l
+ * a lexical scanner for the replication commands
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/repl_scanner.l
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "utils/builtins.h"
+#include "parser/scansup.h"
+
+/* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */
+#undef fprintf
+#define fprintf(file, fmt, msg) fprintf_to_ereport(fmt, msg)
+
+static void
+fprintf_to_ereport(const char *fmt, const char *msg)
+{
+ ereport(ERROR, (errmsg_internal("%s", msg)));
+}
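+
+/*
+ * Why the redirection works: flex's yy_fatal_error() calls
+ * fprintf(stderr, ...) and then exit(). With fprintf remapped to this
+ * function, the ereport(ERROR) longjmps to PostgreSQL's error handler
+ * first, so the exit() is never reached.
+ */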
+
+/* Handle to the buffer that the lexer uses internally */
+static YY_BUFFER_STATE scanbufhandle;
+
+static StringInfoData litbuf;
+
+static void startlit(void);
+static char *litbufdup(void);
+static void addlit(char *ytext, int yleng);
+static void addlitchar(unsigned char ychar);
+
+/* LCOV_EXCL_START */
+
+#line 987 "repl_scanner.c"
+#define YY_NO_INPUT 1
+
+/* Extended quote
+ * xqdouble implements embedded quote, ''''
+ */
+/* Double quote
+ * Allows embedded spaces and other special characters in identifiers.
+ */
+#line 996 "repl_scanner.c"
+
+#define INITIAL 0
+#define xq 1
+#define xd 2
+
+#ifndef YY_NO_UNISTD_H
+/* Special case for "unistd.h", since it is non-ANSI. We include it way
+ * down here because we want the user's section 1 to have been scanned first.
+ * The user has a chance to override it with an option.
+ */
+#include <unistd.h>
+#endif
+
+#ifndef YY_EXTRA_TYPE
+#define YY_EXTRA_TYPE void *
+#endif
+
+static int yy_init_globals ( void );
+
+/* Accessor methods to globals.
+ These are made visible to non-reentrant scanners for convenience. */
+
+int yylex_destroy ( void );
+
+int yyget_debug ( void );
+
+void yyset_debug ( int debug_flag );
+
+YY_EXTRA_TYPE yyget_extra ( void );
+
+void yyset_extra ( YY_EXTRA_TYPE user_defined );
+
+FILE *yyget_in ( void );
+
+void yyset_in ( FILE * _in_str );
+
+FILE *yyget_out ( void );
+
+void yyset_out ( FILE * _out_str );
+
+ int yyget_leng ( void );
+
+char *yyget_text ( void );
+
+int yyget_lineno ( void );
+
+void yyset_lineno ( int _line_number );
+
+/* Macros after this point can all be overridden by user definitions in
+ * section 1.
+ */
+
+#ifndef YY_SKIP_YYWRAP
+#ifdef __cplusplus
+extern "C" int yywrap ( void );
+#else
+extern int yywrap ( void );
+#endif
+#endif
+
+#ifndef YY_NO_UNPUT
+
+#endif
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy ( char *, const char *, int );
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen ( const char * );
+#endif
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+static int yyinput ( void );
+#else
+static int input ( void );
+#endif
+
+#endif
+
+/* Amount of stuff to slurp up with each read. */
+#ifndef YY_READ_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k */
+#define YY_READ_BUF_SIZE 16384
+#else
+#define YY_READ_BUF_SIZE 8192
+#endif /* __ia64__ */
+#endif
+
+/* Copy whatever the last rule matched to the standard output. */
+#ifndef ECHO
+/* This used to be an fputs(), but since the string might contain NUL's,
+ * we now use fwrite().
+ */
+#define ECHO do { if (fwrite( yytext, (size_t) yyleng, 1, yyout )) {} } while (0)
+#endif
+
+/* Gets input and stuffs it into "buf". The number of characters read,
+ * or YY_NULL, is returned in "result".
+ */
+#ifndef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
+ { \
+ int c = '*'; \
+ int n; \
+ for ( n = 0; n < max_size && \
+ (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
+ buf[n] = (char) c; \
+ if ( c == '\n' ) \
+ buf[n++] = (char) c; \
+ if ( c == EOF && ferror( yyin ) ) \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ result = n; \
+ } \
+ else \
+ { \
+ errno=0; \
+ while ( (result = (int) fread(buf, 1, (yy_size_t) max_size, yyin)) == 0 && ferror(yyin)) \
+ { \
+ if( errno != EINTR) \
+ { \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ break; \
+ } \
+ errno=0; \
+ clearerr(yyin); \
+ } \
+ }\
+\
+
+#endif
+
+/* No semi-colon after return; correct usage is to write "yyterminate();" -
+ * we don't want an extra ';' after the "return" because that will cause
+ * some compilers to complain about unreachable statements.
+ */
+#ifndef yyterminate
+#define yyterminate() return YY_NULL
+#endif
+
+/* Number of entries by which start-condition stack grows. */
+#ifndef YY_START_STACK_INCR
+#define YY_START_STACK_INCR 25
+#endif
+
+/* Report a fatal error. */
+#ifndef YY_FATAL_ERROR
+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
+#endif
+
+/* end tables serialization structures and prototypes */
+
+/* Default declaration of generated scanner - a define so the user can
+ * easily add parameters.
+ */
+#ifndef YY_DECL
+#define YY_DECL_IS_OURS 1
+
+extern int yylex (void);
+
+#define YY_DECL int yylex (void)
+#endif /* !YY_DECL */
+
+/* Code executed at the beginning of each rule, after yytext and yyleng
+ * have been set up.
+ */
+#ifndef YY_USER_ACTION
+#define YY_USER_ACTION
+#endif
+
+/* Code executed at the end of each rule. */
+#ifndef YY_BREAK
+#define YY_BREAK /*LINTED*/break;
+#endif
+
+#define YY_RULE_SETUP \
+ YY_USER_ACTION
+
+/** The main scanner function which does all the work.
+ */
+YY_DECL
+{
+ yy_state_type yy_current_state;
+ char *yy_cp, *yy_bp;
+ int yy_act;
+
+ if ( !(yy_init) )
+ {
+ (yy_init) = 1;
+
+#ifdef YY_USER_INIT
+ YY_USER_INIT;
+#endif
+
+ if ( ! (yy_start) )
+ (yy_start) = 1; /* first start state */
+
+ if ( ! yyin )
+ yyin = stdin;
+
+ if ( ! yyout )
+ yyout = stdout;
+
+ if ( ! YY_CURRENT_BUFFER ) {
+ yyensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ yy_create_buffer( yyin, YY_BUF_SIZE );
+ }
+
+ yy_load_buffer_state( );
+ }
+
+ {
+#line 83 "repl_scanner.l"
+
+
+#line 1216 "repl_scanner.c"
+
+ while ( /*CONSTCOND*/1 ) /* loops until end-of-file is reached */
+ {
+ yy_cp = (yy_c_buf_p);
+
+ /* Support of yytext. */
+ *yy_cp = (yy_hold_char);
+
+ /* yy_bp points to the position in yy_ch_buf of the start of
+ * the current run.
+ */
+ yy_bp = yy_cp;
+
+ yy_current_state = (yy_start);
+yy_match:
+ do
+ {
+ YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)] ;
+ if ( yy_accept[yy_current_state] )
+ {
+ (yy_last_accepting_state) = yy_current_state;
+ (yy_last_accepting_cpos) = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 299 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ ++yy_cp;
+ }
+ while ( yy_current_state != 298 );
+ yy_cp = (yy_last_accepting_cpos);
+ yy_current_state = (yy_last_accepting_state);
+
+yy_find_action:
+ yy_act = yy_accept[yy_current_state];
+
+ YY_DO_BEFORE_ACTION;
+
+do_action: /* This label is used only to access EOF actions. */
+
+ switch ( yy_act )
+ { /* beginning of action switch */
+ case 0: /* must back up */
+ /* undo the effects of YY_DO_BEFORE_ACTION */
+ *yy_cp = (yy_hold_char);
+ yy_cp = (yy_last_accepting_cpos);
+ yy_current_state = (yy_last_accepting_state);
+ goto yy_find_action;
+
+case 1:
+YY_RULE_SETUP
+#line 85 "repl_scanner.l"
+{ return K_BASE_BACKUP; }
+ YY_BREAK
+case 2:
+YY_RULE_SETUP
+#line 86 "repl_scanner.l"
+{ return K_FAST; }
+ YY_BREAK
+case 3:
+YY_RULE_SETUP
+#line 87 "repl_scanner.l"
+{ return K_IDENTIFY_SYSTEM; }
+ YY_BREAK
+case 4:
+YY_RULE_SETUP
+#line 88 "repl_scanner.l"
+{ return K_SHOW; }
+ YY_BREAK
+case 5:
+YY_RULE_SETUP
+#line 89 "repl_scanner.l"
+{ return K_LABEL; }
+ YY_BREAK
+case 6:
+YY_RULE_SETUP
+#line 90 "repl_scanner.l"
+{ return K_NOWAIT; }
+ YY_BREAK
+case 7:
+YY_RULE_SETUP
+#line 91 "repl_scanner.l"
+{ return K_PROGRESS; }
+ YY_BREAK
+case 8:
+YY_RULE_SETUP
+#line 92 "repl_scanner.l"
+{ return K_MAX_RATE; }
+ YY_BREAK
+case 9:
+YY_RULE_SETUP
+#line 93 "repl_scanner.l"
+{ return K_WAL; }
+ YY_BREAK
+case 10:
+YY_RULE_SETUP
+#line 94 "repl_scanner.l"
+{ return K_TABLESPACE_MAP; }
+ YY_BREAK
+case 11:
+YY_RULE_SETUP
+#line 95 "repl_scanner.l"
+{ return K_NOVERIFY_CHECKSUMS; }
+ YY_BREAK
+case 12:
+YY_RULE_SETUP
+#line 96 "repl_scanner.l"
+{ return K_TIMELINE; }
+ YY_BREAK
+case 13:
+YY_RULE_SETUP
+#line 97 "repl_scanner.l"
+{ return K_START_REPLICATION; }
+ YY_BREAK
+case 14:
+YY_RULE_SETUP
+#line 98 "repl_scanner.l"
+{ return K_CREATE_REPLICATION_SLOT; }
+ YY_BREAK
+case 15:
+YY_RULE_SETUP
+#line 99 "repl_scanner.l"
+{ return K_DROP_REPLICATION_SLOT; }
+ YY_BREAK
+case 16:
+YY_RULE_SETUP
+#line 100 "repl_scanner.l"
+{ return K_TIMELINE_HISTORY; }
+ YY_BREAK
+case 17:
+YY_RULE_SETUP
+#line 101 "repl_scanner.l"
+{ return K_PHYSICAL; }
+ YY_BREAK
+case 18:
+YY_RULE_SETUP
+#line 102 "repl_scanner.l"
+{ return K_RESERVE_WAL; }
+ YY_BREAK
+case 19:
+YY_RULE_SETUP
+#line 103 "repl_scanner.l"
+{ return K_LOGICAL; }
+ YY_BREAK
+case 20:
+YY_RULE_SETUP
+#line 104 "repl_scanner.l"
+{ return K_SLOT; }
+ YY_BREAK
+case 21:
+YY_RULE_SETUP
+#line 105 "repl_scanner.l"
+{ return K_TEMPORARY; }
+ YY_BREAK
+case 22:
+YY_RULE_SETUP
+#line 106 "repl_scanner.l"
+{ return K_EXPORT_SNAPSHOT; }
+ YY_BREAK
+case 23:
+YY_RULE_SETUP
+#line 107 "repl_scanner.l"
+{ return K_NOEXPORT_SNAPSHOT; }
+ YY_BREAK
+case 24:
+YY_RULE_SETUP
+#line 108 "repl_scanner.l"
+{ return K_USE_SNAPSHOT; }
+ YY_BREAK
+case 25:
+YY_RULE_SETUP
+#line 109 "repl_scanner.l"
+{ return K_WAIT; }
+ YY_BREAK
+case 26:
+YY_RULE_SETUP
+#line 110 "repl_scanner.l"
+{ return K_MANIFEST; }
+ YY_BREAK
+case 27:
+YY_RULE_SETUP
+#line 111 "repl_scanner.l"
+{ return K_MANIFEST_CHECKSUMS; }
+ YY_BREAK
+case 28:
+YY_RULE_SETUP
+#line 113 "repl_scanner.l"
+{ return ','; }
+ YY_BREAK
+case 29:
+YY_RULE_SETUP
+#line 114 "repl_scanner.l"
+{ return ';'; }
+ YY_BREAK
+case 30:
+YY_RULE_SETUP
+#line 115 "repl_scanner.l"
+{ return '('; }
+ YY_BREAK
+case 31:
+YY_RULE_SETUP
+#line 116 "repl_scanner.l"
+{ return ')'; }
+ YY_BREAK
+case 32:
+/* rule 32 can match eol */
+YY_RULE_SETUP
+#line 118 "repl_scanner.l"
+;
+ YY_BREAK
+case 33:
+YY_RULE_SETUP
+#line 119 "repl_scanner.l"
+;
+ YY_BREAK
+case 34:
+YY_RULE_SETUP
+#line 120 "repl_scanner.l"
+;
+ YY_BREAK
+case 35:
+YY_RULE_SETUP
+#line 122 "repl_scanner.l"
+{
+ yylval.uintval = strtoul(yytext, NULL, 10);
+ return UCONST;
+ }
+ YY_BREAK
+case 36:
+YY_RULE_SETUP
+#line 127 "repl_scanner.l"
+{
+ uint32 hi,
+ lo;
+ if (sscanf(yytext, "%X/%X", &hi, &lo) != 2)
+ yyerror("invalid streaming start location");
+ yylval.recptr = ((uint64) hi) << 32 | lo;
+ return RECPTR;
+ }
+ YY_BREAK
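+/* Worked example for the rule above: input "1/89ABCDEF" scans as
+ * hi = 0x1, lo = 0x89ABCDEF, so yylval.recptr becomes the 64-bit
+ * LSN 0x0000000189ABCDEF.
+ */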
+case 37:
+YY_RULE_SETUP
+#line 136 "repl_scanner.l"
+{
+ BEGIN(xq);
+ startlit();
+ }
+ YY_BREAK
+case 38:
+YY_RULE_SETUP
+#line 141 "repl_scanner.l"
+{
+ yyless(1);
+ BEGIN(INITIAL);
+ yylval.str = litbufdup();
+ return SCONST;
+ }
+ YY_BREAK
+case 39:
+YY_RULE_SETUP
+#line 148 "repl_scanner.l"
+{
+ addlitchar('\'');
+ }
+ YY_BREAK
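+/* Rule 39 is the xqdouble case: a doubled quote inside an extended-quoted
+ * string folds to one literal quote, so the input 'it''s' is lexed as the
+ * SCONST value it's.
+ */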
+case 40:
+/* rule 40 can match eol */
+YY_RULE_SETUP
+#line 152 "repl_scanner.l"
+{
+ addlit(yytext, yyleng);
+ }
+ YY_BREAK
+case 41:
+YY_RULE_SETUP
+#line 156 "repl_scanner.l"
+{
+ BEGIN(xd);
+ startlit();
+ }
+ YY_BREAK
+case 42:
+YY_RULE_SETUP
+#line 161 "repl_scanner.l"
+{
+ int len;
+ yyless(1);
+ BEGIN(INITIAL);
+ yylval.str = litbufdup();
+ len = strlen(yylval.str);
+ truncate_identifier(yylval.str, len, true);
+ return IDENT;
+ }
+ YY_BREAK
+case 43:
+/* rule 43 can match eol */
+YY_RULE_SETUP
+#line 171 "repl_scanner.l"
+{
+ addlit(yytext, yyleng);
+ }
+ YY_BREAK
+case 44:
+YY_RULE_SETUP
+#line 175 "repl_scanner.l"
+{
+ int len = strlen(yytext);
+
+ yylval.str = downcase_truncate_identifier(yytext, len, true);
+ return IDENT;
+ }
+ YY_BREAK
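+/* Rule 44 mirrors the core lexer's identifier folding: an unquoted name
+ * such as MySlot becomes myslot (truncated to NAMEDATALEN-1 if needed),
+ * whereas a double-quoted identifier, handled by rule 42, keeps its case.
+ */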
+case YY_STATE_EOF(xq):
+case YY_STATE_EOF(xd):
+#line 182 "repl_scanner.l"
+{ yyerror("unterminated quoted string"); }
+ YY_BREAK
+case YY_STATE_EOF(INITIAL):
+#line 185 "repl_scanner.l"
+{
+ yyterminate();
+ }
+ YY_BREAK
+case 45:
+YY_RULE_SETUP
+#line 189 "repl_scanner.l"
+{
+ return T_WORD;
+ }
+ YY_BREAK
+case 46:
+YY_RULE_SETUP
+#line 192 "repl_scanner.l"
+YY_FATAL_ERROR( "flex scanner jammed" );
+ YY_BREAK
+#line 1555 "repl_scanner.c"
+
+ case YY_END_OF_BUFFER:
+ {
+ /* Amount of text matched not including the EOB char. */
+ int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
+
+ /* Undo the effects of YY_DO_BEFORE_ACTION. */
+ *yy_cp = (yy_hold_char);
+ YY_RESTORE_YY_MORE_OFFSET
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
+ {
+ /* We're scanning a new file or input source. It's
+ * possible that this happened because the user
+ * just pointed yyin at a new source and called
+ * yylex(). If so, then we have to assure
+ * consistency between YY_CURRENT_BUFFER and our
+ * globals. Here is the right place to do so, because
+ * this is the first action (other than possibly a
+ * back-up) that will match for the new input source.
+ */
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
+ }
+
+ /* Note that here we test for yy_c_buf_p "<=" to the position
+ * of the first EOB in the buffer, since yy_c_buf_p will
+ * already have been incremented past the NUL character
+ * (since all states make transitions on EOB to the
+ * end-of-buffer state). Contrast this with the test
+ * in input().
+ */
+ if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ { /* This was really a NUL. */
+ yy_state_type yy_next_state;
+
+ (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ /* Okay, we're now positioned to make the NUL
+ * transition. We couldn't have
+ * yy_get_previous_state() go ahead and do it
+ * for us because it doesn't know how to deal
+ * with the possibility of jamming (and we don't
+ * want to build jamming into it because then it
+ * will run more slowly).
+ */
+
+ yy_next_state = yy_try_NUL_trans( yy_current_state );
+
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+
+ if ( yy_next_state )
+ {
+ /* Consume the NUL. */
+ yy_cp = ++(yy_c_buf_p);
+ yy_current_state = yy_next_state;
+ goto yy_match;
+ }
+
+ else
+ {
+ yy_cp = (yy_last_accepting_cpos);
+ yy_current_state = (yy_last_accepting_state);
+ goto yy_find_action;
+ }
+ }
+
+ else switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_END_OF_FILE:
+ {
+ (yy_did_buffer_switch_on_eof) = 0;
+
+ if ( yywrap( ) )
+ {
+ /* Note: because we've taken care in
+ * yy_get_next_buffer() to have set up
+ * yytext, we can now set up
+ * yy_c_buf_p so that if some total
+ * hoser (like flex itself) wants to
+ * call the scanner after we return the
+ * YY_NULL, it'll still work - another
+ * YY_NULL will get returned.
+ */
+ (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
+
+ yy_act = YY_STATE_EOF(YY_START);
+ goto do_action;
+ }
+
+ else
+ {
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+ }
+ break;
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) =
+ (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_match;
+
+ case EOB_ACT_LAST_MATCH:
+ (yy_c_buf_p) =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_find_action;
+ }
+ break;
+ }
+
+ default:
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--no action found" );
+ } /* end of action switch */
+ } /* end of scanning one token */
+ } /* end of user's declarations */
+} /* end of yylex */
+
+/* yy_get_next_buffer - try to read in a new buffer
+ *
+ * Returns a code representing an action:
+ *	EOB_ACT_LAST_MATCH - process the text matched so far, then handle EOF
+ * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
+ * EOB_ACT_END_OF_FILE - end of file
+ */
+static int yy_get_next_buffer (void)
+{
+ char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
+ char *source = (yytext_ptr);
+ int number_to_move, i;
+ int ret_val;
+
+ if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--end of buffer missed" );
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
+ { /* Don't try to fill the buffer, so this is an EOF. */
+ if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
+ {
+ /* We matched a single character, the EOB, so
+ * treat this as a final EOF.
+ */
+ return EOB_ACT_END_OF_FILE;
+ }
+
+ else
+ {
+ /* We matched some text prior to the EOB, first
+ * process it.
+ */
+ return EOB_ACT_LAST_MATCH;
+ }
+ }
+
+ /* Try to read more data. */
+
+ /* First move last chars to start of buffer. */
+ number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr) - 1);
+
+ for ( i = 0; i < number_to_move; ++i )
+ *(dest++) = *(source++);
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
+ /* don't do the read, it's not guaranteed to return an EOF,
+ * just force an EOF
+ */
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
+
+ else
+ {
+ int num_to_read =
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
+
+ while ( num_to_read <= 0 )
+ { /* Not enough room in the buffer - grow it. */
+
+ /* just a shorter name for the current buffer */
+ YY_BUFFER_STATE b = YY_CURRENT_BUFFER_LVALUE;
+
+ int yy_c_buf_p_offset =
+ (int) ((yy_c_buf_p) - b->yy_ch_buf);
+
+ if ( b->yy_is_our_buffer )
+ {
+ int new_size = b->yy_buf_size * 2;
+
+ if ( new_size <= 0 )
+ b->yy_buf_size += b->yy_buf_size / 8;
+ else
+ b->yy_buf_size *= 2;
+
+ b->yy_ch_buf = (char *)
+					/* Include room for 2 EOB chars. */
+ yyrealloc( (void *) b->yy_ch_buf,
+ (yy_size_t) (b->yy_buf_size + 2) );
+ }
+ else
+ /* Can't grow it, we don't own it. */
+ b->yy_ch_buf = NULL;
+
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR(
+ "fatal error - scanner input buffer overflow" );
+
+ (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
+
+ num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
+ number_to_move - 1;
+
+ }
+
+ if ( num_to_read > YY_READ_BUF_SIZE )
+ num_to_read = YY_READ_BUF_SIZE;
+
+ /* Read in more data. */
+ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
+ (yy_n_chars), num_to_read );
+
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ if ( (yy_n_chars) == 0 )
+ {
+ if ( number_to_move == YY_MORE_ADJ )
+ {
+ ret_val = EOB_ACT_END_OF_FILE;
+ yyrestart( yyin );
+ }
+
+ else
+ {
+ ret_val = EOB_ACT_LAST_MATCH;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
+ YY_BUFFER_EOF_PENDING;
+ }
+ }
+
+ else
+ ret_val = EOB_ACT_CONTINUE_SCAN;
+
+ if (((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
+ /* Extend the array by 50%, plus the number we really need. */
+ int new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1);
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc(
+ (void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf, (yy_size_t) new_size );
+ if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
+ /* "- 2" to take care of EOB's */
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size = (int) (new_size - 2);
+ }
+
+ (yy_n_chars) += number_to_move;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
+
+ (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
+
+ return ret_val;
+}
+
+/* yy_get_previous_state - get the state just before the EOB char was reached */
+
+ static yy_state_type yy_get_previous_state (void)
+{
+ yy_state_type yy_current_state;
+ char *yy_cp;
+
+ yy_current_state = (yy_start);
+
+ for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
+ {
+ YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
+ if ( yy_accept[yy_current_state] )
+ {
+ (yy_last_accepting_state) = yy_current_state;
+ (yy_last_accepting_cpos) = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 299 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ }
+
+ return yy_current_state;
+}
+
+/* yy_try_NUL_trans - try to make a transition on the NUL character
+ *
+ * synopsis
+ * next_state = yy_try_NUL_trans( current_state );
+ */
+ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
+{
+ int yy_is_jam;
+ char *yy_cp = (yy_c_buf_p);
+
+ YY_CHAR yy_c = 1;
+ if ( yy_accept[yy_current_state] )
+ {
+ (yy_last_accepting_state) = yy_current_state;
+ (yy_last_accepting_cpos) = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 299 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ yy_is_jam = (yy_current_state == 298);
+
+ return yy_is_jam ? 0 : yy_current_state;
+}
+
+#ifndef YY_NO_UNPUT
+
+#endif
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+ static int yyinput (void)
+#else
+ static int input (void)
+#endif
+
+{
+ int c;
+
+ *(yy_c_buf_p) = (yy_hold_char);
+
+ if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
+ {
+ /* yy_c_buf_p now points to the character we want to return.
+ * If this occurs *before* the EOB characters, then it's a
+ * valid NUL; if not, then we've hit the end of the buffer.
+ */
+ if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ /* This was really a NUL. */
+ *(yy_c_buf_p) = '\0';
+
+ else
+ { /* need more input */
+ int offset = (int) ((yy_c_buf_p) - (yytext_ptr));
+ ++(yy_c_buf_p);
+
+ switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_LAST_MATCH:
+ /* This happens because yy_g_n_b()
+ * sees that we've accumulated a
+ * token and flags that we need to
+ * try matching the token before
+ * proceeding. But for input(),
+ * there's no matching to consider.
+ * So convert the EOB_ACT_LAST_MATCH
+ * to EOB_ACT_END_OF_FILE.
+ */
+
+ /* Reset buffer status. */
+ yyrestart( yyin );
+
+ /*FALLTHROUGH*/
+
+ case EOB_ACT_END_OF_FILE:
+ {
+ if ( yywrap( ) )
+ return 0;
+
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+#ifdef __cplusplus
+ return yyinput();
+#else
+ return input();
+#endif
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) = (yytext_ptr) + offset;
+ break;
+ }
+ }
+ }
+
+ c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */
+ *(yy_c_buf_p) = '\0'; /* preserve yytext */
+ (yy_hold_char) = *++(yy_c_buf_p);
+
+ return c;
+}
+#endif /* ifndef YY_NO_INPUT */
+
+/** Immediately switch to a different input stream.
+ * @param input_file A readable stream.
+ *
+ * @note This function does not reset the start condition to @c INITIAL .
+ */
+ void yyrestart (FILE * input_file )
+{
+
+ if ( ! YY_CURRENT_BUFFER ){
+ yyensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ yy_create_buffer( yyin, YY_BUF_SIZE );
+ }
+
+ yy_init_buffer( YY_CURRENT_BUFFER, input_file );
+ yy_load_buffer_state( );
+}
+
+/** Switch to a different input buffer.
+ * @param new_buffer The new input buffer.
+ *
+ */
+ void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer )
+{
+
+ /* TODO. We should be able to replace this entire function body
+ * with
+ * yypop_buffer_state();
+ * yypush_buffer_state(new_buffer);
+ */
+ yyensure_buffer_stack ();
+ if ( YY_CURRENT_BUFFER == new_buffer )
+ return;
+
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+ yy_load_buffer_state( );
+
+ /* We don't actually know whether we did this switch during
+ * EOF (yywrap()) processing, but the only time this flag
+ * is looked at is after yywrap() is called, so it's safe
+ * to go ahead and always set it.
+ */
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+static void yy_load_buffer_state (void)
+{
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
+ yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
+ (yy_hold_char) = *(yy_c_buf_p);
+}
+
+/** Allocate and initialize an input buffer state.
+ * @param file A readable stream.
+ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
+ *
+ * @return the allocated buffer state.
+ */
+ YY_BUFFER_STATE yy_create_buffer (FILE * file, int size )
+{
+ YY_BUFFER_STATE b;
+
+ b = (YY_BUFFER_STATE) yyalloc( sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_buf_size = size;
+
+ /* yy_ch_buf has to be 2 characters longer than the size given because
+ * we need to put in 2 end-of-buffer characters.
+ */
+ b->yy_ch_buf = (char *) yyalloc( (yy_size_t) (b->yy_buf_size + 2) );
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_is_our_buffer = 1;
+
+ yy_init_buffer( b, file );
+
+ return b;
+}
+
+/** Destroy the buffer.
+ * @param b a buffer created with yy_create_buffer()
+ *
+ */
+ void yy_delete_buffer (YY_BUFFER_STATE b )
+{
+
+ if ( ! b )
+ return;
+
+ if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
+ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
+
+ if ( b->yy_is_our_buffer )
+ yyfree( (void *) b->yy_ch_buf );
+
+ yyfree( (void *) b );
+}
+
+/* Initializes or reinitializes a buffer.
+ * This function is sometimes called more than once on the same buffer,
+ * such as during a yyrestart() or at EOF.
+ */
+ static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file )
+
+{
+ int oerrno = errno;
+
+ yy_flush_buffer( b );
+
+ b->yy_input_file = file;
+ b->yy_fill_buffer = 1;
+
+ /* If b is the current buffer, then yy_init_buffer was _probably_
+ * called from yyrestart() or through yy_get_next_buffer.
+ * In that case, we don't want to reset the lineno or column.
+ */
+ if (b != YY_CURRENT_BUFFER){
+ b->yy_bs_lineno = 1;
+ b->yy_bs_column = 0;
+ }
+
+ b->yy_is_interactive = 0;
+
+ errno = oerrno;
+}
+
+/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
+ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
+ *
+ */
+ void yy_flush_buffer (YY_BUFFER_STATE b )
+{
+ if ( ! b )
+ return;
+
+ b->yy_n_chars = 0;
+
+ /* We always need two end-of-buffer characters. The first causes
+ * a transition to the end-of-buffer state. The second causes
+ * a jam in that state.
+ */
+ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
+ b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
+
+ b->yy_buf_pos = &b->yy_ch_buf[0];
+
+ b->yy_at_bol = 1;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ if ( b == YY_CURRENT_BUFFER )
+ yy_load_buffer_state( );
+}
+
+/** Pushes the new state onto the stack. The new state becomes
+ * the current state. This function will allocate the stack
+ * if necessary.
+ * @param new_buffer The new state.
+ *
+ */
+void yypush_buffer_state (YY_BUFFER_STATE new_buffer )
+{
+ if (new_buffer == NULL)
+ return;
+
+ yyensure_buffer_stack();
+
+ /* This block is copied from yy_switch_to_buffer. */
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ /* Only push if top exists. Otherwise, replace top. */
+ if (YY_CURRENT_BUFFER)
+ (yy_buffer_stack_top)++;
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+
+ /* copied from yy_switch_to_buffer. */
+ yy_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+/** Removes and deletes the top of the stack, if present.
+ * The next element becomes the new top.
+ *
+ */
+void yypop_buffer_state (void)
+{
+ if (!YY_CURRENT_BUFFER)
+ return;
+
+ yy_delete_buffer(YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ if ((yy_buffer_stack_top) > 0)
+ --(yy_buffer_stack_top);
+
+ if (YY_CURRENT_BUFFER) {
+ yy_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+ }
+}
+
+/* Allocates the stack if it does not exist.
+ * Guarantees space for at least one push.
+ */
+static void yyensure_buffer_stack (void)
+{
+ yy_size_t num_to_alloc;
+
+ if (!(yy_buffer_stack)) {
+
+ /* First allocation is just for 2 elements, since we don't know if this
+ * scanner will even need a stack. We use 2 instead of 1 to avoid an
+ * immediate realloc on the next call.
+ */
+ num_to_alloc = 1; /* After all that talk, this was set to 1 anyways... */
+ (yy_buffer_stack) = (struct yy_buffer_state**)yyalloc
+ (num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+ if ( ! (yy_buffer_stack) )
+ YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
+
+ memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
+
+ (yy_buffer_stack_max) = num_to_alloc;
+ (yy_buffer_stack_top) = 0;
+ return;
+ }
+
+ if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
+
+ /* Increase the buffer to prepare for a possible push. */
+ yy_size_t grow_size = 8 /* arbitrary grow size */;
+
+ num_to_alloc = (yy_buffer_stack_max) + grow_size;
+ (yy_buffer_stack) = (struct yy_buffer_state**)yyrealloc
+ ((yy_buffer_stack),
+ num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+ if ( ! (yy_buffer_stack) )
+ YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
+
+ /* zero only the new slots.*/
+ memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
+ (yy_buffer_stack_max) = num_to_alloc;
+ }
+}
+
+/** Setup the input buffer state to scan directly from a user-specified character buffer.
+ * @param base the character buffer
+ * @param size the size in bytes of the character buffer
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size )
+{
+ YY_BUFFER_STATE b;
+
+ if ( size < 2 ||
+ base[size-2] != YY_END_OF_BUFFER_CHAR ||
+ base[size-1] != YY_END_OF_BUFFER_CHAR )
+ /* They forgot to leave room for the EOB's. */
+ return NULL;
+
+ b = (YY_BUFFER_STATE) yyalloc( sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" );
+
+ b->yy_buf_size = (int) (size - 2); /* "- 2" to take care of EOB's */
+ b->yy_buf_pos = b->yy_ch_buf = base;
+ b->yy_is_our_buffer = 0;
+ b->yy_input_file = NULL;
+ b->yy_n_chars = b->yy_buf_size;
+ b->yy_is_interactive = 0;
+ b->yy_at_bol = 1;
+ b->yy_fill_buffer = 0;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ yy_switch_to_buffer( b );
+
+ return b;
+}
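+
+/*
+ * Illustrative sketch (added for clarity, not part of the generated file):
+ * a caller preparing input for yy_scan_buffer() must reserve two extra
+ * bytes and terminate the data with two YY_END_OF_BUFFER_CHAR ('\0') bytes,
+ * or the size/terminator check above returns NULL:
+ *
+ *		char *buf = (char *) yyalloc(len + 2);
+ *		memcpy(buf, src, len);
+ *		buf[len] = buf[len + 1] = YY_END_OF_BUFFER_CHAR;
+ *		(void) yy_scan_buffer(buf, len + 2);
+ *
+ * replication_scanner_init() further below follows exactly this pattern.
+ */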
+
+/** Setup the input buffer state to scan a string. The next call to yylex() will
+ * scan from a @e copy of @a str.
+ * @param yystr a NUL-terminated string to scan
+ *
+ * @return the newly allocated buffer state object.
+ * @note If you want to scan bytes that may contain NUL values, then use
+ * yy_scan_bytes() instead.
+ */
+YY_BUFFER_STATE yy_scan_string (const char * yystr )
+{
+
+ return yy_scan_bytes( yystr, (int) strlen(yystr) );
+}
+
+/** Setup the input buffer state to scan the given bytes. The next call to yylex() will
+ * scan from a @e copy of @a bytes.
+ * @param yybytes the byte buffer to scan
+ * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE yy_scan_bytes (const char * yybytes, int _yybytes_len )
+{
+ YY_BUFFER_STATE b;
+ char *buf;
+ yy_size_t n;
+ int i;
+
+ /* Get memory for full buffer, including space for trailing EOB's. */
+ n = (yy_size_t) (_yybytes_len + 2);
+ buf = (char *) yyalloc( n );
+ if ( ! buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );
+
+ for ( i = 0; i < _yybytes_len; ++i )
+ buf[i] = yybytes[i];
+
+ buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
+
+ b = yy_scan_buffer( buf, n );
+ if ( ! b )
+ YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" );
+
+ /* It's okay to grow etc. this buffer, and we should throw it
+ * away when we're done.
+ */
+ b->yy_is_our_buffer = 1;
+
+ return b;
+}
+
+#ifndef YY_EXIT_FAILURE
+#define YY_EXIT_FAILURE 2
+#endif
+
+static void yynoreturn yy_fatal_error (const char* msg )
+{
+ fprintf( stderr, "%s\n", msg );
+ exit( YY_EXIT_FAILURE );
+}
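+
+/*
+ * Note (added for clarity): in this scanner the fprintf() above does not
+ * reach stderr; repl_scanner.l redefines fprintf to fprintf_to_ereport(),
+ * so fatal scanner errors are reported via ereport(ERROR) and the exit()
+ * call is never reached.
+ */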
+
+/* Redefine yyless() so it works in section 3 code. */
+
+#undef yyless
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ yytext[yyleng] = (yy_hold_char); \
+ (yy_c_buf_p) = yytext + yyless_macro_arg; \
+ (yy_hold_char) = *(yy_c_buf_p); \
+ *(yy_c_buf_p) = '\0'; \
+ yyleng = yyless_macro_arg; \
+ } \
+ while ( 0 )
+
+/* Accessor methods (get/set functions) to struct members. */
+
+/** Get the current line number.
+ *
+ */
+int yyget_lineno (void)
+{
+
+ return yylineno;
+}
+
+/** Get the input stream.
+ *
+ */
+FILE *yyget_in (void)
+{
+ return yyin;
+}
+
+/** Get the output stream.
+ *
+ */
+FILE *yyget_out (void)
+{
+ return yyout;
+}
+
+/** Get the length of the current token.
+ *
+ */
+int yyget_leng (void)
+{
+ return yyleng;
+}
+
+/** Get the current token.
+ *
+ */
+
+char *yyget_text (void)
+{
+ return yytext;
+}
+
+/** Set the current line number.
+ * @param _line_number line number
+ *
+ */
+void yyset_lineno (int _line_number )
+{
+
+ yylineno = _line_number;
+}
+
+/** Set the input stream. This does not discard the current
+ * input buffer.
+ * @param _in_str A readable stream.
+ *
+ * @see yy_switch_to_buffer
+ */
+void yyset_in (FILE * _in_str )
+{
+ yyin = _in_str ;
+}
+
+void yyset_out (FILE * _out_str )
+{
+ yyout = _out_str ;
+}
+
+int yyget_debug (void)
+{
+ return yy_flex_debug;
+}
+
+void yyset_debug (int _bdebug )
+{
+ yy_flex_debug = _bdebug ;
+}
+
+static int yy_init_globals (void)
+{
+ /* Initialization is the same as for the non-reentrant scanner.
+ * This function is called from yylex_destroy(), so don't allocate here.
+ */
+
+ (yy_buffer_stack) = NULL;
+ (yy_buffer_stack_top) = 0;
+ (yy_buffer_stack_max) = 0;
+ (yy_c_buf_p) = NULL;
+ (yy_init) = 0;
+ (yy_start) = 0;
+
+/* Defined in main.c */
+#ifdef YY_STDINIT
+ yyin = stdin;
+ yyout = stdout;
+#else
+ yyin = NULL;
+ yyout = NULL;
+#endif
+
+ /* For future reference: Set errno on error, since we are called by
+ * yylex_init()
+ */
+ return 0;
+}
+
+/* yylex_destroy is for both reentrant and non-reentrant scanners. */
+int yylex_destroy (void)
+{
+
+ /* Pop the buffer stack, destroying each element. */
+ while(YY_CURRENT_BUFFER){
+ yy_delete_buffer( YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ yypop_buffer_state();
+ }
+
+ /* Destroy the stack itself. */
+ yyfree((yy_buffer_stack) );
+ (yy_buffer_stack) = NULL;
+
+ /* Reset the globals. This is important in a non-reentrant scanner so the next time
+ * yylex() is called, initialization will occur. */
+ yy_init_globals( );
+
+ return 0;
+}
+
+/*
+ * Internal utility routines.
+ */
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char* s1, const char * s2, int n )
+{
+
+ int i;
+ for ( i = 0; i < n; ++i )
+ s1[i] = s2[i];
+}
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (const char * s )
+{
+ int n;
+ for ( n = 0; s[n]; ++n )
+ ;
+
+ return n;
+}
+#endif
+
+void *yyalloc (yy_size_t size )
+{
+ return malloc(size);
+}
+
+void *yyrealloc (void * ptr, yy_size_t size )
+{
+
+ /* The cast to (char *) in the following accommodates both
+ * implementations that use char* generic pointers, and those
+ * that use void* generic pointers. It works with the latter
+ * because both ANSI C and C++ allow castless assignment from
+ * any pointer type to void*, and deal with argument conversions
+ * as though doing an assignment.
+ */
+ return realloc(ptr, size);
+}
+
+void yyfree (void * ptr )
+{
+ free( (char *) ptr ); /* see yyrealloc() for (char *) cast */
+}
+
+#define YYTABLES_NAME "yytables"
+
+#line 192 "repl_scanner.l"
+
+
+/* LCOV_EXCL_STOP */
+
+static void
+startlit(void)
+{
+ initStringInfo(&litbuf);
+}
+
+static char *
+litbufdup(void)
+{
+ return litbuf.data;
+}
+
+static void
+addlit(char *ytext, int yleng)
+{
+ appendBinaryStringInfo(&litbuf, ytext, yleng);
+}
+
+static void
+addlitchar(unsigned char ychar)
+{
+ appendStringInfoChar(&litbuf, ychar);
+}
+
+void
+yyerror(const char *message)
+{
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg_internal("%s", message)));
+}
+
+
+void
+replication_scanner_init(const char *str)
+{
+ Size slen = strlen(str);
+ char *scanbuf;
+
+ /*
+ * Might be left over after ereport()
+ */
+ if (YY_CURRENT_BUFFER)
+ yy_delete_buffer(YY_CURRENT_BUFFER);
+
+ /*
+ * Make a scan buffer with special termination needed by flex.
+ */
+ scanbuf = (char *) palloc(slen + 2);
+ memcpy(scanbuf, str, slen);
+ scanbuf[slen] = scanbuf[slen + 1] = YY_END_OF_BUFFER_CHAR;
+ scanbufhandle = yy_scan_buffer(scanbuf, slen + 2);
+}
+
+void
+replication_scanner_finish(void)
+{
+ yy_delete_buffer(scanbufhandle);
+ scanbufhandle = NULL;
+}
+
diff --git a/src/backend/replication/repl_scanner.l b/src/backend/replication/repl_scanner.l
new file mode 100644
index 0000000..452ad9f
--- /dev/null
+++ b/src/backend/replication/repl_scanner.l
@@ -0,0 +1,255 @@
+%{
+/*-------------------------------------------------------------------------
+ *
+ * repl_scanner.l
+ * a lexical scanner for the replication commands
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/repl_scanner.l
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "utils/builtins.h"
+#include "parser/scansup.h"
+
+/* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */
+#undef fprintf
+#define fprintf(file, fmt, msg) fprintf_to_ereport(fmt, msg)
+
+static void
+fprintf_to_ereport(const char *fmt, const char *msg)
+{
+ ereport(ERROR, (errmsg_internal("%s", msg)));
+}
+
+/* Handle to the buffer that the lexer uses internally */
+static YY_BUFFER_STATE scanbufhandle;
+
+static StringInfoData litbuf;
+
+static void startlit(void);
+static char *litbufdup(void);
+static void addlit(char *ytext, int yleng);
+static void addlitchar(unsigned char ychar);
+
+/* LCOV_EXCL_START */
+
+%}
+
+%option 8bit
+%option never-interactive
+%option nodefault
+%option noinput
+%option nounput
+%option noyywrap
+%option warn
+%option prefix="replication_yy"
+
+%x xq xd
+
+/* Extended quote
+ * xqdouble implements embedded quote, ''''
+ */
+xqstart {quote}
+xqdouble {quote}{quote}
+xqinside [^']+
+
+/* Double quote
+ * Allows embedded spaces and other special characters into identifiers.
+ */
+dquote \"
+xdstart {dquote}
+xdstop {dquote}
+xddouble {dquote}{dquote}
+xdinside [^"]+
+
+digit [0-9]+
+hexdigit		[0-9A-Fa-f]+
+
+quote '
+quotestop {quote}
+
+ident_start [A-Za-z\200-\377_]
+ident_cont [A-Za-z\200-\377_0-9\$]
+
+identifier {ident_start}{ident_cont}*
+
+%%
+
+BASE_BACKUP { return K_BASE_BACKUP; }
+FAST { return K_FAST; }
+IDENTIFY_SYSTEM { return K_IDENTIFY_SYSTEM; }
+SHOW { return K_SHOW; }
+LABEL { return K_LABEL; }
+NOWAIT { return K_NOWAIT; }
+PROGRESS { return K_PROGRESS; }
+MAX_RATE { return K_MAX_RATE; }
+WAL { return K_WAL; }
+TABLESPACE_MAP { return K_TABLESPACE_MAP; }
+NOVERIFY_CHECKSUMS { return K_NOVERIFY_CHECKSUMS; }
+TIMELINE { return K_TIMELINE; }
+START_REPLICATION { return K_START_REPLICATION; }
+CREATE_REPLICATION_SLOT { return K_CREATE_REPLICATION_SLOT; }
+DROP_REPLICATION_SLOT { return K_DROP_REPLICATION_SLOT; }
+TIMELINE_HISTORY { return K_TIMELINE_HISTORY; }
+PHYSICAL { return K_PHYSICAL; }
+RESERVE_WAL { return K_RESERVE_WAL; }
+LOGICAL { return K_LOGICAL; }
+SLOT { return K_SLOT; }
+TEMPORARY { return K_TEMPORARY; }
+EXPORT_SNAPSHOT { return K_EXPORT_SNAPSHOT; }
+NOEXPORT_SNAPSHOT { return K_NOEXPORT_SNAPSHOT; }
+USE_SNAPSHOT { return K_USE_SNAPSHOT; }
+WAIT { return K_WAIT; }
+MANIFEST { return K_MANIFEST; }
+MANIFEST_CHECKSUMS { return K_MANIFEST_CHECKSUMS; }
+
+"," { return ','; }
+";" { return ';'; }
+"(" { return '('; }
+")" { return ')'; }
+
+[\n] ;
+[\t] ;
+" " ;
+
+{digit}+ {
+ yylval.uintval = strtoul(yytext, NULL, 10);
+ return UCONST;
+ }
+
+{hexdigit}+\/{hexdigit}+ {
+ uint32 hi,
+ lo;
+ if (sscanf(yytext, "%X/%X", &hi, &lo) != 2)
+ yyerror("invalid streaming start location");
+ yylval.recptr = ((uint64) hi) << 32 | lo;
+ return RECPTR;
+ }
+
+{xqstart} {
+ BEGIN(xq);
+ startlit();
+ }
+
+<xq>{quotestop} {
+ yyless(1);
+ BEGIN(INITIAL);
+ yylval.str = litbufdup();
+ return SCONST;
+ }
+
+<xq>{xqdouble} {
+ addlitchar('\'');
+ }
+
+<xq>{xqinside} {
+ addlit(yytext, yyleng);
+ }
+
+{xdstart} {
+ BEGIN(xd);
+ startlit();
+ }
+
+<xd>{xdstop} {
+ int len;
+ yyless(1);
+ BEGIN(INITIAL);
+ yylval.str = litbufdup();
+ len = strlen(yylval.str);
+ truncate_identifier(yylval.str, len, true);
+ return IDENT;
+ }
+
+<xd>{xdinside} {
+ addlit(yytext, yyleng);
+ }
+
+{identifier} {
+ int len = strlen(yytext);
+
+ yylval.str = downcase_truncate_identifier(yytext, len, true);
+ return IDENT;
+ }
+
+<xq,xd><<EOF>> { yyerror("unterminated quoted string"); }
+
+
+<<EOF>> {
+ yyterminate();
+ }
+
+. {
+ return T_WORD;
+ }
+%%
+
+/* LCOV_EXCL_STOP */
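+
+/*
+ * Illustrative note (added for clarity, not part of the original file): the
+ * {hexdigit}+\/{hexdigit}+ rule above accepts WAL locations in their usual
+ * "hi/lo" text form and packs them into a 64-bit value.  For example,
+ * "0/16B3748" parses as hi = 0x0 and lo = 0x16B3748, yielding
+ * ((uint64) 0 << 32) | 0x16B3748.
+ */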
+
+static void
+startlit(void)
+{
+ initStringInfo(&litbuf);
+}
+
+static char *
+litbufdup(void)
+{
+ return litbuf.data;
+}
+
+static void
+addlit(char *ytext, int yleng)
+{
+ appendBinaryStringInfo(&litbuf, ytext, yleng);
+}
+
+static void
+addlitchar(unsigned char ychar)
+{
+ appendStringInfoChar(&litbuf, ychar);
+}
+
+void
+yyerror(const char *message)
+{
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg_internal("%s", message)));
+}
+
+
+void
+replication_scanner_init(const char *str)
+{
+ Size slen = strlen(str);
+ char *scanbuf;
+
+ /*
+ * Might be left over after ereport()
+ */
+ if (YY_CURRENT_BUFFER)
+ yy_delete_buffer(YY_CURRENT_BUFFER);
+
+ /*
+ * Make a scan buffer with special termination needed by flex.
+ */
+ scanbuf = (char *) palloc(slen + 2);
+ memcpy(scanbuf, str, slen);
+ scanbuf[slen] = scanbuf[slen + 1] = YY_END_OF_BUFFER_CHAR;
+ scanbufhandle = yy_scan_buffer(scanbuf, slen + 2);
+}
+
+void
+replication_scanner_finish(void)
+{
+ yy_delete_buffer(scanbufhandle);
+ scanbufhandle = NULL;
+}
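+
+/*
+ * Illustrative usage sketch (an assumption based on how the walsender
+ * drives this scanner, not part of this file): one replication command
+ * string is parsed roughly as
+ *
+ *		replication_scanner_init(cmd_string);
+ *		parse_rc = replication_yyparse();
+ *		replication_scanner_finish();
+ *
+ * with replication_scanner_init() owning the specially terminated scan
+ * buffer for the duration of the parse.
+ */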
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
new file mode 100644
index 0000000..02047ea
--- /dev/null
+++ b/src/backend/replication/slot.c
@@ -0,0 +1,1850 @@
+/*-------------------------------------------------------------------------
+ *
+ * slot.c
+ * Replication slot management.
+ *
+ *
+ * Copyright (c) 2012-2020, PostgreSQL Global Development Group
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/slot.c
+ *
+ * NOTES
+ *
+ * Replication slots are used to keep state about replication streams
+ * originating from this cluster. Their primary purpose is to prevent the
+ * premature removal of WAL or of old tuple versions in a manner that would
+ * interfere with replication; they are also useful for monitoring purposes.
+ * Slots need to be permanent (to allow restarts), crash-safe, and allocatable
+ * on standbys (to support cascading setups). The requirement that slots be
+ * usable on standbys precludes storing them in the system catalogs.
+ *
+ * Each replication slot gets its own directory inside the $PGDATA/pg_replslot
+ * directory. Inside that directory the state file will contain the slot's
+ * own data. Additional data can be stored alongside that file if required.
+ * While the server is running, the state data is also cached in memory for
+ * efficiency.
+ *
+ * ReplicationSlotAllocationLock must be taken in exclusive mode to allocate
+ * or free a slot. ReplicationSlotControlLock must be taken in shared mode
+ * to iterate over the slots, and in exclusive mode to change the in_use flag
+ * of a slot. The remaining data in each slot is protected by its mutex.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include <unistd.h>
+#include <sys/stat.h>
+
+#include "access/transam.h"
+#include "access/xlog_internal.h"
+#include "common/string.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "replication/slot.h"
+#include "storage/fd.h"
+#include "storage/proc.h"
+#include "storage/procarray.h"
+#include "utils/builtins.h"
+
+/*
+ * Replication slot on-disk data structure.
+ */
+typedef struct ReplicationSlotOnDisk
+{
+ /* first part of this struct needs to be version independent */
+
+ /* data not covered by checksum */
+ uint32 magic;
+ pg_crc32c checksum;
+
+ /* data covered by checksum */
+ uint32 version;
+ uint32 length;
+
+ /*
+ * The actual data in the slot that follows can differ based on the above
+ * 'version'.
+ */
+
+ ReplicationSlotPersistentData slotdata;
+} ReplicationSlotOnDisk;
+
+/* size of version independent data */
+#define ReplicationSlotOnDiskConstantSize \
+ offsetof(ReplicationSlotOnDisk, slotdata)
+/* size of the part of the slot not covered by the checksum */
+#define SnapBuildOnDiskNotChecksummedSize \
+ offsetof(ReplicationSlotOnDisk, version)
+/* size of the part covered by the checksum */
+#define SnapBuildOnDiskChecksummedSize \
+ sizeof(ReplicationSlotOnDisk) - SnapBuildOnDiskNotChecksummedSize
+/* size of the slot data that is version dependent */
+#define ReplicationSlotOnDiskV2Size \
+ sizeof(ReplicationSlotOnDisk) - ReplicationSlotOnDiskConstantSize
+
+#define SLOT_MAGIC 0x1051CA1 /* format identifier */
+#define SLOT_VERSION 2 /* version for new files */
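+
+/*
+ * Illustrative sketch (an assumption about the writer code, which is not
+ * shown here): given the layout above, the on-disk checksum covers
+ * everything from 'version' onward and skips 'magic' and the checksum
+ * field itself, roughly:
+ *
+ *		INIT_CRC32C(cp.checksum);
+ *		COMP_CRC32C(cp.checksum,
+ *					(char *) (&cp) + SnapBuildOnDiskNotChecksummedSize,
+ *					SnapBuildOnDiskChecksummedSize);
+ *		FIN_CRC32C(cp.checksum);
+ *
+ * where cp is a ReplicationSlotOnDisk about to be written out.
+ */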
+
+/* Control array for replication slot management */
+ReplicationSlotCtlData *ReplicationSlotCtl = NULL;
+
+/* My backend's replication slot in the shared memory array */
+ReplicationSlot *MyReplicationSlot = NULL;
+
+/* GUCs */
+int max_replication_slots = 0; /* the maximum number of replication
+ * slots */
+
+static ReplicationSlot *SearchNamedReplicationSlot(const char *name);
+static int ReplicationSlotAcquireInternal(ReplicationSlot *slot,
+ const char *name, SlotAcquireBehavior behavior);
+static void ReplicationSlotDropAcquired(void);
+static void ReplicationSlotDropPtr(ReplicationSlot *slot);
+
+/* internal persistency functions */
+static void RestoreSlotFromDisk(const char *name);
+static void CreateSlotOnDisk(ReplicationSlot *slot);
+static void SaveSlotToPath(ReplicationSlot *slot, const char *path, int elevel);
+
+/*
+ * Report shared-memory space needed by ReplicationSlotsShmemInit.
+ */
+Size
+ReplicationSlotsShmemSize(void)
+{
+ Size size = 0;
+
+ if (max_replication_slots == 0)
+ return size;
+
+ size = offsetof(ReplicationSlotCtlData, replication_slots);
+ size = add_size(size,
+ mul_size(max_replication_slots, sizeof(ReplicationSlot)));
+
+ return size;
+}
+
+/*
+ * Allocate and initialize shared memory for replication slots.
+ */
+void
+ReplicationSlotsShmemInit(void)
+{
+ bool found;
+
+ if (max_replication_slots == 0)
+ return;
+
+ ReplicationSlotCtl = (ReplicationSlotCtlData *)
+ ShmemInitStruct("ReplicationSlot Ctl", ReplicationSlotsShmemSize(),
+ &found);
+
+ if (!found)
+ {
+ int i;
+
+ /* First time through, so initialize */
+ MemSet(ReplicationSlotCtl, 0, ReplicationSlotsShmemSize());
+
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[i];
+
+ /* everything else is zeroed by the memset above */
+ SpinLockInit(&slot->mutex);
+ LWLockInitialize(&slot->io_in_progress_lock,
+ LWTRANCHE_REPLICATION_SLOT_IO);
+ ConditionVariableInit(&slot->active_cv);
+ }
+ }
+}
+
+/*
+ * Check whether the passed slot name is valid and report errors at elevel.
+ *
+ * Slot names may consist of [a-z0-9_]{1,NAMEDATALEN-1}, which should allow
+ * the name to be used as a directory name on every supported OS.
+ *
+ * If elevel < ERROR, returns whether the name is valid.
+ */
+bool
+ReplicationSlotValidateName(const char *name, int elevel)
+{
+ const char *cp;
+
+ if (strlen(name) == 0)
+ {
+ ereport(elevel,
+ (errcode(ERRCODE_INVALID_NAME),
+ errmsg("replication slot name \"%s\" is too short",
+ name)));
+ return false;
+ }
+
+ if (strlen(name) >= NAMEDATALEN)
+ {
+ ereport(elevel,
+ (errcode(ERRCODE_NAME_TOO_LONG),
+ errmsg("replication slot name \"%s\" is too long",
+ name)));
+ return false;
+ }
+
+ for (cp = name; *cp; cp++)
+ {
+ if (!((*cp >= 'a' && *cp <= 'z')
+ || (*cp >= '0' && *cp <= '9')
+ || (*cp == '_')))
+ {
+ ereport(elevel,
+ (errcode(ERRCODE_INVALID_NAME),
+ errmsg("replication slot name \"%s\" contains invalid character",
+ name),
+ errhint("Replication slot names may only contain lower case letters, numbers, and the underscore character.")));
+ return false;
+ }
+ }
+ return true;
+}
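+
+/*
+ * Example (added for clarity): under these rules "standby_1_slot" is a
+ * valid slot name, while "Standby1" (upper case) and "standby-1" (hyphen)
+ * are rejected with ERRCODE_INVALID_NAME.
+ */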
+
+/*
+ * Create a new replication slot and mark it as used by this backend.
+ *
+ * name: Name of the slot
+ * db_specific: logical decoding is db specific; if the slot is going to
+ * be used for that pass true, otherwise false.
+ */
+void
+ReplicationSlotCreate(const char *name, bool db_specific,
+ ReplicationSlotPersistency persistency)
+{
+ ReplicationSlot *slot = NULL;
+ int i;
+
+ Assert(MyReplicationSlot == NULL);
+
+ ReplicationSlotValidateName(name, ERROR);
+
+ /*
+ * If some other backend ran this code concurrently with us, we'd likely
+ * both allocate the same slot, and that would be bad. We'd also be at
+ * risk of missing a name collision. Also, we don't want to try to create
+ * a new slot while somebody's busy cleaning up an old one, because we
+ * might both be monkeying with the same directory.
+ */
+ LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);
+
+ /*
+ * Check for name collision, and identify an allocatable slot. We need to
+ * hold ReplicationSlotControlLock in shared mode for this, so that nobody
+ * else can change the in_use flags while we're looking at them.
+ */
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
+
+ if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("replication slot \"%s\" already exists", name)));
+ if (!s->in_use && slot == NULL)
+ slot = s;
+ }
+ LWLockRelease(ReplicationSlotControlLock);
+
+ /* If all slots are in use, we're out of luck. */
+ if (slot == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
+ errmsg("all replication slots are in use"),
+ errhint("Free one or increase max_replication_slots.")));
+
+ /*
+ * Since this slot is not in use, nobody should be looking at any part of
+ * it other than the in_use field unless they're trying to allocate it.
+ * And since we hold ReplicationSlotAllocationLock, nobody except us can
+ * be doing that. So it's safe to initialize the slot.
+ */
+ Assert(!slot->in_use);
+ Assert(slot->active_pid == 0);
+
+ /* first initialize persistent data */
+ memset(&slot->data, 0, sizeof(ReplicationSlotPersistentData));
+ StrNCpy(NameStr(slot->data.name), name, NAMEDATALEN);
+ slot->data.database = db_specific ? MyDatabaseId : InvalidOid;
+ slot->data.persistency = persistency;
+
+ /* and then data only present in shared memory */
+ slot->just_dirtied = false;
+ slot->dirty = false;
+ slot->effective_xmin = InvalidTransactionId;
+ slot->effective_catalog_xmin = InvalidTransactionId;
+ slot->candidate_catalog_xmin = InvalidTransactionId;
+ slot->candidate_xmin_lsn = InvalidXLogRecPtr;
+ slot->candidate_restart_valid = InvalidXLogRecPtr;
+ slot->candidate_restart_lsn = InvalidXLogRecPtr;
+
+ /*
+ * Create the slot on disk. We haven't actually marked the slot allocated
+ * yet, so no special cleanup is required if this errors out.
+ */
+ CreateSlotOnDisk(slot);
+
+ /*
+ * We need to briefly prevent any other backend from iterating over the
+ * slots while we flip the in_use flag. We also need to set the active
+ * flag while holding the ControlLock as otherwise a concurrent
+ * ReplicationSlotAcquire() could acquire the slot as well.
+ */
+ LWLockAcquire(ReplicationSlotControlLock, LW_EXCLUSIVE);
+
+ slot->in_use = true;
+
+ /* We can now mark the slot active, and that makes it our slot. */
+ SpinLockAcquire(&slot->mutex);
+ Assert(slot->active_pid == 0);
+ slot->active_pid = MyProcPid;
+ SpinLockRelease(&slot->mutex);
+ MyReplicationSlot = slot;
+
+ LWLockRelease(ReplicationSlotControlLock);
+
+ /*
+ * Now that the slot has been marked as in_use and active, it's safe to
+ * let somebody else try to allocate a slot.
+ */
+ LWLockRelease(ReplicationSlotAllocationLock);
+
+ /* Let everybody know we've modified this slot */
+ ConditionVariableBroadcast(&slot->active_cv);
+}
+
+/*
+ * Search for the named replication slot.
+ *
+ * Return the replication slot if found, otherwise NULL.
+ *
+ * The caller must hold ReplicationSlotControlLock in shared mode.
+ */
+static ReplicationSlot *
+SearchNamedReplicationSlot(const char *name)
+{
+ int i;
+ ReplicationSlot *slot = NULL;
+
+ Assert(LWLockHeldByMeInMode(ReplicationSlotControlLock,
+ LW_SHARED));
+
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
+
+ if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0)
+ {
+ slot = s;
+ break;
+ }
+ }
+
+ return slot;
+}
+
+/*
+ * Find a previously created slot and mark it as used by this process.
+ *
+ * The return value is only useful if behavior is SAB_Inquire, in which
+ * case it's zero if we successfully acquired the slot, -1 if the slot no longer
+ * exists, or the PID of the owning process otherwise. If behavior is
+ * SAB_Error, then trying to acquire an owned slot is an error.
+ * If SAB_Block, we sleep until the slot is released by the owning process.
+ */
+int
+ReplicationSlotAcquire(const char *name, SlotAcquireBehavior behavior)
+{
+ return ReplicationSlotAcquireInternal(NULL, name, behavior);
+}
+
+/*
+ * Mark the specified slot as used by this process.
+ *
+ * Only one of slot and name can be specified.
+ * If slot == NULL, search for the slot with the given name.
+ *
+ * See comments about the return value in ReplicationSlotAcquire().
+ */
+static int
+ReplicationSlotAcquireInternal(ReplicationSlot *slot, const char *name,
+ SlotAcquireBehavior behavior)
+{
+ ReplicationSlot *s;
+ int active_pid;
+
+ AssertArg((slot == NULL) ^ (name == NULL));
+
+retry:
+ Assert(MyReplicationSlot == NULL);
+
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+
+ /*
+ * Search for the slot with the specified name if the slot to acquire is
+ * not given. If the slot is not found, we either return -1 or error out.
+ */
+ s = slot ? slot : SearchNamedReplicationSlot(name);
+ if (s == NULL || !s->in_use)
+ {
+ LWLockRelease(ReplicationSlotControlLock);
+
+ if (behavior == SAB_Inquire)
+ return -1;
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("replication slot \"%s\" does not exist",
+ name ? name : NameStr(slot->data.name))));
+ }
+
+ /*
+ * This is the slot we want; check if it's active under some other
+ * process. In single user mode, we don't need this check.
+ */
+ if (IsUnderPostmaster)
+ {
+ /*
+ * Get ready to sleep on the slot in case it is active if SAB_Block.
+ * (We may end up not sleeping, but we don't want to do this while
+ * holding the spinlock.)
+ */
+ if (behavior == SAB_Block)
+ ConditionVariablePrepareToSleep(&s->active_cv);
+
+ SpinLockAcquire(&s->mutex);
+ if (s->active_pid == 0)
+ s->active_pid = MyProcPid;
+ active_pid = s->active_pid;
+ SpinLockRelease(&s->mutex);
+ }
+ else
+ active_pid = MyProcPid;
+ LWLockRelease(ReplicationSlotControlLock);
+
+ /*
+ * If we found the slot but it's already active in another process, we
+ * either error out, return the PID of the owning process, or retry
+ * after a short wait, as caller specified.
+ */
+ if (active_pid != MyProcPid)
+ {
+ if (behavior == SAB_Error)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_IN_USE),
+ errmsg("replication slot \"%s\" is active for PID %d",
+ NameStr(s->data.name), active_pid)));
+ else if (behavior == SAB_Inquire)
+ return active_pid;
+
+ /* Wait here until we get signaled, and then restart */
+ ConditionVariableSleep(&s->active_cv,
+ WAIT_EVENT_REPLICATION_SLOT_DROP);
+ ConditionVariableCancelSleep();
+ goto retry;
+ }
+ else if (behavior == SAB_Block)
+ ConditionVariableCancelSleep(); /* no sleep needed after all */
+
+ /* Let everybody know we've modified this slot */
+ ConditionVariableBroadcast(&s->active_cv);
+
+ /* We made this slot active, so it's ours now. */
+ MyReplicationSlot = s;
+
+ /* success */
+ return 0;
+}
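+
+/*
+ * Illustrative caller patterns (added for clarity, not part of the original
+ * file), matching the contract documented above:
+ *
+ *		ReplicationSlotAcquire(name, SAB_Error);	ERROR if already owned
+ *		ReplicationSlotAcquire(name, SAB_Block);	sleep until it is free
+ *		pid = ReplicationSlotAcquire(name, SAB_Inquire);
+ *												returns 0, -1, or owner PID
+ */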
+
+/*
+ * Release the replication slot that this backend considers to own.
+ *
+ * This or another backend can re-acquire the slot later.
+ * Resources this slot requires will be preserved.
+ */
+void
+ReplicationSlotRelease(void)
+{
+ ReplicationSlot *slot = MyReplicationSlot;
+
+ Assert(slot != NULL && slot->active_pid != 0);
+
+ if (slot->data.persistency == RS_EPHEMERAL)
+ {
+ /*
+ * Delete the slot. There is no !PANIC case where this is allowed to
+ * fail, all that may happen is an incomplete cleanup of the on-disk
+ * data.
+ */
+ ReplicationSlotDropAcquired();
+ }
+
+ /*
+ * If slot needed to temporarily restrain both data and catalog xmin to
+ * create the catalog snapshot, remove that temporary constraint.
+ * Snapshots can only be exported while the initial snapshot is still
+ * acquired.
+ */
+ if (!TransactionIdIsValid(slot->data.xmin) &&
+ TransactionIdIsValid(slot->effective_xmin))
+ {
+ SpinLockAcquire(&slot->mutex);
+ slot->effective_xmin = InvalidTransactionId;
+ SpinLockRelease(&slot->mutex);
+ ReplicationSlotsComputeRequiredXmin(false);
+ }
+
+ if (slot->data.persistency == RS_PERSISTENT)
+ {
+ /*
+ * Mark persistent slot inactive. We're not freeing it, just
+ * disconnecting, but wake up others that may be waiting for it.
+ */
+ SpinLockAcquire(&slot->mutex);
+ slot->active_pid = 0;
+ SpinLockRelease(&slot->mutex);
+ ConditionVariableBroadcast(&slot->active_cv);
+ }
+
+ MyReplicationSlot = NULL;
+
+ /* might not have been set when we've been a plain slot */
+ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+ MyPgXact->vacuumFlags &= ~PROC_IN_LOGICAL_DECODING;
+ LWLockRelease(ProcArrayLock);
+}
+
+/*
+ * Cleanup all temporary slots created in current session.
+ */
+void
+ReplicationSlotCleanup(void)
+{
+ int i;
+
+ Assert(MyReplicationSlot == NULL);
+
+restart:
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
+
+ if (!s->in_use)
+ continue;
+
+ SpinLockAcquire(&s->mutex);
+ if (s->active_pid == MyProcPid)
+ {
+ Assert(s->data.persistency == RS_TEMPORARY);
+ SpinLockRelease(&s->mutex);
+ LWLockRelease(ReplicationSlotControlLock); /* avoid deadlock */
+
+ ReplicationSlotDropPtr(s);
+
+ ConditionVariableBroadcast(&s->active_cv);
+ goto restart;
+ }
+ else
+ SpinLockRelease(&s->mutex);
+ }
+
+ LWLockRelease(ReplicationSlotControlLock);
+}
+
+/*
+ * Permanently drop replication slot identified by the passed in name.
+ */
+void
+ReplicationSlotDrop(const char *name, bool nowait)
+{
+ Assert(MyReplicationSlot == NULL);
+
+ (void) ReplicationSlotAcquire(name, nowait ? SAB_Error : SAB_Block);
+
+ ReplicationSlotDropAcquired();
+}
+
+/*
+ * Permanently drop the currently acquired replication slot.
+ */
+static void
+ReplicationSlotDropAcquired(void)
+{
+ ReplicationSlot *slot = MyReplicationSlot;
+
+ Assert(MyReplicationSlot != NULL);
+
+ /* slot isn't acquired anymore */
+ MyReplicationSlot = NULL;
+
+ ReplicationSlotDropPtr(slot);
+}
+
+/*
+ * Permanently drop the replication slot which will be released by the point
+ * this function returns.
+ */
+static void
+ReplicationSlotDropPtr(ReplicationSlot *slot)
+{
+ char path[MAXPGPATH];
+ char tmppath[MAXPGPATH];
+
+ /*
+ * If some other backend ran this code concurrently with us, we might try
+ * to delete a slot with a certain name while someone else was trying to
+ * create a slot with the same name.
+ */
+ LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);
+
+ /* Generate pathnames. */
+ sprintf(path, "pg_replslot/%s", NameStr(slot->data.name));
+ sprintf(tmppath, "pg_replslot/%s.tmp", NameStr(slot->data.name));
+
+ /*
+ * Rename the slot directory on disk, so that we'll no longer recognize
+ * this as a valid slot. Note that if this fails, we've got to mark the
+ * slot inactive before bailing out. If we're dropping an ephemeral or a
+ * temporary slot, we better never fail hard as the caller won't expect
+ * the slot to survive and this might get called during error handling.
+ */
+ if (rename(path, tmppath) == 0)
+ {
+ /*
+ * We need to fsync() the directory we just renamed and its parent to
+ * make sure that our changes are on disk in a crash-safe fashion. If
+ * fsync() fails, we can't be sure whether the changes are on disk or
+ * not. For now, we handle that by panicking;
+ * StartupReplicationSlots() will try to straighten it out after
+ * restart.
+ */
+ START_CRIT_SECTION();
+ fsync_fname(tmppath, true);
+ fsync_fname("pg_replslot", true);
+ END_CRIT_SECTION();
+ }
+ else
+ {
+ bool fail_softly = slot->data.persistency != RS_PERSISTENT;
+
+ SpinLockAcquire(&slot->mutex);
+ slot->active_pid = 0;
+ SpinLockRelease(&slot->mutex);
+
+ /* wake up anyone waiting on this slot */
+ ConditionVariableBroadcast(&slot->active_cv);
+
+ ereport(fail_softly ? WARNING : ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not rename file \"%s\" to \"%s\": %m",
+ path, tmppath)));
+ }
+
+ /*
+ * The slot is definitely gone. Lock out concurrent scans of the array
+ * long enough to kill it. It's OK to clear the active PID here without
+ * grabbing the mutex because nobody else can be scanning the array here,
+ * and nobody can be attached to this slot and thus access it without
+ * scanning the array.
+ *
+ * Also wake up processes waiting for it.
+ */
+ LWLockAcquire(ReplicationSlotControlLock, LW_EXCLUSIVE);
+ slot->active_pid = 0;
+ slot->in_use = false;
+ LWLockRelease(ReplicationSlotControlLock);
+ ConditionVariableBroadcast(&slot->active_cv);
+
+ /*
+ * Slot is dead and doesn't prevent resource removal anymore, recompute
+ * limits.
+ */
+ ReplicationSlotsComputeRequiredXmin(false);
+ ReplicationSlotsComputeRequiredLSN();
+
+ /*
+ * If removing the directory fails, the worst thing that will happen is
+ * that the user won't be able to create a new slot with the same name
+ * until the next server restart. We warn about it, but that's all.
+ */
+ if (!rmtree(tmppath, true))
+ ereport(WARNING,
+ (errmsg("could not remove directory \"%s\"", tmppath)));
+
+ /*
+ * We release this at the very end, so that nobody starts trying to create
+ * a slot while we're still cleaning up the detritus of the old one.
+ */
+ LWLockRelease(ReplicationSlotAllocationLock);
+}
+
+/*
+ * Serialize the currently acquired slot's state from memory to disk, thereby
+ * guaranteeing the current state will survive a crash.
+ */
+void
+ReplicationSlotSave(void)
+{
+ char path[MAXPGPATH];
+
+ Assert(MyReplicationSlot != NULL);
+
+ sprintf(path, "pg_replslot/%s", NameStr(MyReplicationSlot->data.name));
+ SaveSlotToPath(MyReplicationSlot, path, ERROR);
+}
+
+/*
+ * Signal that it would be useful if the currently acquired slot were
+ * flushed out to disk.
+ *
+ * Note that the actual flush to disk can be delayed for a long time; if
+ * required for correctness, explicitly do a ReplicationSlotSave().
+ */
+void
+ReplicationSlotMarkDirty(void)
+{
+ ReplicationSlot *slot = MyReplicationSlot;
+
+ Assert(MyReplicationSlot != NULL);
+
+ SpinLockAcquire(&slot->mutex);
+ MyReplicationSlot->just_dirtied = true;
+ MyReplicationSlot->dirty = true;
+ SpinLockRelease(&slot->mutex);
+}
+
+/*
+ * Convert a slot that's marked as RS_EPHEMERAL to a RS_PERSISTENT slot,
+ * guaranteeing it will be there after an eventual crash.
+ */
+void
+ReplicationSlotPersist(void)
+{
+ ReplicationSlot *slot = MyReplicationSlot;
+
+ Assert(slot != NULL);
+ Assert(slot->data.persistency != RS_PERSISTENT);
+
+ SpinLockAcquire(&slot->mutex);
+ slot->data.persistency = RS_PERSISTENT;
+ SpinLockRelease(&slot->mutex);
+
+ ReplicationSlotMarkDirty();
+ ReplicationSlotSave();
+}
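+
+/*
+ * Illustrative lifecycle sketch (an assumption based on the persistency
+ * levels used in this file, not a quotation of caller code): a logical slot
+ * is typically created ephemeral and upgraded once its initial state is
+ * safely established:
+ *
+ *		ReplicationSlotCreate("myslot", true, RS_EPHEMERAL);
+ *		... build the initial snapshot and reserve WAL ...
+ *		ReplicationSlotPersist();	upgrades RS_EPHEMERAL to RS_PERSISTENT
+ *		ReplicationSlotRelease();
+ *
+ * If an error occurs before ReplicationSlotPersist(), ReplicationSlotRelease()
+ * drops the ephemeral slot automatically (see its RS_EPHEMERAL branch).
+ */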
+
+/*
+ * Compute the oldest xmin across all slots and store it in the ProcArray.
+ *
+ * If already_locked is true, ProcArrayLock has already been acquired
+ * exclusively.
+ */
+void
+ReplicationSlotsComputeRequiredXmin(bool already_locked)
+{
+ int i;
+ TransactionId agg_xmin = InvalidTransactionId;
+ TransactionId agg_catalog_xmin = InvalidTransactionId;
+
+ Assert(ReplicationSlotCtl != NULL);
+
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
+ TransactionId effective_xmin;
+ TransactionId effective_catalog_xmin;
+
+ if (!s->in_use)
+ continue;
+
+ SpinLockAcquire(&s->mutex);
+ effective_xmin = s->effective_xmin;
+ effective_catalog_xmin = s->effective_catalog_xmin;
+ SpinLockRelease(&s->mutex);
+
+ /* check the data xmin */
+ if (TransactionIdIsValid(effective_xmin) &&
+ (!TransactionIdIsValid(agg_xmin) ||
+ TransactionIdPrecedes(effective_xmin, agg_xmin)))
+ agg_xmin = effective_xmin;
+
+ /* check the catalog xmin */
+ if (TransactionIdIsValid(effective_catalog_xmin) &&
+ (!TransactionIdIsValid(agg_catalog_xmin) ||
+ TransactionIdPrecedes(effective_catalog_xmin, agg_catalog_xmin)))
+ agg_catalog_xmin = effective_catalog_xmin;
+ }
+
+ LWLockRelease(ReplicationSlotControlLock);
+
+ ProcArraySetReplicationSlotXmin(agg_xmin, agg_catalog_xmin, already_locked);
+}
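+
+/*
+ * Worked example (added for clarity, not part of the original file): with
+ * two in-use slots whose effective_xmin values are 605 and 580, the loop
+ * above computes agg_xmin = 580, since TransactionIdPrecedes(580, 605).
+ * That older horizon is what ProcArraySetReplicationSlotXmin() publishes,
+ * so vacuum keeps tuple versions as far back as the slowest slot needs.
+ */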
+
+/*
+ * Compute the oldest restart LSN across all slots and inform xlog module.
+ *
+ * Note: while max_slot_wal_keep_size is theoretically relevant for this
+ * purpose, we don't try to account for that, because this module doesn't
+ * know what to compare against.
+ */
+void
+ReplicationSlotsComputeRequiredLSN(void)
+{
+ int i;
+ XLogRecPtr min_required = InvalidXLogRecPtr;
+
+ Assert(ReplicationSlotCtl != NULL);
+
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
+ XLogRecPtr restart_lsn;
+
+ if (!s->in_use)
+ continue;
+
+ SpinLockAcquire(&s->mutex);
+ restart_lsn = s->data.restart_lsn;
+ SpinLockRelease(&s->mutex);
+
+ if (restart_lsn != InvalidXLogRecPtr &&
+ (min_required == InvalidXLogRecPtr ||
+ restart_lsn < min_required))
+ min_required = restart_lsn;
+ }
+ LWLockRelease(ReplicationSlotControlLock);
+
+ XLogSetReplicationSlotMinimumLSN(min_required);
+}
+
+/*
+ * Compute the oldest WAL LSN required by *logical* decoding slots.
+ *
+ * Returns InvalidXLogRecPtr if logical decoding is disabled or no logical
+ * slots exist.
+ *
+ * NB: this returns a value >= ReplicationSlotsComputeRequiredLSN(), since it
+ * ignores physical replication slots.
+ *
+ * The results aren't required frequently, so we don't maintain a precomputed
+ * value like we do for ComputeRequiredLSN() and ComputeRequiredXmin().
+ */
+XLogRecPtr
+ReplicationSlotsComputeLogicalRestartLSN(void)
+{
+ XLogRecPtr result = InvalidXLogRecPtr;
+ int i;
+
+ if (max_replication_slots <= 0)
+ return InvalidXLogRecPtr;
+
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s;
+ XLogRecPtr restart_lsn;
+
+ s = &ReplicationSlotCtl->replication_slots[i];
+
+		/* cannot change while ReplicationSlotControlLock is held */
+ if (!s->in_use)
+ continue;
+
+ /* we're only interested in logical slots */
+ if (!SlotIsLogical(s))
+ continue;
+
+ /* read once, it's ok if it increases while we're checking */
+ SpinLockAcquire(&s->mutex);
+ restart_lsn = s->data.restart_lsn;
+ SpinLockRelease(&s->mutex);
+
+ if (restart_lsn == InvalidXLogRecPtr)
+ continue;
+
+ if (result == InvalidXLogRecPtr ||
+ restart_lsn < result)
+ result = restart_lsn;
+ }
+
+ LWLockRelease(ReplicationSlotControlLock);
+
+ return result;
+}
+
+/*
+ * ReplicationSlotsCountDBSlots -- count the number of slots that refer to the
+ * passed database oid.
+ *
+ * Returns true if there are any slots referencing the database. *nslots will
+ * be set to the absolute number of slots in the database, *nactive to the
+ * number currently active.
+ */
+bool
+ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive)
+{
+ int i;
+
+ *nslots = *nactive = 0;
+
+ if (max_replication_slots <= 0)
+ return false;
+
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s;
+
+ s = &ReplicationSlotCtl->replication_slots[i];
+
+		/* cannot change while ReplicationSlotControlLock is held */
+ if (!s->in_use)
+ continue;
+
+ /* only logical slots are database specific, skip */
+ if (!SlotIsLogical(s))
+ continue;
+
+ /* not our database, skip */
+ if (s->data.database != dboid)
+ continue;
+
+ /* count slots with spinlock held */
+ SpinLockAcquire(&s->mutex);
+ (*nslots)++;
+ if (s->active_pid != 0)
+ (*nactive)++;
+ SpinLockRelease(&s->mutex);
+ }
+ LWLockRelease(ReplicationSlotControlLock);
+
+ if (*nslots > 0)
+ return true;
+ return false;
+}
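+
+/*
+ * Illustrative use (an assumption about caller behavior, not shown in this
+ * file): DROP DATABASE can refuse to drop a database that still has slots:
+ *
+ *		if (ReplicationSlotsCountDBSlots(db_id, &nslots, &nslots_active))
+ *			ereport(ERROR,
+ *					(errmsg("database is used by %d replication slot(s)",
+ *							nslots)));
+ */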
+
+/*
+ * ReplicationSlotsDropDBSlots -- Drop all db-specific slots relating to the
+ * passed database oid. The caller should hold an exclusive lock on the
+ * pg_database oid for the database to prevent creation of new slots on the db
+ * or replay from existing slots.
+ *
+ * Another session that concurrently acquires an existing slot on the target DB
+ * (most likely to drop it) may cause this function to ERROR. If that happens
+ * it may have dropped some but not all slots.
+ *
+ * This routine isn't as efficient as it could be - but we don't drop
+ * databases often, especially databases with lots of slots.
+ */
+void
+ReplicationSlotsDropDBSlots(Oid dboid)
+{
+ int i;
+
+ if (max_replication_slots <= 0)
+ return;
+
+restart:
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s;
+ char *slotname;
+ int active_pid;
+
+ s = &ReplicationSlotCtl->replication_slots[i];
+
+		/* cannot change while ReplicationSlotControlLock is held */
+ if (!s->in_use)
+ continue;
+
+ /* only logical slots are database specific, skip */
+ if (!SlotIsLogical(s))
+ continue;
+
+ /* not our database, skip */
+ if (s->data.database != dboid)
+ continue;
+
+ /* acquire slot, so ReplicationSlotDropAcquired can be reused */
+ SpinLockAcquire(&s->mutex);
+ /* can't change while ReplicationSlotControlLock is held */
+ slotname = NameStr(s->data.name);
+ active_pid = s->active_pid;
+ if (active_pid == 0)
+ {
+ MyReplicationSlot = s;
+ s->active_pid = MyProcPid;
+ }
+ SpinLockRelease(&s->mutex);
+
+ /*
+ * Even though we hold an exclusive lock on the database object a
+ * logical slot for that DB can still be active, e.g. if it's
+ * concurrently being dropped by a backend connected to another DB.
+ *
+ * That's fairly unlikely in practice, so we'll just bail out.
+ */
+ if (active_pid)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_IN_USE),
+ errmsg("replication slot \"%s\" is active for PID %d",
+ slotname, active_pid)));
+
+ /*
+ * To avoid duplicating ReplicationSlotDropAcquired() and to avoid
+ * holding ReplicationSlotControlLock over filesystem operations,
+ * release ReplicationSlotControlLock and use
+ * ReplicationSlotDropAcquired.
+ *
+ * As that means the set of slots could change, restart scan from the
+ * beginning each time we release the lock.
+ */
+ LWLockRelease(ReplicationSlotControlLock);
+ ReplicationSlotDropAcquired();
+ goto restart;
+ }
+ LWLockRelease(ReplicationSlotControlLock);
+}
+
+
+/*
+ * Check whether the server's configuration supports using replication
+ * slots.
+ */
+void
+CheckSlotRequirements(void)
+{
+ /*
+ * NB: Adding a new requirement likely means that RestoreSlotFromDisk()
+ * needs the same check.
+ */
+
+ if (max_replication_slots == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("replication slots can only be used if max_replication_slots > 0")));
+
+ if (wal_level < WAL_LEVEL_REPLICA)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("replication slots can only be used if wal_level >= replica")));
+}
+
+/*
+ * Reserve WAL for the currently active slot.
+ *
+ * Compute and set restart_lsn in a manner that's appropriate for the type of
+ * the slot and concurrency safe.
+ */
+void
+ReplicationSlotReserveWal(void)
+{
+ ReplicationSlot *slot = MyReplicationSlot;
+
+ Assert(slot != NULL);
+ Assert(slot->data.restart_lsn == InvalidXLogRecPtr);
+
+ /*
+ * The replication slot mechanism is used to prevent removal of required
+ * WAL. As there is no interlock between this routine and checkpoints, WAL
+ * segments could concurrently be removed when a now stale return value of
+ * ReplicationSlotsComputeRequiredLSN() is used. In the unlikely case that
+ * this happens we'll just retry.
+ */
+ while (true)
+ {
+ XLogSegNo segno;
+ XLogRecPtr restart_lsn;
+
+ /*
+ * For logical slots log a standby snapshot and start logical decoding
+ * at exactly that position. That allows the slot to start up more
+ * quickly.
+ *
+ * That's not needed (or indeed helpful) for physical slots as they'll
+ * start replay at the last logged checkpoint anyway. Instead return
+ * the location of the last redo LSN. While that slightly increases
+ * the chance that we have to retry, it's where a base backup has to
+ * start replay at.
+ */
+ if (!RecoveryInProgress() && SlotIsLogical(slot))
+ {
+ XLogRecPtr flushptr;
+
+ /* start at current insert position */
+ restart_lsn = GetXLogInsertRecPtr();
+ SpinLockAcquire(&slot->mutex);
+ slot->data.restart_lsn = restart_lsn;
+ SpinLockRelease(&slot->mutex);
+
+ /* make sure we have enough information to start */
+ flushptr = LogStandbySnapshot();
+
+ /* and make sure it's fsynced to disk */
+ XLogFlush(flushptr);
+ }
+ else
+ {
+ restart_lsn = GetRedoRecPtr();
+ SpinLockAcquire(&slot->mutex);
+ slot->data.restart_lsn = restart_lsn;
+ SpinLockRelease(&slot->mutex);
+ }
+
+ /* prevent WAL removal as fast as possible */
+ ReplicationSlotsComputeRequiredLSN();
+
+ /*
+ * If all required WAL is still there, great, otherwise retry. The
+ * slot should prevent further removal of WAL, unless there's a
+ * concurrent ReplicationSlotsComputeRequiredLSN() after we've written
+ * the new restart_lsn above, so normally we should never need to loop
+ * more than twice.
+ */
+ XLByteToSeg(slot->data.restart_lsn, segno, wal_segment_size);
+ if (XLogGetLastRemovedSegno() < segno)
+ break;
+ }
+}
+
+/*
+ * Helper for InvalidateObsoleteReplicationSlots -- acquires the given slot
+ * and marks it invalid, if necessary and possible.
+ *
+ * Returns whether ReplicationSlotControlLock was released in the interim (and
+ * in that case we're not holding the lock at return, otherwise we are).
+ *
+ * Sets *invalidated true if the slot was invalidated. (Untouched otherwise.)
+ *
+ * This is inherently racy, because we release the LWLock
+ * for syscalls, so caller must restart if we return true.
+ */
+static bool
+InvalidatePossiblyObsoleteSlot(ReplicationSlot *s, XLogRecPtr oldestLSN,
+ bool *invalidated)
+{
+ int last_signaled_pid = 0;
+ bool released_lock = false;
+
+ for (;;)
+ {
+ XLogRecPtr restart_lsn;
+ NameData slotname;
+ int active_pid = 0;
+
+ Assert(LWLockHeldByMeInMode(ReplicationSlotControlLock, LW_SHARED));
+
+ if (!s->in_use)
+ {
+ if (released_lock)
+ LWLockRelease(ReplicationSlotControlLock);
+ break;
+ }
+
+ /*
+ * Check if the slot needs to be invalidated. If it needs to be
+ * invalidated, and is not currently acquired, acquire it and mark it
+ * as having been invalidated. We do this with the spinlock held to
+ * avoid race conditions -- for example the restart_lsn could move
+ * forward, or the slot could be dropped.
+ */
+ SpinLockAcquire(&s->mutex);
+
+ restart_lsn = s->data.restart_lsn;
+
+ /*
+ * If the slot is already invalid or is fresh enough, we don't need to
+ * do anything.
+ */
+ if (XLogRecPtrIsInvalid(restart_lsn) || restart_lsn >= oldestLSN)
+ {
+ SpinLockRelease(&s->mutex);
+ if (released_lock)
+ LWLockRelease(ReplicationSlotControlLock);
+ break;
+ }
+
+ slotname = s->data.name;
+ active_pid = s->active_pid;
+
+ /*
+ * If the slot can be acquired, do so and mark it invalidated
+ * immediately. Otherwise we'll signal the owning process, below, and
+ * retry.
+ */
+ if (active_pid == 0)
+ {
+ MyReplicationSlot = s;
+ s->active_pid = MyProcPid;
+ s->data.invalidated_at = restart_lsn;
+ s->data.restart_lsn = InvalidXLogRecPtr;
+
+ /* Let caller know */
+ *invalidated = true;
+ }
+
+ SpinLockRelease(&s->mutex);
+
+ if (active_pid != 0)
+ {
+ /*
+ * Prepare the sleep on the slot's condition variable before
+ * releasing the lock, to close a possible race condition if the
+ * slot is released before the sleep below.
+ */
+ ConditionVariablePrepareToSleep(&s->active_cv);
+
+ LWLockRelease(ReplicationSlotControlLock);
+ released_lock = true;
+
+ /*
+ * Signal to terminate the process that owns the slot, if we
+ * haven't already signalled it. (Avoidance of repeated
+ * signalling is the only reason for there to be a loop in this
+ * routine; otherwise we could rely on caller's restart loop.)
+ *
+			 * There is a race condition: another process may acquire the
+			 * slot after its current owner is terminated and before this
+			 * process acquires it. To handle that, we signal only if the PID of
+ * the owning process has changed from the previous time. (This
+ * logic assumes that the same PID is not reused very quickly.)
+ */
+ if (last_signaled_pid != active_pid)
+ {
+ ereport(LOG,
+ (errmsg("terminating process %d to release replication slot \"%s\"",
+ active_pid, NameStr(slotname))));
+
+ (void) kill(active_pid, SIGTERM);
+ last_signaled_pid = active_pid;
+ }
+
+ /* Wait until the slot is released. */
+ ConditionVariableSleep(&s->active_cv,
+ WAIT_EVENT_REPLICATION_SLOT_DROP);
+
+ /*
+ * Re-acquire lock and start over; we expect to invalidate the slot
+ * next time (unless another process acquires the slot in the
+ * meantime).
+ */
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+ continue;
+ }
+ else
+ {
+ /*
+ * We hold the slot now and have already invalidated it; flush it
+ * to ensure that state persists.
+ *
+ * Don't want to hold ReplicationSlotControlLock across file
+ * system operations, so release it now but be sure to tell caller
+ * to restart from scratch.
+ */
+ LWLockRelease(ReplicationSlotControlLock);
+ released_lock = true;
+
+ /* Make sure the invalidated state persists across server restart */
+ ReplicationSlotMarkDirty();
+ ReplicationSlotSave();
+ ReplicationSlotRelease();
+
+ ereport(LOG,
+ (errmsg("invalidating slot \"%s\" because its restart_lsn %X/%X exceeds max_slot_wal_keep_size",
+ NameStr(slotname),
+ (uint32) (restart_lsn >> 32),
+ (uint32) restart_lsn)));
+
+ /* done with this slot for now */
+ break;
+ }
+ }
+
+ Assert(released_lock == !LWLockHeldByMe(ReplicationSlotControlLock));
+
+ return released_lock;
+}
+
+/*
+ * Mark any slot that points to an LSN older than the given segment
+ * as invalid; it requires WAL that's about to be removed.
+ *
+ * Returns true if any slot was invalidated.
+ *
+ * NB - this runs as part of checkpoint, so avoid raising errors if possible.
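+ *
+ * For example, with max_slot_wal_keep_size = '1GB' (an illustrative value),
+ * a slot whose restart_lsn has fallen behind the WAL that the checkpointer
+ * is about to remove gets invalidated here instead of retaining WAL
+ * without bound.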
+ */
+bool
+InvalidateObsoleteReplicationSlots(XLogSegNo oldestSegno)
+{
+ XLogRecPtr oldestLSN;
+ bool invalidated = false;
+
+ XLogSegNoOffsetToRecPtr(oldestSegno, 0, wal_segment_size, oldestLSN);
+
+restart:
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+ for (int i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
+
+ if (!s->in_use)
+ continue;
+
+ if (InvalidatePossiblyObsoleteSlot(s, oldestLSN, &invalidated))
+ {
+ /* if the lock was released, start from scratch */
+ goto restart;
+ }
+ }
+ LWLockRelease(ReplicationSlotControlLock);
+
+ /*
+ * If any slots have been invalidated, recalculate the resource limits.
+ */
+ if (invalidated)
+ {
+ ReplicationSlotsComputeRequiredXmin(false);
+ ReplicationSlotsComputeRequiredLSN();
+ }
+
+ return invalidated;
+}
+
+/*
+ * Flush all replication slots to disk.
+ *
+ * This needn't actually be part of a checkpoint, but it's a convenient
+ * location.
+ */
+void
+CheckPointReplicationSlots(void)
+{
+ int i;
+
+ elog(DEBUG1, "performing replication slot checkpoint");
+
+ /*
+ * Prevent any slot from being created/dropped while we're active. As we
+ * explicitly do *not* want to block iterating over replication_slots or
+ * acquiring a slot we cannot take the control lock - but that's OK,
+ * because holding ReplicationSlotAllocationLock is strictly stronger, and
+ * enough to guarantee that nobody can change the in_use bits on us.
+ */
+ LWLockAcquire(ReplicationSlotAllocationLock, LW_SHARED);
+
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
+ char path[MAXPGPATH];
+
+ if (!s->in_use)
+ continue;
+
+ /* save the slot to disk, locking is handled in SaveSlotToPath() */
+ sprintf(path, "pg_replslot/%s", NameStr(s->data.name));
+ SaveSlotToPath(s, path, LOG);
+ }
+ LWLockRelease(ReplicationSlotAllocationLock);
+}
+
+/*
+ * Load all replication slots from disk into memory at server startup. This
+ * needs to be run before we start crash recovery.
+ */
+void
+StartupReplicationSlots(void)
+{
+ DIR *replication_dir;
+ struct dirent *replication_de;
+
+ elog(DEBUG1, "starting up replication slots");
+
+ /* restore all slots by iterating over all on-disk entries */
+ replication_dir = AllocateDir("pg_replslot");
+ while ((replication_de = ReadDir(replication_dir, "pg_replslot")) != NULL)
+ {
+ struct stat statbuf;
+ char path[MAXPGPATH + 12];
+
+ if (strcmp(replication_de->d_name, ".") == 0 ||
+ strcmp(replication_de->d_name, "..") == 0)
+ continue;
+
+ snprintf(path, sizeof(path), "pg_replslot/%s", replication_de->d_name);
+
+		/* slots are stored as directories, so skip anything that isn't one */
+ if (lstat(path, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode))
+ continue;
+
+		/* we crashed while a slot was being set up or deleted, clean up */
+ if (pg_str_endswith(replication_de->d_name, ".tmp"))
+ {
+ if (!rmtree(path, true))
+ {
+ ereport(WARNING,
+ (errmsg("could not remove directory \"%s\"",
+ path)));
+ continue;
+ }
+ fsync_fname("pg_replslot", true);
+ continue;
+ }
+
+ /* looks like a slot in a normal state, restore */
+ RestoreSlotFromDisk(replication_de->d_name);
+ }
+ FreeDir(replication_dir);
+
+	/* if replication slots are disabled, none could have been restored */
+ if (max_replication_slots <= 0)
+ return;
+
+ /* Now that we have recovered all the data, compute replication xmin */
+ ReplicationSlotsComputeRequiredXmin(false);
+ ReplicationSlotsComputeRequiredLSN();
+}
+
+/* ----
+ * Manipulation of on-disk state of replication slots
+ *
+ * NB: none of the routines below should take any notice whether a slot is the
+ * current one or not, that's all handled a layer above.
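+ *
+ * On disk, each slot lives in its own directory, e.g. (illustrative name):
+ *     pg_replslot/s1/state        -- fixed-format binary state file
+ *     pg_replslot/s1.tmp/         -- transient directory during create/drop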
+ * ----
+ */
+static void
+CreateSlotOnDisk(ReplicationSlot *slot)
+{
+ char tmppath[MAXPGPATH];
+ char path[MAXPGPATH];
+ struct stat st;
+
+ /*
+	 * No need to take out the io_in_progress_lock; nobody else can see this
+	 * slot yet, so nobody else will write. We're reusing SaveSlotToPath, which
+	 * takes out the lock itself; if we took the lock here, we'd deadlock.
+ */
+
+ sprintf(path, "pg_replslot/%s", NameStr(slot->data.name));
+ sprintf(tmppath, "pg_replslot/%s.tmp", NameStr(slot->data.name));
+
+ /*
+ * It's just barely possible that some previous effort to create or drop a
+ * slot with this name left a temp directory lying around. If that seems
+ * to be the case, try to remove it. If the rmtree() fails, we'll error
+ * out at the MakePGDirectory() below, so we don't bother checking
+ * success.
+ */
+ if (stat(tmppath, &st) == 0 && S_ISDIR(st.st_mode))
+ rmtree(tmppath, true);
+
+ /* Create and fsync the temporary slot directory. */
+ if (MakePGDirectory(tmppath) < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not create directory \"%s\": %m",
+ tmppath)));
+ fsync_fname(tmppath, true);
+
+ /* Write the actual state file. */
+ slot->dirty = true; /* signal that we really need to write */
+ SaveSlotToPath(slot, tmppath, ERROR);
+
+ /* Rename the directory into place. */
+ if (rename(tmppath, path) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not rename file \"%s\" to \"%s\": %m",
+ tmppath, path)));
+
+ /*
+	 * If we failed now (really unlikely), we wouldn't know whether this slot
+	 * would persist after an OS crash or not, so force a restart. The
+	 * restart will retry the fsync until it works.
+ */
+ START_CRIT_SECTION();
+
+ fsync_fname(path, true);
+ fsync_fname("pg_replslot", true);
+
+ END_CRIT_SECTION();
+}
+
+/*
+ * Shared functionality between saving and creating a replication slot.
+ */
+static void
+SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
+{
+ char tmppath[MAXPGPATH];
+ char path[MAXPGPATH];
+ int fd;
+ ReplicationSlotOnDisk cp;
+ bool was_dirty;
+
+ /* first check whether there's something to write out */
+ SpinLockAcquire(&slot->mutex);
+ was_dirty = slot->dirty;
+ slot->just_dirtied = false;
+ SpinLockRelease(&slot->mutex);
+
+ /* and don't do anything if there's nothing to write */
+ if (!was_dirty)
+ return;
+
+ LWLockAcquire(&slot->io_in_progress_lock, LW_EXCLUSIVE);
+
+ /* silence valgrind :( */
+ memset(&cp, 0, sizeof(ReplicationSlotOnDisk));
+
+ sprintf(tmppath, "%s/state.tmp", dir);
+ sprintf(path, "%s/state", dir);
+
+ fd = OpenTransientFile(tmppath, O_CREAT | O_EXCL | O_WRONLY | PG_BINARY);
+ if (fd < 0)
+ {
+ /*
+ * If not an ERROR, then release the lock before returning. In case
+ * of an ERROR, the error recovery path automatically releases the
+ * lock, but no harm in explicitly releasing even in that case. Note
+ * that LWLockRelease() could affect errno.
+ */
+ int save_errno = errno;
+
+ LWLockRelease(&slot->io_in_progress_lock);
+ errno = save_errno;
+ ereport(elevel,
+ (errcode_for_file_access(),
+ errmsg("could not create file \"%s\": %m",
+ tmppath)));
+ return;
+ }
+
+ cp.magic = SLOT_MAGIC;
+ INIT_CRC32C(cp.checksum);
+ cp.version = SLOT_VERSION;
+ cp.length = ReplicationSlotOnDiskV2Size;
+
+ SpinLockAcquire(&slot->mutex);
+
+ memcpy(&cp.slotdata, &slot->data, sizeof(ReplicationSlotPersistentData));
+
+ SpinLockRelease(&slot->mutex);
+
+ COMP_CRC32C(cp.checksum,
+				(char *) (&cp) + ReplicationSlotOnDiskNotChecksummedSize,
+				ReplicationSlotOnDiskChecksummedSize);
+ FIN_CRC32C(cp.checksum);
+
+ errno = 0;
+ pgstat_report_wait_start(WAIT_EVENT_REPLICATION_SLOT_WRITE);
+ if ((write(fd, &cp, sizeof(cp))) != sizeof(cp))
+ {
+ int save_errno = errno;
+
+ pgstat_report_wait_end();
+ CloseTransientFile(fd);
+ LWLockRelease(&slot->io_in_progress_lock);
+
+ /* if write didn't set errno, assume problem is no disk space */
+ errno = save_errno ? save_errno : ENOSPC;
+ ereport(elevel,
+ (errcode_for_file_access(),
+ errmsg("could not write to file \"%s\": %m",
+ tmppath)));
+ return;
+ }
+ pgstat_report_wait_end();
+
+ /* fsync the temporary file */
+ pgstat_report_wait_start(WAIT_EVENT_REPLICATION_SLOT_SYNC);
+ if (pg_fsync(fd) != 0)
+ {
+ int save_errno = errno;
+
+ pgstat_report_wait_end();
+ CloseTransientFile(fd);
+ LWLockRelease(&slot->io_in_progress_lock);
+ errno = save_errno;
+ ereport(elevel,
+ (errcode_for_file_access(),
+ errmsg("could not fsync file \"%s\": %m",
+ tmppath)));
+ return;
+ }
+ pgstat_report_wait_end();
+
+ if (CloseTransientFile(fd) != 0)
+ {
+ int save_errno = errno;
+
+ LWLockRelease(&slot->io_in_progress_lock);
+ errno = save_errno;
+ ereport(elevel,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m",
+ tmppath)));
+ return;
+ }
+
+ /* rename to permanent file, fsync file and directory */
+ if (rename(tmppath, path) != 0)
+ {
+ int save_errno = errno;
+
+ LWLockRelease(&slot->io_in_progress_lock);
+ errno = save_errno;
+ ereport(elevel,
+ (errcode_for_file_access(),
+ errmsg("could not rename file \"%s\" to \"%s\": %m",
+ tmppath, path)));
+ return;
+ }
+
+ /*
+ * Check CreateSlotOnDisk() for the reasoning of using a critical section.
+ */
+ START_CRIT_SECTION();
+
+ fsync_fname(path, false);
+ fsync_fname(dir, true);
+ fsync_fname("pg_replslot", true);
+
+ END_CRIT_SECTION();
+
+ /*
+ * Successfully wrote, unset dirty bit, unless somebody dirtied again
+ * already.
+ */
+ SpinLockAcquire(&slot->mutex);
+ if (!slot->just_dirtied)
+ slot->dirty = false;
+ SpinLockRelease(&slot->mutex);
+
+ LWLockRelease(&slot->io_in_progress_lock);
+}
+
+/*
+ * Load a single slot from disk into memory.
+ */
+static void
+RestoreSlotFromDisk(const char *name)
+{
+ ReplicationSlotOnDisk cp;
+ int i;
+ char slotdir[MAXPGPATH + 12];
+ char path[MAXPGPATH + 22];
+ int fd;
+ bool restored = false;
+ int readBytes;
+ pg_crc32c checksum;
+
+ /* no need to lock here, no concurrent access allowed yet */
+
+ /* delete temp file if it exists */
+ sprintf(slotdir, "pg_replslot/%s", name);
+ sprintf(path, "%s/state.tmp", slotdir);
+ if (unlink(path) < 0 && errno != ENOENT)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not remove file \"%s\": %m", path)));
+
+ sprintf(path, "%s/state", slotdir);
+
+ elog(DEBUG1, "restoring replication slot from \"%s\"", path);
+
+ /* on some operating systems fsyncing a file requires O_RDWR */
+ fd = OpenTransientFile(path, O_RDWR | PG_BINARY);
+
+ /*
+	 * A missing state file need not be handled gracefully here: the
+	 * directory is rename()d into place only after the state file has been
+	 * fsync()ed, so it must exist.
+ */
+ if (fd < 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m", path)));
+
+ /*
+ * Sync state file before we're reading from it. We might have crashed
+ * while it wasn't synced yet and we shouldn't continue on that basis.
+ */
+ pgstat_report_wait_start(WAIT_EVENT_REPLICATION_SLOT_RESTORE_SYNC);
+ if (pg_fsync(fd) != 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not fsync file \"%s\": %m",
+ path)));
+ pgstat_report_wait_end();
+
+ /* Also sync the parent directory */
+ START_CRIT_SECTION();
+ fsync_fname(slotdir, true);
+ END_CRIT_SECTION();
+
+ /* read part of statefile that's guaranteed to be version independent */
+ pgstat_report_wait_start(WAIT_EVENT_REPLICATION_SLOT_READ);
+ readBytes = read(fd, &cp, ReplicationSlotOnDiskConstantSize);
+ pgstat_report_wait_end();
+ if (readBytes != ReplicationSlotOnDiskConstantSize)
+ {
+ if (readBytes < 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m", path)));
+ else
+ ereport(PANIC,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("could not read file \"%s\": read %d of %zu",
+ path, readBytes,
+ (Size) ReplicationSlotOnDiskConstantSize)));
+ }
+
+ /* verify magic */
+ if (cp.magic != SLOT_MAGIC)
+ ereport(PANIC,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("replication slot file \"%s\" has wrong magic number: %u instead of %u",
+ path, cp.magic, SLOT_MAGIC)));
+
+ /* verify version */
+ if (cp.version != SLOT_VERSION)
+ ereport(PANIC,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("replication slot file \"%s\" has unsupported version %u",
+ path, cp.version)));
+
+ /* boundary check on length */
+ if (cp.length != ReplicationSlotOnDiskV2Size)
+ ereport(PANIC,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("replication slot file \"%s\" has corrupted length %u",
+ path, cp.length)));
+
+ /* Now that we know the size, read the entire file */
+ pgstat_report_wait_start(WAIT_EVENT_REPLICATION_SLOT_READ);
+ readBytes = read(fd,
+ (char *) &cp + ReplicationSlotOnDiskConstantSize,
+ cp.length);
+ pgstat_report_wait_end();
+ if (readBytes != cp.length)
+ {
+ if (readBytes < 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m", path)));
+ else
+ ereport(PANIC,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("could not read file \"%s\": read %d of %zu",
+ path, readBytes, (Size) cp.length)));
+ }
+
+ if (CloseTransientFile(fd) != 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m", path)));
+
+ /* now verify the CRC */
+ INIT_CRC32C(checksum);
+ COMP_CRC32C(checksum,
+				(char *) &cp + ReplicationSlotOnDiskNotChecksummedSize,
+				ReplicationSlotOnDiskChecksummedSize);
+ FIN_CRC32C(checksum);
+
+ if (!EQ_CRC32C(checksum, cp.checksum))
+ ereport(PANIC,
+ (errmsg("checksum mismatch for replication slot file \"%s\": is %u, should be %u",
+ path, checksum, cp.checksum)));
+
+ /*
+ * If we crashed with an ephemeral slot active, don't restore but delete
+ * it.
+ */
+ if (cp.slotdata.persistency != RS_PERSISTENT)
+ {
+ if (!rmtree(slotdir, true))
+ {
+ ereport(WARNING,
+ (errmsg("could not remove directory \"%s\"",
+ slotdir)));
+ }
+ fsync_fname("pg_replslot", true);
+ return;
+ }
+
+ /*
+ * Verify that requirements for the specific slot type are met. That's
+ * important because if these aren't met we're not guaranteed to retain
+ * all the necessary resources for the slot.
+ *
+ * NB: We have to do so *after* the above checks for ephemeral slots,
+ * because otherwise a slot that shouldn't exist anymore could prevent
+ * restarts.
+ *
+ * NB: Changing the requirements here also requires adapting
+ * CheckSlotRequirements() and CheckLogicalDecodingRequirements().
+ */
+ if (cp.slotdata.database != InvalidOid && wal_level < WAL_LEVEL_LOGICAL)
+ ereport(FATAL,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical replication slot \"%s\" exists, but wal_level < logical",
+ NameStr(cp.slotdata.name)),
+ errhint("Change wal_level to be logical or higher.")));
+ else if (wal_level < WAL_LEVEL_REPLICA)
+ ereport(FATAL,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("physical replication slot \"%s\" exists, but wal_level < replica",
+ NameStr(cp.slotdata.name)),
+ errhint("Change wal_level to be replica or higher.")));
+
+ /* nothing can be active yet, don't lock anything */
+ for (i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *slot;
+
+ slot = &ReplicationSlotCtl->replication_slots[i];
+
+ if (slot->in_use)
+ continue;
+
+ /* restore the entire set of persistent data */
+ memcpy(&slot->data, &cp.slotdata,
+ sizeof(ReplicationSlotPersistentData));
+
+ /* initialize in memory state */
+ slot->effective_xmin = cp.slotdata.xmin;
+ slot->effective_catalog_xmin = cp.slotdata.catalog_xmin;
+
+ slot->candidate_catalog_xmin = InvalidTransactionId;
+ slot->candidate_xmin_lsn = InvalidXLogRecPtr;
+ slot->candidate_restart_lsn = InvalidXLogRecPtr;
+ slot->candidate_restart_valid = InvalidXLogRecPtr;
+
+ slot->in_use = true;
+ slot->active_pid = 0;
+
+ restored = true;
+ break;
+ }
+
+ if (!restored)
+ ereport(FATAL,
+ (errmsg("too many replication slots active before shutdown"),
+ errhint("Increase max_replication_slots and try again.")));
+}
diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c
new file mode 100644
index 0000000..f886946
--- /dev/null
+++ b/src/backend/replication/slotfuncs.c
@@ -0,0 +1,948 @@
+/*-------------------------------------------------------------------------
+ *
+ * slotfuncs.c
+ * Support functions for replication slots
+ *
+ * Copyright (c) 2012-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/slotfuncs.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "access/xlog_internal.h"
+#include "access/xlogutils.h"
+#include "funcapi.h"
+#include "miscadmin.h"
+#include "replication/decode.h"
+#include "replication/logical.h"
+#include "replication/slot.h"
+#include "utils/builtins.h"
+#include "utils/inval.h"
+#include "utils/pg_lsn.h"
+#include "utils/resowner.h"
+
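+/*
+ * Ensure the caller is allowed to manipulate replication slots: superusers
+ * and roles with the REPLICATION attribute (e.g. one created with
+ * CREATE ROLE replicator REPLICATION LOGIN) qualify.
+ */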
+static void
+check_permissions(void)
+{
+ if (!superuser() && !has_rolreplication(GetUserId()))
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("must be superuser or replication role to use replication slots")));
+}
+
+/*
+ * Helper function for creating a new physical replication slot with
+ * given arguments. Note that this function doesn't release the created
+ * slot.
+ *
+ * If restart_lsn is a valid value, we use it without WAL reservation
+ * routine. So the caller must guarantee that WAL is available.
+ */
+static void
+create_physical_replication_slot(char *name, bool immediately_reserve,
+ bool temporary, XLogRecPtr restart_lsn)
+{
+ Assert(!MyReplicationSlot);
+
+ /* acquire replication slot, this will check for conflicting names */
+ ReplicationSlotCreate(name, false,
+ temporary ? RS_TEMPORARY : RS_PERSISTENT);
+
+ if (immediately_reserve)
+ {
+ /* Reserve WAL as the user asked for it */
+ if (XLogRecPtrIsInvalid(restart_lsn))
+ ReplicationSlotReserveWal();
+ else
+ MyReplicationSlot->data.restart_lsn = restart_lsn;
+
+ /* Write this slot to disk */
+ ReplicationSlotMarkDirty();
+ ReplicationSlotSave();
+ }
+}
+
+/*
+ * SQL function for creating a new physical (streaming replication)
+ * replication slot.
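+ *
+ * Example usage:
+ *     SELECT * FROM pg_create_physical_replication_slot('s1', true);
+ * creates slot "s1" and immediately reserves WAL for it.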
+ */
+Datum
+pg_create_physical_replication_slot(PG_FUNCTION_ARGS)
+{
+ Name name = PG_GETARG_NAME(0);
+ bool immediately_reserve = PG_GETARG_BOOL(1);
+ bool temporary = PG_GETARG_BOOL(2);
+ Datum values[2];
+ bool nulls[2];
+ TupleDesc tupdesc;
+ HeapTuple tuple;
+ Datum result;
+
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ check_permissions();
+
+ CheckSlotRequirements();
+
+ create_physical_replication_slot(NameStr(*name),
+ immediately_reserve,
+ temporary,
+ InvalidXLogRecPtr);
+
+ values[0] = NameGetDatum(&MyReplicationSlot->data.name);
+ nulls[0] = false;
+
+ if (immediately_reserve)
+ {
+ values[1] = LSNGetDatum(MyReplicationSlot->data.restart_lsn);
+ nulls[1] = false;
+ }
+ else
+ nulls[1] = true;
+
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+ result = HeapTupleGetDatum(tuple);
+
+ ReplicationSlotRelease();
+
+ PG_RETURN_DATUM(result);
+}
+
+
+/*
+ * Helper function for creating a new logical replication slot with
+ * given arguments. Note that this function doesn't release the created
+ * slot.
+ *
+ * When find_startpoint is false, the slot's confirmed_flush is not set; it's
+ * the caller's responsibility to ensure it's set to something sensible.
+ */
+static void
+create_logical_replication_slot(char *name, char *plugin,
+ bool temporary, XLogRecPtr restart_lsn,
+ bool find_startpoint)
+{
+ LogicalDecodingContext *ctx = NULL;
+
+ Assert(!MyReplicationSlot);
+
+ /*
+ * Acquire a logical decoding slot, this will check for conflicting names.
+ * Initially create persistent slot as ephemeral - that allows us to
+ * nicely handle errors during initialization because it'll get dropped if
+ * this transaction fails. We'll make it persistent at the end. Temporary
+ * slots can be created as temporary from beginning as they get dropped on
+ * error as well.
+ */
+ ReplicationSlotCreate(name, true,
+ temporary ? RS_TEMPORARY : RS_EPHEMERAL);
+
+ /*
+	 * Create a logical decoding context to find the start point or, if we
+	 * don't need it, to 1) bump the slot's restart_lsn and xmin and 2) check
+	 * plugin sanity.
+ *
+ * Note: when !find_startpoint this is still important, because it's at
+ * this point that the output plugin is validated.
+ */
+ ctx = CreateInitDecodingContext(plugin, NIL,
+ false, /* just catalogs is OK */
+ restart_lsn,
+ XL_ROUTINE(.page_read = read_local_xlog_page,
+ .segment_open = wal_segment_open,
+ .segment_close = wal_segment_close),
+ NULL, NULL, NULL);
+
+ /*
+ * If caller needs us to determine the decoding start point, do so now.
+ * This might take a while.
+ */
+ if (find_startpoint)
+ DecodingContextFindStartpoint(ctx);
+
+ /* don't need the decoding context anymore */
+ FreeDecodingContext(ctx);
+}
+
+/*
+ * SQL function for creating a new logical replication slot.
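+ *
+ * Example usage (assuming the contrib test_decoding output plugin is
+ * installed):
+ *     SELECT * FROM pg_create_logical_replication_slot('s1', 'test_decoding');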
+ */
+Datum
+pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
+{
+ Name name = PG_GETARG_NAME(0);
+ Name plugin = PG_GETARG_NAME(1);
+ bool temporary = PG_GETARG_BOOL(2);
+ Datum result;
+ TupleDesc tupdesc;
+ HeapTuple tuple;
+ Datum values[2];
+ bool nulls[2];
+
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ check_permissions();
+
+ CheckLogicalDecodingRequirements();
+
+ create_logical_replication_slot(NameStr(*name),
+ NameStr(*plugin),
+ temporary,
+ InvalidXLogRecPtr,
+ true);
+
+ values[0] = NameGetDatum(&MyReplicationSlot->data.name);
+ values[1] = LSNGetDatum(MyReplicationSlot->data.confirmed_flush);
+
+ memset(nulls, 0, sizeof(nulls));
+
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+ result = HeapTupleGetDatum(tuple);
+
+ /* ok, slot is now fully created, mark it as persistent if needed */
+ if (!temporary)
+ ReplicationSlotPersist();
+ ReplicationSlotRelease();
+
+ PG_RETURN_DATUM(result);
+}
+
+
+/*
+ * SQL function for dropping a replication slot.
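+ *
+ * Example usage:
+ *     SELECT pg_drop_replication_slot('s1');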
+ */
+Datum
+pg_drop_replication_slot(PG_FUNCTION_ARGS)
+{
+ Name name = PG_GETARG_NAME(0);
+
+ check_permissions();
+
+ CheckSlotRequirements();
+
+ ReplicationSlotDrop(NameStr(*name), true);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * pg_get_replication_slots - SQL SRF showing active replication slots.
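+ *
+ * This function underlies the pg_replication_slots system view, e.g.:
+ *     SELECT slot_name, active, wal_status, safe_wal_size
+ *     FROM pg_replication_slots;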
+ */
+Datum
+pg_get_replication_slots(PG_FUNCTION_ARGS)
+{
+#define PG_GET_REPLICATION_SLOTS_COLS 13
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ XLogRecPtr currlsn;
+ int slotno;
+
+ /* check to see if caller supports us returning a tuplestore */
+ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-valued function called in context that cannot accept a set")));
+ if (!(rsinfo->allowedModes & SFRM_Materialize))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("materialize mode required, but it is not allowed in this context")));
+
+ /* Build a tuple descriptor for our result type */
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ /*
+ * We don't require any special permission to see this function's data
+	 * because nothing should be sensitive. The most critical piece of
+	 * information is the slot name, which shouldn't contain anything
+	 * particularly sensitive.
+ */
+
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+
+ tupstore = tuplestore_begin_heap(true, false, work_mem);
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setResult = tupstore;
+ rsinfo->setDesc = tupdesc;
+
+ MemoryContextSwitchTo(oldcontext);
+
+ currlsn = GetXLogWriteRecPtr();
+
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+ for (slotno = 0; slotno < max_replication_slots; slotno++)
+ {
+ ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[slotno];
+ ReplicationSlot slot_contents;
+ Datum values[PG_GET_REPLICATION_SLOTS_COLS];
+ bool nulls[PG_GET_REPLICATION_SLOTS_COLS];
+ WALAvailability walstate;
+ int i;
+
+ if (!slot->in_use)
+ continue;
+
+ /* Copy slot contents while holding spinlock, then examine at leisure */
+ SpinLockAcquire(&slot->mutex);
+ slot_contents = *slot;
+ SpinLockRelease(&slot->mutex);
+
+ memset(values, 0, sizeof(values));
+ memset(nulls, 0, sizeof(nulls));
+
+ i = 0;
+ values[i++] = NameGetDatum(&slot_contents.data.name);
+
+ if (slot_contents.data.database == InvalidOid)
+ nulls[i++] = true;
+ else
+ values[i++] = NameGetDatum(&slot_contents.data.plugin);
+
+ if (slot_contents.data.database == InvalidOid)
+ values[i++] = CStringGetTextDatum("physical");
+ else
+ values[i++] = CStringGetTextDatum("logical");
+
+ if (slot_contents.data.database == InvalidOid)
+ nulls[i++] = true;
+ else
+ values[i++] = ObjectIdGetDatum(slot_contents.data.database);
+
+ values[i++] = BoolGetDatum(slot_contents.data.persistency == RS_TEMPORARY);
+ values[i++] = BoolGetDatum(slot_contents.active_pid != 0);
+
+ if (slot_contents.active_pid != 0)
+ values[i++] = Int32GetDatum(slot_contents.active_pid);
+ else
+ nulls[i++] = true;
+
+ if (slot_contents.data.xmin != InvalidTransactionId)
+ values[i++] = TransactionIdGetDatum(slot_contents.data.xmin);
+ else
+ nulls[i++] = true;
+
+ if (slot_contents.data.catalog_xmin != InvalidTransactionId)
+ values[i++] = TransactionIdGetDatum(slot_contents.data.catalog_xmin);
+ else
+ nulls[i++] = true;
+
+ if (slot_contents.data.restart_lsn != InvalidXLogRecPtr)
+ values[i++] = LSNGetDatum(slot_contents.data.restart_lsn);
+ else
+ nulls[i++] = true;
+
+ if (slot_contents.data.confirmed_flush != InvalidXLogRecPtr)
+ values[i++] = LSNGetDatum(slot_contents.data.confirmed_flush);
+ else
+ nulls[i++] = true;
+
+ /*
+ * If invalidated_at is valid and restart_lsn is invalid, we know for
+ * certain that the slot has been invalidated. Otherwise, test
+ * availability from restart_lsn.
+ */
+ if (XLogRecPtrIsInvalid(slot_contents.data.restart_lsn) &&
+ !XLogRecPtrIsInvalid(slot_contents.data.invalidated_at))
+ walstate = WALAVAIL_REMOVED;
+ else
+ walstate = GetWALAvailability(slot_contents.data.restart_lsn);
+
+ switch (walstate)
+ {
+ case WALAVAIL_INVALID_LSN:
+ nulls[i++] = true;
+ break;
+
+ case WALAVAIL_RESERVED:
+ values[i++] = CStringGetTextDatum("reserved");
+ break;
+
+ case WALAVAIL_EXTENDED:
+ values[i++] = CStringGetTextDatum("extended");
+ break;
+
+ case WALAVAIL_UNRESERVED:
+ values[i++] = CStringGetTextDatum("unreserved");
+ break;
+
+ case WALAVAIL_REMOVED:
+
+ /*
+ * If we read the restart_lsn long enough ago, maybe that file
+ * has been removed by now. However, the walsender could have
+ * moved forward enough that it jumped to another file after
+			 * we looked. If the checkpointer signalled the process to
+			 * terminate, then it's definitely lost; but if a process is
+ * still alive, then "unreserved" seems more appropriate.
+ *
+ * If we do change it, save the state for safe_wal_size below.
+ */
+ if (!XLogRecPtrIsInvalid(slot_contents.data.restart_lsn))
+ {
+ int pid;
+
+ SpinLockAcquire(&slot->mutex);
+ pid = slot->active_pid;
+ slot_contents.data.restart_lsn = slot->data.restart_lsn;
+ SpinLockRelease(&slot->mutex);
+ if (pid != 0)
+ {
+ values[i++] = CStringGetTextDatum("unreserved");
+ walstate = WALAVAIL_UNRESERVED;
+ break;
+ }
+ }
+ values[i++] = CStringGetTextDatum("lost");
+ break;
+ }
+
+ /*
+ * safe_wal_size is only computed for slots that have not been lost,
+ * and only if there's a configured maximum size.
+ */
+ if (walstate == WALAVAIL_REMOVED || max_slot_wal_keep_size_mb < 0)
+ nulls[i++] = true;
+ else
+ {
+ XLogSegNo targetSeg;
+ uint64 slotKeepSegs;
+ uint64 keepSegs;
+ XLogSegNo failSeg;
+ XLogRecPtr failLSN;
+
+ XLByteToSeg(slot_contents.data.restart_lsn, targetSeg, wal_segment_size);
+
+			/* determine how many segments can be kept by slots */
+ slotKeepSegs = XLogMBVarToSegs(max_slot_wal_keep_size_mb, wal_segment_size);
+ /* ditto for wal_keep_size */
+ keepSegs = XLogMBVarToSegs(wal_keep_size_mb, wal_segment_size);
+
+ /* if currpos reaches failLSN, we lose our segment */
+ failSeg = targetSeg + Max(slotKeepSegs, keepSegs) + 1;
+ XLogSegNoOffsetToRecPtr(failSeg, 0, wal_segment_size, failLSN);
+
+ values[i++] = Int64GetDatum(failLSN - currlsn);
+ }
+
+ Assert(i == PG_GET_REPLICATION_SLOTS_COLS);
+
+ tuplestore_putvalues(tupstore, tupdesc, values, nulls);
+ }
+
+ LWLockRelease(ReplicationSlotControlLock);
+
+ tuplestore_donestoring(tupstore);
+
+ return (Datum) 0;
+}
+
+/*
+ * Helper function for advancing our physical replication slot forward.
+ *
+ * The LSN position to move to is compared simply to the slot's restart_lsn,
+ * knowing that any position older than that would be removed by successive
+ * checkpoints.
+ */
+static XLogRecPtr
+pg_physical_replication_slot_advance(XLogRecPtr moveto)
+{
+ XLogRecPtr startlsn = MyReplicationSlot->data.restart_lsn;
+ XLogRecPtr retlsn = startlsn;
+
+ Assert(moveto != InvalidXLogRecPtr);
+
+ if (startlsn < moveto)
+ {
+ SpinLockAcquire(&MyReplicationSlot->mutex);
+ MyReplicationSlot->data.restart_lsn = moveto;
+ SpinLockRelease(&MyReplicationSlot->mutex);
+ retlsn = moveto;
+
+ /*
+		 * Dirty the slot so that it is written out at the next checkpoint. Note
+ * that the LSN position advanced may still be lost in the event of a
+ * crash, but this makes the data consistent after a clean shutdown.
+ */
+ ReplicationSlotMarkDirty();
+ }
+
+ return retlsn;
+}
+
+/*
+ * Helper function for advancing our logical replication slot forward.
+ *
+ * The slot's restart_lsn is used as the start point for reading records,
+ * while confirmed_flush is used as the base point for the decoding context.
+ *
+ * We cannot just do LogicalConfirmReceivedLocation to update confirmed_flush,
+ * because we need to digest WAL to advance restart_lsn, which allows WAL to
+ * be recycled and old catalog tuples to be removed. As decoding is done in
+ * fast_forward mode, no changes are generated anyway.
+ */
+static XLogRecPtr
+pg_logical_replication_slot_advance(XLogRecPtr moveto)
+{
+ LogicalDecodingContext *ctx;
+ ResourceOwner old_resowner = CurrentResourceOwner;
+ XLogRecPtr retlsn;
+
+ Assert(moveto != InvalidXLogRecPtr);
+
+ PG_TRY();
+ {
+ /*
+ * Create our decoding context in fast_forward mode, passing start_lsn
+		 * as InvalidXLogRecPtr, so that we start processing from the slot's
+ * confirmed_flush.
+ */
+ ctx = CreateDecodingContext(InvalidXLogRecPtr,
+ NIL,
+ true, /* fast_forward */
+ XL_ROUTINE(.page_read = read_local_xlog_page,
+ .segment_open = wal_segment_open,
+ .segment_close = wal_segment_close),
+ NULL, NULL, NULL);
+
+ /*
+ * Start reading at the slot's restart_lsn, which we know to point to
+ * a valid record.
+ */
+ XLogBeginRead(ctx->reader, MyReplicationSlot->data.restart_lsn);
+
+ /* Initialize our return value in case we don't do anything */
+ retlsn = MyReplicationSlot->data.confirmed_flush;
+
+ /* invalidate non-timetravel entries */
+ InvalidateSystemCaches();
+
+ /* Decode at least one record, until we run out of records */
+ while (ctx->reader->EndRecPtr < moveto)
+ {
+ char *errm = NULL;
+ XLogRecord *record;
+
+ /*
+ * Read records. No changes are generated in fast_forward mode,
+ * but snapbuilder/slot statuses are updated properly.
+ */
+ record = XLogReadRecord(ctx->reader, &errm);
+ if (errm)
+ elog(ERROR, "%s", errm);
+
+ /*
+ * Process the record. Storage-level changes are ignored in
+ * fast_forward mode, but other modules (such as snapbuilder)
+ * might still have critical updates to do.
+ */
+ if (record)
+ LogicalDecodingProcessRecord(ctx, ctx->reader);
+
+ /* Stop once the requested target has been reached */
+ if (moveto <= ctx->reader->EndRecPtr)
+ break;
+
+ CHECK_FOR_INTERRUPTS();
+ }
+
+ /*
+ * Logical decoding could have clobbered CurrentResourceOwner during
+ * transaction management, so restore the executor's value. (This is
+ * a kluge, but it's not worth cleaning up right now.)
+ */
+ CurrentResourceOwner = old_resowner;
+
+ if (ctx->reader->EndRecPtr != InvalidXLogRecPtr)
+ {
+ LogicalConfirmReceivedLocation(moveto);
+
+ /*
+ * If only the confirmed_flush LSN has changed the slot won't get
+ * marked as dirty by the above. Callers on the walsender
+ * interface are expected to keep track of their own progress and
+ * don't need it written out. But SQL-interface users cannot
+ * specify their own start positions and it's harder for them to
+ * keep track of their progress, so we should make more of an
+ * effort to save it for them.
+ *
+ * Dirty the slot so it is written out at the next checkpoint. The
+ * LSN position advanced to may still be lost on a crash but this
+ * makes the data consistent after a clean shutdown.
+ */
+ ReplicationSlotMarkDirty();
+ }
+
+ retlsn = MyReplicationSlot->data.confirmed_flush;
+
+ /* free context, call shutdown callback */
+ FreeDecodingContext(ctx);
+
+ InvalidateSystemCaches();
+ }
+ PG_CATCH();
+ {
+ /* clear all timetravel entries */
+ InvalidateSystemCaches();
+
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ return retlsn;
+}
+
+/*
+ * SQL function for moving the position in a replication slot.
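+ *
+ * Example usage (advance up to the current write position):
+ *     SELECT * FROM pg_replication_slot_advance('s1', pg_current_wal_lsn());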
+ */
+Datum
+pg_replication_slot_advance(PG_FUNCTION_ARGS)
+{
+ Name slotname = PG_GETARG_NAME(0);
+ XLogRecPtr moveto = PG_GETARG_LSN(1);
+ XLogRecPtr endlsn;
+ XLogRecPtr minlsn;
+ TupleDesc tupdesc;
+ Datum values[2];
+ bool nulls[2];
+ HeapTuple tuple;
+ Datum result;
+
+ Assert(!MyReplicationSlot);
+
+ check_permissions();
+
+ if (XLogRecPtrIsInvalid(moveto))
+ ereport(ERROR,
+ (errmsg("invalid target WAL LSN")));
+
+ /* Build a tuple descriptor for our result type */
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ /*
+	 * We can't move the slot past what's been flushed/replayed, so clamp the
+ * target position accordingly.
+ */
+ if (!RecoveryInProgress())
+ moveto = Min(moveto, GetFlushRecPtr());
+ else
+ moveto = Min(moveto, GetXLogReplayRecPtr(&ThisTimeLineID));
+
+ /* Acquire the slot so we "own" it */
+ (void) ReplicationSlotAcquire(NameStr(*slotname), SAB_Error);
+
+ /* A slot whose restart_lsn has never been reserved cannot be advanced */
+ if (XLogRecPtrIsInvalid(MyReplicationSlot->data.restart_lsn))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("replication slot \"%s\" cannot be advanced",
+ NameStr(*slotname)),
+ errdetail("This slot has never previously reserved WAL, or has been invalidated.")));
+
+ /*
+ * Check if the slot is not moving backwards. Physical slots rely simply
+ * on restart_lsn as a minimum point, while logical slots have confirmed
+ * consumption up to confirmed_flush, meaning that in both cases data
+ * older than that is not available anymore.
+ */
+ if (OidIsValid(MyReplicationSlot->data.database))
+ minlsn = MyReplicationSlot->data.confirmed_flush;
+ else
+ minlsn = MyReplicationSlot->data.restart_lsn;
+
+ if (moveto < minlsn)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot advance replication slot to %X/%X, minimum is %X/%X",
+ (uint32) (moveto >> 32), (uint32) moveto,
+ (uint32) (minlsn >> 32), (uint32) minlsn)));
+
+ /* Do the actual slot update, depending on the slot type */
+ if (OidIsValid(MyReplicationSlot->data.database))
+ endlsn = pg_logical_replication_slot_advance(moveto);
+ else
+ endlsn = pg_physical_replication_slot_advance(moveto);
+
+ values[0] = NameGetDatum(&MyReplicationSlot->data.name);
+ nulls[0] = false;
+
+ /*
+	 * Recompute the minimum LSN and xmin across all slots to account for
+	 * any advancing we just did.
+ */
+ ReplicationSlotsComputeRequiredXmin(false);
+ ReplicationSlotsComputeRequiredLSN();
+
+ ReplicationSlotRelease();
+
+ /* Return the reached position. */
+ values[1] = LSNGetDatum(endlsn);
+ nulls[1] = false;
+
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+ result = HeapTupleGetDatum(tuple);
+
+ PG_RETURN_DATUM(result);
+}
+
+/*
+ * Helper function for copying a replication slot.
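+ *
+ * Example usage of the SQL-callable wrappers at the bottom of this file:
+ *     SELECT * FROM pg_copy_logical_replication_slot('s1', 's2');
+ * copies logical slot "s1" into new slot "s2", keeping the output plugin.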
+ */
+static Datum
+copy_replication_slot(FunctionCallInfo fcinfo, bool logical_slot)
+{
+ Name src_name = PG_GETARG_NAME(0);
+ Name dst_name = PG_GETARG_NAME(1);
+ ReplicationSlot *src = NULL;
+ ReplicationSlot first_slot_contents;
+ ReplicationSlot second_slot_contents;
+ XLogRecPtr src_restart_lsn;
+ bool src_islogical;
+ bool temporary;
+ char *plugin;
+ Datum values[2];
+ bool nulls[2];
+ Datum result;
+ TupleDesc tupdesc;
+ HeapTuple tuple;
+
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ check_permissions();
+
+ if (logical_slot)
+ CheckLogicalDecodingRequirements();
+ else
+ CheckSlotRequirements();
+
+ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
+
+ /*
+ * We need to prevent the source slot's reserved WAL from being removed,
+ * but we don't want to lock that slot for very long, and it can advance
+ * in the meantime. So obtain the source slot's data, and create a new
+ * slot using its restart_lsn. Afterwards we lock the source slot again
+ * and verify that the data we copied (name, type) has not changed
+ * incompatibly. No inconvenient WAL removal can occur once the new slot
+ * is created -- but since WAL removal could have occurred before we
+ * managed to create the new slot, we advance the new slot's restart_lsn
+ * to the source slot's updated restart_lsn the second time we lock it.
+ */
+ for (int i = 0; i < max_replication_slots; i++)
+ {
+ ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
+
+ if (s->in_use && strcmp(NameStr(s->data.name), NameStr(*src_name)) == 0)
+ {
+ /* Copy the slot contents while holding spinlock */
+ SpinLockAcquire(&s->mutex);
+ first_slot_contents = *s;
+ SpinLockRelease(&s->mutex);
+ src = s;
+ break;
+ }
+ }
+
+ LWLockRelease(ReplicationSlotControlLock);
+
+ if (src == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("replication slot \"%s\" does not exist", NameStr(*src_name))));
+
+ src_islogical = SlotIsLogical(&first_slot_contents);
+ src_restart_lsn = first_slot_contents.data.restart_lsn;
+ temporary = (first_slot_contents.data.persistency == RS_TEMPORARY);
+ plugin = logical_slot ? NameStr(first_slot_contents.data.plugin) : NULL;
+
+ /* Check type of replication slot */
+ if (src_islogical != logical_slot)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ src_islogical ?
+ errmsg("cannot copy physical replication slot \"%s\" as a logical replication slot",
+ NameStr(*src_name)) :
+ errmsg("cannot copy logical replication slot \"%s\" as a physical replication slot",
+ NameStr(*src_name))));
+
+ /* Copying non-reserved slot doesn't make sense */
+ if (XLogRecPtrIsInvalid(src_restart_lsn))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot copy a replication slot that doesn't reserve WAL")));
+
+ /* Overwrite params from optional arguments */
+ if (PG_NARGS() >= 3)
+ temporary = PG_GETARG_BOOL(2);
+ if (PG_NARGS() >= 4)
+ {
+ Assert(logical_slot);
+ plugin = NameStr(*(PG_GETARG_NAME(3)));
+ }
+
+ /* Create new slot and acquire it */
+ if (logical_slot)
+ {
+ /*
+ * We must not try to read WAL, since we haven't reserved it yet --
+ * hence pass find_startpoint false. confirmed_flush will be set
+ * below, by copying from the source slot.
+ */
+ create_logical_replication_slot(NameStr(*dst_name),
+ plugin,
+ temporary,
+ src_restart_lsn,
+ false);
+ }
+ else
+ create_physical_replication_slot(NameStr(*dst_name),
+ true,
+ temporary,
+ src_restart_lsn);
+
+ /*
+ * Update the destination slot to current values of the source slot;
+ * recheck that the source slot is still the one we saw previously.
+ */
+ {
+ TransactionId copy_effective_xmin;
+ TransactionId copy_effective_catalog_xmin;
+ TransactionId copy_xmin;
+ TransactionId copy_catalog_xmin;
+ XLogRecPtr copy_restart_lsn;
+ XLogRecPtr copy_confirmed_flush;
+ bool copy_islogical;
+ char *copy_name;
+
+ /* Copy data of source slot again */
+ SpinLockAcquire(&src->mutex);
+ second_slot_contents = *src;
+ SpinLockRelease(&src->mutex);
+
+ copy_effective_xmin = second_slot_contents.effective_xmin;
+ copy_effective_catalog_xmin = second_slot_contents.effective_catalog_xmin;
+
+ copy_xmin = second_slot_contents.data.xmin;
+ copy_catalog_xmin = second_slot_contents.data.catalog_xmin;
+ copy_restart_lsn = second_slot_contents.data.restart_lsn;
+ copy_confirmed_flush = second_slot_contents.data.confirmed_flush;
+
+ /* for existence check */
+ copy_name = NameStr(second_slot_contents.data.name);
+ copy_islogical = SlotIsLogical(&second_slot_contents);
+
+ /*
+ * Check if the source slot still exists and is valid. We regard it as
+ * invalid if the type of replication slot or name has been changed,
+ * or the restart_lsn either is invalid or has gone backward. (The
+ * restart_lsn could go backwards if the source slot is dropped and
+ * copied from an older slot during installation.)
+ *
+ * Since erroring out will release and drop the destination slot we
+ * don't need to release it here.
+ */
+ if (copy_restart_lsn < src_restart_lsn ||
+ src_islogical != copy_islogical ||
+ strcmp(copy_name, NameStr(*src_name)) != 0)
+ ereport(ERROR,
+ (errmsg("could not copy replication slot \"%s\"",
+ NameStr(*src_name)),
+ errdetail("The source replication slot was modified incompatibly during the copy operation.")));
+
+ /* The source slot must have a consistent snapshot */
+ if (src_islogical && XLogRecPtrIsInvalid(copy_confirmed_flush))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot copy unfinished logical replication slot \"%s\"",
+ NameStr(*src_name)),
+ errhint("Retry when the source replication slot's confirmed_flush_lsn is valid.")));
+
+ /* Install copied values again */
+ SpinLockAcquire(&MyReplicationSlot->mutex);
+ MyReplicationSlot->effective_xmin = copy_effective_xmin;
+ MyReplicationSlot->effective_catalog_xmin = copy_effective_catalog_xmin;
+
+ MyReplicationSlot->data.xmin = copy_xmin;
+ MyReplicationSlot->data.catalog_xmin = copy_catalog_xmin;
+ MyReplicationSlot->data.restart_lsn = copy_restart_lsn;
+ MyReplicationSlot->data.confirmed_flush = copy_confirmed_flush;
+ SpinLockRelease(&MyReplicationSlot->mutex);
+
+ ReplicationSlotMarkDirty();
+ ReplicationSlotsComputeRequiredXmin(false);
+ ReplicationSlotsComputeRequiredLSN();
+ ReplicationSlotSave();
+
+#ifdef USE_ASSERT_CHECKING
+ /* Check that the restart_lsn is available */
+ {
+ XLogSegNo segno;
+
+ XLByteToSeg(copy_restart_lsn, segno, wal_segment_size);
+ Assert(XLogGetLastRemovedSegno() < segno);
+ }
+#endif
+ }
+
+ /* target slot fully created, mark as persistent if needed */
+ if (logical_slot && !temporary)
+ ReplicationSlotPersist();
+
+ /* All done. Set up the return values */
+ values[0] = NameGetDatum(dst_name);
+ nulls[0] = false;
+ if (!XLogRecPtrIsInvalid(MyReplicationSlot->data.confirmed_flush))
+ {
+ values[1] = LSNGetDatum(MyReplicationSlot->data.confirmed_flush);
+ nulls[1] = false;
+ }
+ else
+ nulls[1] = true;
+
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+ result = HeapTupleGetDatum(tuple);
+
+ ReplicationSlotRelease();
+
+ PG_RETURN_DATUM(result);
+}
+
+/* The wrappers below are all to appease opr_sanity */
+Datum
+pg_copy_logical_replication_slot_a(PG_FUNCTION_ARGS)
+{
+ return copy_replication_slot(fcinfo, true);
+}
+
+Datum
+pg_copy_logical_replication_slot_b(PG_FUNCTION_ARGS)
+{
+ return copy_replication_slot(fcinfo, true);
+}
+
+Datum
+pg_copy_logical_replication_slot_c(PG_FUNCTION_ARGS)
+{
+ return copy_replication_slot(fcinfo, true);
+}
+
+Datum
+pg_copy_physical_replication_slot_a(PG_FUNCTION_ARGS)
+{
+ return copy_replication_slot(fcinfo, false);
+}
+
+Datum
+pg_copy_physical_replication_slot_b(PG_FUNCTION_ARGS)
+{
+ return copy_replication_slot(fcinfo, false);
+}
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
new file mode 100644
index 0000000..df1e341
--- /dev/null
+++ b/src/backend/replication/syncrep.c
@@ -0,0 +1,1092 @@
+/*-------------------------------------------------------------------------
+ *
+ * syncrep.c
+ *
+ * Synchronous replication is new as of PostgreSQL 9.1.
+ *
+ * If requested, transaction commits wait until their commit LSNs are
+ * acknowledged by the synchronous standbys.
+ *
+ * This module contains the code for waiting and release of backends.
+ * All code in this module executes on the primary. The core streaming
+ * replication transport remains within WALreceiver/WALsender modules.
+ *
+ * The essence of this design is that it isolates all logic about
+ * waiting/releasing onto the primary. The primary defines which standbys
+ * it wishes to wait for. The standbys are completely unaware of the
+ * durability requirements of transactions on the primary, reducing the
+ * complexity of the code and streamlining both standby operations and
+ * network bandwidth because there is no requirement to ship
+ * per-transaction state information.
+ *
+ * Replication is either synchronous or not synchronous (async). If it is
+ * async, we just fastpath out of here. If it is sync, then we wait for
+ * the write, flush or apply location on the standby before releasing
+ * the waiting backend. Further complexity in that interaction is
+ * expected in later releases.
+ *
+ * The best performing way to manage the waiting backends is to have a
+ * single ordered queue of waiting backends, so that we can avoid
+ * searching through all waiters each time we receive a reply.
+ *
+ * In 9.5 and before, only a single standby could be considered
+ * synchronous. In 9.6 we added support for priority-based selection of
+ * multiple synchronous standbys. In 10.0 quorum-based selection of
+ * multiple synchronous standbys is also supported. The number of
+ * synchronous standbys that transactions
+ * must wait for replies from is specified in synchronous_standby_names.
+ * This parameter also specifies a list of standby names and the method
+ * (FIRST and ANY) to choose synchronous standbys from the listed ones.
+ *
+ * The method FIRST specifies a priority-based synchronous replication
+ * and makes transaction commits wait until their WAL records are
+ * replicated to the requested number of synchronous standbys chosen based
+ * on their priorities. The standbys whose names appear earlier in the list
+ * are given higher priority and will be considered as synchronous.
+ * Other standby servers appearing later in this list represent potential
+ * synchronous standbys. If any of the current synchronous standbys
+ * disconnects for whatever reason, it will be replaced immediately with
+ * the next-highest-priority standby.
+ *
+ * The method ANY specifies a quorum-based synchronous replication
+ * and makes transaction commits wait until their WAL records are
+ * replicated to at least the requested number of synchronous standbys
+ * in the list. All the standbys appearing in the list are considered as
+ * candidates for quorum synchronous standbys.
+ *
+ * If neither FIRST nor ANY is specified, FIRST is used as the method.
+ * This is for backward compatibility with 9.6 and before, where only
+ * priority-based sync replication was supported.
+ *
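+ * For example (illustrative standby names):
+ *     synchronous_standby_names = 'FIRST 2 (s1, s2, s3)'
+ * makes commits wait for replies from the two highest-priority connected
+ * standbys among s1, s2 and s3, while
+ *     synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
+ * waits for replies from any two of them.
+ *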
+ * Before the standbys chosen from synchronous_standby_names can
+ * become the synchronous standbys they must have caught up with
+ * the primary; that may take some time. Once caught up,
+ * the standbys which are considered as synchronous at that moment
+ * will release waiters from the queue.
+ *
+ * Portions Copyright (c) 2010-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/syncrep.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <unistd.h>
+
+#include "access/xact.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "replication/syncrep.h"
+#include "replication/walsender.h"
+#include "replication/walsender_private.h"
+#include "storage/pmsignal.h"
+#include "storage/proc.h"
+#include "tcop/tcopprot.h"
+#include "utils/builtins.h"
+#include "utils/ps_status.h"
+
+/* User-settable parameters for sync rep */
+char *SyncRepStandbyNames;
+
+#define SyncStandbysDefined() \
+ (SyncRepStandbyNames != NULL && SyncRepStandbyNames[0] != '\0')
+
+static bool announce_next_takeover = true;
+
+SyncRepConfigData *SyncRepConfig = NULL;
+static int SyncRepWaitMode = SYNC_REP_NO_WAIT;
+
+static void SyncRepQueueInsert(int mode);
+static void SyncRepCancelWait(void);
+static int SyncRepWakeQueue(bool all, int mode);
+
+static bool SyncRepGetSyncRecPtr(XLogRecPtr *writePtr,
+ XLogRecPtr *flushPtr,
+ XLogRecPtr *applyPtr,
+ bool *am_sync);
+static void SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr,
+ XLogRecPtr *flushPtr,
+ XLogRecPtr *applyPtr,
+ SyncRepStandbyData *sync_standbys,
+ int num_standbys);
+static void SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr,
+ XLogRecPtr *flushPtr,
+ XLogRecPtr *applyPtr,
+ SyncRepStandbyData *sync_standbys,
+ int num_standbys,
+ uint8 nth);
+static int SyncRepGetStandbyPriority(void);
+static int standby_priority_comparator(const void *a, const void *b);
+static int cmp_lsn(const void *a, const void *b);
+
+#ifdef USE_ASSERT_CHECKING
+static bool SyncRepQueueIsOrderedByLSN(int mode);
+#endif
+
+/*
+ * ===========================================================
+ * Synchronous Replication functions for normal user backends
+ * ===========================================================
+ */
+
+/*
+ * Wait for synchronous replication, if requested by user.
+ *
+ * Initially backends start in state SYNC_REP_NOT_WAITING and then
+ * change that state to SYNC_REP_WAITING before adding themselves
+ * to the wait queue. During SyncRepWakeQueue() a WALSender changes
+ * the state to SYNC_REP_WAIT_COMPLETE once replication is confirmed.
+ * The backend then resets its state to SYNC_REP_NOT_WAITING.
+ *
+ * 'lsn' represents the LSN to wait for. 'commit' indicates whether this LSN
+ * represents a commit record. If it doesn't, then we wait only for the WAL
+ * to be flushed if synchronous_commit is set to the higher level of
+ * remote_apply, because only commit records provide apply feedback.
+ */
+void
+SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
+{
+ char *new_status = NULL;
+ const char *old_status;
+ int mode;
+
+ /*
+ * This should be called while holding interrupts during a transaction
+	 * commit to prevent the follow-up shared memory queue cleanups from
+	 * being influenced by external interruptions.
+ */
+ Assert(InterruptHoldoffCount > 0);
+
+ /* Cap the level for anything other than commit to remote flush only. */
+ if (commit)
+ mode = SyncRepWaitMode;
+ else
+ mode = Min(SyncRepWaitMode, SYNC_REP_WAIT_FLUSH);
+
+ /*
+ * Fast exit if user has not requested sync replication.
+ */
+ if (!SyncRepRequested())
+ return;
+
+ Assert(SHMQueueIsDetached(&(MyProc->syncRepLinks)));
+ Assert(WalSndCtl != NULL);
+
+ LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
+ Assert(MyProc->syncRepState == SYNC_REP_NOT_WAITING);
+
+ /*
+ * We don't wait for sync rep if WalSndCtl->sync_standbys_defined is not
+ * set. See SyncRepUpdateSyncStandbysDefined.
+ *
+ * Also check that the standby hasn't already replied. Unlikely race
+ * condition but we'll be fetching that cache line anyway so it's likely
+ * to be a low cost check.
+ */
+ if (!WalSndCtl->sync_standbys_defined ||
+ lsn <= WalSndCtl->lsn[mode])
+ {
+ LWLockRelease(SyncRepLock);
+ return;
+ }
+
+ /*
+ * Set our waitLSN so WALSender will know when to wake us, and add
+ * ourselves to the queue.
+ */
+ MyProc->waitLSN = lsn;
+ MyProc->syncRepState = SYNC_REP_WAITING;
+ SyncRepQueueInsert(mode);
+ Assert(SyncRepQueueIsOrderedByLSN(mode));
+ LWLockRelease(SyncRepLock);
+
+ /* Alter ps display to show waiting for sync rep. */
+ if (update_process_title)
+ {
+ int len;
+
+ old_status = get_ps_display(&len);
+ new_status = (char *) palloc(len + 32 + 1);
+ memcpy(new_status, old_status, len);
+ sprintf(new_status + len, " waiting for %X/%X",
+ (uint32) (lsn >> 32), (uint32) lsn);
+ set_ps_display(new_status);
+ new_status[len] = '\0'; /* truncate off " waiting ..." */
+ }
+
+ /*
+ * Wait for specified LSN to be confirmed.
+ *
+ * Each proc has its own wait latch, so we perform a normal latch
+ * check/wait loop here.
+ */
+ for (;;)
+ {
+ int rc;
+
+ /* Must reset the latch before testing state. */
+ ResetLatch(MyLatch);
+
+ /*
+ * Acquiring the lock is not needed, the latch ensures proper
+ * barriers. If it looks like we're done, we must really be done,
+ * because once walsender changes the state to SYNC_REP_WAIT_COMPLETE,
+ * it will never update it again, so we can't be seeing a stale value
+ * in that case.
+ */
+ if (MyProc->syncRepState == SYNC_REP_WAIT_COMPLETE)
+ break;
+
+ /*
+ * If a wait for synchronous replication is pending, we can neither
+ * acknowledge the commit nor raise ERROR or FATAL. The latter would
+ * lead the client to believe that the transaction aborted, which is
+ * not true: it's already committed locally. The former is no good
+ * either: the client has requested synchronous replication, and is
+ * entitled to assume that an acknowledged commit is also replicated,
+ * which might not be true. So in this case we issue a WARNING (which
+ * some clients may be able to interpret) and shut off further output.
+ * We do NOT reset ProcDiePending, so that the process will die after
+ * the commit is cleaned up.
+ */
+ if (ProcDiePending)
+ {
+ ereport(WARNING,
+ (errcode(ERRCODE_ADMIN_SHUTDOWN),
+ errmsg("canceling the wait for synchronous replication and terminating connection due to administrator command"),
+ errdetail("The transaction has already committed locally, but might not have been replicated to the standby.")));
+ whereToSendOutput = DestNone;
+ SyncRepCancelWait();
+ break;
+ }
+
+ /*
+ * It's unclear what to do if a query cancel interrupt arrives. We
+ * can't actually abort at this point, but ignoring the interrupt
+ * altogether is not helpful, so we just terminate the wait with a
+ * suitable warning.
+ */
+ if (QueryCancelPending)
+ {
+ QueryCancelPending = false;
+ ereport(WARNING,
+ (errmsg("canceling wait for synchronous replication due to user request"),
+ errdetail("The transaction has already committed locally, but might not have been replicated to the standby.")));
+ SyncRepCancelWait();
+ break;
+ }
+
+ /*
+ * Wait on latch. Any condition that should wake us up will set the
+ * latch, so no need for timeout.
+ */
+ rc = WaitLatch(MyLatch, WL_LATCH_SET | WL_POSTMASTER_DEATH, -1,
+ WAIT_EVENT_SYNC_REP);
+
+ /*
+ * If the postmaster dies, we'll probably never get an acknowledgment,
+ * because all the wal sender processes will exit. So just bail out.
+ */
+ if (rc & WL_POSTMASTER_DEATH)
+ {
+ ProcDiePending = true;
+ whereToSendOutput = DestNone;
+ SyncRepCancelWait();
+ break;
+ }
+ }
+
+ /*
+ * WalSender has checked our LSN and has removed us from queue. Clean up
+ * state and leave. It's OK to reset these shared memory fields without
+ * holding SyncRepLock, because any walsenders will ignore us anyway when
+ * we're not on the queue. We need a read barrier to make sure we see the
+ * changes to the queue link (this might be unnecessary without
+ * assertions, but better safe than sorry).
+ */
+ pg_read_barrier();
+ Assert(SHMQueueIsDetached(&(MyProc->syncRepLinks)));
+ MyProc->syncRepState = SYNC_REP_NOT_WAITING;
+ MyProc->waitLSN = 0;
+
+ if (new_status)
+ {
+ /* Reset ps display */
+ set_ps_display(new_status);
+ pfree(new_status);
+ }
+}
+
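+/*
+ * Illustrative example, with hypothetical values (not from the original
+ * code): suppose WalSndCtl->lsn[] currently holds write = 0/3000120,
+ * flush = 0/3000100 and apply = 0/3000080.  A backend committing at
+ * lsn = 0/3000090 returns immediately under remote_write or remote_flush
+ * via the early-exit test (lsn <= WalSndCtl->lsn[mode]), but must queue
+ * and wait under remote_apply.  A non-commit caller (commit = false) has
+ * its mode capped at SYNC_REP_WAIT_FLUSH, so it never waits for apply.
+ */
+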
+/*
+ * Insert MyProc into the specified SyncRepQueue, maintaining sorted invariant.
+ *
+ * Usually we will go at the tail of the queue, though it's possible that we
+ * arrive here out of order, so start at the tail and work back to the
+ * insertion point.
+ */
+static void
+SyncRepQueueInsert(int mode)
+{
+ PGPROC *proc;
+
+ Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE);
+ proc = (PGPROC *) SHMQueuePrev(&(WalSndCtl->SyncRepQueue[mode]),
+ &(WalSndCtl->SyncRepQueue[mode]),
+ offsetof(PGPROC, syncRepLinks));
+
+ while (proc)
+ {
+ /*
+		 * Stop at the queue element that we should insert after to ensure
+		 * the queue is ordered by LSN.
+ */
+ if (proc->waitLSN < MyProc->waitLSN)
+ break;
+
+ proc = (PGPROC *) SHMQueuePrev(&(WalSndCtl->SyncRepQueue[mode]),
+ &(proc->syncRepLinks),
+ offsetof(PGPROC, syncRepLinks));
+ }
+
+ if (proc)
+ SHMQueueInsertAfter(&(proc->syncRepLinks), &(MyProc->syncRepLinks));
+ else
+ SHMQueueInsertAfter(&(WalSndCtl->SyncRepQueue[mode]), &(MyProc->syncRepLinks));
+}
+
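+/*
+ * Illustrative example, with hypothetical values (not from the original
+ * code): suppose the flush-mode queue already holds backends waiting for
+ * LSNs 0/10 < 0/20 < 0/40 and a new backend arrives with waitLSN = 0/30.
+ * Scanning backwards from the tail, the loop passes 0/40, stops at 0/20
+ * (the first element with a smaller LSN), and SHMQueueInsertAfter() links
+ * the newcomer there, yielding 0/10, 0/20, 0/30, 0/40 and preserving the
+ * ascending-LSN invariant that SyncRepWakeQueue() relies on.
+ */
+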
+/*
+ * Acquire SyncRepLock and cancel any wait currently in progress.
+ */
+static void
+SyncRepCancelWait(void)
+{
+ LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
+ if (!SHMQueueIsDetached(&(MyProc->syncRepLinks)))
+ SHMQueueDelete(&(MyProc->syncRepLinks));
+ MyProc->syncRepState = SYNC_REP_NOT_WAITING;
+ LWLockRelease(SyncRepLock);
+}
+
+void
+SyncRepCleanupAtProcExit(void)
+{
+ /*
+	 * First check, without taking the lock, whether we have already been
+	 * removed from the queue, so as not to slow down backend exit.
+ */
+ if (!SHMQueueIsDetached(&(MyProc->syncRepLinks)))
+ {
+ LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
+
+ /* maybe we have just been removed, so recheck */
+ if (!SHMQueueIsDetached(&(MyProc->syncRepLinks)))
+ SHMQueueDelete(&(MyProc->syncRepLinks));
+
+ LWLockRelease(SyncRepLock);
+ }
+}
+
+/*
+ * ===========================================================
+ * Synchronous Replication functions for wal sender processes
+ * ===========================================================
+ */
+
+/*
+ * Take any action required to initialise sync rep state from config
+ * data. Called at WALSender startup and after each SIGHUP.
+ */
+void
+SyncRepInitConfig(void)
+{
+ int priority;
+
+ /*
+ * Determine if we are a potential sync standby and remember the result
+ * for handling replies from standby.
+ */
+ priority = SyncRepGetStandbyPriority();
+ if (MyWalSnd->sync_standby_priority != priority)
+ {
+ SpinLockAcquire(&MyWalSnd->mutex);
+ MyWalSnd->sync_standby_priority = priority;
+ SpinLockRelease(&MyWalSnd->mutex);
+
+ ereport(DEBUG1,
+ (errmsg("standby \"%s\" now has synchronous standby priority %u",
+ application_name, priority)));
+ }
+}
+
+/*
+ * Update the LSNs on each queue based upon our latest state. This
+ * implements a simple policy of first-valid-sync-standby-releases-waiter.
+ *
+ * Other policies are possible, which would change what we do here and
+ * perhaps also which information we store as well.
+ */
+void
+SyncRepReleaseWaiters(void)
+{
+ volatile WalSndCtlData *walsndctl = WalSndCtl;
+ XLogRecPtr writePtr;
+ XLogRecPtr flushPtr;
+ XLogRecPtr applyPtr;
+ bool got_recptr;
+ bool am_sync;
+ int numwrite = 0;
+ int numflush = 0;
+ int numapply = 0;
+
+ /*
+ * If this WALSender is serving a standby that is not on the list of
+	 * potential sync standbys, then we have nothing to do. If we are still
+	 * starting up, still running a base backup, or the current flush
+	 * position is still invalid, then leave quickly as well. Streaming or
+	 * stopping walsenders are allowed to release waiters.
+ */
+ if (MyWalSnd->sync_standby_priority == 0 ||
+ (MyWalSnd->state != WALSNDSTATE_STREAMING &&
+ MyWalSnd->state != WALSNDSTATE_STOPPING) ||
+ XLogRecPtrIsInvalid(MyWalSnd->flush))
+ {
+ announce_next_takeover = true;
+ return;
+ }
+
+ /*
+ * We're a potential sync standby. Release waiters if there are enough
+ * sync standbys and we are considered as sync.
+ */
+ LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
+
+ /*
+ * Check whether we are a sync standby or not, and calculate the synced
+ * positions among all sync standbys. (Note: although this step does not
+ * of itself require holding SyncRepLock, it seems like a good idea to do
+ * it after acquiring the lock. This ensures that the WAL pointers we use
+ * to release waiters are newer than any previous execution of this
+ * routine used.)
+ */
+ got_recptr = SyncRepGetSyncRecPtr(&writePtr, &flushPtr, &applyPtr, &am_sync);
+
+ /*
+ * If we are managing a sync standby, though we weren't prior to this,
+ * then announce we are now a sync standby.
+ */
+ if (announce_next_takeover && am_sync)
+ {
+ announce_next_takeover = false;
+
+ if (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY)
+ ereport(LOG,
+ (errmsg("standby \"%s\" is now a synchronous standby with priority %u",
+ application_name, MyWalSnd->sync_standby_priority)));
+ else
+ ereport(LOG,
+ (errmsg("standby \"%s\" is now a candidate for quorum synchronous standby",
+ application_name)));
+ }
+
+ /*
+ * If the number of sync standbys is less than requested or we aren't
+ * managing a sync standby then just leave.
+ */
+ if (!got_recptr || !am_sync)
+ {
+ LWLockRelease(SyncRepLock);
+ announce_next_takeover = !am_sync;
+ return;
+ }
+
+ /*
+ * Set the lsn first so that when we wake backends they will release up to
+ * this location.
+ */
+ if (walsndctl->lsn[SYNC_REP_WAIT_WRITE] < writePtr)
+ {
+ walsndctl->lsn[SYNC_REP_WAIT_WRITE] = writePtr;
+ numwrite = SyncRepWakeQueue(false, SYNC_REP_WAIT_WRITE);
+ }
+ if (walsndctl->lsn[SYNC_REP_WAIT_FLUSH] < flushPtr)
+ {
+ walsndctl->lsn[SYNC_REP_WAIT_FLUSH] = flushPtr;
+ numflush = SyncRepWakeQueue(false, SYNC_REP_WAIT_FLUSH);
+ }
+ if (walsndctl->lsn[SYNC_REP_WAIT_APPLY] < applyPtr)
+ {
+ walsndctl->lsn[SYNC_REP_WAIT_APPLY] = applyPtr;
+ numapply = SyncRepWakeQueue(false, SYNC_REP_WAIT_APPLY);
+ }
+
+ LWLockRelease(SyncRepLock);
+
+ elog(DEBUG3, "released %d procs up to write %X/%X, %d procs up to flush %X/%X, %d procs up to apply %X/%X",
+ numwrite, (uint32) (writePtr >> 32), (uint32) writePtr,
+ numflush, (uint32) (flushPtr >> 32), (uint32) flushPtr,
+ numapply, (uint32) (applyPtr >> 32), (uint32) applyPtr);
+}
+
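+/*
+ * Illustrative example, with hypothetical values (not from the original
+ * code): if the synced flush position computed above advances from 0/7000
+ * to 0/8000 while the write and apply positions are unchanged, only the
+ * SYNC_REP_WAIT_FLUSH queue is walked, and the DEBUG3 line would report
+ * something like "released 0 procs up to write ..., 3 procs up to flush
+ * 0/8000, 0 procs up to apply ...".
+ */
+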
+/*
+ * Calculate the synced Write, Flush and Apply positions among sync standbys.
+ *
+ * Return false if the number of sync standbys is less than
+ * synchronous_standby_names specifies. Otherwise return true and
+ * store the positions into *writePtr, *flushPtr and *applyPtr.
+ *
+ * On return, *am_sync is set to true if this walsender is connected to a
+ * sync standby. Otherwise it's set to false.
+ */
+static bool
+SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
+ XLogRecPtr *applyPtr, bool *am_sync)
+{
+ SyncRepStandbyData *sync_standbys;
+ int num_standbys;
+ int i;
+
+ /* Initialize default results */
+ *writePtr = InvalidXLogRecPtr;
+ *flushPtr = InvalidXLogRecPtr;
+ *applyPtr = InvalidXLogRecPtr;
+ *am_sync = false;
+
+ /* Quick out if not even configured to be synchronous */
+ if (SyncRepConfig == NULL)
+ return false;
+
+ /* Get standbys that are considered as synchronous at this moment */
+ num_standbys = SyncRepGetCandidateStandbys(&sync_standbys);
+
+ /* Am I among the candidate sync standbys? */
+ for (i = 0; i < num_standbys; i++)
+ {
+ if (sync_standbys[i].is_me)
+ {
+ *am_sync = true;
+ break;
+ }
+ }
+
+ /*
+ * Nothing more to do if we are not managing a sync standby or there are
+ * not enough synchronous standbys.
+ */
+ if (!(*am_sync) ||
+ num_standbys < SyncRepConfig->num_sync)
+ {
+ pfree(sync_standbys);
+ return false;
+ }
+
+ /*
+	 * In priority-based sync replication, the synced positions are the
+	 * oldest ones among sync standbys. In quorum-based sync replication,
+	 * they are the Nth-latest ones.
+	 *
+	 * SyncRepGetNthLatestSyncRecPtr() can also calculate the oldest
+	 * positions, but we use SyncRepGetOldestSyncRecPtr() for that
+	 * calculation because it's a bit more efficient.
+ *
+ * XXX If the numbers of current and requested sync standbys are the same,
+ * we can use SyncRepGetOldestSyncRecPtr() to calculate the synced
+ * positions even in a quorum-based sync replication.
+ */
+ if (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY)
+ {
+ SyncRepGetOldestSyncRecPtr(writePtr, flushPtr, applyPtr,
+ sync_standbys, num_standbys);
+ }
+ else
+ {
+ SyncRepGetNthLatestSyncRecPtr(writePtr, flushPtr, applyPtr,
+ sync_standbys, num_standbys,
+ SyncRepConfig->num_sync);
+ }
+
+ pfree(sync_standbys);
+ return true;
+}
+
+/*
+ * Calculate the oldest Write, Flush and Apply positions among sync standbys.
+ */
+static void
+SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr,
+ XLogRecPtr *flushPtr,
+ XLogRecPtr *applyPtr,
+ SyncRepStandbyData *sync_standbys,
+ int num_standbys)
+{
+ int i;
+
+ /*
+ * Scan through all sync standbys and calculate the oldest Write, Flush
+ * and Apply positions. We assume *writePtr et al were initialized to
+ * InvalidXLogRecPtr.
+ */
+ for (i = 0; i < num_standbys; i++)
+ {
+ XLogRecPtr write = sync_standbys[i].write;
+ XLogRecPtr flush = sync_standbys[i].flush;
+ XLogRecPtr apply = sync_standbys[i].apply;
+
+ if (XLogRecPtrIsInvalid(*writePtr) || *writePtr > write)
+ *writePtr = write;
+ if (XLogRecPtrIsInvalid(*flushPtr) || *flushPtr > flush)
+ *flushPtr = flush;
+ if (XLogRecPtrIsInvalid(*applyPtr) || *applyPtr > apply)
+ *applyPtr = apply;
+ }
+}
+
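+/*
+ * Illustrative example, with hypothetical positions (not from the original
+ * code): with two sync standbys reporting flush = 0/5000 and flush =
+ * 0/4800, the oldest flush position is 0/4800.  Releasing waiters only up
+ * to the minimum guarantees that the WAL is flushed on *every* sync
+ * standby, which is what priority mode promises.
+ */
+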
+/*
+ * Calculate the Nth latest Write, Flush and Apply positions among sync
+ * standbys.
+ */
+static void
+SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr,
+ XLogRecPtr *flushPtr,
+ XLogRecPtr *applyPtr,
+ SyncRepStandbyData *sync_standbys,
+ int num_standbys,
+ uint8 nth)
+{
+ XLogRecPtr *write_array;
+ XLogRecPtr *flush_array;
+ XLogRecPtr *apply_array;
+ int i;
+
+ /* Should have enough candidates, or somebody messed up */
+ Assert(nth > 0 && nth <= num_standbys);
+
+ write_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * num_standbys);
+ flush_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * num_standbys);
+ apply_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * num_standbys);
+
+ for (i = 0; i < num_standbys; i++)
+ {
+ write_array[i] = sync_standbys[i].write;
+ flush_array[i] = sync_standbys[i].flush;
+ apply_array[i] = sync_standbys[i].apply;
+ }
+
+ /* Sort each array in descending order */
+ qsort(write_array, num_standbys, sizeof(XLogRecPtr), cmp_lsn);
+ qsort(flush_array, num_standbys, sizeof(XLogRecPtr), cmp_lsn);
+ qsort(apply_array, num_standbys, sizeof(XLogRecPtr), cmp_lsn);
+
+ /* Get Nth latest Write, Flush, Apply positions */
+ *writePtr = write_array[nth - 1];
+ *flushPtr = flush_array[nth - 1];
+ *applyPtr = apply_array[nth - 1];
+
+ pfree(write_array);
+ pfree(flush_array);
+ pfree(apply_array);
+}
+
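+/*
+ * Illustrative example, with hypothetical positions (not from the original
+ * code): with synchronous_standby_names = 'ANY 2 (s1, s2, s3)' (so
+ * num_sync = 2) and flush positions {0/9000, 0/7000, 0/8000}, the array
+ * sorts descending to {0/9000, 0/8000, 0/7000} and flush_array[nth - 1] =
+ * flush_array[1] = 0/8000.  At least two standbys (s1 and s3) have flushed
+ * past that point, which is exactly the quorum guarantee.
+ */
+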
+/*
+ * Compare two LSNs, in order to sort an array in descending order.
+ */
+static int
+cmp_lsn(const void *a, const void *b)
+{
+ XLogRecPtr lsn1 = *((const XLogRecPtr *) a);
+ XLogRecPtr lsn2 = *((const XLogRecPtr *) b);
+
+ if (lsn1 > lsn2)
+ return -1;
+ else if (lsn1 == lsn2)
+ return 0;
+ else
+ return 1;
+}
+
+/*
+ * Return data about walsenders that are candidates to be sync standbys.
+ *
+ * *standbys is set to a palloc'd array of structs of per-walsender data,
+ * and the number of valid entries (candidate sync senders) is returned.
+ * (This might be more or fewer than num_sync; caller must check.)
+ */
+int
+SyncRepGetCandidateStandbys(SyncRepStandbyData **standbys)
+{
+ int i;
+ int n;
+
+ /* Create result array */
+ *standbys = (SyncRepStandbyData *)
+ palloc(max_wal_senders * sizeof(SyncRepStandbyData));
+
+ /* Quick exit if sync replication is not requested */
+ if (SyncRepConfig == NULL)
+ return 0;
+
+ /* Collect raw data from shared memory */
+ n = 0;
+ for (i = 0; i < max_wal_senders; i++)
+ {
+ volatile WalSnd *walsnd; /* Use volatile pointer to prevent code
+ * rearrangement */
+ SyncRepStandbyData *stby;
+ WalSndState state; /* not included in SyncRepStandbyData */
+
+ walsnd = &WalSndCtl->walsnds[i];
+ stby = *standbys + n;
+
+ SpinLockAcquire(&walsnd->mutex);
+ stby->pid = walsnd->pid;
+ state = walsnd->state;
+ stby->write = walsnd->write;
+ stby->flush = walsnd->flush;
+ stby->apply = walsnd->apply;
+ stby->sync_standby_priority = walsnd->sync_standby_priority;
+ SpinLockRelease(&walsnd->mutex);
+
+ /* Must be active */
+ if (stby->pid == 0)
+ continue;
+
+ /* Must be streaming or stopping */
+ if (state != WALSNDSTATE_STREAMING &&
+ state != WALSNDSTATE_STOPPING)
+ continue;
+
+ /* Must be synchronous */
+ if (stby->sync_standby_priority == 0)
+ continue;
+
+ /* Must have a valid flush position */
+ if (XLogRecPtrIsInvalid(stby->flush))
+ continue;
+
+ /* OK, it's a candidate */
+ stby->walsnd_index = i;
+ stby->is_me = (walsnd == MyWalSnd);
+ n++;
+ }
+
+ /*
+ * In quorum mode, we return all the candidates. In priority mode, if we
+ * have too many candidates then return only the num_sync ones of highest
+ * priority.
+ */
+ if (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY &&
+ n > SyncRepConfig->num_sync)
+ {
+ /* Sort by priority ... */
+ qsort(*standbys, n, sizeof(SyncRepStandbyData),
+ standby_priority_comparator);
+ /* ... then report just the first num_sync ones */
+ n = SyncRepConfig->num_sync;
+ }
+
+ return n;
+}
+
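+/*
+ * Illustrative example, with a hypothetical setup (not from the original
+ * code): with synchronous_standby_names = 'FIRST 2 (s1, s2, s3)' and all
+ * three standbys connected and streaming, this function first collects
+ * three candidates, then sorts them by priority (s1 = 1, s2 = 2, s3 = 3)
+ * and returns only the first num_sync = 2 entries, so s3 remains a
+ * potential, but not a current, sync standby.
+ */
+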
+/*
+ * qsort comparator to sort SyncRepStandbyData entries by priority
+ */
+static int
+standby_priority_comparator(const void *a, const void *b)
+{
+ const SyncRepStandbyData *sa = (const SyncRepStandbyData *) a;
+ const SyncRepStandbyData *sb = (const SyncRepStandbyData *) b;
+
+ /* First, sort by increasing priority value */
+ if (sa->sync_standby_priority != sb->sync_standby_priority)
+ return sa->sync_standby_priority - sb->sync_standby_priority;
+
+ /*
+ * We might have equal priority values; arbitrarily break ties by position
+ * in the WALSnd array. (This is utterly bogus, since that is arrival
+ * order dependent, but there are regression tests that rely on it.)
+ */
+ return sa->walsnd_index - sb->walsnd_index;
+}
+
+
+/*
+ * Check if we are in the list of sync standbys, and if so, determine
+ * priority sequence. Return priority if set, or zero to indicate that
+ * we are not a potential sync standby.
+ *
+ * Compare the parameter SyncRepStandbyNames against the application_name
+ * for this WALSender, or allow any name if we find a wildcard "*".
+ */
+static int
+SyncRepGetStandbyPriority(void)
+{
+ const char *standby_name;
+ int priority;
+ bool found = false;
+
+ /*
+	 * Since synchronous cascade replication is not allowed, we always set
+	 * the priority of a cascading walsender to zero.
+ */
+ if (am_cascading_walsender)
+ return 0;
+
+ if (!SyncStandbysDefined() || SyncRepConfig == NULL)
+ return 0;
+
+ standby_name = SyncRepConfig->member_names;
+ for (priority = 1; priority <= SyncRepConfig->nmembers; priority++)
+ {
+ if (pg_strcasecmp(standby_name, application_name) == 0 ||
+ strcmp(standby_name, "*") == 0)
+ {
+ found = true;
+ break;
+ }
+ standby_name += strlen(standby_name) + 1;
+ }
+
+ if (!found)
+ return 0;
+
+ /*
+ * In quorum-based sync replication, all the standbys in the list have the
+ * same priority, one.
+ */
+ return (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY) ? priority : 1;
+}
+
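+/*
+ * Illustrative example, with hypothetical names (not from the original
+ * code): with synchronous_standby_names = 'FIRST 2 (s1, s2, s3)', a
+ * walsender whose application_name is "s2" walks the packed member_names
+ * strings ("s1\0s2\0s3\0") and returns priority 2.  With
+ * 'ANY 2 (s1, s2, s3)' the same walsender returns 1, since quorum members
+ * are all equals.
+ */
+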
+/*
+ * Walk the specified queue from head. Set the state of any backends that
+ * need to be woken, remove them from the queue, and then wake them.
+ * Pass all = true to wake the whole queue; otherwise, just wake up to
+ * the walsender's LSN.
+ *
+ * The caller must hold SyncRepLock in exclusive mode.
+ */
+static int
+SyncRepWakeQueue(bool all, int mode)
+{
+ volatile WalSndCtlData *walsndctl = WalSndCtl;
+ PGPROC *proc = NULL;
+ PGPROC *thisproc = NULL;
+ int numprocs = 0;
+
+ Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE);
+ Assert(LWLockHeldByMeInMode(SyncRepLock, LW_EXCLUSIVE));
+ Assert(SyncRepQueueIsOrderedByLSN(mode));
+
+ proc = (PGPROC *) SHMQueueNext(&(WalSndCtl->SyncRepQueue[mode]),
+ &(WalSndCtl->SyncRepQueue[mode]),
+ offsetof(PGPROC, syncRepLinks));
+
+ while (proc)
+ {
+ /*
+ * Assume the queue is ordered by LSN
+ */
+ if (!all && walsndctl->lsn[mode] < proc->waitLSN)
+ return numprocs;
+
+ /*
+ * Move to next proc, so we can delete thisproc from the queue.
+ * thisproc is valid, proc may be NULL after this.
+ */
+ thisproc = proc;
+ proc = (PGPROC *) SHMQueueNext(&(WalSndCtl->SyncRepQueue[mode]),
+ &(proc->syncRepLinks),
+ offsetof(PGPROC, syncRepLinks));
+
+ /*
+ * Remove thisproc from queue.
+ */
+ SHMQueueDelete(&(thisproc->syncRepLinks));
+
+ /*
+ * SyncRepWaitForLSN() reads syncRepState without holding the lock, so
+ * make sure that it sees the queue link being removed before the
+ * syncRepState change.
+ */
+ pg_write_barrier();
+
+ /*
+ * Set state to complete; see SyncRepWaitForLSN() for discussion of
+ * the various states.
+ */
+ thisproc->syncRepState = SYNC_REP_WAIT_COMPLETE;
+
+ /*
+ * Wake only when we have set state and removed from queue.
+ */
+ SetLatch(&(thisproc->procLatch));
+
+ numprocs++;
+ }
+
+ return numprocs;
+}
+
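+/*
+ * Illustrative example, with hypothetical values (not from the original
+ * code): if the flush queue holds waiters at 0/10, 0/20 and 0/40 and
+ * walsndctl->lsn[SYNC_REP_WAIT_FLUSH] has just advanced to 0/25, the walk
+ * wakes the 0/10 and 0/20 backends and returns 2 upon reaching 0/40; the
+ * ascending-LSN invariant lets it stop at the first unsatisfied waiter.
+ */
+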
+/*
+ * The checkpointer calls this as needed to update the shared
+ * sync_standbys_defined flag, so that backends don't remain permanently wedged
+ * if synchronous_standby_names is unset. It's safe to check the current value
+ * without the lock, because it's only ever updated by one process. But we
+ * must take the lock to change it.
+ */
+void
+SyncRepUpdateSyncStandbysDefined(void)
+{
+ bool sync_standbys_defined = SyncStandbysDefined();
+
+ if (sync_standbys_defined != WalSndCtl->sync_standbys_defined)
+ {
+ LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
+
+ /*
+ * If synchronous_standby_names has been reset to empty, it's futile
+ * for backends to continue waiting. Since the user no longer wants
+ * synchronous replication, we'd better wake them up.
+ */
+ if (!sync_standbys_defined)
+ {
+ int i;
+
+ for (i = 0; i < NUM_SYNC_REP_WAIT_MODE; i++)
+ SyncRepWakeQueue(true, i);
+ }
+
+ /*
+ * Only allow people to join the queue when there are synchronous
+ * standbys defined. Without this interlock, there's a race
+ * condition: we might wake up all the current waiters; then, some
+ * backend that hasn't yet reloaded its config might go to sleep on
+ * the queue (and never wake up). This prevents that.
+ */
+ WalSndCtl->sync_standbys_defined = sync_standbys_defined;
+
+ LWLockRelease(SyncRepLock);
+ }
+}
+
+#ifdef USE_ASSERT_CHECKING
+static bool
+SyncRepQueueIsOrderedByLSN(int mode)
+{
+ PGPROC *proc = NULL;
+ XLogRecPtr lastLSN;
+
+ Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE);
+
+ lastLSN = 0;
+
+ proc = (PGPROC *) SHMQueueNext(&(WalSndCtl->SyncRepQueue[mode]),
+ &(WalSndCtl->SyncRepQueue[mode]),
+ offsetof(PGPROC, syncRepLinks));
+
+ while (proc)
+ {
+ /*
+ * Check the queue is ordered by LSN and that multiple procs don't
+ * have matching LSNs
+ */
+ if (proc->waitLSN <= lastLSN)
+ return false;
+
+ lastLSN = proc->waitLSN;
+
+ proc = (PGPROC *) SHMQueueNext(&(WalSndCtl->SyncRepQueue[mode]),
+ &(proc->syncRepLinks),
+ offsetof(PGPROC, syncRepLinks));
+ }
+
+ return true;
+}
+#endif
+
+/*
+ * ===========================================================
+ * Synchronous Replication functions executed by any process
+ * ===========================================================
+ */
+
+bool
+check_synchronous_standby_names(char **newval, void **extra, GucSource source)
+{
+ if (*newval != NULL && (*newval)[0] != '\0')
+ {
+ int parse_rc;
+ SyncRepConfigData *pconf;
+
+ /* Reset communication variables to ensure a fresh start */
+ syncrep_parse_result = NULL;
+ syncrep_parse_error_msg = NULL;
+
+ /* Parse the synchronous_standby_names string */
+ syncrep_scanner_init(*newval);
+ parse_rc = syncrep_yyparse();
+ syncrep_scanner_finish();
+
+ if (parse_rc != 0 || syncrep_parse_result == NULL)
+ {
+ GUC_check_errcode(ERRCODE_SYNTAX_ERROR);
+ if (syncrep_parse_error_msg)
+ GUC_check_errdetail("%s", syncrep_parse_error_msg);
+ else
+ GUC_check_errdetail("synchronous_standby_names parser failed");
+ return false;
+ }
+
+ if (syncrep_parse_result->num_sync <= 0)
+ {
+ GUC_check_errmsg("number of synchronous standbys (%d) must be greater than zero",
+ syncrep_parse_result->num_sync);
+ return false;
+ }
+
+ /* GUC extra value must be malloc'd, not palloc'd */
+ pconf = (SyncRepConfigData *)
+ malloc(syncrep_parse_result->config_size);
+ if (pconf == NULL)
+ return false;
+ memcpy(pconf, syncrep_parse_result, syncrep_parse_result->config_size);
+
+ *extra = (void *) pconf;
+
+ /*
+ * We need not explicitly clean up syncrep_parse_result. It, and any
+ * other cruft generated during parsing, will be freed when the
+ * current memory context is deleted. (This code is generally run in
+ * a short-lived context used for config file processing, so that will
+ * not be very long.)
+ */
+ }
+ else
+ *extra = NULL;
+
+ return true;
+}
+
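+/*
+ * Illustrative example, with a hypothetical setting (not from the original
+ * code): for
+ *
+ *     synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
+ *
+ * the parser yields num_sync = 2, syncrep_method = SYNC_REP_QUORUM and
+ * nmembers = 3; the parsed config is then copied into malloc'd memory
+ * because GUC "extra" values must outlive the short-lived palloc context
+ * used during parsing.
+ */
+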
+void
+assign_synchronous_standby_names(const char *newval, void *extra)
+{
+ SyncRepConfig = (SyncRepConfigData *) extra;
+}
+
+void
+assign_synchronous_commit(int newval, void *extra)
+{
+ switch (newval)
+ {
+ case SYNCHRONOUS_COMMIT_REMOTE_WRITE:
+ SyncRepWaitMode = SYNC_REP_WAIT_WRITE;
+ break;
+ case SYNCHRONOUS_COMMIT_REMOTE_FLUSH:
+ SyncRepWaitMode = SYNC_REP_WAIT_FLUSH;
+ break;
+ case SYNCHRONOUS_COMMIT_REMOTE_APPLY:
+ SyncRepWaitMode = SYNC_REP_WAIT_APPLY;
+ break;
+ default:
+ SyncRepWaitMode = SYNC_REP_NO_WAIT;
+ break;
+ }
+}
diff --git a/src/backend/replication/syncrep_gram.c b/src/backend/replication/syncrep_gram.c
new file mode 100644
index 0000000..601bf51
--- /dev/null
+++ b/src/backend/replication/syncrep_gram.c
@@ -0,0 +1,1587 @@
+/* A Bison parser, made by GNU Bison 3.3.2. */
+
+/* Bison implementation for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2019 Free Software Foundation,
+ Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* C LALR(1) parser skeleton written by Richard Stallman, by
+ simplifying the original so-called "semantic" parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Undocumented macros, especially those whose name start with YY_,
+ are private implementation details. Do not rely on them. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Bison version. */
+#define YYBISON_VERSION "3.3.2"
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 0
+
+/* Push parsers. */
+#define YYPUSH 0
+
+/* Pull parsers. */
+#define YYPULL 1
+
+
+/* Substitute the variable and function names. */
+#define yyparse syncrep_yyparse
+#define yylex syncrep_yylex
+#define yyerror syncrep_yyerror
+#define yydebug syncrep_yydebug
+#define yynerrs syncrep_yynerrs
+
+#define yylval syncrep_yylval
+#define yychar syncrep_yychar
+
+/* First part of user prologue. */
+#line 1 "syncrep_gram.y" /* yacc.c:337 */
+
+/*-------------------------------------------------------------------------
+ *
+ * syncrep_gram.y - Parser for synchronous_standby_names
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/syncrep_gram.y
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "replication/syncrep.h"
+
+/* Result of parsing is returned in one of these two variables */
+SyncRepConfigData *syncrep_parse_result;
+char *syncrep_parse_error_msg;
+
+static SyncRepConfigData *create_syncrep_config(const char *num_sync,
+ List *members, uint8 syncrep_method);
+
+/*
+ * Bison doesn't allocate anything that needs to live across parser calls,
+ * so we can easily have it use palloc instead of malloc. This prevents
+ * memory leaks if we error out during parsing. Note this only works with
+ * bison >= 2.0. However, in bison 1.875 the default is to use alloca()
+ * if possible, so there's not really much problem anyhow, at least if
+ * you're building with gcc.
+ */
+#define YYMALLOC palloc
+#define YYFREE pfree
+
+
+#line 116 "syncrep_gram.c" /* yacc.c:337 */
+# ifndef YY_NULLPTR
+# if defined __cplusplus
+# if 201103L <= __cplusplus
+# define YY_NULLPTR nullptr
+# else
+# define YY_NULLPTR 0
+# endif
+# else
+# define YY_NULLPTR ((void*)0)
+# endif
+# endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 0
+#endif
+
+
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 0
+#endif
+#if YYDEBUG
+extern int syncrep_yydebug;
+#endif
+
+/* Token type. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ enum yytokentype
+ {
+ NAME = 258,
+ NUM = 259,
+ JUNK = 260,
+ ANY = 261,
+ FIRST = 262
+ };
+#endif
+
+/* Value type. */
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+
+union YYSTYPE
+{
+#line 43 "syncrep_gram.y" /* yacc.c:352 */
+
+ char *str;
+ List *list;
+ SyncRepConfigData *config;
+
+#line 170 "syncrep_gram.c" /* yacc.c:352 */
+};
+
+typedef union YYSTYPE YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+
+extern YYSTYPE syncrep_yylval;
+
+int syncrep_yyparse (void);
+
+
+
+
+
+#ifdef short
+# undef short
+#endif
+
+#ifdef YYTYPE_UINT8
+typedef YYTYPE_UINT8 yytype_uint8;
+#else
+typedef unsigned char yytype_uint8;
+#endif
+
+#ifdef YYTYPE_INT8
+typedef YYTYPE_INT8 yytype_int8;
+#else
+typedef signed char yytype_int8;
+#endif
+
+#ifdef YYTYPE_UINT16
+typedef YYTYPE_UINT16 yytype_uint16;
+#else
+typedef unsigned short yytype_uint16;
+#endif
+
+#ifdef YYTYPE_INT16
+typedef YYTYPE_INT16 yytype_int16;
+#else
+typedef short yytype_int16;
+#endif
+
+#ifndef YYSIZE_T
+# ifdef __SIZE_TYPE__
+# define YYSIZE_T __SIZE_TYPE__
+# elif defined size_t
+# define YYSIZE_T size_t
+# elif ! defined YYSIZE_T
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# else
+# define YYSIZE_T unsigned
+# endif
+#endif
+
+#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
+
+#ifndef YY_
+# if defined YYENABLE_NLS && YYENABLE_NLS
+# if ENABLE_NLS
+# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
+# endif
+# endif
+# ifndef YY_
+# define YY_(Msgid) Msgid
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE
+# if (defined __GNUC__ \
+ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \
+ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C
+# define YY_ATTRIBUTE(Spec) __attribute__(Spec)
+# else
+# define YY_ATTRIBUTE(Spec) /* empty */
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_PURE
+# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__))
+#endif
+
+#ifndef YY_ATTRIBUTE_UNUSED
+# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))
+#endif
+
+/* Suppress unused-variable warnings by "using" E. */
+#if ! defined lint || defined __GNUC__
+# define YYUSE(E) ((void) (E))
+#else
+# define YYUSE(E) /* empty */
+#endif
+
+#if defined __GNUC__ && ! defined __ICC && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
+/* Suppress an incorrect diagnostic about yylval being uninitialized. */
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\
+ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
+ _Pragma ("GCC diagnostic pop")
+#else
+# define YY_INITIAL_VALUE(Value) Value
+#endif
+#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END
+#endif
+#ifndef YY_INITIAL_VALUE
+# define YY_INITIAL_VALUE(Value) /* Nothing. */
+#endif
+
+
+#if ! defined yyoverflow || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# elif defined __BUILTIN_VA_ARG_INCR
+# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
+# elif defined _AIX
+# define YYSTACK_ALLOC __alloca
+# elif defined _MSC_VER
+# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
+# define alloca _alloca
+# else
+# define YYSTACK_ALLOC alloca
+# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+ /* Use EXIT_SUCCESS as a witness for stdlib.h. */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's 'empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
+# endif
+# else
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
+# endif
+# if (defined __cplusplus && ! defined EXIT_SUCCESS \
+ && ! ((defined YYMALLOC || defined malloc) \
+ && (defined YYFREE || defined free)))
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if ! defined malloc && ! defined EXIT_SUCCESS
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if ! defined free && ! defined EXIT_SUCCESS
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# endif
+#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
+
+
+#if (! defined yyoverflow \
+ && (! defined __cplusplus \
+ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ yytype_int16 yyss_alloc;
+ YYSTYPE yyvs_alloc;
+};
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+ N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ + YYSTACK_GAP_MAXIMUM)
+
+# define YYCOPY_NEEDED 1
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
+ Stack = &yyptr->Stack_alloc; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (0)
+
+#endif
+
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from SRC to DST. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined __GNUC__ && 1 < __GNUC__
+# define YYCOPY(Dst, Src, Count) \
+ __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src)))
+# else
+# define YYCOPY(Dst, Src, Count) \
+ do \
+ { \
+ YYSIZE_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (Dst)[yyi] = (Src)[yyi]; \
+ } \
+ while (0)
+# endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 12
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 22
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 11
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 5
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 10
+/* YYNSTATES -- Number of states. */
+#define YYNSTATES 24
+
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 262
+
+/* YYTRANSLATE(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex, with out-of-bounds checking. */
+#define YYTRANSLATE(YYX) \
+ ((unsigned) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
+
+/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex. */
+static const yytype_uint8 yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 8, 9, 2, 2, 10, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7
+};
+
+#if YYDEBUG
+ /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
+static const yytype_uint8 yyrline[] =
+{
+ 0, 59, 59, 63, 64, 65, 66, 70, 71, 75,
+ 76
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE || 0
+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "NAME", "NUM", "JUNK", "ANY", "FIRST",
+ "'('", "')'", "','", "$accept", "result", "standby_config",
+ "standby_list", "standby_name", YY_NULLPTR
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[NUM] -- (External) token number corresponding to the
+ (internal) symbol number NUM (which must be that of a token). */
+static const yytype_uint16 yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 40, 41,
+ 44
+};
+# endif
+
+#define YYPACT_NINF -10
+
+#define yypact_value_is_default(Yystate) \
+ (!!((Yystate) == (-10)))
+
+#define YYTABLE_NINF -1
+
+#define yytable_value_is_error(Yytable_value) \
+ 0
+
+ /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+static const yytype_int8 yypact[] =
+{
+ -2, -10, -5, 11, 14, 19, -10, -4, -10, 6,
+ 12, 13, -10, 6, -10, 2, 6, 6, -10, -10,
+ 4, 7, -10, -10
+};
+
+ /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
+ Performed when YYTABLE does not specify something else to do. Zero
+ means the default is an error. */
+static const yytype_uint8 yydefact[] =
+{
+ 0, 9, 10, 0, 0, 0, 2, 3, 7, 0,
+ 0, 0, 1, 0, 10, 0, 0, 0, 8, 4,
+ 0, 0, 5, 6
+};
+
+ /* YYPGOTO[NTERM-NUM]. */
+static const yytype_int8 yypgoto[] =
+{
+ -10, -10, -10, -9, 9
+};
+
+ /* YYDEFGOTO[NTERM-NUM]. */
+static const yytype_int8 yydefgoto[] =
+{
+ -1, 5, 6, 7, 8
+};
+
+ /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule whose
+ number is the opposite. If YYTABLE_NINF, syntax error. */
+static const yytype_uint8 yytable[] =
+{
+ 15, 1, 2, 9, 3, 4, 13, 20, 21, 1,
+ 14, 19, 13, 22, 13, 10, 23, 13, 11, 12,
+ 16, 17, 18
+};
+
+static const yytype_uint8 yycheck[] =
+{
+ 9, 3, 4, 8, 6, 7, 10, 16, 17, 3,
+ 4, 9, 10, 9, 10, 4, 9, 10, 4, 0,
+ 8, 8, 13
+};
+
+ /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const yytype_uint8 yystos[] =
+{
+ 0, 3, 4, 6, 7, 12, 13, 14, 15, 8,
+ 4, 4, 0, 10, 4, 14, 8, 8, 15, 9,
+ 14, 14, 9, 9
+};
+
+ /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const yytype_uint8 yyr1[] =
+{
+ 0, 11, 12, 13, 13, 13, 13, 14, 14, 15,
+ 15
+};
+
+ /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */
+static const yytype_uint8 yyr2[] =
+{
+ 0, 2, 1, 1, 4, 5, 5, 1, 3, 1,
+ 1
+};
+
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+ do \
+ if (yychar == YYEMPTY) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ YYPOPSTACK (yylen); \
+ yystate = *yyssp; \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror (YY_("syntax error: cannot back up")); \
+ YYERROR; \
+ } \
+ while (0)
+
+/* Error token number */
+#define YYTERROR 1
+#define YYERRCODE 256
+
+
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (0)
+
+/* This macro is provided for backward compatibility. */
+#ifndef YY_LOCATION_PRINT
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+#endif
+
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yy_symbol_print (stderr, \
+ Type, Value); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (0)
+
+
+/*-----------------------------------.
+| Print this symbol's value on YYO. |
+`-----------------------------------*/
+
+static void
+yy_symbol_value_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep)
+{
+ FILE *yyoutput = yyo;
+ YYUSE (yyoutput);
+ if (!yyvaluep)
+ return;
+# ifdef YYPRINT
+ if (yytype < YYNTOKENS)
+ YYPRINT (yyo, yytoknum[yytype], *yyvaluep);
+# endif
+ YYUSE (yytype);
+}
+
+
+/*---------------------------.
+| Print this symbol on YYO. |
+`---------------------------*/
+
+static void
+yy_symbol_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep)
+{
+ YYFPRINTF (yyo, "%s %s (",
+ yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
+
+ yy_symbol_value_print (yyo, yytype, yyvaluep);
+ YYFPRINTF (yyo, ")");
+}
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (included). |
+`------------------------------------------------------------------*/
+
+static void
+yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (; yybottom <= yytop; yybottom++)
+ {
+ int yybot = *yybottom;
+ YYFPRINTF (stderr, " %d", yybot);
+ }
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (0)
+
+
+/*------------------------------------------------.
+| Report that the YYRULE is going to be reduced. |
+`------------------------------------------------*/
+
+static void
+yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule)
+{
+ unsigned long yylno = yyrline[yyrule];
+ int yynrhs = yyr2[yyrule];
+ int yyi;
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
+ yyrule - 1, yylno);
+ /* The symbols being reduced. */
+ for (yyi = 0; yyi < yynrhs; yyi++)
+ {
+ YYFPRINTF (stderr, " $%d = ", yyi + 1);
+ yy_symbol_print (stderr,
+ yystos[yyssp[yyi + 1 - yynrhs]],
+ &yyvsp[(yyi + 1) - (yynrhs)]
+ );
+ YYFPRINTF (stderr, "\n");
+ }
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (yyssp, yyvsp, Rule); \
+} while (0)
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
+ evaluated with infinite-precision integer arithmetic. */
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
+
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined __GLIBC__ && defined _STRING_H
+# define yystrlen strlen
+# else
+/* Return the length of YYSTR. */
+static YYSIZE_T
+yystrlen (const char *yystr)
+{
+ YYSIZE_T yylen;
+ for (yylen = 0; yystr[yylen]; yylen++)
+ continue;
+ return yylen;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST. */
+static char *
+yystpcpy (char *yydest, const char *yysrc)
+{
+ char *yyd = yydest;
+ const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+# ifndef yytnamerr
+/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
+ quotes and backslashes, so that it's suitable for yyerror. The
+ heuristic is that double-quoting is unnecessary unless the string
+ contains an apostrophe, a comma, or backslash (other than
+ backslash-backslash). YYSTR is taken from yytname. If YYRES is
+ null, do not copy; instead, return the length of what the result
+ would have been. */
+static YYSIZE_T
+yytnamerr (char *yyres, const char *yystr)
+{
+ if (*yystr == '"')
+ {
+ YYSIZE_T yyn = 0;
+ char const *yyp = yystr;
+
+ for (;;)
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ else
+ goto append;
+
+ append:
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
+ do_not_strip_quotes: ;
+ }
+
+ if (! yyres)
+ return yystrlen (yystr);
+
+ return (YYSIZE_T) (yystpcpy (yyres, yystr) - yyres);
+}
+# endif
+
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+ about the unexpected token YYTOKEN for the state stack whose top is
+ YYSSP.
+
+ Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
+ not large enough to hold the message. In that case, also set
+ *YYMSG_ALLOC to the required number of bytes. Return 2 if the
+ required number of bytes is too large to store. */
+static int
+yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
+ yytype_int16 *yyssp, int yytoken)
+{
+ YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
+ YYSIZE_T yysize = yysize0;
+ enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+ /* Internationalized format string. */
+ const char *yyformat = YY_NULLPTR;
+ /* Arguments of yyformat. */
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+ /* Number of reported tokens (one for the "unexpected", one per
+ "expected"). */
+ int yycount = 0;
+
+ /* There are many possibilities here to consider:
+ - If this state is a consistent state with a default action, then
+ the only way this function was invoked is if the default action
+ is an error action. In that case, don't check for expected
+ tokens because there are none.
+ - The only way there can be no lookahead present (in yychar) is if
+ this state is a consistent state with a default action. Thus,
+ detecting the absence of a lookahead is sufficient to determine
+ that there is no unexpected or expected token to report. In that
+ case, just report a simple "syntax error".
+ - Don't assume there isn't a lookahead just because this state is a
+ consistent state with a default action. There might have been a
+ previous inconsistent state, consistent state with a non-default
+ action, or user semantic action that manipulated yychar.
+ - Of course, the expected token list depends on states to have
+ correct lookahead information, and it depends on the parser not
+ to perform extra reductions after fetching a lookahead from the
+ scanner and before detecting a syntax error. Thus, state merging
+ (from LALR or IELR) and default reductions corrupt the expected
+ token list. However, the list is correct for canonical LR with
+ one exception: it will still contain any token that will not be
+ accepted due to an error action in a later state.
+ */
+ if (yytoken != YYEMPTY)
+ {
+ int yyn = yypact[*yyssp];
+ yyarg[yycount++] = yytname[yytoken];
+ if (!yypact_value_is_default (yyn))
+ {
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. In other words, skip the first -YYN actions for
+ this state because they are default actions. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn + 1;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yyx;
+
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+ && !yytable_value_is_error (yytable[yyx + yyn]))
+ {
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+ {
+ yycount = 1;
+ yysize = yysize0;
+ break;
+ }
+ yyarg[yycount++] = yytname[yyx];
+ {
+ YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
+ if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
+ yysize = yysize1;
+ else
+ return 2;
+ }
+ }
+ }
+ }
+
+ switch (yycount)
+ {
+# define YYCASE_(N, S) \
+ case N: \
+ yyformat = S; \
+ break
+ default: /* Avoid compiler warnings. */
+ YYCASE_(0, YY_("syntax error"));
+ YYCASE_(1, YY_("syntax error, unexpected %s"));
+ YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+ YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+ YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+ YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+ }
+
+ {
+ YYSIZE_T yysize1 = yysize + yystrlen (yyformat);
+ if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
+ yysize = yysize1;
+ else
+ return 2;
+ }
+
+ if (*yymsg_alloc < yysize)
+ {
+ *yymsg_alloc = 2 * yysize;
+ if (! (yysize <= *yymsg_alloc
+ && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+ *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+ return 1;
+ }
+
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Don't have undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ {
+ char *yyp = *yymsg;
+ int yyi = 0;
+ while ((*yyp = *yyformat) != '\0')
+ if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyformat += 2;
+ }
+ else
+ {
+ yyp++;
+ yyformat++;
+ }
+ }
+ return 0;
+}
+#endif /* YYERROR_VERBOSE */
+
+/*-----------------------------------------------.
+| Release the memory associated to this symbol. |
+`-----------------------------------------------*/
+
+static void
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
+{
+ YYUSE (yyvaluep);
+ if (!yymsg)
+ yymsg = "Deleting";
+ YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ YYUSE (yytype);
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+}
+
+
+
+
+/* The lookahead symbol. */
+int yychar;
+
+/* The semantic value of the lookahead symbol. */
+YYSTYPE yylval;
+/* Number of syntax errors so far. */
+int yynerrs;
+
+
+/*----------.
+| yyparse. |
+`----------*/
+
+int
+yyparse (void)
+{
+ int yystate;
+ /* Number of tokens to shift before error messages enabled. */
+ int yyerrstatus;
+
+ /* The stacks and their tools:
+ 'yyss': related to states.
+ 'yyvs': related to semantic values.
+
+ Refer to the stacks through separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ yytype_int16 yyssa[YYINITDEPTH];
+ yytype_int16 *yyss;
+ yytype_int16 *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs;
+ YYSTYPE *yyvsp;
+
+ YYSIZE_T yystacksize;
+
+ int yyn;
+ int yyresult;
+ /* Lookahead token as an internal (translated) token number. */
+ int yytoken = 0;
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+
+#if YYERROR_VERBOSE
+ /* Buffer for error messages, and its allocated size. */
+ char yymsgbuf[128];
+ char *yymsg = yymsgbuf;
+ YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
+#endif
+
+#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
+
+ /* The number of symbols on the RHS of the reduced rule.
+ Keep to zero when no symbol should be popped. */
+ int yylen = 0;
+
+ yyssp = yyss = yyssa;
+ yyvsp = yyvs = yyvsa;
+ yystacksize = YYINITDEPTH;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+ goto yysetstate;
+
+
+/*------------------------------------------------------------.
+| yynewstate -- push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. So pushing a state here evens the stacks. */
+ yyssp++;
+
+
+/*--------------------------------------------------------------------.
+| yynewstate -- set current state (the top of the stack) to yystate. |
+`--------------------------------------------------------------------*/
+yysetstate:
+ *yyssp = (yytype_int16) yystate;
+
+ if (yyss + yystacksize - 1 <= yyssp)
+#if !defined yyoverflow && !defined YYSTACK_RELOCATE
+ goto yyexhaustedlab;
+#else
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYSIZE_T yysize = (YYSIZE_T) (yyssp - yyss + 1);
+
+# if defined yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ yytype_int16 *yyss1 = yyss;
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow (YY_("memory exhausted"),
+ &yyss1, yysize * sizeof (*yyssp),
+ &yyvs1, yysize * sizeof (*yyvsp),
+ &yystacksize);
+ yyss = yyss1;
+ yyvs = yyvs1;
+ }
+# else /* defined YYSTACK_RELOCATE */
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyexhaustedlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ yytype_int16 *yyss1 = yyss;
+ union yyalloc *yyptr =
+ (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+ if (! yyptr)
+ goto yyexhaustedlab;
+ YYSTACK_RELOCATE (yyss_alloc, yyss);
+ YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+
+ YYDPRINTF ((stderr, "Stack size increased to %lu\n",
+ (unsigned long) yystacksize));
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
+#endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */
+
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+
+ if (yystate == YYFINAL)
+ YYACCEPT;
+
+ goto yybackup;
+
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+ /* Do appropriate processing given the current state. Read a
+ lookahead token if we need one and don't already have one. */
+
+ /* First try to decide what to do without reference to lookahead token. */
+ yyn = yypact[yystate];
+ if (yypact_value_is_default (yyn))
+ goto yydefault;
+
+ /* Not known => get a lookahead token if don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = yylex ();
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
+ if (yyn <= 0)
+ {
+ if (yytable_value_is_error (yyn))
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ /* Shift the lookahead token. */
+ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
+
+ /* Discard the shifted token. */
+ yychar = YYEMPTY;
+
+ yystate = yyn;
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ '$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
+
+
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 2:
+#line 59 "syncrep_gram.y" /* yacc.c:1652 */
+ { syncrep_parse_result = (yyvsp[0].config); }
+#line 1266 "syncrep_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 3:
+#line 63 "syncrep_gram.y" /* yacc.c:1652 */
+ { (yyval.config) = create_syncrep_config("1", (yyvsp[0].list), SYNC_REP_PRIORITY); }
+#line 1272 "syncrep_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 4:
+#line 64 "syncrep_gram.y" /* yacc.c:1652 */
+ { (yyval.config) = create_syncrep_config((yyvsp[-3].str), (yyvsp[-1].list), SYNC_REP_PRIORITY); }
+#line 1278 "syncrep_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 5:
+#line 65 "syncrep_gram.y" /* yacc.c:1652 */
+ { (yyval.config) = create_syncrep_config((yyvsp[-3].str), (yyvsp[-1].list), SYNC_REP_QUORUM); }
+#line 1284 "syncrep_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 6:
+#line 66 "syncrep_gram.y" /* yacc.c:1652 */
+ { (yyval.config) = create_syncrep_config((yyvsp[-3].str), (yyvsp[-1].list), SYNC_REP_PRIORITY); }
+#line 1290 "syncrep_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 7:
+#line 70 "syncrep_gram.y" /* yacc.c:1652 */
+ { (yyval.list) = list_make1((yyvsp[0].str)); }
+#line 1296 "syncrep_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 8:
+#line 71 "syncrep_gram.y" /* yacc.c:1652 */
+ { (yyval.list) = lappend((yyvsp[-2].list), (yyvsp[0].str)); }
+#line 1302 "syncrep_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 9:
+#line 75 "syncrep_gram.y" /* yacc.c:1652 */
+ { (yyval.str) = (yyvsp[0].str); }
+#line 1308 "syncrep_gram.c" /* yacc.c:1652 */
+ break;
+
+ case 10:
+#line 76 "syncrep_gram.y" /* yacc.c:1652 */
+ { (yyval.str) = (yyvsp[0].str); }
+#line 1314 "syncrep_gram.c" /* yacc.c:1652 */
+ break;
+
+
+#line 1318 "syncrep_gram.c" /* yacc.c:1652 */
+ default: break;
+ }
+ /* User semantic actions sometimes alter yychar, and that requires
+ that yytoken be updated with the new translation. We take the
+ approach of translating immediately before every use of yytoken.
+ One alternative is translating here after every semantic action,
+ but that translation would be missed if the semantic action invokes
+ YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+ if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
+ incorrect destructor might then be invoked immediately. In the
+ case of YYERROR or YYBACKUP, subsequent parser actions might lead
+ to an incorrect destructor call or verbose syntax error message
+ before the lookahead is translated. */
+ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
+
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+
+ /* Now 'shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+ {
+ const int yylhs = yyr1[yyn] - YYNTOKENS;
+ const int yyi = yypgoto[yylhs] + *yyssp;
+ yystate = (0 <= yyi && yyi <= YYLAST && yycheck[yyi] == *yyssp
+ ? yytable[yyi]
+ : yydefgoto[yylhs]);
+ }
+
+ goto yynewstate;
+
+
+/*--------------------------------------.
+| yyerrlab -- here on detecting error. |
+`--------------------------------------*/
+yyerrlab:
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if ! YYERROR_VERBOSE
+ yyerror (YY_("syntax error"));
+#else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+ yyssp, yytoken)
+ {
+ char const *yymsgp = YY_("syntax error");
+ int yysyntax_error_status;
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ if (yysyntax_error_status == 0)
+ yymsgp = yymsg;
+ else if (yysyntax_error_status == 1)
+ {
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+ yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
+ if (!yymsg)
+ {
+ yymsg = yymsgbuf;
+ yymsg_alloc = sizeof yymsgbuf;
+ yysyntax_error_status = 2;
+ }
+ else
+ {
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ yymsgp = yymsg;
+ }
+ }
+ yyerror (yymsgp);
+ if (yysyntax_error_status == 2)
+ goto yyexhaustedlab;
+ }
+# undef YYSYNTAX_ERROR
+#endif
+ }
+
+
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse lookahead token after an
+ error, discard it. */
+
+ if (yychar <= YYEOF)
+ {
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ YYABORT;
+ }
+ else
+ {
+ yydestruct ("Error: discarding",
+ yytoken, &yylval);
+ yychar = YYEMPTY;
+ }
+ }
+
+ /* Else will try to reuse lookahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*---------------------------------------------------.
+| yyerrorlab -- error raised explicitly by YYERROR. |
+`---------------------------------------------------*/
+yyerrorlab:
+ /* Pacify compilers when the user code never invokes YYERROR and the
+ label yyerrorlab therefore never appears in user code. */
+ if (0)
+ YYERROR;
+
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYERROR. */
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+ yystate = *yyssp;
+ goto yyerrlab1;
+
+
+/*-------------------------------------------------------------.
+| yyerrlab1 -- common code for both syntax error and YYERROR. |
+`-------------------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (!yypact_value_is_default (yyn))
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+
+ yydestruct ("Error: popping",
+ yystos[yystate], yyvsp);
+ YYPOPSTACK (1);
+ yystate = *yyssp;
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+
+
+ /* Shift the error token. */
+ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yyresult = 1;
+ goto yyreturn;
+
+
+#if !defined yyoverflow || YYERROR_VERBOSE
+/*-------------------------------------------------.
+| yyexhaustedlab -- memory exhaustion comes here. |
+`-------------------------------------------------*/
+yyexhaustedlab:
+ yyerror (YY_("memory exhausted"));
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+
+/*-----------------------------------------------------.
+| yyreturn -- parsing is finished, return the result. |
+`-----------------------------------------------------*/
+yyreturn:
+ if (yychar != YYEMPTY)
+ {
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = YYTRANSLATE (yychar);
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval);
+ }
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYABORT or YYACCEPT. */
+ YYPOPSTACK (yylen);
+ YY_STACK_PRINT (yyss, yyssp);
+ while (yyssp != yyss)
+ {
+ yydestruct ("Cleanup: popping",
+ yystos[*yyssp], yyvsp);
+ YYPOPSTACK (1);
+ }
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+#if YYERROR_VERBOSE
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+#endif
+ return yyresult;
+}
+#line 78 "syncrep_gram.y" /* yacc.c:1918 */
+
+
+static SyncRepConfigData *
+create_syncrep_config(const char *num_sync, List *members, uint8 syncrep_method)
+{
+ SyncRepConfigData *config;
+ int size;
+ ListCell *lc;
+ char *ptr;
+
+ /* Compute space needed for flat representation */
+ size = offsetof(SyncRepConfigData, member_names);
+ foreach(lc, members)
+ {
+ char *standby_name = (char *) lfirst(lc);
+
+ size += strlen(standby_name) + 1;
+ }
+
+ /* And transform the data into flat representation */
+ config = (SyncRepConfigData *) palloc(size);
+
+ config->config_size = size;
+ config->num_sync = atoi(num_sync);
+ config->syncrep_method = syncrep_method;
+ config->nmembers = list_length(members);
+ ptr = config->member_names;
+ foreach(lc, members)
+ {
+ char *standby_name = (char *) lfirst(lc);
+
+ strcpy(ptr, standby_name);
+ ptr += strlen(standby_name) + 1;
+ }
+
+ return config;
+}
+
+#include "syncrep_scanner.c"
diff --git a/src/backend/replication/syncrep_gram.y b/src/backend/replication/syncrep_gram.y
new file mode 100644
index 0000000..350195e
--- /dev/null
+++ b/src/backend/replication/syncrep_gram.y
@@ -0,0 +1,116 @@
+%{
+/*-------------------------------------------------------------------------
+ *
+ * syncrep_gram.y - Parser for synchronous_standby_names
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/syncrep_gram.y
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "replication/syncrep.h"
+
+/* Result of parsing is returned in one of these two variables */
+SyncRepConfigData *syncrep_parse_result;
+char *syncrep_parse_error_msg;
+
+static SyncRepConfigData *create_syncrep_config(const char *num_sync,
+ List *members, uint8 syncrep_method);
+
+/*
+ * Bison doesn't allocate anything that needs to live across parser calls,
+ * so we can easily have it use palloc instead of malloc. This prevents
+ * memory leaks if we error out during parsing. Note this only works with
+ * bison >= 2.0. However, in bison 1.875 the default is to use alloca()
+ * if possible, so there's not really much problem anyhow, at least if
+ * you're building with gcc.
+ */
+#define YYMALLOC palloc
+#define YYFREE pfree
+
+%}
+
+%expect 0
+%name-prefix="syncrep_yy"
+
+%union
+{
+ char *str;
+ List *list;
+ SyncRepConfigData *config;
+}
+
+%token <str> NAME NUM JUNK ANY FIRST
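+
+/* Annotation (not part of the original grammar file): NAME and NUM carry
+ * standby names and counts from the scanner, ANY and FIRST are the method
+ * keywords, and JUNK marks any character the scanner could not tokenize;
+ * since no production accepts JUNK, it forces a syntax error.
+ */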
+
+%type <config> result standby_config
+%type <list> standby_list
+%type <str> standby_name
+
+%start result
+
+%%
+result:
+ standby_config { syncrep_parse_result = $1; }
+ ;
+
+standby_config:
+ standby_list { $$ = create_syncrep_config("1", $1, SYNC_REP_PRIORITY); }
+ | NUM '(' standby_list ')' { $$ = create_syncrep_config($1, $3, SYNC_REP_PRIORITY); }
+ | ANY NUM '(' standby_list ')' { $$ = create_syncrep_config($2, $4, SYNC_REP_QUORUM); }
+ | FIRST NUM '(' standby_list ')' { $$ = create_syncrep_config($2, $4, SYNC_REP_PRIORITY); }
+ ;
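+
+/*
+ * Illustrative examples (annotation only, not part of the grammar): with
+ * these productions, typical synchronous_standby_names settings parse as
+ *
+ *     's1, s2, s3'           -> 1 (s1, s2, s3), priority method
+ *     '2 (s1, s2, s3)'       -> num_sync 2, priority method
+ *     'ANY 2 (s1, s2, s3)'   -> num_sync 2, quorum method
+ *     'FIRST 2 (s1, s2, s3)' -> num_sync 2, priority method
+ *
+ * i.e. a bare standby_list defaults to num_sync 1 with priority ordering.
+ */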
+
+standby_list:
+ standby_name { $$ = list_make1($1); }
+ | standby_list ',' standby_name { $$ = lappend($1, $3); }
+ ;
+
+standby_name:
+ NAME { $$ = $1; }
+ | NUM { $$ = $1; }
+ ;
+%%
+
+static SyncRepConfigData *
+create_syncrep_config(const char *num_sync, List *members, uint8 syncrep_method)
+{
+ SyncRepConfigData *config;
+ int size;
+ ListCell *lc;
+ char *ptr;
+
+ /* Compute space needed for flat representation */
+ size = offsetof(SyncRepConfigData, member_names);
+ foreach(lc, members)
+ {
+ char *standby_name = (char *) lfirst(lc);
+
+ size += strlen(standby_name) + 1;
+ }
+
+ /* And transform the data into flat representation */
+ config = (SyncRepConfigData *) palloc(size);
+
+ config->config_size = size;
+ config->num_sync = atoi(num_sync);
+ config->syncrep_method = syncrep_method;
+ config->nmembers = list_length(members);
+ ptr = config->member_names;
+ foreach(lc, members)
+ {
+ char *standby_name = (char *) lfirst(lc);
+
+ strcpy(ptr, standby_name);
+ ptr += strlen(standby_name) + 1;
+ }
+
+ return config;
+}
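+
+/*
+ * Annotation: a sketch of the flat layout built above, for the hypothetical
+ * call create_syncrep_config("2", ["s1", "s2"], SYNC_REP_PRIORITY).  The
+ * single palloc'd chunk holds the SyncRepConfigData header fields
+ * (config_size, num_sync = 2, syncrep_method, nmembers = 2) followed
+ * immediately by the packed member names "s1\0s2\0".  Because everything
+ * lives in one allocation of config_size bytes, the whole configuration
+ * can be copied elsewhere with a single memcpy.
+ */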
+
+#include "syncrep_scanner.c"
diff --git a/src/backend/replication/syncrep_scanner.c b/src/backend/replication/syncrep_scanner.c
new file mode 100644
index 0000000..42bed8c
--- /dev/null
+++ b/src/backend/replication/syncrep_scanner.c
@@ -0,0 +1,2163 @@
+#line 2 "syncrep_scanner.c"
+
+#line 4 "syncrep_scanner.c"
+
+#define YY_INT_ALIGNED short int
+
+/* A lexical scanner generated by flex */
+
+#define yy_create_buffer syncrep_yy_create_buffer
+#define yy_delete_buffer syncrep_yy_delete_buffer
+#define yy_scan_buffer syncrep_yy_scan_buffer
+#define yy_scan_string syncrep_yy_scan_string
+#define yy_scan_bytes syncrep_yy_scan_bytes
+#define yy_init_buffer syncrep_yy_init_buffer
+#define yy_flush_buffer syncrep_yy_flush_buffer
+#define yy_load_buffer_state syncrep_yy_load_buffer_state
+#define yy_switch_to_buffer syncrep_yy_switch_to_buffer
+#define yypush_buffer_state syncrep_yypush_buffer_state
+#define yypop_buffer_state syncrep_yypop_buffer_state
+#define yyensure_buffer_stack syncrep_yyensure_buffer_stack
+#define yy_flex_debug syncrep_yy_flex_debug
+#define yyin syncrep_yyin
+#define yyleng syncrep_yyleng
+#define yylex syncrep_yylex
+#define yylineno syncrep_yylineno
+#define yyout syncrep_yyout
+#define yyrestart syncrep_yyrestart
+#define yytext syncrep_yytext
+#define yywrap syncrep_yywrap
+#define yyalloc syncrep_yyalloc
+#define yyrealloc syncrep_yyrealloc
+#define yyfree syncrep_yyfree
+
+#define FLEX_SCANNER
+#define YY_FLEX_MAJOR_VERSION 2
+#define YY_FLEX_MINOR_VERSION 6
+#define YY_FLEX_SUBMINOR_VERSION 4
+#if YY_FLEX_SUBMINOR_VERSION > 0
+#define FLEX_BETA
+#endif
+
+#ifdef yy_create_buffer
+#define syncrep_yy_create_buffer_ALREADY_DEFINED
+#else
+#define yy_create_buffer syncrep_yy_create_buffer
+#endif
+
+#ifdef yy_delete_buffer
+#define syncrep_yy_delete_buffer_ALREADY_DEFINED
+#else
+#define yy_delete_buffer syncrep_yy_delete_buffer
+#endif
+
+#ifdef yy_scan_buffer
+#define syncrep_yy_scan_buffer_ALREADY_DEFINED
+#else
+#define yy_scan_buffer syncrep_yy_scan_buffer
+#endif
+
+#ifdef yy_scan_string
+#define syncrep_yy_scan_string_ALREADY_DEFINED
+#else
+#define yy_scan_string syncrep_yy_scan_string
+#endif
+
+#ifdef yy_scan_bytes
+#define syncrep_yy_scan_bytes_ALREADY_DEFINED
+#else
+#define yy_scan_bytes syncrep_yy_scan_bytes
+#endif
+
+#ifdef yy_init_buffer
+#define syncrep_yy_init_buffer_ALREADY_DEFINED
+#else
+#define yy_init_buffer syncrep_yy_init_buffer
+#endif
+
+#ifdef yy_flush_buffer
+#define syncrep_yy_flush_buffer_ALREADY_DEFINED
+#else
+#define yy_flush_buffer syncrep_yy_flush_buffer
+#endif
+
+#ifdef yy_load_buffer_state
+#define syncrep_yy_load_buffer_state_ALREADY_DEFINED
+#else
+#define yy_load_buffer_state syncrep_yy_load_buffer_state
+#endif
+
+#ifdef yy_switch_to_buffer
+#define syncrep_yy_switch_to_buffer_ALREADY_DEFINED
+#else
+#define yy_switch_to_buffer syncrep_yy_switch_to_buffer
+#endif
+
+#ifdef yypush_buffer_state
+#define syncrep_yypush_buffer_state_ALREADY_DEFINED
+#else
+#define yypush_buffer_state syncrep_yypush_buffer_state
+#endif
+
+#ifdef yypop_buffer_state
+#define syncrep_yypop_buffer_state_ALREADY_DEFINED
+#else
+#define yypop_buffer_state syncrep_yypop_buffer_state
+#endif
+
+#ifdef yyensure_buffer_stack
+#define syncrep_yyensure_buffer_stack_ALREADY_DEFINED
+#else
+#define yyensure_buffer_stack syncrep_yyensure_buffer_stack
+#endif
+
+#ifdef yylex
+#define syncrep_yylex_ALREADY_DEFINED
+#else
+#define yylex syncrep_yylex
+#endif
+
+#ifdef yyrestart
+#define syncrep_yyrestart_ALREADY_DEFINED
+#else
+#define yyrestart syncrep_yyrestart
+#endif
+
+#ifdef yylex_init
+#define syncrep_yylex_init_ALREADY_DEFINED
+#else
+#define yylex_init syncrep_yylex_init
+#endif
+
+#ifdef yylex_init_extra
+#define syncrep_yylex_init_extra_ALREADY_DEFINED
+#else
+#define yylex_init_extra syncrep_yylex_init_extra
+#endif
+
+#ifdef yylex_destroy
+#define syncrep_yylex_destroy_ALREADY_DEFINED
+#else
+#define yylex_destroy syncrep_yylex_destroy
+#endif
+
+#ifdef yyget_debug
+#define syncrep_yyget_debug_ALREADY_DEFINED
+#else
+#define yyget_debug syncrep_yyget_debug
+#endif
+
+#ifdef yyset_debug
+#define syncrep_yyset_debug_ALREADY_DEFINED
+#else
+#define yyset_debug syncrep_yyset_debug
+#endif
+
+#ifdef yyget_extra
+#define syncrep_yyget_extra_ALREADY_DEFINED
+#else
+#define yyget_extra syncrep_yyget_extra
+#endif
+
+#ifdef yyset_extra
+#define syncrep_yyset_extra_ALREADY_DEFINED
+#else
+#define yyset_extra syncrep_yyset_extra
+#endif
+
+#ifdef yyget_in
+#define syncrep_yyget_in_ALREADY_DEFINED
+#else
+#define yyget_in syncrep_yyget_in
+#endif
+
+#ifdef yyset_in
+#define syncrep_yyset_in_ALREADY_DEFINED
+#else
+#define yyset_in syncrep_yyset_in
+#endif
+
+#ifdef yyget_out
+#define syncrep_yyget_out_ALREADY_DEFINED
+#else
+#define yyget_out syncrep_yyget_out
+#endif
+
+#ifdef yyset_out
+#define syncrep_yyset_out_ALREADY_DEFINED
+#else
+#define yyset_out syncrep_yyset_out
+#endif
+
+#ifdef yyget_leng
+#define syncrep_yyget_leng_ALREADY_DEFINED
+#else
+#define yyget_leng syncrep_yyget_leng
+#endif
+
+#ifdef yyget_text
+#define syncrep_yyget_text_ALREADY_DEFINED
+#else
+#define yyget_text syncrep_yyget_text
+#endif
+
+#ifdef yyget_lineno
+#define syncrep_yyget_lineno_ALREADY_DEFINED
+#else
+#define yyget_lineno syncrep_yyget_lineno
+#endif
+
+#ifdef yyset_lineno
+#define syncrep_yyset_lineno_ALREADY_DEFINED
+#else
+#define yyset_lineno syncrep_yyset_lineno
+#endif
+
+#ifdef yywrap
+#define syncrep_yywrap_ALREADY_DEFINED
+#else
+#define yywrap syncrep_yywrap
+#endif
+
+#ifdef yyalloc
+#define syncrep_yyalloc_ALREADY_DEFINED
+#else
+#define yyalloc syncrep_yyalloc
+#endif
+
+#ifdef yyrealloc
+#define syncrep_yyrealloc_ALREADY_DEFINED
+#else
+#define yyrealloc syncrep_yyrealloc
+#endif
+
+#ifdef yyfree
+#define syncrep_yyfree_ALREADY_DEFINED
+#else
+#define yyfree syncrep_yyfree
+#endif
+
+#ifdef yytext
+#define syncrep_yytext_ALREADY_DEFINED
+#else
+#define yytext syncrep_yytext
+#endif
+
+#ifdef yyleng
+#define syncrep_yyleng_ALREADY_DEFINED
+#else
+#define yyleng syncrep_yyleng
+#endif
+
+#ifdef yyin
+#define syncrep_yyin_ALREADY_DEFINED
+#else
+#define yyin syncrep_yyin
+#endif
+
+#ifdef yyout
+#define syncrep_yyout_ALREADY_DEFINED
+#else
+#define yyout syncrep_yyout
+#endif
+
+#ifdef yy_flex_debug
+#define syncrep_yy_flex_debug_ALREADY_DEFINED
+#else
+#define yy_flex_debug syncrep_yy_flex_debug
+#endif
+
+#ifdef yylineno
+#define syncrep_yylineno_ALREADY_DEFINED
+#else
+#define yylineno syncrep_yylineno
+#endif
+
+/* First, we deal with platform-specific or compiler-specific issues. */
+
+/* begin standard C headers. */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* end standard C headers. */
+
+/* flex integer type definitions */
+
+#ifndef FLEXINT_H
+#define FLEXINT_H
+
+/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+
+/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
+ * if you want the limit (max/min) macros for int types.
+ */
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS 1
+#endif
+
+#include <inttypes.h>
+typedef int8_t flex_int8_t;
+typedef uint8_t flex_uint8_t;
+typedef int16_t flex_int16_t;
+typedef uint16_t flex_uint16_t;
+typedef int32_t flex_int32_t;
+typedef uint32_t flex_uint32_t;
+#else
+typedef signed char flex_int8_t;
+typedef short int flex_int16_t;
+typedef int flex_int32_t;
+typedef unsigned char flex_uint8_t;
+typedef unsigned short int flex_uint16_t;
+typedef unsigned int flex_uint32_t;
+
+/* Limits of integral types. */
+#ifndef INT8_MIN
+#define INT8_MIN (-128)
+#endif
+#ifndef INT16_MIN
+#define INT16_MIN (-32767-1)
+#endif
+#ifndef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#endif
+#ifndef INT8_MAX
+#define INT8_MAX (127)
+#endif
+#ifndef INT16_MAX
+#define INT16_MAX (32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX (2147483647)
+#endif
+#ifndef UINT8_MAX
+#define UINT8_MAX (255U)
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX (65535U)
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+#ifndef SIZE_MAX
+#define SIZE_MAX (~(size_t)0)
+#endif
+
+#endif /* ! C99 */
+
+#endif /* ! FLEXINT_H */
+
+/* begin standard C++ headers. */
+
+/* TODO: this is always defined, so inline it */
+#define yyconst const
+
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define yynoreturn __attribute__((__noreturn__))
+#else
+#define yynoreturn
+#endif
+
+/* Returned upon end-of-file. */
+#define YY_NULL 0
+
+/* Promotes a possibly negative, possibly signed char to an
+ * integer in range [0..255] for use as an array index.
+ */
+#define YY_SC_TO_UI(c) ((YY_CHAR) (c))
+
+/* Enter a start condition. This macro really ought to take a parameter,
+ * but we do it the disgusting crufty way forced on us by the ()-less
+ * definition of BEGIN.
+ */
+#define BEGIN (yy_start) = 1 + 2 *
+/* Translate the current start state into a value that can be later handed
+ * to BEGIN to return to the state. The YYSTATE alias is for lex
+ * compatibility.
+ */
+#define YY_START (((yy_start) - 1) / 2)
+#define YYSTATE YY_START
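+/* Annotation: for example, BEGIN(xd) with xd == 1 stores
+ * yy_start = 1 + 2 * 1 = 3, and YY_START recovers (3 - 1) / 2 = 1,
+ * i.e. xd again.
+ */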
+/* Action number for EOF rule of a given start state. */
+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
+/* Special action meaning "start processing a new file". */
+#define YY_NEW_FILE yyrestart( yyin )
+#define YY_END_OF_BUFFER_CHAR 0
+
+/* Size of default input buffer. */
+#ifndef YY_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the read buffer size is 16k rather than 8k.
+ * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case,
+ * so the __ia64__ value below is doubled to match.
+ */
+#define YY_BUF_SIZE 32768
+#else
+#define YY_BUF_SIZE 16384
+#endif /* __ia64__ */
+#endif
+
+/* The state buf must be large enough to hold one state per character in the main buffer.
+ */
+#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
+
+#ifndef YY_TYPEDEF_YY_BUFFER_STATE
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+#endif
+
+#ifndef YY_TYPEDEF_YY_SIZE_T
+#define YY_TYPEDEF_YY_SIZE_T
+typedef size_t yy_size_t;
+#endif
+
+extern int yyleng;
+
+extern FILE *yyin, *yyout;
+
+#define EOB_ACT_CONTINUE_SCAN 0
+#define EOB_ACT_END_OF_FILE 1
+#define EOB_ACT_LAST_MATCH 2
+
+ #define YY_LESS_LINENO(n)
+ #define YY_LINENO_REWIND_TO(ptr)
+
+/* Return all but the first "n" matched characters back to the input stream. */
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ *yy_cp = (yy_hold_char); \
+ YY_RESTORE_YY_MORE_OFFSET \
+ (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
+ YY_DO_BEFORE_ACTION; /* set up yytext again */ \
+ } \
+ while ( 0 )
+#define unput(c) yyunput( c, (yytext_ptr) )
+
+#ifndef YY_STRUCT_YY_BUFFER_STATE
+#define YY_STRUCT_YY_BUFFER_STATE
+struct yy_buffer_state
+ {
+ FILE *yy_input_file;
+
+ char *yy_ch_buf; /* input buffer */
+ char *yy_buf_pos; /* current position in input buffer */
+
+ /* Size of input buffer in bytes, not including room for EOB
+ * characters.
+ */
+ int yy_buf_size;
+
+ /* Number of characters read into yy_ch_buf, not including EOB
+ * characters.
+ */
+ int yy_n_chars;
+
+ /* Whether we "own" the buffer - i.e., we know we created it,
+ * and can realloc() it to grow it, and should free() it to
+ * delete it.
+ */
+ int yy_is_our_buffer;
+
+ /* Whether this is an "interactive" input source; if so, and
+ * if we're using stdio for input, then we want to use getc()
+ * instead of fread(), to make sure we stop fetching input after
+ * each newline.
+ */
+ int yy_is_interactive;
+
+ /* Whether we're considered to be at the beginning of a line.
+ * If so, '^' rules will be active on the next match, otherwise
+ * not.
+ */
+ int yy_at_bol;
+
+ int yy_bs_lineno; /**< The line count. */
+ int yy_bs_column; /**< The column count. */
+
+ /* Whether to try to fill the input buffer when we reach the
+ * end of it.
+ */
+ int yy_fill_buffer;
+
+ int yy_buffer_status;
+
+#define YY_BUFFER_NEW 0
+#define YY_BUFFER_NORMAL 1
+ /* When an EOF's been seen but there's still some text to process
+ * then we mark the buffer as YY_EOF_PENDING, to indicate that we
+ * shouldn't try reading from the input source any more. We might
+ * still have a bunch of tokens to match, though, because of
+ * possible backing-up.
+ *
+ * When we actually see the EOF, we change the status to "new"
+ * (via yyrestart()), so that the user can continue scanning by
+ * just pointing yyin at a new input file.
+ */
+#define YY_BUFFER_EOF_PENDING 2
+
+ };
+#endif /* !YY_STRUCT_YY_BUFFER_STATE */
+
+/* Stack of input buffers. */
+static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
+static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
+static YY_BUFFER_STATE * yy_buffer_stack = NULL; /**< Stack as an array. */
+
+/* We provide macros for accessing buffer states in case in the
+ * future we want to put the buffer states in a more general
+ * "scanner state".
+ *
+ * Returns the top of the stack, or NULL.
+ */
+#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
+ ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
+ : NULL)
+/* Same as previous macro, but useful when we know that the buffer stack is not
+ * NULL or when we need an lvalue. For internal use only.
+ */
+#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
+
+/* yy_hold_char holds the character lost when yytext is formed. */
+static char yy_hold_char;
+static int yy_n_chars; /* number of characters read into yy_ch_buf */
+int yyleng;
+
+/* Points to current character in buffer. */
+static char *yy_c_buf_p = NULL;
+static int yy_init = 0; /* whether we need to initialize */
+static int yy_start = 0; /* start state number */
+
+/* Flag which is used to allow yywrap()'s to do buffer switches
+ * instead of setting up a fresh yyin. A bit of a hack ...
+ */
+static int yy_did_buffer_switch_on_eof;
+
+void yyrestart ( FILE *input_file );
+void yy_switch_to_buffer ( YY_BUFFER_STATE new_buffer );
+YY_BUFFER_STATE yy_create_buffer ( FILE *file, int size );
+void yy_delete_buffer ( YY_BUFFER_STATE b );
+void yy_flush_buffer ( YY_BUFFER_STATE b );
+void yypush_buffer_state ( YY_BUFFER_STATE new_buffer );
+void yypop_buffer_state ( void );
+
+static void yyensure_buffer_stack ( void );
+static void yy_load_buffer_state ( void );
+static void yy_init_buffer ( YY_BUFFER_STATE b, FILE *file );
+#define YY_FLUSH_BUFFER yy_flush_buffer( YY_CURRENT_BUFFER )
+
+YY_BUFFER_STATE yy_scan_buffer ( char *base, yy_size_t size );
+YY_BUFFER_STATE yy_scan_string ( const char *yy_str );
+YY_BUFFER_STATE yy_scan_bytes ( const char *bytes, int len );
+
+void *yyalloc ( yy_size_t );
+void *yyrealloc ( void *, yy_size_t );
+void yyfree ( void * );
+
+#define yy_new_buffer yy_create_buffer
+#define yy_set_interactive(is_interactive) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){ \
+ yyensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ yy_create_buffer( yyin, YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
+ }
+#define yy_set_bol(at_bol) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){\
+ yyensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ yy_create_buffer( yyin, YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
+ }
+#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
+
+/* Begin user sect3 */
+
+#define syncrep_yywrap() (/*CONSTCOND*/1)
+#define YY_SKIP_YYWRAP
+typedef flex_uint8_t YY_CHAR;
+
+FILE *yyin = NULL, *yyout = NULL;
+
+typedef int yy_state_type;
+
+extern int yylineno;
+int yylineno = 1;
+
+extern char *yytext;
+#ifdef yytext_ptr
+#undef yytext_ptr
+#endif
+#define yytext_ptr yytext
+
+static yy_state_type yy_get_previous_state ( void );
+static yy_state_type yy_try_NUL_trans ( yy_state_type current_state );
+static int yy_get_next_buffer ( void );
+static void yynoreturn yy_fatal_error ( const char* msg );
+
+/* Done after the current pattern has been matched and before the
+ * corresponding action - sets up yytext.
+ */
+#define YY_DO_BEFORE_ACTION \
+ (yytext_ptr) = yy_bp; \
+ yyleng = (int) (yy_cp - yy_bp); \
+ (yy_hold_char) = *yy_cp; \
+ *yy_cp = '\0'; \
+ (yy_c_buf_p) = yy_cp;
+#define YY_NUM_RULES 15
+#define YY_END_OF_BUFFER 16
+/* This struct is not used in this scanner,
+ but its presence is necessary. */
+struct yy_trans_info
+ {
+ flex_int32_t yy_verify;
+ flex_int32_t yy_nxt;
+ };
+static const flex_int16_t yy_accept[32] =
+ { 0,
+ 0, 0, 0, 0, 16, 14, 1, 1, 4, 12,
+ 13, 10, 11, 9, 8, 8, 8, 6, 7, 1,
+ 9, 8, 8, 8, 6, 5, 2, 8, 8, 3,
+ 0
+ } ;
+
+static const YY_CHAR yy_ec[256] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 1, 4, 1, 5, 1, 1, 1, 6,
+ 7, 8, 1, 9, 1, 1, 1, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 1, 1, 1,
+ 1, 1, 1, 1, 11, 12, 12, 12, 12, 13,
+ 12, 12, 14, 12, 12, 12, 12, 15, 12, 12,
+ 12, 16, 17, 18, 12, 12, 12, 12, 19, 12,
+ 1, 1, 1, 1, 12, 1, 11, 12, 12, 12,
+
+ 12, 13, 12, 12, 14, 12, 12, 12, 12, 15,
+ 12, 12, 12, 16, 17, 18, 12, 12, 12, 12,
+ 19, 12, 1, 1, 1, 1, 1, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12
+ } ;
+
+static const YY_CHAR yy_meta[20] =
+ { 0,
+ 1, 1, 1, 2, 3, 1, 1, 1, 1, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3
+ } ;
+
+static const flex_int16_t yy_base[35] =
+ { 0,
+ 0, 0, 39, 38, 41, 44, 18, 20, 44, 44,
+ 44, 44, 44, 30, 24, 0, 24, 0, 33, 22,
+ 26, 0, 16, 18, 0, 44, 0, 16, 14, 0,
+ 44, 25, 27, 28
+ } ;
+
+static const flex_int16_t yy_def[35] =
+ { 0,
+ 31, 1, 32, 32, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 33, 33, 33, 34, 31, 31,
+ 31, 33, 33, 33, 34, 31, 33, 33, 33, 33,
+ 0, 31, 31, 31
+ } ;
+
+static const flex_int16_t yy_nxt[64] =
+ { 0,
+ 6, 7, 8, 9, 6, 10, 11, 12, 13, 14,
+ 15, 16, 17, 16, 16, 16, 16, 16, 16, 20,
+ 20, 20, 20, 20, 20, 18, 18, 18, 25, 22,
+ 25, 30, 29, 28, 27, 21, 26, 24, 23, 21,
+ 31, 19, 19, 5, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31
+ } ;
+
+static const flex_int16_t yy_chk[64] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 7,
+ 7, 8, 8, 20, 20, 32, 32, 32, 34, 33,
+ 34, 29, 28, 24, 23, 21, 19, 17, 15, 14,
+ 5, 4, 3, 31, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31
+ } ;
+
+static yy_state_type yy_last_accepting_state;
+static char *yy_last_accepting_cpos;
+
+extern int yy_flex_debug;
+int yy_flex_debug = 0;
+
+/* The intent behind this definition is that it'll catch
+ * any uses of REJECT which flex missed.
+ */
+#define REJECT reject_used_but_not_detected
+#define yymore() yymore_used_but_not_detected
+#define YY_MORE_ADJ 0
+#define YY_RESTORE_YY_MORE_OFFSET
+char *yytext;
+#line 1 "syncrep_scanner.l"
+#line 2 "syncrep_scanner.l"
+/*-------------------------------------------------------------------------
+ *
+ * syncrep_scanner.l
+ * a lexical scanner for synchronous_standby_names
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/syncrep_scanner.l
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "lib/stringinfo.h"
+
+/* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */
+#undef fprintf
+#define fprintf(file, fmt, msg) fprintf_to_ereport(fmt, msg)
+
+static void
+fprintf_to_ereport(const char *fmt, const char *msg)
+{
+ ereport(ERROR, (errmsg_internal("%s", msg)));
+}
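+
+/*
+ * Annotation: with the fprintf override above, a flex fatal error such as
+ * YY_FATAL_ERROR("flex scanner jammed") reaches yy_fatal_error(), whose
+ * fprintf(stderr, "%s\n", msg) call is rewritten into
+ * fprintf_to_ereport(); the resulting ereport(ERROR) throws out of the
+ * scanner before yy_fatal_error() can call exit().
+ */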
+
+/* Handles to the buffer that the lexer uses internally */
+static YY_BUFFER_STATE scanbufhandle;
+
+static StringInfoData xdbuf;
+
+/* LCOV_EXCL_START */
+
+#line 762 "syncrep_scanner.c"
+#define YY_NO_INPUT 1
+/*
+ * <xd> delimited identifiers (double-quoted identifiers)
+ */
+
+#line 768 "syncrep_scanner.c"
+
+#define INITIAL 0
+#define xd 1
+
+#ifndef YY_NO_UNISTD_H
+/* Special case for "unistd.h", since it is non-ANSI. We include it way
+ * down here because we want the user's section 1 to have been scanned first.
+ * The user has a chance to override it with an option.
+ */
+#include <unistd.h>
+#endif
+
+#ifndef YY_EXTRA_TYPE
+#define YY_EXTRA_TYPE void *
+#endif
+
+static int yy_init_globals ( void );
+
+/* Accessor methods to globals.
+ These are made visible to non-reentrant scanners for convenience. */
+
+int yylex_destroy ( void );
+
+int yyget_debug ( void );
+
+void yyset_debug ( int debug_flag );
+
+YY_EXTRA_TYPE yyget_extra ( void );
+
+void yyset_extra ( YY_EXTRA_TYPE user_defined );
+
+FILE *yyget_in ( void );
+
+void yyset_in ( FILE * _in_str );
+
+FILE *yyget_out ( void );
+
+void yyset_out ( FILE * _out_str );
+
+ int yyget_leng ( void );
+
+char *yyget_text ( void );
+
+int yyget_lineno ( void );
+
+void yyset_lineno ( int _line_number );
+
+/* Macros after this point can all be overridden by user definitions in
+ * section 1.
+ */
+
+#ifndef YY_SKIP_YYWRAP
+#ifdef __cplusplus
+extern "C" int yywrap ( void );
+#else
+extern int yywrap ( void );
+#endif
+#endif
+
+#ifndef YY_NO_UNPUT
+
+#endif
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy ( char *, const char *, int );
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen ( const char * );
+#endif
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+static int yyinput ( void );
+#else
+static int input ( void );
+#endif
+
+#endif
+
+/* Amount of stuff to slurp up with each read. */
+#ifndef YY_READ_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k */
+#define YY_READ_BUF_SIZE 16384
+#else
+#define YY_READ_BUF_SIZE 8192
+#endif /* __ia64__ */
+#endif
+
+/* Copy whatever the last rule matched to the standard output. */
+#ifndef ECHO
+/* This used to be an fputs(), but since the string might contain NUL's,
+ * we now use fwrite().
+ */
+#define ECHO do { if (fwrite( yytext, (size_t) yyleng, 1, yyout )) {} } while (0)
+#endif
+
+/* Gets input and stuffs it into "buf". The number of characters read, or
+ * YY_NULL, is returned in "result".
+ */
+#ifndef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
+ { \
+ int c = '*'; \
+ int n; \
+ for ( n = 0; n < max_size && \
+ (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
+ buf[n] = (char) c; \
+ if ( c == '\n' ) \
+ buf[n++] = (char) c; \
+ if ( c == EOF && ferror( yyin ) ) \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ result = n; \
+ } \
+ else \
+ { \
+ errno=0; \
+ while ( (result = (int) fread(buf, 1, (yy_size_t) max_size, yyin)) == 0 && ferror(yyin)) \
+ { \
+ if( errno != EINTR) \
+ { \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ break; \
+ } \
+ errno=0; \
+ clearerr(yyin); \
+ } \
+ }\
+\
+
+#endif
+
+/* No semi-colon after return; correct usage is to write "yyterminate();" -
+ * we don't want an extra ';' after the "return" because that will cause
+ * some compilers to complain about unreachable statements.
+ */
+#ifndef yyterminate
+#define yyterminate() return YY_NULL
+#endif
+
+/* Number of entries by which start-condition stack grows. */
+#ifndef YY_START_STACK_INCR
+#define YY_START_STACK_INCR 25
+#endif
+
+/* Report a fatal error. */
+#ifndef YY_FATAL_ERROR
+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
+#endif
+
+/* end tables serialization structures and prototypes */
+
+/* Default declaration of generated scanner - a define so the user can
+ * easily add parameters.
+ */
+#ifndef YY_DECL
+#define YY_DECL_IS_OURS 1
+
+extern int yylex (void);
+
+#define YY_DECL int yylex (void)
+#endif /* !YY_DECL */
+
+/* Code executed at the beginning of each rule, after yytext and yyleng
+ * have been set up.
+ */
+#ifndef YY_USER_ACTION
+#define YY_USER_ACTION
+#endif
+
+/* Code executed at the end of each rule. */
+#ifndef YY_BREAK
+#define YY_BREAK /*LINTED*/break;
+#endif
+
+#define YY_RULE_SETUP \
+ YY_USER_ACTION
+
+/** The main scanner function which does all the work.
+ */
+YY_DECL
+{
+ yy_state_type yy_current_state;
+ char *yy_cp, *yy_bp;
+ int yy_act;
+
+ if ( !(yy_init) )
+ {
+ (yy_init) = 1;
+
+#ifdef YY_USER_INIT
+ YY_USER_INIT;
+#endif
+
+ if ( ! (yy_start) )
+ (yy_start) = 1; /* first start state */
+
+ if ( ! yyin )
+ yyin = stdin;
+
+ if ( ! yyout )
+ yyout = stdout;
+
+ if ( ! YY_CURRENT_BUFFER ) {
+ yyensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ yy_create_buffer( yyin, YY_BUF_SIZE );
+ }
+
+ yy_load_buffer_state( );
+ }
+
+ {
+#line 66 "syncrep_scanner.l"
+
+#line 986 "syncrep_scanner.c"
+
+ while ( /*CONSTCOND*/1 ) /* loops until end-of-file is reached */
+ {
+ yy_cp = (yy_c_buf_p);
+
+ /* Support of yytext. */
+ *yy_cp = (yy_hold_char);
+
+ /* yy_bp points to the position in yy_ch_buf of the start of
+ * the current run.
+ */
+ yy_bp = yy_cp;
+
+ yy_current_state = (yy_start);
+yy_match:
+ do
+ {
+ YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)] ;
+ if ( yy_accept[yy_current_state] )
+ {
+ (yy_last_accepting_state) = yy_current_state;
+ (yy_last_accepting_cpos) = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 32 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ ++yy_cp;
+ }
+ while ( yy_current_state != 31 );
+ yy_cp = (yy_last_accepting_cpos);
+ yy_current_state = (yy_last_accepting_state);
+
+yy_find_action:
+ yy_act = yy_accept[yy_current_state];
+
+ YY_DO_BEFORE_ACTION;
+
+do_action: /* This label is used only to access EOF actions. */
+
+ switch ( yy_act )
+ { /* beginning of action switch */
+ case 0: /* must back up */
+ /* undo the effects of YY_DO_BEFORE_ACTION */
+ *yy_cp = (yy_hold_char);
+ yy_cp = (yy_last_accepting_cpos);
+ yy_current_state = (yy_last_accepting_state);
+ goto yy_find_action;
+
+case 1:
+/* rule 1 can match eol */
+YY_RULE_SETUP
+#line 67 "syncrep_scanner.l"
+{ /* ignore */ }
+ YY_BREAK
+/* brute-force case insensitivity is safer than relying on flex -i */
+case 2:
+YY_RULE_SETUP
+#line 71 "syncrep_scanner.l"
+{ return ANY; }
+ YY_BREAK
+case 3:
+YY_RULE_SETUP
+#line 72 "syncrep_scanner.l"
+{ return FIRST; }
+ YY_BREAK
+case 4:
+YY_RULE_SETUP
+#line 74 "syncrep_scanner.l"
+{
+ initStringInfo(&xdbuf);
+ BEGIN(xd);
+ }
+ YY_BREAK
+case 5:
+YY_RULE_SETUP
+#line 78 "syncrep_scanner.l"
+{
+ appendStringInfoChar(&xdbuf, '"');
+ }
+ YY_BREAK
+case 6:
+/* rule 6 can match eol */
+YY_RULE_SETUP
+#line 81 "syncrep_scanner.l"
+{
+ appendStringInfoString(&xdbuf, yytext);
+ }
+ YY_BREAK
+case 7:
+YY_RULE_SETUP
+#line 84 "syncrep_scanner.l"
+{
+ yylval.str = xdbuf.data;
+ xdbuf.data = NULL;
+ BEGIN(INITIAL);
+ return NAME;
+ }
+ YY_BREAK
+case YY_STATE_EOF(xd):
+#line 90 "syncrep_scanner.l"
+{
+ yyerror("unterminated quoted identifier");
+ return JUNK;
+ }
+ YY_BREAK
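+/* Annotation (worked example): scanning the hypothetical input
+ * "AB""CD" enters <xd> at the opening quote (case 4, which resets
+ * xdbuf), appends AB and CD via case 6, turns the doubled quote into a
+ * single literal '"' via case 5, and returns NAME with
+ * yylval.str = AB"CD at the closing quote (case 7).  Hitting EOF while
+ * still inside <xd> instead reports the unterminated identifier above.
+ */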
+case 8:
+YY_RULE_SETUP
+#line 95 "syncrep_scanner.l"
+{
+ yylval.str = pstrdup(yytext);
+ return NAME;
+ }
+ YY_BREAK
+case 9:
+YY_RULE_SETUP
+#line 100 "syncrep_scanner.l"
+{
+ yylval.str = pstrdup(yytext);
+ return NUM;
+ }
+ YY_BREAK
+case 10:
+YY_RULE_SETUP
+#line 105 "syncrep_scanner.l"
+{
+ yylval.str = "*";
+ return NAME;
+ }
+ YY_BREAK
+case 11:
+YY_RULE_SETUP
+#line 110 "syncrep_scanner.l"
+{ return ','; }
+ YY_BREAK
+case 12:
+YY_RULE_SETUP
+#line 111 "syncrep_scanner.l"
+{ return '('; }
+ YY_BREAK
+case 13:
+YY_RULE_SETUP
+#line 112 "syncrep_scanner.l"
+{ return ')'; }
+ YY_BREAK
+case 14:
+YY_RULE_SETUP
+#line 114 "syncrep_scanner.l"
+{ return JUNK; }
+ YY_BREAK
+case 15:
+YY_RULE_SETUP
+#line 115 "syncrep_scanner.l"
+YY_FATAL_ERROR( "flex scanner jammed" );
+ YY_BREAK
+#line 1145 "syncrep_scanner.c"
+case YY_STATE_EOF(INITIAL):
+ yyterminate();
+
+ case YY_END_OF_BUFFER:
+ {
+ /* Amount of text matched not including the EOB char. */
+ int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
+
+ /* Undo the effects of YY_DO_BEFORE_ACTION. */
+ *yy_cp = (yy_hold_char);
+ YY_RESTORE_YY_MORE_OFFSET
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
+ {
+ /* We're scanning a new file or input source. It's
+ * possible that this happened because the user
+ * just pointed yyin at a new source and called
+ * yylex(). If so, then we have to assure
+ * consistency between YY_CURRENT_BUFFER and our
+ * globals. Here is the right place to do so, because
+ * this is the first action (other than possibly a
+ * back-up) that will match for the new input source.
+ */
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
+ }
+
+ /* Note that here we test for yy_c_buf_p "<=" to the position
+ * of the first EOB in the buffer, since yy_c_buf_p will
+ * already have been incremented past the NUL character
+ * (since all states make transitions on EOB to the
+ * end-of-buffer state). Contrast this with the test
+ * in input().
+ */
+ if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ { /* This was really a NUL. */
+ yy_state_type yy_next_state;
+
+ (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ /* Okay, we're now positioned to make the NUL
+ * transition. We couldn't have
+ * yy_get_previous_state() go ahead and do it
+ * for us because it doesn't know how to deal
+ * with the possibility of jamming (and we don't
+ * want to build jamming into it because then it
+ * will run more slowly).
+ */
+
+ yy_next_state = yy_try_NUL_trans( yy_current_state );
+
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+
+ if ( yy_next_state )
+ {
+ /* Consume the NUL. */
+ yy_cp = ++(yy_c_buf_p);
+ yy_current_state = yy_next_state;
+ goto yy_match;
+ }
+
+ else
+ {
+ yy_cp = (yy_last_accepting_cpos);
+ yy_current_state = (yy_last_accepting_state);
+ goto yy_find_action;
+ }
+ }
+
+ else switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_END_OF_FILE:
+ {
+ (yy_did_buffer_switch_on_eof) = 0;
+
+ if ( yywrap( ) )
+ {
+ /* Note: because we've taken care in
+ * yy_get_next_buffer() to have set up
+ * yytext, we can now set up
+ * yy_c_buf_p so that if some total
+ * hoser (like flex itself) wants to
+ * call the scanner after we return the
+ * YY_NULL, it'll still work - another
+ * YY_NULL will get returned.
+ */
+ (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
+
+ yy_act = YY_STATE_EOF(YY_START);
+ goto do_action;
+ }
+
+ else
+ {
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+ }
+ break;
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) =
+ (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_match;
+
+ case EOB_ACT_LAST_MATCH:
+ (yy_c_buf_p) =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_find_action;
+ }
+ break;
+ }
+
+ default:
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--no action found" );
+ } /* end of action switch */
+ } /* end of scanning one token */
+ } /* end of user's declarations */
+} /* end of yylex */
+
+/* yy_get_next_buffer - try to read in a new buffer
+ *
+ * Returns a code representing an action:
+ * EOB_ACT_LAST_MATCH - process the text matched so far before signalling EOF
+ * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
+ * EOB_ACT_END_OF_FILE - end of file
+ */
+static int yy_get_next_buffer (void)
+{
+ char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
+ char *source = (yytext_ptr);
+ int number_to_move, i;
+ int ret_val;
+
+ if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--end of buffer missed" );
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
+ { /* Don't try to fill the buffer, so this is an EOF. */
+ if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
+ {
+ /* We matched a single character, the EOB, so
+ * treat this as a final EOF.
+ */
+ return EOB_ACT_END_OF_FILE;
+ }
+
+ else
+ {
+ /* We matched some text prior to the EOB, first
+ * process it.
+ */
+ return EOB_ACT_LAST_MATCH;
+ }
+ }
+
+ /* Try to read more data. */
+
+ /* First move last chars to start of buffer. */
+ number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr) - 1);
+
+ for ( i = 0; i < number_to_move; ++i )
+ *(dest++) = *(source++);
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
+ /* don't do the read, it's not guaranteed to return an EOF,
+ * just force an EOF
+ */
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
+
+ else
+ {
+ int num_to_read =
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
+
+ while ( num_to_read <= 0 )
+ { /* Not enough room in the buffer - grow it. */
+
+ /* just a shorter name for the current buffer */
+ YY_BUFFER_STATE b = YY_CURRENT_BUFFER_LVALUE;
+
+ int yy_c_buf_p_offset =
+ (int) ((yy_c_buf_p) - b->yy_ch_buf);
+
+ if ( b->yy_is_our_buffer )
+ {
+ int new_size = b->yy_buf_size * 2;
+
+ if ( new_size <= 0 )
+ b->yy_buf_size += b->yy_buf_size / 8;
+ else
+ b->yy_buf_size *= 2;
+
+ b->yy_ch_buf = (char *)
+ /* Include room for 2 EOB chars. */
+ yyrealloc( (void *) b->yy_ch_buf,
+ (yy_size_t) (b->yy_buf_size + 2) );
+ }
+ else
+ /* Can't grow it, we don't own it. */
+ b->yy_ch_buf = NULL;
+
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR(
+ "fatal error - scanner input buffer overflow" );
+
+ (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
+
+ num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
+ number_to_move - 1;
+
+ }
+
+ if ( num_to_read > YY_READ_BUF_SIZE )
+ num_to_read = YY_READ_BUF_SIZE;
+
+ /* Read in more data. */
+ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
+ (yy_n_chars), num_to_read );
+
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ if ( (yy_n_chars) == 0 )
+ {
+ if ( number_to_move == YY_MORE_ADJ )
+ {
+ ret_val = EOB_ACT_END_OF_FILE;
+ yyrestart( yyin );
+ }
+
+ else
+ {
+ ret_val = EOB_ACT_LAST_MATCH;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
+ YY_BUFFER_EOF_PENDING;
+ }
+ }
+
+ else
+ ret_val = EOB_ACT_CONTINUE_SCAN;
+
+ if (((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
+ /* Extend the array by 50%, plus the number we really need. */
+ int new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1);
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc(
+ (void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf, (yy_size_t) new_size );
+ if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
+ /* "- 2" to take care of EOB's */
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size = (int) (new_size - 2);
+ }
+
+ (yy_n_chars) += number_to_move;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
+
+ (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
+
+ return ret_val;
+}
+
+/* yy_get_previous_state - get the state just before the EOB char was reached */
+
+ static yy_state_type yy_get_previous_state (void)
+{
+ yy_state_type yy_current_state;
+ char *yy_cp;
+
+ yy_current_state = (yy_start);
+
+ for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
+ {
+ YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
+ if ( yy_accept[yy_current_state] )
+ {
+ (yy_last_accepting_state) = yy_current_state;
+ (yy_last_accepting_cpos) = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 32 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ }
+
+ return yy_current_state;
+}
+
+/* yy_try_NUL_trans - try to make a transition on the NUL character
+ *
+ * synopsis
+ * next_state = yy_try_NUL_trans( current_state );
+ */
+ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
+{
+ int yy_is_jam;
+ char *yy_cp = (yy_c_buf_p);
+
+ YY_CHAR yy_c = 1;
+ if ( yy_accept[yy_current_state] )
+ {
+ (yy_last_accepting_state) = yy_current_state;
+ (yy_last_accepting_cpos) = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 32 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ yy_is_jam = (yy_current_state == 31);
+
+ return yy_is_jam ? 0 : yy_current_state;
+}
+
+#ifndef YY_NO_UNPUT
+
+#endif
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+ static int yyinput (void)
+#else
+ static int input (void)
+#endif
+
+{
+ int c;
+
+ *(yy_c_buf_p) = (yy_hold_char);
+
+ if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
+ {
+ /* yy_c_buf_p now points to the character we want to return.
+ * If this occurs *before* the EOB characters, then it's a
+ * valid NUL; if not, then we've hit the end of the buffer.
+ */
+ if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ /* This was really a NUL. */
+ *(yy_c_buf_p) = '\0';
+
+ else
+ { /* need more input */
+ int offset = (int) ((yy_c_buf_p) - (yytext_ptr));
+ ++(yy_c_buf_p);
+
+ switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_LAST_MATCH:
+ /* This happens because yy_get_next_buffer()
+ * sees that we've accumulated a
+ * token and flags that we need to
+ * try matching the token before
+ * proceeding. But for input(),
+ * there's no matching to consider.
+ * So convert the EOB_ACT_LAST_MATCH
+ * to EOB_ACT_END_OF_FILE.
+ */
+
+ /* Reset buffer status. */
+ yyrestart( yyin );
+
+ /*FALLTHROUGH*/
+
+ case EOB_ACT_END_OF_FILE:
+ {
+ if ( yywrap( ) )
+ return 0;
+
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+#ifdef __cplusplus
+ return yyinput();
+#else
+ return input();
+#endif
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) = (yytext_ptr) + offset;
+ break;
+ }
+ }
+ }
+
+ c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */
+ *(yy_c_buf_p) = '\0'; /* preserve yytext */
+ (yy_hold_char) = *++(yy_c_buf_p);
+
+ return c;
+}
+#endif /* ifndef YY_NO_INPUT */
+
+/** Immediately switch to a different input stream.
+ * @param input_file A readable stream.
+ *
+ * @note This function does not reset the start condition to @c INITIAL .
+ */
+ void yyrestart (FILE * input_file )
+{
+
+ if ( ! YY_CURRENT_BUFFER ){
+ yyensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ yy_create_buffer( yyin, YY_BUF_SIZE );
+ }
+
+ yy_init_buffer( YY_CURRENT_BUFFER, input_file );
+ yy_load_buffer_state( );
+}
+
+/** Switch to a different input buffer.
+ * @param new_buffer The new input buffer.
+ *
+ */
+ void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer )
+{
+
+ /* TODO. We should be able to replace this entire function body
+ * with
+ * yypop_buffer_state();
+ * yypush_buffer_state(new_buffer);
+ */
+ yyensure_buffer_stack ();
+ if ( YY_CURRENT_BUFFER == new_buffer )
+ return;
+
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+ yy_load_buffer_state( );
+
+ /* We don't actually know whether we did this switch during
+ * EOF (yywrap()) processing, but the only time this flag
+ * is looked at is after yywrap() is called, so it's safe
+ * to go ahead and always set it.
+ */
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+static void yy_load_buffer_state (void)
+{
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
+ yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
+ (yy_hold_char) = *(yy_c_buf_p);
+}
+
+/** Allocate and initialize an input buffer state.
+ * @param file A readable stream.
+ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
+ *
+ * @return the allocated buffer state.
+ */
+ YY_BUFFER_STATE yy_create_buffer (FILE * file, int size )
+{
+ YY_BUFFER_STATE b;
+
+ b = (YY_BUFFER_STATE) yyalloc( sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_buf_size = size;
+
+ /* yy_ch_buf has to be 2 characters longer than the size given because
+ * we need to put in 2 end-of-buffer characters.
+ */
+ b->yy_ch_buf = (char *) yyalloc( (yy_size_t) (b->yy_buf_size + 2) );
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_is_our_buffer = 1;
+
+ yy_init_buffer( b, file );
+
+ return b;
+}
+
+/** Destroy the buffer.
+ * @param b a buffer created with yy_create_buffer()
+ *
+ */
+ void yy_delete_buffer (YY_BUFFER_STATE b )
+{
+
+ if ( ! b )
+ return;
+
+ if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
+ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
+
+ if ( b->yy_is_our_buffer )
+ yyfree( (void *) b->yy_ch_buf );
+
+ yyfree( (void *) b );
+}
+
+/* Initializes or reinitializes a buffer.
+ * This function is sometimes called more than once on the same buffer,
+ * such as during a yyrestart() or at EOF.
+ */
+ static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file )
+
+{
+ int oerrno = errno;
+
+ yy_flush_buffer( b );
+
+ b->yy_input_file = file;
+ b->yy_fill_buffer = 1;
+
+ /* If b is the current buffer, then yy_init_buffer was _probably_
+ * called from yyrestart() or through yy_get_next_buffer.
+ * In that case, we don't want to reset the lineno or column.
+ */
+ if (b != YY_CURRENT_BUFFER){
+ b->yy_bs_lineno = 1;
+ b->yy_bs_column = 0;
+ }
+
+ b->yy_is_interactive = 0;
+
+ errno = oerrno;
+}
+
+/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
+ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
+ *
+ */
+ void yy_flush_buffer (YY_BUFFER_STATE b )
+{
+ if ( ! b )
+ return;
+
+ b->yy_n_chars = 0;
+
+ /* We always need two end-of-buffer characters. The first causes
+ * a transition to the end-of-buffer state. The second causes
+ * a jam in that state.
+ */
+ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
+ b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
+
+ b->yy_buf_pos = &b->yy_ch_buf[0];
+
+ b->yy_at_bol = 1;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ if ( b == YY_CURRENT_BUFFER )
+ yy_load_buffer_state( );
+}
+
+/** Pushes the new state onto the stack. The new state becomes
+ * the current state. This function will allocate the stack
+ * if necessary.
+ * @param new_buffer The new state.
+ *
+ */
+void yypush_buffer_state (YY_BUFFER_STATE new_buffer )
+{
+ if (new_buffer == NULL)
+ return;
+
+ yyensure_buffer_stack();
+
+ /* This block is copied from yy_switch_to_buffer. */
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ /* Only push if top exists. Otherwise, replace top. */
+ if (YY_CURRENT_BUFFER)
+ (yy_buffer_stack_top)++;
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+
+ /* copied from yy_switch_to_buffer. */
+ yy_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+/** Removes and deletes the top of the stack, if present.
+ * The next element becomes the new top.
+ *
+ */
+void yypop_buffer_state (void)
+{
+ if (!YY_CURRENT_BUFFER)
+ return;
+
+ yy_delete_buffer(YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ if ((yy_buffer_stack_top) > 0)
+ --(yy_buffer_stack_top);
+
+ if (YY_CURRENT_BUFFER) {
+ yy_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+ }
+}
+
+/* Allocates the stack if it does not exist.
+ * Guarantees space for at least one push.
+ */
+static void yyensure_buffer_stack (void)
+{
+ yy_size_t num_to_alloc;
+
+ if (!(yy_buffer_stack)) {
+
+		/* First allocation is for just one element, since we don't know
+		 * if this scanner will even need a stack; the stack grows (by 8
+		 * slots, see below) only when a push actually needs more room.
+		 */
+		num_to_alloc = 1;
+ (yy_buffer_stack) = (struct yy_buffer_state**)yyalloc
+ (num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+ if ( ! (yy_buffer_stack) )
+ YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
+
+ memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
+
+ (yy_buffer_stack_max) = num_to_alloc;
+ (yy_buffer_stack_top) = 0;
+ return;
+ }
+
+ if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
+
+ /* Increase the buffer to prepare for a possible push. */
+ yy_size_t grow_size = 8 /* arbitrary grow size */;
+
+ num_to_alloc = (yy_buffer_stack_max) + grow_size;
+ (yy_buffer_stack) = (struct yy_buffer_state**)yyrealloc
+ ((yy_buffer_stack),
+ num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+ if ( ! (yy_buffer_stack) )
+ YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
+
+ /* zero only the new slots.*/
+ memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
+ (yy_buffer_stack_max) = num_to_alloc;
+ }
+}
+
+/** Setup the input buffer state to scan directly from a user-specified character buffer.
+ * @param base the character buffer
+ * @param size the size in bytes of the character buffer
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size )
+{
+ YY_BUFFER_STATE b;
+
+ if ( size < 2 ||
+ base[size-2] != YY_END_OF_BUFFER_CHAR ||
+ base[size-1] != YY_END_OF_BUFFER_CHAR )
+ /* They forgot to leave room for the EOB's. */
+ return NULL;
+
+ b = (YY_BUFFER_STATE) yyalloc( sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" );
+
+ b->yy_buf_size = (int) (size - 2); /* "- 2" to take care of EOB's */
+ b->yy_buf_pos = b->yy_ch_buf = base;
+ b->yy_is_our_buffer = 0;
+ b->yy_input_file = NULL;
+ b->yy_n_chars = b->yy_buf_size;
+ b->yy_is_interactive = 0;
+ b->yy_at_bol = 1;
+ b->yy_fill_buffer = 0;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ yy_switch_to_buffer( b );
+
+ return b;
+}
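+
+/*
+ * Usage sketch (hypothetical caller, not part of the generated code):
+ * the buffer handed to yy_scan_buffer() must end in two
+ * YY_END_OF_BUFFER_CHAR bytes, or NULL is returned:
+ *
+ *     char *buf = yyalloc(len + 2);
+ *     memcpy(buf, src, len);
+ *     buf[len] = buf[len + 1] = YY_END_OF_BUFFER_CHAR;
+ *     YY_BUFFER_STATE b = yy_scan_buffer(buf, len + 2);
+ *
+ * yy_scan_string() and yy_scan_bytes() below perform this copy and
+ * termination automatically.
+ */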
+
+/** Setup the input buffer state to scan a string. The next call to yylex() will
+ * scan from a @e copy of @a str.
+ * @param yystr a NUL-terminated string to scan
+ *
+ * @return the newly allocated buffer state object.
+ * @note If you want to scan bytes that may contain NUL values, then use
+ * yy_scan_bytes() instead.
+ */
+YY_BUFFER_STATE yy_scan_string (const char * yystr )
+{
+
+ return yy_scan_bytes( yystr, (int) strlen(yystr) );
+}
+
+/** Setup the input buffer state to scan the given bytes. The next call to yylex() will
+ * scan from a @e copy of @a bytes.
+ * @param yybytes the byte buffer to scan
+ * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE yy_scan_bytes (const char * yybytes, int _yybytes_len )
+{
+ YY_BUFFER_STATE b;
+ char *buf;
+ yy_size_t n;
+ int i;
+
+ /* Get memory for full buffer, including space for trailing EOB's. */
+ n = (yy_size_t) (_yybytes_len + 2);
+ buf = (char *) yyalloc( n );
+ if ( ! buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );
+
+ for ( i = 0; i < _yybytes_len; ++i )
+ buf[i] = yybytes[i];
+
+ buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
+
+ b = yy_scan_buffer( buf, n );
+ if ( ! b )
+ YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" );
+
+ /* It's okay to grow etc. this buffer, and we should throw it
+ * away when we're done.
+ */
+ b->yy_is_our_buffer = 1;
+
+ return b;
+}
+
+#ifndef YY_EXIT_FAILURE
+#define YY_EXIT_FAILURE 2
+#endif
+
+static void yynoreturn yy_fatal_error (const char* msg )
+{
+ fprintf( stderr, "%s\n", msg );
+ exit( YY_EXIT_FAILURE );
+}
+
+/* Redefine yyless() so it works in section 3 code. */
+
+#undef yyless
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ yytext[yyleng] = (yy_hold_char); \
+ (yy_c_buf_p) = yytext + yyless_macro_arg; \
+ (yy_hold_char) = *(yy_c_buf_p); \
+ *(yy_c_buf_p) = '\0'; \
+ yyleng = yyless_macro_arg; \
+ } \
+ while ( 0 )
+
+/* Accessor methods (get/set functions) to struct members. */
+
+/** Get the current line number.
+ *
+ */
+int yyget_lineno (void)
+{
+
+ return yylineno;
+}
+
+/** Get the input stream.
+ *
+ */
+FILE *yyget_in (void)
+{
+ return yyin;
+}
+
+/** Get the output stream.
+ *
+ */
+FILE *yyget_out (void)
+{
+ return yyout;
+}
+
+/** Get the length of the current token.
+ *
+ */
+int yyget_leng (void)
+{
+ return yyleng;
+}
+
+/** Get the current token.
+ *
+ */
+
+char *yyget_text (void)
+{
+ return yytext;
+}
+
+/** Set the current line number.
+ * @param _line_number line number
+ *
+ */
+void yyset_lineno (int _line_number )
+{
+
+ yylineno = _line_number;
+}
+
+/** Set the input stream. This does not discard the current
+ * input buffer.
+ * @param _in_str A readable stream.
+ *
+ * @see yy_switch_to_buffer
+ */
+void yyset_in (FILE * _in_str )
+{
+ yyin = _in_str ;
+}
+
+void yyset_out (FILE * _out_str )
+{
+ yyout = _out_str ;
+}
+
+int yyget_debug (void)
+{
+ return yy_flex_debug;
+}
+
+void yyset_debug (int _bdebug )
+{
+ yy_flex_debug = _bdebug ;
+}
+
+static int yy_init_globals (void)
+{
+ /* Initialization is the same as for the non-reentrant scanner.
+ * This function is called from yylex_destroy(), so don't allocate here.
+ */
+
+ (yy_buffer_stack) = NULL;
+ (yy_buffer_stack_top) = 0;
+ (yy_buffer_stack_max) = 0;
+ (yy_c_buf_p) = NULL;
+ (yy_init) = 0;
+ (yy_start) = 0;
+
+/* Defined in main.c */
+#ifdef YY_STDINIT
+ yyin = stdin;
+ yyout = stdout;
+#else
+ yyin = NULL;
+ yyout = NULL;
+#endif
+
+ /* For future reference: Set errno on error, since we are called by
+ * yylex_init()
+ */
+ return 0;
+}
+
+/* yylex_destroy is for both reentrant and non-reentrant scanners. */
+int yylex_destroy (void)
+{
+
+ /* Pop the buffer stack, destroying each element. */
+ while(YY_CURRENT_BUFFER){
+ yy_delete_buffer( YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ yypop_buffer_state();
+ }
+
+ /* Destroy the stack itself. */
+ yyfree((yy_buffer_stack) );
+ (yy_buffer_stack) = NULL;
+
+ /* Reset the globals. This is important in a non-reentrant scanner so the next time
+ * yylex() is called, initialization will occur. */
+ yy_init_globals( );
+
+ return 0;
+}
+
+/*
+ * Internal utility routines.
+ */
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char* s1, const char * s2, int n )
+{
+
+ int i;
+ for ( i = 0; i < n; ++i )
+ s1[i] = s2[i];
+}
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (const char * s )
+{
+ int n;
+ for ( n = 0; s[n]; ++n )
+ ;
+
+ return n;
+}
+#endif
+
+void *yyalloc (yy_size_t size )
+{
+ return malloc(size);
+}
+
+void *yyrealloc (void * ptr, yy_size_t size )
+{
+
+ /* The cast to (char *) in the following accommodates both
+ * implementations that use char* generic pointers, and those
+ * that use void* generic pointers. It works with the latter
+ * because both ANSI C and C++ allow castless assignment from
+ * any pointer type to void*, and deal with argument conversions
+ * as though doing an assignment.
+ */
+ return realloc(ptr, size);
+}
+
+void yyfree (void * ptr )
+{
+ free( (char *) ptr ); /* see yyrealloc() for (char *) cast */
+}
+
+#define YYTABLES_NAME "yytables"
+
+#line 115 "syncrep_scanner.l"
+
+
+/* LCOV_EXCL_STOP */
+
+/* Needs to be here for access to yytext */
+void
+syncrep_yyerror(const char *message)
+{
+ /* report only the first error in a parse operation */
+ if (syncrep_parse_error_msg)
+ return;
+ if (yytext[0])
+ syncrep_parse_error_msg = psprintf("%s at or near \"%s\"",
+ message, yytext);
+ else
+ syncrep_parse_error_msg = psprintf("%s at end of input",
+ message);
+}
+
+void
+syncrep_scanner_init(const char *str)
+{
+ Size slen = strlen(str);
+ char *scanbuf;
+
+ /*
+ * Might be left over after ereport()
+ */
+ if (YY_CURRENT_BUFFER)
+ yy_delete_buffer(YY_CURRENT_BUFFER);
+
+ /*
+ * Make a scan buffer with special termination needed by flex.
+ */
+ scanbuf = (char *) palloc(slen + 2);
+ memcpy(scanbuf, str, slen);
+ scanbuf[slen] = scanbuf[slen + 1] = YY_END_OF_BUFFER_CHAR;
+ scanbufhandle = yy_scan_buffer(scanbuf, slen + 2);
+
+ /* Make sure we start in proper state */
+ BEGIN(INITIAL);
+}
+
+void
+syncrep_scanner_finish(void)
+{
+ yy_delete_buffer(scanbufhandle);
+ scanbufhandle = NULL;
+}
+
diff --git a/src/backend/replication/syncrep_scanner.l b/src/backend/replication/syncrep_scanner.l
new file mode 100644
index 0000000..6883f60
--- /dev/null
+++ b/src/backend/replication/syncrep_scanner.l
@@ -0,0 +1,163 @@
+%{
+/*-------------------------------------------------------------------------
+ *
+ * syncrep_scanner.l
+ * a lexical scanner for synchronous_standby_names
+ *
+ * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/syncrep_scanner.l
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "lib/stringinfo.h"
+
+/* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */
+#undef fprintf
+#define fprintf(file, fmt, msg) fprintf_to_ereport(fmt, msg)
+
+static void
+fprintf_to_ereport(const char *fmt, const char *msg)
+{
+ ereport(ERROR, (errmsg_internal("%s", msg)));
+}
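+
+/*
+ * With the redefinition above, yy_fatal_error()'s
+ * fprintf(stderr, "%s\n", msg) call expands to
+ * fprintf_to_ereport("%s\n", msg), so a fatal scanner error is raised
+ * through ereport(ERROR) (which does not return) instead of being
+ * printed to stderr and followed by exit().
+ */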
+
+/* Handles to the buffer that the lexer uses internally */
+static YY_BUFFER_STATE scanbufhandle;
+
+static StringInfoData xdbuf;
+
+/* LCOV_EXCL_START */
+
+%}
+
+%option 8bit
+%option never-interactive
+%option nodefault
+%option noinput
+%option nounput
+%option noyywrap
+%option warn
+%option prefix="syncrep_yy"
+
+/*
+ * <xd> delimited identifiers (double-quoted identifiers)
+ */
+%x xd
+
+space [ \t\n\r\f\v]
+
+digit [0-9]
+ident_start [A-Za-z\200-\377_]
+ident_cont [A-Za-z\200-\377_0-9\$]
+identifier {ident_start}{ident_cont}*
+
+dquote \"
+xdstart {dquote}
+xdstop {dquote}
+xddouble {dquote}{dquote}
+xdinside [^"]+
+
+%%
+{space}+ { /* ignore */ }
+
+ /* brute-force case insensitivity is safer than relying on flex -i */
+
+[Aa][Nn][Yy] { return ANY; }
+[Ff][Ii][Rr][Ss][Tt] { return FIRST; }
+
+{xdstart} {
+ initStringInfo(&xdbuf);
+ BEGIN(xd);
+ }
+<xd>{xddouble} {
+ appendStringInfoChar(&xdbuf, '"');
+ }
+<xd>{xdinside} {
+ appendStringInfoString(&xdbuf, yytext);
+ }
+<xd>{xdstop} {
+ yylval.str = xdbuf.data;
+ xdbuf.data = NULL;
+ BEGIN(INITIAL);
+ return NAME;
+ }
+<xd><<EOF>> {
+ yyerror("unterminated quoted identifier");
+ return JUNK;
+ }
+
+{identifier} {
+ yylval.str = pstrdup(yytext);
+ return NAME;
+ }
+
+{digit}+ {
+ yylval.str = pstrdup(yytext);
+ return NUM;
+ }
+
+"*" {
+ yylval.str = "*";
+ return NAME;
+ }
+
+"," { return ','; }
+"(" { return '('; }
+")" { return ')'; }
+
+. { return JUNK; }
+%%
+
+/* LCOV_EXCL_STOP */
+
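+/*
+ * Example inputs this scanner tokenizes (values of
+ * synchronous_standby_names; illustrative, not part of the source):
+ *
+ *     FIRST 2 (s1, s2, s3)  =>  FIRST NUM '(' NAME ',' NAME ',' NAME ')'
+ *     ANY 1 ("node-A", s2)  =>  ANY NUM '(' NAME ',' NAME ')'
+ *     s1                    =>  NAME
+ *
+ * Inside double quotes the <xd> state collects text, and a doubled ""
+ * produces a literal '"' in the returned NAME.
+ */
+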
+/* Needs to be here for access to yytext */
+void
+syncrep_yyerror(const char *message)
+{
+ /* report only the first error in a parse operation */
+ if (syncrep_parse_error_msg)
+ return;
+ if (yytext[0])
+ syncrep_parse_error_msg = psprintf("%s at or near \"%s\"",
+ message, yytext);
+ else
+ syncrep_parse_error_msg = psprintf("%s at end of input",
+ message);
+}
+
+void
+syncrep_scanner_init(const char *str)
+{
+ Size slen = strlen(str);
+ char *scanbuf;
+
+ /*
+ * Might be left over after ereport()
+ */
+ if (YY_CURRENT_BUFFER)
+ yy_delete_buffer(YY_CURRENT_BUFFER);
+
+ /*
+ * Make a scan buffer with special termination needed by flex.
+ */
+ scanbuf = (char *) palloc(slen + 2);
+ memcpy(scanbuf, str, slen);
+ scanbuf[slen] = scanbuf[slen + 1] = YY_END_OF_BUFFER_CHAR;
+ scanbufhandle = yy_scan_buffer(scanbuf, slen + 2);
+
+ /* Make sure we start in proper state */
+ BEGIN(INITIAL);
+}
+
+void
+syncrep_scanner_finish(void)
+{
+ yy_delete_buffer(scanbufhandle);
+ scanbufhandle = NULL;
+}
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
new file mode 100644
index 0000000..d4f294f
--- /dev/null
+++ b/src/backend/replication/walreceiver.c
@@ -0,0 +1,1479 @@
+/*-------------------------------------------------------------------------
+ *
+ * walreceiver.c
+ *
+ * The WAL receiver process (walreceiver) is new as of Postgres 9.0. It
+ * is the process in the standby server that takes charge of receiving
+ * XLOG records from a primary server during streaming replication.
+ *
+ * When the startup process determines that it's time to start streaming,
+ * it instructs the postmaster to start walreceiver. Walreceiver first connects
+ * to the primary server (it will be served by a walsender process
+ * in the primary server), and then keeps receiving XLOG records and
+ * writing them to the disk as long as the connection is alive. As XLOG
+ * records are received and flushed to disk, it updates the
+ * WalRcv->flushedUpto variable in shared memory, to inform the startup
+ * process of how far it can proceed with XLOG replay.
+ *
+ * A WAL receiver cannot directly load GUC parameters used when establishing
+ * its connection to the primary. Instead it relies on parameter values
+ * that are passed down by the startup process when streaming is requested.
+ * This applies, for example, to the replication slot and the connection
+ * string to be used for the connection with the primary.
+ *
+ * If the primary server ends streaming, but doesn't disconnect, walreceiver
+ * goes into "waiting" mode, and waits for the startup process to give new
+ * instructions. The startup process will treat that the same as
+ * disconnection, and will rescan the archive/pg_wal directory. But when the
+ * startup process wants to try streaming replication again, it will just
+ * nudge the existing walreceiver process that's waiting, instead of launching
+ * a new one.
+ *
+ * Normal termination is by SIGTERM, which instructs the walreceiver to
+ * exit(0). Emergency termination is by SIGQUIT; like any postmaster child
+ * process, the walreceiver will simply abort and exit on SIGQUIT. A close
+ * of the connection and a FATAL error are treated not as a crash but as
+ * normal operation.
+ *
+ * This file contains the server-facing parts of walreceiver. The libpq-
+ * specific parts are in the libpqwalreceiver module. It's loaded
+ * dynamically to avoid linking the server with libpq.
+ *
+ * Portions Copyright (c) 2010-2020, PostgreSQL Global Development Group
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/walreceiver.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <unistd.h>
+
+#include "access/htup_details.h"
+#include "access/timeline.h"
+#include "access/transam.h"
+#include "access/xlog_internal.h"
+#include "access/xlogarchive.h"
+#include "catalog/pg_authid.h"
+#include "catalog/pg_type.h"
+#include "common/ip.h"
+#include "funcapi.h"
+#include "libpq/pqformat.h"
+#include "libpq/pqsignal.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "postmaster/interrupt.h"
+#include "replication/walreceiver.h"
+#include "replication/walsender.h"
+#include "storage/ipc.h"
+#include "storage/pmsignal.h"
+#include "storage/procarray.h"
+#include "storage/procsignal.h"
+#include "utils/acl.h"
+#include "utils/builtins.h"
+#include "utils/guc.h"
+#include "utils/pg_lsn.h"
+#include "utils/ps_status.h"
+#include "utils/resowner.h"
+#include "utils/timestamp.h"
+
+
+/*
+ * GUC variables. (Other variables that affect walreceiver are in xlog.c
+ * because they're passed down from the startup process, for better
+ * synchronization.)
+ */
+int wal_receiver_status_interval;
+int wal_receiver_timeout;
+bool hot_standby_feedback;
+
+/* libpqwalreceiver connection */
+static WalReceiverConn *wrconn = NULL;
+WalReceiverFunctionsType *WalReceiverFunctions = NULL;
+
+#define NAPTIME_PER_CYCLE 100 /* max sleep time between cycles (100ms) */
+
+/*
+ * These variables are used similarly to openLogFile/SegNo,
+ * but for walreceiver to write the XLOG. recvFileTLI is the TimeLineID
+ * corresponding to the filename of recvFile.
+ */
+static int recvFile = -1;
+static TimeLineID recvFileTLI = 0;
+static XLogSegNo recvSegNo = 0;
+
+/*
+ * Flags set by interrupt handlers of walreceiver for later service in the
+ * main loop.
+ */
+static volatile sig_atomic_t got_SIGHUP = false;
+static volatile sig_atomic_t got_SIGTERM = false;
+
+/*
+ * LogstreamResult indicates the byte positions that we have already
+ * written/fsynced.
+ */
+static struct
+{
+ XLogRecPtr Write; /* last byte + 1 written out in the standby */
+ XLogRecPtr Flush; /* last byte + 1 flushed in the standby */
+} LogstreamResult;
+
+static StringInfoData reply_message;
+static StringInfoData incoming_message;
+
+/* Prototypes for private functions */
+static void WalRcvFetchTimeLineHistoryFiles(TimeLineID first, TimeLineID last);
+static void WalRcvWaitForStartPosition(XLogRecPtr *startpoint, TimeLineID *startpointTLI);
+static void WalRcvDie(int code, Datum arg);
+static void XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len);
+static void XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr);
+static void XLogWalRcvFlush(bool dying);
+static void XLogWalRcvSendReply(bool force, bool requestReply);
+static void XLogWalRcvSendHSFeedback(bool immed);
+static void ProcessWalSndrMessage(XLogRecPtr walEnd, TimestampTz sendTime);
+
+/* Signal handlers */
+static void WalRcvSigHupHandler(SIGNAL_ARGS);
+static void WalRcvShutdownHandler(SIGNAL_ARGS);
+
+
+/*
+ * Process any interrupts the walreceiver process may have received.
+ * This should be called any time the process's latch has become set.
+ *
+ * Currently, only SIGTERM is of interest. We can't just exit(1) within the
+ * SIGTERM signal handler, because the signal might arrive in the middle of
+ * some critical operation, like while we're holding a spinlock. Instead, the
+ * signal handler sets a flag variable as well as setting the process's latch.
+ * We must check the flag (by calling ProcessWalRcvInterrupts) anytime the
+ * latch has become set. Operations that could block for a long time, such as
+ * reading from a remote server, must pay attention to the latch too; see
+ * libpqrcv_PQgetResult for example.
+ */
+void
+ProcessWalRcvInterrupts(void)
+{
+ /*
+ * Although walreceiver interrupt handling doesn't use the same scheme as
+ * regular backends, call CHECK_FOR_INTERRUPTS() to make sure we receive
+ * any incoming signals on Win32, and also to make sure we process any
+ * barrier events.
+ */
+ CHECK_FOR_INTERRUPTS();
+
+ if (got_SIGTERM)
+ {
+ ereport(FATAL,
+ (errcode(ERRCODE_ADMIN_SHUTDOWN),
+ errmsg("terminating walreceiver process due to administrator command")));
+ }
+}
+
+
+/* Main entry point for walreceiver process */
+void
+WalReceiverMain(void)
+{
+ char conninfo[MAXCONNINFO];
+ char *tmp_conninfo;
+ char slotname[NAMEDATALEN];
+ bool is_temp_slot;
+ XLogRecPtr startpoint;
+ TimeLineID startpointTLI;
+ TimeLineID primaryTLI;
+ bool first_stream;
+ WalRcvData *walrcv = WalRcv;
+ TimestampTz last_recv_timestamp;
+ TimestampTz now;
+ bool ping_sent;
+ char *err;
+ char *sender_host = NULL;
+ int sender_port = 0;
+
+ /*
+ * WalRcv should be set up already (if we are a backend, we inherit this
+	 * via the fork() or EXEC_BACKEND mechanism from the postmaster).
+ */
+ Assert(walrcv != NULL);
+
+ now = GetCurrentTimestamp();
+
+ /*
+ * Mark walreceiver as running in shared memory.
+ *
+ * Do this as early as possible, so that if we fail later on, we'll set
+ * state to STOPPED. If we die before this, the startup process will keep
+ * waiting for us to start up, until it times out.
+ */
+ SpinLockAcquire(&walrcv->mutex);
+ Assert(walrcv->pid == 0);
+ switch (walrcv->walRcvState)
+ {
+ case WALRCV_STOPPING:
+ /* If we've already been requested to stop, don't start up. */
+ walrcv->walRcvState = WALRCV_STOPPED;
+ /* fall through */
+
+ case WALRCV_STOPPED:
+ SpinLockRelease(&walrcv->mutex);
+ proc_exit(1);
+ break;
+
+ case WALRCV_STARTING:
+ /* The usual case */
+ break;
+
+ case WALRCV_WAITING:
+ case WALRCV_STREAMING:
+ case WALRCV_RESTARTING:
+ default:
+ /* Shouldn't happen */
+ SpinLockRelease(&walrcv->mutex);
+ elog(PANIC, "walreceiver still running according to shared memory state");
+ }
+ /* Advertise our PID so that the startup process can kill us */
+ walrcv->pid = MyProcPid;
+ walrcv->walRcvState = WALRCV_STREAMING;
+
+ /* Fetch information required to start streaming */
+ walrcv->ready_to_display = false;
+ strlcpy(conninfo, (char *) walrcv->conninfo, MAXCONNINFO);
+ strlcpy(slotname, (char *) walrcv->slotname, NAMEDATALEN);
+ is_temp_slot = walrcv->is_temp_slot;
+ startpoint = walrcv->receiveStart;
+ startpointTLI = walrcv->receiveStartTLI;
+
+ /*
+ * At most one of is_temp_slot and slotname can be set; otherwise,
+ * RequestXLogStreaming messed up.
+ */
+ Assert(!is_temp_slot || (slotname[0] == '\0'));
+
+	/* Initialise to a sane value */
+ walrcv->lastMsgSendTime =
+ walrcv->lastMsgReceiptTime = walrcv->latestWalEndTime = now;
+
+ /* Report the latch to use to awaken this process */
+ walrcv->latch = &MyProc->procLatch;
+
+ SpinLockRelease(&walrcv->mutex);
+
+ pg_atomic_write_u64(&WalRcv->writtenUpto, 0);
+
+ /* Arrange to clean up at walreceiver exit */
+ on_shmem_exit(WalRcvDie, 0);
+
+ /* Properly accept or ignore signals the postmaster might send us */
+ pqsignal(SIGHUP, WalRcvSigHupHandler); /* set flag to read config file */
+ pqsignal(SIGINT, SIG_IGN);
+ pqsignal(SIGTERM, WalRcvShutdownHandler); /* request shutdown */
+ pqsignal(SIGQUIT, SignalHandlerForCrashExit);
+ pqsignal(SIGALRM, SIG_IGN);
+ pqsignal(SIGPIPE, SIG_IGN);
+ pqsignal(SIGUSR1, procsignal_sigusr1_handler);
+ pqsignal(SIGUSR2, SIG_IGN);
+
+ /* Reset some signals that are accepted by postmaster but not here */
+ pqsignal(SIGCHLD, SIG_DFL);
+
+ /* We allow SIGQUIT (quickdie) at all times */
+ sigdelset(&BlockSig, SIGQUIT);
+
+ /* Load the libpq-specific functions */
+ load_file("libpqwalreceiver", false);
+ if (WalReceiverFunctions == NULL)
+ elog(ERROR, "libpqwalreceiver didn't initialize correctly");
+
+ /* Unblock signals (they were blocked when the postmaster forked us) */
+ PG_SETMASK(&UnBlockSig);
+
+ /* Establish the connection to the primary for XLOG streaming */
+ wrconn = walrcv_connect(conninfo, false, cluster_name[0] ? cluster_name : "walreceiver", &err);
+ if (!wrconn)
+ ereport(ERROR,
+ (errmsg("could not connect to the primary server: %s", err)));
+
+ /*
+ * Save user-visible connection string. This clobbers the original
+ * conninfo, for security. Also save host and port of the sender server
+ * this walreceiver is connected to.
+ */
+ tmp_conninfo = walrcv_get_conninfo(wrconn);
+ walrcv_get_senderinfo(wrconn, &sender_host, &sender_port);
+ SpinLockAcquire(&walrcv->mutex);
+ memset(walrcv->conninfo, 0, MAXCONNINFO);
+ if (tmp_conninfo)
+ strlcpy((char *) walrcv->conninfo, tmp_conninfo, MAXCONNINFO);
+
+ memset(walrcv->sender_host, 0, NI_MAXHOST);
+ if (sender_host)
+ strlcpy((char *) walrcv->sender_host, sender_host, NI_MAXHOST);
+
+ walrcv->sender_port = sender_port;
+ walrcv->ready_to_display = true;
+ SpinLockRelease(&walrcv->mutex);
+
+ if (tmp_conninfo)
+ pfree(tmp_conninfo);
+
+ if (sender_host)
+ pfree(sender_host);
+
+ first_stream = true;
+ for (;;)
+ {
+ char *primary_sysid;
+ char standby_sysid[32];
+ WalRcvStreamOptions options;
+
+ /*
+ * Check that we're connected to a valid server using the
+ * IDENTIFY_SYSTEM replication command.
+ */
+ primary_sysid = walrcv_identify_system(wrconn, &primaryTLI);
+
+ snprintf(standby_sysid, sizeof(standby_sysid), UINT64_FORMAT,
+ GetSystemIdentifier());
+ if (strcmp(primary_sysid, standby_sysid) != 0)
+ {
+ ereport(ERROR,
+ (errmsg("database system identifier differs between the primary and standby"),
+ errdetail("The primary's identifier is %s, the standby's identifier is %s.",
+ primary_sysid, standby_sysid)));
+ }
+
+ /*
+ * Confirm that the current timeline of the primary is the same or
+ * ahead of ours.
+ */
+ if (primaryTLI < startpointTLI)
+ ereport(ERROR,
+ (errmsg("highest timeline %u of the primary is behind recovery timeline %u",
+ primaryTLI, startpointTLI)));
+
+ /*
+ * Get any missing history files. We do this always, even when we're
+ * not interested in that timeline, so that if we're promoted to
+ * become the master later on, we don't select the same timeline that
+ * was already used in the current master. This isn't bullet-proof -
+ * you'll need some external software to manage your cluster if you
+ * need to ensure that a unique timeline id is chosen in every case,
+ * but let's avoid the confusion of timeline id collisions where we
+ * can.
+ */
+ WalRcvFetchTimeLineHistoryFiles(startpointTLI, primaryTLI);
+
+ /*
+ * Create temporary replication slot if requested, and update slot
+ * name in shared memory. (Note the slot name cannot already be set
+ * in this case.)
+ */
+ if (is_temp_slot)
+ {
+ snprintf(slotname, sizeof(slotname),
+ "pg_walreceiver_%lld",
+ (long long int) walrcv_get_backend_pid(wrconn));
+
+ walrcv_create_slot(wrconn, slotname, true, 0, NULL);
+
+ SpinLockAcquire(&walrcv->mutex);
+ strlcpy(walrcv->slotname, slotname, NAMEDATALEN);
+ SpinLockRelease(&walrcv->mutex);
+ }
+
+ /*
+ * Start streaming.
+ *
+ * We'll try to start at the requested starting point and timeline,
+ * even if it's different from the server's latest timeline. In case
+ * we've already reached the end of the old timeline, the server will
+ * finish the streaming immediately, and we will go back to await
+ * orders from the startup process. If recovery_target_timeline is
+ * 'latest', the startup process will scan pg_wal and find the new
+ * history file, bump recovery target timeline, and ask us to restart
+ * on the new timeline.
+ */
+ options.logical = false;
+ options.startpoint = startpoint;
+ options.slotname = slotname[0] != '\0' ? slotname : NULL;
+ options.proto.physical.startpointTLI = startpointTLI;
+ ThisTimeLineID = startpointTLI;
+ if (walrcv_startstreaming(wrconn, &options))
+ {
+ if (first_stream)
+ ereport(LOG,
+ (errmsg("started streaming WAL from primary at %X/%X on timeline %u",
+ (uint32) (startpoint >> 32), (uint32) startpoint,
+ startpointTLI)));
+ else
+ ereport(LOG,
+ (errmsg("restarted WAL streaming at %X/%X on timeline %u",
+ (uint32) (startpoint >> 32), (uint32) startpoint,
+ startpointTLI)));
+ first_stream = false;
+
+ /* Initialize LogstreamResult and buffers for processing messages */
+ LogstreamResult.Write = LogstreamResult.Flush = GetXLogReplayRecPtr(NULL);
+ initStringInfo(&reply_message);
+ initStringInfo(&incoming_message);
+
+ /* Initialize the last recv timestamp */
+ last_recv_timestamp = GetCurrentTimestamp();
+ ping_sent = false;
+
+ /* Loop until end-of-streaming or error */
+ for (;;)
+ {
+ char *buf;
+ int len;
+ bool endofwal = false;
+ pgsocket wait_fd = PGINVALID_SOCKET;
+ int rc;
+
+ /*
+ * Exit walreceiver if we're not in recovery. This should not
+ * happen, but cross-check the status here.
+ */
+ if (!RecoveryInProgress())
+ ereport(FATAL,
+ (errmsg("cannot continue WAL streaming, recovery has already ended")));
+
+ /* Process any requests or signals received recently */
+ ProcessWalRcvInterrupts();
+
+ if (got_SIGHUP)
+ {
+ got_SIGHUP = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ XLogWalRcvSendHSFeedback(true);
+ }
+
+ /* See if we can read data immediately */
+ len = walrcv_receive(wrconn, &buf, &wait_fd);
+ if (len != 0)
+ {
+ /*
+ * Process the received data, and any subsequent data we
+ * can read without blocking.
+ */
+ for (;;)
+ {
+ if (len > 0)
+ {
+ /*
+ * Something was received from master, so reset
+ * timeout
+ */
+ last_recv_timestamp = GetCurrentTimestamp();
+ ping_sent = false;
+ XLogWalRcvProcessMsg(buf[0], &buf[1], len - 1);
+ }
+ else if (len == 0)
+ break;
+ else if (len < 0)
+ {
+ ereport(LOG,
+ (errmsg("replication terminated by primary server"),
+ errdetail("End of WAL reached on timeline %u at %X/%X.",
+ startpointTLI,
+ (uint32) (LogstreamResult.Write >> 32), (uint32) LogstreamResult.Write)));
+ endofwal = true;
+ break;
+ }
+ len = walrcv_receive(wrconn, &buf, &wait_fd);
+ }
+
+ /* Let the master know that we received some data. */
+ XLogWalRcvSendReply(false, false);
+
+ /*
+ * If we've written some records, flush them to disk and
+ * let the startup process and primary server know about
+ * them.
+ */
+ XLogWalRcvFlush(false);
+ }
+
+ /* Check if we need to exit the streaming loop. */
+ if (endofwal)
+ break;
+
+ /*
+ * Ideally we would reuse a WaitEventSet object repeatedly
+ * here to avoid the overheads of WaitLatchOrSocket on epoll
+ * systems, but we can't be sure that libpq (or any other
+ * walreceiver implementation) has the same socket (even if
+ * the fd is the same number, it may have been closed and
+ * reopened since the last time). In future, if there is a
+ * function for removing sockets from WaitEventSet, then we
+ * could add and remove just the socket each time, potentially
+ * avoiding some system calls.
+ */
+ Assert(wait_fd != PGINVALID_SOCKET);
+ rc = WaitLatchOrSocket(walrcv->latch,
+ WL_EXIT_ON_PM_DEATH | WL_SOCKET_READABLE |
+ WL_TIMEOUT | WL_LATCH_SET,
+ wait_fd,
+ NAPTIME_PER_CYCLE,
+ WAIT_EVENT_WAL_RECEIVER_MAIN);
+ if (rc & WL_LATCH_SET)
+ {
+ ResetLatch(walrcv->latch);
+ ProcessWalRcvInterrupts();
+
+ if (walrcv->force_reply)
+ {
+ /*
+ * The recovery process has asked us to send apply
+ * feedback now. Make sure the flag is really set to
+ * false in shared memory before sending the reply, so
+ * we don't miss a new request for a reply.
+ */
+ walrcv->force_reply = false;
+ pg_memory_barrier();
+ XLogWalRcvSendReply(true, false);
+ }
+ }
+ if (rc & WL_TIMEOUT)
+ {
+ /*
+ * We didn't receive anything new. If we haven't heard
+ * anything from the server for more than
+ * wal_receiver_timeout / 2, ping the server. Also, if
+ * it's been longer than wal_receiver_status_interval
+ * since the last update we sent, send a status update to
+ * the master anyway, to report any progress in applying
+ * WAL.
+ */
+ bool requestReply = false;
+
+ /*
+ * Check if time since last receive from standby has
+ * reached the configured limit.
+ */
+ if (wal_receiver_timeout > 0)
+ {
+ TimestampTz now = GetCurrentTimestamp();
+ TimestampTz timeout;
+
+ timeout =
+ TimestampTzPlusMilliseconds(last_recv_timestamp,
+ wal_receiver_timeout);
+
+ if (now >= timeout)
+ ereport(ERROR,
+ (errmsg("terminating walreceiver due to timeout")));
+
+ /*
+					 * If we haven't received anything new for half of
+					 * wal_receiver_timeout, ping the server.
+ */
+ if (!ping_sent)
+ {
+ timeout = TimestampTzPlusMilliseconds(last_recv_timestamp,
+ (wal_receiver_timeout / 2));
+ if (now >= timeout)
+ {
+ requestReply = true;
+ ping_sent = true;
+ }
+ }
+ }
+
+ XLogWalRcvSendReply(requestReply, requestReply);
+ XLogWalRcvSendHSFeedback(false);
+ }
+ }
+
+ /*
+ * The backend finished streaming. Exit streaming COPY-mode from
+ * our side, too.
+ */
+ walrcv_endstreaming(wrconn, &primaryTLI);
+
+ /*
+ * If the server had switched to a new timeline that we didn't
+ * know about when we began streaming, fetch its timeline history
+ * file now.
+ */
+ WalRcvFetchTimeLineHistoryFiles(startpointTLI, primaryTLI);
+ }
+ else
+ ereport(LOG,
+ (errmsg("primary server contains no more WAL on requested timeline %u",
+ startpointTLI)));
+
+ /*
+ * End of WAL reached on the requested timeline. Close the last
+		 * segment, and wait for new orders from the startup process.
+ */
+ if (recvFile >= 0)
+ {
+ char xlogfname[MAXFNAMELEN];
+
+ XLogWalRcvFlush(false);
+ XLogFileName(xlogfname, recvFileTLI, recvSegNo, wal_segment_size);
+ if (close(recvFile) != 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not close log segment %s: %m",
+ xlogfname)));
+
+ /*
+ * Create .done file forcibly to prevent the streamed segment from
+ * being archived later.
+ */
+ if (XLogArchiveMode != ARCHIVE_MODE_ALWAYS)
+ XLogArchiveForceDone(xlogfname);
+ else
+ XLogArchiveNotify(xlogfname);
+ }
+ recvFile = -1;
+
+ elog(DEBUG1, "walreceiver ended streaming and awaits new instructions");
+ WalRcvWaitForStartPosition(&startpoint, &startpointTLI);
+ }
+ /* not reached */
+}
+
+/*
+ * Wait for startup process to set receiveStart and receiveStartTLI.
+ */
+static void
+WalRcvWaitForStartPosition(XLogRecPtr *startpoint, TimeLineID *startpointTLI)
+{
+ WalRcvData *walrcv = WalRcv;
+ int state;
+
+ SpinLockAcquire(&walrcv->mutex);
+ state = walrcv->walRcvState;
+ if (state != WALRCV_STREAMING)
+ {
+ SpinLockRelease(&walrcv->mutex);
+ if (state == WALRCV_STOPPING)
+ proc_exit(0);
+ else
+ elog(FATAL, "unexpected walreceiver state");
+ }
+ walrcv->walRcvState = WALRCV_WAITING;
+ walrcv->receiveStart = InvalidXLogRecPtr;
+ walrcv->receiveStartTLI = 0;
+ SpinLockRelease(&walrcv->mutex);
+
+ set_ps_display("idle");
+
+ /*
+	 * Nudge the startup process to notice that we've stopped streaming
+	 * and are now waiting for instructions.
+ */
+ WakeupRecovery();
+ for (;;)
+ {
+ ResetLatch(walrcv->latch);
+
+ ProcessWalRcvInterrupts();
+
+ SpinLockAcquire(&walrcv->mutex);
+ Assert(walrcv->walRcvState == WALRCV_RESTARTING ||
+ walrcv->walRcvState == WALRCV_WAITING ||
+ walrcv->walRcvState == WALRCV_STOPPING);
+ if (walrcv->walRcvState == WALRCV_RESTARTING)
+ {
+ /*
+ * No need to handle changes in primary_conninfo or
+ * primary_slotname here. Startup process will signal us to
+ * terminate in case those change.
+ */
+ *startpoint = walrcv->receiveStart;
+ *startpointTLI = walrcv->receiveStartTLI;
+ walrcv->walRcvState = WALRCV_STREAMING;
+ SpinLockRelease(&walrcv->mutex);
+ break;
+ }
+ if (walrcv->walRcvState == WALRCV_STOPPING)
+ {
+ /*
+ * We should've received SIGTERM if the startup process wants us
+ * to die, but might as well check it here too.
+ */
+ SpinLockRelease(&walrcv->mutex);
+ exit(1);
+ }
+ SpinLockRelease(&walrcv->mutex);
+
+ (void) WaitLatch(walrcv->latch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
+ WAIT_EVENT_WAL_RECEIVER_WAIT_START);
+ }
+
+ if (update_process_title)
+ {
+ char activitymsg[50];
+
+ snprintf(activitymsg, sizeof(activitymsg), "restarting at %X/%X",
+ (uint32) (*startpoint >> 32),
+ (uint32) *startpoint);
+ set_ps_display(activitymsg);
+ }
+}
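+
+/*
+ * Summary of the walRcvState transitions visible here: streaming ends and
+ * we set STREAMING -> WAITING; the startup process requests a new stream
+ * by setting WAITING -> RESTARTING, and we acknowledge by setting
+ * RESTARTING -> STREAMING before returning; STOPPING at any of these
+ * points makes us exit.
+ */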
+
+/*
+ * Fetch any missing timeline history files between 'first' and 'last'
+ * (inclusive) from the server.
+ */
+static void
+WalRcvFetchTimeLineHistoryFiles(TimeLineID first, TimeLineID last)
+{
+ TimeLineID tli;
+
+ for (tli = first; tli <= last; tli++)
+ {
+ /* there's no history file for timeline 1 */
+ if (tli != 1 && !existsTimeLineHistory(tli))
+ {
+ char *fname;
+ char *content;
+ int len;
+ char expectedfname[MAXFNAMELEN];
+
+ ereport(LOG,
+ (errmsg("fetching timeline history file for timeline %u from primary server",
+ tli)));
+
+ walrcv_readtimelinehistoryfile(wrconn, tli, &fname, &content, &len);
+
+ /*
+ * Check that the filename on the master matches what we
+			 * calculated ourselves. This is just a sanity check; it should
+			 * always match.
+ */
+ TLHistoryFileName(expectedfname, tli);
+ if (strcmp(fname, expectedfname) != 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg_internal("primary reported unexpected file name for timeline history file of timeline %u",
+ tli)));
+
+ /*
+ * Write the file to pg_wal.
+ */
+ writeTimeLineHistoryFile(tli, content, len);
+
+ /*
+			 * Mark the streamed history file as .done to keep it from being
+			 * archived later, unless archive_mode is 'always', in which
+			 * case mark it as ready for archiving.
+ */
+ if (XLogArchiveMode != ARCHIVE_MODE_ALWAYS)
+ XLogArchiveForceDone(fname);
+ else
+ XLogArchiveNotify(fname);
+
+ pfree(fname);
+ pfree(content);
+ }
+ }
+}
+
+/*
+ * Mark us as STOPPED in shared memory at exit.
+ */
+static void
+WalRcvDie(int code, Datum arg)
+{
+ WalRcvData *walrcv = WalRcv;
+
+ /* Ensure that all WAL records received are flushed to disk */
+ XLogWalRcvFlush(true);
+
+ /* Mark ourselves inactive in shared memory */
+ SpinLockAcquire(&walrcv->mutex);
+ Assert(walrcv->walRcvState == WALRCV_STREAMING ||
+ walrcv->walRcvState == WALRCV_RESTARTING ||
+ walrcv->walRcvState == WALRCV_STARTING ||
+ walrcv->walRcvState == WALRCV_WAITING ||
+ walrcv->walRcvState == WALRCV_STOPPING);
+ Assert(walrcv->pid == MyProcPid);
+ walrcv->walRcvState = WALRCV_STOPPED;
+ walrcv->pid = 0;
+ walrcv->ready_to_display = false;
+ walrcv->latch = NULL;
+ SpinLockRelease(&walrcv->mutex);
+
+ /* Terminate the connection gracefully. */
+ if (wrconn != NULL)
+ walrcv_disconnect(wrconn);
+
+ /* Wake up the startup process to notice promptly that we're gone */
+ WakeupRecovery();
+}
+
+/* SIGHUP: set flag to re-read config file at next convenient time */
+static void
+WalRcvSigHupHandler(SIGNAL_ARGS)
+{
+ got_SIGHUP = true;
+}
+
+
+/* SIGTERM: set flag for ProcessWalRcvInterrupts */
+static void
+WalRcvShutdownHandler(SIGNAL_ARGS)
+{
+ int save_errno = errno;
+
+ got_SIGTERM = true;
+
+ if (WalRcv->latch)
+ SetLatch(WalRcv->latch);
+
+ errno = save_errno;
+}
+
+/*
+ * Accept the message from XLOG stream, and process it.
+ */
+static void
+XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
+{
+ int hdrlen;
+ XLogRecPtr dataStart;
+ XLogRecPtr walEnd;
+ TimestampTz sendTime;
+ bool replyRequested;
+
+ resetStringInfo(&incoming_message);
+
+ switch (type)
+ {
+ case 'w': /* WAL records */
+ {
+ /* copy message to StringInfo */
+ hdrlen = sizeof(int64) + sizeof(int64) + sizeof(int64);
+ if (len < hdrlen)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg_internal("invalid WAL message received from primary")));
+ appendBinaryStringInfo(&incoming_message, buf, hdrlen);
+
+ /* read the fields */
+ dataStart = pq_getmsgint64(&incoming_message);
+ walEnd = pq_getmsgint64(&incoming_message);
+ sendTime = pq_getmsgint64(&incoming_message);
+ ProcessWalSndrMessage(walEnd, sendTime);
+
+ buf += hdrlen;
+ len -= hdrlen;
+ XLogWalRcvWrite(buf, len, dataStart);
+ break;
+ }
+ case 'k': /* Keepalive */
+ {
+ /* copy message to StringInfo */
+ hdrlen = sizeof(int64) + sizeof(int64) + sizeof(char);
+ if (len != hdrlen)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg_internal("invalid keepalive message received from primary")));
+ appendBinaryStringInfo(&incoming_message, buf, hdrlen);
+
+ /* read the fields */
+ walEnd = pq_getmsgint64(&incoming_message);
+ sendTime = pq_getmsgint64(&incoming_message);
+ replyRequested = pq_getmsgbyte(&incoming_message);
+
+ ProcessWalSndrMessage(walEnd, sendTime);
+
+ /* If the primary requested a reply, send one immediately */
+ if (replyRequested)
+ XLogWalRcvSendReply(true, false);
+ break;
+ }
+ default:
+ ereport(ERROR,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg_internal("invalid replication message type %d",
+ type)));
+ }
+}
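+
+/*
+ * For reference, the message layouts consumed above (after the type byte;
+ * fields are read with pq_getmsgint64()/pq_getmsgbyte()):
+ *
+ *   'w': dataStart int64 | walEnd int64 | sendTime int64 | WAL data...
+ *   'k': walEnd int64 | sendTime int64 | replyRequested byte
+ */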
+
+/*
+ * Write XLOG data to disk.
+ */
+static void
+XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
+{
+ int startoff;
+ int byteswritten;
+
+ while (nbytes > 0)
+ {
+ int segbytes;
+
+ if (recvFile < 0 || !XLByteInSeg(recptr, recvSegNo, wal_segment_size))
+ {
+ bool use_existent;
+
+ /*
+ * fsync() and close current file before we switch to next one. We
+ * would otherwise have to reopen this file to fsync it later
+ */
+ if (recvFile >= 0)
+ {
+ char xlogfname[MAXFNAMELEN];
+
+ XLogWalRcvFlush(false);
+
+ XLogFileName(xlogfname, recvFileTLI, recvSegNo, wal_segment_size);
+
+ /*
+ * XLOG segment files will be re-read by recovery in startup
+ * process soon, so we don't advise the OS to release cache
+ * pages associated with the file like XLogFileClose() does.
+ */
+ if (close(recvFile) != 0)
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not close log segment %s: %m",
+ xlogfname)));
+
+ /*
+ * Create .done file forcibly to prevent the streamed segment
+ * from being archived later.
+ */
+ if (XLogArchiveMode != ARCHIVE_MODE_ALWAYS)
+ XLogArchiveForceDone(xlogfname);
+ else
+ XLogArchiveNotify(xlogfname);
+ }
+ recvFile = -1;
+
+ /* Create/use new log file */
+ XLByteToSeg(recptr, recvSegNo, wal_segment_size);
+ use_existent = true;
+ recvFile = XLogFileInit(recvSegNo, &use_existent, true);
+ recvFileTLI = ThisTimeLineID;
+ }
+
+ /* Calculate the start offset of the received logs */
+ startoff = XLogSegmentOffset(recptr, wal_segment_size);
+
+ if (startoff + nbytes > wal_segment_size)
+ segbytes = wal_segment_size - startoff;
+ else
+ segbytes = nbytes;
+
+ /* OK to write the logs */
+ errno = 0;
+
+ byteswritten = pg_pwrite(recvFile, buf, segbytes, (off_t) startoff);
+ if (byteswritten <= 0)
+ {
+ char xlogfname[MAXFNAMELEN];
+ int save_errno;
+
+ /* if write didn't set errno, assume no disk space */
+ if (errno == 0)
+ errno = ENOSPC;
+
+ save_errno = errno;
+ XLogFileName(xlogfname, recvFileTLI, recvSegNo, wal_segment_size);
+ errno = save_errno;
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not write to log segment %s "
+ "at offset %u, length %lu: %m",
+ xlogfname, startoff, (unsigned long) segbytes)));
+ }
+
+ /* Update state for write */
+ recptr += byteswritten;
+
+ nbytes -= byteswritten;
+ buf += byteswritten;
+
+ LogstreamResult.Write = recptr;
+ }
+
+ /* Update shared-memory status */
+ pg_atomic_write_u64(&WalRcv->writtenUpto, LogstreamResult.Write);
+}
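+
+/*
+ * Worked example for the boundary split above, assuming the default
+ * wal_segment_size of 16MB (0x1000000): a 32kB write whose recptr has
+ * startoff = 0xFFC000 fits only segbytes = 0x1000000 - 0xFFC000 = 16kB
+ * in the current segment; the loop then opens the next segment and
+ * writes the remaining 16kB at offset 0.
+ */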
+
+/*
+ * Flush the log to disk.
+ *
+ * If we're in the midst of dying, it's unwise to do anything that might throw
+ * an error, so we skip sending a reply in that case.
+ */
+static void
+XLogWalRcvFlush(bool dying)
+{
+ if (LogstreamResult.Flush < LogstreamResult.Write)
+ {
+ WalRcvData *walrcv = WalRcv;
+
+ issue_xlog_fsync(recvFile, recvSegNo);
+
+ LogstreamResult.Flush = LogstreamResult.Write;
+
+ /* Update shared-memory status */
+ SpinLockAcquire(&walrcv->mutex);
+ if (walrcv->flushedUpto < LogstreamResult.Flush)
+ {
+ walrcv->latestChunkStart = walrcv->flushedUpto;
+ walrcv->flushedUpto = LogstreamResult.Flush;
+ walrcv->receivedTLI = ThisTimeLineID;
+ }
+ SpinLockRelease(&walrcv->mutex);
+
+ /* Signal the startup process and walsender that new WAL has arrived */
+ WakeupRecovery();
+ if (AllowCascadeReplication())
+ WalSndWakeup();
+
+ /* Report XLOG streaming progress in PS display */
+ if (update_process_title)
+ {
+ char activitymsg[50];
+
+ snprintf(activitymsg, sizeof(activitymsg), "streaming %X/%X",
+ (uint32) (LogstreamResult.Write >> 32),
+ (uint32) LogstreamResult.Write);
+ set_ps_display(activitymsg);
+ }
+
+ /* Also let the master know that we made some progress */
+ if (!dying)
+ {
+ XLogWalRcvSendReply(false, false);
+ XLogWalRcvSendHSFeedback(false);
+ }
+ }
+}
+
+/*
+ * Send reply message to primary, indicating our current WAL locations, oldest
+ * xmin and the current time.
+ *
+ * If 'force' is not set, the message is only sent if enough time has
+ * passed since last status update to reach wal_receiver_status_interval.
+ * If wal_receiver_status_interval is disabled altogether and 'force' is
+ * false, this is a no-op.
+ *
+ * If 'requestReply' is true, requests the server to reply immediately upon
+ * receiving this message. This is used for heartbeats, when approaching
+ * wal_receiver_timeout.
+ */
+static void
+XLogWalRcvSendReply(bool force, bool requestReply)
+{
+ static XLogRecPtr writePtr = 0;
+ static XLogRecPtr flushPtr = 0;
+ XLogRecPtr applyPtr;
+ static TimestampTz sendTime = 0;
+ TimestampTz now;
+
+ /*
+ * If the user doesn't want status to be reported to the master, be sure
+ * to exit before doing anything at all.
+ */
+ if (!force && wal_receiver_status_interval <= 0)
+ return;
+
+ /* Get current timestamp. */
+ now = GetCurrentTimestamp();
+
+ /*
+ * We can compare the write and flush positions to the last message we
+ * sent without taking any lock, but the apply position requires a spin
+ * lock, so we don't check that unless something else has changed or 10
+ * seconds have passed. This means that the apply WAL location will
+ * appear, from the master's point of view, to lag slightly, but since
+ * this is only for reporting purposes and only on idle systems, that's
+ * probably OK.
+ */
+ if (!force
+ && writePtr == LogstreamResult.Write
+ && flushPtr == LogstreamResult.Flush
+ && !TimestampDifferenceExceeds(sendTime, now,
+ wal_receiver_status_interval * 1000))
+ return;
+ sendTime = now;
+
+ /* Construct a new message */
+ writePtr = LogstreamResult.Write;
+ flushPtr = LogstreamResult.Flush;
+ applyPtr = GetXLogReplayRecPtr(NULL);
+
+ resetStringInfo(&reply_message);
+ pq_sendbyte(&reply_message, 'r');
+ pq_sendint64(&reply_message, writePtr);
+ pq_sendint64(&reply_message, flushPtr);
+ pq_sendint64(&reply_message, applyPtr);
+ pq_sendint64(&reply_message, GetCurrentTimestamp());
+ pq_sendbyte(&reply_message, requestReply ? 1 : 0);
+
+ /* Send it */
+ elog(DEBUG2, "sending write %X/%X flush %X/%X apply %X/%X%s",
+ (uint32) (writePtr >> 32), (uint32) writePtr,
+ (uint32) (flushPtr >> 32), (uint32) flushPtr,
+ (uint32) (applyPtr >> 32), (uint32) applyPtr,
+ requestReply ? " (reply requested)" : "");
+
+ walrcv_send(wrconn, reply_message.data, reply_message.len);
+}
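+
+/*
+ * For reference, the reply built above has this layout on the wire (as
+ * written by the pq_send* calls): 'r' | writePtr int64 | flushPtr int64 |
+ * applyPtr int64 | sendTime int64 | replyRequested byte.
+ */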
+
+/*
+ * Send hot standby feedback message to primary, plus the current time,
+ * in case they don't have a watch.
+ *
+ * If the user disables feedback, send one final message to tell sender
+ * to forget about the xmin on this standby. We also send this message
+ * on first connect because a previous connection might have set xmin
+ * on a replication slot. (If we're not using a slot it's harmless to
+ * send a feedback message explicitly setting InvalidTransactionId).
+ */
+static void
+XLogWalRcvSendHSFeedback(bool immed)
+{
+ TimestampTz now;
+ FullTransactionId nextFullXid;
+ TransactionId nextXid;
+ uint32 xmin_epoch,
+ catalog_xmin_epoch;
+ TransactionId xmin,
+ catalog_xmin;
+ static TimestampTz sendTime = 0;
+
+ /* initially true so we always send at least one feedback message */
+ static bool master_has_standby_xmin = true;
+
+ /*
+ * If the user doesn't want status to be reported to the master, be sure
+ * to exit before doing anything at all.
+ */
+ if ((wal_receiver_status_interval <= 0 || !hot_standby_feedback) &&
+ !master_has_standby_xmin)
+ return;
+
+ /* Get current timestamp. */
+ now = GetCurrentTimestamp();
+
+ if (!immed)
+ {
+ /*
+ * Send feedback at most once per wal_receiver_status_interval.
+ */
+ if (!TimestampDifferenceExceeds(sendTime, now,
+ wal_receiver_status_interval * 1000))
+ return;
+ sendTime = now;
+ }
+
+ /*
+ * If Hot Standby is not yet accepting connections there is nothing to
+	 * send. Check this after the interval has expired to reduce the
+	 * number of calls.
+ *
+ * Bailing out here also ensures that we don't send feedback until we've
+ * read our own replication slot state, so we don't tell the master to
+ * discard needed xmin or catalog_xmin from any slots that may exist on
+ * this replica.
+ */
+ if (!HotStandbyActive())
+ return;
+
+ /*
+ * Make the expensive call to get the oldest xmin once we are certain
+ * everything else has been checked.
+ */
+ if (hot_standby_feedback)
+ {
+ TransactionId slot_xmin;
+
+ /*
+ * Usually GetOldestXmin() would include both global replication slot
+ * xmin and catalog_xmin in its calculations, but we want to derive
+ * separate values for each of those. So we ask for an xmin that
+ * excludes the catalog_xmin.
+ */
+ xmin = GetOldestXmin(NULL,
+ PROCARRAY_FLAGS_DEFAULT | PROCARRAY_SLOTS_XMIN);
+
+ ProcArrayGetReplicationSlotXmin(&slot_xmin, &catalog_xmin);
+
+ if (TransactionIdIsValid(slot_xmin) &&
+ TransactionIdPrecedes(slot_xmin, xmin))
+ xmin = slot_xmin;
+ }
+ else
+ {
+ xmin = InvalidTransactionId;
+ catalog_xmin = InvalidTransactionId;
+ }
+
+ /*
+ * Get epoch and adjust if nextXid and oldestXmin are different sides of
+ * the epoch boundary.
+ */
+ nextFullXid = ReadNextFullTransactionId();
+ nextXid = XidFromFullTransactionId(nextFullXid);
+ xmin_epoch = EpochFromFullTransactionId(nextFullXid);
+ catalog_xmin_epoch = xmin_epoch;
+ if (nextXid < xmin)
+ xmin_epoch--;
+ if (nextXid < catalog_xmin)
+ catalog_xmin_epoch--;
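+
+	/*
+	 * Example (illustrative): if nextXid has wrapped around to 100 while
+	 * xmin is still 4000000000, then xmin was assigned in the previous
+	 * epoch; nextXid < xmin detects this and xmin_epoch is decremented
+	 * above (likewise for catalog_xmin).
+	 */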
+
+ elog(DEBUG2, "sending hot standby feedback xmin %u epoch %u catalog_xmin %u catalog_xmin_epoch %u",
+ xmin, xmin_epoch, catalog_xmin, catalog_xmin_epoch);
+
+ /* Construct the message and send it. */
+ resetStringInfo(&reply_message);
+ pq_sendbyte(&reply_message, 'h');
+ pq_sendint64(&reply_message, GetCurrentTimestamp());
+ pq_sendint32(&reply_message, xmin);
+ pq_sendint32(&reply_message, xmin_epoch);
+ pq_sendint32(&reply_message, catalog_xmin);
+ pq_sendint32(&reply_message, catalog_xmin_epoch);
+ walrcv_send(wrconn, reply_message.data, reply_message.len);
+ if (TransactionIdIsValid(xmin) || TransactionIdIsValid(catalog_xmin))
+ master_has_standby_xmin = true;
+ else
+ master_has_standby_xmin = false;
+}
+
+/*
+ * Update shared memory status upon receiving a message from primary.
+ *
+ * 'walEnd' and 'sendTime' are the end-of-WAL and timestamp of the latest
+ * message, reported by primary.
+ */
+static void
+ProcessWalSndrMessage(XLogRecPtr walEnd, TimestampTz sendTime)
+{
+ WalRcvData *walrcv = WalRcv;
+
+ TimestampTz lastMsgReceiptTime = GetCurrentTimestamp();
+
+ /* Update shared-memory status */
+ SpinLockAcquire(&walrcv->mutex);
+ if (walrcv->latestWalEnd < walEnd)
+ walrcv->latestWalEndTime = sendTime;
+ walrcv->latestWalEnd = walEnd;
+ walrcv->lastMsgSendTime = sendTime;
+ walrcv->lastMsgReceiptTime = lastMsgReceiptTime;
+ SpinLockRelease(&walrcv->mutex);
+
+ if (log_min_messages <= DEBUG2)
+ {
+ char *sendtime;
+ char *receipttime;
+ int applyDelay;
+
+ /* Copy because timestamptz_to_str returns a static buffer */
+ sendtime = pstrdup(timestamptz_to_str(sendTime));
+ receipttime = pstrdup(timestamptz_to_str(lastMsgReceiptTime));
+ applyDelay = GetReplicationApplyDelay();
+
+ /* apply delay is not available */
+ if (applyDelay == -1)
+ elog(DEBUG2, "sendtime %s receipttime %s replication apply delay (N/A) transfer latency %d ms",
+ sendtime,
+ receipttime,
+ GetReplicationTransferLatency());
+ else
+ elog(DEBUG2, "sendtime %s receipttime %s replication apply delay %d ms transfer latency %d ms",
+ sendtime,
+ receipttime,
+ applyDelay,
+ GetReplicationTransferLatency());
+
+ pfree(sendtime);
+ pfree(receipttime);
+ }
+}
+
+/*
+ * Wake up the walreceiver main loop.
+ *
+ * This is called by the startup process whenever interesting xlog records
+ * are applied, so that walreceiver can check if it needs to send an apply
+ * notification back to the master which may be waiting in a COMMIT with
+ * synchronous_commit = remote_apply.
+ */
+void
+WalRcvForceReply(void)
+{
+ Latch *latch;
+
+ WalRcv->force_reply = true;
+ /* fetching the latch pointer might not be atomic, so use spinlock */
+ SpinLockAcquire(&WalRcv->mutex);
+ latch = WalRcv->latch;
+ SpinLockRelease(&WalRcv->mutex);
+ if (latch)
+ SetLatch(latch);
+}
+
+/*
+ * Return a string constant representing the state. This is used
+ * in system functions and views, and should *not* be translated.
+ */
+static const char *
+WalRcvGetStateString(WalRcvState state)
+{
+ switch (state)
+ {
+ case WALRCV_STOPPED:
+ return "stopped";
+ case WALRCV_STARTING:
+ return "starting";
+ case WALRCV_STREAMING:
+ return "streaming";
+ case WALRCV_WAITING:
+ return "waiting";
+ case WALRCV_RESTARTING:
+ return "restarting";
+ case WALRCV_STOPPING:
+ return "stopping";
+ }
+ return "UNKNOWN";
+}
+
+/*
+ * Returns activity of WAL receiver, including pid, state and xlog locations
+ * received from the WAL sender of another server.
+ */
+Datum
+pg_stat_get_wal_receiver(PG_FUNCTION_ARGS)
+{
+ TupleDesc tupdesc;
+ Datum *values;
+ bool *nulls;
+ int pid;
+ bool ready_to_display;
+ WalRcvState state;
+ XLogRecPtr receive_start_lsn;
+ TimeLineID receive_start_tli;
+ XLogRecPtr written_lsn;
+ XLogRecPtr flushed_lsn;
+ TimeLineID received_tli;
+ TimestampTz last_send_time;
+ TimestampTz last_receipt_time;
+ XLogRecPtr latest_end_lsn;
+ TimestampTz latest_end_time;
+ char sender_host[NI_MAXHOST];
+ int sender_port = 0;
+ char slotname[NAMEDATALEN];
+ char conninfo[MAXCONNINFO];
+
+ /* Take a lock to ensure value consistency */
+ SpinLockAcquire(&WalRcv->mutex);
+ pid = (int) WalRcv->pid;
+ ready_to_display = WalRcv->ready_to_display;
+ state = WalRcv->walRcvState;
+ receive_start_lsn = WalRcv->receiveStart;
+ receive_start_tli = WalRcv->receiveStartTLI;
+ flushed_lsn = WalRcv->flushedUpto;
+ received_tli = WalRcv->receivedTLI;
+ last_send_time = WalRcv->lastMsgSendTime;
+ last_receipt_time = WalRcv->lastMsgReceiptTime;
+ latest_end_lsn = WalRcv->latestWalEnd;
+ latest_end_time = WalRcv->latestWalEndTime;
+ strlcpy(slotname, (char *) WalRcv->slotname, sizeof(slotname));
+ strlcpy(sender_host, (char *) WalRcv->sender_host, sizeof(sender_host));
+ sender_port = WalRcv->sender_port;
+ strlcpy(conninfo, (char *) WalRcv->conninfo, sizeof(conninfo));
+ SpinLockRelease(&WalRcv->mutex);
+
+ /*
+	 * If there is no WAL receiver (or it's not ready yet), just return a
+	 * tuple with NULL values.
+ */
+ if (pid == 0 || !ready_to_display)
+ PG_RETURN_NULL();
+
+ /*
+ * Read "writtenUpto" without holding a spinlock. Note that it may not be
+ * consistent with the other shared variables of the WAL receiver
+ * protected by a spinlock, but this should not be used for data integrity
+ * checks.
+ */
+ written_lsn = pg_atomic_read_u64(&WalRcv->writtenUpto);
+
+ /* determine result type */
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ values = palloc0(sizeof(Datum) * tupdesc->natts);
+ nulls = palloc0(sizeof(bool) * tupdesc->natts);
+
+ /* Fetch values */
+ values[0] = Int32GetDatum(pid);
+
+ if (!is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS))
+ {
+ /*
+ * Only superusers and members of pg_read_all_stats can see details.
+ * Other users only get the pid value to know whether it is a WAL
+ * receiver, but no details.
+ */
+ MemSet(&nulls[1], true, sizeof(bool) * (tupdesc->natts - 1));
+ }
+ else
+ {
+ values[1] = CStringGetTextDatum(WalRcvGetStateString(state));
+
+ if (XLogRecPtrIsInvalid(receive_start_lsn))
+ nulls[2] = true;
+ else
+ values[2] = LSNGetDatum(receive_start_lsn);
+ values[3] = Int32GetDatum(receive_start_tli);
+ if (XLogRecPtrIsInvalid(written_lsn))
+ nulls[4] = true;
+ else
+ values[4] = LSNGetDatum(written_lsn);
+ if (XLogRecPtrIsInvalid(flushed_lsn))
+ nulls[5] = true;
+ else
+ values[5] = LSNGetDatum(flushed_lsn);
+ values[6] = Int32GetDatum(received_tli);
+ if (last_send_time == 0)
+ nulls[7] = true;
+ else
+ values[7] = TimestampTzGetDatum(last_send_time);
+ if (last_receipt_time == 0)
+ nulls[8] = true;
+ else
+ values[8] = TimestampTzGetDatum(last_receipt_time);
+ if (XLogRecPtrIsInvalid(latest_end_lsn))
+ nulls[9] = true;
+ else
+ values[9] = LSNGetDatum(latest_end_lsn);
+ if (latest_end_time == 0)
+ nulls[10] = true;
+ else
+ values[10] = TimestampTzGetDatum(latest_end_time);
+ if (*slotname == '\0')
+ nulls[11] = true;
+ else
+ values[11] = CStringGetTextDatum(slotname);
+ if (*sender_host == '\0')
+ nulls[12] = true;
+ else
+ values[12] = CStringGetTextDatum(sender_host);
+ if (sender_port == 0)
+ nulls[13] = true;
+ else
+ values[13] = Int32GetDatum(sender_port);
+ if (*conninfo == '\0')
+ nulls[14] = true;
+ else
+ values[14] = CStringGetTextDatum(conninfo);
+ }
+
+ /* Returns the record as Datum */
+ PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
+}
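+
+/*
+ * This function implements the pg_stat_wal_receiver system view; a
+ * typical way to look at it from SQL (illustrative):
+ *
+ *     SELECT pid, status, written_lsn, flushed_lsn, sender_host
+ *       FROM pg_stat_wal_receiver;
+ */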
diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c
new file mode 100644
index 0000000..3984228
--- /dev/null
+++ b/src/backend/replication/walreceiverfuncs.c
@@ -0,0 +1,392 @@
+/*-------------------------------------------------------------------------
+ *
+ * walreceiverfuncs.c
+ *
+ * This file contains functions used by the startup process to communicate
+ * with the walreceiver process. Functions implementing walreceiver itself
+ * are in walreceiver.c.
+ *
+ * Portions Copyright (c) 2010-2020, PostgreSQL Global Development Group
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/replication/walreceiverfuncs.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include "access/xlog_internal.h"
+#include "postmaster/startup.h"
+#include "replication/walreceiver.h"
+#include "storage/pmsignal.h"
+#include "storage/shmem.h"
+#include "utils/timestamp.h"
+
+WalRcvData *WalRcv = NULL;
+
+/*
+ * How long to wait for walreceiver to start up after requesting
+ * postmaster to launch it. In seconds.
+ */
+#define WALRCV_STARTUP_TIMEOUT 10
+
+/* Report shared memory space needed by WalRcvShmemInit */
+Size
+WalRcvShmemSize(void)
+{
+ Size size = 0;
+
+ size = add_size(size, sizeof(WalRcvData));
+
+ return size;
+}
+
+/* Allocate and initialize walreceiver-related shared memory */
+void
+WalRcvShmemInit(void)
+{
+ bool found;
+
+ WalRcv = (WalRcvData *)
+ ShmemInitStruct("Wal Receiver Ctl", WalRcvShmemSize(), &found);
+
+ if (!found)
+ {
+ /* First time through, so initialize */
+ MemSet(WalRcv, 0, WalRcvShmemSize());
+ WalRcv->walRcvState = WALRCV_STOPPED;
+ SpinLockInit(&WalRcv->mutex);
+ pg_atomic_init_u64(&WalRcv->writtenUpto, 0);
+ WalRcv->latch = NULL;
+ }
+}
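+
+/*
+ * Illustrative note (not part of the original code): ShmemInitStruct() sets
+ * "found" when the structure already exists in shared memory, so only the
+ * first process to attach runs the MemSet/SpinLockInit sequence above;
+ * later attachers simply take the returned pointer.
+ */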
+
+/* Is walreceiver running (or starting up)? */
+bool
+WalRcvRunning(void)
+{
+ WalRcvData *walrcv = WalRcv;
+ WalRcvState state;
+ pg_time_t startTime;
+
+ SpinLockAcquire(&walrcv->mutex);
+
+ state = walrcv->walRcvState;
+ startTime = walrcv->startTime;
+
+ SpinLockRelease(&walrcv->mutex);
+
+ /*
+ * If it has taken too long for walreceiver to start up, give up. Setting
+ * the state to STOPPED ensures that if walreceiver later does start up
+ * after all, it will see that it's not supposed to be running and die
+ * without doing anything.
+ */
+ if (state == WALRCV_STARTING)
+ {
+ pg_time_t now = (pg_time_t) time(NULL);
+
+ if ((now - startTime) > WALRCV_STARTUP_TIMEOUT)
+ {
+ SpinLockAcquire(&walrcv->mutex);
+
+ if (walrcv->walRcvState == WALRCV_STARTING)
+ state = walrcv->walRcvState = WALRCV_STOPPED;
+
+ SpinLockRelease(&walrcv->mutex);
+ }
+ }
+
+ if (state != WALRCV_STOPPED)
+ return true;
+ else
+ return false;
+}
+
+/*
+ * Is walreceiver running and streaming (or at least attempting to connect,
+ * or starting up)?
+ */
+bool
+WalRcvStreaming(void)
+{
+ WalRcvData *walrcv = WalRcv;
+ WalRcvState state;
+ pg_time_t startTime;
+
+ SpinLockAcquire(&walrcv->mutex);
+
+ state = walrcv->walRcvState;
+ startTime = walrcv->startTime;
+
+ SpinLockRelease(&walrcv->mutex);
+
+ /*
+ * If it has taken too long for walreceiver to start up, give up. Setting
+ * the state to STOPPED ensures that if walreceiver later does start up
+ * after all, it will see that it's not supposed to be running and die
+ * without doing anything.
+ */
+ if (state == WALRCV_STARTING)
+ {
+ pg_time_t now = (pg_time_t) time(NULL);
+
+ if ((now - startTime) > WALRCV_STARTUP_TIMEOUT)
+ {
+ SpinLockAcquire(&walrcv->mutex);
+
+ if (walrcv->walRcvState == WALRCV_STARTING)
+ state = walrcv->walRcvState = WALRCV_STOPPED;
+
+ SpinLockRelease(&walrcv->mutex);
+ }
+ }
+
+ if (state == WALRCV_STREAMING || state == WALRCV_STARTING ||
+ state == WALRCV_RESTARTING)
+ return true;
+ else
+ return false;
+}
+
+/*
+ * Stop walreceiver (if running) and wait for it to die.
+ * Executed by the Startup process.
+ */
+void
+ShutdownWalRcv(void)
+{
+ WalRcvData *walrcv = WalRcv;
+ pid_t walrcvpid = 0;
+
+ /*
+ * Request walreceiver to stop. Walreceiver will switch to WALRCV_STOPPED
+	 * mode once it's finished, and will also request the postmaster not to
+	 * restart it.
+ */
+ SpinLockAcquire(&walrcv->mutex);
+ switch (walrcv->walRcvState)
+ {
+ case WALRCV_STOPPED:
+ break;
+ case WALRCV_STARTING:
+ walrcv->walRcvState = WALRCV_STOPPED;
+ break;
+
+ case WALRCV_STREAMING:
+ case WALRCV_WAITING:
+ case WALRCV_RESTARTING:
+ walrcv->walRcvState = WALRCV_STOPPING;
+ /* fall through */
+ case WALRCV_STOPPING:
+ walrcvpid = walrcv->pid;
+ break;
+ }
+ SpinLockRelease(&walrcv->mutex);
+
+ /*
+ * Signal walreceiver process if it was still running.
+ */
+ if (walrcvpid != 0)
+ kill(walrcvpid, SIGTERM);
+
+ /*
+ * Wait for walreceiver to acknowledge its death by setting state to
+ * WALRCV_STOPPED.
+ */
+ while (WalRcvRunning())
+ {
+ /*
+ * This possibly-long loop needs to handle interrupts of startup
+ * process.
+ */
+ HandleStartupProcInterrupts();
+
+ pg_usleep(100000); /* 100ms */
+ }
+}
+
+/*
+ * Request postmaster to start walreceiver.
+ *
+ * "recptr" indicates the position where streaming should begin. "conninfo"
+ * is a libpq connection string to use. "slotname" is, optionally, the name
+ * of a replication slot to acquire. "create_temp_slot" indicates whether to
+ * create a temporary slot when no "slotname" is given.
+ *
+ * WAL receivers do not directly load GUC parameters used for the connection
+ * to the primary, and rely on the values passed down by the caller of this
+ * routine instead. Hence, the addition of any new parameters should happen
+ * through this code path.
+ */
+void
+RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo,
+ const char *slotname, bool create_temp_slot)
+{
+ WalRcvData *walrcv = WalRcv;
+ bool launch = false;
+ pg_time_t now = (pg_time_t) time(NULL);
+ Latch *latch;
+
+ /*
+ * We always start at the beginning of the segment. That prevents a broken
+ * segment (i.e., with no records in the first half of a segment) from
+ * being created by XLOG streaming, which might cause trouble later on if
+	 * the segment is e.g. archived.
+ */
+ if (XLogSegmentOffset(recptr, wal_segment_size) != 0)
+ recptr -= XLogSegmentOffset(recptr, wal_segment_size);
+
+ SpinLockAcquire(&walrcv->mutex);
+
+ /* It better be stopped if we try to restart it */
+ Assert(walrcv->walRcvState == WALRCV_STOPPED ||
+ walrcv->walRcvState == WALRCV_WAITING);
+
+ if (conninfo != NULL)
+ strlcpy((char *) walrcv->conninfo, conninfo, MAXCONNINFO);
+ else
+ walrcv->conninfo[0] = '\0';
+
+ /*
+ * Use configured replication slot if present, and ignore the value of
+ * create_temp_slot as the slot name should be persistent. Otherwise, use
+ * create_temp_slot to determine whether this WAL receiver should create a
+ * temporary slot by itself and use it, or not.
+ */
+ if (slotname != NULL && slotname[0] != '\0')
+ {
+ strlcpy((char *) walrcv->slotname, slotname, NAMEDATALEN);
+ walrcv->is_temp_slot = false;
+ }
+ else
+ {
+ walrcv->slotname[0] = '\0';
+ walrcv->is_temp_slot = create_temp_slot;
+ }
+
+ if (walrcv->walRcvState == WALRCV_STOPPED)
+ {
+ launch = true;
+ walrcv->walRcvState = WALRCV_STARTING;
+ }
+ else
+ walrcv->walRcvState = WALRCV_RESTARTING;
+ walrcv->startTime = now;
+
+ /*
+ * If this is the first startup of walreceiver (on this timeline),
+ * initialize flushedUpto and latestChunkStart to the starting point.
+ */
+ if (walrcv->receiveStart == 0 || walrcv->receivedTLI != tli)
+ {
+ walrcv->flushedUpto = recptr;
+ walrcv->receivedTLI = tli;
+ walrcv->latestChunkStart = recptr;
+ }
+ walrcv->receiveStart = recptr;
+ walrcv->receiveStartTLI = tli;
+
+ latch = walrcv->latch;
+
+ SpinLockRelease(&walrcv->mutex);
+
+ if (launch)
+ SendPostmasterSignal(PMSIGNAL_START_WALRECEIVER);
+ else if (latch)
+ SetLatch(latch);
+}
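+
+/*
+ * Illustrative sketch (not part of the original code; connection string and
+ * slot name are hypothetical): the startup process might request streaming
+ * like this.  Note that a start position inside a segment is rounded down
+ * to the segment boundary above; with a 16MB segment size, an LSN of
+ * 0/16D68C0 becomes 0/1000000.
+ *
+ *     RequestXLogStreaming(tli, receivePtr,
+ *                          "host=primary.example.com port=5432",
+ *                          "standby_slot", false);
+ */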
+
+/*
+ * Returns the last+1 byte position that walreceiver has flushed.
+ *
+ * Optionally, returns the previous chunk start, that is the first byte
+ * written in the most recent walreceiver flush cycle. Callers not
+ * interested in that value may pass NULL for latestChunkStart. Same for
+ * receiveTLI.
+ */
+XLogRecPtr
+GetWalRcvFlushRecPtr(XLogRecPtr *latestChunkStart, TimeLineID *receiveTLI)
+{
+ WalRcvData *walrcv = WalRcv;
+ XLogRecPtr recptr;
+
+ SpinLockAcquire(&walrcv->mutex);
+ recptr = walrcv->flushedUpto;
+ if (latestChunkStart)
+ *latestChunkStart = walrcv->latestChunkStart;
+ if (receiveTLI)
+ *receiveTLI = walrcv->receivedTLI;
+ SpinLockRelease(&walrcv->mutex);
+
+ return recptr;
+}
+
+/*
+ * Returns the last+1 byte position that walreceiver has written.
+ * This returns a recently written value without taking a lock.
+ */
+XLogRecPtr
+GetWalRcvWriteRecPtr(void)
+{
+ WalRcvData *walrcv = WalRcv;
+
+ return pg_atomic_read_u64(&walrcv->writtenUpto);
+}
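+
+/*
+ * Illustrative sketch (assumption, not in the original code): the two
+ * accessors above offer different guarantees.  A caller that needs a flush
+ * position consistent with the timeline must use the spinlocked variant,
+ * while a cheap progress probe can use the lock-free atomic read:
+ *
+ *     TimeLineID  tli;
+ *     XLogRecPtr  flushed = GetWalRcvFlushRecPtr(NULL, &tli);
+ *     XLogRecPtr  written = GetWalRcvWriteRecPtr();
+ *
+ * "written" may run ahead of "flushed" and is not guaranteed to be
+ * consistent with it.
+ */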
+
+/*
+ * Returns the replication apply delay in ms or -1
+ * if the apply delay info is not available
+ */
+int
+GetReplicationApplyDelay(void)
+{
+ WalRcvData *walrcv = WalRcv;
+ XLogRecPtr receivePtr;
+ XLogRecPtr replayPtr;
+ TimestampTz chunkReplayStartTime;
+
+ SpinLockAcquire(&walrcv->mutex);
+ receivePtr = walrcv->flushedUpto;
+ SpinLockRelease(&walrcv->mutex);
+
+ replayPtr = GetXLogReplayRecPtr(NULL);
+
+ if (receivePtr == replayPtr)
+ return 0;
+
+ chunkReplayStartTime = GetCurrentChunkReplayStartTime();
+
+ if (chunkReplayStartTime == 0)
+ return -1;
+
+ return TimestampDifferenceMilliseconds(chunkReplayStartTime,
+ GetCurrentTimestamp());
+}
+
+/*
+ * Returns the network latency in ms.  Note that this includes any
+ * difference in clock settings between the servers, as well as timezone.
+ */
+int
+GetReplicationTransferLatency(void)
+{
+ WalRcvData *walrcv = WalRcv;
+ TimestampTz lastMsgSendTime;
+ TimestampTz lastMsgReceiptTime;
+
+ SpinLockAcquire(&walrcv->mutex);
+ lastMsgSendTime = walrcv->lastMsgSendTime;
+ lastMsgReceiptTime = walrcv->lastMsgReceiptTime;
+ SpinLockRelease(&walrcv->mutex);
+
+ return TimestampDifferenceMilliseconds(lastMsgSendTime,
+ lastMsgReceiptTime);
+}
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
new file mode 100644
index 0000000..975139a
--- /dev/null
+++ b/src/backend/replication/walsender.c
@@ -0,0 +1,3665 @@
+/*-------------------------------------------------------------------------
+ *
+ * walsender.c
+ *
+ * The WAL sender process (walsender) is new as of Postgres 9.0. It takes
+ * care of sending XLOG from the primary server to a single recipient.
+ * (Note that there can be more than one walsender process concurrently.)
+ * It is started by the postmaster when the walreceiver of a standby server
+ * connects to the primary server and requests XLOG streaming replication.
+ *
+ * A walsender is similar to a regular backend, ie. there is a one-to-one
+ * relationship between a connection and a walsender process, but instead
+ * of processing SQL queries, it understands a small set of special
+ * replication-mode commands. The START_REPLICATION command begins streaming
+ * WAL to the client. While streaming, the walsender keeps reading XLOG
+ * records from the disk and sends them to the standby server over the
+ * COPY protocol, until either side ends the replication by exiting COPY
+ * mode (or until the connection is closed).
+ *
+ * Normal termination is by SIGTERM, which instructs the walsender to
+ * close the connection and exit(0) at the next convenient moment. Emergency
+ * termination is by SIGQUIT; like any backend, the walsender will simply
+ * abort and exit on SIGQUIT. A close of the connection and a FATAL error
+ * are treated as not a crash but approximately normal termination;
+ * the walsender will exit quickly without sending any more XLOG records.
+ *
+ * If the server is shut down, the checkpointer sends us
+ * PROCSIG_WALSND_INIT_STOPPING after all regular backends have exited.  If
+ * the walsender is idle or running an SQL query, this causes it to shut
+ * down; if logical replication is in progress, all remaining WAL records
+ * are processed, followed by a shutdown.  Otherwise this causes the walsender
+ * to switch to the "stopping" state. In this state, the walsender will reject
+ * any further replication commands. The checkpointer begins the shutdown
+ * checkpoint once all walsenders are confirmed as stopping. When the shutdown
+ * checkpoint finishes, the postmaster sends us SIGUSR2. This instructs
+ * walsender to send any outstanding WAL, including the shutdown checkpoint
+ * record, wait for it to be replicated to the standby, and then exit.
+ *
+ *
+ * Portions Copyright (c) 2010-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/replication/walsender.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <signal.h>
+#include <unistd.h>
+
+#include "access/printtup.h"
+#include "access/timeline.h"
+#include "access/transam.h"
+#include "access/xact.h"
+#include "access/xlog_internal.h"
+#include "access/xlogreader.h"
+#include "access/xlogutils.h"
+#include "catalog/pg_authid.h"
+#include "catalog/pg_type.h"
+#include "commands/dbcommands.h"
+#include "commands/defrem.h"
+#include "funcapi.h"
+#include "libpq/libpq.h"
+#include "libpq/pqformat.h"
+#include "miscadmin.h"
+#include "nodes/replnodes.h"
+#include "pgstat.h"
+#include "postmaster/interrupt.h"
+#include "replication/basebackup.h"
+#include "replication/decode.h"
+#include "replication/logical.h"
+#include "replication/slot.h"
+#include "replication/snapbuild.h"
+#include "replication/syncrep.h"
+#include "replication/walreceiver.h"
+#include "replication/walsender.h"
+#include "replication/walsender_private.h"
+#include "storage/condition_variable.h"
+#include "storage/fd.h"
+#include "storage/ipc.h"
+#include "storage/pmsignal.h"
+#include "storage/proc.h"
+#include "storage/procarray.h"
+#include "tcop/dest.h"
+#include "tcop/tcopprot.h"
+#include "utils/acl.h"
+#include "utils/builtins.h"
+#include "utils/guc.h"
+#include "utils/memutils.h"
+#include "utils/pg_lsn.h"
+#include "utils/portal.h"
+#include "utils/ps_status.h"
+#include "utils/timeout.h"
+#include "utils/timestamp.h"
+
+/*
+ * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
+ *
+ * We don't have a good idea of what a good value would be; there's some
+ * overhead per message in both walsender and walreceiver, but on the other
+ * hand sending large batches makes walsender less responsive to signals
+ * because signals are checked only between messages. 128kB (with
+ * default 8k blocks) seems like a reasonable guess for now.
+ */
+#define MAX_SEND_SIZE (XLOG_BLCKSZ * 16)
+
+/* Array of WalSnds in shared memory */
+WalSndCtlData *WalSndCtl = NULL;
+
+/* My slot in the shared memory array */
+WalSnd *MyWalSnd = NULL;
+
+/* Global state */
+bool am_walsender = false; /* Am I a walsender process? */
+bool am_cascading_walsender = false; /* Am I cascading WAL to another
+ * standby? */
+bool am_db_walsender = false; /* Connected to a database? */
+
+/* User-settable parameters for walsender */
+int max_wal_senders = 0; /* the maximum number of concurrent
+ * walsenders */
+int wal_sender_timeout = 60 * 1000; /* maximum time to send one WAL
+ * data message */
+bool log_replication_commands = false;
+
+/*
+ * State for WalSndWakeupRequest
+ */
+bool wake_wal_senders = false;
+
+/*
+ * xlogreader used for replication. Note that a WAL sender doing physical
+ * replication does not need xlogreader to read WAL, but it needs one to
+ * keep track of the state of its work.
+ */
+static XLogReaderState *xlogreader = NULL;
+
+/*
+ * These variables keep track of the state of the timeline we're currently
+ * sending. sendTimeLine identifies the timeline. If sendTimeLineIsHistoric,
+ * the timeline is not the latest timeline on this server, and the server's
+ * history forked off from that timeline at sendTimeLineValidUpto.
+ */
+static TimeLineID sendTimeLine = 0;
+static TimeLineID sendTimeLineNextTLI = 0;
+static bool sendTimeLineIsHistoric = false;
+static XLogRecPtr sendTimeLineValidUpto = InvalidXLogRecPtr;
+
+/*
+ * How far have we sent WAL already? This is also advertised in
+ * MyWalSnd->sentPtr. (Actually, this is the next WAL location to send.)
+ */
+static XLogRecPtr sentPtr = InvalidXLogRecPtr;
+
+/* Buffers for constructing outgoing messages and processing reply messages. */
+static StringInfoData output_message;
+static StringInfoData reply_message;
+static StringInfoData tmpbuf;
+
+/* Timestamp of last ProcessRepliesIfAny(). */
+static TimestampTz last_processing = 0;
+
+/*
+ * Timestamp of last ProcessRepliesIfAny() that saw a reply from the
+ * standby. Set to 0 if wal_sender_timeout doesn't need to be active.
+ */
+static TimestampTz last_reply_timestamp = 0;
+
+/* Have we sent a heartbeat message asking for reply, since last reply? */
+static bool waiting_for_ping_response = false;
+
+/*
+ * While streaming WAL in Copy mode, streamingDoneSending is set to true
+ * after we have sent CopyDone. We should not send any more CopyData messages
+ * after that. streamingDoneReceiving is set to true when we receive CopyDone
+ * from the other end. When both become true, it's time to exit Copy mode.
+ */
+static bool streamingDoneSending;
+static bool streamingDoneReceiving;
+
+/* Have we caught up, i.e. sent all WAL that is currently available? */
+static bool WalSndCaughtUp = false;
+
+/* Flags set by signal handlers for later service in main loop */
+static volatile sig_atomic_t got_SIGUSR2 = false;
+static volatile sig_atomic_t got_STOPPING = false;
+
+/*
+ * This is set while we are streaming. When not set, the
+ * PROCSIG_WALSND_INIT_STOPPING signal will be handled like SIGTERM. When set,
+ * the main loop is responsible for checking got_STOPPING and terminating when
+ * it's set (after streaming any remaining WAL).
+ */
+static volatile sig_atomic_t replication_active = false;
+
+static LogicalDecodingContext *logical_decoding_ctx = NULL;
+
+/* A sample associating a WAL location with the time it was written. */
+typedef struct
+{
+ XLogRecPtr lsn;
+ TimestampTz time;
+} WalTimeSample;
+
+/* The size of our buffer of time samples. */
+#define LAG_TRACKER_BUFFER_SIZE 8192
+
+/* A mechanism for tracking replication lag. */
+typedef struct
+{
+ XLogRecPtr last_lsn;
+ WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE];
+ int write_head;
+ int read_heads[NUM_SYNC_REP_WAIT_MODE];
+ WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
+} LagTracker;
+
+static LagTracker *lag_tracker;
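+
+/*
+ * Illustrative note (assumption, not in the original code): the lag tracker
+ * is a circular buffer.  LagTrackerWrite() stores a sample at write_head and
+ * advances it modulo LAG_TRACKER_BUFFER_SIZE, while each sync-rep wait mode
+ * consumes samples through its own read head, conceptually:
+ *
+ *     lag_tracker->buffer[lag_tracker->write_head] = sample;
+ *     lag_tracker->write_head =
+ *         (lag_tracker->write_head + 1) % LAG_TRACKER_BUFFER_SIZE;
+ */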
+
+/* Signal handlers */
+static void WalSndLastCycleHandler(SIGNAL_ARGS);
+
+/* Prototypes for private functions */
+typedef void (*WalSndSendDataCallback) (void);
+static void WalSndLoop(WalSndSendDataCallback send_data);
+static void InitWalSenderSlot(void);
+static void WalSndKill(int code, Datum arg);
+static void WalSndShutdown(void) pg_attribute_noreturn();
+static void XLogSendPhysical(void);
+static void XLogSendLogical(void);
+static void WalSndDone(WalSndSendDataCallback send_data);
+static XLogRecPtr GetStandbyFlushRecPtr(void);
+static void IdentifySystem(void);
+static void CreateReplicationSlot(CreateReplicationSlotCmd *cmd);
+static void DropReplicationSlot(DropReplicationSlotCmd *cmd);
+static void StartReplication(StartReplicationCmd *cmd);
+static void StartLogicalReplication(StartReplicationCmd *cmd);
+static void ProcessStandbyMessage(void);
+static void ProcessStandbyReplyMessage(void);
+static void ProcessStandbyHSFeedbackMessage(void);
+static void ProcessRepliesIfAny(void);
+static void WalSndKeepalive(bool requestReply);
+static void WalSndKeepaliveIfNecessary(void);
+static void WalSndCheckTimeOut(void);
+static long WalSndComputeSleeptime(TimestampTz now);
+static void WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write);
+static void WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write);
+static void WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid);
+static XLogRecPtr WalSndWaitForWal(XLogRecPtr loc);
+static void LagTrackerWrite(XLogRecPtr lsn, TimestampTz local_flush_time);
+static TimeOffset LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now);
+static bool TransactionIdInRecentPast(TransactionId xid, uint32 epoch);
+
+static void WalSndSegmentOpen(XLogReaderState *state, XLogSegNo nextSegNo,
+ TimeLineID *tli_p);
+
+
+/* Initialize walsender process before entering the main command loop */
+void
+InitWalSender(void)
+{
+ am_cascading_walsender = RecoveryInProgress();
+
+ /* Create a per-walsender data structure in shared memory */
+ InitWalSenderSlot();
+
+ /*
+ * We don't currently need any ResourceOwner in a walsender process, but
+ * if we did, we could call CreateAuxProcessResourceOwner here.
+ */
+
+ /*
+	 * Let postmaster know that we're a WAL sender. Once we've declared ourselves as
+ * a WAL sender process, postmaster will let us outlive the bgwriter and
+ * kill us last in the shutdown sequence, so we get a chance to stream all
+ * remaining WAL at shutdown, including the shutdown checkpoint. Note that
+ * there's no going back, and we mustn't write any WAL records after this.
+ */
+ MarkPostmasterChildWalSender();
+ SendPostmasterSignal(PMSIGNAL_ADVANCE_STATE_MACHINE);
+
+ /* Initialize empty timestamp buffer for lag tracking. */
+ lag_tracker = MemoryContextAllocZero(TopMemoryContext, sizeof(LagTracker));
+}
+
+/*
+ * Clean up after an error.
+ *
+ * WAL sender processes don't use transactions like regular backends do.
+ * This function does any cleanup required after an error in a WAL sender
+ * process, similar to what transaction abort does in a regular backend.
+ */
+void
+WalSndErrorCleanup(void)
+{
+ LWLockReleaseAll();
+ ConditionVariableCancelSleep();
+ pgstat_report_wait_end();
+
+ if (xlogreader != NULL && xlogreader->seg.ws_file >= 0)
+ wal_segment_close(xlogreader);
+
+ if (MyReplicationSlot != NULL)
+ ReplicationSlotRelease();
+
+ ReplicationSlotCleanup();
+
+ replication_active = false;
+
+ /*
+ * If there is a transaction in progress, it will clean up our
+ * ResourceOwner, but if a replication command set up a resource owner
+ * without a transaction, we've got to clean that up now.
+ */
+ if (!IsTransactionOrTransactionBlock())
+ WalSndResourceCleanup(false);
+
+ if (got_STOPPING || got_SIGUSR2)
+ proc_exit(0);
+
+ /* Revert back to startup state */
+ WalSndSetState(WALSNDSTATE_STARTUP);
+}
+
+/*
+ * Clean up any ResourceOwner we created.
+ */
+void
+WalSndResourceCleanup(bool isCommit)
+{
+ ResourceOwner resowner;
+
+ if (CurrentResourceOwner == NULL)
+ return;
+
+ /*
+ * Deleting CurrentResourceOwner is not allowed, so we must save a pointer
+ * in a local variable and clear it first.
+ */
+ resowner = CurrentResourceOwner;
+ CurrentResourceOwner = NULL;
+
+ /* Now we can release resources and delete it. */
+ ResourceOwnerRelease(resowner,
+ RESOURCE_RELEASE_BEFORE_LOCKS, isCommit, true);
+ ResourceOwnerRelease(resowner,
+ RESOURCE_RELEASE_LOCKS, isCommit, true);
+ ResourceOwnerRelease(resowner,
+ RESOURCE_RELEASE_AFTER_LOCKS, isCommit, true);
+ ResourceOwnerDelete(resowner);
+}
+
+/*
+ * Handle a client's connection abort in an orderly manner.
+ */
+static void
+WalSndShutdown(void)
+{
+ /*
+ * Reset whereToSendOutput to prevent ereport from attempting to send any
+ * more messages to the standby.
+ */
+ if (whereToSendOutput == DestRemote)
+ whereToSendOutput = DestNone;
+
+ proc_exit(0);
+ abort(); /* keep the compiler quiet */
+}
+
+/*
+ * Handle the IDENTIFY_SYSTEM command.
+ */
+static void
+IdentifySystem(void)
+{
+ char sysid[32];
+ char xloc[MAXFNAMELEN];
+ XLogRecPtr logptr;
+ char *dbname = NULL;
+ DestReceiver *dest;
+ TupOutputState *tstate;
+ TupleDesc tupdesc;
+ Datum values[4];
+ bool nulls[4];
+
+ /*
+ * Reply with a result set with one row, four columns. First col is system
+ * ID, second is timeline ID, third is current xlog location and the
+ * fourth contains the database name if we are connected to one.
+ */
+
+ snprintf(sysid, sizeof(sysid), UINT64_FORMAT,
+ GetSystemIdentifier());
+
+ am_cascading_walsender = RecoveryInProgress();
+ if (am_cascading_walsender)
+ {
+ /* this also updates ThisTimeLineID */
+ logptr = GetStandbyFlushRecPtr();
+ }
+ else
+ logptr = GetFlushRecPtr();
+
+ snprintf(xloc, sizeof(xloc), "%X/%X", (uint32) (logptr >> 32), (uint32) logptr);
+
+ if (MyDatabaseId != InvalidOid)
+ {
+ MemoryContext cur = CurrentMemoryContext;
+
+ /* syscache access needs a transaction env. */
+ StartTransactionCommand();
+ /* make dbname live outside TX context */
+ MemoryContextSwitchTo(cur);
+ dbname = get_database_name(MyDatabaseId);
+ CommitTransactionCommand();
+ /* CommitTransactionCommand switches to TopMemoryContext */
+ MemoryContextSwitchTo(cur);
+ }
+
+ dest = CreateDestReceiver(DestRemoteSimple);
+ MemSet(nulls, false, sizeof(nulls));
+
+ /* need a tuple descriptor representing four columns */
+ tupdesc = CreateTemplateTupleDesc(4);
+ TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "systemid",
+ TEXTOID, -1, 0);
+ TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "timeline",
+ INT4OID, -1, 0);
+ TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "xlogpos",
+ TEXTOID, -1, 0);
+ TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 4, "dbname",
+ TEXTOID, -1, 0);
+
+ /* prepare for projection of tuples */
+ tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
+
+ /* column 1: system identifier */
+ values[0] = CStringGetTextDatum(sysid);
+
+ /* column 2: timeline */
+ values[1] = Int32GetDatum(ThisTimeLineID);
+
+ /* column 3: wal location */
+ values[2] = CStringGetTextDatum(xloc);
+
+ /* column 4: database name, or NULL if none */
+ if (dbname)
+ values[3] = CStringGetTextDatum(dbname);
+ else
+ nulls[3] = true;
+
+ /* send it to dest */
+ do_tup_output(tstate, values, nulls);
+
+ end_tup_output(tstate);
+}
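+
+/*
+ * For illustration only (all values hypothetical), the result set built
+ * above appears to a client roughly as:
+ *
+ *        systemid        | timeline |  xlogpos  | dbname
+ *  ---------------------+----------+-----------+--------
+ *   6807931234567890123 |        1 | 0/3000148 |
+ */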
+
+
+/*
+ * Handle TIMELINE_HISTORY command.
+ */
+static void
+SendTimeLineHistory(TimeLineHistoryCmd *cmd)
+{
+ StringInfoData buf;
+ char histfname[MAXFNAMELEN];
+ char path[MAXPGPATH];
+ int fd;
+ off_t histfilelen;
+ off_t bytesleft;
+ Size len;
+
+ /*
+	 * Reply with a result set with one row and two columns. The first column
+	 * is the name of the history file, the second is its contents.
+ */
+
+ TLHistoryFileName(histfname, cmd->timeline);
+ TLHistoryFilePath(path, cmd->timeline);
+
+ /* Send a RowDescription message */
+ pq_beginmessage(&buf, 'T');
+ pq_sendint16(&buf, 2); /* 2 fields */
+
+ /* first field */
+ pq_sendstring(&buf, "filename"); /* col name */
+ pq_sendint32(&buf, 0); /* table oid */
+ pq_sendint16(&buf, 0); /* attnum */
+ pq_sendint32(&buf, TEXTOID); /* type oid */
+ pq_sendint16(&buf, -1); /* typlen */
+ pq_sendint32(&buf, 0); /* typmod */
+ pq_sendint16(&buf, 0); /* format code */
+
+ /* second field */
+ pq_sendstring(&buf, "content"); /* col name */
+ pq_sendint32(&buf, 0); /* table oid */
+ pq_sendint16(&buf, 0); /* attnum */
+ /*
+ * While this is labeled as BYTEAOID, it is the same output format
+ * as TEXTOID above.
+ */
+ pq_sendint32(&buf, BYTEAOID); /* type oid */
+ pq_sendint16(&buf, -1); /* typlen */
+ pq_sendint32(&buf, 0); /* typmod */
+ pq_sendint16(&buf, 0); /* format code */
+ pq_endmessage(&buf);
+
+ /* Send a DataRow message */
+ pq_beginmessage(&buf, 'D');
+ pq_sendint16(&buf, 2); /* # of columns */
+ len = strlen(histfname);
+ pq_sendint32(&buf, len); /* col1 len */
+ pq_sendbytes(&buf, histfname, len);
+
+ fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
+ if (fd < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m", path)));
+
+ /* Determine file length and send it to client */
+ histfilelen = lseek(fd, 0, SEEK_END);
+ if (histfilelen < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not seek to end of file \"%s\": %m", path)));
+ if (lseek(fd, 0, SEEK_SET) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not seek to beginning of file \"%s\": %m", path)));
+
+ pq_sendint32(&buf, histfilelen); /* col2 len */
+
+ bytesleft = histfilelen;
+ while (bytesleft > 0)
+ {
+ PGAlignedBlock rbuf;
+ int nread;
+
+ pgstat_report_wait_start(WAIT_EVENT_WALSENDER_TIMELINE_HISTORY_READ);
+ nread = read(fd, rbuf.data, sizeof(rbuf));
+ pgstat_report_wait_end();
+ if (nread < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m",
+ path)));
+ else if (nread == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("could not read file \"%s\": read %d of %zu",
+ path, nread, (Size) bytesleft)));
+
+ pq_sendbytes(&buf, rbuf.data, nread);
+ bytesleft -= nread;
+ }
+
+ if (CloseTransientFile(fd) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m", path)));
+
+ pq_endmessage(&buf);
+}
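+
+/*
+ * Illustrative reply (hypothetical content): for TIMELINE_HISTORY 2 the
+ * messages built above would carry the history file name and its raw
+ * contents, e.g.
+ *
+ *   filename: 00000002.history
+ *   content:  1   0/3000148   no recovery target specified
+ */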
+
+/*
+ * Handle START_REPLICATION command.
+ *
+ * This returns after streaming has ended, or an ereport(ERROR) takes us
+ * back to the main loop.
+ */
+static void
+StartReplication(StartReplicationCmd *cmd)
+{
+ StringInfoData buf;
+ XLogRecPtr FlushPtr;
+
+ if (ThisTimeLineID == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("IDENTIFY_SYSTEM has not been run before START_REPLICATION")));
+
+ /* create xlogreader for physical replication */
+ xlogreader =
+ XLogReaderAllocate(wal_segment_size, NULL,
+ XL_ROUTINE(.segment_open = WalSndSegmentOpen,
+ .segment_close = wal_segment_close),
+ NULL);
+
+ if (!xlogreader)
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
+
+ /*
+ * We assume here that we're logging enough information in the WAL for
+ * log-shipping, since this is checked in PostmasterMain().
+ *
+	 * NOTE: wal_level can only change at shutdown, so in most cases there
+	 * should be no WAL data still visible to us that was written at
+	 * wal_level='minimal'.
+ */
+
+ if (cmd->slotname)
+ {
+ (void) ReplicationSlotAcquire(cmd->slotname, SAB_Error);
+ if (SlotIsLogical(MyReplicationSlot))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot use a logical replication slot for physical replication")));
+
+ /*
+ * We don't need to verify the slot's restart_lsn here; instead we
+ * rely on the caller requesting the starting point to use. If the
+ * WAL segment doesn't exist, we'll fail later.
+ */
+ }
+
+ /*
+ * Select the timeline. If it was given explicitly by the client, use
+ * that. Otherwise use the timeline of the last replayed record, which is
+ * kept in ThisTimeLineID.
+ */
+ if (am_cascading_walsender)
+ {
+ /* this also updates ThisTimeLineID */
+ FlushPtr = GetStandbyFlushRecPtr();
+ }
+ else
+ FlushPtr = GetFlushRecPtr();
+
+ if (cmd->timeline != 0)
+ {
+ XLogRecPtr switchpoint;
+
+ sendTimeLine = cmd->timeline;
+ if (sendTimeLine == ThisTimeLineID)
+ {
+ sendTimeLineIsHistoric = false;
+ sendTimeLineValidUpto = InvalidXLogRecPtr;
+ }
+ else
+ {
+ List *timeLineHistory;
+
+ sendTimeLineIsHistoric = true;
+
+ /*
+ * Check that the timeline the client requested exists, and the
+ * requested start location is on that timeline.
+ */
+ timeLineHistory = readTimeLineHistory(ThisTimeLineID);
+ switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,
+ &sendTimeLineNextTLI);
+ list_free_deep(timeLineHistory);
+
+ /*
+ * Found the requested timeline in the history. Check that
+ * requested startpoint is on that timeline in our history.
+ *
+ * This is quite loose on purpose. We only check that we didn't
+ * fork off the requested timeline before the switchpoint. We
+ * don't check that we switched *to* it before the requested
+ * starting point. This is because the client can legitimately
+ * request to start replication from the beginning of the WAL
+ * segment that contains switchpoint, but on the new timeline, so
+ * that it doesn't end up with a partial segment. If you ask for
+ * too old a starting point, you'll get an error later when we
+ * fail to find the requested WAL segment in pg_wal.
+ *
+ * XXX: we could be more strict here and only allow a startpoint
+ * that's older than the switchpoint, if it's still in the same
+ * WAL segment.
+ */
+ if (!XLogRecPtrIsInvalid(switchpoint) &&
+ switchpoint < cmd->startpoint)
+ {
+ ereport(ERROR,
+ (errmsg("requested starting point %X/%X on timeline %u is not in this server's history",
+ (uint32) (cmd->startpoint >> 32),
+ (uint32) (cmd->startpoint),
+ cmd->timeline),
+ errdetail("This server's history forked from timeline %u at %X/%X.",
+ cmd->timeline,
+ (uint32) (switchpoint >> 32),
+ (uint32) (switchpoint))));
+ }
+ sendTimeLineValidUpto = switchpoint;
+ }
+ }
+ else
+ {
+ sendTimeLine = ThisTimeLineID;
+ sendTimeLineValidUpto = InvalidXLogRecPtr;
+ sendTimeLineIsHistoric = false;
+ }
+
+ streamingDoneSending = streamingDoneReceiving = false;
+
+ /* If there is nothing to stream, don't even enter COPY mode */
+ if (!sendTimeLineIsHistoric || cmd->startpoint < sendTimeLineValidUpto)
+ {
+ /*
+ * When we first start replication the standby will be behind the
+ * primary. For some applications, for example synchronous
+ * replication, it is important to have a clear state for this initial
+ * catchup mode, so we can trigger actions when we change streaming
+ * state later. We may stay in this state for a long time, which is
+ * exactly why we want to be able to monitor whether or not we are
+ * still here.
+ */
+ WalSndSetState(WALSNDSTATE_CATCHUP);
+
+ /* Send a CopyBothResponse message, and start streaming */
+ pq_beginmessage(&buf, 'W');
+ pq_sendbyte(&buf, 0);
+ pq_sendint16(&buf, 0);
+ pq_endmessage(&buf);
+ pq_flush();
+
+ /*
+ * Don't allow a request to stream from a future point in WAL that
+ * hasn't been flushed to disk in this server yet.
+ */
+ if (FlushPtr < cmd->startpoint)
+ {
+ ereport(ERROR,
+ (errmsg("requested starting point %X/%X is ahead of the WAL flush position of this server %X/%X",
+ (uint32) (cmd->startpoint >> 32),
+ (uint32) (cmd->startpoint),
+ (uint32) (FlushPtr >> 32),
+ (uint32) (FlushPtr))));
+ }
+
+ /* Start streaming from the requested point */
+ sentPtr = cmd->startpoint;
+
+ /* Initialize shared memory status, too */
+ SpinLockAcquire(&MyWalSnd->mutex);
+ MyWalSnd->sentPtr = sentPtr;
+ SpinLockRelease(&MyWalSnd->mutex);
+
+ SyncRepInitConfig();
+
+ /* Main loop of walsender */
+ replication_active = true;
+
+ WalSndLoop(XLogSendPhysical);
+
+ replication_active = false;
+ if (got_STOPPING)
+ proc_exit(0);
+ WalSndSetState(WALSNDSTATE_STARTUP);
+
+ Assert(streamingDoneSending && streamingDoneReceiving);
+ }
+
+ if (cmd->slotname)
+ ReplicationSlotRelease();
+
+ /*
+ * Copy is finished now. Send a single-row result set indicating the next
+ * timeline.
+ */
+ if (sendTimeLineIsHistoric)
+ {
+ char startpos_str[8 + 1 + 8 + 1];
+ DestReceiver *dest;
+ TupOutputState *tstate;
+ TupleDesc tupdesc;
+ Datum values[2];
+ bool nulls[2];
+
+ snprintf(startpos_str, sizeof(startpos_str), "%X/%X",
+ (uint32) (sendTimeLineValidUpto >> 32),
+ (uint32) sendTimeLineValidUpto);
+
+ dest = CreateDestReceiver(DestRemoteSimple);
+ MemSet(nulls, false, sizeof(nulls));
+
+ /*
+ * Need a tuple descriptor representing two columns. int8 may seem
+ * like a surprising data type for this, but in theory int4 would not
+		 * be wide enough, as TimeLineID is unsigned.
+ */
+ tupdesc = CreateTemplateTupleDesc(2);
+ TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "next_tli",
+ INT8OID, -1, 0);
+ TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "next_tli_startpos",
+ TEXTOID, -1, 0);
+
+ /* prepare for projection of tuple */
+ tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
+
+ values[0] = Int64GetDatum((int64) sendTimeLineNextTLI);
+ values[1] = CStringGetTextDatum(startpos_str);
+
+ /* send it to dest */
+ do_tup_output(tstate, values, nulls);
+
+ end_tup_output(tstate);
+ }
+
+ /* Send CommandComplete message */
+ EndReplicationCommand("START_STREAMING");
+}
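+
+/*
+ * Illustrative walsender exchange (hypothetical slot name and LSN): a
+ * standby typically issues, over a replication connection,
+ *
+ *   IDENTIFY_SYSTEM
+ *   START_REPLICATION SLOT standby_slot PHYSICAL 0/3000000 TIMELINE 1
+ *
+ * after which both ends stay in COPY mode until either side sends CopyDone.
+ */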
+
+/*
+ * XLogReaderRoutine->page_read callback for logical decoding contexts, as a
+ * walsender process.
+ *
+ * Inside the walsender we can do better than read_local_xlog_page,
+ * which has to do a plain sleep/busy loop, because the walsender's latch gets
+ * set every time WAL is flushed.
+ */
+static int
+logical_read_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen,
+ XLogRecPtr targetRecPtr, char *cur_page)
+{
+ XLogRecPtr flushptr;
+ int count;
+ WALReadError errinfo;
+ XLogSegNo segno;
+
+ XLogReadDetermineTimeline(state, targetPagePtr, reqLen);
+ sendTimeLineIsHistoric = (state->currTLI != ThisTimeLineID);
+ sendTimeLine = state->currTLI;
+ sendTimeLineValidUpto = state->currTLIValidUntil;
+ sendTimeLineNextTLI = state->nextTLI;
+
+ /* make sure we have enough WAL available */
+ flushptr = WalSndWaitForWal(targetPagePtr + reqLen);
+
+ /* fail if not (implies we are going to shut down) */
+ if (flushptr < targetPagePtr + reqLen)
+ return -1;
+
+ if (targetPagePtr + XLOG_BLCKSZ <= flushptr)
+ count = XLOG_BLCKSZ; /* more than one block available */
+ else
+ count = flushptr - targetPagePtr; /* part of the page available */
+
+ /* now actually read the data, we know it's there */
+ if (!WALRead(state,
+ cur_page,
+ targetPagePtr,
+ XLOG_BLCKSZ,
+ state->seg.ws_tli, /* Pass the current TLI because only
+ * WalSndSegmentOpen controls whether new
+ * TLI is needed. */
+ &errinfo))
+ WALReadRaiseError(&errinfo);
+
+ /*
+ * After reading into the buffer, check that what we read was valid. We do
+ * this after reading, because even though the segment was present when we
+ * opened it, it might get recycled or removed while we read it. The
+ * read() succeeds in that case, but the data we tried to read might
+ * already have been overwritten with new WAL records.
+ */
+ XLByteToSeg(targetPagePtr, segno, state->segcxt.ws_segsize);
+ CheckXLogRemoved(segno, state->seg.ws_tli);
+
+ return count;
+}
+
+/*
+ * Process extra options given to CREATE_REPLICATION_SLOT.
+ */
+static void
+parseCreateReplSlotOptions(CreateReplicationSlotCmd *cmd,
+ bool *reserve_wal,
+ CRSSnapshotAction *snapshot_action)
+{
+ ListCell *lc;
+ bool snapshot_action_given = false;
+ bool reserve_wal_given = false;
+
+ /* Parse options */
+ foreach(lc, cmd->options)
+ {
+ DefElem *defel = (DefElem *) lfirst(lc);
+
+ if (strcmp(defel->defname, "export_snapshot") == 0)
+ {
+ if (snapshot_action_given || cmd->kind != REPLICATION_KIND_LOGICAL)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+
+ snapshot_action_given = true;
+ *snapshot_action = defGetBoolean(defel) ? CRS_EXPORT_SNAPSHOT :
+ CRS_NOEXPORT_SNAPSHOT;
+ }
+ else if (strcmp(defel->defname, "use_snapshot") == 0)
+ {
+ if (snapshot_action_given || cmd->kind != REPLICATION_KIND_LOGICAL)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+
+ snapshot_action_given = true;
+ *snapshot_action = CRS_USE_SNAPSHOT;
+ }
+ else if (strcmp(defel->defname, "reserve_wal") == 0)
+ {
+ if (reserve_wal_given || cmd->kind != REPLICATION_KIND_PHYSICAL)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options")));
+
+ reserve_wal_given = true;
+ *reserve_wal = true;
+ }
+ else
+ elog(ERROR, "unrecognized option: %s", defel->defname);
+ }
+}
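+
+/*
+ * For illustration (hypothetical slot names): the options parsed above show
+ * up in commands such as
+ *
+ *   CREATE_REPLICATION_SLOT phys_slot PHYSICAL RESERVE_WAL
+ *   CREATE_REPLICATION_SLOT logi_slot LOGICAL pgoutput EXPORT_SNAPSHOT
+ *
+ * RESERVE_WAL is accepted only for physical slots and the snapshot options
+ * only for logical ones, as enforced above.
+ */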
+
+/*
+ * Create a new replication slot.
+ */
+static void
+CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
+{
+ const char *snapshot_name = NULL;
+ char xloc[MAXFNAMELEN];
+ char *slot_name;
+ bool reserve_wal = false;
+ CRSSnapshotAction snapshot_action = CRS_EXPORT_SNAPSHOT;
+ DestReceiver *dest;
+ TupOutputState *tstate;
+ TupleDesc tupdesc;
+ Datum values[4];
+ bool nulls[4];
+
+ Assert(!MyReplicationSlot);
+
+ parseCreateReplSlotOptions(cmd, &reserve_wal, &snapshot_action);
+
+ /* setup state for WalSndSegmentOpen */
+ sendTimeLineIsHistoric = false;
+ sendTimeLine = ThisTimeLineID;
+
+ if (cmd->kind == REPLICATION_KIND_PHYSICAL)
+ {
+ ReplicationSlotCreate(cmd->slotname, false,
+ cmd->temporary ? RS_TEMPORARY : RS_PERSISTENT);
+ }
+ else
+ {
+ CheckLogicalDecodingRequirements();
+
+ /*
+ * Initially create persistent slot as ephemeral - that allows us to
+ * nicely handle errors during initialization because it'll get
+ * dropped if this transaction fails. We'll make it persistent at the
+ * end. Temporary slots can be created as temporary from beginning as
+ * they get dropped on error as well.
+ */
+ ReplicationSlotCreate(cmd->slotname, true,
+ cmd->temporary ? RS_TEMPORARY : RS_EPHEMERAL);
+ }
+
+ if (cmd->kind == REPLICATION_KIND_LOGICAL)
+ {
+ LogicalDecodingContext *ctx;
+ bool need_full_snapshot = false;
+
+ /*
+		 * Do the options check early so that we can bail out before calling
+		 * DecodingContextFindStartpoint, which can take a long time.
+ */
+ if (snapshot_action == CRS_EXPORT_SNAPSHOT)
+ {
+ if (IsTransactionBlock())
+ ereport(ERROR,
+ /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
+ (errmsg("%s must not be called inside a transaction",
+ "CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT")));
+
+ need_full_snapshot = true;
+ }
+ else if (snapshot_action == CRS_USE_SNAPSHOT)
+ {
+ if (!IsTransactionBlock())
+ ereport(ERROR,
+ /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
+ (errmsg("%s must be called inside a transaction",
+ "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT")));
+
+ if (XactIsoLevel != XACT_REPEATABLE_READ)
+ ereport(ERROR,
+ /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
+ (errmsg("%s must be called in REPEATABLE READ isolation mode transaction",
+ "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT")));
+
+ if (FirstSnapshotSet)
+ ereport(ERROR,
+ /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
+ (errmsg("%s must be called before any query",
+ "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT")));
+
+ if (IsSubTransaction())
+ ereport(ERROR,
+ /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
+ (errmsg("%s must not be called in a subtransaction",
+ "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT")));
+
+ need_full_snapshot = true;
+ }
+
+ ctx = CreateInitDecodingContext(cmd->plugin, NIL, need_full_snapshot,
+ InvalidXLogRecPtr,
+ XL_ROUTINE(.page_read = logical_read_xlog_page,
+ .segment_open = WalSndSegmentOpen,
+ .segment_close = wal_segment_close),
+ WalSndPrepareWrite, WalSndWriteData,
+ WalSndUpdateProgress);
+
+ /*
+ * Signal that we don't need the timeout mechanism. We're just
+ * creating the replication slot and don't yet accept feedback
+		 * messages or send keepalives. Since we may need to wait for further
+		 * WAL, the walsender could otherwise be killed too soon.
+ */
+ last_reply_timestamp = 0;
+
+ /* build initial snapshot, might take a while */
+ DecodingContextFindStartpoint(ctx);
+
+ /*
+ * Export or use the snapshot if we've been asked to do so.
+ *
+ * NB. We will convert the snapbuild.c kind of snapshot to normal
+ * snapshot when doing this.
+ */
+ if (snapshot_action == CRS_EXPORT_SNAPSHOT)
+ {
+ snapshot_name = SnapBuildExportSnapshot(ctx->snapshot_builder);
+ }
+ else if (snapshot_action == CRS_USE_SNAPSHOT)
+ {
+ Snapshot snap;
+
+ snap = SnapBuildInitialSnapshot(ctx->snapshot_builder);
+ RestoreTransactionSnapshot(snap, MyProc);
+ }
+
+ /* don't need the decoding context anymore */
+ FreeDecodingContext(ctx);
+
+ if (!cmd->temporary)
+ ReplicationSlotPersist();
+ }
+ else if (cmd->kind == REPLICATION_KIND_PHYSICAL && reserve_wal)
+ {
+ ReplicationSlotReserveWal();
+
+ ReplicationSlotMarkDirty();
+
+ /* Write this slot to disk if it's a permanent one. */
+ if (!cmd->temporary)
+ ReplicationSlotSave();
+ }
+
+ snprintf(xloc, sizeof(xloc), "%X/%X",
+ (uint32) (MyReplicationSlot->data.confirmed_flush >> 32),
+ (uint32) MyReplicationSlot->data.confirmed_flush);
+
+ dest = CreateDestReceiver(DestRemoteSimple);
+ MemSet(nulls, false, sizeof(nulls));
+
+ /*----------
+ * Need a tuple descriptor representing four columns:
+ * - first field: the slot name
+ * - second field: LSN at which we became consistent
+ * - third field: exported snapshot's name
+ * - fourth field: output plugin
+ *----------
+ */
+ tupdesc = CreateTemplateTupleDesc(4);
+ TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "slot_name",
+ TEXTOID, -1, 0);
+ TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "consistent_point",
+ TEXTOID, -1, 0);
+ TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "snapshot_name",
+ TEXTOID, -1, 0);
+ TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 4, "output_plugin",
+ TEXTOID, -1, 0);
+
+ /* prepare for projection of tuples */
+ tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
+
+ /* slot_name */
+ slot_name = NameStr(MyReplicationSlot->data.name);
+ values[0] = CStringGetTextDatum(slot_name);
+
+ /* consistent wal location */
+ values[1] = CStringGetTextDatum(xloc);
+
+ /* snapshot name, or NULL if none */
+ if (snapshot_name != NULL)
+ values[2] = CStringGetTextDatum(snapshot_name);
+ else
+ nulls[2] = true;
+
+ /* plugin, or NULL if none */
+ if (cmd->plugin != NULL)
+ values[3] = CStringGetTextDatum(cmd->plugin);
+ else
+ nulls[3] = true;
+
+ /* send it to dest */
+ do_tup_output(tstate, values, nulls);
+ end_tup_output(tstate);
+
+ ReplicationSlotRelease();
+}
+
+/*
+ * Get rid of a replication slot that is no longer wanted.
+ */
+static void
+DropReplicationSlot(DropReplicationSlotCmd *cmd)
+{
+ ReplicationSlotDrop(cmd->slotname, !cmd->wait);
+}
+
+/*
+ * Load previously initiated logical slot and prepare for sending data (via
+ * WalSndLoop).
+ */
+static void
+StartLogicalReplication(StartReplicationCmd *cmd)
+{
+ StringInfoData buf;
+ QueryCompletion qc;
+
+ /* make sure that our requirements are still fulfilled */
+ CheckLogicalDecodingRequirements();
+
+ Assert(!MyReplicationSlot);
+
+ (void) ReplicationSlotAcquire(cmd->slotname, SAB_Error);
+
+ if (XLogRecPtrIsInvalid(MyReplicationSlot->data.restart_lsn))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot read from logical replication slot \"%s\"",
+ cmd->slotname),
+ errdetail("This slot has been invalidated because it exceeded the maximum reserved size.")));
+
+ /*
+ * Force a disconnect, so that the decoding code doesn't need to care
+	 * about an eventual switch from running in recovery to running in a
+ * normal environment. Client code is expected to handle reconnects.
+ */
+ if (am_cascading_walsender && !RecoveryInProgress())
+ {
+ ereport(LOG,
+ (errmsg("terminating walsender process after promotion")));
+ got_STOPPING = true;
+ }
+
+ /*
+ * Create our decoding context, making it start at the previously ack'ed
+ * position.
+ *
+ * Do this before sending a CopyBothResponse message, so that any errors
+ * are reported early.
+ */
+ logical_decoding_ctx =
+ CreateDecodingContext(cmd->startpoint, cmd->options, false,
+ XL_ROUTINE(.page_read = logical_read_xlog_page,
+ .segment_open = WalSndSegmentOpen,
+ .segment_close = wal_segment_close),
+ WalSndPrepareWrite, WalSndWriteData,
+ WalSndUpdateProgress);
+ xlogreader = logical_decoding_ctx->reader;
+
+ WalSndSetState(WALSNDSTATE_CATCHUP);
+
+ /* Send a CopyBothResponse message, and start streaming */
+ pq_beginmessage(&buf, 'W');
+ pq_sendbyte(&buf, 0);
+ pq_sendint16(&buf, 0);
+ pq_endmessage(&buf);
+ pq_flush();
+
+ /* Start reading WAL from the oldest required WAL. */
+ XLogBeginRead(logical_decoding_ctx->reader,
+ MyReplicationSlot->data.restart_lsn);
+
+ /*
+ * Report the location after which we'll send out further commits as the
+ * current sentPtr.
+ */
+ sentPtr = MyReplicationSlot->data.confirmed_flush;
+
+ /* Also update the sent position status in shared memory */
+ SpinLockAcquire(&MyWalSnd->mutex);
+ MyWalSnd->sentPtr = MyReplicationSlot->data.restart_lsn;
+ SpinLockRelease(&MyWalSnd->mutex);
+
+ replication_active = true;
+
+ SyncRepInitConfig();
+
+ /* Main loop of walsender */
+ WalSndLoop(XLogSendLogical);
+
+ FreeDecodingContext(logical_decoding_ctx);
+ ReplicationSlotRelease();
+
+ replication_active = false;
+ if (got_STOPPING)
+ proc_exit(0);
+ WalSndSetState(WALSNDSTATE_STARTUP);
+
+ /* Get out of COPY mode (CommandComplete). */
+ SetQueryCompletion(&qc, CMDTAG_COPY, 0);
+ EndCommand(&qc, DestRemote, false);
+}
+
+/*
+ * LogicalDecodingContext 'prepare_write' callback.
+ *
+ * Prepare a write into a StringInfo.
+ *
+ * Don't do anything lasting in here; it's quite possible that nothing will
+ * be done with the data.
+ */
+static void
+WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write)
+{
+ /* can't have sync rep confused by sending the same LSN several times */
+ if (!last_write)
+ lsn = InvalidXLogRecPtr;
+
+ resetStringInfo(ctx->out);
+
+ pq_sendbyte(ctx->out, 'w');
+ pq_sendint64(ctx->out, lsn); /* dataStart */
+ pq_sendint64(ctx->out, lsn); /* walEnd */
+
+ /*
+ * Fill out the sendtime later, just as it's done in XLogSendPhysical, but
+ * reserve space here.
+ */
+ pq_sendint64(ctx->out, 0); /* sendtime */
+}
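+
+/*
+ * Illustrative layout (derived from the code above): the 'w' message
+ * prepared here is
+ *
+ *   byte  0       'w'
+ *   bytes 1-8     dataStart LSN
+ *   bytes 9-16    walEnd LSN
+ *   bytes 17-24   sendtime (zero here, patched in by WalSndWriteData)
+ *   bytes 25-...  the logical decoding payload
+ *
+ * which is why WalSndWriteData() copies the timestamp to offset
+ * 1 + sizeof(int64) + sizeof(int64).
+ */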
+
+/*
+ * LogicalDecodingContext 'write' callback.
+ *
+ * Actually write out data previously prepared by WalSndPrepareWrite out to
+ * the network. Take as long as needed, but process replies from the other
+ * side and check timeouts during that.
+ */
+static void
+WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
+ bool last_write)
+{
+ TimestampTz now;
+
+ /*
+ * Fill the send timestamp last, so that it is taken as late as possible.
+	 * This is somewhat ugly, but the protocol is fixed, as it has already
+	 * been used by streaming physical replication for several releases.
+ */
+ resetStringInfo(&tmpbuf);
+ now = GetCurrentTimestamp();
+ pq_sendint64(&tmpbuf, now);
+ memcpy(&ctx->out->data[1 + sizeof(int64) + sizeof(int64)],
+ tmpbuf.data, sizeof(int64));
+
+ /* output previously gathered data in a CopyData packet */
+ pq_putmessage_noblock('d', ctx->out->data, ctx->out->len);
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Try to flush pending output to the client */
+ if (pq_flush_if_writable() != 0)
+ WalSndShutdown();
+
+ /* Try taking fast path unless we get too close to walsender timeout. */
+ if (now < TimestampTzPlusMilliseconds(last_reply_timestamp,
+ wal_sender_timeout / 2) &&
+ !pq_is_send_pending())
+ {
+ return;
+ }
+
+ /* If we have pending write here, go to slow path */
+ for (;;)
+ {
+ int wakeEvents;
+ long sleeptime;
+
+ /* Check for input from the client */
+ ProcessRepliesIfAny();
+
+ /* die if timeout was reached */
+ WalSndCheckTimeOut();
+
+ /* Send keepalive if the time has come */
+ WalSndKeepaliveIfNecessary();
+
+ if (!pq_is_send_pending())
+ break;
+
+ sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
+
+ wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH |
+ WL_SOCKET_WRITEABLE | WL_SOCKET_READABLE | WL_TIMEOUT;
+
+ /* Sleep until something happens or we time out */
+ (void) WaitLatchOrSocket(MyLatch, wakeEvents,
+ MyProcPort->sock, sleeptime,
+ WAIT_EVENT_WAL_SENDER_WRITE_DATA);
+
+ /* Clear any already-pending wakeups */
+ ResetLatch(MyLatch);
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Process any requests or signals received recently */
+ if (ConfigReloadPending)
+ {
+ ConfigReloadPending = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ SyncRepInitConfig();
+ }
+
+ /* Try to flush pending output to the client */
+ if (pq_flush_if_writable() != 0)
+ WalSndShutdown();
+ }
+
+ /* reactivate latch so WalSndLoop knows to continue */
+ SetLatch(MyLatch);
+}
+
+/*
+ * LogicalDecodingContext 'update_progress' callback.
+ *
+ * Write the current position to the lag tracker (see XLogSendPhysical).
+ */
+static void
+WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid)
+{
+ static TimestampTz sendTime = 0;
+ TimestampTz now = GetCurrentTimestamp();
+
+ /*
+ * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS to
+ * avoid flooding the lag tracker when we commit frequently.
+ */
+#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS 1000
+ if (!TimestampDifferenceExceeds(sendTime, now,
+ WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS))
+ return;
+
+ LagTrackerWrite(lsn, now);
+ sendTime = now;
+}
+
+/*
+ * Wait until all WAL preceding loc has been flushed to disk, so that it can
+ * be safely sent to the client.
+ *
+ * Returns end LSN of flushed WAL. Normally this will be >= loc, but
+ * if we detect a shutdown request (either from postmaster or client)
+ * we will return early, so caller must always check.
+ */
+static XLogRecPtr
+WalSndWaitForWal(XLogRecPtr loc)
+{
+ int wakeEvents;
+ static XLogRecPtr RecentFlushPtr = InvalidXLogRecPtr;
+
+ /*
+ * Fast path to avoid acquiring the spinlock in case we already know we
+ * have enough WAL available. This is particularly interesting if we're
+ * far behind.
+ */
+ if (RecentFlushPtr != InvalidXLogRecPtr &&
+ loc <= RecentFlushPtr)
+ return RecentFlushPtr;
+
+ /* Get a more recent flush pointer. */
+ if (!RecoveryInProgress())
+ RecentFlushPtr = GetFlushRecPtr();
+ else
+ RecentFlushPtr = GetXLogReplayRecPtr(NULL);
+
+ for (;;)
+ {
+ long sleeptime;
+
+ /* Clear any already-pending wakeups */
+ ResetLatch(MyLatch);
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Process any requests or signals received recently */
+ if (ConfigReloadPending)
+ {
+ ConfigReloadPending = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ SyncRepInitConfig();
+ }
+
+ /* Check for input from the client */
+ ProcessRepliesIfAny();
+
+ /*
+		 * If we're shutting down, trigger pending WAL to be written out;
+		 * otherwise we'd possibly end up waiting for WAL that never gets
+ * written, because walwriter has shut down already.
+ */
+ if (got_STOPPING)
+ XLogBackgroundFlush();
+
+ /* Update our idea of the currently flushed position. */
+ if (!RecoveryInProgress())
+ RecentFlushPtr = GetFlushRecPtr();
+ else
+ RecentFlushPtr = GetXLogReplayRecPtr(NULL);
+
+ /*
+ * If postmaster asked us to stop, don't wait anymore.
+ *
+ * It's important to do this check after the recomputation of
+ * RecentFlushPtr, so we can send all remaining data before shutting
+ * down.
+ */
+ if (got_STOPPING)
+ break;
+
+ /*
+ * We only send regular messages to the client for full decoded
+		 * transactions, but synchronous replication or a walsender shutdown
+		 * may be waiting for a later location. So, before sleeping, we
+ * send a ping containing the flush location. If the receiver is
+ * otherwise idle, this keepalive will trigger a reply. Processing the
+ * reply will update these MyWalSnd locations.
+ */
+ if (MyWalSnd->flush < sentPtr &&
+ MyWalSnd->write < sentPtr &&
+ !waiting_for_ping_response)
+ WalSndKeepalive(false);
+
+ /* check whether we're done */
+ if (loc <= RecentFlushPtr)
+ break;
+
+ /* Waiting for new WAL. Since we need to wait, we're now caught up. */
+ WalSndCaughtUp = true;
+
+ /*
+ * Try to flush any pending output to the client.
+ */
+ if (pq_flush_if_writable() != 0)
+ WalSndShutdown();
+
+ /*
+ * If we have received CopyDone from the client, sent CopyDone
+ * ourselves, and the output buffer is empty, it's time to exit
+ * streaming, so fail the current WAL fetch request.
+ */
+ if (streamingDoneReceiving && streamingDoneSending &&
+ !pq_is_send_pending())
+ break;
+
+ /* die if timeout was reached */
+ WalSndCheckTimeOut();
+
+ /* Send keepalive if the time has come */
+ WalSndKeepaliveIfNecessary();
+
+ /*
+ * Sleep until something happens or we time out. Also wait for the
+ * socket becoming writable, if there's still pending output.
+ * Otherwise we might sit on sendable output data while waiting for
+ * new WAL to be generated. (But if we have nothing to send, we don't
+ * want to wake on socket-writable.)
+ */
+ sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
+
+ wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH |
+ WL_SOCKET_READABLE | WL_TIMEOUT;
+
+ if (pq_is_send_pending())
+ wakeEvents |= WL_SOCKET_WRITEABLE;
+
+ (void) WaitLatchOrSocket(MyLatch, wakeEvents,
+ MyProcPort->sock, sleeptime,
+ WAIT_EVENT_WAL_SENDER_WAIT_WAL);
+ }
+
+ /* reactivate latch so WalSndLoop knows to continue */
+ SetLatch(MyLatch);
+ return RecentFlushPtr;
+}
+
+/*
+ * Execute an incoming replication command.
+ *
+ * Returns true if the cmd_string was recognized as a WalSender command, false
+ * if not.
+ */
+bool
+exec_replication_command(const char *cmd_string)
+{
+ int parse_rc;
+ Node *cmd_node;
+ const char *cmdtag;
+ MemoryContext cmd_context;
+ MemoryContext old_context;
+
+ /*
+ * If WAL sender has been told that shutdown is getting close, switch its
+ * status accordingly to handle the next replication commands correctly.
+ */
+ if (got_STOPPING)
+ WalSndSetState(WALSNDSTATE_STOPPING);
+
+ /*
+	 * Throw error if in stopping mode. We need to prevent commands that could
+ * generate WAL while the shutdown checkpoint is being written. To be
+ * safe, we just prohibit all new commands.
+ */
+ if (MyWalSnd->state == WALSNDSTATE_STOPPING)
+ ereport(ERROR,
+ (errmsg("cannot execute new commands while WAL sender is in stopping mode")));
+
+ /*
+ * CREATE_REPLICATION_SLOT ... LOGICAL exports a snapshot until the next
+ * command arrives. Clean up the old stuff if there's anything.
+ */
+ SnapBuildClearExportedSnapshot();
+
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * Parse the command.
+ */
+ cmd_context = AllocSetContextCreate(CurrentMemoryContext,
+ "Replication command context",
+ ALLOCSET_DEFAULT_SIZES);
+ old_context = MemoryContextSwitchTo(cmd_context);
+
+ replication_scanner_init(cmd_string);
+ parse_rc = replication_yyparse();
+ if (parse_rc != 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg_internal("replication command parser returned %d",
+ parse_rc)));
+ replication_scanner_finish();
+
+ cmd_node = replication_parse_result;
+
+ /*
+ * If it's a SQL command, just clean up our mess and return false; the
+ * caller will take care of executing it.
+ */
+ if (IsA(cmd_node, SQLCmd))
+ {
+ if (MyDatabaseId == InvalidOid)
+ ereport(ERROR,
+ (errmsg("cannot execute SQL commands in WAL sender for physical replication")));
+
+ MemoryContextSwitchTo(old_context);
+ MemoryContextDelete(cmd_context);
+
+ /* Tell the caller that this wasn't a WalSender command. */
+ return false;
+ }
+
+ /*
+ * Report query to various monitoring facilities. For this purpose, we
+ * report replication commands just like SQL commands.
+ */
+ debug_query_string = cmd_string;
+
+ pgstat_report_activity(STATE_RUNNING, cmd_string);
+
+ /*
+ * Log replication command if log_replication_commands is enabled. Even
+ * when it's disabled, log the command with DEBUG1 level for backward
+ * compatibility.
+ */
+ ereport(log_replication_commands ? LOG : DEBUG1,
+ (errmsg("received replication command: %s", cmd_string)));
+
+ /*
+ * Disallow replication commands in aborted transaction blocks.
+ */
+ if (IsAbortedTransactionBlockState())
+ ereport(ERROR,
+ (errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
+ errmsg("current transaction is aborted, "
+ "commands ignored until end of transaction block")));
+
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * Allocate buffers that will be used for each outgoing and incoming
+ * message. We do this just once per command to reduce palloc overhead.
+ */
+ initStringInfo(&output_message);
+ initStringInfo(&reply_message);
+ initStringInfo(&tmpbuf);
+
+ switch (cmd_node->type)
+ {
+ case T_IdentifySystemCmd:
+ cmdtag = "IDENTIFY_SYSTEM";
+ IdentifySystem();
+ EndReplicationCommand(cmdtag);
+ break;
+
+ case T_BaseBackupCmd:
+ cmdtag = "BASE_BACKUP";
+ PreventInTransactionBlock(true, cmdtag);
+ SendBaseBackup((BaseBackupCmd *) cmd_node);
+ EndReplicationCommand(cmdtag);
+ break;
+
+ case T_CreateReplicationSlotCmd:
+ cmdtag = "CREATE_REPLICATION_SLOT";
+ CreateReplicationSlot((CreateReplicationSlotCmd *) cmd_node);
+ EndReplicationCommand(cmdtag);
+ break;
+
+ case T_DropReplicationSlotCmd:
+ cmdtag = "DROP_REPLICATION_SLOT";
+ DropReplicationSlot((DropReplicationSlotCmd *) cmd_node);
+ EndReplicationCommand(cmdtag);
+ break;
+
+ case T_StartReplicationCmd:
+ {
+ StartReplicationCmd *cmd = (StartReplicationCmd *) cmd_node;
+
+ cmdtag = "START_REPLICATION";
+ PreventInTransactionBlock(true, cmdtag);
+
+ if (cmd->kind == REPLICATION_KIND_PHYSICAL)
+ StartReplication(cmd);
+ else
+ StartLogicalReplication(cmd);
+
+ /* duplicative, but necessary per libpqrcv_endstreaming */
+ EndReplicationCommand(cmdtag);
+
+ Assert(xlogreader != NULL);
+ break;
+ }
+
+ case T_TimeLineHistoryCmd:
+ cmdtag = "TIMELINE_HISTORY";
+ PreventInTransactionBlock(true, cmdtag);
+ SendTimeLineHistory((TimeLineHistoryCmd *) cmd_node);
+ EndReplicationCommand(cmdtag);
+ break;
+
+ case T_VariableShowStmt:
+ {
+ DestReceiver *dest = CreateDestReceiver(DestRemoteSimple);
+ VariableShowStmt *n = (VariableShowStmt *) cmd_node;
+
+ cmdtag = "SHOW";
+
+ /* syscache access needs a transaction environment */
+ StartTransactionCommand();
+ GetPGVariable(n->name, dest);
+ CommitTransactionCommand();
+ EndReplicationCommand(cmdtag);
+ }
+ break;
+
+ default:
+ elog(ERROR, "unrecognized replication command node tag: %u",
+ cmd_node->type);
+ }
+
+ /* done */
+ MemoryContextSwitchTo(old_context);
+ MemoryContextDelete(cmd_context);
+
+ /* Report to pgstat that this process is now idle */
+ pgstat_report_activity(STATE_IDLE, NULL);
+ debug_query_string = NULL;
+
+ return true;
+}
+
+/*
+ * Process any incoming messages while streaming. Also checks if the remote
+ * end has closed the connection.
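+ *
+ * Only a small set of messages is expected here, as handled by the switch
+ * below: standby replies wrapped in CopyData ('d'), CopyDone ('c'), and
+ * connection termination ('X').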
+ */
+static void
+ProcessRepliesIfAny(void)
+{
+ unsigned char firstchar;
+ int r;
+ bool received = false;
+
+ last_processing = GetCurrentTimestamp();
+
+ /*
+ * If we already received a CopyDone from the frontend, any subsequent
+ * message is the beginning of a new command, and should be processed in
+ * the main processing loop.
+ */
+ while (!streamingDoneReceiving)
+ {
+ pq_startmsgread();
+ r = pq_getbyte_if_available(&firstchar);
+ if (r < 0)
+ {
+ /* unexpected error or EOF */
+ ereport(COMMERROR,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("unexpected EOF on standby connection")));
+ proc_exit(0);
+ }
+ if (r == 0)
+ {
+ /* no data available without blocking */
+ pq_endmsgread();
+ break;
+ }
+
+ /* Read the message contents */
+ resetStringInfo(&reply_message);
+ if (pq_getmessage(&reply_message, 0))
+ {
+ ereport(COMMERROR,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("unexpected EOF on standby connection")));
+ proc_exit(0);
+ }
+
+ /* Handle the very limited subset of commands expected in this phase */
+ switch (firstchar)
+ {
+ /*
+ * 'd' means a standby reply wrapped in a CopyData packet.
+ */
+ case 'd':
+ ProcessStandbyMessage();
+ received = true;
+ break;
+
+ /*
+ * CopyDone means the standby requested to finish streaming.
+ * Reply with CopyDone, if we had not sent that already.
+ */
+ case 'c':
+ if (!streamingDoneSending)
+ {
+ pq_putmessage_noblock('c', NULL, 0);
+ streamingDoneSending = true;
+ }
+
+ streamingDoneReceiving = true;
+ received = true;
+ break;
+
+ /*
+ * 'X' means that the standby is closing down the socket.
+ */
+ case 'X':
+ proc_exit(0);
+
+ default:
+ ereport(FATAL,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("invalid standby message type \"%c\"",
+ firstchar)));
+ }
+ }
+
+ /*
+ * Save the last reply timestamp if we've received at least one reply.
+ */
+ if (received)
+ {
+ last_reply_timestamp = last_processing;
+ waiting_for_ping_response = false;
+ }
+}
+
+/*
+ * Process a status update message received from standby.
+ */
+static void
+ProcessStandbyMessage(void)
+{
+ char msgtype;
+
+ /*
+ * Check message type from the first byte.
+ */
+ msgtype = pq_getmsgbyte(&reply_message);
+
+ switch (msgtype)
+ {
+ case 'r':
+ ProcessStandbyReplyMessage();
+ break;
+
+ case 'h':
+ ProcessStandbyHSFeedbackMessage();
+ break;
+
+ default:
+ ereport(COMMERROR,
+ (errcode(ERRCODE_PROTOCOL_VIOLATION),
+ errmsg("unexpected message type \"%c\"", msgtype)));
+ proc_exit(0);
+ }
+}
+
+/*
+ * Remember that a walreceiver just confirmed receipt of lsn `lsn`.
+ */
+static void
+PhysicalConfirmReceivedLocation(XLogRecPtr lsn)
+{
+ bool changed = false;
+ ReplicationSlot *slot = MyReplicationSlot;
+
+ Assert(lsn != InvalidXLogRecPtr);
+ SpinLockAcquire(&slot->mutex);
+ if (slot->data.restart_lsn != lsn)
+ {
+ changed = true;
+ slot->data.restart_lsn = lsn;
+ }
+ SpinLockRelease(&slot->mutex);
+
+ if (changed)
+ {
+ ReplicationSlotMarkDirty();
+ ReplicationSlotsComputeRequiredLSN();
+ }
+
+ /*
+ * One could argue that the slot should be saved to disk now, but that'd
+ * be energy wasted: the worst that lost information can do here is give us
+ * wrong information in a statistics view; we'll just potentially be more
+ * conservative in removing files.
+ */
+}
+
+/*
+ * Regular reply from standby advising of WAL locations on standby server.
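+ *
+ * As decoded below, the body (after the 'r' byte already consumed by the
+ * caller) carries the standby's write, flush and apply LSNs and its reply
+ * timestamp as int64s, followed by a one-byte reply-request flag.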
+ */
+static void
+ProcessStandbyReplyMessage(void)
+{
+ XLogRecPtr writePtr,
+ flushPtr,
+ applyPtr;
+ bool replyRequested;
+ TimeOffset writeLag,
+ flushLag,
+ applyLag;
+ bool clearLagTimes;
+ TimestampTz now;
+ TimestampTz replyTime;
+
+ static bool fullyAppliedLastTime = false;
+
+ /* the caller already consumed the msgtype byte */
+ writePtr = pq_getmsgint64(&reply_message);
+ flushPtr = pq_getmsgint64(&reply_message);
+ applyPtr = pq_getmsgint64(&reply_message);
+ replyTime = pq_getmsgint64(&reply_message);
+ replyRequested = pq_getmsgbyte(&reply_message);
+
+ if (log_min_messages <= DEBUG2)
+ {
+ char *replyTimeStr;
+
+ /* Copy because timestamptz_to_str returns a static buffer */
+ replyTimeStr = pstrdup(timestamptz_to_str(replyTime));
+
+ elog(DEBUG2, "write %X/%X flush %X/%X apply %X/%X%s reply_time %s",
+ (uint32) (writePtr >> 32), (uint32) writePtr,
+ (uint32) (flushPtr >> 32), (uint32) flushPtr,
+ (uint32) (applyPtr >> 32), (uint32) applyPtr,
+ replyRequested ? " (reply requested)" : "",
+ replyTimeStr);
+
+ pfree(replyTimeStr);
+ }
+
+ /* See if we can compute the round-trip lag for these positions. */
+ now = GetCurrentTimestamp();
+ writeLag = LagTrackerRead(SYNC_REP_WAIT_WRITE, writePtr, now);
+ flushLag = LagTrackerRead(SYNC_REP_WAIT_FLUSH, flushPtr, now);
+ applyLag = LagTrackerRead(SYNC_REP_WAIT_APPLY, applyPtr, now);
+
+ /*
+ * If the standby reports that it has fully replayed the WAL in two
+ * consecutive reply messages, then the second such message must result
+ * from wal_receiver_status_interval expiring on the standby. This is a
+ * convenient time to forget the lag times measured when it last
+ * wrote/flushed/applied a WAL record, to avoid displaying stale lag data
+ * until more WAL traffic arrives.
+ */
+ clearLagTimes = false;
+ if (applyPtr == sentPtr)
+ {
+ if (fullyAppliedLastTime)
+ clearLagTimes = true;
+ fullyAppliedLastTime = true;
+ }
+ else
+ fullyAppliedLastTime = false;
+
+ /* Send a reply if the standby requested one. */
+ if (replyRequested)
+ WalSndKeepalive(false);
+
+ /*
+ * Update shared state for this WalSender process based on reply data from
+ * standby.
+ */
+ {
+ WalSnd *walsnd = MyWalSnd;
+
+ SpinLockAcquire(&walsnd->mutex);
+ walsnd->write = writePtr;
+ walsnd->flush = flushPtr;
+ walsnd->apply = applyPtr;
+ if (writeLag != -1 || clearLagTimes)
+ walsnd->writeLag = writeLag;
+ if (flushLag != -1 || clearLagTimes)
+ walsnd->flushLag = flushLag;
+ if (applyLag != -1 || clearLagTimes)
+ walsnd->applyLag = applyLag;
+ walsnd->replyTime = replyTime;
+ SpinLockRelease(&walsnd->mutex);
+ }
+
+ if (!am_cascading_walsender)
+ SyncRepReleaseWaiters();
+
+ /*
+ * Advance our local xmin horizon when the client confirmed a flush.
+ */
+ if (MyReplicationSlot && flushPtr != InvalidXLogRecPtr)
+ {
+ if (SlotIsLogical(MyReplicationSlot))
+ LogicalConfirmReceivedLocation(flushPtr);
+ else
+ PhysicalConfirmReceivedLocation(flushPtr);
+ }
+}
+
+/* compute new replication slot xmin horizon if needed */
+static void
+PhysicalReplicationSlotNewXmin(TransactionId feedbackXmin, TransactionId feedbackCatalogXmin)
+{
+ bool changed = false;
+ ReplicationSlot *slot = MyReplicationSlot;
+
+ SpinLockAcquire(&slot->mutex);
+ MyPgXact->xmin = InvalidTransactionId;
+
+ /*
+ * For physical replication we don't need the interlock provided by xmin
+ * and effective_xmin since the consequences of a missed increase are
+ * limited to query cancellations, so set both at once.
+ */
+ if (!TransactionIdIsNormal(slot->data.xmin) ||
+ !TransactionIdIsNormal(feedbackXmin) ||
+ TransactionIdPrecedes(slot->data.xmin, feedbackXmin))
+ {
+ changed = true;
+ slot->data.xmin = feedbackXmin;
+ slot->effective_xmin = feedbackXmin;
+ }
+ if (!TransactionIdIsNormal(slot->data.catalog_xmin) ||
+ !TransactionIdIsNormal(feedbackCatalogXmin) ||
+ TransactionIdPrecedes(slot->data.catalog_xmin, feedbackCatalogXmin))
+ {
+ changed = true;
+ slot->data.catalog_xmin = feedbackCatalogXmin;
+ slot->effective_catalog_xmin = feedbackCatalogXmin;
+ }
+ SpinLockRelease(&slot->mutex);
+
+ if (changed)
+ {
+ ReplicationSlotMarkDirty();
+ ReplicationSlotsComputeRequiredXmin(false);
+ }
+}
+
+/*
+ * Check that the provided xmin/epoch are sane, that is, not in the future
+ * and not so far back as to be already wrapped around.
+ *
+ * The epoch of nextXid should be the same as the standby's, or, if the
+ * counter has wrapped, one greater than the standby's.
+ *
+ * This check doesn't care whether clog exists for these xids at all.
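+ *
+ * A worked example (values hypothetical): suppose ReadNextFullTransactionId()
+ * reports epoch 5, xid 100. A feedback xid of 90 is accepted only with
+ * epoch 5. A feedback xid of 4000000000, being numerically above nextXid,
+ * must carry epoch 4, and is then accepted because it is within 2^31 of
+ * nextXid in modulo-2^32 arithmetic. A feedback xid of 200 with epoch 4
+ * passes the epoch test but fails TransactionIdPrecedesOrEquals(): it has
+ * already wrapped around.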
+ */
+static bool
+TransactionIdInRecentPast(TransactionId xid, uint32 epoch)
+{
+ FullTransactionId nextFullXid;
+ TransactionId nextXid;
+ uint32 nextEpoch;
+
+ nextFullXid = ReadNextFullTransactionId();
+ nextXid = XidFromFullTransactionId(nextFullXid);
+ nextEpoch = EpochFromFullTransactionId(nextFullXid);
+
+ if (xid <= nextXid)
+ {
+ if (epoch != nextEpoch)
+ return false;
+ }
+ else
+ {
+ if (epoch + 1 != nextEpoch)
+ return false;
+ }
+
+ if (!TransactionIdPrecedesOrEquals(xid, nextXid))
+ return false; /* epoch OK, but it's wrapped around */
+
+ return true;
+}
+
+/*
+ * Hot Standby feedback
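+ *
+ * The feedback body decoded below is a reply timestamp (int64) followed by
+ * xmin, xmin epoch, catalog_xmin and catalog_xmin epoch, each 32 bits;
+ * invalid xmins mean the standby has turned hot_standby_feedback off.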
+ */
+static void
+ProcessStandbyHSFeedbackMessage(void)
+{
+ TransactionId feedbackXmin;
+ uint32 feedbackEpoch;
+ TransactionId feedbackCatalogXmin;
+ uint32 feedbackCatalogEpoch;
+ TimestampTz replyTime;
+
+ /*
+ * Decipher the reply message. The caller already consumed the msgtype
+ * byte. See XLogWalRcvSendHSFeedback() in walreceiver.c for the creation
+ * of this message.
+ */
+ replyTime = pq_getmsgint64(&reply_message);
+ feedbackXmin = pq_getmsgint(&reply_message, 4);
+ feedbackEpoch = pq_getmsgint(&reply_message, 4);
+ feedbackCatalogXmin = pq_getmsgint(&reply_message, 4);
+ feedbackCatalogEpoch = pq_getmsgint(&reply_message, 4);
+
+ if (log_min_messages <= DEBUG2)
+ {
+ char *replyTimeStr;
+
+ /* Copy because timestamptz_to_str returns a static buffer */
+ replyTimeStr = pstrdup(timestamptz_to_str(replyTime));
+
+ elog(DEBUG2, "hot standby feedback xmin %u epoch %u, catalog_xmin %u epoch %u reply_time %s",
+ feedbackXmin,
+ feedbackEpoch,
+ feedbackCatalogXmin,
+ feedbackCatalogEpoch,
+ replyTimeStr);
+
+ pfree(replyTimeStr);
+ }
+
+ /*
+ * Update shared state for this WalSender process based on reply data from
+ * standby.
+ */
+ {
+ WalSnd *walsnd = MyWalSnd;
+
+ SpinLockAcquire(&walsnd->mutex);
+ walsnd->replyTime = replyTime;
+ SpinLockRelease(&walsnd->mutex);
+ }
+
+ /*
+ * Unset WalSender's xmins if the feedback message values are invalid.
+ * This happens when the downstream turned hot_standby_feedback off.
+ */
+ if (!TransactionIdIsNormal(feedbackXmin)
+ && !TransactionIdIsNormal(feedbackCatalogXmin))
+ {
+ MyPgXact->xmin = InvalidTransactionId;
+ if (MyReplicationSlot != NULL)
+ PhysicalReplicationSlotNewXmin(feedbackXmin, feedbackCatalogXmin);
+ return;
+ }
+
+ /*
+ * Check that the provided xmin/epoch are sane, that is, not in the future
+ * and not so far back as to be already wrapped around. Ignore if not.
+ */
+ if (TransactionIdIsNormal(feedbackXmin) &&
+ !TransactionIdInRecentPast(feedbackXmin, feedbackEpoch))
+ return;
+
+ if (TransactionIdIsNormal(feedbackCatalogXmin) &&
+ !TransactionIdInRecentPast(feedbackCatalogXmin, feedbackCatalogEpoch))
+ return;
+
+ /*
+ * Set the WalSender's xmin equal to the standby's requested xmin, so that
+ * the xmin will be taken into account by GetOldestXmin. This will hold
+ * back the removal of dead rows and thereby prevent the generation of
+ * cleanup conflicts on the standby server.
+ *
+ * There is a small window for a race condition here: although we just
+ * checked that feedbackXmin precedes nextXid, the nextXid could have
+ * gotten advanced between our fetching it and applying the xmin below,
+ * perhaps far enough to make feedbackXmin wrap around. In that case the
+ * xmin we set here would be "in the future" and have no effect. No point
+ * in worrying about this since it's too late to save the desired data
+ * anyway. Assuming that the standby sends us an increasing sequence of
+ * xmins, this could only happen during the first reply cycle, else our
+ * own xmin would prevent nextXid from advancing so far.
+ *
+ * We don't bother taking the ProcArrayLock here. Setting the xmin field
+ * is assumed atomic, and there's no real need to prevent a concurrent
+ * GetOldestXmin. (If we're moving our xmin forward, this is obviously
+ * safe, and if we're moving it backwards, well, the data is at risk
+ * already since a VACUUM could have just finished calling GetOldestXmin.)
+ *
+ * If we're using a replication slot we reserve the xmin via that,
+ * otherwise via the walsender's PGXACT entry. We can only track the
+ * catalog xmin separately when using a slot, so we store the least of the
+ * two provided when not using a slot.
+ *
+ * XXX: It might make sense to generalize the ephemeral slot concept and
+ * always use the slot mechanism to handle the feedback xmin.
+ */
+ if (MyReplicationSlot != NULL) /* XXX: persistency configurable? */
+ PhysicalReplicationSlotNewXmin(feedbackXmin, feedbackCatalogXmin);
+ else
+ {
+ if (TransactionIdIsNormal(feedbackCatalogXmin)
+ && TransactionIdPrecedes(feedbackCatalogXmin, feedbackXmin))
+ MyPgXact->xmin = feedbackCatalogXmin;
+ else
+ MyPgXact->xmin = feedbackXmin;
+ }
+}
+
+/*
+ * Compute how long send/receive loops should sleep.
+ *
+ * If wal_sender_timeout is enabled we want to wake up in time to send
+ * keepalives and to abort the connection if wal_sender_timeout has been
+ * reached.
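+ *
+ * For example, with a (hypothetical) wal_sender_timeout of 60s: while no
+ * ping is outstanding we plan to wake 30s after the last reply, in time
+ * for WalSndKeepaliveIfNecessary() to send a keepalive; once a ping has
+ * been sent we sleep until the full 60s timeout would expire.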
+ */
+static long
+WalSndComputeSleeptime(TimestampTz now)
+{
+ long sleeptime = 10000; /* 10 s */
+
+ if (wal_sender_timeout > 0 && last_reply_timestamp > 0)
+ {
+ TimestampTz wakeup_time;
+
+ /*
+ * At the latest stop sleeping once wal_sender_timeout has been
+ * reached.
+ */
+ wakeup_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
+ wal_sender_timeout);
+
+ /*
+ * If no ping has been sent yet, wakeup when it's time to do so.
+ * WalSndKeepaliveIfNecessary() wants to send a keepalive once half of
+ * the timeout passed without a response.
+ */
+ if (!waiting_for_ping_response)
+ wakeup_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
+ wal_sender_timeout / 2);
+
+ /* Compute relative time until wakeup. */
+ sleeptime = TimestampDifferenceMilliseconds(now, wakeup_time);
+ }
+
+ return sleeptime;
+}
+
+/*
+ * Check whether there have been responses by the client within
+ * wal_sender_timeout and shutdown if not. Using last_processing as the
+ * reference point avoids counting server-side stalls against the client.
+ * However, a long server-side stall can make WalSndKeepaliveIfNecessary()
+ * postdate last_processing by more than wal_sender_timeout. If that happens,
+ * the client must reply almost immediately to avoid a timeout. This rarely
+ * affects the default configuration, under which clients spontaneously send a
+ * message every standby_message_timeout = wal_sender_timeout/6 = 10s. We
+ * could eliminate that problem by recognizing timeout expiration at
+ * wal_sender_timeout/2 after the keepalive.
+ */
+static void
+WalSndCheckTimeOut(void)
+{
+ TimestampTz timeout;
+
+ /* don't bail out if we're doing something that doesn't require timeouts */
+ if (last_reply_timestamp <= 0)
+ return;
+
+ timeout = TimestampTzPlusMilliseconds(last_reply_timestamp,
+ wal_sender_timeout);
+
+ if (wal_sender_timeout > 0 && last_processing >= timeout)
+ {
+ /*
+ * Since typically expiration of replication timeout means
+ * communication problem, we don't send the error message to the
+ * standby.
+ */
+ ereport(COMMERROR,
+ (errmsg("terminating walsender process due to replication timeout")));
+
+ WalSndShutdown();
+ }
+}
+
+/* Main loop of walsender process that streams the WAL over Copy messages. */
+static void
+WalSndLoop(WalSndSendDataCallback send_data)
+{
+ /*
+ * Initialize the last reply timestamp. That enables timeout processing
+ * from hereon.
+ */
+ last_reply_timestamp = GetCurrentTimestamp();
+ waiting_for_ping_response = false;
+
+ /*
+ * Loop until we reach the end of this timeline or the client requests to
+ * stop streaming.
+ */
+ for (;;)
+ {
+ /* Clear any already-pending wakeups */
+ ResetLatch(MyLatch);
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Process any requests or signals received recently */
+ if (ConfigReloadPending)
+ {
+ ConfigReloadPending = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ SyncRepInitConfig();
+ }
+
+ /* Check for input from the client */
+ ProcessRepliesIfAny();
+
+ /*
+ * If we have received CopyDone from the client, sent CopyDone
+ * ourselves, and the output buffer is empty, it's time to exit
+ * streaming.
+ */
+ if (streamingDoneReceiving && streamingDoneSending &&
+ !pq_is_send_pending())
+ break;
+
+ /*
+ * If we don't have any pending data in the output buffer, try to send
+ * some more. If there is some, we don't bother to call send_data
+ * again until we've flushed it ... but we'd better assume we are not
+ * caught up.
+ */
+ if (!pq_is_send_pending())
+ send_data();
+ else
+ WalSndCaughtUp = false;
+
+ /* Try to flush pending output to the client */
+ if (pq_flush_if_writable() != 0)
+ WalSndShutdown();
+
+ /* If nothing remains to be sent right now ... */
+ if (WalSndCaughtUp && !pq_is_send_pending())
+ {
+ /*
+ * If we're in catchup state, move to streaming. This is an
+ * important state change for users to know about, since before
+ * this point data loss might occur if the primary dies and we
+ * need to fail over to the standby. The state change is also
+ * important for synchronous replication, since commits that
+ * started to wait at that point might wait for some time.
+ */
+ if (MyWalSnd->state == WALSNDSTATE_CATCHUP)
+ {
+ ereport(DEBUG1,
+ (errmsg("\"%s\" has now caught up with upstream server",
+ application_name)));
+ WalSndSetState(WALSNDSTATE_STREAMING);
+ }
+
+ /*
+ * When SIGUSR2 arrives, we send any outstanding logs up to the
+ * shutdown checkpoint record (i.e., the latest record), wait for
+ * them to be replicated to the standby, and exit. This may be a
+ * normal termination at shutdown, or a promotion; the walsender
+ * is not sure which.
+ */
+ if (got_SIGUSR2)
+ WalSndDone(send_data);
+ }
+
+ /* Check for replication timeout. */
+ WalSndCheckTimeOut();
+
+ /* Send keepalive if the time has come */
+ WalSndKeepaliveIfNecessary();
+
+ /*
+ * Block if we have unsent data. XXX For logical replication, let
+ * WalSndWaitForWal() handle any other blocking; idle receivers need
+ * its additional actions. For physical replication, also block if
+ * caught up; its send_data does not block.
+ */
+ if ((WalSndCaughtUp && send_data != XLogSendLogical &&
+ !streamingDoneSending) ||
+ pq_is_send_pending())
+ {
+ long sleeptime;
+ int wakeEvents;
+
+ wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT;
+
+ if (!streamingDoneReceiving)
+ wakeEvents |= WL_SOCKET_READABLE;
+
+ /*
+ * Use fresh timestamp, not last_processing, to reduce the chance
+ * of reaching wal_sender_timeout before sending a keepalive.
+ */
+ sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
+
+ if (pq_is_send_pending())
+ wakeEvents |= WL_SOCKET_WRITEABLE;
+
+ /* Sleep until something happens or we time out */
+ (void) WaitLatchOrSocket(MyLatch, wakeEvents,
+ MyProcPort->sock, sleeptime,
+ WAIT_EVENT_WAL_SENDER_MAIN);
+ }
+ }
+}
+
+/* Initialize a per-walsender data structure for this walsender process */
+static void
+InitWalSenderSlot(void)
+{
+ int i;
+
+ /*
+ * WalSndCtl should be set up already (we inherit this by fork() or the
+ * EXEC_BACKEND mechanism from the postmaster).
+ */
+ Assert(WalSndCtl != NULL);
+ Assert(MyWalSnd == NULL);
+
+ /*
+ * Find a free walsender slot and reserve it. This must not fail due to
+ * the prior check for free WAL senders in InitProcess().
+ */
+ for (i = 0; i < max_wal_senders; i++)
+ {
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
+
+ SpinLockAcquire(&walsnd->mutex);
+
+ if (walsnd->pid != 0)
+ {
+ SpinLockRelease(&walsnd->mutex);
+ continue;
+ }
+ else
+ {
+ /*
+ * Found a free slot. Reserve it for us.
+ */
+ walsnd->pid = MyProcPid;
+ walsnd->state = WALSNDSTATE_STARTUP;
+ walsnd->sentPtr = InvalidXLogRecPtr;
+ walsnd->needreload = false;
+ walsnd->write = InvalidXLogRecPtr;
+ walsnd->flush = InvalidXLogRecPtr;
+ walsnd->apply = InvalidXLogRecPtr;
+ walsnd->writeLag = -1;
+ walsnd->flushLag = -1;
+ walsnd->applyLag = -1;
+ walsnd->sync_standby_priority = 0;
+ walsnd->latch = &MyProc->procLatch;
+ walsnd->replyTime = 0;
+ SpinLockRelease(&walsnd->mutex);
+ /* don't need the lock anymore */
+ MyWalSnd = (WalSnd *) walsnd;
+
+ break;
+ }
+ }
+
+ Assert(MyWalSnd != NULL);
+
+ /* Arrange to clean up at walsender exit */
+ on_shmem_exit(WalSndKill, 0);
+}
+
+/* Destroy the per-walsender data structure for this walsender process */
+static void
+WalSndKill(int code, Datum arg)
+{
+ WalSnd *walsnd = MyWalSnd;
+
+ Assert(walsnd != NULL);
+
+ MyWalSnd = NULL;
+
+ SpinLockAcquire(&walsnd->mutex);
+ /* clear latch while holding the spinlock, so it can safely be read */
+ walsnd->latch = NULL;
+ /* Mark WalSnd struct as no longer being in use. */
+ walsnd->pid = 0;
+ SpinLockRelease(&walsnd->mutex);
+}
+
+/* XLogReaderRoutine->segment_open callback */
+static void
+WalSndSegmentOpen(XLogReaderState *state, XLogSegNo nextSegNo,
+ TimeLineID *tli_p)
+{
+ char path[MAXPGPATH];
+
+ /*-------
+ * When reading from a historic timeline, and there is a timeline switch
+ * within this segment, read from the WAL segment belonging to the new
+ * timeline.
+ *
+ * For example, imagine that this server is currently on timeline 5, and
+ * we're streaming timeline 4. The switch from timeline 4 to 5 happened at
+ * 0/13002088. In pg_wal, we have these files:
+ *
+ * ...
+ * 000000040000000000000012
+ * 000000040000000000000013
+ * 000000050000000000000013
+ * 000000050000000000000014
+ * ...
+ *
+ * In this situation, when requested to send the WAL from segment 0x13, on
+ * timeline 4, we read the WAL from file 000000050000000000000013. Archive
+ * recovery prefers files from newer timelines, so if the segment was
+ * restored from the archive on this server, the file belonging to the old
+ * timeline, 000000040000000000000013, might not exist. Their contents are
+ * equal up to the switchpoint, because at a timeline switch, the used
+ * portion of the old segment is copied to the new file.
+ *-------
+ */
+ *tli_p = sendTimeLine;
+ if (sendTimeLineIsHistoric)
+ {
+ XLogSegNo endSegNo;
+
+ XLByteToSeg(sendTimeLineValidUpto, endSegNo, state->segcxt.ws_segsize);
+ if (nextSegNo == endSegNo)
+ *tli_p = sendTimeLineNextTLI;
+ }
+
+ XLogFilePath(path, *tli_p, nextSegNo, state->segcxt.ws_segsize);
+ state->seg.ws_file = BasicOpenFile(path, O_RDONLY | PG_BINARY);
+ if (state->seg.ws_file >= 0)
+ return;
+
+ /*
+ * If the file is not found, assume it's because the standby asked for a
+ * too old WAL segment that has already been removed or recycled.
+ */
+ if (errno == ENOENT)
+ {
+ char xlogfname[MAXFNAMELEN];
+ int save_errno = errno;
+
+ XLogFileName(xlogfname, *tli_p, nextSegNo, wal_segment_size);
+ errno = save_errno;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("requested WAL segment %s has already been removed",
+ xlogfname)));
+ }
+ else
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m",
+ path)));
+}
+
+/*
+ * Send out the WAL in its normal physical/stored form.
+ *
+ * Read up to MAX_SEND_SIZE bytes of WAL that's been flushed to disk,
+ * but not yet sent to the client, and buffer it in the libpq output
+ * buffer.
+ *
+ * If there is no unsent WAL remaining, WalSndCaughtUp is set to true,
+ * otherwise WalSndCaughtUp is set to false.
+ */
+static void
+XLogSendPhysical(void)
+{
+ XLogRecPtr SendRqstPtr;
+ XLogRecPtr startptr;
+ XLogRecPtr endptr;
+ Size nbytes;
+ XLogSegNo segno;
+ WALReadError errinfo;
+
+ /* If requested switch the WAL sender to the stopping state. */
+ if (got_STOPPING)
+ WalSndSetState(WALSNDSTATE_STOPPING);
+
+ if (streamingDoneSending)
+ {
+ WalSndCaughtUp = true;
+ return;
+ }
+
+ /* Figure out how far we can safely send the WAL. */
+ if (sendTimeLineIsHistoric)
+ {
+ /*
+ * Streaming an old timeline that's in this server's history, but is
+ * not the one we're currently inserting or replaying. It can be
+ * streamed up to the point where we switched off that timeline.
+ */
+ SendRqstPtr = sendTimeLineValidUpto;
+ }
+ else if (am_cascading_walsender)
+ {
+ /*
+ * Streaming the latest timeline on a standby.
+ *
+ * Attempt to send all WAL that has already been replayed, so that we
+ * know it's valid. If we're receiving WAL through streaming
+ * replication, it's also OK to send any WAL that has been received
+ * but not replayed.
+ *
+ * The timeline we're recovering from can change, or we can be
+ * promoted. In either case, the current timeline becomes historic. We
+ * need to detect that so that we don't try to stream past the point
+ * where we switched to another timeline. We check for promotion or
+ * timeline switch after calculating FlushPtr, to avoid a race
+ * condition: if the timeline becomes historic just after we checked
+ * that it was still current, it's still OK to stream it up to the
+ * FlushPtr that was calculated before it became historic.
+ */
+ bool becameHistoric = false;
+
+ SendRqstPtr = GetStandbyFlushRecPtr();
+
+ if (!RecoveryInProgress())
+ {
+ /*
+ * We have been promoted. RecoveryInProgress() updated
+ * ThisTimeLineID to the new current timeline.
+ */
+ am_cascading_walsender = false;
+ becameHistoric = true;
+ }
+ else
+ {
+ /*
+ * Still a cascading standby. But is the timeline we're sending
+ * still the one recovery is recovering from? ThisTimeLineID was
+ * updated by the GetStandbyFlushRecPtr() call above.
+ */
+ if (sendTimeLine != ThisTimeLineID)
+ becameHistoric = true;
+ }
+
+ if (becameHistoric)
+ {
+ /*
+ * The timeline we were sending has become historic. Read the
+ * timeline history file of the new timeline to see where exactly
+ * we forked off from the timeline we were sending.
+ */
+ List *history;
+
+ history = readTimeLineHistory(ThisTimeLineID);
+ sendTimeLineValidUpto = tliSwitchPoint(sendTimeLine, history, &sendTimeLineNextTLI);
+
+ Assert(sendTimeLine < sendTimeLineNextTLI);
+ list_free_deep(history);
+
+ sendTimeLineIsHistoric = true;
+
+ SendRqstPtr = sendTimeLineValidUpto;
+ }
+ }
+ else
+ {
+ /*
+ * Streaming the current timeline on a master.
+ *
+ * Attempt to send all data that's already been written out and
+ * fsync'd to disk. We cannot go further than what's been written out
+ * given the current implementation of WALRead(). And in any case
+ * it's unsafe to send WAL that is not securely down to disk on the
+ * master: if the master subsequently crashes and restarts, standbys
+ * must not have applied any WAL that got lost on the master.
+ */
+ SendRqstPtr = GetFlushRecPtr();
+ }
+
+ /*
+ * Record the current system time as an approximation of the time at which
+ * this WAL location was written for the purposes of lag tracking.
+ *
+ * In theory we could make XLogFlush() record a time in shmem whenever WAL
+ * is flushed and we could get that time as well as the LSN when we call
+ * GetFlushRecPtr() above (and likewise for the cascading standby
+ * equivalent), but rather than putting any new code into the hot WAL path
+ * it seems good enough to capture the time here. We should reach this
+ * after XLogFlush() runs WalSndWakeupProcessRequests(), and although that
+ * may take some time, we read the WAL flush pointer and take the time
+ * very close together here so that we'll get a later position if it is
+ * still moving.
+ *
+ * Because LagTrackerWrite ignores samples when the LSN hasn't advanced,
+ * this gives us a cheap approximation for the WAL flush time for this
+ * LSN.
+ *
+ * Note that the LSN is not necessarily the LSN for the data contained in
+ * the present message; it's the end of the WAL, which might be further
+ * ahead. All the lag tracking machinery cares about is finding out when
+ * that arbitrary LSN is eventually reported as written, flushed and
+ * applied, so that it can measure the elapsed time.
+ */
+ LagTrackerWrite(SendRqstPtr, GetCurrentTimestamp());
+
+ /*
+ * If this is a historic timeline and we've reached the point where we
+ * forked to the next timeline, stop streaming.
+ *
+ * Note: We might already have sent WAL > sendTimeLineValidUpto. The
+ * startup process will normally replay all WAL that has been received
+ * from the master, before promoting, but if the WAL streaming is
+ * terminated at a WAL page boundary, the valid portion of the timeline
+ * might end in the middle of a WAL record. We might've already sent the
+ * first half of that partial WAL record to the cascading standby, so that
+ * sentPtr > sendTimeLineValidUpto. That's OK; the cascading standby can't
+ * replay the partial WAL record either, so it can still follow our
+ * timeline switch.
+ */
+ if (sendTimeLineIsHistoric && sendTimeLineValidUpto <= sentPtr)
+ {
+ /* close the current file. */
+ if (xlogreader->seg.ws_file >= 0)
+ wal_segment_close(xlogreader);
+
+ /* Send CopyDone */
+ pq_putmessage_noblock('c', NULL, 0);
+ streamingDoneSending = true;
+
+ WalSndCaughtUp = true;
+
+ elog(DEBUG1, "walsender reached end of timeline at %X/%X (sent up to %X/%X)",
+ (uint32) (sendTimeLineValidUpto >> 32), (uint32) sendTimeLineValidUpto,
+ (uint32) (sentPtr >> 32), (uint32) sentPtr);
+ return;
+ }
+
+ /* Do we have any work to do? */
+ Assert(sentPtr <= SendRqstPtr);
+ if (SendRqstPtr <= sentPtr)
+ {
+ WalSndCaughtUp = true;
+ return;
+ }
+
+ /*
+ * Figure out how much to send in one message. If there's no more than
+ * MAX_SEND_SIZE bytes to send, send everything. Otherwise send
+ * MAX_SEND_SIZE bytes, but round back to logfile or page boundary.
+ *
+ * The rounding is not only for performance reasons. Walreceiver relies on
+ * the fact that we never split a WAL record across two messages. Since a
+ * long WAL record is split at page boundary into continuation records,
+ * page boundary is always a safe cut-off point. We also assume that
+ * SendRqstPtr never points to the middle of a WAL record.
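+ *
+ * For instance (assuming the standard 8kB XLOG_BLCKSZ): with abundant
+ * unsent WAL, endptr is advanced by MAX_SEND_SIZE and then rounded down
+ * to an 8kB page boundary, so a message always ends exactly on a page
+ * boundary and thus never splits a WAL record across messages.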
+ */
+ startptr = sentPtr;
+ endptr = startptr;
+ endptr += MAX_SEND_SIZE;
+
+ /* if we went beyond SendRqstPtr, back off */
+ if (SendRqstPtr <= endptr)
+ {
+ endptr = SendRqstPtr;
+ if (sendTimeLineIsHistoric)
+ WalSndCaughtUp = false;
+ else
+ WalSndCaughtUp = true;
+ }
+ else
+ {
+ /* round down to page boundary. */
+ endptr -= (endptr % XLOG_BLCKSZ);
+ WalSndCaughtUp = false;
+ }
+
+ nbytes = endptr - startptr;
+ Assert(nbytes <= MAX_SEND_SIZE);
+
+ /*
+ * OK to read and send the slice.
+ */
+ resetStringInfo(&output_message);
+ pq_sendbyte(&output_message, 'w');
+
+ pq_sendint64(&output_message, startptr); /* dataStart */
+ pq_sendint64(&output_message, SendRqstPtr); /* walEnd */
+ pq_sendint64(&output_message, 0); /* sendtime, filled in last */
+
+ /*
+ * Read the log directly into the output buffer to avoid extra memcpy
+ * calls.
+ */
+ enlargeStringInfo(&output_message, nbytes);
+
+retry:
+ if (!WALRead(xlogreader,
+ &output_message.data[output_message.len],
+ startptr,
+ nbytes,
+ xlogreader->seg.ws_tli, /* Pass the current TLI because
+ * only WalSndSegmentOpen controls
+ * whether new TLI is needed. */
+ &errinfo))
+ WALReadRaiseError(&errinfo);
+
+ /* See logical_read_xlog_page(). */
+ XLByteToSeg(startptr, segno, xlogreader->segcxt.ws_segsize);
+ CheckXLogRemoved(segno, xlogreader->seg.ws_tli);
+
+ /*
+ * During recovery, the currently-open WAL file might be replaced with the
+ * file of the same name retrieved from archive. So we always need to
+ * check what we read was valid after reading into the buffer. If it's
+ * invalid, we try to open and read the file again.
+ */
+ if (am_cascading_walsender)
+ {
+ WalSnd *walsnd = MyWalSnd;
+ bool reload;
+
+ SpinLockAcquire(&walsnd->mutex);
+ reload = walsnd->needreload;
+ walsnd->needreload = false;
+ SpinLockRelease(&walsnd->mutex);
+
+ if (reload && xlogreader->seg.ws_file >= 0)
+ {
+ wal_segment_close(xlogreader);
+
+ goto retry;
+ }
+ }
+
+ output_message.len += nbytes;
+ output_message.data[output_message.len] = '\0';
+
+ /*
+ * Fill the send timestamp last, so that it is taken as late as possible.
+ */
+ resetStringInfo(&tmpbuf);
+ pq_sendint64(&tmpbuf, GetCurrentTimestamp());
+ memcpy(&output_message.data[1 + sizeof(int64) + sizeof(int64)],
+ tmpbuf.data, sizeof(int64));
+
+ pq_putmessage_noblock('d', output_message.data, output_message.len);
+
+ sentPtr = endptr;
+
+ /* Update shared memory status */
+ {
+ WalSnd *walsnd = MyWalSnd;
+
+ SpinLockAcquire(&walsnd->mutex);
+ walsnd->sentPtr = sentPtr;
+ SpinLockRelease(&walsnd->mutex);
+ }
+
+ /* Report progress of XLOG streaming in PS display */
+ if (update_process_title)
+ {
+ char activitymsg[50];
+
+ snprintf(activitymsg, sizeof(activitymsg), "streaming %X/%X",
+ (uint32) (sentPtr >> 32), (uint32) sentPtr);
+ set_ps_display(activitymsg);
+ }
+}
+
+/*
+ * Stream out logically decoded data.
+ */
+static void
+XLogSendLogical(void)
+{
+ XLogRecord *record;
+ char *errm;
+
+ /*
+ * We'll use the current flush point to determine whether we've caught up.
+ * This variable is static in order to cache it across calls. Caching is
+ * helpful because GetFlushRecPtr() needs to acquire a heavily-contended
+ * spinlock.
+ */
+ static XLogRecPtr flushPtr = InvalidXLogRecPtr;
+
+ /*
+ * Don't know whether we've caught up yet. We'll set WalSndCaughtUp to
+ * true in WalSndWaitForWal, if we're actually waiting. We also set it to
+ * true if XLogReadRecord() had to stop reading but WalSndWaitForWal
+ * didn't wait - i.e. when we're shutting down.
+ */
+ WalSndCaughtUp = false;
+
+ record = XLogReadRecord(logical_decoding_ctx->reader, &errm);
+
+ /* xlog record was invalid */
+ if (errm != NULL)
+ elog(ERROR, "%s", errm);
+
+ if (record != NULL)
+ {
+ /*
+ * Note the lack of any call to LagTrackerWrite() here; that is handled
+ * by WalSndUpdateProgress(), which the output plugin invokes through
+ * the logical decoding write API.
+ */
+ LogicalDecodingProcessRecord(logical_decoding_ctx, logical_decoding_ctx->reader);
+
+ sentPtr = logical_decoding_ctx->reader->EndRecPtr;
+ }
+
+ /*
+ * If first time through in this session, initialize flushPtr. Otherwise,
+ * we only need to update flushPtr if EndRecPtr is past it.
+ */
+ if (flushPtr == InvalidXLogRecPtr)
+ flushPtr = GetFlushRecPtr();
+ else if (logical_decoding_ctx->reader->EndRecPtr >= flushPtr)
+ flushPtr = GetFlushRecPtr();
+
+ /* If EndRecPtr is still at or past our flushPtr, we have caught up. */
+ if (logical_decoding_ctx->reader->EndRecPtr >= flushPtr)
+ WalSndCaughtUp = true;
+
+ /*
+ * If we're caught up and have been requested to stop, have WalSndLoop()
+ * terminate the connection in an orderly manner, after writing out all
+ * the pending data.
+ */
+ if (WalSndCaughtUp && got_STOPPING)
+ got_SIGUSR2 = true;
+
+ /* Update shared memory status */
+ {
+ WalSnd *walsnd = MyWalSnd;
+
+ SpinLockAcquire(&walsnd->mutex);
+ walsnd->sentPtr = sentPtr;
+ SpinLockRelease(&walsnd->mutex);
+ }
+}
+
+/*
+ * Shutdown if the sender is caught up.
+ *
+ * NB: This should only be called when the shutdown signal has been received
+ * from postmaster.
+ *
+ * Note that if we determine that there's still more data to send, this
+ * function will return control to the caller.
+ */
+static void
+WalSndDone(WalSndSendDataCallback send_data)
+{
+ XLogRecPtr replicatedPtr;
+
+ /* ... let's just be real sure we're caught up ... */
+ send_data();
+
+ /*
+ * To figure out whether all WAL has successfully been replicated, check
+ * flush location if valid, write otherwise. Tools like pg_receivewal will
+ * usually (unless in synchronous mode) return an invalid flush location.
+ */
+ replicatedPtr = XLogRecPtrIsInvalid(MyWalSnd->flush) ?
+ MyWalSnd->write : MyWalSnd->flush;
+
+ if (WalSndCaughtUp && sentPtr == replicatedPtr &&
+ !pq_is_send_pending())
+ {
+ QueryCompletion qc;
+
+ /* Inform the standby that XLOG streaming is done */
+ SetQueryCompletion(&qc, CMDTAG_COPY, 0);
+ EndCommand(&qc, DestRemote, false);
+ pq_flush();
+
+ proc_exit(0);
+ }
+ if (!waiting_for_ping_response)
+ WalSndKeepalive(true);
+}
+
+/*
+ * Returns the latest point in WAL that has been safely flushed to disk, and
+ * can be sent to the standby. This should only be called when in recovery,
+ * i.e. we're streaming to a cascaded standby.
+ *
+ * As a side-effect, ThisTimeLineID is updated to the TLI of the last
+ * replayed WAL record.
+ */
+static XLogRecPtr
+GetStandbyFlushRecPtr(void)
+{
+ XLogRecPtr replayPtr;
+ TimeLineID replayTLI;
+ XLogRecPtr receivePtr;
+ TimeLineID receiveTLI;
+ XLogRecPtr result;
+
+ /*
+ * We can safely send what's already been replayed. Also, if walreceiver
+ * is streaming WAL from the same timeline, we can send anything that it
+ * has streamed, but hasn't been replayed yet.
+ */
+
+ receivePtr = GetWalRcvFlushRecPtr(NULL, &receiveTLI);
+ replayPtr = GetXLogReplayRecPtr(&replayTLI);
+
+ ThisTimeLineID = replayTLI;
+
+ result = replayPtr;
+ if (receiveTLI == ThisTimeLineID && receivePtr > replayPtr)
+ result = receivePtr;
+
+ return result;
+}
+
+/*
+ * Request walsenders to reload the currently-open WAL file
+ */
+void
+WalSndRqstFileReload(void)
+{
+ int i;
+
+ for (i = 0; i < max_wal_senders; i++)
+ {
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
+
+ SpinLockAcquire(&walsnd->mutex);
+ if (walsnd->pid == 0)
+ {
+ SpinLockRelease(&walsnd->mutex);
+ continue;
+ }
+ walsnd->needreload = true;
+ SpinLockRelease(&walsnd->mutex);
+ }
+}
+
+/*
+ * Handle PROCSIG_WALSND_INIT_STOPPING signal.
+ */
+void
+HandleWalSndInitStopping(void)
+{
+ Assert(am_walsender);
+
+ /*
+ * If replication has not yet started, die as we would on SIGTERM. If
+ * replication is active, only set a flag and wake up the main loop. It
+ * will send any outstanding WAL, wait for it to be replicated to the
+ * standby, and then exit gracefully.
+ */
+ if (!replication_active)
+ kill(MyProcPid, SIGTERM);
+ else
+ got_STOPPING = true;
+}
+
+/*
+ * SIGUSR2: set flag to do a last cycle and shut down afterwards. The WAL
+ * sender should already have been switched to WALSNDSTATE_STOPPING at
+ * this point.
+ */
+static void
+WalSndLastCycleHandler(SIGNAL_ARGS)
+{
+ int save_errno = errno;
+
+ got_SIGUSR2 = true;
+ SetLatch(MyLatch);
+
+ errno = save_errno;
+}
+
+/* Set up signal handlers */
+void
+WalSndSignals(void)
+{
+ /* Set up signal handlers */
+ pqsignal(SIGHUP, SignalHandlerForConfigReload);
+ pqsignal(SIGINT, StatementCancelHandler); /* query cancel */
+ pqsignal(SIGTERM, die); /* request shutdown */
+ pqsignal(SIGQUIT, quickdie); /* hard crash time */
+ InitializeTimeouts(); /* establishes SIGALRM handler */
+ pqsignal(SIGPIPE, SIG_IGN);
+ pqsignal(SIGUSR1, procsignal_sigusr1_handler);
+ pqsignal(SIGUSR2, WalSndLastCycleHandler); /* request a last cycle and
+ * shutdown */
+
+ /* Reset some signals that are accepted by postmaster but not here */
+ pqsignal(SIGCHLD, SIG_DFL);
+}
+
+/* Report shared-memory space needed by WalSndShmemInit */
+Size
+WalSndShmemSize(void)
+{
+ Size size = 0;
+
+ size = offsetof(WalSndCtlData, walsnds);
+ size = add_size(size, mul_size(max_wal_senders, sizeof(WalSnd)));
+
+ return size;
+}
+
+/* Allocate and initialize walsender-related shared memory */
+void
+WalSndShmemInit(void)
+{
+ bool found;
+ int i;
+
+ WalSndCtl = (WalSndCtlData *)
+ ShmemInitStruct("Wal Sender Ctl", WalSndShmemSize(), &found);
+
+ if (!found)
+ {
+ /* First time through, so initialize */
+ MemSet(WalSndCtl, 0, WalSndShmemSize());
+
+ for (i = 0; i < NUM_SYNC_REP_WAIT_MODE; i++)
+ SHMQueueInit(&(WalSndCtl->SyncRepQueue[i]));
+
+ for (i = 0; i < max_wal_senders; i++)
+ {
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
+
+ SpinLockInit(&walsnd->mutex);
+ }
+ }
+}
+
+/*
+ * Wake up all walsenders
+ *
+ * This will be called inside critical sections, so throwing an error is not
+ * advisable.
+ */
+void
+WalSndWakeup(void)
+{
+ int i;
+
+ for (i = 0; i < max_wal_senders; i++)
+ {
+ Latch *latch;
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
+
+ /*
+ * Get latch pointer with spinlock held, for the unlikely case that
+ * pointer reads aren't atomic (as they're 8 bytes).
+ */
+ SpinLockAcquire(&walsnd->mutex);
+ latch = walsnd->latch;
+ SpinLockRelease(&walsnd->mutex);
+
+ if (latch != NULL)
+ SetLatch(latch);
+ }
+}
+
+/*
+ * Signal all walsenders to move to stopping state.
+ *
+ * This will trigger walsenders to move to a state where no further WAL can be
+ * generated. See this file's header for details.
+ */
+void
+WalSndInitStopping(void)
+{
+ int i;
+
+ for (i = 0; i < max_wal_senders; i++)
+ {
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
+ pid_t pid;
+
+ SpinLockAcquire(&walsnd->mutex);
+ pid = walsnd->pid;
+ SpinLockRelease(&walsnd->mutex);
+
+ if (pid == 0)
+ continue;
+
+ SendProcSignal(pid, PROCSIG_WALSND_INIT_STOPPING, InvalidBackendId);
+ }
+}
+
+/*
+ * Wait until all WAL senders have quit or reached the stopping state. This
+ * is used by the checkpointer to control when the shutdown checkpoint can
+ * safely be performed.
+ */
+void
+WalSndWaitStopping(void)
+{
+ for (;;)
+ {
+ int i;
+ bool all_stopped = true;
+
+ for (i = 0; i < max_wal_senders; i++)
+ {
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
+
+ SpinLockAcquire(&walsnd->mutex);
+
+ if (walsnd->pid == 0)
+ {
+ SpinLockRelease(&walsnd->mutex);
+ continue;
+ }
+
+ if (walsnd->state != WALSNDSTATE_STOPPING)
+ {
+ all_stopped = false;
+ SpinLockRelease(&walsnd->mutex);
+ break;
+ }
+ SpinLockRelease(&walsnd->mutex);
+ }
+
+ /* safe to leave if confirmation is done for all WAL senders */
+ if (all_stopped)
+ return;
+
+ pg_usleep(10000L); /* wait for 10 msec */
+ }
+}
+
+/* Set state for current walsender (only called in walsender) */
+void
+WalSndSetState(WalSndState state)
+{
+ WalSnd *walsnd = MyWalSnd;
+
+ Assert(am_walsender);
+
+ if (walsnd->state == state)
+ return;
+
+ SpinLockAcquire(&walsnd->mutex);
+ walsnd->state = state;
+ SpinLockRelease(&walsnd->mutex);
+}
+
+/*
+ * Return a string constant representing the state. This is used
+ * in system views, and should *not* be translated.
+ */
+static const char *
+WalSndGetStateString(WalSndState state)
+{
+ switch (state)
+ {
+ case WALSNDSTATE_STARTUP:
+ return "startup";
+ case WALSNDSTATE_BACKUP:
+ return "backup";
+ case WALSNDSTATE_CATCHUP:
+ return "catchup";
+ case WALSNDSTATE_STREAMING:
+ return "streaming";
+ case WALSNDSTATE_STOPPING:
+ return "stopping";
+ }
+ return "UNKNOWN";
+}
+
+static Interval *
+offset_to_interval(TimeOffset offset)
+{
+ Interval *result = palloc(sizeof(Interval));
+
+ result->month = 0;
+ result->day = 0;
+ result->time = offset;
+
+ return result;
+}
+
+/*
+ * Returns activity of walsenders, including pids and xlog locations sent to
+ * standby servers.
+ */
+Datum
+pg_stat_get_wal_senders(PG_FUNCTION_ARGS)
+{
+#define PG_STAT_GET_WAL_SENDERS_COLS 12
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ SyncRepStandbyData *sync_standbys;
+ int num_standbys;
+ int i;
+
+ /* check to see if caller supports us returning a tuplestore */
+ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-valued function called in context that cannot accept a set")));
+ if (!(rsinfo->allowedModes & SFRM_Materialize))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("materialize mode required, but it is not allowed in this context")));
+
+ /* Build a tuple descriptor for our result type */
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+
+ tupstore = tuplestore_begin_heap(true, false, work_mem);
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setResult = tupstore;
+ rsinfo->setDesc = tupdesc;
+
+ MemoryContextSwitchTo(oldcontext);
+
+ /*
+ * Get the currently active synchronous standbys. This could be out of
+ * date before we're done, but we'll use the data anyway.
+ */
+ num_standbys = SyncRepGetCandidateStandbys(&sync_standbys);
+
+ for (i = 0; i < max_wal_senders; i++)
+ {
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
+ XLogRecPtr sentPtr;
+ XLogRecPtr write;
+ XLogRecPtr flush;
+ XLogRecPtr apply;
+ TimeOffset writeLag;
+ TimeOffset flushLag;
+ TimeOffset applyLag;
+ int priority;
+ int pid;
+ WalSndState state;
+ TimestampTz replyTime;
+ bool is_sync_standby;
+ Datum values[PG_STAT_GET_WAL_SENDERS_COLS];
+ bool nulls[PG_STAT_GET_WAL_SENDERS_COLS];
+ int j;
+
+ /* Collect data from shared memory */
+ SpinLockAcquire(&walsnd->mutex);
+ if (walsnd->pid == 0)
+ {
+ SpinLockRelease(&walsnd->mutex);
+ continue;
+ }
+ pid = walsnd->pid;
+ sentPtr = walsnd->sentPtr;
+ state = walsnd->state;
+ write = walsnd->write;
+ flush = walsnd->flush;
+ apply = walsnd->apply;
+ writeLag = walsnd->writeLag;
+ flushLag = walsnd->flushLag;
+ applyLag = walsnd->applyLag;
+ priority = walsnd->sync_standby_priority;
+ replyTime = walsnd->replyTime;
+ SpinLockRelease(&walsnd->mutex);
+
+ /*
+ * Detect whether walsender is/was considered synchronous. We can
+ * provide some protection against stale data by checking the PID
+ * along with walsnd_index.
+ */
+ is_sync_standby = false;
+ for (j = 0; j < num_standbys; j++)
+ {
+ if (sync_standbys[j].walsnd_index == i &&
+ sync_standbys[j].pid == pid)
+ {
+ is_sync_standby = true;
+ break;
+ }
+ }
+
+ memset(nulls, 0, sizeof(nulls));
+ values[0] = Int32GetDatum(pid);
+
+ if (!is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS))
+ {
+ /*
+ * Only superusers and members of pg_read_all_stats can see
+ * details. Other users only get the pid value to know it's a
+ * walsender, but no details.
+ */
+ MemSet(&nulls[1], true, PG_STAT_GET_WAL_SENDERS_COLS - 1);
+ }
+ else
+ {
+ values[1] = CStringGetTextDatum(WalSndGetStateString(state));
+
+ if (XLogRecPtrIsInvalid(sentPtr))
+ nulls[2] = true;
+ values[2] = LSNGetDatum(sentPtr);
+
+ if (XLogRecPtrIsInvalid(write))
+ nulls[3] = true;
+ values[3] = LSNGetDatum(write);
+
+ if (XLogRecPtrIsInvalid(flush))
+ nulls[4] = true;
+ values[4] = LSNGetDatum(flush);
+
+ if (XLogRecPtrIsInvalid(apply))
+ nulls[5] = true;
+ values[5] = LSNGetDatum(apply);
+
+ /*
+ * Treat a standby, such as a pg_basebackup background process
+ * which always returns an invalid flush location, as an
+ * asynchronous standby.
+ */
+ priority = XLogRecPtrIsInvalid(flush) ? 0 : priority;
+
+ if (writeLag < 0)
+ nulls[6] = true;
+ else
+ values[6] = IntervalPGetDatum(offset_to_interval(writeLag));
+
+ if (flushLag < 0)
+ nulls[7] = true;
+ else
+ values[7] = IntervalPGetDatum(offset_to_interval(flushLag));
+
+ if (applyLag < 0)
+ nulls[8] = true;
+ else
+ values[8] = IntervalPGetDatum(offset_to_interval(applyLag));
+
+ values[9] = Int32GetDatum(priority);
+
+ /*
+ * More easily understood version of standby state. This is purely
+ * informational.
+ *
+ * In quorum-based sync replication, the role of each standby
+ * listed in synchronous_standby_names can be changing very
+ * frequently. Any standbys considered as "sync" at one moment can
+ * be switched to "potential" ones at the next moment. So, it's
+ * basically useless to report "sync" or "potential" as their sync
+ * states. We report just "quorum" for them.
+ */
+ if (priority == 0)
+ values[10] = CStringGetTextDatum("async");
+ else if (is_sync_standby)
+ values[10] = SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY ?
+ CStringGetTextDatum("sync") : CStringGetTextDatum("quorum");
+ else
+ values[10] = CStringGetTextDatum("potential");
+
+ if (replyTime == 0)
+ nulls[11] = true;
+ else
+ values[11] = TimestampTzGetDatum(replyTime);
+ }
+
+ tuplestore_putvalues(tupstore, tupdesc, values, nulls);
+ }
+
+ /* clean up and return the tuplestore */
+ tuplestore_donestoring(tupstore);
+
+ return (Datum) 0;
+}
+
+/*
+ * Send a keepalive message to standby.
+ *
+ * If requestReply is set, the message requests the other party to send
+ * a message back to us, for heartbeat purposes. We also set a flag to
+ * let nearby code know that we're waiting for that response, to avoid
+ * repeated requests.
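+ *
+ * As constructed below, the message body is a 'k' byte, the current
+ * sentPtr and a current timestamp as int64s, and the reply-request flag
+ * as a single byte, all wrapped in a CopyData ('d') message.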
+ */
+static void
+WalSndKeepalive(bool requestReply)
+{
+ elog(DEBUG2, "sending replication keepalive");
+
+ /* construct the message... */
+ resetStringInfo(&output_message);
+ pq_sendbyte(&output_message, 'k');
+ pq_sendint64(&output_message, sentPtr);
+ pq_sendint64(&output_message, GetCurrentTimestamp());
+ pq_sendbyte(&output_message, requestReply ? 1 : 0);
+
+ /* ... and send it wrapped in CopyData */
+ pq_putmessage_noblock('d', output_message.data, output_message.len);
+
+ /* Set local flag */
+ if (requestReply)
+ waiting_for_ping_response = true;
+}
+
+/*
+ * Send keepalive message if too much time has elapsed.
+ */
+static void
+WalSndKeepaliveIfNecessary(void)
+{
+ TimestampTz ping_time;
+
+ /*
+ * Don't send keepalive messages if timeouts are globally disabled or
+ * we're doing something not partaking in timeouts.
+ */
+ if (wal_sender_timeout <= 0 || last_reply_timestamp <= 0)
+ return;
+
+ if (waiting_for_ping_response)
+ return;
+
+ /*
+ * If half of wal_sender_timeout has elapsed without receiving any reply
+ * from the standby, send a keep-alive message to the standby requesting
+ * an immediate reply.
+ */
+ ping_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
+ wal_sender_timeout / 2);
+ if (last_processing >= ping_time)
+ {
+ WalSndKeepalive(true);
+
+ /* Try to flush pending output to the client */
+ if (pq_flush_if_writable() != 0)
+ WalSndShutdown();
+ }
+}
+
+/*
+ * Record the end of the WAL and the time it was flushed locally, so that
+ * LagTrackerRead can compute the elapsed time (lag) when this WAL location is
+ * eventually reported to have been written, flushed and applied by the
+ * standby in a reply message.
+ */
+static void
+LagTrackerWrite(XLogRecPtr lsn, TimestampTz local_flush_time)
+{
+ bool buffer_full;
+ int new_write_head;
+ int i;
+
+ if (!am_walsender)
+ return;
+
+ /*
+ * If the lsn hasn't advanced since last time, then do nothing. This way
+ * we only record a new sample when new WAL has been written.
+ */
+ if (lag_tracker->last_lsn == lsn)
+ return;
+ lag_tracker->last_lsn = lsn;
+
+ /*
+ * If advancing the write head of the circular buffer would crash into any
+ * of the read heads, then the buffer is full. In other words, the
+ * slowest reader (presumably apply) is the one that controls the release
+ * of space.
+ */
+ new_write_head = (lag_tracker->write_head + 1) % LAG_TRACKER_BUFFER_SIZE;
+ buffer_full = false;
+ for (i = 0; i < NUM_SYNC_REP_WAIT_MODE; ++i)
+ {
+ if (new_write_head == lag_tracker->read_heads[i])
+ buffer_full = true;
+ }
+
+ /*
+ * If the buffer is full, for now we just rewind by one slot and overwrite
+ * the last sample, as a simple (if somewhat uneven) way to lower the
+ * sampling rate. There may be better adaptive compaction algorithms.
+ */
+ if (buffer_full)
+ {
+ new_write_head = lag_tracker->write_head;
+ if (lag_tracker->write_head > 0)
+ lag_tracker->write_head--;
+ else
+ lag_tracker->write_head = LAG_TRACKER_BUFFER_SIZE - 1;
+ }
+
+ /* Store a sample at the current write head position. */
+ lag_tracker->buffer[lag_tracker->write_head].lsn = lsn;
+ lag_tracker->buffer[lag_tracker->write_head].time = local_flush_time;
+ lag_tracker->write_head = new_write_head;
+}
+
+/*
+ * Find out how much time has elapsed between the moment WAL location 'lsn'
+ * (or the highest known earlier LSN) was flushed locally and the time 'now'.
+ * We have a separate read head for each of the reported LSN locations we
+ * receive in replies from standby; 'head' controls which read head is
+ * used. Whenever a read head crosses an LSN which was written into the
+ * lag buffer with LagTrackerWrite, we can use the associated timestamp to
+ * find out the time this LSN (or an earlier one) was flushed locally, and
+ * therefore compute the lag.
+ *
+ * Return -1 if no new sample data is available, and otherwise the elapsed
+ * time in microseconds.
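+ *
+ * As a worked example of the interpolation case below (numbers
+ * hypothetical): given a previous sample (lsn 1000, time 10s) and a next
+ * sample (lsn 2000, time 20s), a reported lsn of 1500 yields fraction 0.5
+ * and an interpolated local flush time of 15s, so the reported lag is
+ * now - 15s.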
+ */
+static TimeOffset
+LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now)
+{
+ TimestampTz time = 0;
+
+ /* Read all unread samples up to this LSN or end of buffer. */
+ while (lag_tracker->read_heads[head] != lag_tracker->write_head &&
+ lag_tracker->buffer[lag_tracker->read_heads[head]].lsn <= lsn)
+ {
+ time = lag_tracker->buffer[lag_tracker->read_heads[head]].time;
+ lag_tracker->last_read[head] =
+ lag_tracker->buffer[lag_tracker->read_heads[head]];
+ lag_tracker->read_heads[head] =
+ (lag_tracker->read_heads[head] + 1) % LAG_TRACKER_BUFFER_SIZE;
+ }
+
+ /*
+ * If the lag tracker is empty, that means the standby has processed
+ * everything we've ever sent so we should now clear 'last_read'. If we
+ * didn't do that, we'd risk using a stale and irrelevant sample for
+ * interpolation at the beginning of the next burst of WAL after a period
+ * of idleness.
+ */
+ if (lag_tracker->read_heads[head] == lag_tracker->write_head)
+ lag_tracker->last_read[head].time = 0;
+
+ if (time > now)
+ {
+ /* If the clock somehow went backwards, treat as not found. */
+ return -1;
+ }
+ else if (time == 0)
+ {
+ /*
+ * We didn't cross a time. If there is a future sample that we
+ * haven't reached yet, and we've already reached at least one sample,
+ * let's interpolate the local flushed time. This is mainly useful
+ * for reporting a completely stuck apply position as having
+ * increasing lag, since otherwise we'd have to wait for it to
+ * eventually start moving again and cross one of our samples before
+ * we can show the lag increasing.
+ */
+ if (lag_tracker->read_heads[head] == lag_tracker->write_head)
+ {
+ /* There are no future samples, so we can't interpolate. */
+ return -1;
+ }
+ else if (lag_tracker->last_read[head].time != 0)
+ {
+ /* We can interpolate between last_read and the next sample. */
+ double fraction;
+ WalTimeSample prev = lag_tracker->last_read[head];
+ WalTimeSample next = lag_tracker->buffer[lag_tracker->read_heads[head]];
+
+ if (lsn < prev.lsn)
+ {
+ /*
+ * Reported LSNs shouldn't normally go backwards, but it's
+ * possible when there is a timeline change. Treat as not
+ * found.
+ */
+ return -1;
+ }
+
+ Assert(prev.lsn < next.lsn);
+
+ if (prev.time > next.time)
+ {
+ /* If the clock somehow went backwards, treat as not found. */
+ return -1;
+ }
+
+ /* See how far we are between the previous and next samples. */
+ fraction =
+ (double) (lsn - prev.lsn) / (double) (next.lsn - prev.lsn);
+
+ /* Scale the local flush time proportionally. */
+ time = (TimestampTz)
+ ((double) prev.time + (next.time - prev.time) * fraction);
+ }
+ else
+ {
+ /*
+ * We have only a future sample, implying that we were entirely
+ * caught up, but now there is a new burst of WAL and the
+ * standby hasn't processed the first sample yet. Until the
+ * standby reaches the future sample the best we can do is report
+ * the hypothetical lag if that sample were to be replayed now.
+ */
+ time = lag_tracker->buffer[lag_tracker->read_heads[head]].time;
+ }
+ }
+
+ /* Return the elapsed time since local flush time in microseconds. */
+ Assert(time != 0);
+ return now - time;
+}