Diffstat (limited to 'contrib/postgres_fdw')
-rw-r--r--  contrib/postgres_fdw/.gitignore                 |     4
-rw-r--r--  contrib/postgres_fdw/Makefile                   |    31
-rw-r--r--  contrib/postgres_fdw/connection.c               |  1673
-rw-r--r--  contrib/postgres_fdw/deparse.c                  |  3613
-rw-r--r--  contrib/postgres_fdw/expected/postgres_fdw.out  | 10858
-rw-r--r--  contrib/postgres_fdw/option.c                   |   437
-rw-r--r--  contrib/postgres_fdw/postgres_fdw--1.0--1.1.sql |    20
-rw-r--r--  contrib/postgres_fdw/postgres_fdw--1.0.sql      |    18
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.c             |  7558
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.control       |     5
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.h             |   238
-rw-r--r--  contrib/postgres_fdw/shippable.c                |   211
-rw-r--r--  contrib/postgres_fdw/sql/postgres_fdw.sql       |  3466
13 files changed, 28132 insertions, 0 deletions
diff --git a/contrib/postgres_fdw/.gitignore b/contrib/postgres_fdw/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/contrib/postgres_fdw/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/contrib/postgres_fdw/Makefile b/contrib/postgres_fdw/Makefile
new file mode 100644
index 0000000..c1b0cad
--- /dev/null
+++ b/contrib/postgres_fdw/Makefile
@@ -0,0 +1,31 @@
+# contrib/postgres_fdw/Makefile
+
+MODULE_big = postgres_fdw
+OBJS = \
+ $(WIN32RES) \
+ connection.o \
+ deparse.o \
+ option.o \
+ postgres_fdw.o \
+ shippable.o
+PGFILEDESC = "postgres_fdw - foreign data wrapper for PostgreSQL"
+
+PG_CPPFLAGS = -I$(libpq_srcdir)
+SHLIB_LINK_INTERNAL = $(libpq)
+
+EXTENSION = postgres_fdw
+DATA = postgres_fdw--1.0.sql postgres_fdw--1.0--1.1.sql
+
+REGRESS = postgres_fdw
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+SHLIB_PREREQS = submake-libpq
+subdir = contrib/postgres_fdw
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
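+
+# Build-command sketch (illustrative, not part of the original patch): an
+# out-of-tree build via PGXS, with pg_config on PATH, is typically
+#   make USE_PGXS=1
+#   make USE_PGXS=1 install
+# whereas an in-tree build just runs "make" from contrib/postgres_fdw.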
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
new file mode 100644
index 0000000..3528441
--- /dev/null
+++ b/contrib/postgres_fdw/connection.c
@@ -0,0 +1,1673 @@
+/*-------------------------------------------------------------------------
+ *
+ * connection.c
+ * Connection management functions for postgres_fdw
+ *
+ * Portions Copyright (c) 2012-2021, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/connection.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "access/xact.h"
+#include "catalog/pg_user_mapping.h"
+#include "commands/defrem.h"
+#include "funcapi.h"
+#include "mb/pg_wchar.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "postgres_fdw.h"
+#include "storage/fd.h"
+#include "storage/latch.h"
+#include "utils/builtins.h"
+#include "utils/datetime.h"
+#include "utils/hsearch.h"
+#include "utils/inval.h"
+#include "utils/memutils.h"
+#include "utils/syscache.h"
+
+/*
+ * Connection cache hash table entry
+ *
+ * The lookup key in this hash table is the user mapping OID. We use just one
+ * connection per user mapping ID, which ensures that all the scans use the
+ * same snapshot during a query. Using the user mapping OID rather than
+ * the foreign server OID + user OID avoids creating multiple connections when
+ * the public user mapping applies to all user OIDs.
+ *
+ * The "conn" pointer can be NULL if we don't currently have a live connection.
+ * When we do have a connection, xact_depth tracks the current depth of
+ * transactions and subtransactions open on the remote side. We need to issue
+ * commands at the same nesting depth on the remote as we're executing at
+ * ourselves, so that rolling back a subtransaction will kill the right
+ * queries and not the wrong ones.
+ */
+typedef Oid ConnCacheKey;
+
+typedef struct ConnCacheEntry
+{
+ ConnCacheKey key; /* hash key (must be first) */
+ PGconn *conn; /* connection to foreign server, or NULL */
+ /* Remaining fields are invalid when conn is NULL: */
+ int xact_depth; /* 0 = no xact open, 1 = main xact open, 2 =
+ * one level of subxact open, etc */
+ bool have_prep_stmt; /* have we prepared any stmts in this xact? */
+ bool have_error; /* have any subxacts aborted in this xact? */
+ bool changing_xact_state; /* xact state change in process */
+ bool invalidated; /* true if reconnect is pending */
+ bool keep_connections; /* setting value of keep_connections
+ * server option */
+ Oid serverid; /* foreign server OID used to get server name */
+ uint32 server_hashvalue; /* hash value of foreign server OID */
+ uint32 mapping_hashvalue; /* hash value of user mapping OID */
+ PgFdwConnState state; /* extra per-connection state */
+} ConnCacheEntry;
+
+/*
+ * Connection cache (initialized on first use)
+ */
+static HTAB *ConnectionHash = NULL;
+
+/* for assigning cursor numbers and prepared statement numbers */
+static unsigned int cursor_number = 0;
+static unsigned int prep_stmt_number = 0;
+
+/* tracks whether any work is needed in callback functions */
+static bool xact_got_connection = false;
+
+/*
+ * SQL functions
+ */
+PG_FUNCTION_INFO_V1(postgres_fdw_get_connections);
+PG_FUNCTION_INFO_V1(postgres_fdw_disconnect);
+PG_FUNCTION_INFO_V1(postgres_fdw_disconnect_all);
+
+/* prototypes of private functions */
+static void make_new_connection(ConnCacheEntry *entry, UserMapping *user);
+static PGconn *connect_pg_server(ForeignServer *server, UserMapping *user);
+static void disconnect_pg_server(ConnCacheEntry *entry);
+static void check_conn_params(const char **keywords, const char **values, UserMapping *user);
+static void configure_remote_session(PGconn *conn);
+static void begin_remote_xact(ConnCacheEntry *entry);
+static void pgfdw_xact_callback(XactEvent event, void *arg);
+static void pgfdw_subxact_callback(SubXactEvent event,
+ SubTransactionId mySubid,
+ SubTransactionId parentSubid,
+ void *arg);
+static void pgfdw_inval_callback(Datum arg, int cacheid, uint32 hashvalue);
+static void pgfdw_reject_incomplete_xact_state_change(ConnCacheEntry *entry);
+static bool pgfdw_cancel_query(PGconn *conn);
+static bool pgfdw_exec_cleanup_query(PGconn *conn, const char *query,
+ bool ignore_errors);
+static bool pgfdw_get_cleanup_result(PGconn *conn, TimestampTz endtime,
+ PGresult **result);
+static bool UserMappingPasswordRequired(UserMapping *user);
+static bool disconnect_cached_connections(Oid serverid);
+
+/*
+ * Get a PGconn which can be used to execute queries on the remote PostgreSQL
+ * server with the user's authorization. A new connection is established
+ * if we don't already have a suitable one, and a transaction is opened at
+ * the right subtransaction nesting depth if we didn't do that already.
+ *
+ * will_prep_stmt must be true if caller intends to create any prepared
+ * statements. Since those don't go away automatically at transaction end
+ * (not even on error), we need this flag to cue manual cleanup.
+ *
+ * If state is not NULL, *state receives the per-connection state associated
+ * with the PGconn.
+ */
+PGconn *
+GetConnection(UserMapping *user, bool will_prep_stmt, PgFdwConnState **state)
+{
+ bool found;
+ bool retry = false;
+ ConnCacheEntry *entry;
+ ConnCacheKey key;
+ MemoryContext ccxt = CurrentMemoryContext;
+
+ /* First time through, initialize connection cache hashtable */
+ if (ConnectionHash == NULL)
+ {
+ HASHCTL ctl;
+
+ ctl.keysize = sizeof(ConnCacheKey);
+ ctl.entrysize = sizeof(ConnCacheEntry);
+ ConnectionHash = hash_create("postgres_fdw connections", 8,
+ &ctl,
+ HASH_ELEM | HASH_BLOBS);
+
+ /*
+ * Register some callback functions that manage connection cleanup.
+ * This should be done just once in each backend.
+ */
+ RegisterXactCallback(pgfdw_xact_callback, NULL);
+ RegisterSubXactCallback(pgfdw_subxact_callback, NULL);
+ CacheRegisterSyscacheCallback(FOREIGNSERVEROID,
+ pgfdw_inval_callback, (Datum) 0);
+ CacheRegisterSyscacheCallback(USERMAPPINGOID,
+ pgfdw_inval_callback, (Datum) 0);
+ }
+
+ /* Set flag that we did GetConnection during the current transaction */
+ xact_got_connection = true;
+
+ /* Create hash key for the entry. Assume no pad bytes in key struct */
+ key = user->umid;
+
+ /*
+ * Find or create cached entry for requested connection.
+ */
+ entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found);
+ if (!found)
+ {
+ /*
+ * We need only clear "conn" here; remaining fields will be filled
+ * later when "conn" is set.
+ */
+ entry->conn = NULL;
+ }
+
+ /* Reject further use of connections which failed abort cleanup. */
+ pgfdw_reject_incomplete_xact_state_change(entry);
+
+ /*
+ * If the connection needs to be remade due to invalidation, disconnect as
+ * soon as we're out of all transactions.
+ */
+ if (entry->conn != NULL && entry->invalidated && entry->xact_depth == 0)
+ {
+ elog(DEBUG3, "closing connection %p for option changes to take effect",
+ entry->conn);
+ disconnect_pg_server(entry);
+ }
+
+ /*
+ * If cache entry doesn't have a connection, we have to establish a new
+ * connection. (If connect_pg_server throws an error, the cache entry
+ * will remain in a valid empty state, ie conn == NULL.)
+ */
+ if (entry->conn == NULL)
+ make_new_connection(entry, user);
+
+ /*
+ * We check the health of the cached connection here when using it. In
+ * cases where we're out of all transactions, if a broken connection is
+ * detected, we try to reestablish a new connection later.
+ */
+ PG_TRY();
+ {
+ /* Process a pending asynchronous request if any. */
+ if (entry->state.pendingAreq)
+ process_pending_request(entry->state.pendingAreq);
+ /* Start a new transaction or subtransaction if needed. */
+ begin_remote_xact(entry);
+ }
+ PG_CATCH();
+ {
+ MemoryContext ecxt = MemoryContextSwitchTo(ccxt);
+ ErrorData *errdata = CopyErrorData();
+
+ /*
+ * Determine whether to try to reestablish the connection.
+ *
+ * After a broken connection is detected in libpq, any error other
+ * than connection failure (e.g., out-of-memory) can be thrown
+ * somewhere between return from libpq and the expected ereport() call
+ * in pgfdw_report_error(). In this case, since PQstatus() indicates
+ * CONNECTION_BAD, checking only PQstatus() causes the false detection
+ * of connection failure. To avoid this, we also verify that the
+ * error's sqlstate is ERRCODE_CONNECTION_FAILURE. Note that also
+ * checking only the sqlstate can cause another false detection
+ * because pgfdw_report_error() may report ERRCODE_CONNECTION_FAILURE
+ * for any libpq-originated error condition.
+ */
+ if (errdata->sqlerrcode != ERRCODE_CONNECTION_FAILURE ||
+ PQstatus(entry->conn) != CONNECTION_BAD ||
+ entry->xact_depth > 0)
+ {
+ MemoryContextSwitchTo(ecxt);
+ PG_RE_THROW();
+ }
+
+ /* Clean up the error state */
+ FlushErrorState();
+ FreeErrorData(errdata);
+ errdata = NULL;
+
+ retry = true;
+ }
+ PG_END_TRY();
+
+ /*
+ * If a broken connection is detected, disconnect it, reestablish a new
+ * connection and retry a new remote transaction. If connection failure is
+ * reported again, we give up getting a connection.
+ */
+ if (retry)
+ {
+ Assert(entry->xact_depth == 0);
+
+ ereport(DEBUG3,
+ (errmsg_internal("could not start remote transaction on connection %p",
+ entry->conn)),
+ errdetail_internal("%s", pchomp(PQerrorMessage(entry->conn))));
+
+ elog(DEBUG3, "closing connection %p to reestablish a new one",
+ entry->conn);
+ disconnect_pg_server(entry);
+
+ if (entry->conn == NULL)
+ make_new_connection(entry, user);
+
+ begin_remote_xact(entry);
+ }
+
+ /* Remember if caller will prepare statements */
+ entry->have_prep_stmt |= will_prep_stmt;
+
+ /* If caller needs access to the per-connection state, return it. */
+ if (state)
+ *state = &entry->state;
+
+ return entry->conn;
+}
+
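+/*
+ * Illustrative caller sketch (not part of this file; "userid" and "serverid"
+ * are assumed to be known to the caller):
+ *
+ *     UserMapping *user = GetUserMapping(userid, serverid);
+ *     PgFdwConnState *conn_state;
+ *     PGconn *conn = GetConnection(user, false, &conn_state);
+ *     ... run remote queries via pgfdw_exec_query(conn, sql, conn_state) ...
+ *     ReleaseConnection(conn);
+ */
+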
+/*
+ * Reset all transient state fields in the cached connection entry and
+ * establish new connection to the remote server.
+ */
+static void
+make_new_connection(ConnCacheEntry *entry, UserMapping *user)
+{
+ ForeignServer *server = GetForeignServer(user->serverid);
+ ListCell *lc;
+
+ Assert(entry->conn == NULL);
+
+ /* Reset all transient state fields, to be sure all are clean */
+ entry->xact_depth = 0;
+ entry->have_prep_stmt = false;
+ entry->have_error = false;
+ entry->changing_xact_state = false;
+ entry->invalidated = false;
+ entry->serverid = server->serverid;
+ entry->server_hashvalue =
+ GetSysCacheHashValue1(FOREIGNSERVEROID,
+ ObjectIdGetDatum(server->serverid));
+ entry->mapping_hashvalue =
+ GetSysCacheHashValue1(USERMAPPINGOID,
+ ObjectIdGetDatum(user->umid));
+ memset(&entry->state, 0, sizeof(entry->state));
+
+ /*
+ * Determine whether to keep the connection that we're about to make here
+ * open even after the transaction using it ends, so that subsequent
+ * transactions can re-use it.
+ *
+ * It's enough to determine this only when making a new connection because
+ * all connections to a foreign server whose keep_connections option is
+ * changed will be closed and re-made later.
+ *
+ * By default, all connections to any foreign server are kept open.
+ */
+ entry->keep_connections = true;
+ foreach(lc, server->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "keep_connections") == 0)
+ entry->keep_connections = defGetBoolean(def);
+ }
+
+ /* Now try to make the connection */
+ entry->conn = connect_pg_server(server, user);
+
+ elog(DEBUG3, "new postgres_fdw connection %p for server \"%s\" (user mapping oid %u, userid %u)",
+ entry->conn, server->servername, user->umid, user->userid);
+}
+
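+/*
+ * For reference (illustrative SQL, not part of this patch): the server-level
+ * option consulted above is set like this, where "loopback" is a hypothetical
+ * server name:
+ *
+ *     ALTER SERVER loopback OPTIONS (ADD keep_connections 'off');
+ */
+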
+/*
+ * Connect to remote server using specified server and user mapping properties.
+ */
+static PGconn *
+connect_pg_server(ForeignServer *server, UserMapping *user)
+{
+ PGconn *volatile conn = NULL;
+
+ /*
+ * Use PG_TRY block to ensure closing connection on error.
+ */
+ PG_TRY();
+ {
+ const char **keywords;
+ const char **values;
+ int n;
+
+ /*
+ * Construct connection params from generic options of ForeignServer
+ * and UserMapping. (Some of them might not be libpq options, in
+ * which case we'll just waste a few array slots.) Add 3 extra slots
+ * for fallback_application_name, client_encoding, end marker.
+ */
+ n = list_length(server->options) + list_length(user->options) + 3;
+ keywords = (const char **) palloc(n * sizeof(char *));
+ values = (const char **) palloc(n * sizeof(char *));
+
+ n = 0;
+ n += ExtractConnectionOptions(server->options,
+ keywords + n, values + n);
+ n += ExtractConnectionOptions(user->options,
+ keywords + n, values + n);
+
+ /* Use "postgres_fdw" as fallback_application_name. */
+ keywords[n] = "fallback_application_name";
+ values[n] = "postgres_fdw";
+ n++;
+
+ /* Set client_encoding so that libpq can convert encoding properly. */
+ keywords[n] = "client_encoding";
+ values[n] = GetDatabaseEncodingName();
+ n++;
+
+ keywords[n] = values[n] = NULL;
+
+ /* verify the set of connection parameters */
+ check_conn_params(keywords, values, user);
+
+ /*
+ * We must obey fd.c's limit on non-virtual file descriptors. Assume
+ * that a PGconn represents one long-lived FD. (Doing this here also
+ * ensures that VFDs are closed if needed to make room.)
+ */
+ if (!AcquireExternalFD())
+ {
+#ifndef WIN32 /* can't write #if within ereport() macro */
+ ereport(ERROR,
+ (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
+ errmsg("could not connect to server \"%s\"",
+ server->servername),
+ errdetail("There are too many open files on the local server."),
+ errhint("Raise the server's max_files_per_process and/or \"ulimit -n\" limits.")));
+#else
+ ereport(ERROR,
+ (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
+ errmsg("could not connect to server \"%s\"",
+ server->servername),
+ errdetail("There are too many open files on the local server."),
+ errhint("Raise the server's max_files_per_process setting.")));
+#endif
+ }
+
+ /* OK to make connection */
+ conn = PQconnectdbParams(keywords, values, false);
+
+ if (!conn)
+ ReleaseExternalFD(); /* because the PG_CATCH block won't */
+
+ if (!conn || PQstatus(conn) != CONNECTION_OK)
+ ereport(ERROR,
+ (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
+ errmsg("could not connect to server \"%s\"",
+ server->servername),
+ errdetail_internal("%s", pchomp(PQerrorMessage(conn)))));
+
+ /*
+ * Check that non-superuser has used password to establish connection;
+ * otherwise, he's piggybacking on the postgres server's user
+ * identity. See also dblink_security_check() in contrib/dblink and
+ * check_conn_params.
+ */
+ if (!superuser_arg(user->userid) && UserMappingPasswordRequired(user) &&
+ !PQconnectionUsedPassword(conn))
+ ereport(ERROR,
+ (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
+ errmsg("password is required"),
+ errdetail("Non-superuser cannot connect if the server does not request a password."),
+ errhint("Target server's authentication method must be changed or password_required=false set in the user mapping attributes.")));
+
+ /* Prepare new session for use */
+ configure_remote_session(conn);
+
+ pfree(keywords);
+ pfree(values);
+ }
+ PG_CATCH();
+ {
+ /* Release PGconn data structure if we managed to create one */
+ if (conn)
+ {
+ PQfinish(conn);
+ ReleaseExternalFD();
+ }
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ return conn;
+}
+
+/*
+ * Disconnect any open connection for a connection cache entry.
+ */
+static void
+disconnect_pg_server(ConnCacheEntry *entry)
+{
+ if (entry->conn != NULL)
+ {
+ PQfinish(entry->conn);
+ entry->conn = NULL;
+ ReleaseExternalFD();
+ }
+}
+
+/*
+ * Return true unless the password_required option is defined and set to false
+ * for this user mapping; i.e., report whether a password is required. The
+ * mapping has been pre-validated.
+ */
+static bool
+UserMappingPasswordRequired(UserMapping *user)
+{
+ ListCell *cell;
+
+ foreach(cell, user->options)
+ {
+ DefElem *def = (DefElem *) lfirst(cell);
+
+ if (strcmp(def->defname, "password_required") == 0)
+ return defGetBoolean(def);
+ }
+
+ return true;
+}
+
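+/*
+ * Illustrative SQL (not part of this patch) showing how the option read above
+ * is set; "loopback" is a hypothetical server name, and only a superuser may
+ * set password_required to false:
+ *
+ *     ALTER USER MAPPING FOR CURRENT_USER SERVER loopback
+ *         OPTIONS (ADD password_required 'false');
+ */
+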
+/*
+ * For non-superusers, insist that the connstr specify a password. This
+ * prevents a password from being picked up from .pgpass, a service file, the
+ * environment, etc. We don't want the postgres user's passwords,
+ * certificates, etc to be accessible to non-superusers. (See also
+ * dblink_connstr_check in contrib/dblink.)
+ */
+static void
+check_conn_params(const char **keywords, const char **values, UserMapping *user)
+{
+ int i;
+
+ /* no check required if superuser */
+ if (superuser_arg(user->userid))
+ return;
+
+ /* ok if params contain a non-empty password */
+ for (i = 0; keywords[i] != NULL; i++)
+ {
+ if (strcmp(keywords[i], "password") == 0 && values[i][0] != '\0')
+ return;
+ }
+
+ /* ok if the superuser explicitly said so at user mapping creation time */
+ if (!UserMappingPasswordRequired(user))
+ return;
+
+ ereport(ERROR,
+ (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
+ errmsg("password is required"),
+ errdetail("Non-superusers must provide a password in the user mapping.")));
+}
+
+/*
+ * Issue SET commands to make sure remote session is configured properly.
+ *
+ * We do this just once at connection, assuming nothing will change the
+ * values later. Since we'll never send volatile function calls to the
+ * remote, there shouldn't be any way to break this assumption from our end.
+ * It's possible to think of ways to break it at the remote end, eg making
+ * a foreign table point to a view that includes a set_config call ---
+ * but once you admit the possibility of a malicious view definition,
+ * there are any number of ways to break things.
+ */
+static void
+configure_remote_session(PGconn *conn)
+{
+ int remoteversion = PQserverVersion(conn);
+
+ /* Force the search path to contain only pg_catalog (see deparse.c) */
+ do_sql_command(conn, "SET search_path = pg_catalog");
+
+ /*
+ * Set remote timezone; this is basically just cosmetic, since all
+ * transmitted and returned timestamptzs should specify a zone explicitly
+ * anyway. However it makes the regression test outputs more predictable.
+ *
+ * We don't risk setting remote zone equal to ours, since the remote
+ * server might use a different timezone database. Instead, use UTC
+ * (quoted, because very old servers are picky about case).
+ */
+ do_sql_command(conn, "SET timezone = 'UTC'");
+
+ /*
+ * Set values needed to ensure unambiguous data output from remote. (This
+ * logic should match what pg_dump does. See also set_transmission_modes
+ * in postgres_fdw.c.)
+ */
+ do_sql_command(conn, "SET datestyle = ISO");
+ if (remoteversion >= 80400)
+ do_sql_command(conn, "SET intervalstyle = postgres");
+ if (remoteversion >= 90000)
+ do_sql_command(conn, "SET extra_float_digits = 3");
+ else
+ do_sql_command(conn, "SET extra_float_digits = 2");
+}
+
+/*
+ * Convenience subroutine to issue a non-data-returning SQL command to remote
+ */
+void
+do_sql_command(PGconn *conn, const char *sql)
+{
+ PGresult *res;
+
+ if (!PQsendQuery(conn, sql))
+ pgfdw_report_error(ERROR, NULL, conn, false, sql);
+ res = pgfdw_get_result(conn, sql);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, conn, true, sql);
+ PQclear(res);
+}
+
+/*
+ * Start remote transaction or subtransaction, if needed.
+ *
+ * Note that we always use at least REPEATABLE READ in the remote session.
+ * This is so that, if a query initiates multiple scans of the same or
+ * different foreign tables, we will get snapshot-consistent results from
+ * those scans. A disadvantage is that we can't provide sane emulation of
+ * READ COMMITTED behavior --- it would be nice if we had some other way to
+ * control which remote queries share a snapshot.
+ */
+static void
+begin_remote_xact(ConnCacheEntry *entry)
+{
+ int curlevel = GetCurrentTransactionNestLevel();
+
+ /* Start main transaction if we haven't yet */
+ if (entry->xact_depth <= 0)
+ {
+ const char *sql;
+
+ elog(DEBUG3, "starting remote transaction on connection %p",
+ entry->conn);
+
+ if (IsolationIsSerializable())
+ sql = "START TRANSACTION ISOLATION LEVEL SERIALIZABLE";
+ else
+ sql = "START TRANSACTION ISOLATION LEVEL REPEATABLE READ";
+ entry->changing_xact_state = true;
+ do_sql_command(entry->conn, sql);
+ entry->xact_depth = 1;
+ entry->changing_xact_state = false;
+ }
+
+ /*
+ * If we're in a subtransaction, stack up savepoints to match our level.
+ * This ensures we can rollback just the desired effects when a
+ * subtransaction aborts.
+ */
+ while (entry->xact_depth < curlevel)
+ {
+ char sql[64];
+
+ snprintf(sql, sizeof(sql), "SAVEPOINT s%d", entry->xact_depth + 1);
+ entry->changing_xact_state = true;
+ do_sql_command(entry->conn, sql);
+ entry->xact_depth++;
+ entry->changing_xact_state = false;
+ }
+}
+
+/*
+ * Release connection reference count created by calling GetConnection.
+ */
+void
+ReleaseConnection(PGconn *conn)
+{
+ /*
+ * Currently, we don't actually track connection references because all
+ * cleanup is managed on a transaction or subtransaction basis instead. So
+ * there's nothing to do here.
+ */
+}
+
+/*
+ * Assign a "unique" number for a cursor.
+ *
+ * These really only need to be unique per connection within a transaction.
+ * For the moment we ignore the per-connection point and assign them across
+ * all connections in the transaction, but we ask for the connection to be
+ * supplied in case we want to refine that.
+ *
+ * Note that even if wraparound happens in a very long transaction, actual
+ * collisions are highly improbable; just be sure to use %u not %d to print.
+ */
+unsigned int
+GetCursorNumber(PGconn *conn)
+{
+ return ++cursor_number;
+}
+
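+/*
+ * Illustrative use of the cursor number (a sketch; the actual DECLARE text is
+ * built by the scan code in postgres_fdw.c):
+ *
+ *     unsigned int cn = GetCursorNumber(conn);
+ *     appendStringInfo(&buf, "DECLARE c%u CURSOR FOR %s", cn, sql);
+ */
+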
+/*
+ * Assign a "unique" number for a prepared statement.
+ *
+ * This works much like GetCursorNumber, except that we never reset the counter
+ * within a session. That's because we can't be 100% sure we've gotten rid
+ * of all prepared statements on all connections, and it's not really worth
+ * increasing the risk of prepared-statement name collisions by resetting.
+ */
+unsigned int
+GetPrepStmtNumber(PGconn *conn)
+{
+ return ++prep_stmt_number;
+}
+
+/*
+ * Submit a query and wait for the result.
+ *
+ * This function is interruptible by signals.
+ *
+ * Caller is responsible for the error handling on the result.
+ */
+PGresult *
+pgfdw_exec_query(PGconn *conn, const char *query, PgFdwConnState *state)
+{
+ /* First, process a pending asynchronous request, if any. */
+ if (state && state->pendingAreq)
+ process_pending_request(state->pendingAreq);
+
+ /*
+ * Submit a query. Since we don't use non-blocking mode, this also can
+ * block. But its risk is relatively small, so we ignore that for now.
+ */
+ if (!PQsendQuery(conn, query))
+ pgfdw_report_error(ERROR, NULL, conn, false, query);
+
+ /* Wait for the result. */
+ return pgfdw_get_result(conn, query);
+}
+
+/*
+ * Wait for the result from a prior asynchronous execution function call.
+ *
+ * This function offers quick responsiveness by checking for any interruptions.
+ *
+ * This function emulates PQexec()'s behavior of returning the last result
+ * when there are many.
+ *
+ * Caller is responsible for the error handling on the result.
+ */
+PGresult *
+pgfdw_get_result(PGconn *conn, const char *query)
+{
+ PGresult *volatile last_res = NULL;
+
+ /* In what follows, do not leak any PGresults on an error. */
+ PG_TRY();
+ {
+ for (;;)
+ {
+ PGresult *res;
+
+ while (PQisBusy(conn))
+ {
+ int wc;
+
+ /* Sleep until there's something to do */
+ wc = WaitLatchOrSocket(MyLatch,
+ WL_LATCH_SET | WL_SOCKET_READABLE |
+ WL_EXIT_ON_PM_DEATH,
+ PQsocket(conn),
+ -1L, PG_WAIT_EXTENSION);
+ ResetLatch(MyLatch);
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Data available in socket? */
+ if (wc & WL_SOCKET_READABLE)
+ {
+ if (!PQconsumeInput(conn))
+ pgfdw_report_error(ERROR, NULL, conn, false, query);
+ }
+ }
+
+ res = PQgetResult(conn);
+ if (res == NULL)
+ break; /* query is complete */
+
+ PQclear(last_res);
+ last_res = res;
+ }
+ }
+ PG_CATCH();
+ {
+ PQclear(last_res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ return last_res;
+}
+
+/*
+ * Report an error we got from the remote server.
+ *
+ * elevel: error level to use (typically ERROR, but might be less)
+ * res: PGresult containing the error
+ * conn: connection we did the query on
+ * clear: if true, PQclear the result (otherwise caller will handle it)
+ * sql: NULL, or text of remote command we tried to execute
+ *
+ * Note: callers that choose not to throw ERROR for a remote error are
+ * responsible for making sure that the associated ConnCacheEntry gets
+ * marked with have_error = true.
+ */
+void
+pgfdw_report_error(int elevel, PGresult *res, PGconn *conn,
+ bool clear, const char *sql)
+{
+ /* If requested, PGresult must be released before leaving this function. */
+ PG_TRY();
+ {
+ char *diag_sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
+ char *message_primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);
+ char *message_detail = PQresultErrorField(res, PG_DIAG_MESSAGE_DETAIL);
+ char *message_hint = PQresultErrorField(res, PG_DIAG_MESSAGE_HINT);
+ char *message_context = PQresultErrorField(res, PG_DIAG_CONTEXT);
+ int sqlstate;
+
+ if (diag_sqlstate)
+ sqlstate = MAKE_SQLSTATE(diag_sqlstate[0],
+ diag_sqlstate[1],
+ diag_sqlstate[2],
+ diag_sqlstate[3],
+ diag_sqlstate[4]);
+ else
+ sqlstate = ERRCODE_CONNECTION_FAILURE;
+
+ /*
+ * If we don't get a message from the PGresult, try the PGconn. This
+ * is needed because for connection-level failures, PQexec may just
+ * return NULL, not a PGresult at all.
+ */
+ if (message_primary == NULL)
+ message_primary = pchomp(PQerrorMessage(conn));
+
+ ereport(elevel,
+ (errcode(sqlstate),
+ (message_primary != NULL && message_primary[0] != '\0') ?
+ errmsg_internal("%s", message_primary) :
+ errmsg("could not obtain message string for remote error"),
+ message_detail ? errdetail_internal("%s", message_detail) : 0,
+ message_hint ? errhint("%s", message_hint) : 0,
+ message_context ? errcontext("%s", message_context) : 0,
+ sql ? errcontext("remote SQL command: %s", sql) : 0));
+ }
+ PG_FINALLY();
+ {
+ if (clear)
+ PQclear(res);
+ }
+ PG_END_TRY();
+}
+
+/*
+ * pgfdw_xact_callback --- cleanup at main-transaction end.
+ *
+ * This runs just late enough that it must not enter user-defined code
+ * locally. (Entering such code on the remote side is fine. Its remote
+ * COMMIT TRANSACTION may run deferred triggers.)
+ */
+static void
+pgfdw_xact_callback(XactEvent event, void *arg)
+{
+ HASH_SEQ_STATUS scan;
+ ConnCacheEntry *entry;
+
+ /* Quick exit if no connections were touched in this transaction. */
+ if (!xact_got_connection)
+ return;
+
+ /*
+ * Scan all connection cache entries to find open remote transactions, and
+ * close them.
+ */
+ hash_seq_init(&scan, ConnectionHash);
+ while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+ {
+ PGresult *res;
+
+ /* Ignore cache entry if no open connection right now */
+ if (entry->conn == NULL)
+ continue;
+
+ /* If it has an open remote transaction, try to close it */
+ if (entry->xact_depth > 0)
+ {
+ bool abort_cleanup_failure = false;
+
+ elog(DEBUG3, "closing remote transaction on connection %p",
+ entry->conn);
+
+ switch (event)
+ {
+ case XACT_EVENT_PARALLEL_PRE_COMMIT:
+ case XACT_EVENT_PRE_COMMIT:
+
+ /*
+ * If abort cleanup previously failed for this connection,
+ * we can't issue any more commands against it.
+ */
+ pgfdw_reject_incomplete_xact_state_change(entry);
+
+ /* Commit all remote transactions during pre-commit */
+ entry->changing_xact_state = true;
+ do_sql_command(entry->conn, "COMMIT TRANSACTION");
+ entry->changing_xact_state = false;
+
+ /*
+ * If there were any errors in subtransactions, and we
+ * made prepared statements, do a DEALLOCATE ALL to make
+ * sure we get rid of all prepared statements. This is
+ * annoying and not terribly bulletproof, but it's
+ * probably not worth trying harder.
+ *
+ * DEALLOCATE ALL only exists in 8.3 and later, so this
+ * constrains how old a server postgres_fdw can
+ * communicate with. We intentionally ignore errors in
+ * the DEALLOCATE, so that we can hobble along to some
+ * extent with older servers (leaking prepared statements
+ * as we go; but we don't really support update operations
+ * pre-8.3 anyway).
+ */
+ if (entry->have_prep_stmt && entry->have_error)
+ {
+ res = PQexec(entry->conn, "DEALLOCATE ALL");
+ PQclear(res);
+ }
+ entry->have_prep_stmt = false;
+ entry->have_error = false;
+ break;
+ case XACT_EVENT_PRE_PREPARE:
+
+ /*
+ * We disallow any remote transactions, since it's not
+ * very reasonable to hold them open until the prepared
+ * transaction is committed. For the moment, throw error
+ * unconditionally; later we might allow read-only cases.
+ * Note that the error will cause us to come right back
+ * here with event == XACT_EVENT_ABORT, so we'll clean up
+ * the connection state at that point.
+ */
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot PREPARE a transaction that has operated on postgres_fdw foreign tables")));
+ break;
+ case XACT_EVENT_PARALLEL_COMMIT:
+ case XACT_EVENT_COMMIT:
+ case XACT_EVENT_PREPARE:
+ /* Pre-commit should have closed the open transaction */
+ elog(ERROR, "missed cleaning up connection during pre-commit");
+ break;
+ case XACT_EVENT_PARALLEL_ABORT:
+ case XACT_EVENT_ABORT:
+
+ /*
+ * Don't try to clean up the connection if we're already
+ * in error recursion trouble.
+ */
+ if (in_error_recursion_trouble())
+ entry->changing_xact_state = true;
+
+ /*
+ * If connection is already unsalvageable, don't touch it
+ * further.
+ */
+ if (entry->changing_xact_state)
+ break;
+
+ /*
+ * Mark this connection as in the process of changing
+ * transaction state.
+ */
+ entry->changing_xact_state = true;
+
+ /* Assume we might have lost track of prepared statements */
+ entry->have_error = true;
+
+ /*
+ * If a command has been submitted to the remote server by
+ * using an asynchronous execution function, the command
+ * might not have yet completed. Check to see if a
+ * command is still being processed by the remote server,
+ * and if so, request cancellation of the command.
+ */
+ if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE &&
+ !pgfdw_cancel_query(entry->conn))
+ {
+ /* Unable to cancel running query. */
+ abort_cleanup_failure = true;
+ }
+ else if (!pgfdw_exec_cleanup_query(entry->conn,
+ "ABORT TRANSACTION",
+ false))
+ {
+ /* Unable to abort remote transaction. */
+ abort_cleanup_failure = true;
+ }
+ else if (entry->have_prep_stmt && entry->have_error &&
+ !pgfdw_exec_cleanup_query(entry->conn,
+ "DEALLOCATE ALL",
+ true))
+ {
+ /* Trouble clearing prepared statements. */
+ abort_cleanup_failure = true;
+ }
+ else
+ {
+ entry->have_prep_stmt = false;
+ entry->have_error = false;
+
+ /*
+ * If pendingAreq of the per-connection state is not
+ * NULL, it means that an asynchronous fetch begun by
+ * fetch_more_data_begin() was not done successfully
+ * and thus the per-connection state was not reset in
+ * fetch_more_data(); in that case reset the
+ * per-connection state here.
+ */
+ if (entry->state.pendingAreq)
+ memset(&entry->state, 0, sizeof(entry->state));
+ }
+
+ /* Disarm changing_xact_state if it all worked. */
+ entry->changing_xact_state = abort_cleanup_failure;
+ break;
+ }
+ }
+
+ /* Reset state to show we're out of a transaction */
+ entry->xact_depth = 0;
+
+ /*
+ * If the connection isn't in a good idle state, or it is marked as
+ * invalid, or the keep_connections option of its server is disabled,
+ * then discard it to recover. The next GetConnection will open a new
+ * connection.
+ */
+ if (PQstatus(entry->conn) != CONNECTION_OK ||
+ PQtransactionStatus(entry->conn) != PQTRANS_IDLE ||
+ entry->changing_xact_state ||
+ entry->invalidated ||
+ !entry->keep_connections)
+ {
+ elog(DEBUG3, "discarding connection %p", entry->conn);
+ disconnect_pg_server(entry);
+ }
+ }
+
+ /*
+ * Regardless of the event type, we can now mark ourselves as out of the
+ * transaction. (Note: if we are here during PRE_COMMIT or PRE_PREPARE,
+ * this saves a useless scan of the hashtable during COMMIT or PREPARE.)
+ */
+ xact_got_connection = false;
+
+ /* Also reset cursor numbering for next transaction */
+ cursor_number = 0;
+}
+
+/*
+ * pgfdw_subxact_callback --- cleanup at subtransaction end.
+ */
+static void
+pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
+ SubTransactionId parentSubid, void *arg)
+{
+ HASH_SEQ_STATUS scan;
+ ConnCacheEntry *entry;
+ int curlevel;
+
+ /* Nothing to do at subxact start, nor after commit. */
+ if (!(event == SUBXACT_EVENT_PRE_COMMIT_SUB ||
+ event == SUBXACT_EVENT_ABORT_SUB))
+ return;
+
+ /* Quick exit if no connections were touched in this transaction. */
+ if (!xact_got_connection)
+ return;
+
+ /*
+ * Scan all connection cache entries to find open remote subtransactions
+ * of the current level, and close them.
+ */
+ curlevel = GetCurrentTransactionNestLevel();
+ hash_seq_init(&scan, ConnectionHash);
+ while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+ {
+ char sql[100];
+
+ /*
+ * We only care about connections with open remote subtransactions of
+ * the current level.
+ */
+ if (entry->conn == NULL || entry->xact_depth < curlevel)
+ continue;
+
+ if (entry->xact_depth > curlevel)
+ elog(ERROR, "missed cleaning up remote subtransaction at level %d",
+ entry->xact_depth);
+
+ if (event == SUBXACT_EVENT_PRE_COMMIT_SUB)
+ {
+ /*
+ * If abort cleanup previously failed for this connection, we
+ * can't issue any more commands against it.
+ */
+ pgfdw_reject_incomplete_xact_state_change(entry);
+
+ /* Commit all remote subtransactions during pre-commit */
+ snprintf(sql, sizeof(sql), "RELEASE SAVEPOINT s%d", curlevel);
+ entry->changing_xact_state = true;
+ do_sql_command(entry->conn, sql);
+ entry->changing_xact_state = false;
+ }
+ else if (in_error_recursion_trouble())
+ {
+ /*
+ * Don't try to clean up the connection if we're already in error
+ * recursion trouble.
+ */
+ entry->changing_xact_state = true;
+ }
+ else if (!entry->changing_xact_state)
+ {
+ bool abort_cleanup_failure = false;
+
+ /* Remember that abort cleanup is in progress. */
+ entry->changing_xact_state = true;
+
+ /* Assume we might have lost track of prepared statements */
+ entry->have_error = true;
+
+ /*
+ * If a command has been submitted to the remote server by using
+ * an asynchronous execution function, the command might not have
+ * yet completed. Check to see if a command is still being
+ * processed by the remote server, and if so, request cancellation
+ * of the command.
+ */
+ if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE &&
+ !pgfdw_cancel_query(entry->conn))
+ abort_cleanup_failure = true;
+ else
+ {
+ /* Rollback all remote subtransactions during abort */
+ snprintf(sql, sizeof(sql),
+ "ROLLBACK TO SAVEPOINT s%d; RELEASE SAVEPOINT s%d",
+ curlevel, curlevel);
+ if (!pgfdw_exec_cleanup_query(entry->conn, sql, false))
+ abort_cleanup_failure = true;
+ else
+ {
+ /*
+ * If pendingAreq of the per-connection state is not NULL,
+ * it means that an asynchronous fetch begun by
+ * fetch_more_data_begin() was not done successfully and
+ * thus the per-connection state was not reset in
+ * fetch_more_data(); in that case reset the
+ * per-connection state here.
+ */
+ if (entry->state.pendingAreq)
+ memset(&entry->state, 0, sizeof(entry->state));
+ }
+ }
+
+ /* Disarm changing_xact_state if it all worked. */
+ entry->changing_xact_state = abort_cleanup_failure;
+ }
+
+ /* OK, we're outta that level of subtransaction */
+ entry->xact_depth--;
+ }
+}
+
+/*
+ * Connection invalidation callback function
+ *
+ * After a change to a pg_foreign_server or pg_user_mapping catalog entry,
+ * close connections depending on that entry immediately if the current
+ * transaction has not used those connections yet. Otherwise, mark those
+ * connections as invalid and have pgfdw_xact_callback() close them at the
+ * end of the current transaction, since they cannot be closed in the midst
+ * of the transaction using them. Closed connections will be remade at the
+ * next opportunity if necessary.
+ *
+ * Although most cache invalidation callbacks blow away all the related stuff
+ * regardless of the given hashvalue, connections are expensive enough that
+ * it's worth trying to avoid that.
+ *
+ * NB: We could avoid unnecessary disconnection more strictly by examining
+ * individual option values, but it seems too much effort for the gain.
+ */
+static void
+pgfdw_inval_callback(Datum arg, int cacheid, uint32 hashvalue)
+{
+ HASH_SEQ_STATUS scan;
+ ConnCacheEntry *entry;
+
+ Assert(cacheid == FOREIGNSERVEROID || cacheid == USERMAPPINGOID);
+
+ /* ConnectionHash must exist already, if we're registered */
+ hash_seq_init(&scan, ConnectionHash);
+ while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+ {
+ /* Ignore invalid entries */
+ if (entry->conn == NULL)
+ continue;
+
+ /* hashvalue == 0 means a cache reset, must clear all state */
+ if (hashvalue == 0 ||
+ (cacheid == FOREIGNSERVEROID &&
+ entry->server_hashvalue == hashvalue) ||
+ (cacheid == USERMAPPINGOID &&
+ entry->mapping_hashvalue == hashvalue))
+ {
+ /*
+ * Close the connection immediately if it's not used yet in this
+ * transaction. Otherwise mark it as invalid so that
+ * pgfdw_xact_callback() can close it at the end of this
+ * transaction.
+ */
+ if (entry->xact_depth == 0)
+ {
+ elog(DEBUG3, "discarding connection %p", entry->conn);
+ disconnect_pg_server(entry);
+ }
+ else
+ entry->invalidated = true;
+ }
+ }
+}
+
+/*
+ * Raise an error if the given connection cache entry is marked as being
+ * in the middle of an xact state change. This should be called at points
+ * where no such change is expected to be in progress; if one is found to be
+ * in progress, it means that we aborted in the middle of a previous state
+ * change and now don't know what the remote transaction state actually is.
+ * Such connections can't safely be further used. Re-establishing the
+ * connection would change the snapshot and roll back any writes already
+ * performed, so that's not an option, either. Thus, we must abort.
+ */
+static void
+pgfdw_reject_incomplete_xact_state_change(ConnCacheEntry *entry)
+{
+ ForeignServer *server;
+
+ /* nothing to do for inactive entries and entries of sane state */
+ if (entry->conn == NULL || !entry->changing_xact_state)
+ return;
+
+ /* make sure this entry is inactive */
+ disconnect_pg_server(entry);
+
+ /* find server name to be shown in the message below */
+ server = GetForeignServer(entry->serverid);
+
+ ereport(ERROR,
+ (errcode(ERRCODE_CONNECTION_EXCEPTION),
+ errmsg("connection to server \"%s\" was lost",
+ server->servername)));
+}
+
+/*
+ * Cancel the currently-in-progress query (whose query text we do not have)
+ * and ignore the result. Returns true if we successfully cancel the query
+ * and discard any pending result, and false if not.
+ *
+ * It's not a huge problem if we throw an ERROR here, but if we get into error
+ * recursion trouble, we'll end up slamming the connection shut, which will
+ * necessitate failing the entire toplevel transaction even if subtransactions
+ * were used. Try to use WARNING where we can.
+ *
+ * XXX: if the query was one sent by fetch_more_data_begin(), we could get the
+ * query text from the pendingAreq saved in the per-connection state, then
+ * report the query using it.
+ */
+static bool
+pgfdw_cancel_query(PGconn *conn)
+{
+ PGcancel *cancel;
+ char errbuf[256];
+ PGresult *result = NULL;
+ TimestampTz endtime;
+
+ /*
+ * If it takes too long to cancel the query and discard the result, assume
+ * the connection is dead.
+ */
+ endtime = TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 30000);
+
+ /*
+ * Issue cancel request. Unfortunately, there's no good way to limit the
+ * amount of time that we might block inside PQgetCancel().
+ */
+ if ((cancel = PQgetCancel(conn)))
+ {
+ if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
+ {
+ ereport(WARNING,
+ (errcode(ERRCODE_CONNECTION_FAILURE),
+ errmsg("could not send cancel request: %s",
+ errbuf)));
+ PQfreeCancel(cancel);
+ return false;
+ }
+ PQfreeCancel(cancel);
+ }
+
+ /* Get and discard the result of the query. */
+ if (pgfdw_get_cleanup_result(conn, endtime, &result))
+ return false;
+ PQclear(result);
+
+ return true;
+}
+
+/*
+ * Submit a query during (sub)abort cleanup and wait up to 30 seconds for the
+ * result. If the query is executed without error, the return value is true.
+ * If the query is executed successfully but returns an error, the return
+ * value is true if and only if ignore_errors is set. If the query can't be
+ * sent or times out, the return value is false.
+ *
+ * It's not a huge problem if we throw an ERROR here, but if we get into error
+ * recursion trouble, we'll end up slamming the connection shut, which will
+ * necessitate failing the entire toplevel transaction even if subtransactions
+ * were used. Try to use WARNING where we can.
+ */
+static bool
+pgfdw_exec_cleanup_query(PGconn *conn, const char *query, bool ignore_errors)
+{
+ PGresult *result = NULL;
+ TimestampTz endtime;
+
+ /*
+ * If it takes too long to execute a cleanup query, assume the connection
+ * is dead. It's fairly likely that this is why we aborted in the first
+ * place (e.g. statement timeout, user cancel), so the timeout shouldn't
+ * be too long.
+ */
+ endtime = TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 30000);
+
+ /*
+ * Submit a query. Since we don't use non-blocking mode, this also can
+ * block. But its risk is relatively small, so we ignore that for now.
+ */
+ if (!PQsendQuery(conn, query))
+ {
+ pgfdw_report_error(WARNING, NULL, conn, false, query);
+ return false;
+ }
+
+ /* Get the result of the query. */
+ if (pgfdw_get_cleanup_result(conn, endtime, &result))
+ return false;
+
+ /* Issue a warning if not successful. */
+ if (PQresultStatus(result) != PGRES_COMMAND_OK)
+ {
+ pgfdw_report_error(WARNING, result, conn, true, query);
+ return ignore_errors;
+ }
+ PQclear(result);
+
+ return true;
+}
+
+/*
+ * Get, during abort cleanup, the result of a query that is in progress. This
+ * might be a query that is being interrupted by transaction abort, or it might
+ * be a query that was initiated as part of transaction abort to get the remote
+ * side back to the appropriate state.
+ *
+ * endtime is the time at which we should give up and assume the remote
+ * side is dead. Returns true if the timeout expired, otherwise false.
+ * Sets *result except in case of a timeout.
+ */
+static bool
+pgfdw_get_cleanup_result(PGconn *conn, TimestampTz endtime, PGresult **result)
+{
+ volatile bool timed_out = false;
+ PGresult *volatile last_res = NULL;
+
+ /* In what follows, do not leak any PGresults on an error. */
+ PG_TRY();
+ {
+ for (;;)
+ {
+ PGresult *res;
+
+ while (PQisBusy(conn))
+ {
+ int wc;
+ TimestampTz now = GetCurrentTimestamp();
+ long cur_timeout;
+
+ /* If timeout has expired, give up, else get sleep time. */
+ cur_timeout = TimestampDifferenceMilliseconds(now, endtime);
+ if (cur_timeout <= 0)
+ {
+ timed_out = true;
+ goto exit;
+ }
+
+ /* Sleep until there's something to do */
+ wc = WaitLatchOrSocket(MyLatch,
+ WL_LATCH_SET | WL_SOCKET_READABLE |
+ WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ PQsocket(conn),
+ cur_timeout, PG_WAIT_EXTENSION);
+ ResetLatch(MyLatch);
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Data available in socket? */
+ if (wc & WL_SOCKET_READABLE)
+ {
+ if (!PQconsumeInput(conn))
+ {
+ /* connection trouble; treat the same as a timeout */
+ timed_out = true;
+ goto exit;
+ }
+ }
+ }
+
+ res = PQgetResult(conn);
+ if (res == NULL)
+ break; /* query is complete */
+
+ PQclear(last_res);
+ last_res = res;
+ }
+exit: ;
+ }
+ PG_CATCH();
+ {
+ PQclear(last_res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ if (timed_out)
+ PQclear(last_res);
+ else
+ *result = last_res;
+ return timed_out;
+}
+
+/*
+ * List active foreign server connections.
+ *
+ * This function takes no input parameters and returns a set of records with
+ * the following columns:
+ * - server_name - server name of the active connection. If the foreign server
+ * has been dropped but its connection is still active, the server name is
+ * shown as NULL.
+ * - valid - true/false indicating whether the connection is valid or not.
+ * Note that connections can get invalidated in pgfdw_inval_callback.
+ *
+ * No records are returned when there are no cached connections at all.
+ */
+Datum
+postgres_fdw_get_connections(PG_FUNCTION_ARGS)
+{
+#define POSTGRES_FDW_GET_CONNECTIONS_COLS 2
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ HASH_SEQ_STATUS scan;
+ ConnCacheEntry *entry;
+
+ /* check to see if caller supports us returning a tuplestore */
+ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-valued function called in context that cannot accept a set")));
+ if (!(rsinfo->allowedModes & SFRM_Materialize))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("materialize mode required, but it is not allowed in this context")));
+
+ /* Build a tuple descriptor for our result type */
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ /* Build tuplestore to hold the result rows */
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+
+ tupstore = tuplestore_begin_heap(true, false, work_mem);
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setResult = tupstore;
+ rsinfo->setDesc = tupdesc;
+
+ MemoryContextSwitchTo(oldcontext);
+
+ /* If cache doesn't exist, we return no records */
+ if (!ConnectionHash)
+ {
+ /* clean up and return the tuplestore */
+ tuplestore_donestoring(tupstore);
+
+ PG_RETURN_VOID();
+ }
+
+ hash_seq_init(&scan, ConnectionHash);
+ while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+ {
+ ForeignServer *server;
+ Datum values[POSTGRES_FDW_GET_CONNECTIONS_COLS];
+ bool nulls[POSTGRES_FDW_GET_CONNECTIONS_COLS];
+
+ /* We only look for open remote connections */
+ if (!entry->conn)
+ continue;
+
+ server = GetForeignServerExtended(entry->serverid, FSV_MISSING_OK);
+
+ MemSet(values, 0, sizeof(values));
+ MemSet(nulls, 0, sizeof(nulls));
+
+ /*
+ * The foreign server may have been dropped in the current explicit
+ * transaction. It is not possible to drop the server from another
+ * session while the connection associated with it is in use in the
+ * current transaction; if that is attempted, the DROP in the other
+ * session blocks until the current transaction finishes.
+ *
+ * Even though the server is dropped in the current transaction, the
+ * cache can still have an associated active connection entry; call
+ * such connections "dangling". Since we cannot fetch the server name
+ * from the system catalogs for dangling connections, we show a NULL
+ * value for the server name in the output instead.
+ *
+ * We could have done better by storing the server name in the cache
+ * entry instead of the server OID so that it could be used in the
+ * output. But keeping the server name in each cache entry costs 64
+ * bytes of memory, which adds up when there are many cached
+ * connections, and the use case (dropping the foreign server within
+ * the current explicit transaction) seems rare. So we chose to show a
+ * NULL value for the server name in the output.
+ *
+ * Such dangling connections get closed either on next use or at the
+ * end of the current explicit transaction in pgfdw_xact_callback.
+ */
+ if (!server)
+ {
+ /*
+ * If the server has been dropped in the current explicit
+ * transaction, then this entry would have been invalidated in
+ * pgfdw_inval_callback at the end of drop server command. Note
+ * that this connection would not have been closed in
+ * pgfdw_inval_callback because it is still being used in the
+ * current explicit transaction. So, assert that here.
+ */
+ Assert(entry->conn && entry->xact_depth > 0 && entry->invalidated);
+
+ /* Show null, if no server name was found */
+ nulls[0] = true;
+ }
+ else
+ values[0] = CStringGetTextDatum(server->servername);
+
+ values[1] = BoolGetDatum(!entry->invalidated);
+
+ tuplestore_putvalues(tupstore, tupdesc, values, nulls);
+ }
+
+ /* clean up and return the tuplestore */
+ tuplestore_donestoring(tupstore);
+
+ PG_RETURN_VOID();
+}
+
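+/*
+ * Illustrative usage of the function above (hypothetical session; the server
+ * name "loopback" is an assumption):
+ *
+ *     SELECT * FROM postgres_fdw_get_connections() ORDER BY 1;
+ *      server_name | valid
+ *     -------------+-------
+ *      loopback    | t
+ *     (1 row)
+ */
+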
+/*
+ * Disconnect the specified cached connections.
+ *
+ * This function discards the open connections that are established by
+ * postgres_fdw from the local session to the foreign server with
+ * the given name. Note that there can be multiple connections to
+ * the given server using different user mappings. If the connections
+ * are used in the current local transaction, they are not disconnected
+ * and warning messages are reported. This function returns true
+ * if it disconnects at least one connection, otherwise false. If no
+ * foreign server with the given name is found, an error is reported.
+ */
+Datum
+postgres_fdw_disconnect(PG_FUNCTION_ARGS)
+{
+ ForeignServer *server;
+ char *servername;
+
+ servername = text_to_cstring(PG_GETARG_TEXT_PP(0));
+ server = GetForeignServerByName(servername, false);
+
+ PG_RETURN_BOOL(disconnect_cached_connections(server->serverid));
+}
+
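+/*
+ * Illustrative usage (assumes a foreign server named "loopback" exists and
+ * currently has a cached connection):
+ *
+ *     SELECT postgres_fdw_disconnect('loopback');
+ */
+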
+/*
+ * Disconnect all the cached connections.
+ *
+ * This function discards all the open connections that are established by
+ * postgres_fdw from the local session to the foreign servers.
+ * If the connections are used in the current local transaction, they are
+ * not disconnected and warning messages are reported. This function
+ * returns true if it disconnects at least one connection, otherwise false.
+ */
+Datum
+postgres_fdw_disconnect_all(PG_FUNCTION_ARGS)
+{
+ PG_RETURN_BOOL(disconnect_cached_connections(InvalidOid));
+}
+
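+/*
+ * Illustrative usage:
+ *
+ *     SELECT postgres_fdw_disconnect_all();
+ */
+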
+/*
+ * Workhorse to disconnect cached connections.
+ *
+ * This function scans all the connection cache entries and disconnects
+ * the open connections whose foreign server OID matches with
+ * the specified one. If InvalidOid is specified, it disconnects all
+ * the cached connections.
+ *
+ * This function emits a warning for each connection that's used in
+ * the current transaction and doesn't close it. It returns true if
+ * it disconnects at least one connection, otherwise false.
+ *
+ * Note that this function disconnects even connections that were
+ * established by other users in the same local session using different
+ * user mappings. As a result, even a non-superuser can close connections
+ * established by superusers in the same local session.
+ *
+ * XXX As of now we don't see any security risk in doing this, but should
+ * we place some restrictions on it, for example, prevent a non-superuser
+ * from closing connections established by superusers even in the same
+ * session?
+ */
+static bool
+disconnect_cached_connections(Oid serverid)
+{
+ HASH_SEQ_STATUS scan;
+ ConnCacheEntry *entry;
+ bool all = !OidIsValid(serverid);
+ bool result = false;
+
+ /*
+ * Connection cache hashtable has not been initialized yet in this
+ * session, so return false.
+ */
+ if (!ConnectionHash)
+ return false;
+
+ hash_seq_init(&scan, ConnectionHash);
+ while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+ {
+ /* Ignore cache entry if no open connection right now. */
+ if (!entry->conn)
+ continue;
+
+ if (all || entry->serverid == serverid)
+ {
+ /*
+ * Emit a warning because the connection to close is used in the
+ * current transaction and cannot be disconnected right now.
+ */
+ if (entry->xact_depth > 0)
+ {
+ ForeignServer *server;
+
+ server = GetForeignServerExtended(entry->serverid,
+ FSV_MISSING_OK);
+
+ if (!server)
+ {
+ /*
+ * If the foreign server was dropped while its connection
+ * was used in the current transaction, the connection
+ * must have been marked as invalid by
+ * pgfdw_inval_callback at the end of DROP SERVER command.
+ */
+ Assert(entry->invalidated);
+
+ ereport(WARNING,
+ (errmsg("cannot close dropped server connection because it is still in use")));
+ }
+ else
+ ereport(WARNING,
+ (errmsg("cannot close connection for server \"%s\" because it is still in use",
+ server->servername)));
+ }
+ else
+ {
+ elog(DEBUG3, "discarding connection %p", entry->conn);
+ disconnect_pg_server(entry);
+ result = true;
+ }
+ }
+ }
+
+ return result;
+}
diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c
new file mode 100644
index 0000000..efaf387
--- /dev/null
+++ b/contrib/postgres_fdw/deparse.c
@@ -0,0 +1,3613 @@
+/*-------------------------------------------------------------------------
+ *
+ * deparse.c
+ * Query deparser for postgres_fdw
+ *
+ * This file includes functions that examine query WHERE clauses to see
+ * whether they're safe to send to the remote server for execution, as
+ * well as functions to construct the query text to be sent. The latter
+ * functionality is annoyingly duplicative of ruleutils.c, but there are
+ * enough special considerations that it seems best to keep this separate.
+ * One saving grace is that we only need deparse logic for node types that
+ * we consider safe to send.
+ *
+ * We assume that the remote session's search_path is exactly "pg_catalog",
+ * and thus we need to schema-qualify all and only names outside pg_catalog.
+ *
+ * We do not consider that it is ever safe to send COLLATE expressions to
+ * the remote server: it might not have the same collation names we do.
+ * (Later we might consider it safe to send COLLATE "C", but even that would
+ * fail on old remote servers.) An expression is considered safe to send
+ * only if all operator/function input collations used in it are traceable to
+ * Var(s) of the foreign table. That implies that if the remote server gets
+ * a different answer than we do, the foreign table's columns are not marked
+ * with collations that match the remote table's columns, which we can
+ * consider to be user error.
+ *
+ * Portions Copyright (c) 2012-2021, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/deparse.c
+ *
+ *-------------------------------------------------------------------------
+ */
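+
+/*
+ * Worked example of the collation rule above (illustrative, not part of the
+ * original file): for a foreign table column "c" of type text,
+ *
+ *     WHERE c = 'foo'                    -- shippable: the comparison's input
+ *                                        -- collation comes from the foreign Var
+ *     WHERE c = 'foo' COLLATE "de_DE"    -- not shippable: the collation does
+ *                                        -- not derive from a foreign Var
+ */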
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "access/sysattr.h"
+#include "access/table.h"
+#include "catalog/pg_aggregate.h"
+#include "catalog/pg_collation.h"
+#include "catalog/pg_namespace.h"
+#include "catalog/pg_operator.h"
+#include "catalog/pg_opfamily.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "commands/defrem.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/plannodes.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/prep.h"
+#include "optimizer/tlist.h"
+#include "parser/parsetree.h"
+#include "postgres_fdw.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/rel.h"
+#include "utils/syscache.h"
+#include "utils/typcache.h"
+#include "commands/tablecmds.h"
+
+/*
+ * Global context for foreign_expr_walker's search of an expression tree.
+ */
+typedef struct foreign_glob_cxt
+{
+ PlannerInfo *root; /* global planner state */
+ RelOptInfo *foreignrel; /* the foreign relation we are planning for */
+ Relids relids; /* relids of base relations in the underlying
+ * scan */
+} foreign_glob_cxt;
+
+/*
+ * Local (per-tree-level) context for foreign_expr_walker's search.
+ * This is concerned with identifying collations used in the expression.
+ */
+typedef enum
+{
+ FDW_COLLATE_NONE, /* expression is of a noncollatable type, or
+ * it has default collation that is not
+ * traceable to a foreign Var */
+ FDW_COLLATE_SAFE, /* collation derives from a foreign Var */
+ FDW_COLLATE_UNSAFE /* collation is non-default and derives from
+ * something other than a foreign Var */
+} FDWCollateState;
+
+typedef struct foreign_loc_cxt
+{
+ Oid collation; /* OID of current collation, if any */
+ FDWCollateState state; /* state of current collation choice */
+} foreign_loc_cxt;
+
+/*
+ * Context for deparseExpr
+ */
+typedef struct deparse_expr_cxt
+{
+ PlannerInfo *root; /* global planner state */
+ RelOptInfo *foreignrel; /* the foreign relation we are planning for */
+ RelOptInfo *scanrel; /* the underlying scan relation. Same as
+ * foreignrel, when that represents a join or
+ * a base relation. */
+ StringInfo buf; /* output buffer to append to */
+ List **params_list; /* exprs that will become remote Params */
+} deparse_expr_cxt;
+
+#define REL_ALIAS_PREFIX "r"
+/* Handy macro to add relation name qualification */
+#define ADD_REL_QUALIFIER(buf, varno) \
+ appendStringInfo((buf), "%s%d.", REL_ALIAS_PREFIX, (varno))
+#define SUBQUERY_REL_ALIAS_PREFIX "s"
+#define SUBQUERY_COL_ALIAS_PREFIX "c"
+
+/*
+ * Functions to determine whether an expression can be evaluated safely on
+ * remote server.
+ */
+static bool foreign_expr_walker(Node *node,
+ foreign_glob_cxt *glob_cxt,
+ foreign_loc_cxt *outer_cxt);
+static char *deparse_type_name(Oid type_oid, int32 typemod);
+
+/*
+ * Functions to construct string representation of a node tree.
+ */
+static void deparseTargetList(StringInfo buf,
+ RangeTblEntry *rte,
+ Index rtindex,
+ Relation rel,
+ bool is_returning,
+ Bitmapset *attrs_used,
+ bool qualify_col,
+ List **retrieved_attrs);
+static void deparseExplicitTargetList(List *tlist,
+ bool is_returning,
+ List **retrieved_attrs,
+ deparse_expr_cxt *context);
+static void deparseSubqueryTargetList(deparse_expr_cxt *context);
+static void deparseReturningList(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ bool trig_after_row,
+ List *withCheckOptionList,
+ List *returningList,
+ List **retrieved_attrs);
+static void deparseColumnRef(StringInfo buf, int varno, int varattno,
+ RangeTblEntry *rte, bool qualify_col);
+static void deparseRelation(StringInfo buf, Relation rel);
+static void deparseExpr(Expr *expr, deparse_expr_cxt *context);
+static void deparseVar(Var *node, deparse_expr_cxt *context);
+static void deparseConst(Const *node, deparse_expr_cxt *context, int showtype);
+static void deparseParam(Param *node, deparse_expr_cxt *context);
+static void deparseSubscriptingRef(SubscriptingRef *node, deparse_expr_cxt *context);
+static void deparseFuncExpr(FuncExpr *node, deparse_expr_cxt *context);
+static void deparseOpExpr(OpExpr *node, deparse_expr_cxt *context);
+static void deparseOperatorName(StringInfo buf, Form_pg_operator opform);
+static void deparseDistinctExpr(DistinctExpr *node, deparse_expr_cxt *context);
+static void deparseScalarArrayOpExpr(ScalarArrayOpExpr *node,
+ deparse_expr_cxt *context);
+static void deparseRelabelType(RelabelType *node, deparse_expr_cxt *context);
+static void deparseBoolExpr(BoolExpr *node, deparse_expr_cxt *context);
+static void deparseNullTest(NullTest *node, deparse_expr_cxt *context);
+static void deparseArrayExpr(ArrayExpr *node, deparse_expr_cxt *context);
+static void printRemoteParam(int paramindex, Oid paramtype, int32 paramtypmod,
+ deparse_expr_cxt *context);
+static void printRemotePlaceholder(Oid paramtype, int32 paramtypmod,
+ deparse_expr_cxt *context);
+static void deparseSelectSql(List *tlist, bool is_subquery, List **retrieved_attrs,
+ deparse_expr_cxt *context);
+static void deparseLockingClause(deparse_expr_cxt *context);
+static void appendOrderByClause(List *pathkeys, bool has_final_sort,
+ deparse_expr_cxt *context);
+static void appendLimitClause(deparse_expr_cxt *context);
+static void appendConditions(List *exprs, deparse_expr_cxt *context);
+static void deparseFromExprForRel(StringInfo buf, PlannerInfo *root,
+ RelOptInfo *foreignrel, bool use_alias,
+ Index ignore_rel, List **ignore_conds,
+ List **params_list);
+static void deparseFromExpr(List *quals, deparse_expr_cxt *context);
+static void deparseRangeTblRef(StringInfo buf, PlannerInfo *root,
+ RelOptInfo *foreignrel, bool make_subquery,
+ Index ignore_rel, List **ignore_conds, List **params_list);
+static void deparseAggref(Aggref *node, deparse_expr_cxt *context);
+static void appendGroupByClause(List *tlist, deparse_expr_cxt *context);
+static void appendOrderBySuffix(Oid sortop, Oid sortcoltype, bool nulls_first,
+ deparse_expr_cxt *context);
+static void appendAggOrderBy(List *orderList, List *targetList,
+ deparse_expr_cxt *context);
+static void appendFunctionName(Oid funcid, deparse_expr_cxt *context);
+static Node *deparseSortGroupClause(Index ref, List *tlist, bool force_colno,
+ deparse_expr_cxt *context);
+
+/*
+ * Helper functions
+ */
+static bool is_subquery_var(Var *node, RelOptInfo *foreignrel,
+ int *relno, int *colno);
+static void get_relation_column_alias_ids(Var *node, RelOptInfo *foreignrel,
+ int *relno, int *colno);
+
+
+/*
+ * Examine each qual clause in input_conds, and classify them into two groups,
+ * which are returned as two lists:
+ * - remote_conds contains expressions that can be evaluated remotely
+ * - local_conds contains expressions that can't be evaluated remotely
+ */
+void
+classifyConditions(PlannerInfo *root,
+ RelOptInfo *baserel,
+ List *input_conds,
+ List **remote_conds,
+ List **local_conds)
+{
+ ListCell *lc;
+
+ *remote_conds = NIL;
+ *local_conds = NIL;
+
+ foreach(lc, input_conds)
+ {
+ RestrictInfo *ri = lfirst_node(RestrictInfo, lc);
+
+ if (is_foreign_expr(root, baserel, ri->clause))
+ *remote_conds = lappend(*remote_conds, ri);
+ else
+ *local_conds = lappend(*local_conds, ri);
+ }
+}
+
+/*
+ * Returns true if given expr is safe to evaluate on the foreign server.
+ */
+bool
+is_foreign_expr(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Expr *expr)
+{
+ foreign_glob_cxt glob_cxt;
+ foreign_loc_cxt loc_cxt;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) (baserel->fdw_private);
+
+ /*
+ * Check that the expression consists of nodes that are safe to execute
+ * remotely.
+ */
+ glob_cxt.root = root;
+ glob_cxt.foreignrel = baserel;
+
+ /*
+ * For an upper relation, use relids from its underlying scan relation,
+ * because the upperrel's own relids currently aren't set to anything
+ * meaningful by the core code. For other relations, use their own relids.
+ */
+ if (IS_UPPER_REL(baserel))
+ glob_cxt.relids = fpinfo->outerrel->relids;
+ else
+ glob_cxt.relids = baserel->relids;
+ loc_cxt.collation = InvalidOid;
+ loc_cxt.state = FDW_COLLATE_NONE;
+ if (!foreign_expr_walker((Node *) expr, &glob_cxt, &loc_cxt))
+ return false;
+
+ /*
+ * If the expression has a valid collation that does not arise from a
+ * foreign Var, the expression cannot be sent over.
+ */
+ if (loc_cxt.state == FDW_COLLATE_UNSAFE)
+ return false;
+
+ /*
+ * An expression that includes any mutable functions can't be sent over
+ * because its result is not stable. For example, sending now() to the
+ * remote side could cause confusion from clock offsets. Future versions
+ * might be able to make this choice with more granularity. (We check
+ * this last because it requires a lot of expensive catalog lookups.)
+ */
+ if (contain_mutable_functions((Node *) expr))
+ return false;
+
+ /* OK to evaluate on the remote server */
+ return true;
+}
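+
+/*
+ * Illustration (column names are only examples): for a foreign table with an
+ * integer column c1 and a text column c2, a qual such as "c1 > 10" normally
+ * passes all three checks above, while "c1 > trunc(random() * 10)" fails the
+ * mutability check and "c2 > ('foo' COLLATE "C")" falls foul of the collation
+ * rules, so classifyConditions() keeps the latter two in local_conds and they
+ * are evaluated locally instead.
+ */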
+
+/*
+ * Check if expression is safe to execute remotely, and return true if so.
+ *
+ * In addition, *outer_cxt is updated with collation information.
+ *
+ * We must check that the expression contains only node types we can deparse,
+ * that all types/functions/operators are safe to send (they are "shippable"),
+ * and that all collations used in the expression derive from Vars of the
+ * foreign table. Because of the latter, the logic is pretty close to
+ * assign_collations_walker() in parse_collate.c, though we can assume here
+ * that the given expression is valid. Note function mutability is not
+ * currently considered here.
+ */
+static bool
+foreign_expr_walker(Node *node,
+ foreign_glob_cxt *glob_cxt,
+ foreign_loc_cxt *outer_cxt)
+{
+ bool check_type = true;
+ PgFdwRelationInfo *fpinfo;
+ foreign_loc_cxt inner_cxt;
+ Oid collation;
+ FDWCollateState state;
+
+ /* Need do nothing for empty subexpressions */
+ if (node == NULL)
+ return true;
+
+ /* May need server info from baserel's fdw_private struct */
+ fpinfo = (PgFdwRelationInfo *) (glob_cxt->foreignrel->fdw_private);
+
+ /* Set up inner_cxt for possible recursion to child nodes */
+ inner_cxt.collation = InvalidOid;
+ inner_cxt.state = FDW_COLLATE_NONE;
+
+ switch (nodeTag(node))
+ {
+ case T_Var:
+ {
+ Var *var = (Var *) node;
+
+ /*
+ * If the Var is from the foreign table, we consider its
+ * collation (if any) safe to use. If it is from another
+ * table, we treat its collation the same way as we would a
+ * Param's collation, ie it's not safe for it to have a
+ * non-default collation.
+ */
+ if (bms_is_member(var->varno, glob_cxt->relids) &&
+ var->varlevelsup == 0)
+ {
+ /* Var belongs to foreign table */
+
+ /*
+ * System columns other than ctid should not be sent to
+ * the remote, since we don't make any effort to ensure
+ * that local and remote values match (tableoid, in
+ * particular, almost certainly doesn't match).
+ */
+ if (var->varattno < 0 &&
+ var->varattno != SelfItemPointerAttributeNumber)
+ return false;
+
+ /* Else check the collation */
+ collation = var->varcollid;
+ state = OidIsValid(collation) ? FDW_COLLATE_SAFE : FDW_COLLATE_NONE;
+ }
+ else
+ {
+ /* Var belongs to some other table */
+ collation = var->varcollid;
+ if (collation == InvalidOid ||
+ collation == DEFAULT_COLLATION_OID)
+ {
+ /*
+ * It's noncollatable, or it's safe to combine with a
+ * collatable foreign Var, so set state to NONE.
+ */
+ state = FDW_COLLATE_NONE;
+ }
+ else
+ {
+ /*
+ * Do not fail right away, since the Var might appear
+ * in a collation-insensitive context.
+ */
+ state = FDW_COLLATE_UNSAFE;
+ }
+ }
+ }
+ break;
+ case T_Const:
+ {
+ Const *c = (Const *) node;
+
+ /*
+ * If the constant has nondefault collation, either it's of a
+ * non-builtin type, or it reflects folding of a CollateExpr.
+ * It's unsafe to send to the remote unless it's used in a
+ * non-collation-sensitive context.
+ */
+ collation = c->constcollid;
+ if (collation == InvalidOid ||
+ collation == DEFAULT_COLLATION_OID)
+ state = FDW_COLLATE_NONE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_Param:
+ {
+ Param *p = (Param *) node;
+
+ /*
+ * If it's a MULTIEXPR Param, punt. We can't tell from here
+ * whether the referenced sublink/subplan contains any remote
+ * Vars; if it does, handling that is too complicated to
+ * consider supporting at present. Fortunately, MULTIEXPR
+ * Params are not reduced to plain PARAM_EXEC until the end of
+ * planning, so we can easily detect this case. (Normal
+ * PARAM_EXEC Params are safe to ship because their values
+ * come from somewhere else in the plan tree; but a MULTIEXPR
+ * references a sub-select elsewhere in the same targetlist,
+ * so we'd be on the hook to evaluate it somehow if we wanted
+ * to handle such cases as direct foreign updates.)
+ */
+ if (p->paramkind == PARAM_MULTIEXPR)
+ return false;
+
+ /*
+ * Collation rule is same as for Consts and non-foreign Vars.
+ */
+ collation = p->paramcollid;
+ if (collation == InvalidOid ||
+ collation == DEFAULT_COLLATION_OID)
+ state = FDW_COLLATE_NONE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_SubscriptingRef:
+ {
+ SubscriptingRef *sr = (SubscriptingRef *) node;
+
+ /* Assignment should not be in restrictions. */
+ if (sr->refassgnexpr != NULL)
+ return false;
+
+ /*
+ * Recurse into the remaining subexpressions. The container
+ * subscripts will not affect collation of the SubscriptingRef
+ * result, so do those first and reset inner_cxt afterwards.
+ */
+ if (!foreign_expr_walker((Node *) sr->refupperindexpr,
+ glob_cxt, &inner_cxt))
+ return false;
+ inner_cxt.collation = InvalidOid;
+ inner_cxt.state = FDW_COLLATE_NONE;
+ if (!foreign_expr_walker((Node *) sr->reflowerindexpr,
+ glob_cxt, &inner_cxt))
+ return false;
+ inner_cxt.collation = InvalidOid;
+ inner_cxt.state = FDW_COLLATE_NONE;
+ if (!foreign_expr_walker((Node *) sr->refexpr,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * Container subscripting typically yields same collation as
+ * refexpr's, but in case it doesn't, use same logic as for
+ * function nodes.
+ */
+ collation = sr->refcollid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else if (collation == DEFAULT_COLLATION_OID)
+ state = FDW_COLLATE_NONE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_FuncExpr:
+ {
+ FuncExpr *fe = (FuncExpr *) node;
+
+ /*
+ * If the function used by the expression is not shippable, it
+ * can't be sent to the remote server because it might have
+ * incompatible semantics there.
+ */
+ if (!is_shippable(fe->funcid, ProcedureRelationId, fpinfo))
+ return false;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) fe->args,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * If function's input collation is not derived from a foreign
+ * Var, it can't be sent to remote.
+ */
+ if (fe->inputcollid == InvalidOid)
+ /* OK, inputs are all noncollatable */ ;
+ else if (inner_cxt.state != FDW_COLLATE_SAFE ||
+ fe->inputcollid != inner_cxt.collation)
+ return false;
+
+ /*
+ * Detect whether node is introducing a collation not derived
+ * from a foreign Var. (If so, we just mark it unsafe for now
+ * rather than immediately returning false, since the parent
+ * node might not care.)
+ */
+ collation = fe->funccollid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else if (collation == DEFAULT_COLLATION_OID)
+ state = FDW_COLLATE_NONE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_OpExpr:
+ case T_DistinctExpr: /* struct-equivalent to OpExpr */
+ {
+ OpExpr *oe = (OpExpr *) node;
+
+ /*
+ * Similarly, only shippable operators can be sent to remote.
+ * (If the operator is shippable, we assume its underlying
+ * function is too.)
+ */
+ if (!is_shippable(oe->opno, OperatorRelationId, fpinfo))
+ return false;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) oe->args,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * If operator's input collation is not derived from a foreign
+ * Var, it can't be sent to remote.
+ */
+ if (oe->inputcollid == InvalidOid)
+ /* OK, inputs are all noncollatable */ ;
+ else if (inner_cxt.state != FDW_COLLATE_SAFE ||
+ oe->inputcollid != inner_cxt.collation)
+ return false;
+
+ /* Result-collation handling is same as for functions */
+ collation = oe->opcollid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else if (collation == DEFAULT_COLLATION_OID)
+ state = FDW_COLLATE_NONE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_ScalarArrayOpExpr:
+ {
+ ScalarArrayOpExpr *oe = (ScalarArrayOpExpr *) node;
+
+ /*
+ * Again, only shippable operators can be sent to remote.
+ */
+ if (!is_shippable(oe->opno, OperatorRelationId, fpinfo))
+ return false;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) oe->args,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * If operator's input collation is not derived from a foreign
+ * Var, it can't be sent to remote.
+ */
+ if (oe->inputcollid == InvalidOid)
+ /* OK, inputs are all noncollatable */ ;
+ else if (inner_cxt.state != FDW_COLLATE_SAFE ||
+ oe->inputcollid != inner_cxt.collation)
+ return false;
+
+ /* Output is always boolean and so noncollatable. */
+ collation = InvalidOid;
+ state = FDW_COLLATE_NONE;
+ }
+ break;
+ case T_RelabelType:
+ {
+ RelabelType *r = (RelabelType *) node;
+
+ /*
+ * Recurse to input subexpression.
+ */
+ if (!foreign_expr_walker((Node *) r->arg,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * RelabelType must not introduce a collation not derived from
+ * an input foreign Var (same logic as for a real function).
+ */
+ collation = r->resultcollid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else if (collation == DEFAULT_COLLATION_OID)
+ state = FDW_COLLATE_NONE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_BoolExpr:
+ {
+ BoolExpr *b = (BoolExpr *) node;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) b->args,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /* Output is always boolean and so noncollatable. */
+ collation = InvalidOid;
+ state = FDW_COLLATE_NONE;
+ }
+ break;
+ case T_NullTest:
+ {
+ NullTest *nt = (NullTest *) node;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) nt->arg,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /* Output is always boolean and so noncollatable. */
+ collation = InvalidOid;
+ state = FDW_COLLATE_NONE;
+ }
+ break;
+ case T_ArrayExpr:
+ {
+ ArrayExpr *a = (ArrayExpr *) node;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) a->elements,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * ArrayExpr must not introduce a collation not derived from
+ * an input foreign Var (same logic as for a function).
+ */
+ collation = a->array_collid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else if (collation == DEFAULT_COLLATION_OID)
+ state = FDW_COLLATE_NONE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_List:
+ {
+ List *l = (List *) node;
+ ListCell *lc;
+
+ /*
+ * Recurse to component subexpressions.
+ */
+ foreach(lc, l)
+ {
+ if (!foreign_expr_walker((Node *) lfirst(lc),
+ glob_cxt, &inner_cxt))
+ return false;
+ }
+
+ /*
+ * When processing a list, collation state just bubbles up
+ * from the list elements.
+ */
+ collation = inner_cxt.collation;
+ state = inner_cxt.state;
+
+ /* Don't apply exprType() to the list. */
+ check_type = false;
+ }
+ break;
+ case T_Aggref:
+ {
+ Aggref *agg = (Aggref *) node;
+ ListCell *lc;
+
+ /* Not safe to push down when not in a grouping context */
+ if (!IS_UPPER_REL(glob_cxt->foreignrel))
+ return false;
+
+ /* Only non-split aggregates are pushable. */
+ if (agg->aggsplit != AGGSPLIT_SIMPLE)
+ return false;
+
+ /* As usual, it must be shippable. */
+ if (!is_shippable(agg->aggfnoid, ProcedureRelationId, fpinfo))
+ return false;
+
+ /*
+ * Recurse to input args. aggdirectargs, aggorder and
+ * aggdistinct are all present in args, so no need to check
+ * their shippability explicitly.
+ */
+ foreach(lc, agg->args)
+ {
+ Node *n = (Node *) lfirst(lc);
+
+ /* If TargetEntry, extract the expression from it */
+ if (IsA(n, TargetEntry))
+ {
+ TargetEntry *tle = (TargetEntry *) n;
+
+ n = (Node *) tle->expr;
+ }
+
+ if (!foreign_expr_walker(n, glob_cxt, &inner_cxt))
+ return false;
+ }
+
+ /*
+ * For aggorder elements, check whether the sort operator, if
+ * specified, is shippable or not.
+ */
+ if (agg->aggorder)
+ {
+ ListCell *lc;
+
+ foreach(lc, agg->aggorder)
+ {
+ SortGroupClause *srt = (SortGroupClause *) lfirst(lc);
+ Oid sortcoltype;
+ TypeCacheEntry *typentry;
+ TargetEntry *tle;
+
+ tle = get_sortgroupref_tle(srt->tleSortGroupRef,
+ agg->args);
+ sortcoltype = exprType((Node *) tle->expr);
+ typentry = lookup_type_cache(sortcoltype,
+ TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
+ /* Check shippability of non-default sort operator. */
+ if (srt->sortop != typentry->lt_opr &&
+ srt->sortop != typentry->gt_opr &&
+ !is_shippable(srt->sortop, OperatorRelationId,
+ fpinfo))
+ return false;
+ }
+ }
+
+ /* Check aggregate filter */
+ if (!foreign_expr_walker((Node *) agg->aggfilter,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * If aggregate's input collation is not derived from a
+ * foreign Var, it can't be sent to remote.
+ */
+ if (agg->inputcollid == InvalidOid)
+ /* OK, inputs are all noncollatable */ ;
+ else if (inner_cxt.state != FDW_COLLATE_SAFE ||
+ agg->inputcollid != inner_cxt.collation)
+ return false;
+
+ /*
+ * Detect whether node is introducing a collation not derived
+ * from a foreign Var. (If so, we just mark it unsafe for now
+ * rather than immediately returning false, since the parent
+ * node might not care.)
+ */
+ collation = agg->aggcollid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else if (collation == DEFAULT_COLLATION_OID)
+ state = FDW_COLLATE_NONE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ default:
+
+ /*
+ * If it's anything else, assume it's unsafe. This list can be
+ * expanded later, but don't forget to add deparse support below.
+ */
+ return false;
+ }
+
+ /*
+ * If the result type of the given expression is not shippable, it can't be
+ * sent to the remote server because it might have incompatible semantics there.
+ */
+ if (check_type && !is_shippable(exprType(node), TypeRelationId, fpinfo))
+ return false;
+
+ /*
+ * Now, merge my collation information into my parent's state.
+ */
+ if (state > outer_cxt->state)
+ {
+ /* Override previous parent state */
+ outer_cxt->collation = collation;
+ outer_cxt->state = state;
+ }
+ else if (state == outer_cxt->state)
+ {
+ /* Merge, or detect error if there's a collation conflict */
+ switch (state)
+ {
+ case FDW_COLLATE_NONE:
+ /* Nothing + nothing is still nothing */
+ break;
+ case FDW_COLLATE_SAFE:
+ if (collation != outer_cxt->collation)
+ {
+ /*
+ * Non-default collation always beats default.
+ */
+ if (outer_cxt->collation == DEFAULT_COLLATION_OID)
+ {
+ /* Override previous parent state */
+ outer_cxt->collation = collation;
+ }
+ else if (collation != DEFAULT_COLLATION_OID)
+ {
+ /*
+ * Conflict; show state as indeterminate. We don't
+ * want to "return false" right away, since parent
+ * node might not care about collation.
+ */
+ outer_cxt->state = FDW_COLLATE_UNSAFE;
+ }
+ }
+ break;
+ case FDW_COLLATE_UNSAFE:
+ /* We're still conflicted ... */
+ break;
+ }
+ }
+
+ /* It looks OK */
+ return true;
+}
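+
+/*
+ * Illustration of the merge rules above (column names are only examples):
+ * for "c1 || c2" where both text columns carry the same foreign-Var
+ * collation, each child reports FDW_COLLATE_SAFE with that collation and
+ * the parent stays SAFE.  If an input instead carries an unrelated
+ * non-default collation (say a Const produced by folding a COLLATE clause),
+ * the state degrades to FDW_COLLATE_UNSAFE, and the expression remains
+ * shippable only if that state is absorbed by a collation-insensitive
+ * ancestor such as a NullTest -- exactly the possibility the Var/Const
+ * cases above leave open.
+ */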
+
+/*
+ * Returns true if given expr is something we'd have to send the value of
+ * to the foreign server.
+ *
+ * This should return true when the expression is a shippable node that
+ * deparseExpr would add to context->params_list. Note that we don't care
+ * if the expression *contains* such a node, only whether one appears at top
+ * level. We need this to detect cases where setrefs.c would recognize a
+ * false match between an fdw_exprs item (which came from the params_list)
+ * and an entry in fdw_scan_tlist (which we're considering putting the given
+ * expression into).
+ */
+bool
+is_foreign_param(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Expr *expr)
+{
+ if (expr == NULL)
+ return false;
+
+ switch (nodeTag(expr))
+ {
+ case T_Var:
+ {
+ /* It would have to be sent unless it's a foreign Var */
+ Var *var = (Var *) expr;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) (baserel->fdw_private);
+ Relids relids;
+
+ if (IS_UPPER_REL(baserel))
+ relids = fpinfo->outerrel->relids;
+ else
+ relids = baserel->relids;
+
+ if (bms_is_member(var->varno, relids) && var->varlevelsup == 0)
+ return false; /* foreign Var, so not a param */
+ else
+ return true; /* it'd have to be a param */
+ break;
+ }
+ case T_Param:
+ /* Params always have to be sent to the foreign server */
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/*
+ * Returns true if it's safe to push down the sort expression described by
+ * 'pathkey' to the foreign server.
+ */
+bool
+is_foreign_pathkey(PlannerInfo *root,
+ RelOptInfo *baserel,
+ PathKey *pathkey)
+{
+ EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) baserel->fdw_private;
+
+ /*
+ * is_foreign_expr would detect volatile expressions as well, but checking
+ * ec_has_volatile here saves some cycles.
+ */
+ if (pathkey_ec->ec_has_volatile)
+ return false;
+
+ /* can't push down the sort if the pathkey's opfamily is not shippable */
+ if (!is_shippable(pathkey->pk_opfamily, OperatorFamilyRelationId, fpinfo))
+ return false;
+
+ /* can push if a suitable EC member exists */
+ return (find_em_for_rel(root, pathkey_ec, baserel) != NULL);
+}
+
+/*
+ * Convert type OID + typmod info into a type name we can ship to the remote
+ * server. Someplace else had better have verified that this type name is
+ * expected to be known on the remote end.
+ *
+ * This is almost just format_type_with_typemod(), except that if left to its
+ * own devices, that function will make schema-qualification decisions based
+ * on the local search_path, which is wrong. We must schema-qualify all
+ * type names that are not in pg_catalog. We assume here that built-in types
+ * are all in pg_catalog and need not be qualified; otherwise, qualify.
+ */
+static char *
+deparse_type_name(Oid type_oid, int32 typemod)
+{
+ bits16 flags = FORMAT_TYPE_TYPEMOD_GIVEN;
+
+ if (!is_builtin(type_oid))
+ flags |= FORMAT_TYPE_FORCE_QUALIFY;
+
+ return format_type_extended(type_oid, typemod, flags);
+}
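+
+/*
+ * For example (type names are illustrative), a built-in type is rendered
+ * without qualification, e.g. "numeric(10,2)" or "character varying(20)",
+ * while a user-defined type such as an enum in the public schema comes out
+ * schema-qualified, e.g. "public.mood", regardless of the local search_path.
+ */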
+
+/*
+ * Build the targetlist for given relation to be deparsed as SELECT clause.
+ *
+ * The output targetlist contains the columns that need to be fetched from the
+ * foreign server for the given relation. If foreignrel is an upper relation,
+ * then the output targetlist can also contain expressions to be evaluated on
+ * the foreign server.
+ */
+List *
+build_tlist_to_deparse(RelOptInfo *foreignrel)
+{
+ List *tlist = NIL;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private;
+ ListCell *lc;
+
+ /*
+ * For an upper relation, we have already built the target list while
+ * checking shippability, so just return that.
+ */
+ if (IS_UPPER_REL(foreignrel))
+ return fpinfo->grouped_tlist;
+
+ /*
+ * We require columns specified in foreignrel->reltarget->exprs and those
+ * required for evaluating the local conditions.
+ */
+ tlist = add_to_flat_tlist(tlist,
+ pull_var_clause((Node *) foreignrel->reltarget->exprs,
+ PVC_RECURSE_PLACEHOLDERS));
+ foreach(lc, fpinfo->local_conds)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+
+ tlist = add_to_flat_tlist(tlist,
+ pull_var_clause((Node *) rinfo->clause,
+ PVC_RECURSE_PLACEHOLDERS));
+ }
+
+ return tlist;
+}
+
+/*
+ * Deparse SELECT statement for given relation into buf.
+ *
+ * tlist contains the list of desired columns to be fetched from the foreign
+ * server. For a base relation fpinfo->attrs_used is used to construct the
+ * SELECT clause, so the tlist is ignored for a base relation.
+ *
+ * remote_conds is the list of conditions to be deparsed into the WHERE clause
+ * (or, in the case of upper relations, into the HAVING clause).
+ *
+ * If params_list is not NULL, it receives a list of Params and other-relation
+ * Vars used in the clauses; these values must be transmitted to the remote
+ * server as parameter values.
+ *
+ * If params_list is NULL, we're generating the query for EXPLAIN purposes,
+ * so Params and other-relation Vars should be replaced by dummy values.
+ *
+ * pathkeys is the list of pathkeys to order the result by.
+ *
+ * is_subquery is the flag to indicate whether to deparse the specified
+ * relation as a subquery.
+ *
+ * List of columns selected is returned in retrieved_attrs.
+ */
+void
+deparseSelectStmtForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *rel,
+ List *tlist, List *remote_conds, List *pathkeys,
+ bool has_final_sort, bool has_limit, bool is_subquery,
+ List **retrieved_attrs, List **params_list)
+{
+ deparse_expr_cxt context;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) rel->fdw_private;
+ List *quals;
+
+ /*
+ * We handle relations for foreign tables, joins between those and upper
+ * relations.
+ */
+ Assert(IS_JOIN_REL(rel) || IS_SIMPLE_REL(rel) || IS_UPPER_REL(rel));
+
+ /* Fill portions of context common to upper, join and base relation */
+ context.buf = buf;
+ context.root = root;
+ context.foreignrel = rel;
+ context.scanrel = IS_UPPER_REL(rel) ? fpinfo->outerrel : rel;
+ context.params_list = params_list;
+
+ /* Construct SELECT clause */
+ deparseSelectSql(tlist, is_subquery, retrieved_attrs, &context);
+
+ /*
+ * For upper relations, the WHERE clause is built from the remote
+ * conditions of the underlying scan relation; otherwise, we can use the
+ * supplied list of remote conditions directly.
+ */
+ if (IS_UPPER_REL(rel))
+ {
+ PgFdwRelationInfo *ofpinfo;
+
+ ofpinfo = (PgFdwRelationInfo *) fpinfo->outerrel->fdw_private;
+ quals = ofpinfo->remote_conds;
+ }
+ else
+ quals = remote_conds;
+
+ /* Construct FROM and WHERE clauses */
+ deparseFromExpr(quals, &context);
+
+ if (IS_UPPER_REL(rel))
+ {
+ /* Append GROUP BY clause */
+ appendGroupByClause(tlist, &context);
+
+ /* Append HAVING clause */
+ if (remote_conds)
+ {
+ appendStringInfoString(buf, " HAVING ");
+ appendConditions(remote_conds, &context);
+ }
+ }
+
+ /* Add ORDER BY clause if we found any useful pathkeys */
+ if (pathkeys)
+ appendOrderByClause(pathkeys, has_final_sort, &context);
+
+ /* Add LIMIT clause if necessary */
+ if (has_limit)
+ appendLimitClause(&context);
+
+ /* Add any necessary FOR UPDATE/SHARE. */
+ deparseLockingClause(&context);
+}
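+
+/*
+ * Rough illustration of the output (all object names are made up).  For a
+ * pushed-down aggregate over a single foreign table, the pieces assembled
+ * above produce something like
+ *
+ *   SELECT count(c1), c2 FROM public.t1 WHERE ((c3 > 0)) GROUP BY 2
+ *     HAVING ((count(c1) > 10))
+ *
+ * while for a parameterized scan the comparison value is emitted as $1 and
+ * recorded in *params_list instead of being printed as a literal.
+ */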
+
+/*
+ * Construct a simple SELECT statement that retrieves desired columns
+ * of the specified foreign table, and append it to "buf". The output
+ * contains just "SELECT ... ".
+ *
+ * We also create an integer List of the columns being retrieved, which is
+ * returned to *retrieved_attrs, unless we deparse the specified relation
+ * as a subquery.
+ *
+ * tlist is the list of desired columns. is_subquery is the flag to
+ * indicate whether to deparse the specified relation as a subquery.
+ * Read prologue of deparseSelectStmtForRel() for details.
+ */
+static void
+deparseSelectSql(List *tlist, bool is_subquery, List **retrieved_attrs,
+ deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ RelOptInfo *foreignrel = context->foreignrel;
+ PlannerInfo *root = context->root;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private;
+
+ /*
+ * Construct SELECT list
+ */
+ appendStringInfoString(buf, "SELECT ");
+
+ if (is_subquery)
+ {
+ /*
+ * For a relation that is deparsed as a subquery, emit expressions
+ * specified in the relation's reltarget. Note that since this is for
+ * the subquery, no need to care about *retrieved_attrs.
+ */
+ deparseSubqueryTargetList(context);
+ }
+ else if (IS_JOIN_REL(foreignrel) || IS_UPPER_REL(foreignrel))
+ {
+ /*
+ * For a join or upper relation the input tlist gives the list of
+ * columns required to be fetched from the foreign server.
+ */
+ deparseExplicitTargetList(tlist, false, retrieved_attrs, context);
+ }
+ else
+ {
+ /*
+ * For a base relation fpinfo->attrs_used gives the list of columns
+ * required to be fetched from the foreign server.
+ */
+ RangeTblEntry *rte = planner_rt_fetch(foreignrel->relid, root);
+
+ /*
+ * Core code already has some lock on each rel being planned, so we
+ * can use NoLock here.
+ */
+ Relation rel = table_open(rte->relid, NoLock);
+
+ deparseTargetList(buf, rte, foreignrel->relid, rel, false,
+ fpinfo->attrs_used, false, retrieved_attrs);
+ table_close(rel, NoLock);
+ }
+}
+
+/*
+ * Construct a FROM clause and, if needed, a WHERE clause, and append those to
+ * "buf".
+ *
+ * quals is the list of clauses to be included in the WHERE clause.
+ * (These may or may not include RestrictInfo decoration.)
+ */
+static void
+deparseFromExpr(List *quals, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ RelOptInfo *scanrel = context->scanrel;
+
+ /* For upper relations, scanrel must be either a joinrel or a baserel */
+ Assert(!IS_UPPER_REL(context->foreignrel) ||
+ IS_JOIN_REL(scanrel) || IS_SIMPLE_REL(scanrel));
+
+ /* Construct FROM clause */
+ appendStringInfoString(buf, " FROM ");
+ deparseFromExprForRel(buf, context->root, scanrel,
+ (bms_membership(scanrel->relids) == BMS_MULTIPLE),
+ (Index) 0, NULL, context->params_list);
+
+ /* Construct WHERE clause */
+ if (quals != NIL)
+ {
+ appendStringInfoString(buf, " WHERE ");
+ appendConditions(quals, context);
+ }
+}
+
+/*
+ * Emit a target list that retrieves the columns specified in attrs_used.
+ * This is used for both SELECT and RETURNING targetlists; the is_returning
+ * parameter is true only for a RETURNING targetlist.
+ *
+ * The tlist text is appended to buf, and we also create an integer List
+ * of the columns being retrieved, which is returned to *retrieved_attrs.
+ *
+ * If qualify_col is true, add relation alias before the column name.
+ */
+static void
+deparseTargetList(StringInfo buf,
+ RangeTblEntry *rte,
+ Index rtindex,
+ Relation rel,
+ bool is_returning,
+ Bitmapset *attrs_used,
+ bool qualify_col,
+ List **retrieved_attrs)
+{
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ bool have_wholerow;
+ bool first;
+ int i;
+
+ *retrieved_attrs = NIL;
+
+ /* If there's a whole-row reference, we'll need all the columns. */
+ have_wholerow = bms_is_member(0 - FirstLowInvalidHeapAttributeNumber,
+ attrs_used);
+
+ first = true;
+ for (i = 1; i <= tupdesc->natts; i++)
+ {
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, i - 1);
+
+ /* Ignore dropped attributes. */
+ if (attr->attisdropped)
+ continue;
+
+ if (have_wholerow ||
+ bms_is_member(i - FirstLowInvalidHeapAttributeNumber,
+ attrs_used))
+ {
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ else if (is_returning)
+ appendStringInfoString(buf, " RETURNING ");
+ first = false;
+
+ deparseColumnRef(buf, rtindex, i, rte, qualify_col);
+
+ *retrieved_attrs = lappend_int(*retrieved_attrs, i);
+ }
+ }
+
+ /*
+ * Add ctid if needed. We currently don't support retrieving any other
+ * system columns.
+ */
+ if (bms_is_member(SelfItemPointerAttributeNumber - FirstLowInvalidHeapAttributeNumber,
+ attrs_used))
+ {
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ else if (is_returning)
+ appendStringInfoString(buf, " RETURNING ");
+ first = false;
+
+ if (qualify_col)
+ ADD_REL_QUALIFIER(buf, rtindex);
+ appendStringInfoString(buf, "ctid");
+
+ *retrieved_attrs = lappend_int(*retrieved_attrs,
+ SelfItemPointerAttributeNumber);
+ }
+
+ /* Don't generate bad syntax if no undropped columns */
+ if (first && !is_returning)
+ appendStringInfoString(buf, "NULL");
+}
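+
+/*
+ * For example (attribute numbers are illustrative), a scan of a foreign
+ * table whose second column has been dropped and whose ctid is needed for a
+ * later UPDATE might emit "c1, c3, ctid" here, with *retrieved_attrs set to
+ * (1, 3, -1); if no undropped column is requested at all, the "NULL"
+ * placeholder below keeps the SELECT list syntactically valid (a RETURNING
+ * list is simply omitted in that case).
+ */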
+
+/*
+ * Deparse the appropriate locking clause (FOR UPDATE or FOR SHARE) for a
+ * given relation (context->scanrel).
+ */
+static void
+deparseLockingClause(deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ PlannerInfo *root = context->root;
+ RelOptInfo *rel = context->scanrel;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) rel->fdw_private;
+ int relid = -1;
+
+ while ((relid = bms_next_member(rel->relids, relid)) >= 0)
+ {
+ /*
+ * Ignore relation if it appears in a lower subquery. Locking clause
+ * for such a relation is included in the subquery if necessary.
+ */
+ if (bms_is_member(relid, fpinfo->lower_subquery_rels))
+ continue;
+
+ /*
+ * Add FOR UPDATE/SHARE if appropriate. We apply locking during the
+ * initial row fetch, rather than later on as is done for local
+ * tables. The extra roundtrips involved in trying to duplicate the
+ * local semantics exactly don't seem worthwhile (see also comments
+ * for RowMarkType).
+ *
+ * Note: because we actually run the query as a cursor, this assumes
+ * that DECLARE CURSOR ... FOR UPDATE is supported, which it isn't
+ * before 8.3.
+ */
+ if (bms_is_member(relid, root->all_result_relids) &&
+ (root->parse->commandType == CMD_UPDATE ||
+ root->parse->commandType == CMD_DELETE))
+ {
+ /* Relation is UPDATE/DELETE target, so use FOR UPDATE */
+ appendStringInfoString(buf, " FOR UPDATE");
+
+ /* Add the relation alias if we are here for a join relation */
+ if (IS_JOIN_REL(rel))
+ appendStringInfo(buf, " OF %s%d", REL_ALIAS_PREFIX, relid);
+ }
+ else
+ {
+ PlanRowMark *rc = get_plan_rowmark(root->rowMarks, relid);
+
+ if (rc)
+ {
+ /*
+ * Relation is specified as a FOR UPDATE/SHARE target, so
+ * handle that. (But we could also see LCS_NONE, meaning this
+ * isn't a target relation after all.)
+ *
+ * For now, just ignore any [NO] KEY specification, since (a)
+ * it's not clear what that means for a remote table that we
+ * don't have complete information about, and (b) it wouldn't
+ * work anyway on older remote servers. Likewise, we don't
+ * worry about NOWAIT.
+ */
+ switch (rc->strength)
+ {
+ case LCS_NONE:
+ /* No locking needed */
+ break;
+ case LCS_FORKEYSHARE:
+ case LCS_FORSHARE:
+ appendStringInfoString(buf, " FOR SHARE");
+ break;
+ case LCS_FORNOKEYUPDATE:
+ case LCS_FORUPDATE:
+ appendStringInfoString(buf, " FOR UPDATE");
+ break;
+ }
+
+ /* Add the relation alias if we are here for a join relation */
+ if (bms_membership(rel->relids) == BMS_MULTIPLE &&
+ rc->strength != LCS_NONE)
+ appendStringInfo(buf, " OF %s%d", REL_ALIAS_PREFIX, relid);
+ }
+ }
+ }
+}
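+
+/*
+ * For example, when a pushed-down join is scanned on behalf of an UPDATE or
+ * DELETE of one of its member tables, the text appended here looks like
+ * " FOR UPDATE OF r2" (using the rN aliases from deparseFromExprForRel),
+ * while a plain SELECT ... FOR SHARE on a single foreign table just gets
+ * " FOR SHARE" with no alias.
+ */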
+
+/*
+ * Deparse conditions from the provided list and append them to buf.
+ *
+ * The conditions in the list are assumed to be ANDed. This function is used to
+ * deparse WHERE clauses, JOIN .. ON clauses and HAVING clauses.
+ *
+ * Depending on the caller, the list elements might be either RestrictInfos
+ * or bare clauses.
+ */
+static void
+appendConditions(List *exprs, deparse_expr_cxt *context)
+{
+ int nestlevel;
+ ListCell *lc;
+ bool is_first = true;
+ StringInfo buf = context->buf;
+
+ /* Make sure any constants in the exprs are printed portably */
+ nestlevel = set_transmission_modes();
+
+ foreach(lc, exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+
+ /* Extract clause from RestrictInfo, if required */
+ if (IsA(expr, RestrictInfo))
+ expr = ((RestrictInfo *) expr)->clause;
+
+ /* Connect expressions with "AND" and parenthesize each condition. */
+ if (!is_first)
+ appendStringInfoString(buf, " AND ");
+
+ appendStringInfoChar(buf, '(');
+ deparseExpr(expr, context);
+ appendStringInfoChar(buf, ')');
+
+ is_first = false;
+ }
+
+ reset_transmission_modes(nestlevel);
+}
+
+/* Output join name for given join type */
+const char *
+get_jointype_name(JoinType jointype)
+{
+ switch (jointype)
+ {
+ case JOIN_INNER:
+ return "INNER";
+
+ case JOIN_LEFT:
+ return "LEFT";
+
+ case JOIN_RIGHT:
+ return "RIGHT";
+
+ case JOIN_FULL:
+ return "FULL";
+
+ default:
+ /* Shouldn't come here, but protect from buggy code. */
+ elog(ERROR, "unsupported join type %d", jointype);
+ }
+
+ /* Keep compiler happy */
+ return NULL;
+}
+
+/*
+ * Deparse given targetlist and append it to context->buf.
+ *
+ * tlist is a list of TargetEntrys which in turn contain Var nodes.
+ *
+ * retrieved_attrs is a list of consecutive integers starting from 1, with
+ * the same number of entries as tlist.
+ *
+ * This is used for both SELECT and RETURNING targetlists; the is_returning
+ * parameter is true only for a RETURNING targetlist.
+ */
+static void
+deparseExplicitTargetList(List *tlist,
+ bool is_returning,
+ List **retrieved_attrs,
+ deparse_expr_cxt *context)
+{
+ ListCell *lc;
+ StringInfo buf = context->buf;
+ int i = 0;
+
+ *retrieved_attrs = NIL;
+
+ foreach(lc, tlist)
+ {
+ TargetEntry *tle = lfirst_node(TargetEntry, lc);
+
+ if (i > 0)
+ appendStringInfoString(buf, ", ");
+ else if (is_returning)
+ appendStringInfoString(buf, " RETURNING ");
+
+ deparseExpr((Expr *) tle->expr, context);
+
+ *retrieved_attrs = lappend_int(*retrieved_attrs, i + 1);
+ i++;
+ }
+
+ if (i == 0 && !is_returning)
+ appendStringInfoString(buf, "NULL");
+}
+
+/*
+ * Emit expressions specified in the given relation's reltarget.
+ *
+ * This is used for deparsing the given relation as a subquery.
+ */
+static void
+deparseSubqueryTargetList(deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ RelOptInfo *foreignrel = context->foreignrel;
+ bool first;
+ ListCell *lc;
+
+ /* Should only be called in these cases. */
+ Assert(IS_SIMPLE_REL(foreignrel) || IS_JOIN_REL(foreignrel));
+
+ first = true;
+ foreach(lc, foreignrel->reltarget->exprs)
+ {
+ Node *node = (Node *) lfirst(lc);
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ deparseExpr((Expr *) node, context);
+ }
+
+ /* Don't generate bad syntax if no expressions */
+ if (first)
+ appendStringInfoString(buf, "NULL");
+}
+
+/*
+ * Construct FROM clause for given relation
+ *
+ * The function constructs ... JOIN ... ON ... for a join relation. For a base
+ * relation it just returns the schema-qualified table name, with the appropriate
+ * alias if so requested.
+ *
+ * 'ignore_rel' is either zero or the RT index of a target relation. In the
+ * latter case the function constructs FROM clause of UPDATE or USING clause
+ * of DELETE; it deparses the join relation as if the relation never contained
+ * the target relation, and creates a List of conditions to be deparsed into
+ * the top-level WHERE clause, which is returned to *ignore_conds.
+ */
+static void
+deparseFromExprForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel,
+ bool use_alias, Index ignore_rel, List **ignore_conds,
+ List **params_list)
+{
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private;
+
+ if (IS_JOIN_REL(foreignrel))
+ {
+ StringInfoData join_sql_o;
+ StringInfoData join_sql_i;
+ RelOptInfo *outerrel = fpinfo->outerrel;
+ RelOptInfo *innerrel = fpinfo->innerrel;
+ bool outerrel_is_target = false;
+ bool innerrel_is_target = false;
+
+ if (ignore_rel > 0 && bms_is_member(ignore_rel, foreignrel->relids))
+ {
+ /*
+ * If this is an inner join, add joinclauses to *ignore_conds and
+ * set it to empty so that those can be deparsed into the WHERE
+ * clause. Note that since the target relation can never be
+ * within the nullable side of an outer join, those could safely
+ * be pulled up into the WHERE clause (see foreign_join_ok()).
+ * Note also that since the target relation is only inner-joined
+ * to any other relation in the query, all conditions in the join
+ * tree mentioning the target relation could be deparsed into the
+ * WHERE clause by doing this recursively.
+ */
+ if (fpinfo->jointype == JOIN_INNER)
+ {
+ *ignore_conds = list_concat(*ignore_conds,
+ fpinfo->joinclauses);
+ fpinfo->joinclauses = NIL;
+ }
+
+ /*
+ * Check if either of the input relations is the target relation.
+ */
+ if (outerrel->relid == ignore_rel)
+ outerrel_is_target = true;
+ else if (innerrel->relid == ignore_rel)
+ innerrel_is_target = true;
+ }
+
+ /* Deparse outer relation if not the target relation. */
+ if (!outerrel_is_target)
+ {
+ initStringInfo(&join_sql_o);
+ deparseRangeTblRef(&join_sql_o, root, outerrel,
+ fpinfo->make_outerrel_subquery,
+ ignore_rel, ignore_conds, params_list);
+
+ /*
+ * If inner relation is the target relation, skip deparsing it.
+ * Note that since the join of the target relation with any other
+ * relation in the query is an inner join and can never be within
+ * the nullable side of an outer join, the join could be
+ * interchanged with higher-level joins (cf. identity 1 on outer
+ * join reordering shown in src/backend/optimizer/README), which
+ * means it's safe to skip the target-relation deparsing here.
+ */
+ if (innerrel_is_target)
+ {
+ Assert(fpinfo->jointype == JOIN_INNER);
+ Assert(fpinfo->joinclauses == NIL);
+ appendBinaryStringInfo(buf, join_sql_o.data, join_sql_o.len);
+ return;
+ }
+ }
+
+ /* Deparse inner relation if not the target relation. */
+ if (!innerrel_is_target)
+ {
+ initStringInfo(&join_sql_i);
+ deparseRangeTblRef(&join_sql_i, root, innerrel,
+ fpinfo->make_innerrel_subquery,
+ ignore_rel, ignore_conds, params_list);
+
+ /*
+ * If outer relation is the target relation, skip deparsing it.
+ * See the above note about safety.
+ */
+ if (outerrel_is_target)
+ {
+ Assert(fpinfo->jointype == JOIN_INNER);
+ Assert(fpinfo->joinclauses == NIL);
+ appendBinaryStringInfo(buf, join_sql_i.data, join_sql_i.len);
+ return;
+ }
+ }
+
+ /* Neither of the relations is the target relation. */
+ Assert(!outerrel_is_target && !innerrel_is_target);
+
+ /*
+ * For a join relation FROM clause entry is deparsed as
+ *
+ * ((outer relation) <join type> (inner relation) ON (joinclauses))
+ */
+ appendStringInfo(buf, "(%s %s JOIN %s ON ", join_sql_o.data,
+ get_jointype_name(fpinfo->jointype), join_sql_i.data);
+
+ /* Append join clause; (TRUE) if no join clause */
+ if (fpinfo->joinclauses)
+ {
+ deparse_expr_cxt context;
+
+ context.buf = buf;
+ context.foreignrel = foreignrel;
+ context.scanrel = foreignrel;
+ context.root = root;
+ context.params_list = params_list;
+
+ appendStringInfoChar(buf, '(');
+ appendConditions(fpinfo->joinclauses, &context);
+ appendStringInfoChar(buf, ')');
+ }
+ else
+ appendStringInfoString(buf, "(TRUE)");
+
+ /* End the FROM clause entry. */
+ appendStringInfoChar(buf, ')');
+ }
+ else
+ {
+ RangeTblEntry *rte = planner_rt_fetch(foreignrel->relid, root);
+
+ /*
+ * Core code already has some lock on each rel being planned, so we
+ * can use NoLock here.
+ */
+ Relation rel = table_open(rte->relid, NoLock);
+
+ deparseRelation(buf, rel);
+
+ /*
+ * Add a unique alias to avoid any conflict in relation names due to
+ * pulled up subqueries in the query being built for a pushed down
+ * join.
+ */
+ if (use_alias)
+ appendStringInfo(buf, " %s%d", REL_ALIAS_PREFIX, foreignrel->relid);
+
+ table_close(rel, NoLock);
+ }
+}
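+
+/*
+ * Illustration of the join form (object names are made up): a pushed-down
+ * inner join of two foreign tables is emitted as
+ *
+ *   (public.t1 r1 INNER JOIN public.t2 r2 ON (((r1.c1 = r2.c1))))
+ *
+ * and "ON (TRUE)" takes the place of the join condition when the join has
+ * no join clauses (e.g. a cross join).
+ */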
+
+/*
+ * Append FROM clause entry for the given relation into buf.
+ */
+static void
+deparseRangeTblRef(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel,
+ bool make_subquery, Index ignore_rel, List **ignore_conds,
+ List **params_list)
+{
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private;
+
+ /* Should only be called in these cases. */
+ Assert(IS_SIMPLE_REL(foreignrel) || IS_JOIN_REL(foreignrel));
+
+ Assert(fpinfo->local_conds == NIL);
+
+ /* If make_subquery is true, deparse the relation as a subquery. */
+ if (make_subquery)
+ {
+ List *retrieved_attrs;
+ int ncols;
+
+ /*
+ * The given relation shouldn't contain the target relation, because
+ * this should only happen for input relations for a full join, and
+ * such relations can never contain an UPDATE/DELETE target.
+ */
+ Assert(ignore_rel == 0 ||
+ !bms_is_member(ignore_rel, foreignrel->relids));
+
+ /* Deparse the subquery representing the relation. */
+ appendStringInfoChar(buf, '(');
+ deparseSelectStmtForRel(buf, root, foreignrel, NIL,
+ fpinfo->remote_conds, NIL,
+ false, false, true,
+ &retrieved_attrs, params_list);
+ appendStringInfoChar(buf, ')');
+
+ /* Append the relation alias. */
+ appendStringInfo(buf, " %s%d", SUBQUERY_REL_ALIAS_PREFIX,
+ fpinfo->relation_index);
+
+ /*
+ * Append the column aliases if needed. Note that the subquery emits
+ * expressions specified in the relation's reltarget (see
+ * deparseSubqueryTargetList).
+ */
+ ncols = list_length(foreignrel->reltarget->exprs);
+ if (ncols > 0)
+ {
+ int i;
+
+ appendStringInfoChar(buf, '(');
+ for (i = 1; i <= ncols; i++)
+ {
+ if (i > 1)
+ appendStringInfoString(buf, ", ");
+
+ appendStringInfo(buf, "%s%d", SUBQUERY_COL_ALIAS_PREFIX, i);
+ }
+ appendStringInfoChar(buf, ')');
+ }
+ }
+ else
+ deparseFromExprForRel(buf, root, foreignrel, true, ignore_rel,
+ ignore_conds, params_list);
+}
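+
+/*
+ * When make_subquery is true, the FROM entry produced above has the shape
+ * (the column count depends on the relation's reltarget)
+ *
+ *   (SELECT ... FROM ... WHERE ...) sN(c1, c2, ...)
+ *
+ * where N is fpinfo->relation_index; the outer query can then refer to this
+ * relation's outputs as sN.c1, sN.c2, etc., which is what the subquery
+ * helper functions declared near the top of this file resolve.
+ */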
+
+/*
+ * deparse remote INSERT statement
+ *
+ * The statement text is appended to buf, and we also create an integer List
+ * of the columns being retrieved by WITH CHECK OPTION or RETURNING (if any),
+ * which is returned to *retrieved_attrs.
+ *
+ * This also stores the end position of the VALUES clause, so that we can rebuild
+ * an INSERT for a batch of rows later.
+ */
+void
+deparseInsertSql(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ List *targetAttrs, bool doNothing,
+ List *withCheckOptionList, List *returningList,
+ List **retrieved_attrs, int *values_end_len)
+{
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ AttrNumber pindex;
+ bool first;
+ ListCell *lc;
+
+ appendStringInfoString(buf, "INSERT INTO ");
+ deparseRelation(buf, rel);
+
+ if (targetAttrs)
+ {
+ appendStringInfoChar(buf, '(');
+
+ first = true;
+ foreach(lc, targetAttrs)
+ {
+ int attnum = lfirst_int(lc);
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ deparseColumnRef(buf, rtindex, attnum, rte, false);
+ }
+
+ appendStringInfoString(buf, ") VALUES (");
+
+ pindex = 1;
+ first = true;
+ foreach(lc, targetAttrs)
+ {
+ int attnum = lfirst_int(lc);
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1);
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ if (attr->attgenerated)
+ appendStringInfoString(buf, "DEFAULT");
+ else
+ {
+ appendStringInfo(buf, "$%d", pindex);
+ pindex++;
+ }
+ }
+
+ appendStringInfoChar(buf, ')');
+ }
+ else
+ appendStringInfoString(buf, " DEFAULT VALUES");
+ *values_end_len = buf->len;
+
+ if (doNothing)
+ appendStringInfoString(buf, " ON CONFLICT DO NOTHING");
+
+ deparseReturningList(buf, rte, rtindex, rel,
+ rel->trigdesc && rel->trigdesc->trig_insert_after_row,
+ withCheckOptionList, returningList, retrieved_attrs);
+}
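+
+/*
+ * Example of the generated text (object names are made up):
+ *
+ *   INSERT INTO public.t1(c1, c2) VALUES ($1, $2)
+ *     ON CONFLICT DO NOTHING RETURNING c1, c2
+ *
+ * *values_end_len points just past the first VALUES row (or past
+ * "DEFAULT VALUES"), which is where rebuildInsertSql() below splices in
+ * additional rows for batched inserts.
+ */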
+
+/*
+ * rebuild remote INSERT statement
+ *
+ * Given the number of rows in a batch, build an INSERT statement with the
+ * right number of parameters.
+ */
+void
+rebuildInsertSql(StringInfo buf, Relation rel,
+ char *orig_query, List *target_attrs,
+ int values_end_len, int num_params,
+ int num_rows)
+{
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ int i;
+ int pindex;
+ bool first;
+ ListCell *lc;
+
+ /* Make sure the values_end_len is sensible */
+ Assert((values_end_len > 0) && (values_end_len <= strlen(orig_query)));
+
+ /* Copy up to the end of the first record from the original query */
+ appendBinaryStringInfo(buf, orig_query, values_end_len);
+
+ /*
+ * Add records to VALUES clause (we already have parameters for the first
+ * row, so start at the right offset).
+ */
+ pindex = num_params + 1;
+ for (i = 0; i < num_rows; i++)
+ {
+ appendStringInfoString(buf, ", (");
+
+ first = true;
+ foreach(lc, target_attrs)
+ {
+ int attnum = lfirst_int(lc);
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1);
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ if (attr->attgenerated)
+ appendStringInfoString(buf, "DEFAULT");
+ else
+ {
+ appendStringInfo(buf, "$%d", pindex);
+ pindex++;
+ }
+ }
+
+ appendStringInfoChar(buf, ')');
+ }
+
+ /* Copy stuff after VALUES clause from the original query */
+ appendStringInfoString(buf, orig_query + values_end_len);
+}
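+
+/*
+ * For example, if the original query text was
+ *
+ *   INSERT INTO public.t1(c1, c2) VALUES ($1, $2) RETURNING c1
+ *
+ * with num_params = 2, then requesting two additional rows rebuilds it as
+ *
+ *   INSERT INTO public.t1(c1, c2) VALUES ($1, $2), ($3, $4), ($5, $6) RETURNING c1
+ *
+ * i.e. each loop iteration appends one more parenthesized row, and the tail
+ * of the original statement is copied back unchanged.
+ */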
+
+/*
+ * deparse remote UPDATE statement
+ *
+ * The statement text is appended to buf, and we also create an integer List
+ * of the columns being retrieved by WITH CHECK OPTION or RETURNING (if any),
+ * which is returned to *retrieved_attrs.
+ */
+void
+deparseUpdateSql(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ List *targetAttrs,
+ List *withCheckOptionList, List *returningList,
+ List **retrieved_attrs)
+{
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ AttrNumber pindex;
+ bool first;
+ ListCell *lc;
+
+ appendStringInfoString(buf, "UPDATE ");
+ deparseRelation(buf, rel);
+ appendStringInfoString(buf, " SET ");
+
+ pindex = 2; /* ctid is always the first param */
+ first = true;
+ foreach(lc, targetAttrs)
+ {
+ int attnum = lfirst_int(lc);
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1);
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ deparseColumnRef(buf, rtindex, attnum, rte, false);
+ if (attr->attgenerated)
+ appendStringInfoString(buf, " = DEFAULT");
+ else
+ {
+ appendStringInfo(buf, " = $%d", pindex);
+ pindex++;
+ }
+ }
+ appendStringInfoString(buf, " WHERE ctid = $1");
+
+ deparseReturningList(buf, rte, rtindex, rel,
+ rel->trigdesc && rel->trigdesc->trig_update_after_row,
+ withCheckOptionList, returningList, retrieved_attrs);
+}
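+
+/*
+ * Example of the generated text (object names are made up):
+ *
+ *   UPDATE public.t1 SET c2 = $2, c3 = $3 WHERE ctid = $1 RETURNING c1, c2, c3
+ *
+ * $1 is always reserved for the ctid located by the foreign scan, and
+ * generated columns are written as "col = DEFAULT" instead of consuming a
+ * parameter.
+ */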
+
+/*
+ * deparse remote UPDATE statement
+ *
+ * 'buf' is the output buffer to append the statement to
+ * 'rtindex' is the RT index of the associated target relation
+ * 'rel' is the relation descriptor for the target relation
+ * 'foreignrel' is the RelOptInfo for the target relation or the join relation
+ * containing all base relations in the query
+ * 'targetlist' is the tlist of the underlying foreign-scan plan node
+ * (note that this only contains new-value expressions and junk attrs)
+ * 'targetAttrs' is the target columns of the UPDATE
+ * 'remote_conds' is the qual clauses that must be evaluated remotely
+ * '*params_list' is an output list of exprs that will become remote Params
+ * 'returningList' is the RETURNING targetlist
+ * '*retrieved_attrs' is an output list of integers of columns being retrieved
+ * by RETURNING (if any)
+ */
+void
+deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ RelOptInfo *foreignrel,
+ List *targetlist,
+ List *targetAttrs,
+ List *remote_conds,
+ List **params_list,
+ List *returningList,
+ List **retrieved_attrs)
+{
+ deparse_expr_cxt context;
+ int nestlevel;
+ bool first;
+ RangeTblEntry *rte = planner_rt_fetch(rtindex, root);
+ ListCell *lc,
+ *lc2;
+
+ /* Set up context struct for recursion */
+ context.root = root;
+ context.foreignrel = foreignrel;
+ context.scanrel = foreignrel;
+ context.buf = buf;
+ context.params_list = params_list;
+
+ appendStringInfoString(buf, "UPDATE ");
+ deparseRelation(buf, rel);
+ if (foreignrel->reloptkind == RELOPT_JOINREL)
+ appendStringInfo(buf, " %s%d", REL_ALIAS_PREFIX, rtindex);
+ appendStringInfoString(buf, " SET ");
+
+ /* Make sure any constants in the exprs are printed portably */
+ nestlevel = set_transmission_modes();
+
+ first = true;
+ forboth(lc, targetlist, lc2, targetAttrs)
+ {
+ TargetEntry *tle = lfirst_node(TargetEntry, lc);
+ int attnum = lfirst_int(lc2);
+
+ /* update's new-value expressions shouldn't be resjunk */
+ Assert(!tle->resjunk);
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ deparseColumnRef(buf, rtindex, attnum, rte, false);
+ appendStringInfoString(buf, " = ");
+ deparseExpr((Expr *) tle->expr, &context);
+ }
+
+ reset_transmission_modes(nestlevel);
+
+ if (foreignrel->reloptkind == RELOPT_JOINREL)
+ {
+ List *ignore_conds = NIL;
+
+ appendStringInfoString(buf, " FROM ");
+ deparseFromExprForRel(buf, root, foreignrel, true, rtindex,
+ &ignore_conds, params_list);
+ remote_conds = list_concat(remote_conds, ignore_conds);
+ }
+
+ if (remote_conds)
+ {
+ appendStringInfoString(buf, " WHERE ");
+ appendConditions(remote_conds, &context);
+ }
+
+ if (foreignrel->reloptkind == RELOPT_JOINREL)
+ deparseExplicitTargetList(returningList, true, retrieved_attrs,
+ &context);
+ else
+ deparseReturningList(buf, rte, rtindex, rel, false,
+ NIL, returningList, retrieved_attrs);
+}
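+
+/*
+ * For a pushed-down update of a join, the output looks roughly like
+ * (object names are made up)
+ *
+ *   UPDATE public.t1 r1 SET c2 = (r1.c2 + 1) FROM public.t2 r2
+ *     WHERE ((r1.c1 = r2.c1)) RETURNING r1.c1, r1.c2
+ *
+ * For a single foreign table no alias or FROM clause is added, and the
+ * new-value expressions are deparsed inline rather than passed as $n
+ * parameters as in deparseUpdateSql() above.
+ */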
+
+/*
+ * deparse remote DELETE statement
+ *
+ * The statement text is appended to buf, and we also create an integer List
+ * of the columns being retrieved by RETURNING (if any), which is returned
+ * to *retrieved_attrs.
+ */
+void
+deparseDeleteSql(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ List *returningList,
+ List **retrieved_attrs)
+{
+ appendStringInfoString(buf, "DELETE FROM ");
+ deparseRelation(buf, rel);
+ appendStringInfoString(buf, " WHERE ctid = $1");
+
+ deparseReturningList(buf, rte, rtindex, rel,
+ rel->trigdesc && rel->trigdesc->trig_delete_after_row,
+ NIL, returningList, retrieved_attrs);
+}
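+
+/*
+ * Example of the generated text (object names are made up):
+ *
+ *   DELETE FROM public.t1 WHERE ctid = $1 RETURNING c1, c2
+ *
+ * with RETURNING present only when an AFTER ROW trigger or a RETURNING list
+ * requires values back from the remote side.
+ */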
+
+/*
+ * deparse remote DELETE statement
+ *
+ * 'buf' is the output buffer to append the statement to
+ * 'rtindex' is the RT index of the associated target relation
+ * 'rel' is the relation descriptor for the target relation
+ * 'foreignrel' is the RelOptInfo for the target relation or the join relation
+ * containing all base relations in the query
+ * 'remote_conds' is the qual clauses that must be evaluated remotely
+ * '*params_list' is an output list of exprs that will become remote Params
+ * 'returningList' is the RETURNING targetlist
+ * '*retrieved_attrs' is an output list of integers of columns being retrieved
+ * by RETURNING (if any)
+ */
+void
+deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ RelOptInfo *foreignrel,
+ List *remote_conds,
+ List **params_list,
+ List *returningList,
+ List **retrieved_attrs)
+{
+ deparse_expr_cxt context;
+
+ /* Set up context struct for recursion */
+ context.root = root;
+ context.foreignrel = foreignrel;
+ context.scanrel = foreignrel;
+ context.buf = buf;
+ context.params_list = params_list;
+
+ appendStringInfoString(buf, "DELETE FROM ");
+ deparseRelation(buf, rel);
+ if (foreignrel->reloptkind == RELOPT_JOINREL)
+ appendStringInfo(buf, " %s%d", REL_ALIAS_PREFIX, rtindex);
+
+ if (foreignrel->reloptkind == RELOPT_JOINREL)
+ {
+ List *ignore_conds = NIL;
+
+ appendStringInfoString(buf, " USING ");
+ deparseFromExprForRel(buf, root, foreignrel, true, rtindex,
+ &ignore_conds, params_list);
+ remote_conds = list_concat(remote_conds, ignore_conds);
+ }
+
+ if (remote_conds)
+ {
+ appendStringInfoString(buf, " WHERE ");
+ appendConditions(remote_conds, &context);
+ }
+
+ if (foreignrel->reloptkind == RELOPT_JOINREL)
+ deparseExplicitTargetList(returningList, true, retrieved_attrs,
+ &context);
+ else
+ deparseReturningList(buf, planner_rt_fetch(rtindex, root),
+ rtindex, rel, false,
+ NIL, returningList, retrieved_attrs);
+}
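+
+/*
+ * Example of the SQL produced by the function above: a qualifying
+ *     DELETE FROM ft1 WHERE c1 = 10
+ * is deparsed into remote SQL roughly of the form
+ *     DELETE FROM "S 1"."T 1" WHERE (("C 1" = 10))
+ */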
+
+/*
+ * Add a RETURNING clause, if needed, to an INSERT/UPDATE/DELETE.
+ */
+static void
+deparseReturningList(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ bool trig_after_row,
+ List *withCheckOptionList,
+ List *returningList,
+ List **retrieved_attrs)
+{
+ Bitmapset *attrs_used = NULL;
+
+ if (trig_after_row)
+ {
+ /* whole-row reference acquires all non-system columns */
+ attrs_used =
+ bms_make_singleton(0 - FirstLowInvalidHeapAttributeNumber);
+ }
+
+ if (withCheckOptionList != NIL)
+ {
+ /*
+ * We need the attrs, non-system and system, mentioned in the local
+ * query's WITH CHECK OPTION list.
+ *
+ * Note: we do this to ensure that WCO constraints will be evaluated
+ * on the data actually inserted/updated on the remote side, which
+ * might differ from the data supplied by the core code, for example
+ * as a result of remote triggers.
+ */
+ pull_varattnos((Node *) withCheckOptionList, rtindex,
+ &attrs_used);
+ }
+
+ if (returningList != NIL)
+ {
+ /*
+ * We need the attrs, non-system and system, mentioned in the local
+ * query's RETURNING list.
+ */
+ pull_varattnos((Node *) returningList, rtindex,
+ &attrs_used);
+ }
+
+ if (attrs_used != NULL)
+ deparseTargetList(buf, rte, rtindex, rel, true, attrs_used, false,
+ retrieved_attrs);
+ else
+ *retrieved_attrs = NIL;
+}
+
+/*
+ * Construct SELECT statement to acquire size in blocks of given relation.
+ *
+ * Note: we use the local definition of block size (BLCKSZ), not the
+ * remote server's.  This is perhaps debatable.
+ *
+ * Note: pg_relation_size() exists in 8.1 and later.
+ */
+void
+deparseAnalyzeSizeSql(StringInfo buf, Relation rel)
+{
+ StringInfoData relname;
+
+ /* We'll need the remote relation name as a literal. */
+ initStringInfo(&relname);
+ deparseRelation(&relname, rel);
+
+ appendStringInfoString(buf, "SELECT pg_catalog.pg_relation_size(");
+ deparseStringLiteral(buf, relname.data);
+ appendStringInfo(buf, "::pg_catalog.regclass) / %d", BLCKSZ);
+}
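+
+/*
+ * Example of the SQL produced by the function above for "S 1"."T 1",
+ * assuming the default 8kB local block size:
+ *     SELECT pg_catalog.pg_relation_size('"S 1"."T 1"'::pg_catalog.regclass) / 8192
+ */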
+
+/*
+ * Construct SELECT statement to acquire sample rows of given relation.
+ *
+ * SELECT command is appended to buf, and list of columns retrieved
+ * is returned to *retrieved_attrs.
+ */
+void
+deparseAnalyzeSql(StringInfo buf, Relation rel, List **retrieved_attrs)
+{
+ Oid relid = RelationGetRelid(rel);
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ int i;
+ char *colname;
+ List *options;
+ ListCell *lc;
+ bool first = true;
+
+ *retrieved_attrs = NIL;
+
+ appendStringInfoString(buf, "SELECT ");
+ for (i = 0; i < tupdesc->natts; i++)
+ {
+ /* Ignore dropped columns. */
+ if (TupleDescAttr(tupdesc, i)->attisdropped)
+ continue;
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ /* Use attribute name or column_name option. */
+ colname = NameStr(TupleDescAttr(tupdesc, i)->attname);
+ options = GetForeignColumnOptions(relid, i + 1);
+
+ foreach(lc, options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "column_name") == 0)
+ {
+ colname = defGetString(def);
+ break;
+ }
+ }
+
+ appendStringInfoString(buf, quote_identifier(colname));
+
+ *retrieved_attrs = lappend_int(*retrieved_attrs, i + 1);
+ }
+
+ /* Don't generate bad syntax for zero-column relation. */
+ if (first)
+ appendStringInfoString(buf, "NULL");
+
+ /*
+ * Construct FROM clause
+ */
+ appendStringInfoString(buf, " FROM ");
+ deparseRelation(buf, rel);
+}
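+
+/*
+ * Example of the SQL produced by the function above for ft1 (whose
+ * column_name option maps c1 to "C 1"):
+ *     SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+ */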
+
+/*
+ * Construct a simple "TRUNCATE rel [, ...]" statement with the requested
+ * IDENTITY and CASCADE/RESTRICT behavior.
+ */
+void
+deparseTruncateSql(StringInfo buf,
+ List *rels,
+ DropBehavior behavior,
+ bool restart_seqs)
+{
+ ListCell *cell;
+
+ appendStringInfoString(buf, "TRUNCATE ");
+
+ foreach(cell, rels)
+ {
+ Relation rel = lfirst(cell);
+
+ if (cell != list_head(rels))
+ appendStringInfoString(buf, ", ");
+
+ deparseRelation(buf, rel);
+ }
+
+ appendStringInfo(buf, " %s IDENTITY",
+ restart_seqs ? "RESTART" : "CONTINUE");
+
+ if (behavior == DROP_RESTRICT)
+ appendStringInfoString(buf, " RESTRICT");
+ else if (behavior == DROP_CASCADE)
+ appendStringInfoString(buf, " CASCADE");
+}
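+
+/*
+ * Example of the SQL produced by the function above when called with
+ * DROP_RESTRICT and restart_seqs = false for a single relation:
+ *     TRUNCATE "S 1"."T 1" CONTINUE IDENTITY RESTRICT
+ */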
+
+/*
+ * Construct name to use for given column, and emit it into buf.
+ * If it has a column_name FDW option, use that instead of attribute name.
+ *
+ * If qualify_col is true, qualify column name with the alias of relation.
+ */
+static void
+deparseColumnRef(StringInfo buf, int varno, int varattno, RangeTblEntry *rte,
+ bool qualify_col)
+{
+ /* We support fetching the remote side's CTID and OID. */
+ if (varattno == SelfItemPointerAttributeNumber)
+ {
+ if (qualify_col)
+ ADD_REL_QUALIFIER(buf, varno);
+ appendStringInfoString(buf, "ctid");
+ }
+ else if (varattno < 0)
+ {
+ /*
+ * All other system attributes are fetched as 0, except for table OID,
+ * which is fetched as the local table OID. However, we must be
+ * careful; the table could be beneath an outer join, in which case it
+ * must go to NULL whenever the rest of the row does.
+ */
+ Oid fetchval = 0;
+
+ if (varattno == TableOidAttributeNumber)
+ fetchval = rte->relid;
+
+ if (qualify_col)
+ {
+ appendStringInfoString(buf, "CASE WHEN (");
+ ADD_REL_QUALIFIER(buf, varno);
+ appendStringInfo(buf, "*)::text IS NOT NULL THEN %u END", fetchval);
+ }
+ else
+ appendStringInfo(buf, "%u", fetchval);
+ }
+ else if (varattno == 0)
+ {
+ /* Whole row reference */
+ Relation rel;
+ Bitmapset *attrs_used;
+
+ /* Required only to be passed down to deparseTargetList(). */
+ List *retrieved_attrs;
+
+ /*
+ * The lock on the relation will be held by upper callers, so it's
+ * fine to open it with no lock here.
+ */
+ rel = table_open(rte->relid, NoLock);
+
+ /*
+ * The local name of the foreign table cannot be recognized by the
+ * foreign server, and the table it references on the foreign server
+ * might have different column ordering or different columns than
+ * those declared locally. Hence we have to deparse the whole-row
+ * reference as ROW(columns referenced locally). Construct this by
+ * deparsing a "whole row" attribute.
+ */
+ attrs_used = bms_add_member(NULL,
+ 0 - FirstLowInvalidHeapAttributeNumber);
+
+ /*
+ * If the whole-row reference is under an outer join, it has to go to
+ * NULL whenever the rest of the row does. Deparsing a join query
+ * always involves multiple relations, so qualify_col will be true
+ * here.
+ */
+ if (qualify_col)
+ {
+ appendStringInfoString(buf, "CASE WHEN (");
+ ADD_REL_QUALIFIER(buf, varno);
+ appendStringInfoString(buf, "*)::text IS NOT NULL THEN ");
+ }
+
+ appendStringInfoString(buf, "ROW(");
+ deparseTargetList(buf, rte, varno, rel, false, attrs_used, qualify_col,
+ &retrieved_attrs);
+ appendStringInfoChar(buf, ')');
+
+ /* Complete the CASE WHEN statement started above. */
+ if (qualify_col)
+ appendStringInfoString(buf, " END");
+
+ table_close(rel, NoLock);
+ bms_free(attrs_used);
+ }
+ else
+ {
+ char *colname = NULL;
+ List *options;
+ ListCell *lc;
+
+ /* varno must not be any of OUTER_VAR, INNER_VAR and INDEX_VAR. */
+ Assert(!IS_SPECIAL_VARNO(varno));
+
+ /*
+ * If it's a column of a foreign table, and it has the column_name FDW
+ * option, use that value.
+ */
+ options = GetForeignColumnOptions(rte->relid, varattno);
+ foreach(lc, options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "column_name") == 0)
+ {
+ colname = defGetString(def);
+ break;
+ }
+ }
+
+ /*
+ * If it's a column of a regular table or it doesn't have column_name
+ * FDW option, use attribute name.
+ */
+ if (colname == NULL)
+ colname = get_attname(rte->relid, varattno, false);
+
+ if (qualify_col)
+ ADD_REL_QUALIFIER(buf, varno);
+
+ appendStringInfoString(buf, quote_identifier(colname));
+ }
+}
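+
+/*
+ * Examples of the output: a column whose column_name option is "C 1" is
+ * emitted as "C 1" (or r1."C 1" when qualified in a join), while a
+ * whole-row reference under a join is emitted roughly as
+ *     CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, ...) END
+ */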
+
+/*
+ * Append remote name of specified foreign table to buf.
+ * Use value of table_name FDW option (if any) instead of relation's name.
+ * Similarly, schema_name FDW option overrides schema name.
+ */
+static void
+deparseRelation(StringInfo buf, Relation rel)
+{
+ ForeignTable *table;
+ const char *nspname = NULL;
+ const char *relname = NULL;
+ ListCell *lc;
+
+ /* obtain additional catalog information. */
+ table = GetForeignTable(RelationGetRelid(rel));
+
+ /*
+ * Use value of FDW options if any, instead of the name of object itself.
+ */
+ foreach(lc, table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "schema_name") == 0)
+ nspname = defGetString(def);
+ else if (strcmp(def->defname, "table_name") == 0)
+ relname = defGetString(def);
+ }
+
+ /*
+ * Note: we could skip printing the schema name if it's pg_catalog, but
+ * that doesn't seem worth the trouble.
+ */
+ if (nspname == NULL)
+ nspname = get_namespace_name(RelationGetNamespace(rel));
+ if (relname == NULL)
+ relname = RelationGetRelationName(rel);
+
+ appendStringInfo(buf, "%s.%s",
+ quote_identifier(nspname), quote_identifier(relname));
+}
+
+/*
+ * Append a SQL string literal representing "val" to buf.
+ */
+void
+deparseStringLiteral(StringInfo buf, const char *val)
+{
+ const char *valptr;
+
+ /*
+ * Rather than making assumptions about the remote server's value of
+ * standard_conforming_strings, always use E'foo' syntax if there are any
+ * backslashes. This will fail on remote servers before 8.1, but those
+ * are long out of support.
+ */
+ if (strchr(val, '\\') != NULL)
+ appendStringInfoChar(buf, ESCAPE_STRING_SYNTAX);
+ appendStringInfoChar(buf, '\'');
+ for (valptr = val; *valptr; valptr++)
+ {
+ char ch = *valptr;
+
+ if (SQL_STR_DOUBLE(ch, true))
+ appendStringInfoChar(buf, ch);
+ appendStringInfoChar(buf, ch);
+ }
+ appendStringInfoChar(buf, '\'');
+}
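+
+/*
+ * Examples of the output: the value O'Reilly is emitted as 'O''Reilly',
+ * and a value containing a backslash, such as a\b, is emitted as E'a\\b'.
+ */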
+
+/*
+ * Deparse given expression into context->buf.
+ *
+ * This function must support all the same node types that foreign_expr_walker
+ * accepts.
+ *
+ * Note: unlike ruleutils.c, we just use a simple hard-wired parenthesization
+ * scheme: anything more complex than a Var, Const, function call or cast
+ * should be self-parenthesized.
+ */
+static void
+deparseExpr(Expr *node, deparse_expr_cxt *context)
+{
+ if (node == NULL)
+ return;
+
+ switch (nodeTag(node))
+ {
+ case T_Var:
+ deparseVar((Var *) node, context);
+ break;
+ case T_Const:
+ deparseConst((Const *) node, context, 0);
+ break;
+ case T_Param:
+ deparseParam((Param *) node, context);
+ break;
+ case T_SubscriptingRef:
+ deparseSubscriptingRef((SubscriptingRef *) node, context);
+ break;
+ case T_FuncExpr:
+ deparseFuncExpr((FuncExpr *) node, context);
+ break;
+ case T_OpExpr:
+ deparseOpExpr((OpExpr *) node, context);
+ break;
+ case T_DistinctExpr:
+ deparseDistinctExpr((DistinctExpr *) node, context);
+ break;
+ case T_ScalarArrayOpExpr:
+ deparseScalarArrayOpExpr((ScalarArrayOpExpr *) node, context);
+ break;
+ case T_RelabelType:
+ deparseRelabelType((RelabelType *) node, context);
+ break;
+ case T_BoolExpr:
+ deparseBoolExpr((BoolExpr *) node, context);
+ break;
+ case T_NullTest:
+ deparseNullTest((NullTest *) node, context);
+ break;
+ case T_ArrayExpr:
+ deparseArrayExpr((ArrayExpr *) node, context);
+ break;
+ case T_Aggref:
+ deparseAggref((Aggref *) node, context);
+ break;
+ default:
+ elog(ERROR, "unsupported expression type for deparse: %d",
+ (int) nodeTag(node));
+ break;
+ }
+}
+
+/*
+ * Deparse given Var node into context->buf.
+ *
+ * If the Var belongs to the foreign relation, just print its remote name.
+ * Otherwise, it's effectively a Param (and will in fact be a Param at
+ * run time). Handle it the same way we handle plain Params --- see
+ * deparseParam for comments.
+ */
+static void
+deparseVar(Var *node, deparse_expr_cxt *context)
+{
+ Relids relids = context->scanrel->relids;
+ int relno;
+ int colno;
+
+ /* Qualify columns when multiple relations are involved. */
+ bool qualify_col = (bms_membership(relids) == BMS_MULTIPLE);
+
+ /*
+ * If the Var belongs to a foreign relation that is deparsed as a
+ * subquery, use the relation and column aliases that the subquery
+ * provides for the Var, instead of the remote name.
+ */
+ if (is_subquery_var(node, context->scanrel, &relno, &colno))
+ {
+ appendStringInfo(context->buf, "%s%d.%s%d",
+ SUBQUERY_REL_ALIAS_PREFIX, relno,
+ SUBQUERY_COL_ALIAS_PREFIX, colno);
+ return;
+ }
+
+ if (bms_is_member(node->varno, relids) && node->varlevelsup == 0)
+ deparseColumnRef(context->buf, node->varno, node->varattno,
+ planner_rt_fetch(node->varno, context->root),
+ qualify_col);
+ else
+ {
+ /* Treat like a Param */
+ if (context->params_list)
+ {
+ int pindex = 0;
+ ListCell *lc;
+
+ /* find its index in params_list */
+ foreach(lc, *context->params_list)
+ {
+ pindex++;
+ if (equal(node, (Node *) lfirst(lc)))
+ break;
+ }
+ if (lc == NULL)
+ {
+ /* not in list, so add it */
+ pindex++;
+ *context->params_list = lappend(*context->params_list, node);
+ }
+
+ printRemoteParam(pindex, node->vartype, node->vartypmod, context);
+ }
+ else
+ {
+ printRemotePlaceholder(node->vartype, node->vartypmod, context);
+ }
+ }
+}
+
+/*
+ * Deparse given constant value into context->buf.
+ *
+ * This function has to be kept in sync with ruleutils.c's get_const_expr.
+ * As for that function, showtype can be -1 to never show "::typename" decoration,
+ * or +1 to always show it, or 0 to show it only if the constant wouldn't be assumed
+ * to be the right type by default.
+ */
+static void
+deparseConst(Const *node, deparse_expr_cxt *context, int showtype)
+{
+ StringInfo buf = context->buf;
+ Oid typoutput;
+ bool typIsVarlena;
+ char *extval;
+ bool isfloat = false;
+ bool needlabel;
+
+ if (node->constisnull)
+ {
+ appendStringInfoString(buf, "NULL");
+ if (showtype >= 0)
+ appendStringInfo(buf, "::%s",
+ deparse_type_name(node->consttype,
+ node->consttypmod));
+ return;
+ }
+
+ getTypeOutputInfo(node->consttype,
+ &typoutput, &typIsVarlena);
+ extval = OidOutputFunctionCall(typoutput, node->constvalue);
+
+ switch (node->consttype)
+ {
+ case INT2OID:
+ case INT4OID:
+ case INT8OID:
+ case OIDOID:
+ case FLOAT4OID:
+ case FLOAT8OID:
+ case NUMERICOID:
+ {
+ /*
+ * No need to quote unless it's a special value such as 'NaN'.
+ * See comments in get_const_expr().
+ */
+ if (strspn(extval, "0123456789+-eE.") == strlen(extval))
+ {
+ if (extval[0] == '+' || extval[0] == '-')
+ appendStringInfo(buf, "(%s)", extval);
+ else
+ appendStringInfoString(buf, extval);
+ if (strcspn(extval, "eE.") != strlen(extval))
+ isfloat = true; /* it looks like a float */
+ }
+ else
+ appendStringInfo(buf, "'%s'", extval);
+ }
+ break;
+ case BITOID:
+ case VARBITOID:
+ appendStringInfo(buf, "B'%s'", extval);
+ break;
+ case BOOLOID:
+ if (strcmp(extval, "t") == 0)
+ appendStringInfoString(buf, "true");
+ else
+ appendStringInfoString(buf, "false");
+ break;
+ default:
+ deparseStringLiteral(buf, extval);
+ break;
+ }
+
+ pfree(extval);
+
+ if (showtype < 0)
+ return;
+
+ /*
+ * For showtype == 0, append ::typename unless the constant will be
+ * implicitly typed as the right type when it is read in.
+ *
+ * XXX this code has to be kept in sync with the behavior of the parser,
+ * especially make_const.
+ */
+ switch (node->consttype)
+ {
+ case BOOLOID:
+ case INT4OID:
+ case UNKNOWNOID:
+ needlabel = false;
+ break;
+ case NUMERICOID:
+ needlabel = !isfloat || (node->consttypmod >= 0);
+ break;
+ default:
+ needlabel = true;
+ break;
+ }
+ if (needlabel || showtype > 0)
+ appendStringInfo(buf, "::%s",
+ deparse_type_name(node->consttype,
+ node->consttypmod));
+}
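+
+/*
+ * Examples of the output with showtype == 0: the int4 constant 101 is
+ * emitted as just 101, while the text constant '1' becomes '1'::text,
+ * since without the cast the remote parser would treat it as type unknown.
+ * A numeric constant with a leading sign is wrapped in parentheses,
+ * e.g. (-2.5).
+ */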
+
+/*
+ * Deparse given Param node.
+ *
+ * If we're generating the query "for real", add the Param to
+ * context->params_list if it's not already present, and then use its index
+ * in that list as the remote parameter number. During EXPLAIN, there's
+ * no need to identify a parameter number.
+ */
+static void
+deparseParam(Param *node, deparse_expr_cxt *context)
+{
+ if (context->params_list)
+ {
+ int pindex = 0;
+ ListCell *lc;
+
+ /* find its index in params_list */
+ foreach(lc, *context->params_list)
+ {
+ pindex++;
+ if (equal(node, (Node *) lfirst(lc)))
+ break;
+ }
+ if (lc == NULL)
+ {
+ /* not in list, so add it */
+ pindex++;
+ *context->params_list = lappend(*context->params_list, node);
+ }
+
+ printRemoteParam(pindex, node->paramtype, node->paramtypmod, context);
+ }
+ else
+ {
+ printRemotePlaceholder(node->paramtype, node->paramtypmod, context);
+ }
+}
+
+/*
+ * Deparse a container subscript expression.
+ */
+static void
+deparseSubscriptingRef(SubscriptingRef *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ ListCell *lowlist_item;
+ ListCell *uplist_item;
+
+ /* Always parenthesize the expression. */
+ appendStringInfoChar(buf, '(');
+
+ /*
+ * Deparse referenced array expression first. If that expression includes
+ * a cast, we have to parenthesize to prevent the array subscript from
+ * being taken as typename decoration. We can avoid that in the typical
+ * case of subscripting a Var, but otherwise do it.
+ */
+ if (IsA(node->refexpr, Var))
+ deparseExpr(node->refexpr, context);
+ else
+ {
+ appendStringInfoChar(buf, '(');
+ deparseExpr(node->refexpr, context);
+ appendStringInfoChar(buf, ')');
+ }
+
+ /* Deparse subscript expressions. */
+ lowlist_item = list_head(node->reflowerindexpr); /* could be NULL */
+ foreach(uplist_item, node->refupperindexpr)
+ {
+ appendStringInfoChar(buf, '[');
+ if (lowlist_item)
+ {
+ deparseExpr(lfirst(lowlist_item), context);
+ appendStringInfoChar(buf, ':');
+ lowlist_item = lnext(node->reflowerindexpr, lowlist_item);
+ }
+ deparseExpr(lfirst(uplist_item), context);
+ appendStringInfoChar(buf, ']');
+ }
+
+ appendStringInfoChar(buf, ')');
+}
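+
+/*
+ * Examples of the output: subscripting a plain column yields e.g. (c1[2])
+ * or (c1[1:3]); subscripting anything more complex gets an extra pair of
+ * parentheses around the container expression.
+ */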
+
+/*
+ * Deparse a function call.
+ */
+static void
+deparseFuncExpr(FuncExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ bool use_variadic;
+ bool first;
+ ListCell *arg;
+
+ /*
+ * If the function call came from an implicit coercion, then just show the
+ * first argument.
+ */
+ if (node->funcformat == COERCE_IMPLICIT_CAST)
+ {
+ deparseExpr((Expr *) linitial(node->args), context);
+ return;
+ }
+
+ /*
+ * If the function call came from a cast, then show the first argument
+ * plus an explicit cast operation.
+ */
+ if (node->funcformat == COERCE_EXPLICIT_CAST)
+ {
+ Oid rettype = node->funcresulttype;
+ int32 coercedTypmod;
+
+ /* Get the typmod if this is a length-coercion function */
+ (void) exprIsLengthCoercion((Node *) node, &coercedTypmod);
+
+ deparseExpr((Expr *) linitial(node->args), context);
+ appendStringInfo(buf, "::%s",
+ deparse_type_name(rettype, coercedTypmod));
+ return;
+ }
+
+ /* Check if need to print VARIADIC (cf. ruleutils.c) */
+ use_variadic = node->funcvariadic;
+
+ /*
+ * Normal function: display as proname(args).
+ */
+ appendFunctionName(node->funcid, context);
+ appendStringInfoChar(buf, '(');
+
+ /* ... and all the arguments */
+ first = true;
+ foreach(arg, node->args)
+ {
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ if (use_variadic && lnext(node->args, arg) == NULL)
+ appendStringInfoString(buf, "VARIADIC ");
+ deparseExpr((Expr *) lfirst(arg), context);
+ first = false;
+ }
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * Deparse given operator expression. To avoid problems around
+ * priority of operations, we always parenthesize the arguments.
+ */
+static void
+deparseOpExpr(OpExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ HeapTuple tuple;
+ Form_pg_operator form;
+ char oprkind;
+
+ /* Retrieve information about the operator from system catalog. */
+ tuple = SearchSysCache1(OPEROID, ObjectIdGetDatum(node->opno));
+ if (!HeapTupleIsValid(tuple))
+ elog(ERROR, "cache lookup failed for operator %u", node->opno);
+ form = (Form_pg_operator) GETSTRUCT(tuple);
+ oprkind = form->oprkind;
+
+ /* Sanity check. */
+ Assert((oprkind == 'l' && list_length(node->args) == 1) ||
+ (oprkind == 'b' && list_length(node->args) == 2));
+
+ /* Always parenthesize the expression. */
+ appendStringInfoChar(buf, '(');
+
+ /* Deparse left operand, if any. */
+ if (oprkind == 'b')
+ {
+ deparseExpr(linitial(node->args), context);
+ appendStringInfoChar(buf, ' ');
+ }
+
+ /* Deparse operator name. */
+ deparseOperatorName(buf, form);
+
+ /* Deparse right operand. */
+ appendStringInfoChar(buf, ' ');
+ deparseExpr(llast(node->args), context);
+
+ appendStringInfoChar(buf, ')');
+
+ ReleaseSysCache(tuple);
+}
+
+/*
+ * Print the name of an operator.
+ */
+static void
+deparseOperatorName(StringInfo buf, Form_pg_operator opform)
+{
+ char *opname;
+
+ /* opname is not a SQL identifier, so we should not quote it. */
+ opname = NameStr(opform->oprname);
+
+ /* Print schema name only if it's not pg_catalog */
+ if (opform->oprnamespace != PG_CATALOG_NAMESPACE)
+ {
+ const char *opnspname;
+
+ opnspname = get_namespace_name(opform->oprnamespace);
+ /* Print fully qualified operator name. */
+ appendStringInfo(buf, "OPERATOR(%s.%s)",
+ quote_identifier(opnspname), opname);
+ }
+ else
+ {
+ /* Just print operator name. */
+ appendStringInfoString(buf, opname);
+ }
+}
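+
+/*
+ * Examples of the output: a built-in operator such as = is emitted as a
+ * bare =, while an operator in a user schema (say a hypothetical
+ * myschema.@@) is emitted as OPERATOR(myschema.@@) so that the remote
+ * lookup is unambiguous.
+ */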
+
+/*
+ * Deparse IS DISTINCT FROM.
+ */
+static void
+deparseDistinctExpr(DistinctExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+
+ Assert(list_length(node->args) == 2);
+
+ appendStringInfoChar(buf, '(');
+ deparseExpr(linitial(node->args), context);
+ appendStringInfoString(buf, " IS DISTINCT FROM ");
+ deparseExpr(lsecond(node->args), context);
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * Deparse given ScalarArrayOpExpr expression. To avoid problems
+ * around priority of operations, we always parenthesize the arguments.
+ */
+static void
+deparseScalarArrayOpExpr(ScalarArrayOpExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ HeapTuple tuple;
+ Form_pg_operator form;
+ Expr *arg1;
+ Expr *arg2;
+
+ /* Retrieve information about the operator from system catalog. */
+ tuple = SearchSysCache1(OPEROID, ObjectIdGetDatum(node->opno));
+ if (!HeapTupleIsValid(tuple))
+ elog(ERROR, "cache lookup failed for operator %u", node->opno);
+ form = (Form_pg_operator) GETSTRUCT(tuple);
+
+ /* Sanity check. */
+ Assert(list_length(node->args) == 2);
+
+ /* Always parenthesize the expression. */
+ appendStringInfoChar(buf, '(');
+
+ /* Deparse left operand. */
+ arg1 = linitial(node->args);
+ deparseExpr(arg1, context);
+ appendStringInfoChar(buf, ' ');
+
+ /* Deparse operator name plus decoration. */
+ deparseOperatorName(buf, form);
+ appendStringInfo(buf, " %s (", node->useOr ? "ANY" : "ALL");
+
+ /* Deparse right operand. */
+ arg2 = lsecond(node->args);
+ deparseExpr(arg2, context);
+
+ appendStringInfoChar(buf, ')');
+
+ /* Always parenthesize the expression. */
+ appendStringInfoChar(buf, ')');
+
+ ReleaseSysCache(tuple);
+}
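+
+/*
+ * Example of the output: a shippable qual such as
+ *     c1 = ANY ('{2,4,6}'::integer[])
+ * is deparsed roughly as
+ *     (c1 = ANY ('{2,4,6}'::integer[]))
+ * i.e. left operand, operator, ANY/ALL keyword and right operand, with the
+ * whole expression parenthesized.
+ */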
+
+/*
+ * Deparse a RelabelType (binary-compatible cast) node.
+ */
+static void
+deparseRelabelType(RelabelType *node, deparse_expr_cxt *context)
+{
+ deparseExpr(node->arg, context);
+ if (node->relabelformat != COERCE_IMPLICIT_CAST)
+ appendStringInfo(context->buf, "::%s",
+ deparse_type_name(node->resulttype,
+ node->resulttypmod));
+}
+
+/*
+ * Deparse a BoolExpr node.
+ */
+static void
+deparseBoolExpr(BoolExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ const char *op = NULL; /* keep compiler quiet */
+ bool first;
+ ListCell *lc;
+
+ switch (node->boolop)
+ {
+ case AND_EXPR:
+ op = "AND";
+ break;
+ case OR_EXPR:
+ op = "OR";
+ break;
+ case NOT_EXPR:
+ appendStringInfoString(buf, "(NOT ");
+ deparseExpr(linitial(node->args), context);
+ appendStringInfoChar(buf, ')');
+ return;
+ }
+
+ appendStringInfoChar(buf, '(');
+ first = true;
+ foreach(lc, node->args)
+ {
+ if (!first)
+ appendStringInfo(buf, " %s ", op);
+ deparseExpr((Expr *) lfirst(lc), context);
+ first = false;
+ }
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * Deparse IS [NOT] NULL expression.
+ */
+static void
+deparseNullTest(NullTest *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+
+ appendStringInfoChar(buf, '(');
+ deparseExpr(node->arg, context);
+
+ /*
+ * For scalar inputs, we prefer to print as IS [NOT] NULL, which is
+ * shorter and traditional. If it's a rowtype input but we're applying a
+ * scalar test, must print IS [NOT] DISTINCT FROM NULL to be semantically
+ * correct.
+ */
+ if (node->argisrow || !type_is_rowtype(exprType((Node *) node->arg)))
+ {
+ if (node->nulltesttype == IS_NULL)
+ appendStringInfoString(buf, " IS NULL)");
+ else
+ appendStringInfoString(buf, " IS NOT NULL)");
+ }
+ else
+ {
+ if (node->nulltesttype == IS_NULL)
+ appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL)");
+ else
+ appendStringInfoString(buf, " IS DISTINCT FROM NULL)");
+ }
+}
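+
+/*
+ * Examples of the output: a scalar test on a column is emitted as
+ * (c3 IS NULL) or (c3 IS NOT NULL); a scalar-style test applied to a
+ * row-typed expression is emitted as IS [NOT] DISTINCT FROM NULL instead.
+ */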
+
+/*
+ * Deparse ARRAY[...] construct.
+ */
+static void
+deparseArrayExpr(ArrayExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ bool first = true;
+ ListCell *lc;
+
+ appendStringInfoString(buf, "ARRAY[");
+ foreach(lc, node->elements)
+ {
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ deparseExpr(lfirst(lc), context);
+ first = false;
+ }
+ appendStringInfoChar(buf, ']');
+
+ /* If the array is empty, we need an explicit cast to the array type. */
+ if (node->elements == NIL)
+ appendStringInfo(buf, "::%s",
+ deparse_type_name(node->array_typeid, -1));
+}
+
+/*
+ * Deparse an Aggref node.
+ */
+static void
+deparseAggref(Aggref *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ bool use_variadic;
+
+ /* Only basic, non-split aggregation accepted. */
+ Assert(node->aggsplit == AGGSPLIT_SIMPLE);
+
+ /* Check if need to print VARIADIC (cf. ruleutils.c) */
+ use_variadic = node->aggvariadic;
+
+ /* Find aggregate name from aggfnoid which is a pg_proc entry */
+ appendFunctionName(node->aggfnoid, context);
+ appendStringInfoChar(buf, '(');
+
+ /* Add DISTINCT */
+ appendStringInfoString(buf, (node->aggdistinct != NIL) ? "DISTINCT " : "");
+
+ if (AGGKIND_IS_ORDERED_SET(node->aggkind))
+ {
+ /* Add WITHIN GROUP (ORDER BY ..) */
+ ListCell *arg;
+ bool first = true;
+
+ Assert(!node->aggvariadic);
+ Assert(node->aggorder != NIL);
+
+ foreach(arg, node->aggdirectargs)
+ {
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ deparseExpr((Expr *) lfirst(arg), context);
+ }
+
+ appendStringInfoString(buf, ") WITHIN GROUP (ORDER BY ");
+ appendAggOrderBy(node->aggorder, node->args, context);
+ }
+ else
+ {
+ /* aggstar can be set only in zero-argument aggregates */
+ if (node->aggstar)
+ appendStringInfoChar(buf, '*');
+ else
+ {
+ ListCell *arg;
+ bool first = true;
+
+ /* Add all the arguments */
+ foreach(arg, node->args)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(arg);
+ Node *n = (Node *) tle->expr;
+
+ if (tle->resjunk)
+ continue;
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ /* Add VARIADIC */
+ if (use_variadic && lnext(node->args, arg) == NULL)
+ appendStringInfoString(buf, "VARIADIC ");
+
+ deparseExpr((Expr *) n, context);
+ }
+ }
+
+ /* Add ORDER BY */
+ if (node->aggorder != NIL)
+ {
+ appendStringInfoString(buf, " ORDER BY ");
+ appendAggOrderBy(node->aggorder, node->args, context);
+ }
+ }
+
+ /* Add FILTER (WHERE ..) */
+ if (node->aggfilter != NULL)
+ {
+ appendStringInfoString(buf, ") FILTER (WHERE ");
+ deparseExpr((Expr *) node->aggfilter, context);
+ }
+
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * Append ORDER BY within aggregate function.
+ */
+static void
+appendAggOrderBy(List *orderList, List *targetList, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ ListCell *lc;
+ bool first = true;
+
+ foreach(lc, orderList)
+ {
+ SortGroupClause *srt = (SortGroupClause *) lfirst(lc);
+ Node *sortexpr;
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ /* Deparse the sort expression proper. */
+ sortexpr = deparseSortGroupClause(srt->tleSortGroupRef, targetList,
+ false, context);
+ /* Add decoration as needed. */
+ appendOrderBySuffix(srt->sortop, exprType(sortexpr), srt->nulls_first,
+ context);
+ }
+}
+
+/*
+ * Append the ASC, DESC, USING <OPERATOR> and NULLS FIRST / NULLS LAST parts
+ * of an ORDER BY clause.
+ */
+static void
+appendOrderBySuffix(Oid sortop, Oid sortcoltype, bool nulls_first,
+ deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ TypeCacheEntry *typentry;
+
+ /* See whether operator is default < or > for sort expr's datatype. */
+ typentry = lookup_type_cache(sortcoltype,
+ TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
+
+ if (sortop == typentry->lt_opr)
+ appendStringInfoString(buf, " ASC");
+ else if (sortop == typentry->gt_opr)
+ appendStringInfoString(buf, " DESC");
+ else
+ {
+ HeapTuple opertup;
+ Form_pg_operator operform;
+
+ appendStringInfoString(buf, " USING ");
+
+ /* Append operator name. */
+ opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(sortop));
+ if (!HeapTupleIsValid(opertup))
+ elog(ERROR, "cache lookup failed for operator %u", sortop);
+ operform = (Form_pg_operator) GETSTRUCT(opertup);
+ deparseOperatorName(buf, operform);
+ ReleaseSysCache(opertup);
+ }
+
+ if (nulls_first)
+ appendStringInfoString(buf, " NULLS FIRST");
+ else
+ appendStringInfoString(buf, " NULLS LAST");
+}
+
+/*
+ * Print the representation of a parameter to be sent to the remote side.
+ *
+ * Note: we always label the Param's type explicitly rather than relying on
+ * transmitting a numeric type OID in PQexecParams(). This allows us to
+ * avoid assuming that types have the same OIDs on the remote side as they
+ * do locally --- they need only have the same names.
+ */
+static void
+printRemoteParam(int paramindex, Oid paramtype, int32 paramtypmod,
+ deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ char *ptypename = deparse_type_name(paramtype, paramtypmod);
+
+ appendStringInfo(buf, "$%d::%s", paramindex, ptypename);
+}
+
+/*
+ * Print the representation of a placeholder for a parameter that will be
+ * sent to the remote side at execution time.
+ *
+ * This is used when we're just trying to EXPLAIN the remote query.
+ * We don't have the actual value of the runtime parameter yet, and we don't
+ * want the remote planner to generate a plan that depends on such a value
+ * anyway. Thus, we can't do something simple like "$1::paramtype".
+ * Instead, we emit "((SELECT null::paramtype)::paramtype)".
+ * In all extant versions of Postgres, the planner will see that as an unknown
+ * constant value, which is what we want. This might need adjustment if we
+ * ever make the planner flatten scalar subqueries. Note: the reason for the
+ * apparently useless outer cast is to ensure that the representation as a
+ * whole will be parsed as an a_expr and not a select_with_parens; the latter
+ * would do the wrong thing in the context "x = ANY(...)".
+ */
+static void
+printRemotePlaceholder(Oid paramtype, int32 paramtypmod,
+ deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ char *ptypename = deparse_type_name(paramtype, paramtypmod);
+
+ appendStringInfo(buf, "((SELECT null::%s)::%s)", ptypename, ptypename);
+}
+
+/*
+ * Deparse GROUP BY clause.
+ */
+static void
+appendGroupByClause(List *tlist, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ Query *query = context->root->parse;
+ ListCell *lc;
+ bool first = true;
+
+ /* Nothing to be done if there's no GROUP BY clause in the query. */
+ if (!query->groupClause)
+ return;
+
+ appendStringInfoString(buf, " GROUP BY ");
+
+ /*
+ * Queries with grouping sets are not pushed down, so we don't expect
+ * grouping sets here.
+ */
+ Assert(!query->groupingSets);
+
+ foreach(lc, query->groupClause)
+ {
+ SortGroupClause *grp = (SortGroupClause *) lfirst(lc);
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ deparseSortGroupClause(grp->tleSortGroupRef, tlist, true, context);
+ }
+}
+
+/*
+ * Deparse ORDER BY clause defined by the given pathkeys.
+ *
+ * The clause should use Vars from context->scanrel if !has_final_sort,
+ * or from context->foreignrel's targetlist if has_final_sort.
+ *
+ * We find a suitable pathkey expression (some earlier step
+ * should have verified that there is one) and deparse it.
+ */
+static void
+appendOrderByClause(List *pathkeys, bool has_final_sort,
+ deparse_expr_cxt *context)
+{
+ ListCell *lcell;
+ int nestlevel;
+ const char *delim = " ";
+ StringInfo buf = context->buf;
+
+ /* Make sure any constants in the exprs are printed portably */
+ nestlevel = set_transmission_modes();
+
+ appendStringInfoString(buf, " ORDER BY");
+ foreach(lcell, pathkeys)
+ {
+ PathKey *pathkey = lfirst(lcell);
+ EquivalenceMember *em;
+ Expr *em_expr;
+ Oid oprid;
+
+ if (has_final_sort)
+ {
+ /*
+ * By construction, context->foreignrel is the input relation to
+ * the final sort.
+ */
+ em = find_em_for_rel_target(context->root,
+ pathkey->pk_eclass,
+ context->foreignrel);
+ }
+ else
+ em = find_em_for_rel(context->root,
+ pathkey->pk_eclass,
+ context->scanrel);
+
+ /*
+ * We don't expect any error here; it would mean that shippability
+ * wasn't verified earlier. For the same reason, we don't recheck
+ * shippability of the sort operator.
+ */
+ if (em == NULL)
+ elog(ERROR, "could not find pathkey item to sort");
+
+ em_expr = em->em_expr;
+
+ /*
+ * Look up the operator corresponding to the strategy in the pathkey's
+ * operator family. The datatype used by the opfamily is not necessarily
+ * the same as the expression type (for array types, for example).
+ */
+ oprid = get_opfamily_member(pathkey->pk_opfamily,
+ em->em_datatype,
+ em->em_datatype,
+ pathkey->pk_strategy);
+ if (!OidIsValid(oprid))
+ elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
+ pathkey->pk_strategy, em->em_datatype, em->em_datatype,
+ pathkey->pk_opfamily);
+
+ appendStringInfoString(buf, delim);
+ deparseExpr(em_expr, context);
+
+ /*
+ * Here we need to use the expression's actual type to discover
+ * whether the desired operator will be the default or not.
+ */
+ appendOrderBySuffix(oprid, exprType((Node *) em_expr),
+ pathkey->pk_nulls_first, context);
+
+ delim = ", ";
+ }
+ reset_transmission_modes(nestlevel);
+}
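+
+/*
+ * Example of the output, as also seen in the regression expected output
+ * below:
+ *     ORDER BY c3 ASC NULLS LAST, "C 1" ASC NULLS LAST
+ */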
+
+/*
+ * Deparse LIMIT/OFFSET clause.
+ */
+static void
+appendLimitClause(deparse_expr_cxt *context)
+{
+ PlannerInfo *root = context->root;
+ StringInfo buf = context->buf;
+ int nestlevel;
+
+ /* Make sure any constants in the exprs are printed portably */
+ nestlevel = set_transmission_modes();
+
+ if (root->parse->limitCount)
+ {
+ appendStringInfoString(buf, " LIMIT ");
+ deparseExpr((Expr *) root->parse->limitCount, context);
+ }
+ if (root->parse->limitOffset)
+ {
+ appendStringInfoString(buf, " OFFSET ");
+ deparseExpr((Expr *) root->parse->limitOffset, context);
+ }
+
+ reset_transmission_modes(nestlevel);
+}
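+
+/*
+ * Example of the output: a local "OFFSET 100 LIMIT 10" is deparsed as
+ *     LIMIT 10::bigint OFFSET 100::bigint
+ * since the limit expressions are int8 Consts and therefore get an
+ * explicit type label from deparseConst().
+ */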
+
+/*
+ * appendFunctionName
+ * Deparses function name from given function oid.
+ */
+static void
+appendFunctionName(Oid funcid, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ HeapTuple proctup;
+ Form_pg_proc procform;
+ const char *proname;
+
+ proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+ if (!HeapTupleIsValid(proctup))
+ elog(ERROR, "cache lookup failed for function %u", funcid);
+ procform = (Form_pg_proc) GETSTRUCT(proctup);
+
+ /* Print schema name only if it's not pg_catalog */
+ if (procform->pronamespace != PG_CATALOG_NAMESPACE)
+ {
+ const char *schemaname;
+
+ schemaname = get_namespace_name(procform->pronamespace);
+ appendStringInfo(buf, "%s.", quote_identifier(schemaname));
+ }
+
+ /* Always print the function name */
+ proname = NameStr(procform->proname);
+ appendStringInfoString(buf, quote_identifier(proname));
+
+ ReleaseSysCache(proctup);
+}
+
+/*
+ * Appends a sort or group clause.
+ *
+ * Like get_rule_sortgroupclause(), returns the expression tree, so caller
+ * need not find it again.
+ */
+static Node *
+deparseSortGroupClause(Index ref, List *tlist, bool force_colno,
+ deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ TargetEntry *tle;
+ Expr *expr;
+
+ tle = get_sortgroupref_tle(ref, tlist);
+ expr = tle->expr;
+
+ if (force_colno)
+ {
+ /* Use column-number form when requested by caller. */
+ Assert(!tle->resjunk);
+ appendStringInfo(buf, "%d", tle->resno);
+ }
+ else if (expr && IsA(expr, Const))
+ {
+ /*
+ * Force a typecast here so that we don't emit something like "GROUP
+ * BY 2", which will be misconstrued as a column position rather than
+ * a constant.
+ */
+ deparseConst((Const *) expr, context, 1);
+ }
+ else if (!expr || IsA(expr, Var))
+ deparseExpr(expr, context);
+ else
+ {
+ /* Always parenthesize the expression. */
+ appendStringInfoChar(buf, '(');
+ deparseExpr(expr, context);
+ appendStringInfoChar(buf, ')');
+ }
+
+ return (Node *) expr;
+}
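+
+/*
+ * Examples of the output: with force_colno (as used for GROUP BY) the
+ * clause is emitted as an ordinal such as GROUP BY 1, 2; without it, a
+ * Const key gets an explicit cast, e.g. 'foo'::text, so the remote parser
+ * cannot mistake it for a column number.
+ */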
+
+
+/*
+ * Returns true if the given Var is deparsed as a subquery output column, in
+ * which case *relno and *colno are set to the IDs of the relation and
+ * column aliases that the subquery provides for the Var.
+ */
+static bool
+is_subquery_var(Var *node, RelOptInfo *foreignrel, int *relno, int *colno)
+{
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private;
+ RelOptInfo *outerrel = fpinfo->outerrel;
+ RelOptInfo *innerrel = fpinfo->innerrel;
+
+ /* Should only be called in these cases. */
+ Assert(IS_SIMPLE_REL(foreignrel) || IS_JOIN_REL(foreignrel));
+
+ /*
+ * If the given relation isn't a join relation, it doesn't have any lower
+ * subqueries, so the Var isn't a subquery output column.
+ */
+ if (!IS_JOIN_REL(foreignrel))
+ return false;
+
+ /*
+ * If the Var doesn't belong to any lower subqueries, it isn't a subquery
+ * output column.
+ */
+ if (!bms_is_member(node->varno, fpinfo->lower_subquery_rels))
+ return false;
+
+ if (bms_is_member(node->varno, outerrel->relids))
+ {
+ /*
+ * If outer relation is deparsed as a subquery, the Var is an output
+ * column of the subquery; get the IDs for the relation/column alias.
+ */
+ if (fpinfo->make_outerrel_subquery)
+ {
+ get_relation_column_alias_ids(node, outerrel, relno, colno);
+ return true;
+ }
+
+ /* Otherwise, recurse into the outer relation. */
+ return is_subquery_var(node, outerrel, relno, colno);
+ }
+ else
+ {
+ Assert(bms_is_member(node->varno, innerrel->relids));
+
+ /*
+ * If inner relation is deparsed as a subquery, the Var is an output
+ * column of the subquery; get the IDs for the relation/column alias.
+ */
+ if (fpinfo->make_innerrel_subquery)
+ {
+ get_relation_column_alias_ids(node, innerrel, relno, colno);
+ return true;
+ }
+
+ /* Otherwise, recurse into the inner relation. */
+ return is_subquery_var(node, innerrel, relno, colno);
+ }
+}
+
+/*
+ * Get the IDs of the relation and column aliases for the given Var belonging
+ * to the given relation, returning them in *relno and *colno.
+ */
+static void
+get_relation_column_alias_ids(Var *node, RelOptInfo *foreignrel,
+ int *relno, int *colno)
+{
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private;
+ int i;
+ ListCell *lc;
+
+ /* Get the relation alias ID */
+ *relno = fpinfo->relation_index;
+
+ /* Get the column alias ID */
+ i = 1;
+ foreach(lc, foreignrel->reltarget->exprs)
+ {
+ if (equal(lfirst(lc), (Node *) node))
+ {
+ *colno = i;
+ return;
+ }
+ i++;
+ }
+
+ /* Shouldn't get here */
+ elog(ERROR, "unexpected expression in subquery output");
+}
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
new file mode 100644
index 0000000..e850db1
--- /dev/null
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -0,0 +1,10858 @@
+-- ===================================================================
+-- create FDW objects
+-- ===================================================================
+CREATE EXTENSION postgres_fdw;
+CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw;
+DO $d$
+ BEGIN
+ EXECUTE $$CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (dbname '$$||current_database()||$$',
+ port '$$||current_setting('port')||$$'
+ )$$;
+ EXECUTE $$CREATE SERVER loopback2 FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (dbname '$$||current_database()||$$',
+ port '$$||current_setting('port')||$$'
+ )$$;
+ EXECUTE $$CREATE SERVER loopback3 FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (dbname '$$||current_database()||$$',
+ port '$$||current_setting('port')||$$'
+ )$$;
+ END;
+$d$;
+CREATE USER MAPPING FOR public SERVER testserver1
+ OPTIONS (user 'value', password 'value');
+CREATE USER MAPPING FOR CURRENT_USER SERVER loopback;
+CREATE USER MAPPING FOR CURRENT_USER SERVER loopback2;
+CREATE USER MAPPING FOR public SERVER loopback3;
+-- ===================================================================
+-- create objects used through FDW loopback server
+-- ===================================================================
+CREATE TYPE user_enum AS ENUM ('foo', 'bar', 'buz');
+CREATE SCHEMA "S 1";
+CREATE TABLE "S 1"."T 1" (
+ "C 1" int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10),
+ c8 user_enum,
+ CONSTRAINT t1_pkey PRIMARY KEY ("C 1")
+);
+CREATE TABLE "S 1"."T 2" (
+ c1 int NOT NULL,
+ c2 text,
+ CONSTRAINT t2_pkey PRIMARY KEY (c1)
+);
+CREATE TABLE "S 1"."T 3" (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ CONSTRAINT t3_pkey PRIMARY KEY (c1)
+);
+CREATE TABLE "S 1"."T 4" (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ CONSTRAINT t4_pkey PRIMARY KEY (c1)
+);
+-- Disable autovacuum for these tables to avoid unexpected effects of that
+ALTER TABLE "S 1"."T 1" SET (autovacuum_enabled = 'false');
+ALTER TABLE "S 1"."T 2" SET (autovacuum_enabled = 'false');
+ALTER TABLE "S 1"."T 3" SET (autovacuum_enabled = 'false');
+ALTER TABLE "S 1"."T 4" SET (autovacuum_enabled = 'false');
+INSERT INTO "S 1"."T 1"
+ SELECT id,
+ id % 10,
+ to_char(id, 'FM00000'),
+ '1970-01-01'::timestamptz + ((id % 100) || ' days')::interval,
+ '1970-01-01'::timestamp + ((id % 100) || ' days')::interval,
+ id % 10,
+ id % 10,
+ 'foo'::user_enum
+ FROM generate_series(1, 1000) id;
+INSERT INTO "S 1"."T 2"
+ SELECT id,
+ 'AAA' || to_char(id, 'FM000')
+ FROM generate_series(1, 100) id;
+INSERT INTO "S 1"."T 3"
+ SELECT id,
+ id + 1,
+ 'AAA' || to_char(id, 'FM000')
+ FROM generate_series(1, 100) id;
+DELETE FROM "S 1"."T 3" WHERE c1 % 2 != 0; -- delete for outer join tests
+INSERT INTO "S 1"."T 4"
+ SELECT id,
+ id + 1,
+ 'AAA' || to_char(id, 'FM000')
+ FROM generate_series(1, 100) id;
+DELETE FROM "S 1"."T 4" WHERE c1 % 3 != 0; -- delete for outer join tests
+ANALYZE "S 1"."T 1";
+ANALYZE "S 1"."T 2";
+ANALYZE "S 1"."T 3";
+ANALYZE "S 1"."T 4";
+-- ===================================================================
+-- create foreign tables
+-- ===================================================================
+CREATE FOREIGN TABLE ft1 (
+ c0 int,
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10) default 'ft1',
+ c8 user_enum
+) SERVER loopback;
+ALTER FOREIGN TABLE ft1 DROP COLUMN c0;
+CREATE FOREIGN TABLE ft2 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ cx int,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10) default 'ft2',
+ c8 user_enum
+) SERVER loopback;
+ALTER FOREIGN TABLE ft2 DROP COLUMN cx;
+CREATE FOREIGN TABLE ft4 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text
+) SERVER loopback OPTIONS (schema_name 'S 1', table_name 'T 3');
+CREATE FOREIGN TABLE ft5 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text
+) SERVER loopback OPTIONS (schema_name 'S 1', table_name 'T 4');
+CREATE FOREIGN TABLE ft6 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text
+) SERVER loopback2 OPTIONS (schema_name 'S 1', table_name 'T 4');
+CREATE FOREIGN TABLE ft7 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text
+) SERVER loopback3 OPTIONS (schema_name 'S 1', table_name 'T 4');
+-- ===================================================================
+-- tests for validator
+-- ===================================================================
+-- requiressl and some other parameters are omitted because
+-- valid values for them depend on configure options
+ALTER SERVER testserver1 OPTIONS (
+ use_remote_estimate 'false',
+ updatable 'true',
+ fdw_startup_cost '123.456',
+ fdw_tuple_cost '0.123',
+ service 'value',
+ connect_timeout 'value',
+ dbname 'value',
+ host 'value',
+ hostaddr 'value',
+ port 'value',
+ --client_encoding 'value',
+ application_name 'value',
+ --fallback_application_name 'value',
+ keepalives 'value',
+ keepalives_idle 'value',
+ keepalives_interval 'value',
+ tcp_user_timeout 'value',
+ -- requiressl 'value',
+ sslcompression 'value',
+ sslmode 'value',
+ sslcert 'value',
+ sslkey 'value',
+ sslrootcert 'value',
+ sslcrl 'value',
+ --requirepeer 'value',
+ krbsrvname 'value',
+ gsslib 'value'
+ --replication 'value'
+);
+-- Error, invalid list syntax
+ALTER SERVER testserver1 OPTIONS (ADD extensions 'foo; bar');
+ERROR: parameter "extensions" must be a list of extension names
+-- OK but gets a warning
+ALTER SERVER testserver1 OPTIONS (ADD extensions 'foo, bar');
+WARNING: extension "foo" is not installed
+WARNING: extension "bar" is not installed
+ALTER SERVER testserver1 OPTIONS (DROP extensions);
+ALTER USER MAPPING FOR public SERVER testserver1
+ OPTIONS (DROP user, DROP password);
+-- Attempt to add a valid option that's not allowed in a user mapping
+ALTER USER MAPPING FOR public SERVER testserver1
+ OPTIONS (ADD sslmode 'require');
+ERROR: invalid option "sslmode"
+HINT: Valid options in this context are: user, password, sslpassword, password_required, sslcert, sslkey
+-- But we can add valid ones fine
+ALTER USER MAPPING FOR public SERVER testserver1
+ OPTIONS (ADD sslpassword 'dummy');
+-- Ensure valid options we haven't used in a user mapping yet are
+-- permitted to check validation.
+ALTER USER MAPPING FOR public SERVER testserver1
+ OPTIONS (ADD sslkey 'value', ADD sslcert 'value');
+ALTER FOREIGN TABLE ft1 OPTIONS (schema_name 'S 1', table_name 'T 1');
+ALTER FOREIGN TABLE ft2 OPTIONS (schema_name 'S 1', table_name 'T 1');
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
+ALTER FOREIGN TABLE ft2 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
+\det+
+ List of foreign tables
+ Schema | Table | Server | FDW options | Description
+--------+-------+-----------+---------------------------------------+-------------
+ public | ft1 | loopback | (schema_name 'S 1', table_name 'T 1') |
+ public | ft2 | loopback | (schema_name 'S 1', table_name 'T 1') |
+ public | ft4 | loopback | (schema_name 'S 1', table_name 'T 3') |
+ public | ft5 | loopback | (schema_name 'S 1', table_name 'T 4') |
+ public | ft6 | loopback2 | (schema_name 'S 1', table_name 'T 4') |
+ public | ft7 | loopback3 | (schema_name 'S 1', table_name 'T 4') |
+(6 rows)
+
+-- Test that alteration of server options causes reconnection
+-- Remote's errors might be non-English, so hide them to ensure stable results
+\set VERBOSITY terse
+SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work
+ c3 | c4
+-------+------------------------------
+ 00001 | Fri Jan 02 00:00:00 1970 PST
+(1 row)
+
+ALTER SERVER loopback OPTIONS (SET dbname 'no such database');
+SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail
+ERROR: could not connect to server "loopback"
+DO $d$
+ BEGIN
+ EXECUTE $$ALTER SERVER loopback
+ OPTIONS (SET dbname '$$||current_database()||$$')$$;
+ END;
+$d$;
+SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again
+ c3 | c4
+-------+------------------------------
+ 00001 | Fri Jan 02 00:00:00 1970 PST
+(1 row)
+
+-- Test that alteration of user mapping options causes reconnection
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback
+ OPTIONS (ADD user 'no such user');
+SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail
+ERROR: could not connect to server "loopback"
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback
+ OPTIONS (DROP user);
+SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again
+ c3 | c4
+-------+------------------------------
+ 00001 | Fri Jan 02 00:00:00 1970 PST
+(1 row)
+
+\set VERBOSITY default
+-- Now we should be able to run ANALYZE.
+-- To exercise multiple code paths, we use local stats on ft1
+-- and remote-estimate mode on ft2.
+ANALYZE ft1;
+ALTER FOREIGN TABLE ft2 OPTIONS (use_remote_estimate 'true');
+-- ===================================================================
+-- simple queries
+-- ===================================================================
+-- single table without alias
+EXPLAIN (COSTS OFF) SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+---------------------
+ Foreign Scan on ft1
+(1 row)
+
+SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 102 | 2 | 00102 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 103 | 3 | 00103 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 104 | 4 | 00104 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+ 105 | 5 | 00105 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
+ 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 107 | 7 | 00107 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 108 | 8 | 00108 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 109 | 9 | 00109 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
+ 110 | 0 | 00110 | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
+(10 rows)
+
+-- single table with alias - also test that tableoid sort is not pushed to remote side
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1, t1.tableoid OFFSET 100 LIMIT 10;
+ QUERY PLAN
+-------------------------------------------------------------------------------------
+ Limit
+ Output: c1, c2, c3, c4, c5, c6, c7, c8, tableoid
+ -> Sort
+ Output: c1, c2, c3, c4, c5, c6, c7, c8, tableoid
+ Sort Key: t1.c3, t1.c1, t1.tableoid
+ -> Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8, tableoid
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(8 rows)
+
+SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1, t1.tableoid OFFSET 100 LIMIT 10;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 102 | 2 | 00102 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 103 | 3 | 00103 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 104 | 4 | 00104 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+ 105 | 5 | 00105 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
+ 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 107 | 7 | 00107 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 108 | 8 | 00108 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 109 | 9 | 00109 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
+ 110 | 0 | 00110 | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
+(10 rows)
+
+-- whole-row reference
+EXPLAIN (VERBOSE, COSTS OFF) SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: t1.*, c3, c1
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c3 ASC NULLS LAST, "C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint
+(3 rows)
+
+SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ t1
+--------------------------------------------------------------------------------------------
+ (101,1,00101,"Fri Jan 02 00:00:00 1970 PST","Fri Jan 02 00:00:00 1970",1,"1 ",foo)
+ (102,2,00102,"Sat Jan 03 00:00:00 1970 PST","Sat Jan 03 00:00:00 1970",2,"2 ",foo)
+ (103,3,00103,"Sun Jan 04 00:00:00 1970 PST","Sun Jan 04 00:00:00 1970",3,"3 ",foo)
+ (104,4,00104,"Mon Jan 05 00:00:00 1970 PST","Mon Jan 05 00:00:00 1970",4,"4 ",foo)
+ (105,5,00105,"Tue Jan 06 00:00:00 1970 PST","Tue Jan 06 00:00:00 1970",5,"5 ",foo)
+ (106,6,00106,"Wed Jan 07 00:00:00 1970 PST","Wed Jan 07 00:00:00 1970",6,"6 ",foo)
+ (107,7,00107,"Thu Jan 08 00:00:00 1970 PST","Thu Jan 08 00:00:00 1970",7,"7 ",foo)
+ (108,8,00108,"Fri Jan 09 00:00:00 1970 PST","Fri Jan 09 00:00:00 1970",8,"8 ",foo)
+ (109,9,00109,"Sat Jan 10 00:00:00 1970 PST","Sat Jan 10 00:00:00 1970",9,"9 ",foo)
+ (110,0,00110,"Sun Jan 11 00:00:00 1970 PST","Sun Jan 11 00:00:00 1970",0,"0 ",foo)
+(10 rows)
+
+-- empty result
+SELECT * FROM ft1 WHERE false;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+----+----+----+----+----+----
+(0 rows)
+
+-- with WHERE clause
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((c7 >= '1'::bpchar)) AND (("C 1" = 101)) AND ((c6 = '1'::text))
+(3 rows)
+
+SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+-- with FOR UPDATE/SHARE
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.*
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 101)) FOR UPDATE
+(3 rows)
+
+SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.*
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 102)) FOR SHARE
+(3 rows)
+
+SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 102 | 2 | 00102 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+(1 row)
+
+-- aggregate
+SELECT COUNT(*) FROM ft1 t1;
+ count
+-------
+ 1000
+(1 row)
+
+-- subquery
+SELECT * FROM ft1 t1 WHERE t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 <= 10) ORDER BY c1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+ 5 | 5 | 00005 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 9 | 9 | 00009 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
+ 10 | 0 | 00010 | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
+(10 rows)
+
+-- subquery+MAX
+SELECT * FROM ft1 t1 WHERE t1.c3 = (SELECT MAX(c3) FROM ft2 t2) ORDER BY c1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1000 | 0 | 01000 | Thu Jan 01 00:00:00 1970 PST | Thu Jan 01 00:00:00 1970 | 0 | 0 | foo
+(1 row)
+
+-- used in CTE
+WITH t1 AS (SELECT * FROM ft1 WHERE c1 <= 10) SELECT t2.c1, t2.c2, t2.c3, t2.c4 FROM t1, ft2 t2 WHERE t1.c1 = t2.c1 ORDER BY t1.c1;
+ c1 | c2 | c3 | c4
+----+----+-------+------------------------------
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST
+ 5 | 5 | 00005 | Tue Jan 06 00:00:00 1970 PST
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST
+ 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST
+ 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST
+ 9 | 9 | 00009 | Sat Jan 10 00:00:00 1970 PST
+ 10 | 0 | 00010 | Sun Jan 11 00:00:00 1970 PST
+(10 rows)
+
+-- fixed values
+SELECT 'fixed', NULL FROM ft1 t1 WHERE c1 = 1;
+ ?column? | ?column?
+----------+----------
+ fixed |
+(1 row)
+
+-- Test forcing the remote server to produce sorted data for a merge join.
+SET enable_hashjoin TO false;
+SET enable_nestloop TO false;
+-- inner join; expressions in the clauses appear in the equivalence class list
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT t1.c1, t2."C 1" FROM ft2 t1 JOIN "S 1"."T 1" t2 ON (t1.c1 = t2."C 1") OFFSET 100 LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Limit
+ Output: t1.c1, t2."C 1"
+ -> Merge Join
+ Output: t1.c1, t2."C 1"
+ Inner Unique: true
+ Merge Cond: (t1.c1 = t2."C 1")
+ -> Foreign Scan on public.ft2 t1
+ Output: t1.c1
+ Remote SQL: SELECT "C 1" FROM "S 1"."T 1" ORDER BY "C 1" ASC NULLS LAST
+ -> Index Only Scan using t1_pkey on "S 1"."T 1" t2
+ Output: t2."C 1"
+(11 rows)
+
+SELECT t1.c1, t2."C 1" FROM ft2 t1 JOIN "S 1"."T 1" t2 ON (t1.c1 = t2."C 1") OFFSET 100 LIMIT 10;
+ c1 | C 1
+-----+-----
+ 101 | 101
+ 102 | 102
+ 103 | 103
+ 104 | 104
+ 105 | 105
+ 106 | 106
+ 107 | 107
+ 108 | 108
+ 109 | 109
+ 110 | 110
+(10 rows)
+
+-- outer join; expressions in the clauses do not appear in equivalence class
+-- list but no output change as compared to the previous query
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT t1.c1, t2."C 1" FROM ft2 t1 LEFT JOIN "S 1"."T 1" t2 ON (t1.c1 = t2."C 1") OFFSET 100 LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Limit
+ Output: t1.c1, t2."C 1"
+ -> Merge Left Join
+ Output: t1.c1, t2."C 1"
+ Inner Unique: true
+ Merge Cond: (t1.c1 = t2."C 1")
+ -> Foreign Scan on public.ft2 t1
+ Output: t1.c1
+ Remote SQL: SELECT "C 1" FROM "S 1"."T 1" ORDER BY "C 1" ASC NULLS LAST
+ -> Index Only Scan using t1_pkey on "S 1"."T 1" t2
+ Output: t2."C 1"
+(11 rows)
+
+SELECT t1.c1, t2."C 1" FROM ft2 t1 LEFT JOIN "S 1"."T 1" t2 ON (t1.c1 = t2."C 1") OFFSET 100 LIMIT 10;
+ c1 | C 1
+-----+-----
+ 101 | 101
+ 102 | 102
+ 103 | 103
+ 104 | 104
+ 105 | 105
+ 106 | 106
+ 107 | 107
+ 108 | 108
+ 109 | 109
+ 110 | 110
+(10 rows)
+
+-- A join between local table and foreign join. ORDER BY clause is added to the
+-- foreign join so that the local table can be joined using merge join strategy.
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT t1."C 1" FROM "S 1"."T 1" t1 left join ft1 t2 join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: t1."C 1"
+ -> Merge Right Join
+ Output: t1."C 1"
+ Inner Unique: true
+ Merge Cond: (t3.c1 = t1."C 1")
+ -> Foreign Scan
+ Output: t3.c1
+ Relations: (public.ft1 t2) INNER JOIN (public.ft2 t3)
+ Remote SQL: SELECT r3."C 1" FROM ("S 1"."T 1" r2 INNER JOIN "S 1"."T 1" r3 ON (((r2."C 1" = r3."C 1")))) ORDER BY r2."C 1" ASC NULLS LAST
+ -> Index Only Scan using t1_pkey on "S 1"."T 1" t1
+ Output: t1."C 1"
+(12 rows)
+
+SELECT t1."C 1" FROM "S 1"."T 1" t1 left join ft1 t2 join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+ C 1
+-----
+ 101
+ 102
+ 103
+ 104
+ 105
+ 106
+ 107
+ 108
+ 109
+ 110
+(10 rows)
+
+-- Test similar to above, except that the full join prevents any equivalence
+-- classes from being merged. This produces single relation equivalence classes
+-- included in join restrictions.
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT t1."C 1", t2.c1, t3.c1 FROM "S 1"."T 1" t1 left join ft1 t2 full join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: t1."C 1", t2.c1, t3.c1
+ -> Merge Right Join
+ Output: t1."C 1", t2.c1, t3.c1
+ Inner Unique: true
+ Merge Cond: (t3.c1 = t1."C 1")
+ -> Foreign Scan
+ Output: t3.c1, t2.c1
+ Relations: (public.ft2 t3) LEFT JOIN (public.ft1 t2)
+ Remote SQL: SELECT r3."C 1", r2."C 1" FROM ("S 1"."T 1" r3 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r3."C 1")))) ORDER BY r3."C 1" ASC NULLS LAST
+ -> Index Only Scan using t1_pkey on "S 1"."T 1" t1
+ Output: t1."C 1"
+(12 rows)
+
+SELECT t1."C 1", t2.c1, t3.c1 FROM "S 1"."T 1" t1 left join ft1 t2 full join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+ C 1 | c1 | c1
+-----+-----+-----
+ 101 | 101 | 101
+ 102 | 102 | 102
+ 103 | 103 | 103
+ 104 | 104 | 104
+ 105 | 105 | 105
+ 106 | 106 | 106
+ 107 | 107 | 107
+ 108 | 108 | 108
+ 109 | 109 | 109
+ 110 | 110 | 110
+(10 rows)
+
+-- Test similar to above with all full outer joins
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT t1."C 1", t2.c1, t3.c1 FROM "S 1"."T 1" t1 full join ft1 t2 full join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: t1."C 1", t2.c1, t3.c1
+ -> Merge Full Join
+ Output: t1."C 1", t2.c1, t3.c1
+ Inner Unique: true
+ Merge Cond: (t3.c1 = t1."C 1")
+ -> Foreign Scan
+ Output: t2.c1, t3.c1
+ Relations: (public.ft1 t2) FULL JOIN (public.ft2 t3)
+ Remote SQL: SELECT r2."C 1", r3."C 1" FROM ("S 1"."T 1" r2 FULL JOIN "S 1"."T 1" r3 ON (((r2."C 1" = r3."C 1")))) ORDER BY r3."C 1" ASC NULLS LAST
+ -> Index Only Scan using t1_pkey on "S 1"."T 1" t1
+ Output: t1."C 1"
+(12 rows)
+
+SELECT t1."C 1", t2.c1, t3.c1 FROM "S 1"."T 1" t1 full join ft1 t2 full join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+ C 1 | c1 | c1
+-----+-----+-----
+ 101 | 101 | 101
+ 102 | 102 | 102
+ 103 | 103 | 103
+ 104 | 104 | 104
+ 105 | 105 | 105
+ 106 | 106 | 106
+ 107 | 107 | 107
+ 108 | 108 | 108
+ 109 | 109 | 109
+ 110 | 110 | 110
+(10 rows)
+
+RESET enable_hashjoin;
+RESET enable_nestloop;
+-- Test executing assertion in estimate_path_cost_size() that makes sure that
+-- retrieved_rows for foreign rel re-used to cost pre-sorted foreign paths is
+-- a sensible value even when the rel has tuples=0
+CREATE TABLE loct_empty (c1 int NOT NULL, c2 text);
+CREATE FOREIGN TABLE ft_empty (c1 int NOT NULL, c2 text)
+ SERVER loopback OPTIONS (table_name 'loct_empty');
+INSERT INTO loct_empty
+ SELECT id, 'AAA' || to_char(id, 'FM000') FROM generate_series(1, 100) id;
+DELETE FROM loct_empty;
+ANALYZE ft_empty;
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft_empty ORDER BY c1;
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Foreign Scan on public.ft_empty
+ Output: c1, c2
+ Remote SQL: SELECT c1, c2 FROM public.loct_empty ORDER BY c1 ASC NULLS LAST
+(3 rows)
+
+-- ===================================================================
+-- WHERE with remotely-executable conditions
+-- ===================================================================
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE t1.c1 = 1; -- Var, OpExpr(b), Const
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE t1.c1 = 100 AND t1.c2 = 0; -- BoolExpr
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 100)) AND ((c2 = 0))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 IS NULL; -- NullTest
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" IS NULL))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 IS NOT NULL; -- NullTest
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" IS NOT NULL))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE round(abs(c1), 0) = 1; -- FuncExpr
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((round(abs("C 1"), 0) = 1::numeric))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- OpExpr(l)
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = (- "C 1")))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((("C 1" IS NOT NULL) IS DISTINCT FROM ("C 1" IS NOT NULL)))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1 + 0]); -- ScalarArrayOpExpr
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = ANY (ARRAY[c2, 1, ("C 1" + 0)])))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- SubscriptingRef
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = ((ARRAY["C 1", c2, 3])[1])))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c6 = E'foo''s\\bar'; -- check special chars
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((c6 = E'foo''s\\bar'::text))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c8 = 'foo'; -- can't be sent to remote
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(4 rows)
+
+-- parameterized remote path for foreign table
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM "S 1"."T 1" a, ft2 b WHERE a."C 1" = 47 AND b.c1 = a.c2;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: a."C 1", a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8, b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
+ -> Index Scan using t1_pkey on "S 1"."T 1" a
+ Output: a."C 1", a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8
+ Index Cond: (a."C 1" = 47)
+ -> Foreign Scan on public.ft2 b
+ Output: b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (($1::integer = "C 1"))
+(8 rows)
+
+SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 47 | 7 | 00047 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo | 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+(1 row)
+
+-- check both safe and unsafe join conditions
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft2 a, ft2 b
+ WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8, b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
+ -> Foreign Scan on public.ft2 a
+ Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8
+ Filter: (a.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((c2 = 6))
+ -> Foreign Scan on public.ft2 b
+ Output: b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
+ Filter: (upper((a.c7)::text) = (b.c7)::text)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (($1::integer = "C 1"))
+(10 rows)
+
+SELECT * FROM ft2 a, ft2 b
+WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 126 | 6 | 00126 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 126 | 6 | 00126 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 136 | 6 | 00136 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 136 | 6 | 00136 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 146 | 6 | 00146 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 146 | 6 | 00146 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 156 | 6 | 00156 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 156 | 6 | 00156 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 166 | 6 | 00166 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 166 | 6 | 00166 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 176 | 6 | 00176 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 176 | 6 | 00176 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 186 | 6 | 00186 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 186 | 6 | 00186 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 196 | 6 | 00196 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 196 | 6 | 00196 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 206 | 6 | 00206 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 206 | 6 | 00206 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 216 | 6 | 00216 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 216 | 6 | 00216 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 226 | 6 | 00226 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 226 | 6 | 00226 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 236 | 6 | 00236 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 236 | 6 | 00236 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 246 | 6 | 00246 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 246 | 6 | 00246 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 256 | 6 | 00256 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 256 | 6 | 00256 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 266 | 6 | 00266 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 266 | 6 | 00266 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 276 | 6 | 00276 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 276 | 6 | 00276 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 286 | 6 | 00286 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 286 | 6 | 00286 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 296 | 6 | 00296 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 296 | 6 | 00296 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 306 | 6 | 00306 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 306 | 6 | 00306 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 316 | 6 | 00316 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 316 | 6 | 00316 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 326 | 6 | 00326 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 326 | 6 | 00326 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 336 | 6 | 00336 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 336 | 6 | 00336 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 346 | 6 | 00346 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 346 | 6 | 00346 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 356 | 6 | 00356 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 356 | 6 | 00356 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 366 | 6 | 00366 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 366 | 6 | 00366 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 376 | 6 | 00376 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 376 | 6 | 00376 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 386 | 6 | 00386 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 386 | 6 | 00386 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 396 | 6 | 00396 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 396 | 6 | 00396 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 406 | 6 | 00406 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 406 | 6 | 00406 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 416 | 6 | 00416 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 416 | 6 | 00416 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 426 | 6 | 00426 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 426 | 6 | 00426 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 436 | 6 | 00436 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 436 | 6 | 00436 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 446 | 6 | 00446 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 446 | 6 | 00446 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 456 | 6 | 00456 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 456 | 6 | 00456 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 466 | 6 | 00466 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 466 | 6 | 00466 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 476 | 6 | 00476 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 476 | 6 | 00476 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 486 | 6 | 00486 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 486 | 6 | 00486 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 496 | 6 | 00496 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 496 | 6 | 00496 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 506 | 6 | 00506 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 506 | 6 | 00506 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 516 | 6 | 00516 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 516 | 6 | 00516 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 526 | 6 | 00526 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 526 | 6 | 00526 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 536 | 6 | 00536 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 536 | 6 | 00536 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 546 | 6 | 00546 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 546 | 6 | 00546 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 556 | 6 | 00556 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 556 | 6 | 00556 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 566 | 6 | 00566 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 566 | 6 | 00566 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 576 | 6 | 00576 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 576 | 6 | 00576 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 586 | 6 | 00586 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 586 | 6 | 00586 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 596 | 6 | 00596 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 596 | 6 | 00596 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 606 | 6 | 00606 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 606 | 6 | 00606 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 616 | 6 | 00616 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 616 | 6 | 00616 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 626 | 6 | 00626 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 626 | 6 | 00626 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 636 | 6 | 00636 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 636 | 6 | 00636 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 646 | 6 | 00646 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 646 | 6 | 00646 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 656 | 6 | 00656 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 656 | 6 | 00656 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 666 | 6 | 00666 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 666 | 6 | 00666 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 676 | 6 | 00676 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 676 | 6 | 00676 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 686 | 6 | 00686 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 686 | 6 | 00686 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 696 | 6 | 00696 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 696 | 6 | 00696 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 706 | 6 | 00706 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 706 | 6 | 00706 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 716 | 6 | 00716 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 716 | 6 | 00716 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 726 | 6 | 00726 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 726 | 6 | 00726 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 736 | 6 | 00736 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 736 | 6 | 00736 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 746 | 6 | 00746 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 746 | 6 | 00746 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 756 | 6 | 00756 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 756 | 6 | 00756 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 766 | 6 | 00766 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 766 | 6 | 00766 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 776 | 6 | 00776 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 776 | 6 | 00776 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 786 | 6 | 00786 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 786 | 6 | 00786 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 796 | 6 | 00796 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 796 | 6 | 00796 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 806 | 6 | 00806 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 806 | 6 | 00806 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 816 | 6 | 00816 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 816 | 6 | 00816 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 826 | 6 | 00826 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 826 | 6 | 00826 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 836 | 6 | 00836 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 836 | 6 | 00836 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 846 | 6 | 00846 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 846 | 6 | 00846 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 856 | 6 | 00856 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 856 | 6 | 00856 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 866 | 6 | 00866 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 866 | 6 | 00866 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 876 | 6 | 00876 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 876 | 6 | 00876 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 886 | 6 | 00886 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 886 | 6 | 00886 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 896 | 6 | 00896 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 896 | 6 | 00896 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 906 | 6 | 00906 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 906 | 6 | 00906 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 916 | 6 | 00916 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 916 | 6 | 00916 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 926 | 6 | 00926 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 926 | 6 | 00926 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 936 | 6 | 00936 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 936 | 6 | 00936 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 946 | 6 | 00946 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 946 | 6 | 00946 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 956 | 6 | 00956 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 956 | 6 | 00956 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 966 | 6 | 00966 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 966 | 6 | 00966 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 976 | 6 | 00976 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 976 | 6 | 00976 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 986 | 6 | 00986 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 986 | 6 | 00986 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 996 | 6 | 00996 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 996 | 6 | 00996 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+(100 rows)
+
+-- bug before 9.3.5 due to sloppy handling of remote-estimate parameters
+SELECT * FROM ft1 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft2 WHERE c1 < 5));
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+(4 rows)
+
+SELECT * FROM ft2 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft1 WHERE c1 < 5));
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+(4 rows)
+
+-- we should not push order by clause with volatile expressions or unsafe
+-- collations
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft2 ORDER BY ft2.c1, random();
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Sort
+ Output: c1, c2, c3, c4, c5, c6, c7, c8, (random())
+ Sort Key: ft2.c1, (random())
+ -> Foreign Scan on public.ft2
+ Output: c1, c2, c3, c4, c5, c6, c7, c8, random()
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(6 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft2 ORDER BY ft2.c1, ft2.c3 collate "C";
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Sort
+ Output: c1, c2, c3, c4, c5, c6, c7, c8, ((c3)::text)
+ Sort Key: ft2.c1, ft2.c3 COLLATE "C"
+ -> Foreign Scan on public.ft2
+ Output: c1, c2, c3, c4, c5, c6, c7, c8, c3
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(6 rows)
+
+-- user-defined operator/function
+CREATE FUNCTION postgres_fdw_abs(int) RETURNS int AS $$
+BEGIN
+RETURN abs($1);
+END
+$$ LANGUAGE plpgsql IMMUTABLE;
+CREATE OPERATOR === (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ===
+);
+-- built-in operators and functions can be shipped for remote execution
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = abs(t1.c2);
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(c3))
+ Relations: Aggregate on (public.ft1 t1)
+ Remote SQL: SELECT count(c3) FROM "S 1"."T 1" WHERE (("C 1" = abs(c2)))
+(4 rows)
+
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = abs(t1.c2);
+ count
+-------
+ 9
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = t1.c2;
+ QUERY PLAN
+----------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(c3))
+ Relations: Aggregate on (public.ft1 t1)
+ Remote SQL: SELECT count(c3) FROM "S 1"."T 1" WHERE (("C 1" = c2))
+(4 rows)
+
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = t1.c2;
+ count
+-------
+ 9
+(1 row)
+
+-- by default, user-defined ones cannot
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
+ QUERY PLAN
+-----------------------------------------------------------
+ Aggregate
+ Output: count(c3)
+ -> Foreign Scan on public.ft1 t1
+ Output: c3
+ Filter: (t1.c1 = postgres_fdw_abs(t1.c2))
+ Remote SQL: SELECT "C 1", c2, c3 FROM "S 1"."T 1"
+(6 rows)
+
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
+ count
+-------
+ 9
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+ QUERY PLAN
+-----------------------------------------------------------
+ Aggregate
+ Output: count(c3)
+ -> Foreign Scan on public.ft1 t1
+ Output: c3
+ Filter: (t1.c1 === t1.c2)
+ Remote SQL: SELECT "C 1", c2, c3 FROM "S 1"."T 1"
+(6 rows)
+
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+ count
+-------
+ 9
+(1 row)
+
+-- ORDER BY can be shipped, though
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Limit
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ -> Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c1 === t1.c2)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c2 ASC NULLS LAST
+(6 rows)
+
+SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+-- but let's put them in an extension ...
+ALTER EXTENSION postgres_fdw ADD FUNCTION postgres_fdw_abs(int);
+ALTER EXTENSION postgres_fdw ADD OPERATOR === (int, int);
+ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw');
+-- ... now they can be shipped
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(c3))
+ Relations: Aggregate on (public.ft1 t1)
+ Remote SQL: SELECT count(c3) FROM "S 1"."T 1" WHERE (("C 1" = public.postgres_fdw_abs(c2)))
+(4 rows)
+
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
+ count
+-------
+ 9
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(c3))
+ Relations: Aggregate on (public.ft1 t1)
+ Remote SQL: SELECT count(c3) FROM "S 1"."T 1" WHERE (("C 1" OPERATOR(public.===) c2))
+(4 rows)
+
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+ count
+-------
+ 9
+(1 row)
+
+-- and both ORDER BY and LIMIT can be shipped
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" OPERATOR(public.===) c2)) ORDER BY c2 ASC NULLS LAST LIMIT 1::bigint
+(3 rows)
+
+SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+-- check schema-qualification of regconfig constant
+CREATE TEXT SEARCH CONFIGURATION public.custom_search
+ (COPY = pg_catalog.english);
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT c1, to_tsvector('custom_search'::regconfig, c3) FROM ft1
+WHERE c1 = 642 AND length(to_tsvector('custom_search'::regconfig, c3)) > 0;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1
+ Output: c1, to_tsvector('custom_search'::regconfig, c3)
+ Remote SQL: SELECT "C 1", c3 FROM "S 1"."T 1" WHERE (("C 1" = 642)) AND ((length(to_tsvector('public.custom_search'::regconfig, c3)) > 0))
+(3 rows)
+
+SELECT c1, to_tsvector('custom_search'::regconfig, c3) FROM ft1
+WHERE c1 = 642 AND length(to_tsvector('custom_search'::regconfig, c3)) > 0;
+ c1 | to_tsvector
+-----+-------------
+ 642 | '00642':1
+(1 row)
+
+-- ===================================================================
+-- JOIN queries
+-- ===================================================================
+-- Analyze ft4 and ft5 so that we have better statistics. These tables do not
+-- have use_remote_estimate set.
+ANALYZE ft4;
+ANALYZE ft5;
+-- join two tables
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c1, t1.c3
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ c1 | c1
+-----+-----
+ 101 | 101
+ 102 | 102
+ 103 | 103
+ 104 | 104
+ 105 | 105
+ 106 | 106
+ 107 | 107
+ 108 | 108
+ 109 | 109
+ 110 | 110
+(10 rows)
+
+-- join three tables
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) JOIN ft4 t3 ON (t3.c1 = t1.c1) ORDER BY t1.c3, t1.c1 OFFSET 10 LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t3.c3, t1.c3
+ Relations: ((public.ft1 t1) INNER JOIN (public.ft2 t2)) INNER JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3, r1.c3 FROM (("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) INNER JOIN "S 1"."T 3" r4 ON (((r1."C 1" = r4.c1)))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2, t3.c3 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) JOIN ft4 t3 ON (t3.c1 = t1.c1) ORDER BY t1.c3, t1.c1 OFFSET 10 LIMIT 10;
+ c1 | c2 | c3
+----+----+--------
+ 22 | 2 | AAA022
+ 24 | 4 | AAA024
+ 26 | 6 | AAA026
+ 28 | 8 | AAA028
+ 30 | 0 | AAA030
+ 32 | 2 | AAA032
+ 34 | 4 | AAA034
+ 36 | 6 | AAA036
+ 38 | 8 | AAA038
+ 40 | 0 | AAA040
+(10 rows)
+
+-- left outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c1
+ Relations: (public.ft4 t1) LEFT JOIN (public.ft5 t2)
+ Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r1 LEFT JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) ORDER BY r1.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c1 FROM ft4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ c1 | c1
+----+----
+ 22 |
+ 24 | 24
+ 26 |
+ 28 |
+ 30 | 30
+ 32 |
+ 34 |
+ 36 | 36
+ 38 |
+ 40 |
+(10 rows)
+
+-- left outer join three tables
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t3.c3
+ Relations: ((public.ft2 t1) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 LEFT JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ c1 | c2 | c3
+----+----+--------
+ 11 | 1 |
+ 12 | 2 | AAA012
+ 13 | 3 |
+ 14 | 4 | AAA014
+ 15 | 5 |
+ 16 | 6 | AAA016
+ 17 | 7 |
+ 18 | 8 | AAA018
+ 19 | 9 |
+ 20 | 0 | AAA020
+(10 rows)
+
+-- left outer join + placement of clauses.
+-- clauses within the nullable side are not pulled up, but top level clause on
+-- non-nullable side is pushed into non-nullable side
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE c1 < 10) t2 ON (t1.c1 = t2.c1) WHERE t1.c1 < 10;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t1.c2, ft5.c1, ft5.c2
+ Relations: (public.ft4 t1) LEFT JOIN (public.ft5)
+ Remote SQL: SELECT r1.c1, r1.c2, r4.c1, r4.c2 FROM ("S 1"."T 3" r1 LEFT JOIN "S 1"."T 4" r4 ON (((r1.c1 = r4.c1)) AND ((r4.c1 < 10)))) WHERE ((r1.c1 < 10))
+(4 rows)
+
+SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE c1 < 10) t2 ON (t1.c1 = t2.c1) WHERE t1.c1 < 10;
+ c1 | c2 | c1 | c2
+----+----+----+----
+ 2 | 3 | |
+ 4 | 5 | |
+ 6 | 7 | 6 | 7
+ 8 | 9 | |
+(4 rows)
+
+-- clauses within the nullable side are not pulled up, but the top level clause
+-- on nullable side is not pushed down into nullable side
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE c1 < 10) t2 ON (t1.c1 = t2.c1)
+ WHERE (t2.c1 < 10 OR t2.c1 IS NULL) AND t1.c1 < 10;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t1.c2, ft5.c1, ft5.c2
+ Relations: (public.ft4 t1) LEFT JOIN (public.ft5)
+ Remote SQL: SELECT r1.c1, r1.c2, r4.c1, r4.c2 FROM ("S 1"."T 3" r1 LEFT JOIN "S 1"."T 4" r4 ON (((r1.c1 = r4.c1)) AND ((r4.c1 < 10)))) WHERE (((r4.c1 < 10) OR (r4.c1 IS NULL))) AND ((r1.c1 < 10))
+(4 rows)
+
+SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE c1 < 10) t2 ON (t1.c1 = t2.c1)
+ WHERE (t2.c1 < 10 OR t2.c1 IS NULL) AND t1.c1 < 10;
+ c1 | c2 | c1 | c2
+----+----+----+----
+ 2 | 3 | |
+ 4 | 5 | |
+ 6 | 7 | 6 | 7
+ 8 | 9 | |
+(4 rows)
+
+-- right outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft5 t1 RIGHT JOIN ft4 t2 ON (t1.c1 = t2.c1) ORDER BY t2.c1, t1.c1 OFFSET 10 LIMIT 10;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c1
+ Relations: (public.ft4 t2) LEFT JOIN (public.ft5 t1)
+ Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r2 LEFT JOIN "S 1"."T 4" r1 ON (((r1.c1 = r2.c1)))) ORDER BY r2.c1 ASC NULLS LAST, r1.c1 ASC NULLS LAST LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c1 FROM ft5 t1 RIGHT JOIN ft4 t2 ON (t1.c1 = t2.c1) ORDER BY t2.c1, t1.c1 OFFSET 10 LIMIT 10;
+ c1 | c1
+----+----
+ | 22
+ 24 | 24
+ | 26
+ | 28
+ 30 | 30
+ | 32
+ | 34
+ 36 | 36
+ | 38
+ | 40
+(10 rows)
+
+-- right outer join three tables
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t3.c3
+ Relations: ((public.ft4 t3) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft2 t1)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 3" r4 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r4.c1)))) LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ c1 | c2 | c3
+----+----+--------
+ 22 | 2 | AAA022
+ 24 | 4 | AAA024
+ 26 | 6 | AAA026
+ 28 | 8 | AAA028
+ 30 | 0 | AAA030
+ 32 | 2 | AAA032
+ 34 | 4 | AAA034
+ 36 | 6 | AAA036
+ 38 | 8 | AAA038
+ 40 | 0 | AAA040
+(10 rows)
+
+-- full outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 45 LIMIT 10;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c1
+ Relations: (public.ft4 t1) FULL JOIN (public.ft5 t2)
+ Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) ORDER BY r1.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST LIMIT 10::bigint OFFSET 45::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 45 LIMIT 10;
+ c1 | c1
+-----+----
+ 92 |
+ 94 |
+ 96 | 96
+ 98 |
+ 100 |
+ | 3
+ | 9
+ | 15
+ | 21
+ | 27
+(10 rows)
+
+-- full outer join with restrictions on the joining relations
+-- a. the joining relations are both base relations
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: ft4.c1, ft5.c1
+ Relations: (public.ft4) FULL JOIN (public.ft5)
+ Remote SQL: SELECT s4.c1, s5.c1 FROM ((SELECT c1 FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s4(c1) FULL JOIN (SELECT c1 FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s5(c1) ON (((s4.c1 = s5.c1)))) ORDER BY s4.c1 ASC NULLS LAST, s5.c1 ASC NULLS LAST
+(4 rows)
+
+SELECT t1.c1, t2.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1;
+ c1 | c1
+----+----
+ 50 |
+ 52 |
+ 54 | 54
+ 56 |
+ 58 |
+ 60 | 60
+ | 51
+ | 57
+(8 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT 1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t2 ON (TRUE) OFFSET 10 LIMIT 10;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: 1
+ Relations: (public.ft4) FULL JOIN (public.ft5)
+ Remote SQL: SELECT NULL FROM ((SELECT NULL FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s4 FULL JOIN (SELECT NULL FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s5 ON (TRUE)) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT 1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t2 ON (TRUE) OFFSET 10 LIMIT 10;
+ ?column?
+----------
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+(10 rows)
+
+-- b. one of the joining relations is a base relation and the other is a join
+-- relation
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT t2.c1, t3.c1 FROM ft4 t2 LEFT JOIN ft5 t3 ON (t2.c1 = t3.c1) WHERE (t2.c1 between 50 and 60)) ss(a, b) ON (t1.c1 = ss.a) ORDER BY t1.c1, ss.a, ss.b;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: ft4.c1, t2.c1, t3.c1
+ Relations: (public.ft4) FULL JOIN ((public.ft4 t2) LEFT JOIN (public.ft5 t3))
+ Remote SQL: SELECT s4.c1, s8.c1, s8.c2 FROM ((SELECT c1 FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s4(c1) FULL JOIN (SELECT r5.c1, r6.c1 FROM ("S 1"."T 3" r5 LEFT JOIN "S 1"."T 4" r6 ON (((r5.c1 = r6.c1)))) WHERE ((r5.c1 >= 50)) AND ((r5.c1 <= 60))) s8(c1, c2) ON (((s4.c1 = s8.c1)))) ORDER BY s4.c1 ASC NULLS LAST, s8.c1 ASC NULLS LAST, s8.c2 ASC NULLS LAST
+(4 rows)
+
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT t2.c1, t3.c1 FROM ft4 t2 LEFT JOIN ft5 t3 ON (t2.c1 = t3.c1) WHERE (t2.c1 between 50 and 60)) ss(a, b) ON (t1.c1 = ss.a) ORDER BY t1.c1, ss.a, ss.b;
+ c1 | a | b
+----+----+----
+ 50 | 50 |
+ 52 | 52 |
+ 54 | 54 | 54
+ 56 | 56 |
+ 58 | 58 |
+ 60 | 60 | 60
+(6 rows)
+
+-- c. test deparsing the remote query as nested subqueries
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT t2.c1, t3.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t2 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t3 ON (t2.c1 = t3.c1) WHERE t2.c1 IS NULL OR t2.c1 IS NOT NULL) ss(a, b) ON (t1.c1 = ss.a) ORDER BY t1.c1, ss.a, ss.b;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: ft4.c1, ft4_1.c1, ft5.c1
+ Relations: (public.ft4) FULL JOIN ((public.ft4 ft4_1) FULL JOIN (public.ft5))
+ Remote SQL: SELECT s4.c1, s10.c1, s10.c2 FROM ((SELECT c1 FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s4(c1) FULL JOIN (SELECT s8.c1, s9.c1 FROM ((SELECT c1 FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s8(c1) FULL JOIN (SELECT c1 FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s9(c1) ON (((s8.c1 = s9.c1)))) WHERE (((s8.c1 IS NULL) OR (s8.c1 IS NOT NULL)))) s10(c1, c2) ON (((s4.c1 = s10.c1)))) ORDER BY s4.c1 ASC NULLS LAST, s10.c1 ASC NULLS LAST, s10.c2 ASC NULLS LAST
+(4 rows)
+
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT t2.c1, t3.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t2 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t3 ON (t2.c1 = t3.c1) WHERE t2.c1 IS NULL OR t2.c1 IS NOT NULL) ss(a, b) ON (t1.c1 = ss.a) ORDER BY t1.c1, ss.a, ss.b;
+ c1 | a | b
+----+----+----
+ 50 | 50 |
+ 52 | 52 |
+ 54 | 54 | 54
+ 56 | 56 |
+ 58 | 58 |
+ 60 | 60 | 60
+ | | 51
+ | | 57
+(8 rows)
+
+-- d. test deparsing rowmarked relations as subqueries
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM "S 1"."T 3" WHERE c1 = 50) t1 INNER JOIN (SELECT t2.c1, t3.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t2 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t3 ON (t2.c1 = t3.c1) WHERE t2.c1 IS NULL OR t2.c1 IS NOT NULL) ss(a, b) ON (TRUE) ORDER BY t1.c1, ss.a, ss.b FOR UPDATE OF t1;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ LockRows
+ Output: "T 3".c1, ft4.c1, ft5.c1, "T 3".ctid, ft4.*, ft5.*
+ -> Nested Loop
+ Output: "T 3".c1, ft4.c1, ft5.c1, "T 3".ctid, ft4.*, ft5.*
+ -> Foreign Scan
+ Output: ft4.c1, ft4.*, ft5.c1, ft5.*
+ Relations: (public.ft4) FULL JOIN (public.ft5)
+ Remote SQL: SELECT s8.c1, s8.c2, s9.c1, s9.c2 FROM ((SELECT c1, ROW(c1, c2, c3) FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s8(c1, c2) FULL JOIN (SELECT c1, ROW(c1, c2, c3) FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s9(c1, c2) ON (((s8.c1 = s9.c1)))) WHERE (((s8.c1 IS NULL) OR (s8.c1 IS NOT NULL))) ORDER BY s8.c1 ASC NULLS LAST, s9.c1 ASC NULLS LAST
+ -> Sort
+ Output: ft4.c1, ft4.*, ft5.c1, ft5.*
+ Sort Key: ft4.c1, ft5.c1
+ -> Hash Full Join
+ Output: ft4.c1, ft4.*, ft5.c1, ft5.*
+ Hash Cond: (ft4.c1 = ft5.c1)
+ Filter: ((ft4.c1 IS NULL) OR (ft4.c1 IS NOT NULL))
+ -> Foreign Scan on public.ft4
+ Output: ft4.c1, ft4.*
+ Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))
+ -> Hash
+ Output: ft5.c1, ft5.*
+ -> Foreign Scan on public.ft5
+ Output: ft5.c1, ft5.*
+ Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60))
+ -> Materialize
+ Output: "T 3".c1, "T 3".ctid
+ -> Seq Scan on "S 1"."T 3"
+ Output: "T 3".c1, "T 3".ctid
+ Filter: ("T 3".c1 = 50)
+(28 rows)
+
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM "S 1"."T 3" WHERE c1 = 50) t1 INNER JOIN (SELECT t2.c1, t3.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t2 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t3 ON (t2.c1 = t3.c1) WHERE t2.c1 IS NULL OR t2.c1 IS NOT NULL) ss(a, b) ON (TRUE) ORDER BY t1.c1, ss.a, ss.b FOR UPDATE OF t1;
+ c1 | a | b
+----+----+----
+ 50 | 50 |
+ 50 | 52 |
+ 50 | 54 | 54
+ 50 | 56 |
+ 50 | 58 |
+ 50 | 60 | 60
+ 50 | | 51
+ 50 | | 57
+(8 rows)
+
+-- full outer join + inner join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1, t3.c1 FROM ft4 t1 INNER JOIN ft5 t2 ON (t1.c1 = t2.c1 + 1 and t1.c1 between 50 and 60) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) ORDER BY t1.c1, t2.c1, t3.c1 LIMIT 10;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c1, t3.c1
+ Relations: ((public.ft4 t1) INNER JOIN (public.ft5 t2)) FULL JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1.c1, r2.c1, r4.c1 FROM (("S 1"."T 3" r1 INNER JOIN "S 1"."T 4" r2 ON (((r1.c1 = (r2.c1 + 1))) AND ((r1.c1 >= 50)) AND ((r1.c1 <= 60)))) FULL JOIN "S 1"."T 3" r4 ON (((r2.c1 = r4.c1)))) ORDER BY r1.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST, r4.c1 ASC NULLS LAST LIMIT 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c1, t3.c1 FROM ft4 t1 INNER JOIN ft5 t2 ON (t1.c1 = t2.c1 + 1 and t1.c1 between 50 and 60) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) ORDER BY t1.c1, t2.c1, t3.c1 LIMIT 10;
+ c1 | c1 | c1
+----+----+----
+ 52 | 51 |
+ 58 | 57 |
+ | | 2
+ | | 4
+ | | 6
+ | | 8
+ | | 10
+ | | 12
+ | | 14
+ | | 16
+(10 rows)
+
+-- full outer join three tables
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t3.c3
+ Relations: ((public.ft2 t1) FULL JOIN (public.ft2 t2)) FULL JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ c1 | c2 | c3
+----+----+--------
+ 11 | 1 |
+ 12 | 2 | AAA012
+ 13 | 3 |
+ 14 | 4 | AAA014
+ 15 | 5 |
+ 16 | 6 | AAA016
+ 17 | 7 |
+ 18 | 8 | AAA018
+ 19 | 9 |
+ 20 | 0 | AAA020
+(10 rows)
+
+-- full outer join + right outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t3.c3
+ Relations: ((public.ft4 t3) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft2 t1)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 3" r4 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r4.c1)))) LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ c1 | c2 | c3
+----+----+--------
+ 22 | 2 | AAA022
+ 24 | 4 | AAA024
+ 26 | 6 | AAA026
+ 28 | 8 | AAA028
+ 30 | 0 | AAA030
+ 32 | 2 | AAA032
+ 34 | 4 | AAA034
+ 36 | 6 | AAA036
+ 38 | 8 | AAA038
+ 40 | 0 | AAA040
+(10 rows)
+
+-- right outer join + full outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t3.c3
+ Relations: ((public.ft2 t2) LEFT JOIN (public.ft2 t1)) FULL JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ c1 | c2 | c3
+----+----+--------
+ 11 | 1 |
+ 12 | 2 | AAA012
+ 13 | 3 |
+ 14 | 4 | AAA014
+ 15 | 5 |
+ 16 | 6 | AAA016
+ 17 | 7 |
+ 18 | 8 | AAA018
+ 19 | 9 |
+ 20 | 0 | AAA020
+(10 rows)
+
+-- full outer join + left outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t3.c3
+ Relations: ((public.ft2 t1) FULL JOIN (public.ft2 t2)) LEFT JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ c1 | c2 | c3
+----+----+--------
+ 11 | 1 |
+ 12 | 2 | AAA012
+ 13 | 3 |
+ 14 | 4 | AAA014
+ 15 | 5 |
+ 16 | 6 | AAA016
+ 17 | 7 |
+ 18 | 8 | AAA018
+ 19 | 9 |
+ 20 | 0 | AAA020
+(10 rows)
+
+-- left outer join + full outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t3.c3
+ Relations: ((public.ft2 t1) LEFT JOIN (public.ft2 t2)) FULL JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r1 LEFT JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ c1 | c2 | c3
+----+----+--------
+ 11 | 1 |
+ 12 | 2 | AAA012
+ 13 | 3 |
+ 14 | 4 | AAA014
+ 15 | 5 |
+ 16 | 6 | AAA016
+ 17 | 7 |
+ 18 | 8 | AAA018
+ 19 | 9 |
+ 20 | 0 | AAA020
+(10 rows)
+
+SET enable_memoize TO off;
+-- right outer join + left outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t3.c3
+ Relations: ((public.ft2 t2) LEFT JOIN (public.ft2 t1)) LEFT JOIN (public.ft4 t3)
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ c1 | c2 | c3
+----+----+--------
+ 11 | 1 |
+ 12 | 2 | AAA012
+ 13 | 3 |
+ 14 | 4 | AAA014
+ 15 | 5 |
+ 16 | 6 | AAA016
+ 17 | 7 |
+ 18 | 8 | AAA018
+ 19 | 9 |
+ 20 | 0 | AAA020
+(10 rows)
+
+RESET enable_memoize;
+-- left outer join + right outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t3.c3
+ Relations: (public.ft4 t3) LEFT JOIN ((public.ft2 t1) INNER JOIN (public.ft2 t2))
+ Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM ("S 1"."T 3" r4 LEFT JOIN ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ON (((r2."C 1" = r4.c1)))) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+ c1 | c2 | c3
+----+----+--------
+ 22 | 2 | AAA022
+ 24 | 4 | AAA024
+ 26 | 6 | AAA026
+ 28 | 8 | AAA028
+ 30 | 0 | AAA030
+ 32 | 2 | AAA032
+ 34 | 4 | AAA034
+ 36 | 6 | AAA036
+ 38 | 8 | AAA038
+ 40 | 0 | AAA040
+(10 rows)
+
+-- full outer join + WHERE clause, only matched rows
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) WHERE (t1.c1 = t2.c1 OR t1.c1 IS NULL) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: t1.c1, t2.c1
+ -> Sort
+ Output: t1.c1, t2.c1
+ Sort Key: t1.c1, t2.c1
+ -> Foreign Scan
+ Output: t1.c1, t2.c1
+ Relations: (public.ft4 t1) FULL JOIN (public.ft5 t2)
+ Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 = r2.c1) OR (r1.c1 IS NULL)))
+(9 rows)
+
+SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) WHERE (t1.c1 = t2.c1 OR t1.c1 IS NULL) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ c1 | c1
+----+----
+ 66 | 66
+ 72 | 72
+ 78 | 78
+ 84 | 84
+ 90 | 90
+ 96 | 96
+ | 3
+ | 9
+ | 15
+ | 21
+(10 rows)
+
+-- full outer join + WHERE clause with shippable extensions set
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t1.c3 FROM ft1 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE postgres_fdw_abs(t1.c1) > 0 OFFSET 10 LIMIT 10;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c2, t1.c3
+ Relations: (public.ft1 t1) FULL JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2.c2, r1.c3 FROM ("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) WHERE ((public.postgres_fdw_abs(r1."C 1") > 0)) LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+ALTER SERVER loopback OPTIONS (DROP extensions);
+-- full outer join + WHERE clause with shippable extensions not set
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t1.c3 FROM ft1 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE postgres_fdw_abs(t1.c1) > 0 OFFSET 10 LIMIT 10;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: t1.c1, t2.c2, t1.c3
+ -> Foreign Scan
+ Output: t1.c1, t2.c2, t1.c3
+ Filter: (postgres_fdw_abs(t1.c1) > 0)
+ Relations: (public.ft1 t1) FULL JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2.c2, r1.c3 FROM ("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1"))))
+(7 rows)
+
+ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw');
+-- join two tables with FOR UPDATE clause
+-- tests whole-row reference for row marks
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE OF t1;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint FOR UPDATE OF r1
+(4 rows)
+
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE OF t1;
+ c1 | c1
+-----+-----
+ 101 | 101
+ 102 | 102
+ 103 | 103
+ 104 | 104
+ 105 | 105
+ 106 | 106
+ 107 | 107
+ 108 | 108
+ 109 | 109
+ 110 | 110
+(10 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint FOR UPDATE OF r1 FOR UPDATE OF r2
+(4 rows)
+
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE;
+ c1 | c1
+-----+-----
+ 101 | 101
+ 102 | 102
+ 103 | 103
+ 104 | 104
+ 105 | 105
+ 106 | 106
+ 107 | 107
+ 108 | 108
+ 109 | 109
+ 110 | 110
+(10 rows)
+
+-- join two tables with FOR SHARE clause
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE OF t1;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint FOR SHARE OF r1
+(4 rows)
+
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE OF t1;
+ c1 | c1
+-----+-----
+ 101 | 101
+ 102 | 102
+ 103 | 103
+ 104 | 104
+ 105 | 105
+ 106 | 106
+ 107 | 107
+ 108 | 108
+ 109 | 109
+ 110 | 110
+(10 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint FOR SHARE OF r1 FOR SHARE OF r2
+(4 rows)
+
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE;
+ c1 | c1
+-----+-----
+ 101 | 101
+ 102 | 102
+ 103 | 103
+ 104 | 104
+ 105 | 105
+ 106 | 106
+ 107 | 107
+ 108 | 108
+ 109 | 109
+ 110 | 110
+(10 rows)
+
+-- join in CTE
+EXPLAIN (VERBOSE, COSTS OFF)
+WITH t (c1_1, c1_3, c2_1) AS MATERIALIZED (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: t.c1_1, t.c2_1, t.c1_3
+ CTE t
+ -> Foreign Scan
+ Output: t1.c1, t1.c3, t2.c1
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r1.c3, r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1"))))
+ -> Sort
+ Output: t.c1_1, t.c2_1, t.c1_3
+ Sort Key: t.c1_3, t.c1_1
+ -> CTE Scan on t
+ Output: t.c1_1, t.c2_1, t.c1_3
+(12 rows)
+
+WITH t (c1_1, c1_3, c2_1) AS MATERIALIZED (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
+ c1_1 | c2_1
+------+------
+ 101 | 101
+ 102 | 102
+ 103 | 103
+ 104 | 104
+ 105 | 105
+ 106 | 106
+ 107 | 107
+ 108 | 108
+ 109 | 109
+ 110 | 110
+(10 rows)
+
+-- ctid with whole-row reference
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.ctid, t1, t2, t1.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.ctid, t1.*, t2.*, t1.c1, t1.c3
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1.ctid, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END, r1."C 1", r1.c3 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint
+(4 rows)
+
+-- SEMI JOIN, not pushed down
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1 FROM ft1 t1 WHERE EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c1) ORDER BY t1.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Limit
+ Output: t1.c1
+ -> Merge Semi Join
+ Output: t1.c1
+ Merge Cond: (t1.c1 = t2.c1)
+ -> Foreign Scan on public.ft1 t1
+ Output: t1.c1
+ Remote SQL: SELECT "C 1" FROM "S 1"."T 1" ORDER BY "C 1" ASC NULLS LAST
+ -> Foreign Scan on public.ft2 t2
+ Output: t2.c1
+ Remote SQL: SELECT "C 1" FROM "S 1"."T 1" ORDER BY "C 1" ASC NULLS LAST
+(11 rows)
+
+SELECT t1.c1 FROM ft1 t1 WHERE EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c1) ORDER BY t1.c1 OFFSET 100 LIMIT 10;
+ c1
+-----
+ 101
+ 102
+ 103
+ 104
+ 105
+ 106
+ 107
+ 108
+ 109
+ 110
+(10 rows)
+
+-- ANTI JOIN, not pushed down
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1 FROM ft1 t1 WHERE NOT EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c2) ORDER BY t1.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Limit
+ Output: t1.c1
+ -> Merge Anti Join
+ Output: t1.c1
+ Merge Cond: (t1.c1 = t2.c2)
+ -> Foreign Scan on public.ft1 t1
+ Output: t1.c1
+ Remote SQL: SELECT "C 1" FROM "S 1"."T 1" ORDER BY "C 1" ASC NULLS LAST
+ -> Foreign Scan on public.ft2 t2
+ Output: t2.c2
+ Remote SQL: SELECT c2 FROM "S 1"."T 1" ORDER BY c2 ASC NULLS LAST
+(11 rows)
+
+SELECT t1.c1 FROM ft1 t1 WHERE NOT EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c2) ORDER BY t1.c1 OFFSET 100 LIMIT 10;
+ c1
+-----
+ 110
+ 111
+ 112
+ 113
+ 114
+ 115
+ 116
+ 117
+ 118
+ 119
+(10 rows)
+
+-- CROSS JOIN can be pushed down
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 CROSS JOIN ft2 t2 ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c1, t2.c1
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (TRUE)) ORDER BY r1."C 1" ASC NULLS LAST, r2."C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 100::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c1 FROM ft1 t1 CROSS JOIN ft2 t2 ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+ c1 | c1
+----+-----
+ 1 | 101
+ 1 | 102
+ 1 | 103
+ 1 | 104
+ 1 | 105
+ 1 | 106
+ 1 | 107
+ 1 | 108
+ 1 | 109
+ 1 | 110
+(10 rows)
+
+-- different server, not pushed down. No result expected.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft5 t1 JOIN ft6 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Limit
+ Output: t1.c1, t2.c1
+ -> Merge Join
+ Output: t1.c1, t2.c1
+ Merge Cond: (t2.c1 = t1.c1)
+ -> Foreign Scan on public.ft6 t2
+ Output: t2.c1, t2.c2, t2.c3
+ Remote SQL: SELECT c1 FROM "S 1"."T 4" ORDER BY c1 ASC NULLS LAST
+ -> Materialize
+ Output: t1.c1, t1.c2, t1.c3
+ -> Foreign Scan on public.ft5 t1
+ Output: t1.c1, t1.c2, t1.c3
+ Remote SQL: SELECT c1 FROM "S 1"."T 4" ORDER BY c1 ASC NULLS LAST
+(13 rows)
+
+SELECT t1.c1, t2.c1 FROM ft5 t1 JOIN ft6 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+ c1 | c1
+----+----
+(0 rows)
+
+-- unsafe join conditions (c8 has a UDT), not pushed down. Practically a CROSS
+-- JOIN since c8 has the same value in both tables.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 LEFT JOIN ft2 t2 ON (t1.c8 = t2.c8) ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Limit
+ Output: t1.c1, t2.c1
+ -> Sort
+ Output: t1.c1, t2.c1
+ Sort Key: t1.c1, t2.c1
+ -> Merge Left Join
+ Output: t1.c1, t2.c1
+ Merge Cond: (t1.c8 = t2.c8)
+ -> Sort
+ Output: t1.c1, t1.c8
+ Sort Key: t1.c8
+ -> Foreign Scan on public.ft1 t1
+ Output: t1.c1, t1.c8
+ Remote SQL: SELECT "C 1", c8 FROM "S 1"."T 1"
+ -> Sort
+ Output: t2.c1, t2.c8
+ Sort Key: t2.c8
+ -> Foreign Scan on public.ft2 t2
+ Output: t2.c1, t2.c8
+ Remote SQL: SELECT "C 1", c8 FROM "S 1"."T 1"
+(20 rows)
+
+SELECT t1.c1, t2.c1 FROM ft1 t1 LEFT JOIN ft2 t2 ON (t1.c8 = t2.c8) ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+ c1 | c1
+----+-----
+ 1 | 101
+ 1 | 102
+ 1 | 103
+ 1 | 104
+ 1 | 105
+ 1 | 106
+ 1 | 107
+ 1 | 108
+ 1 | 109
+ 1 | 110
+(10 rows)
+
+-- unsafe conditions on one side (c8 has a UDT), not pushed down.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE t1.c8 = 'foo' ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Limit
+ Output: t1.c1, t2.c1, t1.c3
+ -> Sort
+ Output: t1.c1, t2.c1, t1.c3
+ Sort Key: t1.c3, t1.c1
+ -> Hash Right Join
+ Output: t1.c1, t2.c1, t1.c3
+ Hash Cond: (t2.c1 = t1.c1)
+ -> Foreign Scan on public.ft2 t2
+ Output: t2.c1
+ Remote SQL: SELECT "C 1" FROM "S 1"."T 1"
+ -> Hash
+ Output: t1.c1, t1.c3
+ -> Foreign Scan on public.ft1 t1
+ Output: t1.c1, t1.c3
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c3, c8 FROM "S 1"."T 1"
+(17 rows)
+
+SELECT t1.c1, t2.c1 FROM ft1 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE t1.c8 = 'foo' ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ c1 | c1
+-----+-----
+ 101 | 101
+ 102 | 102
+ 103 | 103
+ 104 | 104
+ 105 | 105
+ 106 | 106
+ 107 | 107
+ 108 | 108
+ 109 | 109
+ 110 | 110
+(10 rows)
+
+-- join where the WHERE-clause condition that is unsafe to push down refers to
+-- a column not in the SELECT clause. In this test the unsafe clause needs
+-- column references from both joining sides so that it is not pushed down
+-- into either of the joining sides.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE t1.c8 = t2.c8 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: t1.c1, t2.c1, t1.c3
+ -> Sort
+ Output: t1.c1, t2.c1, t1.c3
+ Sort Key: t1.c3, t1.c1
+ -> Foreign Scan
+ Output: t1.c1, t2.c1, t1.c3
+ Filter: (t1.c8 = t2.c8)
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, r1.c8, r2.c8 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1"))))
+(10 rows)
+
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE t1.c8 = t2.c8 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ c1 | c1
+-----+-----
+ 101 | 101
+ 102 | 102
+ 103 | 103
+ 104 | 104
+ 105 | 105
+ 106 | 106
+ 107 | 107
+ 108 | 108
+ 109 | 109
+ 110 | 110
+(10 rows)
+
+-- Aggregate after UNION, for testing setrefs
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) UNION SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) AS t (t1c1, t2c1) GROUP BY t1c1 ORDER BY t1c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: t1.c1, (avg((t1.c1 + t2.c1)))
+ -> Sort
+ Output: t1.c1, (avg((t1.c1 + t2.c1)))
+ Sort Key: t1.c1
+ -> HashAggregate
+ Output: t1.c1, avg((t1.c1 + t2.c1))
+ Group Key: t1.c1
+ -> HashAggregate
+ Output: t1.c1, t2.c1
+ Group Key: t1.c1, t2.c1
+ -> Append
+ -> Foreign Scan
+ Output: t1.c1, t2.c1
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1"))))
+ -> Foreign Scan
+ Output: t1_1.c1, t2_1.c1
+ Relations: (public.ft1 t1_1) INNER JOIN (public.ft2 t2_1)
+ Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1"))))
+(20 rows)
+
+SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) UNION SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) AS t (t1c1, t2c1) GROUP BY t1c1 ORDER BY t1c1 OFFSET 100 LIMIT 10;
+ t1c1 | avg
+------+----------------------
+ 101 | 202.0000000000000000
+ 102 | 204.0000000000000000
+ 103 | 206.0000000000000000
+ 104 | 208.0000000000000000
+ 105 | 210.0000000000000000
+ 106 | 212.0000000000000000
+ 107 | 214.0000000000000000
+ 108 | 216.0000000000000000
+ 109 | 218.0000000000000000
+ 110 | 220.0000000000000000
+(10 rows)
+
+-- join with lateral reference
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: t1."C 1"
+ -> Nested Loop
+ Output: t1."C 1"
+ -> Index Scan using t1_pkey on "S 1"."T 1" t1
+ Output: t1."C 1", t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ -> Memoize
+ Cache Key: t1.c2
+ Cache Mode: binary
+ -> Subquery Scan on q
+ -> HashAggregate
+ Output: t2.c1, t3.c1
+ Group Key: t2.c1, t3.c1
+ -> Foreign Scan
+ Output: t2.c1, t3.c1
+ Relations: (public.ft1 t2) INNER JOIN (public.ft2 t3)
+ Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")) AND ((r1.c2 = $1::integer))))
+(17 rows)
+
+SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
+ C 1
+-----
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+(10 rows)
+
+-- non-Var items in targetlist of the nullable rel of a join preventing
+-- push-down in some cases
+-- unable to push {ft1, ft2}
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT q.a, ft2.c1 FROM (SELECT 13 FROM ft1 WHERE c1 = 13) q(a) RIGHT JOIN ft2 ON (q.a = ft2.c1) WHERE ft2.c1 BETWEEN 10 AND 15;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------
+ Nested Loop Left Join
+ Output: (13), ft2.c1
+ Join Filter: (13 = ft2.c1)
+ -> Foreign Scan on public.ft2
+ Output: ft2.c1
+ Remote SQL: SELECT "C 1" FROM "S 1"."T 1" WHERE (("C 1" >= 10)) AND (("C 1" <= 15)) ORDER BY "C 1" ASC NULLS LAST
+ -> Materialize
+ Output: (13)
+ -> Foreign Scan on public.ft1
+ Output: 13
+ Remote SQL: SELECT NULL FROM "S 1"."T 1" WHERE (("C 1" = 13))
+(11 rows)
+
+SELECT q.a, ft2.c1 FROM (SELECT 13 FROM ft1 WHERE c1 = 13) q(a) RIGHT JOIN ft2 ON (q.a = ft2.c1) WHERE ft2.c1 BETWEEN 10 AND 15;
+ a | c1
+----+----
+ | 10
+ | 11
+ | 12
+ 13 | 13
+ | 14
+ | 15
+(6 rows)
+
+-- ok to push {ft1, ft2} but not {ft1, ft2, ft4}
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT ft4.c1, q.* FROM ft4 LEFT JOIN (SELECT 13, ft1.c1, ft2.c1 FROM ft1 RIGHT JOIN ft2 ON (ft1.c1 = ft2.c1) WHERE ft1.c1 = 12) q(a, b, c) ON (ft4.c1 = q.b) WHERE ft4.c1 BETWEEN 10 AND 15;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop Left Join
+ Output: ft4.c1, (13), ft1.c1, ft2.c1
+ Join Filter: (ft4.c1 = ft1.c1)
+ -> Foreign Scan on public.ft4
+ Output: ft4.c1, ft4.c2, ft4.c3
+ Remote SQL: SELECT c1 FROM "S 1"."T 3" WHERE ((c1 >= 10)) AND ((c1 <= 15))
+ -> Materialize
+ Output: ft1.c1, ft2.c1, (13)
+ -> Foreign Scan
+ Output: ft1.c1, ft2.c1, 13
+ Relations: (public.ft1) INNER JOIN (public.ft2)
+ Remote SQL: SELECT r4."C 1", r5."C 1" FROM ("S 1"."T 1" r4 INNER JOIN "S 1"."T 1" r5 ON (((r5."C 1" = 12)) AND ((r4."C 1" = 12)))) ORDER BY r4."C 1" ASC NULLS LAST
+(12 rows)
+
+SELECT ft4.c1, q.* FROM ft4 LEFT JOIN (SELECT 13, ft1.c1, ft2.c1 FROM ft1 RIGHT JOIN ft2 ON (ft1.c1 = ft2.c1) WHERE ft1.c1 = 12) q(a, b, c) ON (ft4.c1 = q.b) WHERE ft4.c1 BETWEEN 10 AND 15;
+ c1 | a | b | c
+----+----+----+----
+ 10 | | |
+ 12 | 13 | 12 | 12
+ 14 | | |
+(3 rows)
+
+-- join with nullable side having some columns with null values
+UPDATE ft5 SET c3 = null where c1 % 9 = 0;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT ft5, ft5.c1, ft5.c2, ft5.c3, ft4.c1, ft4.c2 FROM ft5 left join ft4 on ft5.c1 = ft4.c1 WHERE ft4.c1 BETWEEN 10 and 30 ORDER BY ft5.c1, ft4.c1;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: ft5.*, ft5.c1, ft5.c2, ft5.c3, ft4.c1, ft4.c2
+ Relations: (public.ft5) INNER JOIN (public.ft4)
+ Remote SQL: SELECT CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1.c1, r1.c2, r1.c3) END, r1.c1, r1.c2, r1.c3, r2.c1, r2.c2 FROM ("S 1"."T 4" r1 INNER JOIN "S 1"."T 3" r2 ON (((r1.c1 = r2.c1)) AND ((r2.c1 >= 10)) AND ((r2.c1 <= 30)))) ORDER BY r1.c1 ASC NULLS LAST
+(4 rows)
+
+SELECT ft5, ft5.c1, ft5.c2, ft5.c3, ft4.c1, ft4.c2 FROM ft5 left join ft4 on ft5.c1 = ft4.c1 WHERE ft4.c1 BETWEEN 10 and 30 ORDER BY ft5.c1, ft4.c1;
+ ft5 | c1 | c2 | c3 | c1 | c2
+----------------+----+----+--------+----+----
+ (12,13,AAA012) | 12 | 13 | AAA012 | 12 | 13
+ (18,19,) | 18 | 19 | | 18 | 19
+ (24,25,AAA024) | 24 | 25 | AAA024 | 24 | 25
+ (30,31,AAA030) | 30 | 31 | AAA030 | 30 | 31
+(4 rows)
+
+-- multi-way join involving multiple merge joins
+-- (this case used to have EPQ-related planning problems)
+CREATE TABLE local_tbl (c1 int NOT NULL, c2 int NOT NULL, c3 text, CONSTRAINT local_tbl_pkey PRIMARY KEY (c1));
+INSERT INTO local_tbl SELECT id, id % 10, to_char(id, 'FM0000') FROM generate_series(1, 1000) id;
+ANALYZE local_tbl;
+SET enable_nestloop TO false;
+SET enable_hashjoin TO false;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
+ AND ft1.c2 = ft5.c1 AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ LockRows
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, local_tbl.c1, local_tbl.c2, local_tbl.c3, ft1.*, ft2.*, ft4.*, ft5.*, local_tbl.ctid
+ -> Merge Join
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, local_tbl.c1, local_tbl.c2, local_tbl.c3, ft1.*, ft2.*, ft4.*, ft5.*, local_tbl.ctid
+ Inner Unique: true
+ Merge Cond: (ft1.c2 = local_tbl.c1)
+ -> Foreign Scan
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*, ft4.c1, ft4.c2, ft4.c3, ft4.*, ft5.c1, ft5.c2, ft5.c3, ft5.*
+ Relations: (((public.ft1) INNER JOIN (public.ft2)) INNER JOIN (public.ft4)) INNER JOIN (public.ft5)
+ Remote SQL: SELECT r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END, r3.c1, r3.c2, r3.c3, CASE WHEN (r3.*)::text IS NOT NULL THEN ROW(r3.c1, r3.c2, r3.c3) END, r4.c1, r4.c2, r4.c3, CASE WHEN (r4.*)::text IS NOT NULL THEN ROW(r4.c1, r4.c2, r4.c3) END FROM ((("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")) AND ((r2."C 1" < 100)) AND ((r1."C 1" < 100)))) INNER JOIN "S 1"."T 3" r3 ON (((r1.c2 = r3.c1)))) INNER JOIN "S 1"."T 4" r4 ON (((r1.c2 = r4.c1)))) ORDER BY r1.c2 ASC NULLS LAST FOR UPDATE OF r1 FOR UPDATE OF r2 FOR UPDATE OF r3 FOR UPDATE OF r4
+ -> Merge Join
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*, ft4.c1, ft4.c2, ft4.c3, ft4.*, ft5.c1, ft5.c2, ft5.c3, ft5.*
+ Merge Cond: (ft1.c2 = ft5.c1)
+ -> Merge Join
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*, ft4.c1, ft4.c2, ft4.c3, ft4.*
+ Merge Cond: (ft1.c2 = ft4.c1)
+ -> Sort
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*
+ Sort Key: ft1.c2
+ -> Merge Join
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*
+ Merge Cond: (ft1.c1 = ft2.c1)
+ -> Sort
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*
+ Sort Key: ft1.c1
+ -> Foreign Scan on public.ft1
+ Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 100)) FOR UPDATE
+ -> Materialize
+ Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*
+ -> Foreign Scan on public.ft2
+ Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 100)) ORDER BY "C 1" ASC NULLS LAST FOR UPDATE
+ -> Sort
+ Output: ft4.c1, ft4.c2, ft4.c3, ft4.*
+ Sort Key: ft4.c1
+ -> Foreign Scan on public.ft4
+ Output: ft4.c1, ft4.c2, ft4.c3, ft4.*
+ Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3" FOR UPDATE
+ -> Sort
+ Output: ft5.c1, ft5.c2, ft5.c3, ft5.*
+ Sort Key: ft5.c1
+ -> Foreign Scan on public.ft5
+ Output: ft5.c1, ft5.c2, ft5.c3, ft5.*
+ Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4" FOR UPDATE
+ -> Index Scan using local_tbl_pkey on public.local_tbl
+ Output: local_tbl.c1, local_tbl.c2, local_tbl.c3, local_tbl.ctid
+(47 rows)
+
+SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
+ AND ft1.c2 = ft5.c1 AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c1 | c2 | c3 | c1 | c2 | c3
+----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+--------+----+----+--------+----+----+------
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+ 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
+(10 rows)
+
+RESET enable_nestloop;
+RESET enable_hashjoin;
+DROP TABLE local_tbl;
+-- check join pushdown in situations where multiple userids are involved
+CREATE ROLE regress_view_owner SUPERUSER;
+CREATE USER MAPPING FOR regress_view_owner SERVER loopback;
+GRANT SELECT ON ft4 TO regress_view_owner;
+GRANT SELECT ON ft5 TO regress_view_owner;
+CREATE VIEW v4 AS SELECT * FROM ft4;
+CREATE VIEW v5 AS SELECT * FROM ft5;
+ALTER VIEW v5 OWNER TO regress_view_owner;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10; -- can't be pushed down, different view owners
+ QUERY PLAN
+----------------------------------------------------------------------
+ Limit
+ Output: ft4.c1, ft5.c2, ft5.c1
+ -> Sort
+ Output: ft4.c1, ft5.c2, ft5.c1
+ Sort Key: ft4.c1, ft5.c1
+ -> Hash Left Join
+ Output: ft4.c1, ft5.c2, ft5.c1
+ Hash Cond: (ft4.c1 = ft5.c1)
+ -> Foreign Scan on public.ft4
+ Output: ft4.c1, ft4.c2, ft4.c3
+ Remote SQL: SELECT c1 FROM "S 1"."T 3"
+ -> Hash
+ Output: ft5.c2, ft5.c1
+ -> Foreign Scan on public.ft5
+ Output: ft5.c2, ft5.c1
+ Remote SQL: SELECT c1, c2 FROM "S 1"."T 4"
+(16 rows)
+
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ c1 | c2
+----+----
+ 22 |
+ 24 | 25
+ 26 |
+ 28 |
+ 30 | 31
+ 32 |
+ 34 |
+ 36 | 37
+ 38 |
+ 40 |
+(10 rows)
+
+ALTER VIEW v4 OWNER TO regress_view_owner;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10; -- can be pushed down
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: ft4.c1, ft5.c2, ft5.c1
+ Relations: (public.ft4) LEFT JOIN (public.ft5)
+ Remote SQL: SELECT r6.c1, r9.c2, r9.c1 FROM ("S 1"."T 3" r6 LEFT JOIN "S 1"."T 4" r9 ON (((r6.c1 = r9.c1)))) ORDER BY r6.c1 ASC NULLS LAST, r9.c1 ASC NULLS LAST LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ c1 | c2
+----+----
+ 22 |
+ 24 | 25
+ 26 |
+ 28 |
+ 30 | 31
+ 32 |
+ 34 |
+ 36 | 37
+ 38 |
+ 40 |
+(10 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10; -- can't be pushed down, view owner not current user
+ QUERY PLAN
+----------------------------------------------------------------------
+ Limit
+ Output: ft4.c1, t2.c2, t2.c1
+ -> Sort
+ Output: ft4.c1, t2.c2, t2.c1
+ Sort Key: ft4.c1, t2.c1
+ -> Hash Left Join
+ Output: ft4.c1, t2.c2, t2.c1
+ Hash Cond: (ft4.c1 = t2.c1)
+ -> Foreign Scan on public.ft4
+ Output: ft4.c1, ft4.c2, ft4.c3
+ Remote SQL: SELECT c1 FROM "S 1"."T 3"
+ -> Hash
+ Output: t2.c2, t2.c1
+ -> Foreign Scan on public.ft5 t2
+ Output: t2.c2, t2.c1
+ Remote SQL: SELECT c1, c2 FROM "S 1"."T 4"
+(16 rows)
+
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ c1 | c2
+----+----
+ 22 |
+ 24 | 25
+ 26 |
+ 28 |
+ 30 | 31
+ 32 |
+ 34 |
+ 36 | 37
+ 38 |
+ 40 |
+(10 rows)
+
+ALTER VIEW v4 OWNER TO CURRENT_USER;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10; -- can be pushed down
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: ft4.c1, t2.c2, t2.c1
+ Relations: (public.ft4) LEFT JOIN (public.ft5 t2)
+ Remote SQL: SELECT r6.c1, r2.c2, r2.c1 FROM ("S 1"."T 3" r6 LEFT JOIN "S 1"."T 4" r2 ON (((r6.c1 = r2.c1)))) ORDER BY r6.c1 ASC NULLS LAST, r2.c1 ASC NULLS LAST LIMIT 10::bigint OFFSET 10::bigint
+(4 rows)
+
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ c1 | c2
+----+----
+ 22 |
+ 24 | 25
+ 26 |
+ 28 |
+ 30 | 31
+ 32 |
+ 34 |
+ 36 | 37
+ 38 |
+ 40 |
+(10 rows)
+
+ALTER VIEW v4 OWNER TO regress_view_owner;
+-- cleanup
+DROP OWNED BY regress_view_owner;
+DROP ROLE regress_view_owner;
+-- ===================================================================
+-- Aggregate and grouping queries
+-- ===================================================================
+-- Simple aggregates
+explain (verbose, costs off)
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(c6)), (sum(c1)), (avg(c1)), (min(c2)), (max(c1)), (stddev(c2)), ((sum(c1)) * ((random() <= '1'::double precision))::integer), c2
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT count(c6), sum("C 1"), avg("C 1"), min(c2), max("C 1"), stddev(c2), c2 FROM "S 1"."T 1" WHERE ((c2 < 5)) GROUP BY 7 ORDER BY count(c6) ASC NULLS LAST, sum("C 1") ASC NULLS LAST
+(4 rows)
+
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2;
+ count | sum | avg | min | max | stddev | sum2
+-------+-------+----------------------+-----+------+--------+-------
+ 100 | 49600 | 496.0000000000000000 | 1 | 991 | 0 | 49600
+ 100 | 49700 | 497.0000000000000000 | 2 | 992 | 0 | 49700
+ 100 | 49800 | 498.0000000000000000 | 3 | 993 | 0 | 49800
+ 100 | 49900 | 499.0000000000000000 | 4 | 994 | 0 | 49900
+ 100 | 50500 | 505.0000000000000000 | 0 | 1000 | 0 | 50500
+(5 rows)
+
+explain (verbose, costs off)
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2 limit 1;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(c6)), (sum(c1)), (avg(c1)), (min(c2)), (max(c1)), (stddev(c2)), ((sum(c1)) * ((random() <= '1'::double precision))::integer), c2
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT count(c6), sum("C 1"), avg("C 1"), min(c2), max("C 1"), stddev(c2), c2 FROM "S 1"."T 1" WHERE ((c2 < 5)) GROUP BY 7 ORDER BY count(c6) ASC NULLS LAST, sum("C 1") ASC NULLS LAST LIMIT 1::bigint
+(4 rows)
+
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2 limit 1;
+ count | sum | avg | min | max | stddev | sum2
+-------+-------+----------------------+-----+-----+--------+-------
+ 100 | 49600 | 496.0000000000000000 | 1 | 991 | 0 | 49600
+(1 row)
+
+-- Aggregate is not pushed down as aggregation contains random()
+explain (verbose, costs off)
+select sum(c1 * (random() <= 1)::int) as sum, avg(c1) from ft1;
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Aggregate
+ Output: sum((c1 * ((random() <= '1'::double precision))::integer)), avg(c1)
+ -> Foreign Scan on public.ft1
+ Output: c1
+ Remote SQL: SELECT "C 1" FROM "S 1"."T 1"
+(5 rows)
+
+-- Aggregate over join query
+explain (verbose, costs off)
+select count(*), sum(t1.c1), avg(t2.c1) from ft1 t1 inner join ft1 t2 on (t1.c2 = t2.c2) where t1.c2 = 6;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(*)), (sum(t1.c1)), (avg(t2.c1))
+ Relations: Aggregate on ((public.ft1 t1) INNER JOIN (public.ft1 t2))
+ Remote SQL: SELECT count(*), sum(r1."C 1"), avg(r2."C 1") FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r2.c2 = 6)) AND ((r1.c2 = 6))))
+(4 rows)
+
+select count(*), sum(t1.c1), avg(t2.c1) from ft1 t1 inner join ft1 t2 on (t1.c2 = t2.c2) where t1.c2 = 6;
+ count | sum | avg
+-------+---------+----------------------
+ 10000 | 5010000 | 501.0000000000000000
+(1 row)
+
+-- Not pushed down due to local conditions present in the underlying input rel
+explain (verbose, costs off)
+select sum(t1.c1), count(t2.c1) from ft1 t1 inner join ft2 t2 on (t1.c1 = t2.c1) where ((t1.c1 * t2.c1)/(t1.c1 * t2.c1)) * random() <= 1;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------
+ Aggregate
+ Output: sum(t1.c1), count(t2.c1)
+ -> Foreign Scan
+ Output: t1.c1, t2.c1
+ Filter: (((((t1.c1 * t2.c1) / (t1.c1 * t2.c1)))::double precision * random()) <= '1'::double precision)
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1"))))
+(7 rows)
+
+-- GROUP BY clause containing expressions
+explain (verbose, costs off)
+select c2/2, sum(c2) * (c2/2) from ft1 group by c2/2 order by c2/2;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: ((c2 / 2)), ((sum(c2) * (c2 / 2)))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT (c2 / 2), (sum(c2) * (c2 / 2)) FROM "S 1"."T 1" GROUP BY 1 ORDER BY (c2 / 2) ASC NULLS LAST
+(4 rows)
+
+select c2/2, sum(c2) * (c2/2) from ft1 group by c2/2 order by c2/2;
+ ?column? | ?column?
+----------+----------
+ 0 | 0
+ 1 | 500
+ 2 | 1800
+ 3 | 3900
+ 4 | 6800
+(5 rows)
+
+-- Aggregates in subquery are pushed down.
+explain (verbose, costs off)
+select count(x.a), sum(x.a) from (select c2 a, sum(c1) b from ft1 group by c2, sqrt(c1) order by 1, 2) x;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------
+ Aggregate
+ Output: count(ft1.c2), sum(ft1.c2)
+ -> Foreign Scan
+ Output: ft1.c2, (sum(ft1.c1)), (sqrt((ft1.c1)::double precision))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT c2, sum("C 1"), sqrt("C 1") FROM "S 1"."T 1" GROUP BY 1, 3 ORDER BY c2 ASC NULLS LAST, sum("C 1") ASC NULLS LAST
+(6 rows)
+
+select count(x.a), sum(x.a) from (select c2 a, sum(c1) b from ft1 group by c2, sqrt(c1) order by 1, 2) x;
+ count | sum
+-------+------
+ 1000 | 4500
+(1 row)
+
+-- Aggregate is still pushed down by taking the unshippable expression out
+explain (verbose, costs off)
+select c2 * (random() <= 1)::int as sum1, sum(c1) * c2 as sum2 from ft1 group by c2 order by 1, 2;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------
+ Sort
+ Output: ((c2 * ((random() <= '1'::double precision))::integer)), ((sum(c1) * c2)), c2
+ Sort Key: ((ft1.c2 * ((random() <= '1'::double precision))::integer)), ((sum(ft1.c1) * ft1.c2))
+ -> Foreign Scan
+ Output: (c2 * ((random() <= '1'::double precision))::integer), ((sum(c1) * c2)), c2
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT (sum("C 1") * c2), c2 FROM "S 1"."T 1" GROUP BY 2
+(7 rows)
+
+select c2 * (random() <= 1)::int as sum1, sum(c1) * c2 as sum2 from ft1 group by c2 order by 1, 2;
+ sum1 | sum2
+------+--------
+ 0 | 0
+ 1 | 49600
+ 2 | 99400
+ 3 | 149400
+ 4 | 199600
+ 5 | 250000
+ 6 | 300600
+ 7 | 351400
+ 8 | 402400
+ 9 | 453600
+(10 rows)
+
+-- Aggregates with an unshippable GROUP BY clause are not pushed down
+explain (verbose, costs off)
+select c2 * (random() <= 1)::int as c2 from ft2 group by c2 * (random() <= 1)::int order by 1;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Sort
+ Output: ((c2 * ((random() <= '1'::double precision))::integer))
+ Sort Key: ((ft2.c2 * ((random() <= '1'::double precision))::integer))
+ -> HashAggregate
+ Output: ((c2 * ((random() <= '1'::double precision))::integer))
+ Group Key: (ft2.c2 * ((random() <= '1'::double precision))::integer)
+ -> Foreign Scan on public.ft2
+ Output: (c2 * ((random() <= '1'::double precision))::integer)
+ Remote SQL: SELECT c2 FROM "S 1"."T 1"
+(9 rows)
+
+-- GROUP BY clause in various forms: ordinal position, alias and constant expression
+explain (verbose, costs off)
+select count(c2) w, c2 x, 5 y, 7.0 z from ft1 group by 2, y, 9.0::int order by 2;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Sort
+ Output: (count(c2)), c2, 5, 7.0, 9
+ Sort Key: ft1.c2
+ -> Foreign Scan
+ Output: (count(c2)), c2, 5, 7.0, 9
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT count(c2), c2, 5, 7.0, 9 FROM "S 1"."T 1" GROUP BY 2, 3, 5
+(7 rows)
+
+select count(c2) w, c2 x, 5 y, 7.0 z from ft1 group by 2, y, 9.0::int order by 2;
+ w | x | y | z
+-----+---+---+-----
+ 100 | 0 | 5 | 7.0
+ 100 | 1 | 5 | 7.0
+ 100 | 2 | 5 | 7.0
+ 100 | 3 | 5 | 7.0
+ 100 | 4 | 5 | 7.0
+ 100 | 5 | 5 | 7.0
+ 100 | 6 | 5 | 7.0
+ 100 | 7 | 5 | 7.0
+ 100 | 8 | 5 | 7.0
+ 100 | 9 | 5 | 7.0
+(10 rows)
+
+-- GROUP BY clause referring to same column multiple times
+-- Also, ORDER BY contains an aggregate function
+explain (verbose, costs off)
+select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1);
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: c2, c2, (sum(c1))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT c2, c2, sum("C 1") FROM "S 1"."T 1" WHERE ((c2 > 6)) GROUP BY 1, 2 ORDER BY sum("C 1") ASC NULLS LAST
+(4 rows)
+
+select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1);
+ c2 | c2
+----+----
+ 7 | 7
+ 8 | 8
+ 9 | 9
+(3 rows)
+
+-- Testing HAVING clause shippability
+explain (verbose, costs off)
+select c2, sum(c1) from ft2 group by c2 having avg(c1) < 500 and sum(c1) < 49800 order by c2;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: c2, (sum(c1))
+ Relations: Aggregate on (public.ft2)
+ Remote SQL: SELECT c2, sum("C 1") FROM "S 1"."T 1" GROUP BY 1 HAVING ((avg("C 1") < 500::numeric)) AND ((sum("C 1") < 49800)) ORDER BY c2 ASC NULLS LAST
+(4 rows)
+
+select c2, sum(c1) from ft2 group by c2 having avg(c1) < 500 and sum(c1) < 49800 order by c2;
+ c2 | sum
+----+-------
+ 1 | 49600
+ 2 | 49700
+(2 rows)
+
+-- Unshippable HAVING clause will be evaluated locally, and the other qual in the HAVING clause is pushed down
+explain (verbose, costs off)
+select count(*) from (select c5, count(c1) from ft1 group by c5, sqrt(c2) having (avg(c1) / avg(c1)) * random() <= 1 and avg(c1) < 500) x;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------
+ Aggregate
+ Output: count(*)
+ -> Foreign Scan
+ Output: ft1.c5, NULL::bigint, (sqrt((ft1.c2)::double precision))
+ Filter: (((((avg(ft1.c1)) / (avg(ft1.c1))))::double precision * random()) <= '1'::double precision)
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT c5, NULL::bigint, sqrt(c2), avg("C 1") FROM "S 1"."T 1" GROUP BY 1, 3 HAVING ((avg("C 1") < 500::numeric))
+(7 rows)
+
+select count(*) from (select c5, count(c1) from ft1 group by c5, sqrt(c2) having (avg(c1) / avg(c1)) * random() <= 1 and avg(c1) < 500) x;
+ count
+-------
+ 49
+(1 row)
+
+-- Aggregate in HAVING clause is not pushable, and thus aggregation is not pushed down
+explain (verbose, costs off)
+select sum(c1) from ft1 group by c2 having avg(c1 * (random() <= 1)::int) > 100 order by 1;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------
+ Sort
+ Output: (sum(c1)), c2
+ Sort Key: (sum(ft1.c1))
+ -> HashAggregate
+ Output: sum(c1), c2
+ Group Key: ft1.c2
+ Filter: (avg((ft1.c1 * ((random() <= '1'::double precision))::integer)) > '100'::numeric)
+ -> Foreign Scan on public.ft1
+ Output: c1, c2
+ Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1"
+(10 rows)
+
+-- Remote aggregate in combination with a local Param (for the output
+-- of an initplan) can be trouble, per bug #15781
+explain (verbose, costs off)
+select exists(select 1 from pg_enum), sum(c1) from ft1;
+ QUERY PLAN
+--------------------------------------------------
+ Foreign Scan
+ Output: $0, (sum(ft1.c1))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT sum("C 1") FROM "S 1"."T 1"
+ InitPlan 1 (returns $0)
+ -> Seq Scan on pg_catalog.pg_enum
+(6 rows)
+
+select exists(select 1 from pg_enum), sum(c1) from ft1;
+ exists | sum
+--------+--------
+ t | 500500
+(1 row)
+
+explain (verbose, costs off)
+select exists(select 1 from pg_enum), sum(c1) from ft1 group by 1;
+ QUERY PLAN
+---------------------------------------------------
+ GroupAggregate
+ Output: ($0), sum(ft1.c1)
+ Group Key: $0
+ InitPlan 1 (returns $0)
+ -> Seq Scan on pg_catalog.pg_enum
+ -> Foreign Scan on public.ft1
+ Output: $0, ft1.c1
+ Remote SQL: SELECT "C 1" FROM "S 1"."T 1"
+(8 rows)
+
+select exists(select 1 from pg_enum), sum(c1) from ft1 group by 1;
+ exists | sum
+--------+--------
+ t | 500500
+(1 row)
+
+-- Testing ORDER BY, DISTINCT, FILTER, Ordered-sets and VARIADIC within aggregates
+-- ORDER BY within aggregate, same column used to order
+explain (verbose, costs off)
+select array_agg(c1 order by c1) from ft1 where c1 < 100 group by c2 order by 1;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (array_agg(c1 ORDER BY c1)), c2
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT array_agg("C 1" ORDER BY "C 1" ASC NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) GROUP BY 2 ORDER BY array_agg("C 1" ORDER BY "C 1" ASC NULLS LAST) ASC NULLS LAST
+(4 rows)
+
+select array_agg(c1 order by c1) from ft1 where c1 < 100 group by c2 order by 1;
+ array_agg
+--------------------------------
+ {1,11,21,31,41,51,61,71,81,91}
+ {2,12,22,32,42,52,62,72,82,92}
+ {3,13,23,33,43,53,63,73,83,93}
+ {4,14,24,34,44,54,64,74,84,94}
+ {5,15,25,35,45,55,65,75,85,95}
+ {6,16,26,36,46,56,66,76,86,96}
+ {7,17,27,37,47,57,67,77,87,97}
+ {8,18,28,38,48,58,68,78,88,98}
+ {9,19,29,39,49,59,69,79,89,99}
+ {10,20,30,40,50,60,70,80,90}
+(10 rows)
+
+-- ORDER BY within aggregate, different column used to order, also using DESC
+explain (verbose, costs off)
+select array_agg(c5 order by c1 desc) from ft2 where c2 = 6 and c1 < 50;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (array_agg(c5 ORDER BY c1 DESC))
+ Relations: Aggregate on (public.ft2)
+ Remote SQL: SELECT array_agg(c5 ORDER BY "C 1" DESC NULLS FIRST) FROM "S 1"."T 1" WHERE (("C 1" < 50)) AND ((c2 = 6))
+(4 rows)
+
+select array_agg(c5 order by c1 desc) from ft2 where c2 = 6 and c1 < 50;
+ array_agg
+------------------------------------------------------------------------------------------------------------------------------------------
+ {"Mon Feb 16 00:00:00 1970","Fri Feb 06 00:00:00 1970","Tue Jan 27 00:00:00 1970","Sat Jan 17 00:00:00 1970","Wed Jan 07 00:00:00 1970"}
+(1 row)
+
+-- DISTINCT within aggregate
+explain (verbose, costs off)
+select array_agg(distinct (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (array_agg(DISTINCT (t1.c1 % 5))), ((t2.c1 % 3))
+ Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
+ Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5)), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2 ORDER BY array_agg(DISTINCT (r1.c1 % 5)) ASC NULLS LAST
+(4 rows)
+
+select array_agg(distinct (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+ array_agg
+--------------
+ {0,1,2,3,4}
+ {1,2,3,NULL}
+(2 rows)
+
+-- DISTINCT combined with ORDER BY within aggregate
+explain (verbose, costs off)
+select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5))), ((t2.c1 % 3))
+ Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
+ Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) ASC NULLS LAST), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2 ORDER BY array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) ASC NULLS LAST) ASC NULLS LAST
+(4 rows)
+
+select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+ array_agg
+--------------
+ {0,1,2,3,4}
+ {1,2,3,NULL}
+(2 rows)
+
+explain (verbose, costs off)
+select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5 desc nulls last) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5) DESC NULLS LAST)), ((t2.c1 % 3))
+ Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
+ Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) DESC NULLS LAST), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2 ORDER BY array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) DESC NULLS LAST) ASC NULLS LAST
+(4 rows)
+
+select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5 desc nulls last) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+ array_agg
+--------------
+ {3,2,1,NULL}
+ {4,3,2,1,0}
+(2 rows)
+
+-- FILTER within aggregate
+explain (verbose, costs off)
+select sum(c1) filter (where c1 < 100 and c2 > 5) from ft1 group by c2 order by 1 nulls last;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (sum(c1) FILTER (WHERE ((c1 < 100) AND (c2 > 5)))), c2
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT sum("C 1") FILTER (WHERE (("C 1" < 100) AND (c2 > 5))), c2 FROM "S 1"."T 1" GROUP BY 2 ORDER BY sum("C 1") FILTER (WHERE (("C 1" < 100) AND (c2 > 5))) ASC NULLS LAST
+(4 rows)
+
+select sum(c1) filter (where c1 < 100 and c2 > 5) from ft1 group by c2 order by 1 nulls last;
+ sum
+-----
+ 510
+ 520
+ 530
+ 540
+
+
+
+
+
+
+(10 rows)
+
+-- DISTINCT, ORDER BY and FILTER within aggregate
+explain (verbose, costs off)
+select sum(c1%3), sum(distinct c1%3 order by c1%3) filter (where c1%3 < 2), c2 from ft1 where c2 = 6 group by c2;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (sum((c1 % 3))), (sum(DISTINCT (c1 % 3) ORDER BY (c1 % 3)) FILTER (WHERE ((c1 % 3) < 2))), c2
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT sum(("C 1" % 3)), sum(DISTINCT ("C 1" % 3) ORDER BY (("C 1" % 3)) ASC NULLS LAST) FILTER (WHERE (("C 1" % 3) < 2)), c2 FROM "S 1"."T 1" WHERE ((c2 = 6)) GROUP BY 3
+(4 rows)
+
+select sum(c1%3), sum(distinct c1%3 order by c1%3) filter (where c1%3 < 2), c2 from ft1 where c2 = 6 group by c2;
+ sum | sum | c2
+-----+-----+----
+ 99 | 1 | 6
+(1 row)
+
+-- Outer query is aggregation query
+explain (verbose, costs off)
+select distinct (select count(*) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------
+ Unique
+ Output: ((SubPlan 1))
+ -> Sort
+ Output: ((SubPlan 1))
+ Sort Key: ((SubPlan 1))
+ -> Foreign Scan
+ Output: (SubPlan 1)
+ Relations: Aggregate on (public.ft2 t2)
+ Remote SQL: SELECT count(*) FILTER (WHERE ((c2 = 6) AND ("C 1" < 10))) FROM "S 1"."T 1" WHERE (((c2 % 6) = 0))
+ SubPlan 1
+ -> Foreign Scan on public.ft1 t1
+ Output: (count(*) FILTER (WHERE ((t2.c2 = 6) AND (t2.c1 < 10))))
+ Remote SQL: SELECT NULL FROM "S 1"."T 1" WHERE (("C 1" = 6))
+(13 rows)
+
+select distinct (select count(*) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
+ count
+-------
+ 1
+(1 row)
+
+-- Inner query is aggregation query
+explain (verbose, costs off)
+select distinct (select count(t1.c1) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------
+ Unique
+ Output: ((SubPlan 1))
+ -> Sort
+ Output: ((SubPlan 1))
+ Sort Key: ((SubPlan 1))
+ -> Foreign Scan on public.ft2 t2
+ Output: (SubPlan 1)
+ Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE (((c2 % 6) = 0))
+ SubPlan 1
+ -> Foreign Scan
+ Output: (count(t1.c1) FILTER (WHERE ((t2.c2 = 6) AND (t2.c1 < 10))))
+ Relations: Aggregate on (public.ft1 t1)
+ Remote SQL: SELECT count("C 1") FILTER (WHERE (($1::integer = 6) AND ($2::integer < 10))) FROM "S 1"."T 1" WHERE (("C 1" = 6))
+(13 rows)
+
+select distinct (select count(t1.c1) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
+ count
+-------
+ 0
+ 1
+(2 rows)
+
+-- Aggregate not pushed down as FILTER condition is not pushable
+explain (verbose, costs off)
+select sum(c1) filter (where (c1 / c1) * random() <= 1) from ft1 group by c2 order by 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------
+ Sort
+ Output: (sum(c1) FILTER (WHERE ((((c1 / c1))::double precision * random()) <= '1'::double precision))), c2
+ Sort Key: (sum(ft1.c1) FILTER (WHERE ((((ft1.c1 / ft1.c1))::double precision * random()) <= '1'::double precision)))
+ -> HashAggregate
+ Output: sum(c1) FILTER (WHERE ((((c1 / c1))::double precision * random()) <= '1'::double precision)), c2
+ Group Key: ft1.c2
+ -> Foreign Scan on public.ft1
+ Output: c1, c2
+ Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1"
+(9 rows)
+
+explain (verbose, costs off)
+select sum(c2) filter (where c2 in (select c2 from ft1 where c2 < 5)) from ft1;
+ QUERY PLAN
+-------------------------------------------------------------------
+ Aggregate
+ Output: sum(ft1.c2) FILTER (WHERE (hashed SubPlan 1))
+ -> Foreign Scan on public.ft1
+ Output: ft1.c2
+ Remote SQL: SELECT c2 FROM "S 1"."T 1"
+ SubPlan 1
+ -> Foreign Scan on public.ft1 ft1_1
+ Output: ft1_1.c2
+ Remote SQL: SELECT c2 FROM "S 1"."T 1" WHERE ((c2 < 5))
+(9 rows)
+
+-- Ordered-sets within aggregate
+explain (verbose, costs off)
+select c2, rank('10'::varchar) within group (order by c6), percentile_cont(c2/10::numeric) within group (order by c1) from ft1 where c2 < 10 group by c2 having percentile_cont(c2/10::numeric) within group (order by c1) < 500 order by c2;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Sort
+ Output: c2, (rank('10'::character varying) WITHIN GROUP (ORDER BY c6)), (percentile_cont((((c2)::numeric / '10'::numeric))::double precision) WITHIN GROUP (ORDER BY ((c1)::double precision)))
+ Sort Key: ft1.c2
+ -> Foreign Scan
+ Output: c2, (rank('10'::character varying) WITHIN GROUP (ORDER BY c6)), (percentile_cont((((c2)::numeric / '10'::numeric))::double precision) WITHIN GROUP (ORDER BY ((c1)::double precision)))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT c2, rank('10'::character varying) WITHIN GROUP (ORDER BY c6 ASC NULLS LAST), percentile_cont((c2 / 10::numeric)) WITHIN GROUP (ORDER BY ("C 1") ASC NULLS LAST) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY 1 HAVING ((percentile_cont((c2 / 10::numeric)) WITHIN GROUP (ORDER BY ("C 1") ASC NULLS LAST) < 500::double precision))
+(7 rows)
+
+select c2, rank('10'::varchar) within group (order by c6), percentile_cont(c2/10::numeric) within group (order by c1) from ft1 where c2 < 10 group by c2 having percentile_cont(c2/10::numeric) within group (order by c1) < 500 order by c2;
+ c2 | rank | percentile_cont
+----+------+-----------------
+ 0 | 101 | 10
+ 1 | 101 | 100
+ 2 | 1 | 200
+ 3 | 1 | 300
+ 4 | 1 | 400
+(5 rows)
+
+-- Using multiple arguments within aggregates
+explain (verbose, costs off)
+select c1, rank(c1, c2) within group (order by c1, c2) from ft1 group by c1, c2 having c1 = 6 order by 1;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: c1, (rank(c1, c2) WITHIN GROUP (ORDER BY c1, c2)), c2
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT "C 1", rank("C 1", c2) WITHIN GROUP (ORDER BY "C 1" ASC NULLS LAST, c2 ASC NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" = 6)) GROUP BY 1, 3
+(4 rows)
+
+select c1, rank(c1, c2) within group (order by c1, c2) from ft1 group by c1, c2 having c1 = 6 order by 1;
+ c1 | rank
+----+------
+ 6 | 1
+(1 row)
+
+-- User defined function for user defined aggregate, VARIADIC
+create function least_accum(anyelement, variadic anyarray)
+returns anyelement language sql as
+ 'select least($1, min($2[i])) from generate_subscripts($2,1) g(i)';
+create aggregate least_agg(variadic items anyarray) (
+ stype = anyelement, sfunc = least_accum
+);
+-- Disable hash aggregation for plan stability.
+set enable_hashagg to false;
+-- Not pushed down due to user defined aggregate
+explain (verbose, costs off)
+select c2, least_agg(c1) from ft1 group by c2 order by c2;
+ QUERY PLAN
+----------------------------------------------------------------------------------
+ GroupAggregate
+ Output: c2, least_agg(VARIADIC ARRAY[c1])
+ Group Key: ft1.c2
+ -> Foreign Scan on public.ft1
+ Output: c2, c1
+ Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" ORDER BY c2 ASC NULLS LAST
+(6 rows)
+
+-- Add function and aggregate into extension
+alter extension postgres_fdw add function least_accum(anyelement, variadic anyarray);
+alter extension postgres_fdw add aggregate least_agg(variadic items anyarray);
+alter server loopback options (set extensions 'postgres_fdw');
+-- Now aggregate will be pushed. Aggregate will display VARIADIC argument.
+explain (verbose, costs off)
+select c2, least_agg(c1) from ft1 where c2 < 100 group by c2 order by c2;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------
+ Sort
+ Output: c2, (least_agg(VARIADIC ARRAY[c1]))
+ Sort Key: ft1.c2
+ -> Foreign Scan
+ Output: c2, (least_agg(VARIADIC ARRAY[c1]))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT c2, public.least_agg(VARIADIC ARRAY["C 1"]) FROM "S 1"."T 1" WHERE ((c2 < 100)) GROUP BY 1
+(7 rows)
+
+select c2, least_agg(c1) from ft1 where c2 < 100 group by c2 order by c2;
+ c2 | least_agg
+----+-----------
+ 0 | 10
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+(10 rows)
+
+-- Remove function and aggregate from extension
+alter extension postgres_fdw drop function least_accum(anyelement, variadic anyarray);
+alter extension postgres_fdw drop aggregate least_agg(variadic items anyarray);
+alter server loopback options (set extensions 'postgres_fdw');
+-- Not pushed down as we have dropped objects from extension.
+explain (verbose, costs off)
+select c2, least_agg(c1) from ft1 group by c2 order by c2;
+ QUERY PLAN
+----------------------------------------------------------------------------------
+ GroupAggregate
+ Output: c2, least_agg(VARIADIC ARRAY[c1])
+ Group Key: ft1.c2
+ -> Foreign Scan on public.ft1
+ Output: c2, c1
+ Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" ORDER BY c2 ASC NULLS LAST
+(6 rows)
+
+-- Cleanup
+reset enable_hashagg;
+drop aggregate least_agg(variadic items anyarray);
+drop function least_accum(anyelement, variadic anyarray);
+-- Testing USING OPERATOR() in ORDER BY within aggregate.
+-- For this, we need user defined operators along with operator family and
+-- operator class. Create those and then add them to the extension. Note that
+-- user defined objects are considered unshippable unless they are part of
+-- the extension.
+create operator public.<^ (
+ leftarg = int4,
+ rightarg = int4,
+ procedure = int4eq
+);
+create operator public.=^ (
+ leftarg = int4,
+ rightarg = int4,
+ procedure = int4lt
+);
+create operator public.>^ (
+ leftarg = int4,
+ rightarg = int4,
+ procedure = int4gt
+);
+create operator family my_op_family using btree;
+create function my_op_cmp(a int, b int) returns int as
+ $$begin return btint4cmp(a, b); end $$ language plpgsql;
+create operator class my_op_class for type int using btree family my_op_family as
+ operator 1 public.<^,
+ operator 3 public.=^,
+ operator 5 public.>^,
+ function 1 my_op_cmp(int, int);
+-- This will not be pushed as user defined sort operator is not part of the
+-- extension yet.
+explain (verbose, costs off)
+select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------
+ GroupAggregate
+ Output: array_agg(c1 ORDER BY c1 USING <^ NULLS LAST), c2
+ Group Key: ft2.c2
+ -> Foreign Scan on public.ft2
+ Output: c1, c2
+ Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) AND ((c2 = 6))
+(6 rows)
+
+-- This should not be pushed either.
+explain (verbose, costs off)
+select * from ft2 order by c1 using operator(public.<^);
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Sort
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Sort Key: ft2.c1 USING <^
+ -> Foreign Scan on public.ft2
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(6 rows)
+
+-- Update local stats on ft2
+ANALYZE ft2;
+-- Add into extension
+alter extension postgres_fdw add operator class my_op_class using btree;
+alter extension postgres_fdw add function my_op_cmp(a int, b int);
+alter extension postgres_fdw add operator family my_op_family using btree;
+alter extension postgres_fdw add operator public.<^(int, int);
+alter extension postgres_fdw add operator public.=^(int, int);
+alter extension postgres_fdw add operator public.>^(int, int);
+alter server loopback options (set extensions 'postgres_fdw');
+-- Now this will be pushed as sort operator is part of the extension.
+explain (verbose, costs off)
+select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (array_agg(c1 ORDER BY c1 USING <^ NULLS LAST)), c2
+ Relations: Aggregate on (public.ft2)
+ Remote SQL: SELECT array_agg("C 1" ORDER BY "C 1" USING OPERATOR(public.<^) NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) AND ((c2 = 6)) GROUP BY 2
+(4 rows)
+
+select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2;
+ array_agg
+--------------------------------
+ {6,16,26,36,46,56,66,76,86,96}
+(1 row)
+
+-- This should be pushed too.
+explain (verbose, costs off)
+select * from ft2 order by c1 using operator(public.<^);
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft2
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY "C 1" USING OPERATOR(public.<^) NULLS LAST
+(3 rows)
+
+-- Remove from extension
+alter extension postgres_fdw drop operator class my_op_class using btree;
+alter extension postgres_fdw drop function my_op_cmp(a int, b int);
+alter extension postgres_fdw drop operator family my_op_family using btree;
+alter extension postgres_fdw drop operator public.<^(int, int);
+alter extension postgres_fdw drop operator public.=^(int, int);
+alter extension postgres_fdw drop operator public.>^(int, int);
+alter server loopback options (set extensions 'postgres_fdw');
+-- This will not be pushed as sort operator is now removed from the extension.
+explain (verbose, costs off)
+select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------
+ GroupAggregate
+ Output: array_agg(c1 ORDER BY c1 USING <^ NULLS LAST), c2
+ Group Key: ft2.c2
+ -> Foreign Scan on public.ft2
+ Output: c1, c2
+ Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) AND ((c2 = 6))
+(6 rows)
+
+-- Cleanup
+drop operator class my_op_class using btree;
+drop function my_op_cmp(a int, b int);
+drop operator family my_op_family using btree;
+drop operator public.>^(int, int);
+drop operator public.=^(int, int);
+drop operator public.<^(int, int);
+-- The input relation to the aggregate push-down hook is not safe to push down, and thus
+-- the aggregate cannot be pushed down to the foreign server.
+explain (verbose, costs off)
+select count(t1.c3) from ft2 t1 left join ft2 t2 on (t1.c1 = random() * t2.c2);
+ QUERY PLAN
+-------------------------------------------------------------------------------------------
+ Aggregate
+ Output: count(t1.c3)
+ -> Nested Loop Left Join
+ Output: t1.c3
+ Join Filter: ((t1.c1)::double precision = (random() * (t2.c2)::double precision))
+ -> Foreign Scan on public.ft2 t1
+ Output: t1.c3, t1.c1
+ Remote SQL: SELECT "C 1", c3 FROM "S 1"."T 1"
+ -> Materialize
+ Output: t2.c2
+ -> Foreign Scan on public.ft2 t2
+ Output: t2.c2
+ Remote SQL: SELECT c2 FROM "S 1"."T 1"
+(13 rows)
+
+-- Subquery in FROM clause containing an aggregate
+explain (verbose, costs off)
+select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x where ft1.c2 = x.a group by x.b order by 1, 2;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------
+ Sort
+ Output: (count(*)), x.b
+ Sort Key: (count(*)), x.b
+ -> HashAggregate
+ Output: count(*), x.b
+ Group Key: x.b
+ -> Hash Join
+ Output: x.b
+ Inner Unique: true
+ Hash Cond: (ft1.c2 = x.a)
+ -> Foreign Scan on public.ft1
+ Output: ft1.c2
+ Remote SQL: SELECT c2 FROM "S 1"."T 1"
+ -> Hash
+ Output: x.b, x.a
+ -> Subquery Scan on x
+ Output: x.b, x.a
+ -> Foreign Scan
+ Output: ft1_1.c2, (sum(ft1_1.c1))
+ Relations: Aggregate on (public.ft1 ft1_1)
+ Remote SQL: SELECT c2, sum("C 1") FROM "S 1"."T 1" GROUP BY 1
+(21 rows)
+
+select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x where ft1.c2 = x.a group by x.b order by 1, 2;
+ count | b
+-------+-------
+ 100 | 49600
+ 100 | 49700
+ 100 | 49800
+ 100 | 49900
+ 100 | 50000
+ 100 | 50100
+ 100 | 50200
+ 100 | 50300
+ 100 | 50400
+ 100 | 50500
+(10 rows)
+
+-- FULL join with IS NULL check in HAVING
+explain (verbose, costs off)
+select avg(t1.c1), sum(t2.c1) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) group by t2.c1 having (avg(t1.c1) is null and sum(t2.c1) < 10) or sum(t2.c1) is null order by 1 nulls last, 2;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1
+ Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2))
+ Remote SQL: SELECT avg(r1.c1), sum(r2.c1), r2.c1 FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) GROUP BY 3 HAVING ((((avg(r1.c1) IS NULL) AND (sum(r2.c1) < 10)) OR (sum(r2.c1) IS NULL))) ORDER BY avg(r1.c1) ASC NULLS LAST, sum(r2.c1) ASC NULLS LAST
+(4 rows)
+
+select avg(t1.c1), sum(t2.c1) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) group by t2.c1 having (avg(t1.c1) is null and sum(t2.c1) < 10) or sum(t2.c1) is null order by 1 nulls last, 2;
+ avg | sum
+---------------------+-----
+ 51.0000000000000000 |
+ | 3
+ | 9
+(3 rows)
+
+-- Aggregate over FULL join needing to deparse the joining relations as
+-- subqueries.
+explain (verbose, costs off)
+select count(*), sum(t1.c1), avg(t2.c1) from (select c1 from ft4 where c1 between 50 and 60) t1 full join (select c1 from ft5 where c1 between 50 and 60) t2 on (t1.c1 = t2.c1);
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(*)), (sum(ft4.c1)), (avg(ft5.c1))
+ Relations: Aggregate on ((public.ft4) FULL JOIN (public.ft5))
+ Remote SQL: SELECT count(*), sum(s4.c1), avg(s5.c1) FROM ((SELECT c1 FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s4(c1) FULL JOIN (SELECT c1 FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s5(c1) ON (((s4.c1 = s5.c1))))
+(4 rows)
+
+select count(*), sum(t1.c1), avg(t2.c1) from (select c1 from ft4 where c1 between 50 and 60) t1 full join (select c1 from ft5 where c1 between 50 and 60) t2 on (t1.c1 = t2.c1);
+ count | sum | avg
+-------+-----+---------------------
+ 8 | 330 | 55.5000000000000000
+(1 row)
+
+-- ORDER BY expression is part of the target list but not pushed down to the
+-- foreign server.
+explain (verbose, costs off)
+select sum(c2) * (random() <= 1)::int as sum from ft1 order by 1;
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Sort
+ Output: (((sum(c2)) * ((random() <= '1'::double precision))::integer))
+ Sort Key: (((sum(ft1.c2)) * ((random() <= '1'::double precision))::integer))
+ -> Foreign Scan
+ Output: ((sum(c2)) * ((random() <= '1'::double precision))::integer)
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT sum(c2) FROM "S 1"."T 1"
+(7 rows)
+
+select sum(c2) * (random() <= 1)::int as sum from ft1 order by 1;
+ sum
+------
+ 4500
+(1 row)
+
+-- LATERAL join, with parameterization
+set enable_hashagg to false;
+explain (verbose, costs off)
+select c2, sum from "S 1"."T 1" t1, lateral (select sum(t2.c1 + t1."C 1") sum from ft2 t2 group by t2.c1) qry where t1.c2 * 2 = qry.sum and t1.c2 < 3 and t1."C 1" < 100 order by 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------
+ Sort
+ Output: t1.c2, qry.sum
+ Sort Key: t1.c2
+ -> Nested Loop
+ Output: t1.c2, qry.sum
+ -> Index Scan using t1_pkey on "S 1"."T 1" t1
+ Output: t1."C 1", t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Index Cond: (t1."C 1" < 100)
+ Filter: (t1.c2 < 3)
+ -> Subquery Scan on qry
+ Output: qry.sum, t2.c1
+ Filter: ((t1.c2 * 2) = qry.sum)
+ -> Foreign Scan
+ Output: (sum((t2.c1 + t1."C 1"))), t2.c1
+ Relations: Aggregate on (public.ft2 t2)
+ Remote SQL: SELECT sum(("C 1" + $1::integer)), "C 1" FROM "S 1"."T 1" GROUP BY 2
+(16 rows)
+
+select c2, sum from "S 1"."T 1" t1, lateral (select sum(t2.c1 + t1."C 1") sum from ft2 t2 group by t2.c1) qry where t1.c2 * 2 = qry.sum and t1.c2 < 3 and t1."C 1" < 100 order by 1;
+ c2 | sum
+----+-----
+ 1 | 2
+ 2 | 4
+(2 rows)
+
+reset enable_hashagg;
+-- bug #15613: bad plan for foreign table scan with lateral reference
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT ref_0.c2, subq_1.*
+FROM
+ "S 1"."T 1" AS ref_0,
+ LATERAL (
+ SELECT ref_0."C 1" c1, subq_0.*
+ FROM (SELECT ref_0.c2, ref_1.c3
+ FROM ft1 AS ref_1) AS subq_0
+ RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
+ ) AS subq_1
+WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
+ORDER BY ref_0."C 1";
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: ref_0.c2, ref_0."C 1", (ref_0.c2), ref_1.c3, ref_0."C 1"
+ -> Nested Loop
+ Output: ref_0.c2, ref_0."C 1", ref_1.c3, (ref_0.c2)
+ -> Index Scan using t1_pkey on "S 1"."T 1" ref_0
+ Output: ref_0."C 1", ref_0.c2, ref_0.c3, ref_0.c4, ref_0.c5, ref_0.c6, ref_0.c7, ref_0.c8
+ Index Cond: (ref_0."C 1" < 10)
+ -> Foreign Scan on public.ft1 ref_1
+ Output: ref_1.c3, ref_0.c2
+ Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE ((c3 = '00001'::text))
+ -> Materialize
+ Output: ref_3.c3
+ -> Foreign Scan on public.ft2 ref_3
+ Output: ref_3.c3
+ Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE ((c3 = '00001'::text))
+(15 rows)
+
+SELECT ref_0.c2, subq_1.*
+FROM
+ "S 1"."T 1" AS ref_0,
+ LATERAL (
+ SELECT ref_0."C 1" c1, subq_0.*
+ FROM (SELECT ref_0.c2, ref_1.c3
+ FROM ft1 AS ref_1) AS subq_0
+ RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
+ ) AS subq_1
+WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
+ORDER BY ref_0."C 1";
+ c2 | c1 | c2 | c3
+----+----+----+-------
+ 1 | 1 | 1 | 00001
+ 2 | 2 | 2 | 00001
+ 3 | 3 | 3 | 00001
+ 4 | 4 | 4 | 00001
+ 5 | 5 | 5 | 00001
+ 6 | 6 | 6 | 00001
+ 7 | 7 | 7 | 00001
+ 8 | 8 | 8 | 00001
+ 9 | 9 | 9 | 00001
+(9 rows)
+
+-- Check with PlaceHolderVars
+explain (verbose, costs off)
+select sum(q.a), count(q.b) from ft4 left join (select 13, avg(ft1.c1), sum(ft2.c1) from ft1 right join ft2 on (ft1.c1 = ft2.c1)) q(a, b, c) on (ft4.c1 <= q.b);
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------
+ Aggregate
+ Output: sum(q.a), count(q.b)
+ -> Nested Loop Left Join
+ Output: q.a, q.b
+ Inner Unique: true
+ Join Filter: ((ft4.c1)::numeric <= q.b)
+ -> Foreign Scan on public.ft4
+ Output: ft4.c1, ft4.c2, ft4.c3
+ Remote SQL: SELECT c1 FROM "S 1"."T 3"
+ -> Materialize
+ Output: q.a, q.b
+ -> Subquery Scan on q
+ Output: q.a, q.b
+ -> Foreign Scan
+ Output: 13, (avg(ft1.c1)), NULL::bigint
+ Relations: Aggregate on ((public.ft2) LEFT JOIN (public.ft1))
+ Remote SQL: SELECT 13, avg(r1."C 1"), NULL::bigint FROM ("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1"))))
+(17 rows)
+
+select sum(q.a), count(q.b) from ft4 left join (select 13, avg(ft1.c1), sum(ft2.c1) from ft1 right join ft2 on (ft1.c1 = ft2.c1)) q(a, b, c) on (ft4.c1 <= q.b);
+ sum | count
+-----+-------
+ 650 | 50
+(1 row)
+
+-- Not supported cases
+-- Grouping sets
+explain (verbose, costs off)
+select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls last;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Sort
+ Output: c2, (sum(c1))
+ Sort Key: ft1.c2
+ -> MixedAggregate
+ Output: c2, sum(c1)
+ Hash Key: ft1.c2
+ Group Key: ()
+ -> Foreign Scan on public.ft1
+ Output: c2, c1
+ Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
+(10 rows)
+
+select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls last;
+ c2 | sum
+----+--------
+ 0 | 50500
+ 1 | 49600
+ 2 | 49700
+ | 149800
+(4 rows)
+
+explain (verbose, costs off)
+select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Sort
+ Output: c2, (sum(c1))
+ Sort Key: ft1.c2
+ -> MixedAggregate
+ Output: c2, sum(c1)
+ Hash Key: ft1.c2
+ Group Key: ()
+ -> Foreign Scan on public.ft1
+ Output: c2, c1
+ Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
+(10 rows)
+
+select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last;
+ c2 | sum
+----+--------
+ 0 | 50500
+ 1 | 49600
+ 2 | 49700
+ | 149800
+(4 rows)
+
+explain (verbose, costs off)
+select c2, c6, sum(c1) from ft1 where c2 < 3 group by grouping sets(c2, c6) order by 1 nulls last, 2 nulls last;
+ QUERY PLAN
+----------------------------------------------------------------------------------
+ Sort
+ Output: c2, c6, (sum(c1))
+ Sort Key: ft1.c2, ft1.c6
+ -> HashAggregate
+ Output: c2, c6, sum(c1)
+ Hash Key: ft1.c2
+ Hash Key: ft1.c6
+ -> Foreign Scan on public.ft1
+ Output: c2, c6, c1
+ Remote SQL: SELECT "C 1", c2, c6 FROM "S 1"."T 1" WHERE ((c2 < 3))
+(10 rows)
+
+select c2, c6, sum(c1) from ft1 where c2 < 3 group by grouping sets(c2, c6) order by 1 nulls last, 2 nulls last;
+ c2 | c6 | sum
+----+----+-------
+ 0 | | 50500
+ 1 | | 49600
+ 2 | | 49700
+ | 0 | 50500
+ | 1 | 49600
+ | 2 | 49700
+(6 rows)
+
+explain (verbose, costs off)
+select c2, sum(c1), grouping(c2) from ft1 where c2 < 3 group by c2 order by 1 nulls last;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Sort
+ Output: c2, (sum(c1)), (GROUPING(c2))
+ Sort Key: ft1.c2
+ -> HashAggregate
+ Output: c2, sum(c1), GROUPING(c2)
+ Group Key: ft1.c2
+ -> Foreign Scan on public.ft1
+ Output: c2, c1
+ Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
+(9 rows)
+
+select c2, sum(c1), grouping(c2) from ft1 where c2 < 3 group by c2 order by 1 nulls last;
+ c2 | sum | grouping
+----+-------+----------
+ 0 | 50500 | 0
+ 1 | 49600 | 0
+ 2 | 49700 | 0
+(3 rows)
+
+-- DISTINCT itself is not pushed down, whereas the underlying aggregate is pushed down
+explain (verbose, costs off)
+select distinct sum(c1)/1000 s from ft2 where c2 < 6 group by c2 order by 1;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Unique
+ Output: ((sum(c1) / 1000)), c2
+ -> Sort
+ Output: ((sum(c1) / 1000)), c2
+ Sort Key: ((sum(ft2.c1) / 1000))
+ -> Foreign Scan
+ Output: ((sum(c1) / 1000)), c2
+ Relations: Aggregate on (public.ft2)
+ Remote SQL: SELECT (sum("C 1") / 1000), c2 FROM "S 1"."T 1" WHERE ((c2 < 6)) GROUP BY 2
+(9 rows)
+
+select distinct sum(c1)/1000 s from ft2 where c2 < 6 group by c2 order by 1;
+ s
+----
+ 49
+ 50
+(2 rows)
+
+-- WindowAgg
+explain (verbose, costs off)
+select c2, sum(c2), count(c2) over (partition by c2%2) from ft2 where c2 < 10 group by c2 order by 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------
+ Sort
+ Output: c2, (sum(c2)), (count(c2) OVER (?)), ((c2 % 2))
+ Sort Key: ft2.c2
+ -> WindowAgg
+ Output: c2, (sum(c2)), count(c2) OVER (?), ((c2 % 2))
+ -> Sort
+ Output: c2, ((c2 % 2)), (sum(c2))
+ Sort Key: ((ft2.c2 % 2))
+ -> Foreign Scan
+ Output: c2, ((c2 % 2)), (sum(c2))
+ Relations: Aggregate on (public.ft2)
+ Remote SQL: SELECT c2, (c2 % 2), sum(c2) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY 1
+(12 rows)
+
+select c2, sum(c2), count(c2) over (partition by c2%2) from ft2 where c2 < 10 group by c2 order by 1;
+ c2 | sum | count
+----+-----+-------
+ 0 | 0 | 5
+ 1 | 100 | 5
+ 2 | 200 | 5
+ 3 | 300 | 5
+ 4 | 400 | 5
+ 5 | 500 | 5
+ 6 | 600 | 5
+ 7 | 700 | 5
+ 8 | 800 | 5
+ 9 | 900 | 5
+(10 rows)
+
+explain (verbose, costs off)
+select c2, array_agg(c2) over (partition by c2%2 order by c2 desc) from ft1 where c2 < 10 group by c2 order by 1;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------
+ Sort
+ Output: c2, (array_agg(c2) OVER (?)), ((c2 % 2))
+ Sort Key: ft1.c2
+ -> WindowAgg
+ Output: c2, array_agg(c2) OVER (?), ((c2 % 2))
+ -> Sort
+ Output: c2, ((c2 % 2))
+ Sort Key: ((ft1.c2 % 2)), ft1.c2 DESC
+ -> Foreign Scan
+ Output: c2, ((c2 % 2))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT c2, (c2 % 2) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY 1
+(12 rows)
+
+select c2, array_agg(c2) over (partition by c2%2 order by c2 desc) from ft1 where c2 < 10 group by c2 order by 1;
+ c2 | array_agg
+----+-------------
+ 0 | {8,6,4,2,0}
+ 1 | {9,7,5,3,1}
+ 2 | {8,6,4,2}
+ 3 | {9,7,5,3}
+ 4 | {8,6,4}
+ 5 | {9,7,5}
+ 6 | {8,6}
+ 7 | {9,7}
+ 8 | {8}
+ 9 | {9}
+(10 rows)
+
+explain (verbose, costs off)
+select c2, array_agg(c2) over (partition by c2%2 order by c2 range between current row and unbounded following) from ft1 where c2 < 10 group by c2 order by 1;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------
+ Sort
+ Output: c2, (array_agg(c2) OVER (?)), ((c2 % 2))
+ Sort Key: ft1.c2
+ -> WindowAgg
+ Output: c2, array_agg(c2) OVER (?), ((c2 % 2))
+ -> Sort
+ Output: c2, ((c2 % 2))
+ Sort Key: ((ft1.c2 % 2)), ft1.c2
+ -> Foreign Scan
+ Output: c2, ((c2 % 2))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT c2, (c2 % 2) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY 1
+(12 rows)
+
+select c2, array_agg(c2) over (partition by c2%2 order by c2 range between current row and unbounded following) from ft1 where c2 < 10 group by c2 order by 1;
+ c2 | array_agg
+----+-------------
+ 0 | {0,2,4,6,8}
+ 1 | {1,3,5,7,9}
+ 2 | {2,4,6,8}
+ 3 | {3,5,7,9}
+ 4 | {4,6,8}
+ 5 | {5,7,9}
+ 6 | {6,8}
+ 7 | {7,9}
+ 8 | {8}
+ 9 | {9}
+(10 rows)
+
+-- ===================================================================
+-- parameterized queries
+-- ===================================================================
+-- simple join
+PREPARE st1(int, int) AS SELECT t1.c3, t2.c3 FROM ft1 t1, ft2 t2 WHERE t1.c1 = $1 AND t2.c1 = $2;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st1(1, 2);
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.c3, t2.c3
+ Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
+ Remote SQL: SELECT r1.c3, r2.c3 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r2."C 1" = 2)) AND ((r1."C 1" = 1))))
+(4 rows)
+
+EXECUTE st1(1, 1);
+ c3 | c3
+-------+-------
+ 00001 | 00001
+(1 row)
+
+EXECUTE st1(101, 101);
+ c3 | c3
+-------+-------
+ 00101 | 00101
+(1 row)
+
+-- subquery using stable function (can't be sent to remote)
+PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c4) = '1970-01-17'::date) ORDER BY c1;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st2(10, 20);
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Sort
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Sort Key: t1.c1
+ -> Nested Loop Semi Join
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Join Filter: (t1.c3 = t2.c3)
+ -> Foreign Scan on public.ft1 t1
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 20))
+ -> Materialize
+ Output: t2.c3
+ -> Foreign Scan on public.ft2 t2
+ Output: t2.c3
+ Filter: (date(t2.c4) = '01-17-1970'::date)
+ Remote SQL: SELECT c3, c4 FROM "S 1"."T 1" WHERE (("C 1" > 10))
+(15 rows)
+
+EXECUTE st2(10, 20);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+(1 row)
+
+EXECUTE st2(101, 121);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+(1 row)
+
+-- subquery using immutable function (can be sent to remote)
+PREPARE st3(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c5) = '1970-01-17'::date) ORDER BY c1;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st3(10, 20);
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------
+ Sort
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Sort Key: t1.c1
+ -> Nested Loop Semi Join
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Join Filter: (t1.c3 = t2.c3)
+ -> Foreign Scan on public.ft1 t1
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 20))
+ -> Materialize
+ Output: t2.c3
+ -> Foreign Scan on public.ft2 t2
+ Output: t2.c3
+ Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE (("C 1" > 10)) AND ((date(c5) = '1970-01-17'::date))
+(14 rows)
+
+EXECUTE st3(10, 20);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+(1 row)
+
+EXECUTE st3(20, 30);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+----+----+----+----+----+----
+(0 rows)
+
+-- custom plan should be chosen initially
+PREPARE st4(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 = $1;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+-- once we try it enough times, should switch to generic plan
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
+(3 rows)
+
+-- value of $1 should not be sent to remote
+PREPARE st5(user_enum,int) AS SELECT * FROM ft1 t1 WHERE c8 = $1 and c1 = $2;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = $1)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
+(4 rows)
+
+EXECUTE st5('foo', 1);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+-- altering FDW options requires replanning
+PREPARE st6 AS SELECT * FROM ft1 t1 WHERE t1.c1 = t1.c2;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st6;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = c2))
+(3 rows)
+
+PREPARE st7 AS INSERT INTO ft1 (c1,c2,c3) VALUES (1001,101,'foo');
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st7;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Insert on public.ft1
+ Remote SQL: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ Batch Size: 1
+ -> Result
+ Output: NULL::integer, 1001, 101, 'foo'::text, NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft1 '::character(10), NULL::user_enum
+(5 rows)
+
+ALTER TABLE "S 1"."T 1" RENAME TO "T 0";
+ALTER FOREIGN TABLE ft1 OPTIONS (SET table_name 'T 0');
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st6;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 0" WHERE (("C 1" = c2))
+(3 rows)
+
+EXECUTE st6;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+ 5 | 5 | 00005 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 9 | 9 | 00009 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
+(9 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st7;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Insert on public.ft1
+ Remote SQL: INSERT INTO "S 1"."T 0"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ Batch Size: 1
+ -> Result
+ Output: NULL::integer, 1001, 101, 'foo'::text, NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft1 '::character(10), NULL::user_enum
+(5 rows)
+
+ALTER TABLE "S 1"."T 0" RENAME TO "T 1";
+ALTER FOREIGN TABLE ft1 OPTIONS (SET table_name 'T 1');
+PREPARE st8 AS SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st8;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(c3))
+ Relations: Aggregate on (public.ft1 t1)
+ Remote SQL: SELECT count(c3) FROM "S 1"."T 1" WHERE (("C 1" OPERATOR(public.===) c2))
+(4 rows)
+
+ALTER SERVER loopback OPTIONS (DROP extensions);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st8;
+ QUERY PLAN
+-----------------------------------------------------------
+ Aggregate
+ Output: count(c3)
+ -> Foreign Scan on public.ft1 t1
+ Output: c3
+ Filter: (t1.c1 === t1.c2)
+ Remote SQL: SELECT "C 1", c2, c3 FROM "S 1"."T 1"
+(6 rows)
+
+EXECUTE st8;
+ count
+-------
+ 9
+(1 row)
+
+ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw');
+-- cleanup
+DEALLOCATE st1;
+DEALLOCATE st2;
+DEALLOCATE st3;
+DEALLOCATE st4;
+DEALLOCATE st5;
+DEALLOCATE st6;
+DEALLOCATE st7;
+DEALLOCATE st8;
+-- System columns, except ctid and oid, should not be sent to remote
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM ft1 t1 WHERE t1.tableoid = 'pg_class'::regclass LIMIT 1;
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Limit
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ -> Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.tableoid = '1259'::oid)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(6 rows)
+
+SELECT * FROM ft1 t1 WHERE t1.tableoid = 'ft1'::regclass LIMIT 1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: (tableoid)::regclass, c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" LIMIT 1::bigint
+(3 rows)
+
+SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
+ tableoid | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----------+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ ft1 | 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((ctid = '(0,2)'::tid))
+(3 rows)
+
+SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT ctid, * FROM ft1 t1 LIMIT 1;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: ctid, c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" LIMIT 1::bigint
+(3 rows)
+
+SELECT ctid, * FROM ft1 t1 LIMIT 1;
+ ctid | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-------+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ (0,1) | 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+-- ===================================================================
+-- used in PL/pgSQL function
+-- ===================================================================
+CREATE OR REPLACE FUNCTION f_test(p_c1 int) RETURNS int AS $$
+DECLARE
+ v_c1 int;
+BEGIN
+ SELECT c1 INTO v_c1 FROM ft1 WHERE c1 = p_c1 LIMIT 1;
+ PERFORM c1 FROM ft1 WHERE c1 = p_c1 AND p_c1 = v_c1 LIMIT 1;
+ RETURN v_c1;
+END;
+$$ LANGUAGE plpgsql;
+SELECT f_test(100);
+ f_test
+--------
+ 100
+(1 row)
+
+DROP FUNCTION f_test(int);
+-- ===================================================================
+-- REINDEX
+-- ===================================================================
+-- remote table is not created here
+CREATE FOREIGN TABLE reindex_foreign (c1 int, c2 int)
+ SERVER loopback2 OPTIONS (table_name 'reindex_local');
+REINDEX TABLE reindex_foreign; -- error
+ERROR: "reindex_foreign" is not a table or materialized view
+REINDEX TABLE CONCURRENTLY reindex_foreign; -- error
+ERROR: "reindex_foreign" is not a table or materialized view
+DROP FOREIGN TABLE reindex_foreign;
+-- partitions and foreign tables
+CREATE TABLE reind_fdw_parent (c1 int) PARTITION BY RANGE (c1);
+CREATE TABLE reind_fdw_0_10 PARTITION OF reind_fdw_parent
+ FOR VALUES FROM (0) TO (10);
+CREATE FOREIGN TABLE reind_fdw_10_20 PARTITION OF reind_fdw_parent
+ FOR VALUES FROM (10) TO (20)
+ SERVER loopback OPTIONS (table_name 'reind_local_10_20');
+REINDEX TABLE reind_fdw_parent; -- ok
+REINDEX TABLE CONCURRENTLY reind_fdw_parent; -- ok
+DROP TABLE reind_fdw_parent;
+-- ===================================================================
+-- conversion error
+-- ===================================================================
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int;
+SELECT * FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8) WHERE x1 = 1; -- ERROR
+ERROR: invalid input syntax for type integer: "foo"
+CONTEXT: column "x8" of foreign table "ftx"
+SELECT ftx.x1, ft2.c2, ftx.x8 FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2
+ WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR
+ERROR: invalid input syntax for type integer: "foo"
+CONTEXT: column "x8" of foreign table "ftx"
+SELECT ftx.x1, ft2.c2, ftx FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2
+ WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR
+ERROR: invalid input syntax for type integer: "foo"
+CONTEXT: whole-row reference to foreign table "ftx"
+SELECT sum(c2), array_agg(c8) FROM ft1 GROUP BY c8; -- ERROR
+ERROR: invalid input syntax for type integer: "foo"
+CONTEXT: processing expression at position 2 in select list
+ANALYZE ft1; -- ERROR
+ERROR: invalid input syntax for type integer: "foo"
+CONTEXT: column "c8" of foreign table "ft1"
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE user_enum;
+-- ===================================================================
+-- subtransaction
+-- + local/remote error doesn't break cursor
+-- ===================================================================
+BEGIN;
+DECLARE c CURSOR FOR SELECT * FROM ft1 ORDER BY c1;
+FETCH c;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+SAVEPOINT s;
+ERROR OUT; -- ERROR
+ERROR: syntax error at or near "ERROR"
+LINE 1: ERROR OUT;
+ ^
+ROLLBACK TO s;
+FETCH c;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+(1 row)
+
+SAVEPOINT s;
+SELECT * FROM ft1 WHERE 1 / (c1 - 1) > 0; -- ERROR
+ERROR: division by zero
+CONTEXT: remote SQL command: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((1 / ("C 1" - 1)) > 0))
+ROLLBACK TO s;
+FETCH c;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+(1 row)
+
+SELECT * FROM ft1 ORDER BY c1 LIMIT 1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+COMMIT;
+-- ===================================================================
+-- test handling of collations
+-- ===================================================================
+create table loct3 (f1 text collate "C" unique, f2 text, f3 varchar(10) unique);
+create foreign table ft3 (f1 text collate "C", f2 text, f3 varchar(10))
+ server loopback options (table_name 'loct3', use_remote_estimate 'true');
+-- can be sent to remote
+explain (verbose, costs off) select * from ft3 where f1 = 'foo';
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2, f3
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f1 = 'foo'::text))
+(3 rows)
+
+explain (verbose, costs off) select * from ft3 where f1 COLLATE "C" = 'foo';
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2, f3
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f1 = 'foo'::text))
+(3 rows)
+
+explain (verbose, costs off) select * from ft3 where f2 = 'foo';
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2, f3
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f2 = 'foo'::text))
+(3 rows)
+
+explain (verbose, costs off) select * from ft3 where f3 = 'foo';
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2, f3
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f3 = 'foo'::text))
+(3 rows)
+
+explain (verbose, costs off) select * from ft3 f, loct3 l
+ where f.f3 = l.f3 and l.f1 = 'foo';
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: f.f1, f.f2, f.f3, l.f1, l.f2, l.f3
+ -> Index Scan using loct3_f1_key on public.loct3 l
+ Output: l.f1, l.f2, l.f3
+ Index Cond: (l.f1 = 'foo'::text)
+ -> Foreign Scan on public.ft3 f
+ Output: f.f1, f.f2, f.f3
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE (($1::character varying(10) = f3))
+(8 rows)
+
+-- can't be sent to remote
+explain (verbose, costs off) select * from ft3 where f1 COLLATE "POSIX" = 'foo';
+ QUERY PLAN
+---------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2, f3
+ Filter: ((ft3.f1)::text = 'foo'::text)
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct3
+(4 rows)
+
+explain (verbose, costs off) select * from ft3 where f1 = 'foo' COLLATE "C";
+ QUERY PLAN
+---------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2, f3
+ Filter: (ft3.f1 = 'foo'::text COLLATE "C")
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct3
+(4 rows)
+
+explain (verbose, costs off) select * from ft3 where f2 COLLATE "C" = 'foo';
+ QUERY PLAN
+---------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2, f3
+ Filter: ((ft3.f2)::text = 'foo'::text)
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct3
+(4 rows)
+
+explain (verbose, costs off) select * from ft3 where f2 = 'foo' COLLATE "C";
+ QUERY PLAN
+---------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2, f3
+ Filter: (ft3.f2 = 'foo'::text COLLATE "C")
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct3
+(4 rows)
+
+explain (verbose, costs off) select * from ft3 f, loct3 l
+ where f.f3 = l.f3 COLLATE "POSIX" and l.f1 = 'foo';
+ QUERY PLAN
+-------------------------------------------------------------
+ Hash Join
+ Output: f.f1, f.f2, f.f3, l.f1, l.f2, l.f3
+ Inner Unique: true
+ Hash Cond: ((f.f3)::text = (l.f3)::text)
+ -> Foreign Scan on public.ft3 f
+ Output: f.f1, f.f2, f.f3
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct3
+ -> Hash
+ Output: l.f1, l.f2, l.f3
+ -> Index Scan using loct3_f1_key on public.loct3 l
+ Output: l.f1, l.f2, l.f3
+ Index Cond: (l.f1 = 'foo'::text)
+(12 rows)
+
+-- ===================================================================
+-- test writable foreign table stuff
+-- ===================================================================
+EXPLAIN (verbose, costs off)
+INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Insert on public.ft2
+ Remote SQL: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ Batch Size: 1
+ -> Subquery Scan on "*SELECT*"
+ Output: "*SELECT*"."?column?", "*SELECT*"."?column?_1", NULL::integer, "*SELECT*"."?column?_2", NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft2 '::character(10), NULL::user_enum
+ -> Foreign Scan on public.ft2 ft2_1
+ Output: (ft2_1.c1 + 1000), (ft2_1.c2 + 100), (ft2_1.c3 || ft2_1.c3)
+ Remote SQL: SELECT "C 1", c2, c3 FROM "S 1"."T 1" LIMIT 20::bigint
+(8 rows)
+
+INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
+INSERT INTO ft2 (c1,c2,c3)
+ VALUES (1101,201,'aaa'), (1102,202,'bbb'), (1103,203,'ccc') RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+-----+----+----+----+------------+----
+ 1101 | 201 | aaa | | | | ft2 |
+ 1102 | 202 | bbb | | | | ft2 |
+ 1103 | 203 | ccc | | | | ft2 |
+(3 rows)
+
+INSERT INTO ft2 (c1,c2,c3) VALUES (1104,204,'ddd'), (1105,205,'eee');
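+-- The "Batch Size: 1" line in the INSERT plan above reflects the default
+-- batching behavior; as a minimal sketch (hypothetical object names, not part
+-- of this test), batched remote inserts can be requested with the batch_size
+-- option at either the server or the foreign-table level:
+--   ALTER SERVER some_server OPTIONS (ADD batch_size '10');
+--   ALTER FOREIGN TABLE some_ft OPTIONS (ADD batch_size '10');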
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c2 = c2 + 300, c3 = c3 || '_update3' WHERE c1 % 10 = 3; -- can be pushed down
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------
+ Update on public.ft2
+ -> Foreign Update on public.ft2
+ Remote SQL: UPDATE "S 1"."T 1" SET c2 = (c2 + 300), c3 = (c3 || '_update3'::text) WHERE ((("C 1" % 10) = 3))
+(3 rows)
+
+UPDATE ft2 SET c2 = c2 + 300, c3 = c3 || '_update3' WHERE c1 % 10 = 3;
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING *; -- can be pushed down
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Update on public.ft2
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ -> Foreign Update on public.ft2
+ Remote SQL: UPDATE "S 1"."T 1" SET c2 = (c2 + 400), c3 = (c3 || '_update7'::text) WHERE ((("C 1" % 10) = 7)) RETURNING "C 1", c2, c3, c4, c5, c6, c7, c8
+(4 rows)
+
+UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+--------------------+------------------------------+--------------------------+----+------------+-----
+ 7 | 407 | 00007_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 17 | 407 | 00017_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 27 | 407 | 00027_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 37 | 407 | 00037_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 47 | 407 | 00047_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 57 | 407 | 00057_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 67 | 407 | 00067_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 77 | 407 | 00077_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 87 | 407 | 00087_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 97 | 407 | 00097_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 107 | 407 | 00107_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 117 | 407 | 00117_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 127 | 407 | 00127_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 137 | 407 | 00137_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 147 | 407 | 00147_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 157 | 407 | 00157_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 167 | 407 | 00167_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 177 | 407 | 00177_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 187 | 407 | 00187_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 197 | 407 | 00197_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 207 | 407 | 00207_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 217 | 407 | 00217_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 227 | 407 | 00227_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 237 | 407 | 00237_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 247 | 407 | 00247_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 257 | 407 | 00257_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 267 | 407 | 00267_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 277 | 407 | 00277_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 287 | 407 | 00287_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 297 | 407 | 00297_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 307 | 407 | 00307_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 317 | 407 | 00317_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 327 | 407 | 00327_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 337 | 407 | 00337_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 347 | 407 | 00347_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 357 | 407 | 00357_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 367 | 407 | 00367_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 377 | 407 | 00377_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 387 | 407 | 00387_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 397 | 407 | 00397_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 407 | 407 | 00407_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 417 | 407 | 00417_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 427 | 407 | 00427_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 437 | 407 | 00437_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 447 | 407 | 00447_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 457 | 407 | 00457_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 467 | 407 | 00467_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 477 | 407 | 00477_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 487 | 407 | 00487_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 497 | 407 | 00497_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 507 | 407 | 00507_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 517 | 407 | 00517_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 527 | 407 | 00527_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 537 | 407 | 00537_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 547 | 407 | 00547_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 557 | 407 | 00557_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 567 | 407 | 00567_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 577 | 407 | 00577_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 587 | 407 | 00587_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 597 | 407 | 00597_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 607 | 407 | 00607_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 617 | 407 | 00617_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 627 | 407 | 00627_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 637 | 407 | 00637_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 647 | 407 | 00647_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 657 | 407 | 00657_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 667 | 407 | 00667_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 677 | 407 | 00677_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 687 | 407 | 00687_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 697 | 407 | 00697_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 707 | 407 | 00707_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 717 | 407 | 00717_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 727 | 407 | 00727_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 737 | 407 | 00737_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 747 | 407 | 00747_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 757 | 407 | 00757_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 767 | 407 | 00767_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 777 | 407 | 00777_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 787 | 407 | 00787_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 797 | 407 | 00797_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 807 | 407 | 00807_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 817 | 407 | 00817_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 827 | 407 | 00827_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 837 | 407 | 00837_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 847 | 407 | 00847_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 857 | 407 | 00857_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 867 | 407 | 00867_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 877 | 407 | 00877_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 887 | 407 | 00887_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 897 | 407 | 00897_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 907 | 407 | 00907_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 917 | 407 | 00917_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 927 | 407 | 00927_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 937 | 407 | 00937_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 947 | 407 | 00947_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 957 | 407 | 00957_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 967 | 407 | 00967_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 977 | 407 | 00977_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 987 | 407 | 00987_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 997 | 407 | 00997_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 1007 | 507 | 0000700007_update7 | | | | ft2 |
+ 1017 | 507 | 0001700017_update7 | | | | ft2 |
+(102 rows)
+
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
+ FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9; -- can be pushed down
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Update on public.ft2
+ -> Foreign Update
+ Remote SQL: UPDATE "S 1"."T 1" r1 SET c2 = (r1.c2 + 500), c3 = (r1.c3 || '_update9'::text), c7 = 'ft2 '::character(10) FROM "S 1"."T 1" r2 WHERE ((r1.c2 = r2."C 1")) AND (((r2."C 1" % 10) = 9))
+(3 rows)
+
+UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
+ FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9;
+EXPLAIN (verbose, costs off)
+ DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4; -- can be pushed down
+ QUERY PLAN
+--------------------------------------------------------------------------------------------
+ Delete on public.ft2
+ Output: c1, c4
+ -> Foreign Delete on public.ft2
+ Remote SQL: DELETE FROM "S 1"."T 1" WHERE ((("C 1" % 10) = 5)) RETURNING "C 1", c4
+(4 rows)
+
+DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4;
+ c1 | c4
+------+------------------------------
+ 5 | Tue Jan 06 00:00:00 1970 PST
+ 15 | Fri Jan 16 00:00:00 1970 PST
+ 25 | Mon Jan 26 00:00:00 1970 PST
+ 35 | Thu Feb 05 00:00:00 1970 PST
+ 45 | Sun Feb 15 00:00:00 1970 PST
+ 55 | Wed Feb 25 00:00:00 1970 PST
+ 65 | Sat Mar 07 00:00:00 1970 PST
+ 75 | Tue Mar 17 00:00:00 1970 PST
+ 85 | Fri Mar 27 00:00:00 1970 PST
+ 95 | Mon Apr 06 00:00:00 1970 PST
+ 105 | Tue Jan 06 00:00:00 1970 PST
+ 115 | Fri Jan 16 00:00:00 1970 PST
+ 125 | Mon Jan 26 00:00:00 1970 PST
+ 135 | Thu Feb 05 00:00:00 1970 PST
+ 145 | Sun Feb 15 00:00:00 1970 PST
+ 155 | Wed Feb 25 00:00:00 1970 PST
+ 165 | Sat Mar 07 00:00:00 1970 PST
+ 175 | Tue Mar 17 00:00:00 1970 PST
+ 185 | Fri Mar 27 00:00:00 1970 PST
+ 195 | Mon Apr 06 00:00:00 1970 PST
+ 205 | Tue Jan 06 00:00:00 1970 PST
+ 215 | Fri Jan 16 00:00:00 1970 PST
+ 225 | Mon Jan 26 00:00:00 1970 PST
+ 235 | Thu Feb 05 00:00:00 1970 PST
+ 245 | Sun Feb 15 00:00:00 1970 PST
+ 255 | Wed Feb 25 00:00:00 1970 PST
+ 265 | Sat Mar 07 00:00:00 1970 PST
+ 275 | Tue Mar 17 00:00:00 1970 PST
+ 285 | Fri Mar 27 00:00:00 1970 PST
+ 295 | Mon Apr 06 00:00:00 1970 PST
+ 305 | Tue Jan 06 00:00:00 1970 PST
+ 315 | Fri Jan 16 00:00:00 1970 PST
+ 325 | Mon Jan 26 00:00:00 1970 PST
+ 335 | Thu Feb 05 00:00:00 1970 PST
+ 345 | Sun Feb 15 00:00:00 1970 PST
+ 355 | Wed Feb 25 00:00:00 1970 PST
+ 365 | Sat Mar 07 00:00:00 1970 PST
+ 375 | Tue Mar 17 00:00:00 1970 PST
+ 385 | Fri Mar 27 00:00:00 1970 PST
+ 395 | Mon Apr 06 00:00:00 1970 PST
+ 405 | Tue Jan 06 00:00:00 1970 PST
+ 415 | Fri Jan 16 00:00:00 1970 PST
+ 425 | Mon Jan 26 00:00:00 1970 PST
+ 435 | Thu Feb 05 00:00:00 1970 PST
+ 445 | Sun Feb 15 00:00:00 1970 PST
+ 455 | Wed Feb 25 00:00:00 1970 PST
+ 465 | Sat Mar 07 00:00:00 1970 PST
+ 475 | Tue Mar 17 00:00:00 1970 PST
+ 485 | Fri Mar 27 00:00:00 1970 PST
+ 495 | Mon Apr 06 00:00:00 1970 PST
+ 505 | Tue Jan 06 00:00:00 1970 PST
+ 515 | Fri Jan 16 00:00:00 1970 PST
+ 525 | Mon Jan 26 00:00:00 1970 PST
+ 535 | Thu Feb 05 00:00:00 1970 PST
+ 545 | Sun Feb 15 00:00:00 1970 PST
+ 555 | Wed Feb 25 00:00:00 1970 PST
+ 565 | Sat Mar 07 00:00:00 1970 PST
+ 575 | Tue Mar 17 00:00:00 1970 PST
+ 585 | Fri Mar 27 00:00:00 1970 PST
+ 595 | Mon Apr 06 00:00:00 1970 PST
+ 605 | Tue Jan 06 00:00:00 1970 PST
+ 615 | Fri Jan 16 00:00:00 1970 PST
+ 625 | Mon Jan 26 00:00:00 1970 PST
+ 635 | Thu Feb 05 00:00:00 1970 PST
+ 645 | Sun Feb 15 00:00:00 1970 PST
+ 655 | Wed Feb 25 00:00:00 1970 PST
+ 665 | Sat Mar 07 00:00:00 1970 PST
+ 675 | Tue Mar 17 00:00:00 1970 PST
+ 685 | Fri Mar 27 00:00:00 1970 PST
+ 695 | Mon Apr 06 00:00:00 1970 PST
+ 705 | Tue Jan 06 00:00:00 1970 PST
+ 715 | Fri Jan 16 00:00:00 1970 PST
+ 725 | Mon Jan 26 00:00:00 1970 PST
+ 735 | Thu Feb 05 00:00:00 1970 PST
+ 745 | Sun Feb 15 00:00:00 1970 PST
+ 755 | Wed Feb 25 00:00:00 1970 PST
+ 765 | Sat Mar 07 00:00:00 1970 PST
+ 775 | Tue Mar 17 00:00:00 1970 PST
+ 785 | Fri Mar 27 00:00:00 1970 PST
+ 795 | Mon Apr 06 00:00:00 1970 PST
+ 805 | Tue Jan 06 00:00:00 1970 PST
+ 815 | Fri Jan 16 00:00:00 1970 PST
+ 825 | Mon Jan 26 00:00:00 1970 PST
+ 835 | Thu Feb 05 00:00:00 1970 PST
+ 845 | Sun Feb 15 00:00:00 1970 PST
+ 855 | Wed Feb 25 00:00:00 1970 PST
+ 865 | Sat Mar 07 00:00:00 1970 PST
+ 875 | Tue Mar 17 00:00:00 1970 PST
+ 885 | Fri Mar 27 00:00:00 1970 PST
+ 895 | Mon Apr 06 00:00:00 1970 PST
+ 905 | Tue Jan 06 00:00:00 1970 PST
+ 915 | Fri Jan 16 00:00:00 1970 PST
+ 925 | Mon Jan 26 00:00:00 1970 PST
+ 935 | Thu Feb 05 00:00:00 1970 PST
+ 945 | Sun Feb 15 00:00:00 1970 PST
+ 955 | Wed Feb 25 00:00:00 1970 PST
+ 965 | Sat Mar 07 00:00:00 1970 PST
+ 975 | Tue Mar 17 00:00:00 1970 PST
+ 985 | Fri Mar 27 00:00:00 1970 PST
+ 995 | Mon Apr 06 00:00:00 1970 PST
+ 1005 |
+ 1015 |
+ 1105 |
+(103 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2; -- can be pushed down
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------
+ Delete on public.ft2
+ -> Foreign Delete
+ Remote SQL: DELETE FROM "S 1"."T 1" r1 USING "S 1"."T 1" r2 WHERE ((r1.c2 = r2."C 1")) AND (((r2."C 1" % 10) = 2))
+(3 rows)
+
+DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
+SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1;
+ c1 | c2 | c3 | c4
+------+-----+--------------------+------------------------------
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST
+ 3 | 303 | 00003_update3 | Sun Jan 04 00:00:00 1970 PST
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST
+ 7 | 407 | 00007_update7 | Thu Jan 08 00:00:00 1970 PST
+ 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST
+ 9 | 509 | 00009_update9 | Sat Jan 10 00:00:00 1970 PST
+ 10 | 0 | 00010 | Sun Jan 11 00:00:00 1970 PST
+ 11 | 1 | 00011 | Mon Jan 12 00:00:00 1970 PST
+ 13 | 303 | 00013_update3 | Wed Jan 14 00:00:00 1970 PST
+ 14 | 4 | 00014 | Thu Jan 15 00:00:00 1970 PST
+ 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST
+ 17 | 407 | 00017_update7 | Sun Jan 18 00:00:00 1970 PST
+ 18 | 8 | 00018 | Mon Jan 19 00:00:00 1970 PST
+ 19 | 509 | 00019_update9 | Tue Jan 20 00:00:00 1970 PST
+ 20 | 0 | 00020 | Wed Jan 21 00:00:00 1970 PST
+ 21 | 1 | 00021 | Thu Jan 22 00:00:00 1970 PST
+ 23 | 303 | 00023_update3 | Sat Jan 24 00:00:00 1970 PST
+ 24 | 4 | 00024 | Sun Jan 25 00:00:00 1970 PST
+ 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST
+ 27 | 407 | 00027_update7 | Wed Jan 28 00:00:00 1970 PST
+ 28 | 8 | 00028 | Thu Jan 29 00:00:00 1970 PST
+ 29 | 509 | 00029_update9 | Fri Jan 30 00:00:00 1970 PST
+ 30 | 0 | 00030 | Sat Jan 31 00:00:00 1970 PST
+ 31 | 1 | 00031 | Sun Feb 01 00:00:00 1970 PST
+ 33 | 303 | 00033_update3 | Tue Feb 03 00:00:00 1970 PST
+ 34 | 4 | 00034 | Wed Feb 04 00:00:00 1970 PST
+ 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST
+ 37 | 407 | 00037_update7 | Sat Feb 07 00:00:00 1970 PST
+ 38 | 8 | 00038 | Sun Feb 08 00:00:00 1970 PST
+ 39 | 509 | 00039_update9 | Mon Feb 09 00:00:00 1970 PST
+ 40 | 0 | 00040 | Tue Feb 10 00:00:00 1970 PST
+ 41 | 1 | 00041 | Wed Feb 11 00:00:00 1970 PST
+ 43 | 303 | 00043_update3 | Fri Feb 13 00:00:00 1970 PST
+ 44 | 4 | 00044 | Sat Feb 14 00:00:00 1970 PST
+ 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST
+ 47 | 407 | 00047_update7 | Tue Feb 17 00:00:00 1970 PST
+ 48 | 8 | 00048 | Wed Feb 18 00:00:00 1970 PST
+ 49 | 509 | 00049_update9 | Thu Feb 19 00:00:00 1970 PST
+ 50 | 0 | 00050 | Fri Feb 20 00:00:00 1970 PST
+ 51 | 1 | 00051 | Sat Feb 21 00:00:00 1970 PST
+ 53 | 303 | 00053_update3 | Mon Feb 23 00:00:00 1970 PST
+ 54 | 4 | 00054 | Tue Feb 24 00:00:00 1970 PST
+ 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST
+ 57 | 407 | 00057_update7 | Fri Feb 27 00:00:00 1970 PST
+ 58 | 8 | 00058 | Sat Feb 28 00:00:00 1970 PST
+ 59 | 509 | 00059_update9 | Sun Mar 01 00:00:00 1970 PST
+ 60 | 0 | 00060 | Mon Mar 02 00:00:00 1970 PST
+ 61 | 1 | 00061 | Tue Mar 03 00:00:00 1970 PST
+ 63 | 303 | 00063_update3 | Thu Mar 05 00:00:00 1970 PST
+ 64 | 4 | 00064 | Fri Mar 06 00:00:00 1970 PST
+ 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST
+ 67 | 407 | 00067_update7 | Mon Mar 09 00:00:00 1970 PST
+ 68 | 8 | 00068 | Tue Mar 10 00:00:00 1970 PST
+ 69 | 509 | 00069_update9 | Wed Mar 11 00:00:00 1970 PST
+ 70 | 0 | 00070 | Thu Mar 12 00:00:00 1970 PST
+ 71 | 1 | 00071 | Fri Mar 13 00:00:00 1970 PST
+ 73 | 303 | 00073_update3 | Sun Mar 15 00:00:00 1970 PST
+ 74 | 4 | 00074 | Mon Mar 16 00:00:00 1970 PST
+ 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST
+ 77 | 407 | 00077_update7 | Thu Mar 19 00:00:00 1970 PST
+ 78 | 8 | 00078 | Fri Mar 20 00:00:00 1970 PST
+ 79 | 509 | 00079_update9 | Sat Mar 21 00:00:00 1970 PST
+ 80 | 0 | 00080 | Sun Mar 22 00:00:00 1970 PST
+ 81 | 1 | 00081 | Mon Mar 23 00:00:00 1970 PST
+ 83 | 303 | 00083_update3 | Wed Mar 25 00:00:00 1970 PST
+ 84 | 4 | 00084 | Thu Mar 26 00:00:00 1970 PST
+ 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST
+ 87 | 407 | 00087_update7 | Sun Mar 29 00:00:00 1970 PST
+ 88 | 8 | 00088 | Mon Mar 30 00:00:00 1970 PST
+ 89 | 509 | 00089_update9 | Tue Mar 31 00:00:00 1970 PST
+ 90 | 0 | 00090 | Wed Apr 01 00:00:00 1970 PST
+ 91 | 1 | 00091 | Thu Apr 02 00:00:00 1970 PST
+ 93 | 303 | 00093_update3 | Sat Apr 04 00:00:00 1970 PST
+ 94 | 4 | 00094 | Sun Apr 05 00:00:00 1970 PST
+ 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST
+ 97 | 407 | 00097_update7 | Wed Apr 08 00:00:00 1970 PST
+ 98 | 8 | 00098 | Thu Apr 09 00:00:00 1970 PST
+ 99 | 509 | 00099_update9 | Fri Apr 10 00:00:00 1970 PST
+ 100 | 0 | 00100 | Thu Jan 01 00:00:00 1970 PST
+ 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST
+ 103 | 303 | 00103_update3 | Sun Jan 04 00:00:00 1970 PST
+ 104 | 4 | 00104 | Mon Jan 05 00:00:00 1970 PST
+ 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST
+ 107 | 407 | 00107_update7 | Thu Jan 08 00:00:00 1970 PST
+ 108 | 8 | 00108 | Fri Jan 09 00:00:00 1970 PST
+ 109 | 509 | 00109_update9 | Sat Jan 10 00:00:00 1970 PST
+ 110 | 0 | 00110 | Sun Jan 11 00:00:00 1970 PST
+ 111 | 1 | 00111 | Mon Jan 12 00:00:00 1970 PST
+ 113 | 303 | 00113_update3 | Wed Jan 14 00:00:00 1970 PST
+ 114 | 4 | 00114 | Thu Jan 15 00:00:00 1970 PST
+ 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST
+ 117 | 407 | 00117_update7 | Sun Jan 18 00:00:00 1970 PST
+ 118 | 8 | 00118 | Mon Jan 19 00:00:00 1970 PST
+ 119 | 509 | 00119_update9 | Tue Jan 20 00:00:00 1970 PST
+ 120 | 0 | 00120 | Wed Jan 21 00:00:00 1970 PST
+ 121 | 1 | 00121 | Thu Jan 22 00:00:00 1970 PST
+ 123 | 303 | 00123_update3 | Sat Jan 24 00:00:00 1970 PST
+ 124 | 4 | 00124 | Sun Jan 25 00:00:00 1970 PST
+ 126 | 6 | 00126 | Tue Jan 27 00:00:00 1970 PST
+ 127 | 407 | 00127_update7 | Wed Jan 28 00:00:00 1970 PST
+ 128 | 8 | 00128 | Thu Jan 29 00:00:00 1970 PST
+ 129 | 509 | 00129_update9 | Fri Jan 30 00:00:00 1970 PST
+ 130 | 0 | 00130 | Sat Jan 31 00:00:00 1970 PST
+ 131 | 1 | 00131 | Sun Feb 01 00:00:00 1970 PST
+ 133 | 303 | 00133_update3 | Tue Feb 03 00:00:00 1970 PST
+ 134 | 4 | 00134 | Wed Feb 04 00:00:00 1970 PST
+ 136 | 6 | 00136 | Fri Feb 06 00:00:00 1970 PST
+ 137 | 407 | 00137_update7 | Sat Feb 07 00:00:00 1970 PST
+ 138 | 8 | 00138 | Sun Feb 08 00:00:00 1970 PST
+ 139 | 509 | 00139_update9 | Mon Feb 09 00:00:00 1970 PST
+ 140 | 0 | 00140 | Tue Feb 10 00:00:00 1970 PST
+ 141 | 1 | 00141 | Wed Feb 11 00:00:00 1970 PST
+ 143 | 303 | 00143_update3 | Fri Feb 13 00:00:00 1970 PST
+ 144 | 4 | 00144 | Sat Feb 14 00:00:00 1970 PST
+ 146 | 6 | 00146 | Mon Feb 16 00:00:00 1970 PST
+ 147 | 407 | 00147_update7 | Tue Feb 17 00:00:00 1970 PST
+ 148 | 8 | 00148 | Wed Feb 18 00:00:00 1970 PST
+ 149 | 509 | 00149_update9 | Thu Feb 19 00:00:00 1970 PST
+ 150 | 0 | 00150 | Fri Feb 20 00:00:00 1970 PST
+ 151 | 1 | 00151 | Sat Feb 21 00:00:00 1970 PST
+ 153 | 303 | 00153_update3 | Mon Feb 23 00:00:00 1970 PST
+ 154 | 4 | 00154 | Tue Feb 24 00:00:00 1970 PST
+ 156 | 6 | 00156 | Thu Feb 26 00:00:00 1970 PST
+ 157 | 407 | 00157_update7 | Fri Feb 27 00:00:00 1970 PST
+ 158 | 8 | 00158 | Sat Feb 28 00:00:00 1970 PST
+ 159 | 509 | 00159_update9 | Sun Mar 01 00:00:00 1970 PST
+ 160 | 0 | 00160 | Mon Mar 02 00:00:00 1970 PST
+ 161 | 1 | 00161 | Tue Mar 03 00:00:00 1970 PST
+ 163 | 303 | 00163_update3 | Thu Mar 05 00:00:00 1970 PST
+ 164 | 4 | 00164 | Fri Mar 06 00:00:00 1970 PST
+ 166 | 6 | 00166 | Sun Mar 08 00:00:00 1970 PST
+ 167 | 407 | 00167_update7 | Mon Mar 09 00:00:00 1970 PST
+ 168 | 8 | 00168 | Tue Mar 10 00:00:00 1970 PST
+ 169 | 509 | 00169_update9 | Wed Mar 11 00:00:00 1970 PST
+ 170 | 0 | 00170 | Thu Mar 12 00:00:00 1970 PST
+ 171 | 1 | 00171 | Fri Mar 13 00:00:00 1970 PST
+ 173 | 303 | 00173_update3 | Sun Mar 15 00:00:00 1970 PST
+ 174 | 4 | 00174 | Mon Mar 16 00:00:00 1970 PST
+ 176 | 6 | 00176 | Wed Mar 18 00:00:00 1970 PST
+ 177 | 407 | 00177_update7 | Thu Mar 19 00:00:00 1970 PST
+ 178 | 8 | 00178 | Fri Mar 20 00:00:00 1970 PST
+ 179 | 509 | 00179_update9 | Sat Mar 21 00:00:00 1970 PST
+ 180 | 0 | 00180 | Sun Mar 22 00:00:00 1970 PST
+ 181 | 1 | 00181 | Mon Mar 23 00:00:00 1970 PST
+ 183 | 303 | 00183_update3 | Wed Mar 25 00:00:00 1970 PST
+ 184 | 4 | 00184 | Thu Mar 26 00:00:00 1970 PST
+ 186 | 6 | 00186 | Sat Mar 28 00:00:00 1970 PST
+ 187 | 407 | 00187_update7 | Sun Mar 29 00:00:00 1970 PST
+ 188 | 8 | 00188 | Mon Mar 30 00:00:00 1970 PST
+ 189 | 509 | 00189_update9 | Tue Mar 31 00:00:00 1970 PST
+ 190 | 0 | 00190 | Wed Apr 01 00:00:00 1970 PST
+ 191 | 1 | 00191 | Thu Apr 02 00:00:00 1970 PST
+ 193 | 303 | 00193_update3 | Sat Apr 04 00:00:00 1970 PST
+ 194 | 4 | 00194 | Sun Apr 05 00:00:00 1970 PST
+ 196 | 6 | 00196 | Tue Apr 07 00:00:00 1970 PST
+ 197 | 407 | 00197_update7 | Wed Apr 08 00:00:00 1970 PST
+ 198 | 8 | 00198 | Thu Apr 09 00:00:00 1970 PST
+ 199 | 509 | 00199_update9 | Fri Apr 10 00:00:00 1970 PST
+ 200 | 0 | 00200 | Thu Jan 01 00:00:00 1970 PST
+ 201 | 1 | 00201 | Fri Jan 02 00:00:00 1970 PST
+ 203 | 303 | 00203_update3 | Sun Jan 04 00:00:00 1970 PST
+ 204 | 4 | 00204 | Mon Jan 05 00:00:00 1970 PST
+ 206 | 6 | 00206 | Wed Jan 07 00:00:00 1970 PST
+ 207 | 407 | 00207_update7 | Thu Jan 08 00:00:00 1970 PST
+ 208 | 8 | 00208 | Fri Jan 09 00:00:00 1970 PST
+ 209 | 509 | 00209_update9 | Sat Jan 10 00:00:00 1970 PST
+ 210 | 0 | 00210 | Sun Jan 11 00:00:00 1970 PST
+ 211 | 1 | 00211 | Mon Jan 12 00:00:00 1970 PST
+ 213 | 303 | 00213_update3 | Wed Jan 14 00:00:00 1970 PST
+ 214 | 4 | 00214 | Thu Jan 15 00:00:00 1970 PST
+ 216 | 6 | 00216 | Sat Jan 17 00:00:00 1970 PST
+ 217 | 407 | 00217_update7 | Sun Jan 18 00:00:00 1970 PST
+ 218 | 8 | 00218 | Mon Jan 19 00:00:00 1970 PST
+ 219 | 509 | 00219_update9 | Tue Jan 20 00:00:00 1970 PST
+ 220 | 0 | 00220 | Wed Jan 21 00:00:00 1970 PST
+ 221 | 1 | 00221 | Thu Jan 22 00:00:00 1970 PST
+ 223 | 303 | 00223_update3 | Sat Jan 24 00:00:00 1970 PST
+ 224 | 4 | 00224 | Sun Jan 25 00:00:00 1970 PST
+ 226 | 6 | 00226 | Tue Jan 27 00:00:00 1970 PST
+ 227 | 407 | 00227_update7 | Wed Jan 28 00:00:00 1970 PST
+ 228 | 8 | 00228 | Thu Jan 29 00:00:00 1970 PST
+ 229 | 509 | 00229_update9 | Fri Jan 30 00:00:00 1970 PST
+ 230 | 0 | 00230 | Sat Jan 31 00:00:00 1970 PST
+ 231 | 1 | 00231 | Sun Feb 01 00:00:00 1970 PST
+ 233 | 303 | 00233_update3 | Tue Feb 03 00:00:00 1970 PST
+ 234 | 4 | 00234 | Wed Feb 04 00:00:00 1970 PST
+ 236 | 6 | 00236 | Fri Feb 06 00:00:00 1970 PST
+ 237 | 407 | 00237_update7 | Sat Feb 07 00:00:00 1970 PST
+ 238 | 8 | 00238 | Sun Feb 08 00:00:00 1970 PST
+ 239 | 509 | 00239_update9 | Mon Feb 09 00:00:00 1970 PST
+ 240 | 0 | 00240 | Tue Feb 10 00:00:00 1970 PST
+ 241 | 1 | 00241 | Wed Feb 11 00:00:00 1970 PST
+ 243 | 303 | 00243_update3 | Fri Feb 13 00:00:00 1970 PST
+ 244 | 4 | 00244 | Sat Feb 14 00:00:00 1970 PST
+ 246 | 6 | 00246 | Mon Feb 16 00:00:00 1970 PST
+ 247 | 407 | 00247_update7 | Tue Feb 17 00:00:00 1970 PST
+ 248 | 8 | 00248 | Wed Feb 18 00:00:00 1970 PST
+ 249 | 509 | 00249_update9 | Thu Feb 19 00:00:00 1970 PST
+ 250 | 0 | 00250 | Fri Feb 20 00:00:00 1970 PST
+ 251 | 1 | 00251 | Sat Feb 21 00:00:00 1970 PST
+ 253 | 303 | 00253_update3 | Mon Feb 23 00:00:00 1970 PST
+ 254 | 4 | 00254 | Tue Feb 24 00:00:00 1970 PST
+ 256 | 6 | 00256 | Thu Feb 26 00:00:00 1970 PST
+ 257 | 407 | 00257_update7 | Fri Feb 27 00:00:00 1970 PST
+ 258 | 8 | 00258 | Sat Feb 28 00:00:00 1970 PST
+ 259 | 509 | 00259_update9 | Sun Mar 01 00:00:00 1970 PST
+ 260 | 0 | 00260 | Mon Mar 02 00:00:00 1970 PST
+ 261 | 1 | 00261 | Tue Mar 03 00:00:00 1970 PST
+ 263 | 303 | 00263_update3 | Thu Mar 05 00:00:00 1970 PST
+ 264 | 4 | 00264 | Fri Mar 06 00:00:00 1970 PST
+ 266 | 6 | 00266 | Sun Mar 08 00:00:00 1970 PST
+ 267 | 407 | 00267_update7 | Mon Mar 09 00:00:00 1970 PST
+ 268 | 8 | 00268 | Tue Mar 10 00:00:00 1970 PST
+ 269 | 509 | 00269_update9 | Wed Mar 11 00:00:00 1970 PST
+ 270 | 0 | 00270 | Thu Mar 12 00:00:00 1970 PST
+ 271 | 1 | 00271 | Fri Mar 13 00:00:00 1970 PST
+ 273 | 303 | 00273_update3 | Sun Mar 15 00:00:00 1970 PST
+ 274 | 4 | 00274 | Mon Mar 16 00:00:00 1970 PST
+ 276 | 6 | 00276 | Wed Mar 18 00:00:00 1970 PST
+ 277 | 407 | 00277_update7 | Thu Mar 19 00:00:00 1970 PST
+ 278 | 8 | 00278 | Fri Mar 20 00:00:00 1970 PST
+ 279 | 509 | 00279_update9 | Sat Mar 21 00:00:00 1970 PST
+ 280 | 0 | 00280 | Sun Mar 22 00:00:00 1970 PST
+ 281 | 1 | 00281 | Mon Mar 23 00:00:00 1970 PST
+ 283 | 303 | 00283_update3 | Wed Mar 25 00:00:00 1970 PST
+ 284 | 4 | 00284 | Thu Mar 26 00:00:00 1970 PST
+ 286 | 6 | 00286 | Sat Mar 28 00:00:00 1970 PST
+ 287 | 407 | 00287_update7 | Sun Mar 29 00:00:00 1970 PST
+ 288 | 8 | 00288 | Mon Mar 30 00:00:00 1970 PST
+ 289 | 509 | 00289_update9 | Tue Mar 31 00:00:00 1970 PST
+ 290 | 0 | 00290 | Wed Apr 01 00:00:00 1970 PST
+ 291 | 1 | 00291 | Thu Apr 02 00:00:00 1970 PST
+ 293 | 303 | 00293_update3 | Sat Apr 04 00:00:00 1970 PST
+ 294 | 4 | 00294 | Sun Apr 05 00:00:00 1970 PST
+ 296 | 6 | 00296 | Tue Apr 07 00:00:00 1970 PST
+ 297 | 407 | 00297_update7 | Wed Apr 08 00:00:00 1970 PST
+ 298 | 8 | 00298 | Thu Apr 09 00:00:00 1970 PST
+ 299 | 509 | 00299_update9 | Fri Apr 10 00:00:00 1970 PST
+ 300 | 0 | 00300 | Thu Jan 01 00:00:00 1970 PST
+ 301 | 1 | 00301 | Fri Jan 02 00:00:00 1970 PST
+ 303 | 303 | 00303_update3 | Sun Jan 04 00:00:00 1970 PST
+ 304 | 4 | 00304 | Mon Jan 05 00:00:00 1970 PST
+ 306 | 6 | 00306 | Wed Jan 07 00:00:00 1970 PST
+ 307 | 407 | 00307_update7 | Thu Jan 08 00:00:00 1970 PST
+ 308 | 8 | 00308 | Fri Jan 09 00:00:00 1970 PST
+ 309 | 509 | 00309_update9 | Sat Jan 10 00:00:00 1970 PST
+ 310 | 0 | 00310 | Sun Jan 11 00:00:00 1970 PST
+ 311 | 1 | 00311 | Mon Jan 12 00:00:00 1970 PST
+ 313 | 303 | 00313_update3 | Wed Jan 14 00:00:00 1970 PST
+ 314 | 4 | 00314 | Thu Jan 15 00:00:00 1970 PST
+ 316 | 6 | 00316 | Sat Jan 17 00:00:00 1970 PST
+ 317 | 407 | 00317_update7 | Sun Jan 18 00:00:00 1970 PST
+ 318 | 8 | 00318 | Mon Jan 19 00:00:00 1970 PST
+ 319 | 509 | 00319_update9 | Tue Jan 20 00:00:00 1970 PST
+ 320 | 0 | 00320 | Wed Jan 21 00:00:00 1970 PST
+ 321 | 1 | 00321 | Thu Jan 22 00:00:00 1970 PST
+ 323 | 303 | 00323_update3 | Sat Jan 24 00:00:00 1970 PST
+ 324 | 4 | 00324 | Sun Jan 25 00:00:00 1970 PST
+ 326 | 6 | 00326 | Tue Jan 27 00:00:00 1970 PST
+ 327 | 407 | 00327_update7 | Wed Jan 28 00:00:00 1970 PST
+ 328 | 8 | 00328 | Thu Jan 29 00:00:00 1970 PST
+ 329 | 509 | 00329_update9 | Fri Jan 30 00:00:00 1970 PST
+ 330 | 0 | 00330 | Sat Jan 31 00:00:00 1970 PST
+ 331 | 1 | 00331 | Sun Feb 01 00:00:00 1970 PST
+ 333 | 303 | 00333_update3 | Tue Feb 03 00:00:00 1970 PST
+ 334 | 4 | 00334 | Wed Feb 04 00:00:00 1970 PST
+ 336 | 6 | 00336 | Fri Feb 06 00:00:00 1970 PST
+ 337 | 407 | 00337_update7 | Sat Feb 07 00:00:00 1970 PST
+ 338 | 8 | 00338 | Sun Feb 08 00:00:00 1970 PST
+ 339 | 509 | 00339_update9 | Mon Feb 09 00:00:00 1970 PST
+ 340 | 0 | 00340 | Tue Feb 10 00:00:00 1970 PST
+ 341 | 1 | 00341 | Wed Feb 11 00:00:00 1970 PST
+ 343 | 303 | 00343_update3 | Fri Feb 13 00:00:00 1970 PST
+ 344 | 4 | 00344 | Sat Feb 14 00:00:00 1970 PST
+ 346 | 6 | 00346 | Mon Feb 16 00:00:00 1970 PST
+ 347 | 407 | 00347_update7 | Tue Feb 17 00:00:00 1970 PST
+ 348 | 8 | 00348 | Wed Feb 18 00:00:00 1970 PST
+ 349 | 509 | 00349_update9 | Thu Feb 19 00:00:00 1970 PST
+ 350 | 0 | 00350 | Fri Feb 20 00:00:00 1970 PST
+ 351 | 1 | 00351 | Sat Feb 21 00:00:00 1970 PST
+ 353 | 303 | 00353_update3 | Mon Feb 23 00:00:00 1970 PST
+ 354 | 4 | 00354 | Tue Feb 24 00:00:00 1970 PST
+ 356 | 6 | 00356 | Thu Feb 26 00:00:00 1970 PST
+ 357 | 407 | 00357_update7 | Fri Feb 27 00:00:00 1970 PST
+ 358 | 8 | 00358 | Sat Feb 28 00:00:00 1970 PST
+ 359 | 509 | 00359_update9 | Sun Mar 01 00:00:00 1970 PST
+ 360 | 0 | 00360 | Mon Mar 02 00:00:00 1970 PST
+ 361 | 1 | 00361 | Tue Mar 03 00:00:00 1970 PST
+ 363 | 303 | 00363_update3 | Thu Mar 05 00:00:00 1970 PST
+ 364 | 4 | 00364 | Fri Mar 06 00:00:00 1970 PST
+ 366 | 6 | 00366 | Sun Mar 08 00:00:00 1970 PST
+ 367 | 407 | 00367_update7 | Mon Mar 09 00:00:00 1970 PST
+ 368 | 8 | 00368 | Tue Mar 10 00:00:00 1970 PST
+ 369 | 509 | 00369_update9 | Wed Mar 11 00:00:00 1970 PST
+ 370 | 0 | 00370 | Thu Mar 12 00:00:00 1970 PST
+ 371 | 1 | 00371 | Fri Mar 13 00:00:00 1970 PST
+ 373 | 303 | 00373_update3 | Sun Mar 15 00:00:00 1970 PST
+ 374 | 4 | 00374 | Mon Mar 16 00:00:00 1970 PST
+ 376 | 6 | 00376 | Wed Mar 18 00:00:00 1970 PST
+ 377 | 407 | 00377_update7 | Thu Mar 19 00:00:00 1970 PST
+ 378 | 8 | 00378 | Fri Mar 20 00:00:00 1970 PST
+ 379 | 509 | 00379_update9 | Sat Mar 21 00:00:00 1970 PST
+ 380 | 0 | 00380 | Sun Mar 22 00:00:00 1970 PST
+ 381 | 1 | 00381 | Mon Mar 23 00:00:00 1970 PST
+ 383 | 303 | 00383_update3 | Wed Mar 25 00:00:00 1970 PST
+ 384 | 4 | 00384 | Thu Mar 26 00:00:00 1970 PST
+ 386 | 6 | 00386 | Sat Mar 28 00:00:00 1970 PST
+ 387 | 407 | 00387_update7 | Sun Mar 29 00:00:00 1970 PST
+ 388 | 8 | 00388 | Mon Mar 30 00:00:00 1970 PST
+ 389 | 509 | 00389_update9 | Tue Mar 31 00:00:00 1970 PST
+ 390 | 0 | 00390 | Wed Apr 01 00:00:00 1970 PST
+ 391 | 1 | 00391 | Thu Apr 02 00:00:00 1970 PST
+ 393 | 303 | 00393_update3 | Sat Apr 04 00:00:00 1970 PST
+ 394 | 4 | 00394 | Sun Apr 05 00:00:00 1970 PST
+ 396 | 6 | 00396 | Tue Apr 07 00:00:00 1970 PST
+ 397 | 407 | 00397_update7 | Wed Apr 08 00:00:00 1970 PST
+ 398 | 8 | 00398 | Thu Apr 09 00:00:00 1970 PST
+ 399 | 509 | 00399_update9 | Fri Apr 10 00:00:00 1970 PST
+ 400 | 0 | 00400 | Thu Jan 01 00:00:00 1970 PST
+ 401 | 1 | 00401 | Fri Jan 02 00:00:00 1970 PST
+ 403 | 303 | 00403_update3 | Sun Jan 04 00:00:00 1970 PST
+ 404 | 4 | 00404 | Mon Jan 05 00:00:00 1970 PST
+ 406 | 6 | 00406 | Wed Jan 07 00:00:00 1970 PST
+ 407 | 407 | 00407_update7 | Thu Jan 08 00:00:00 1970 PST
+ 408 | 8 | 00408 | Fri Jan 09 00:00:00 1970 PST
+ 409 | 509 | 00409_update9 | Sat Jan 10 00:00:00 1970 PST
+ 410 | 0 | 00410 | Sun Jan 11 00:00:00 1970 PST
+ 411 | 1 | 00411 | Mon Jan 12 00:00:00 1970 PST
+ 413 | 303 | 00413_update3 | Wed Jan 14 00:00:00 1970 PST
+ 414 | 4 | 00414 | Thu Jan 15 00:00:00 1970 PST
+ 416 | 6 | 00416 | Sat Jan 17 00:00:00 1970 PST
+ 417 | 407 | 00417_update7 | Sun Jan 18 00:00:00 1970 PST
+ 418 | 8 | 00418 | Mon Jan 19 00:00:00 1970 PST
+ 419 | 509 | 00419_update9 | Tue Jan 20 00:00:00 1970 PST
+ 420 | 0 | 00420 | Wed Jan 21 00:00:00 1970 PST
+ 421 | 1 | 00421 | Thu Jan 22 00:00:00 1970 PST
+ 423 | 303 | 00423_update3 | Sat Jan 24 00:00:00 1970 PST
+ 424 | 4 | 00424 | Sun Jan 25 00:00:00 1970 PST
+ 426 | 6 | 00426 | Tue Jan 27 00:00:00 1970 PST
+ 427 | 407 | 00427_update7 | Wed Jan 28 00:00:00 1970 PST
+ 428 | 8 | 00428 | Thu Jan 29 00:00:00 1970 PST
+ 429 | 509 | 00429_update9 | Fri Jan 30 00:00:00 1970 PST
+ 430 | 0 | 00430 | Sat Jan 31 00:00:00 1970 PST
+ 431 | 1 | 00431 | Sun Feb 01 00:00:00 1970 PST
+ 433 | 303 | 00433_update3 | Tue Feb 03 00:00:00 1970 PST
+ 434 | 4 | 00434 | Wed Feb 04 00:00:00 1970 PST
+ 436 | 6 | 00436 | Fri Feb 06 00:00:00 1970 PST
+ 437 | 407 | 00437_update7 | Sat Feb 07 00:00:00 1970 PST
+ 438 | 8 | 00438 | Sun Feb 08 00:00:00 1970 PST
+ 439 | 509 | 00439_update9 | Mon Feb 09 00:00:00 1970 PST
+ 440 | 0 | 00440 | Tue Feb 10 00:00:00 1970 PST
+ 441 | 1 | 00441 | Wed Feb 11 00:00:00 1970 PST
+ 443 | 303 | 00443_update3 | Fri Feb 13 00:00:00 1970 PST
+ 444 | 4 | 00444 | Sat Feb 14 00:00:00 1970 PST
+ 446 | 6 | 00446 | Mon Feb 16 00:00:00 1970 PST
+ 447 | 407 | 00447_update7 | Tue Feb 17 00:00:00 1970 PST
+ 448 | 8 | 00448 | Wed Feb 18 00:00:00 1970 PST
+ 449 | 509 | 00449_update9 | Thu Feb 19 00:00:00 1970 PST
+ 450 | 0 | 00450 | Fri Feb 20 00:00:00 1970 PST
+ 451 | 1 | 00451 | Sat Feb 21 00:00:00 1970 PST
+ 453 | 303 | 00453_update3 | Mon Feb 23 00:00:00 1970 PST
+ 454 | 4 | 00454 | Tue Feb 24 00:00:00 1970 PST
+ 456 | 6 | 00456 | Thu Feb 26 00:00:00 1970 PST
+ 457 | 407 | 00457_update7 | Fri Feb 27 00:00:00 1970 PST
+ 458 | 8 | 00458 | Sat Feb 28 00:00:00 1970 PST
+ 459 | 509 | 00459_update9 | Sun Mar 01 00:00:00 1970 PST
+ 460 | 0 | 00460 | Mon Mar 02 00:00:00 1970 PST
+ 461 | 1 | 00461 | Tue Mar 03 00:00:00 1970 PST
+ 463 | 303 | 00463_update3 | Thu Mar 05 00:00:00 1970 PST
+ 464 | 4 | 00464 | Fri Mar 06 00:00:00 1970 PST
+ 466 | 6 | 00466 | Sun Mar 08 00:00:00 1970 PST
+ 467 | 407 | 00467_update7 | Mon Mar 09 00:00:00 1970 PST
+ 468 | 8 | 00468 | Tue Mar 10 00:00:00 1970 PST
+ 469 | 509 | 00469_update9 | Wed Mar 11 00:00:00 1970 PST
+ 470 | 0 | 00470 | Thu Mar 12 00:00:00 1970 PST
+ 471 | 1 | 00471 | Fri Mar 13 00:00:00 1970 PST
+ 473 | 303 | 00473_update3 | Sun Mar 15 00:00:00 1970 PST
+ 474 | 4 | 00474 | Mon Mar 16 00:00:00 1970 PST
+ 476 | 6 | 00476 | Wed Mar 18 00:00:00 1970 PST
+ 477 | 407 | 00477_update7 | Thu Mar 19 00:00:00 1970 PST
+ 478 | 8 | 00478 | Fri Mar 20 00:00:00 1970 PST
+ 479 | 509 | 00479_update9 | Sat Mar 21 00:00:00 1970 PST
+ 480 | 0 | 00480 | Sun Mar 22 00:00:00 1970 PST
+ 481 | 1 | 00481 | Mon Mar 23 00:00:00 1970 PST
+ 483 | 303 | 00483_update3 | Wed Mar 25 00:00:00 1970 PST
+ 484 | 4 | 00484 | Thu Mar 26 00:00:00 1970 PST
+ 486 | 6 | 00486 | Sat Mar 28 00:00:00 1970 PST
+ 487 | 407 | 00487_update7 | Sun Mar 29 00:00:00 1970 PST
+ 488 | 8 | 00488 | Mon Mar 30 00:00:00 1970 PST
+ 489 | 509 | 00489_update9 | Tue Mar 31 00:00:00 1970 PST
+ 490 | 0 | 00490 | Wed Apr 01 00:00:00 1970 PST
+ 491 | 1 | 00491 | Thu Apr 02 00:00:00 1970 PST
+ 493 | 303 | 00493_update3 | Sat Apr 04 00:00:00 1970 PST
+ 494 | 4 | 00494 | Sun Apr 05 00:00:00 1970 PST
+ 496 | 6 | 00496 | Tue Apr 07 00:00:00 1970 PST
+ 497 | 407 | 00497_update7 | Wed Apr 08 00:00:00 1970 PST
+ 498 | 8 | 00498 | Thu Apr 09 00:00:00 1970 PST
+ 499 | 509 | 00499_update9 | Fri Apr 10 00:00:00 1970 PST
+ 500 | 0 | 00500 | Thu Jan 01 00:00:00 1970 PST
+ 501 | 1 | 00501 | Fri Jan 02 00:00:00 1970 PST
+ 503 | 303 | 00503_update3 | Sun Jan 04 00:00:00 1970 PST
+ 504 | 4 | 00504 | Mon Jan 05 00:00:00 1970 PST
+ 506 | 6 | 00506 | Wed Jan 07 00:00:00 1970 PST
+ 507 | 407 | 00507_update7 | Thu Jan 08 00:00:00 1970 PST
+ 508 | 8 | 00508 | Fri Jan 09 00:00:00 1970 PST
+ 509 | 509 | 00509_update9 | Sat Jan 10 00:00:00 1970 PST
+ 510 | 0 | 00510 | Sun Jan 11 00:00:00 1970 PST
+ 511 | 1 | 00511 | Mon Jan 12 00:00:00 1970 PST
+ 513 | 303 | 00513_update3 | Wed Jan 14 00:00:00 1970 PST
+ 514 | 4 | 00514 | Thu Jan 15 00:00:00 1970 PST
+ 516 | 6 | 00516 | Sat Jan 17 00:00:00 1970 PST
+ 517 | 407 | 00517_update7 | Sun Jan 18 00:00:00 1970 PST
+ 518 | 8 | 00518 | Mon Jan 19 00:00:00 1970 PST
+ 519 | 509 | 00519_update9 | Tue Jan 20 00:00:00 1970 PST
+ 520 | 0 | 00520 | Wed Jan 21 00:00:00 1970 PST
+ 521 | 1 | 00521 | Thu Jan 22 00:00:00 1970 PST
+ 523 | 303 | 00523_update3 | Sat Jan 24 00:00:00 1970 PST
+ 524 | 4 | 00524 | Sun Jan 25 00:00:00 1970 PST
+ 526 | 6 | 00526 | Tue Jan 27 00:00:00 1970 PST
+ 527 | 407 | 00527_update7 | Wed Jan 28 00:00:00 1970 PST
+ 528 | 8 | 00528 | Thu Jan 29 00:00:00 1970 PST
+ 529 | 509 | 00529_update9 | Fri Jan 30 00:00:00 1970 PST
+ 530 | 0 | 00530 | Sat Jan 31 00:00:00 1970 PST
+ 531 | 1 | 00531 | Sun Feb 01 00:00:00 1970 PST
+ 533 | 303 | 00533_update3 | Tue Feb 03 00:00:00 1970 PST
+ 534 | 4 | 00534 | Wed Feb 04 00:00:00 1970 PST
+ 536 | 6 | 00536 | Fri Feb 06 00:00:00 1970 PST
+ 537 | 407 | 00537_update7 | Sat Feb 07 00:00:00 1970 PST
+ 538 | 8 | 00538 | Sun Feb 08 00:00:00 1970 PST
+ 539 | 509 | 00539_update9 | Mon Feb 09 00:00:00 1970 PST
+ 540 | 0 | 00540 | Tue Feb 10 00:00:00 1970 PST
+ 541 | 1 | 00541 | Wed Feb 11 00:00:00 1970 PST
+ 543 | 303 | 00543_update3 | Fri Feb 13 00:00:00 1970 PST
+ 544 | 4 | 00544 | Sat Feb 14 00:00:00 1970 PST
+ 546 | 6 | 00546 | Mon Feb 16 00:00:00 1970 PST
+ 547 | 407 | 00547_update7 | Tue Feb 17 00:00:00 1970 PST
+ 548 | 8 | 00548 | Wed Feb 18 00:00:00 1970 PST
+ 549 | 509 | 00549_update9 | Thu Feb 19 00:00:00 1970 PST
+ 550 | 0 | 00550 | Fri Feb 20 00:00:00 1970 PST
+ 551 | 1 | 00551 | Sat Feb 21 00:00:00 1970 PST
+ 553 | 303 | 00553_update3 | Mon Feb 23 00:00:00 1970 PST
+ 554 | 4 | 00554 | Tue Feb 24 00:00:00 1970 PST
+ 556 | 6 | 00556 | Thu Feb 26 00:00:00 1970 PST
+ 557 | 407 | 00557_update7 | Fri Feb 27 00:00:00 1970 PST
+ 558 | 8 | 00558 | Sat Feb 28 00:00:00 1970 PST
+ 559 | 509 | 00559_update9 | Sun Mar 01 00:00:00 1970 PST
+ 560 | 0 | 00560 | Mon Mar 02 00:00:00 1970 PST
+ 561 | 1 | 00561 | Tue Mar 03 00:00:00 1970 PST
+ 563 | 303 | 00563_update3 | Thu Mar 05 00:00:00 1970 PST
+ 564 | 4 | 00564 | Fri Mar 06 00:00:00 1970 PST
+ 566 | 6 | 00566 | Sun Mar 08 00:00:00 1970 PST
+ 567 | 407 | 00567_update7 | Mon Mar 09 00:00:00 1970 PST
+ 568 | 8 | 00568 | Tue Mar 10 00:00:00 1970 PST
+ 569 | 509 | 00569_update9 | Wed Mar 11 00:00:00 1970 PST
+ 570 | 0 | 00570 | Thu Mar 12 00:00:00 1970 PST
+ 571 | 1 | 00571 | Fri Mar 13 00:00:00 1970 PST
+ 573 | 303 | 00573_update3 | Sun Mar 15 00:00:00 1970 PST
+ 574 | 4 | 00574 | Mon Mar 16 00:00:00 1970 PST
+ 576 | 6 | 00576 | Wed Mar 18 00:00:00 1970 PST
+ 577 | 407 | 00577_update7 | Thu Mar 19 00:00:00 1970 PST
+ 578 | 8 | 00578 | Fri Mar 20 00:00:00 1970 PST
+ 579 | 509 | 00579_update9 | Sat Mar 21 00:00:00 1970 PST
+ 580 | 0 | 00580 | Sun Mar 22 00:00:00 1970 PST
+ 581 | 1 | 00581 | Mon Mar 23 00:00:00 1970 PST
+ 583 | 303 | 00583_update3 | Wed Mar 25 00:00:00 1970 PST
+ 584 | 4 | 00584 | Thu Mar 26 00:00:00 1970 PST
+ 586 | 6 | 00586 | Sat Mar 28 00:00:00 1970 PST
+ 587 | 407 | 00587_update7 | Sun Mar 29 00:00:00 1970 PST
+ 588 | 8 | 00588 | Mon Mar 30 00:00:00 1970 PST
+ 589 | 509 | 00589_update9 | Tue Mar 31 00:00:00 1970 PST
+ 590 | 0 | 00590 | Wed Apr 01 00:00:00 1970 PST
+ 591 | 1 | 00591 | Thu Apr 02 00:00:00 1970 PST
+ 593 | 303 | 00593_update3 | Sat Apr 04 00:00:00 1970 PST
+ 594 | 4 | 00594 | Sun Apr 05 00:00:00 1970 PST
+ 596 | 6 | 00596 | Tue Apr 07 00:00:00 1970 PST
+ 597 | 407 | 00597_update7 | Wed Apr 08 00:00:00 1970 PST
+ 598 | 8 | 00598 | Thu Apr 09 00:00:00 1970 PST
+ 599 | 509 | 00599_update9 | Fri Apr 10 00:00:00 1970 PST
+ 600 | 0 | 00600 | Thu Jan 01 00:00:00 1970 PST
+ 601 | 1 | 00601 | Fri Jan 02 00:00:00 1970 PST
+ 603 | 303 | 00603_update3 | Sun Jan 04 00:00:00 1970 PST
+ 604 | 4 | 00604 | Mon Jan 05 00:00:00 1970 PST
+ 606 | 6 | 00606 | Wed Jan 07 00:00:00 1970 PST
+ 607 | 407 | 00607_update7 | Thu Jan 08 00:00:00 1970 PST
+ 608 | 8 | 00608 | Fri Jan 09 00:00:00 1970 PST
+ 609 | 509 | 00609_update9 | Sat Jan 10 00:00:00 1970 PST
+ 610 | 0 | 00610 | Sun Jan 11 00:00:00 1970 PST
+ 611 | 1 | 00611 | Mon Jan 12 00:00:00 1970 PST
+ 613 | 303 | 00613_update3 | Wed Jan 14 00:00:00 1970 PST
+ 614 | 4 | 00614 | Thu Jan 15 00:00:00 1970 PST
+ 616 | 6 | 00616 | Sat Jan 17 00:00:00 1970 PST
+ 617 | 407 | 00617_update7 | Sun Jan 18 00:00:00 1970 PST
+ 618 | 8 | 00618 | Mon Jan 19 00:00:00 1970 PST
+ 619 | 509 | 00619_update9 | Tue Jan 20 00:00:00 1970 PST
+ 620 | 0 | 00620 | Wed Jan 21 00:00:00 1970 PST
+ 621 | 1 | 00621 | Thu Jan 22 00:00:00 1970 PST
+ 623 | 303 | 00623_update3 | Sat Jan 24 00:00:00 1970 PST
+ 624 | 4 | 00624 | Sun Jan 25 00:00:00 1970 PST
+ 626 | 6 | 00626 | Tue Jan 27 00:00:00 1970 PST
+ 627 | 407 | 00627_update7 | Wed Jan 28 00:00:00 1970 PST
+ 628 | 8 | 00628 | Thu Jan 29 00:00:00 1970 PST
+ 629 | 509 | 00629_update9 | Fri Jan 30 00:00:00 1970 PST
+ 630 | 0 | 00630 | Sat Jan 31 00:00:00 1970 PST
+ 631 | 1 | 00631 | Sun Feb 01 00:00:00 1970 PST
+ 633 | 303 | 00633_update3 | Tue Feb 03 00:00:00 1970 PST
+ 634 | 4 | 00634 | Wed Feb 04 00:00:00 1970 PST
+ 636 | 6 | 00636 | Fri Feb 06 00:00:00 1970 PST
+ 637 | 407 | 00637_update7 | Sat Feb 07 00:00:00 1970 PST
+ 638 | 8 | 00638 | Sun Feb 08 00:00:00 1970 PST
+ 639 | 509 | 00639_update9 | Mon Feb 09 00:00:00 1970 PST
+ 640 | 0 | 00640 | Tue Feb 10 00:00:00 1970 PST
+ 641 | 1 | 00641 | Wed Feb 11 00:00:00 1970 PST
+ 643 | 303 | 00643_update3 | Fri Feb 13 00:00:00 1970 PST
+ 644 | 4 | 00644 | Sat Feb 14 00:00:00 1970 PST
+ 646 | 6 | 00646 | Mon Feb 16 00:00:00 1970 PST
+ 647 | 407 | 00647_update7 | Tue Feb 17 00:00:00 1970 PST
+ 648 | 8 | 00648 | Wed Feb 18 00:00:00 1970 PST
+ 649 | 509 | 00649_update9 | Thu Feb 19 00:00:00 1970 PST
+ 650 | 0 | 00650 | Fri Feb 20 00:00:00 1970 PST
+ 651 | 1 | 00651 | Sat Feb 21 00:00:00 1970 PST
+ 653 | 303 | 00653_update3 | Mon Feb 23 00:00:00 1970 PST
+ 654 | 4 | 00654 | Tue Feb 24 00:00:00 1970 PST
+ 656 | 6 | 00656 | Thu Feb 26 00:00:00 1970 PST
+ 657 | 407 | 00657_update7 | Fri Feb 27 00:00:00 1970 PST
+ 658 | 8 | 00658 | Sat Feb 28 00:00:00 1970 PST
+ 659 | 509 | 00659_update9 | Sun Mar 01 00:00:00 1970 PST
+ 660 | 0 | 00660 | Mon Mar 02 00:00:00 1970 PST
+ 661 | 1 | 00661 | Tue Mar 03 00:00:00 1970 PST
+ 663 | 303 | 00663_update3 | Thu Mar 05 00:00:00 1970 PST
+ 664 | 4 | 00664 | Fri Mar 06 00:00:00 1970 PST
+ 666 | 6 | 00666 | Sun Mar 08 00:00:00 1970 PST
+ 667 | 407 | 00667_update7 | Mon Mar 09 00:00:00 1970 PST
+ 668 | 8 | 00668 | Tue Mar 10 00:00:00 1970 PST
+ 669 | 509 | 00669_update9 | Wed Mar 11 00:00:00 1970 PST
+ 670 | 0 | 00670 | Thu Mar 12 00:00:00 1970 PST
+ 671 | 1 | 00671 | Fri Mar 13 00:00:00 1970 PST
+ 673 | 303 | 00673_update3 | Sun Mar 15 00:00:00 1970 PST
+ 674 | 4 | 00674 | Mon Mar 16 00:00:00 1970 PST
+ 676 | 6 | 00676 | Wed Mar 18 00:00:00 1970 PST
+ 677 | 407 | 00677_update7 | Thu Mar 19 00:00:00 1970 PST
+ 678 | 8 | 00678 | Fri Mar 20 00:00:00 1970 PST
+ 679 | 509 | 00679_update9 | Sat Mar 21 00:00:00 1970 PST
+ 680 | 0 | 00680 | Sun Mar 22 00:00:00 1970 PST
+ 681 | 1 | 00681 | Mon Mar 23 00:00:00 1970 PST
+ 683 | 303 | 00683_update3 | Wed Mar 25 00:00:00 1970 PST
+ 684 | 4 | 00684 | Thu Mar 26 00:00:00 1970 PST
+ 686 | 6 | 00686 | Sat Mar 28 00:00:00 1970 PST
+ 687 | 407 | 00687_update7 | Sun Mar 29 00:00:00 1970 PST
+ 688 | 8 | 00688 | Mon Mar 30 00:00:00 1970 PST
+ 689 | 509 | 00689_update9 | Tue Mar 31 00:00:00 1970 PST
+ 690 | 0 | 00690 | Wed Apr 01 00:00:00 1970 PST
+ 691 | 1 | 00691 | Thu Apr 02 00:00:00 1970 PST
+ 693 | 303 | 00693_update3 | Sat Apr 04 00:00:00 1970 PST
+ 694 | 4 | 00694 | Sun Apr 05 00:00:00 1970 PST
+ 696 | 6 | 00696 | Tue Apr 07 00:00:00 1970 PST
+ 697 | 407 | 00697_update7 | Wed Apr 08 00:00:00 1970 PST
+ 698 | 8 | 00698 | Thu Apr 09 00:00:00 1970 PST
+ 699 | 509 | 00699_update9 | Fri Apr 10 00:00:00 1970 PST
+ 700 | 0 | 00700 | Thu Jan 01 00:00:00 1970 PST
+ 701 | 1 | 00701 | Fri Jan 02 00:00:00 1970 PST
+ 703 | 303 | 00703_update3 | Sun Jan 04 00:00:00 1970 PST
+ 704 | 4 | 00704 | Mon Jan 05 00:00:00 1970 PST
+ 706 | 6 | 00706 | Wed Jan 07 00:00:00 1970 PST
+ 707 | 407 | 00707_update7 | Thu Jan 08 00:00:00 1970 PST
+ 708 | 8 | 00708 | Fri Jan 09 00:00:00 1970 PST
+ 709 | 509 | 00709_update9 | Sat Jan 10 00:00:00 1970 PST
+ 710 | 0 | 00710 | Sun Jan 11 00:00:00 1970 PST
+ 711 | 1 | 00711 | Mon Jan 12 00:00:00 1970 PST
+ 713 | 303 | 00713_update3 | Wed Jan 14 00:00:00 1970 PST
+ 714 | 4 | 00714 | Thu Jan 15 00:00:00 1970 PST
+ 716 | 6 | 00716 | Sat Jan 17 00:00:00 1970 PST
+ 717 | 407 | 00717_update7 | Sun Jan 18 00:00:00 1970 PST
+ 718 | 8 | 00718 | Mon Jan 19 00:00:00 1970 PST
+ 719 | 509 | 00719_update9 | Tue Jan 20 00:00:00 1970 PST
+ 720 | 0 | 00720 | Wed Jan 21 00:00:00 1970 PST
+ 721 | 1 | 00721 | Thu Jan 22 00:00:00 1970 PST
+ 723 | 303 | 00723_update3 | Sat Jan 24 00:00:00 1970 PST
+ 724 | 4 | 00724 | Sun Jan 25 00:00:00 1970 PST
+ 726 | 6 | 00726 | Tue Jan 27 00:00:00 1970 PST
+ 727 | 407 | 00727_update7 | Wed Jan 28 00:00:00 1970 PST
+ 728 | 8 | 00728 | Thu Jan 29 00:00:00 1970 PST
+ 729 | 509 | 00729_update9 | Fri Jan 30 00:00:00 1970 PST
+ 730 | 0 | 00730 | Sat Jan 31 00:00:00 1970 PST
+ 731 | 1 | 00731 | Sun Feb 01 00:00:00 1970 PST
+ 733 | 303 | 00733_update3 | Tue Feb 03 00:00:00 1970 PST
+ 734 | 4 | 00734 | Wed Feb 04 00:00:00 1970 PST
+ 736 | 6 | 00736 | Fri Feb 06 00:00:00 1970 PST
+ 737 | 407 | 00737_update7 | Sat Feb 07 00:00:00 1970 PST
+ 738 | 8 | 00738 | Sun Feb 08 00:00:00 1970 PST
+ 739 | 509 | 00739_update9 | Mon Feb 09 00:00:00 1970 PST
+ 740 | 0 | 00740 | Tue Feb 10 00:00:00 1970 PST
+ 741 | 1 | 00741 | Wed Feb 11 00:00:00 1970 PST
+ 743 | 303 | 00743_update3 | Fri Feb 13 00:00:00 1970 PST
+ 744 | 4 | 00744 | Sat Feb 14 00:00:00 1970 PST
+ 746 | 6 | 00746 | Mon Feb 16 00:00:00 1970 PST
+ 747 | 407 | 00747_update7 | Tue Feb 17 00:00:00 1970 PST
+ 748 | 8 | 00748 | Wed Feb 18 00:00:00 1970 PST
+ 749 | 509 | 00749_update9 | Thu Feb 19 00:00:00 1970 PST
+ 750 | 0 | 00750 | Fri Feb 20 00:00:00 1970 PST
+ 751 | 1 | 00751 | Sat Feb 21 00:00:00 1970 PST
+ 753 | 303 | 00753_update3 | Mon Feb 23 00:00:00 1970 PST
+ 754 | 4 | 00754 | Tue Feb 24 00:00:00 1970 PST
+ 756 | 6 | 00756 | Thu Feb 26 00:00:00 1970 PST
+ 757 | 407 | 00757_update7 | Fri Feb 27 00:00:00 1970 PST
+ 758 | 8 | 00758 | Sat Feb 28 00:00:00 1970 PST
+ 759 | 509 | 00759_update9 | Sun Mar 01 00:00:00 1970 PST
+ 760 | 0 | 00760 | Mon Mar 02 00:00:00 1970 PST
+ 761 | 1 | 00761 | Tue Mar 03 00:00:00 1970 PST
+ 763 | 303 | 00763_update3 | Thu Mar 05 00:00:00 1970 PST
+ 764 | 4 | 00764 | Fri Mar 06 00:00:00 1970 PST
+ 766 | 6 | 00766 | Sun Mar 08 00:00:00 1970 PST
+ 767 | 407 | 00767_update7 | Mon Mar 09 00:00:00 1970 PST
+ 768 | 8 | 00768 | Tue Mar 10 00:00:00 1970 PST
+ 769 | 509 | 00769_update9 | Wed Mar 11 00:00:00 1970 PST
+ 770 | 0 | 00770 | Thu Mar 12 00:00:00 1970 PST
+ 771 | 1 | 00771 | Fri Mar 13 00:00:00 1970 PST
+ 773 | 303 | 00773_update3 | Sun Mar 15 00:00:00 1970 PST
+ 774 | 4 | 00774 | Mon Mar 16 00:00:00 1970 PST
+ 776 | 6 | 00776 | Wed Mar 18 00:00:00 1970 PST
+ 777 | 407 | 00777_update7 | Thu Mar 19 00:00:00 1970 PST
+ 778 | 8 | 00778 | Fri Mar 20 00:00:00 1970 PST
+ 779 | 509 | 00779_update9 | Sat Mar 21 00:00:00 1970 PST
+ 780 | 0 | 00780 | Sun Mar 22 00:00:00 1970 PST
+ 781 | 1 | 00781 | Mon Mar 23 00:00:00 1970 PST
+ 783 | 303 | 00783_update3 | Wed Mar 25 00:00:00 1970 PST
+ 784 | 4 | 00784 | Thu Mar 26 00:00:00 1970 PST
+ 786 | 6 | 00786 | Sat Mar 28 00:00:00 1970 PST
+ 787 | 407 | 00787_update7 | Sun Mar 29 00:00:00 1970 PST
+ 788 | 8 | 00788 | Mon Mar 30 00:00:00 1970 PST
+ 789 | 509 | 00789_update9 | Tue Mar 31 00:00:00 1970 PST
+ 790 | 0 | 00790 | Wed Apr 01 00:00:00 1970 PST
+ 791 | 1 | 00791 | Thu Apr 02 00:00:00 1970 PST
+ 793 | 303 | 00793_update3 | Sat Apr 04 00:00:00 1970 PST
+ 794 | 4 | 00794 | Sun Apr 05 00:00:00 1970 PST
+ 796 | 6 | 00796 | Tue Apr 07 00:00:00 1970 PST
+ 797 | 407 | 00797_update7 | Wed Apr 08 00:00:00 1970 PST
+ 798 | 8 | 00798 | Thu Apr 09 00:00:00 1970 PST
+ 799 | 509 | 00799_update9 | Fri Apr 10 00:00:00 1970 PST
+ 800 | 0 | 00800 | Thu Jan 01 00:00:00 1970 PST
+ 801 | 1 | 00801 | Fri Jan 02 00:00:00 1970 PST
+ 803 | 303 | 00803_update3 | Sun Jan 04 00:00:00 1970 PST
+ 804 | 4 | 00804 | Mon Jan 05 00:00:00 1970 PST
+ 806 | 6 | 00806 | Wed Jan 07 00:00:00 1970 PST
+ 807 | 407 | 00807_update7 | Thu Jan 08 00:00:00 1970 PST
+ 808 | 8 | 00808 | Fri Jan 09 00:00:00 1970 PST
+ 809 | 509 | 00809_update9 | Sat Jan 10 00:00:00 1970 PST
+ 810 | 0 | 00810 | Sun Jan 11 00:00:00 1970 PST
+ 811 | 1 | 00811 | Mon Jan 12 00:00:00 1970 PST
+ 813 | 303 | 00813_update3 | Wed Jan 14 00:00:00 1970 PST
+ 814 | 4 | 00814 | Thu Jan 15 00:00:00 1970 PST
+ 816 | 6 | 00816 | Sat Jan 17 00:00:00 1970 PST
+ 817 | 407 | 00817_update7 | Sun Jan 18 00:00:00 1970 PST
+ 818 | 8 | 00818 | Mon Jan 19 00:00:00 1970 PST
+ 819 | 509 | 00819_update9 | Tue Jan 20 00:00:00 1970 PST
+ 820 | 0 | 00820 | Wed Jan 21 00:00:00 1970 PST
+ 821 | 1 | 00821 | Thu Jan 22 00:00:00 1970 PST
+ 823 | 303 | 00823_update3 | Sat Jan 24 00:00:00 1970 PST
+ 824 | 4 | 00824 | Sun Jan 25 00:00:00 1970 PST
+ 826 | 6 | 00826 | Tue Jan 27 00:00:00 1970 PST
+ 827 | 407 | 00827_update7 | Wed Jan 28 00:00:00 1970 PST
+ 828 | 8 | 00828 | Thu Jan 29 00:00:00 1970 PST
+ 829 | 509 | 00829_update9 | Fri Jan 30 00:00:00 1970 PST
+ 830 | 0 | 00830 | Sat Jan 31 00:00:00 1970 PST
+ 831 | 1 | 00831 | Sun Feb 01 00:00:00 1970 PST
+ 833 | 303 | 00833_update3 | Tue Feb 03 00:00:00 1970 PST
+ 834 | 4 | 00834 | Wed Feb 04 00:00:00 1970 PST
+ 836 | 6 | 00836 | Fri Feb 06 00:00:00 1970 PST
+ 837 | 407 | 00837_update7 | Sat Feb 07 00:00:00 1970 PST
+ 838 | 8 | 00838 | Sun Feb 08 00:00:00 1970 PST
+ 839 | 509 | 00839_update9 | Mon Feb 09 00:00:00 1970 PST
+ 840 | 0 | 00840 | Tue Feb 10 00:00:00 1970 PST
+ 841 | 1 | 00841 | Wed Feb 11 00:00:00 1970 PST
+ 843 | 303 | 00843_update3 | Fri Feb 13 00:00:00 1970 PST
+ 844 | 4 | 00844 | Sat Feb 14 00:00:00 1970 PST
+ 846 | 6 | 00846 | Mon Feb 16 00:00:00 1970 PST
+ 847 | 407 | 00847_update7 | Tue Feb 17 00:00:00 1970 PST
+ 848 | 8 | 00848 | Wed Feb 18 00:00:00 1970 PST
+ 849 | 509 | 00849_update9 | Thu Feb 19 00:00:00 1970 PST
+ 850 | 0 | 00850 | Fri Feb 20 00:00:00 1970 PST
+ 851 | 1 | 00851 | Sat Feb 21 00:00:00 1970 PST
+ 853 | 303 | 00853_update3 | Mon Feb 23 00:00:00 1970 PST
+ 854 | 4 | 00854 | Tue Feb 24 00:00:00 1970 PST
+ 856 | 6 | 00856 | Thu Feb 26 00:00:00 1970 PST
+ 857 | 407 | 00857_update7 | Fri Feb 27 00:00:00 1970 PST
+ 858 | 8 | 00858 | Sat Feb 28 00:00:00 1970 PST
+ 859 | 509 | 00859_update9 | Sun Mar 01 00:00:00 1970 PST
+ 860 | 0 | 00860 | Mon Mar 02 00:00:00 1970 PST
+ 861 | 1 | 00861 | Tue Mar 03 00:00:00 1970 PST
+ 863 | 303 | 00863_update3 | Thu Mar 05 00:00:00 1970 PST
+ 864 | 4 | 00864 | Fri Mar 06 00:00:00 1970 PST
+ 866 | 6 | 00866 | Sun Mar 08 00:00:00 1970 PST
+ 867 | 407 | 00867_update7 | Mon Mar 09 00:00:00 1970 PST
+ 868 | 8 | 00868 | Tue Mar 10 00:00:00 1970 PST
+ 869 | 509 | 00869_update9 | Wed Mar 11 00:00:00 1970 PST
+ 870 | 0 | 00870 | Thu Mar 12 00:00:00 1970 PST
+ 871 | 1 | 00871 | Fri Mar 13 00:00:00 1970 PST
+ 873 | 303 | 00873_update3 | Sun Mar 15 00:00:00 1970 PST
+ 874 | 4 | 00874 | Mon Mar 16 00:00:00 1970 PST
+ 876 | 6 | 00876 | Wed Mar 18 00:00:00 1970 PST
+ 877 | 407 | 00877_update7 | Thu Mar 19 00:00:00 1970 PST
+ 878 | 8 | 00878 | Fri Mar 20 00:00:00 1970 PST
+ 879 | 509 | 00879_update9 | Sat Mar 21 00:00:00 1970 PST
+ 880 | 0 | 00880 | Sun Mar 22 00:00:00 1970 PST
+ 881 | 1 | 00881 | Mon Mar 23 00:00:00 1970 PST
+ 883 | 303 | 00883_update3 | Wed Mar 25 00:00:00 1970 PST
+ 884 | 4 | 00884 | Thu Mar 26 00:00:00 1970 PST
+ 886 | 6 | 00886 | Sat Mar 28 00:00:00 1970 PST
+ 887 | 407 | 00887_update7 | Sun Mar 29 00:00:00 1970 PST
+ 888 | 8 | 00888 | Mon Mar 30 00:00:00 1970 PST
+ 889 | 509 | 00889_update9 | Tue Mar 31 00:00:00 1970 PST
+ 890 | 0 | 00890 | Wed Apr 01 00:00:00 1970 PST
+ 891 | 1 | 00891 | Thu Apr 02 00:00:00 1970 PST
+ 893 | 303 | 00893_update3 | Sat Apr 04 00:00:00 1970 PST
+ 894 | 4 | 00894 | Sun Apr 05 00:00:00 1970 PST
+ 896 | 6 | 00896 | Tue Apr 07 00:00:00 1970 PST
+ 897 | 407 | 00897_update7 | Wed Apr 08 00:00:00 1970 PST
+ 898 | 8 | 00898 | Thu Apr 09 00:00:00 1970 PST
+ 899 | 509 | 00899_update9 | Fri Apr 10 00:00:00 1970 PST
+ 900 | 0 | 00900 | Thu Jan 01 00:00:00 1970 PST
+ 901 | 1 | 00901 | Fri Jan 02 00:00:00 1970 PST
+ 903 | 303 | 00903_update3 | Sun Jan 04 00:00:00 1970 PST
+ 904 | 4 | 00904 | Mon Jan 05 00:00:00 1970 PST
+ 906 | 6 | 00906 | Wed Jan 07 00:00:00 1970 PST
+ 907 | 407 | 00907_update7 | Thu Jan 08 00:00:00 1970 PST
+ 908 | 8 | 00908 | Fri Jan 09 00:00:00 1970 PST
+ 909 | 509 | 00909_update9 | Sat Jan 10 00:00:00 1970 PST
+ 910 | 0 | 00910 | Sun Jan 11 00:00:00 1970 PST
+ 911 | 1 | 00911 | Mon Jan 12 00:00:00 1970 PST
+ 913 | 303 | 00913_update3 | Wed Jan 14 00:00:00 1970 PST
+ 914 | 4 | 00914 | Thu Jan 15 00:00:00 1970 PST
+ 916 | 6 | 00916 | Sat Jan 17 00:00:00 1970 PST
+ 917 | 407 | 00917_update7 | Sun Jan 18 00:00:00 1970 PST
+ 918 | 8 | 00918 | Mon Jan 19 00:00:00 1970 PST
+ 919 | 509 | 00919_update9 | Tue Jan 20 00:00:00 1970 PST
+ 920 | 0 | 00920 | Wed Jan 21 00:00:00 1970 PST
+ 921 | 1 | 00921 | Thu Jan 22 00:00:00 1970 PST
+ 923 | 303 | 00923_update3 | Sat Jan 24 00:00:00 1970 PST
+ 924 | 4 | 00924 | Sun Jan 25 00:00:00 1970 PST
+ 926 | 6 | 00926 | Tue Jan 27 00:00:00 1970 PST
+ 927 | 407 | 00927_update7 | Wed Jan 28 00:00:00 1970 PST
+ 928 | 8 | 00928 | Thu Jan 29 00:00:00 1970 PST
+ 929 | 509 | 00929_update9 | Fri Jan 30 00:00:00 1970 PST
+ 930 | 0 | 00930 | Sat Jan 31 00:00:00 1970 PST
+ 931 | 1 | 00931 | Sun Feb 01 00:00:00 1970 PST
+ 933 | 303 | 00933_update3 | Tue Feb 03 00:00:00 1970 PST
+ 934 | 4 | 00934 | Wed Feb 04 00:00:00 1970 PST
+ 936 | 6 | 00936 | Fri Feb 06 00:00:00 1970 PST
+ 937 | 407 | 00937_update7 | Sat Feb 07 00:00:00 1970 PST
+ 938 | 8 | 00938 | Sun Feb 08 00:00:00 1970 PST
+ 939 | 509 | 00939_update9 | Mon Feb 09 00:00:00 1970 PST
+ 940 | 0 | 00940 | Tue Feb 10 00:00:00 1970 PST
+ 941 | 1 | 00941 | Wed Feb 11 00:00:00 1970 PST
+ 943 | 303 | 00943_update3 | Fri Feb 13 00:00:00 1970 PST
+ 944 | 4 | 00944 | Sat Feb 14 00:00:00 1970 PST
+ 946 | 6 | 00946 | Mon Feb 16 00:00:00 1970 PST
+ 947 | 407 | 00947_update7 | Tue Feb 17 00:00:00 1970 PST
+ 948 | 8 | 00948 | Wed Feb 18 00:00:00 1970 PST
+ 949 | 509 | 00949_update9 | Thu Feb 19 00:00:00 1970 PST
+ 950 | 0 | 00950 | Fri Feb 20 00:00:00 1970 PST
+ 951 | 1 | 00951 | Sat Feb 21 00:00:00 1970 PST
+ 953 | 303 | 00953_update3 | Mon Feb 23 00:00:00 1970 PST
+ 954 | 4 | 00954 | Tue Feb 24 00:00:00 1970 PST
+ 956 | 6 | 00956 | Thu Feb 26 00:00:00 1970 PST
+ 957 | 407 | 00957_update7 | Fri Feb 27 00:00:00 1970 PST
+ 958 | 8 | 00958 | Sat Feb 28 00:00:00 1970 PST
+ 959 | 509 | 00959_update9 | Sun Mar 01 00:00:00 1970 PST
+ 960 | 0 | 00960 | Mon Mar 02 00:00:00 1970 PST
+ 961 | 1 | 00961 | Tue Mar 03 00:00:00 1970 PST
+ 963 | 303 | 00963_update3 | Thu Mar 05 00:00:00 1970 PST
+ 964 | 4 | 00964 | Fri Mar 06 00:00:00 1970 PST
+ 966 | 6 | 00966 | Sun Mar 08 00:00:00 1970 PST
+ 967 | 407 | 00967_update7 | Mon Mar 09 00:00:00 1970 PST
+ 968 | 8 | 00968 | Tue Mar 10 00:00:00 1970 PST
+ 969 | 509 | 00969_update9 | Wed Mar 11 00:00:00 1970 PST
+ 970 | 0 | 00970 | Thu Mar 12 00:00:00 1970 PST
+ 971 | 1 | 00971 | Fri Mar 13 00:00:00 1970 PST
+ 973 | 303 | 00973_update3 | Sun Mar 15 00:00:00 1970 PST
+ 974 | 4 | 00974 | Mon Mar 16 00:00:00 1970 PST
+ 976 | 6 | 00976 | Wed Mar 18 00:00:00 1970 PST
+ 977 | 407 | 00977_update7 | Thu Mar 19 00:00:00 1970 PST
+ 978 | 8 | 00978 | Fri Mar 20 00:00:00 1970 PST
+ 979 | 509 | 00979_update9 | Sat Mar 21 00:00:00 1970 PST
+ 980 | 0 | 00980 | Sun Mar 22 00:00:00 1970 PST
+ 981 | 1 | 00981 | Mon Mar 23 00:00:00 1970 PST
+ 983 | 303 | 00983_update3 | Wed Mar 25 00:00:00 1970 PST
+ 984 | 4 | 00984 | Thu Mar 26 00:00:00 1970 PST
+ 986 | 6 | 00986 | Sat Mar 28 00:00:00 1970 PST
+ 987 | 407 | 00987_update7 | Sun Mar 29 00:00:00 1970 PST
+ 988 | 8 | 00988 | Mon Mar 30 00:00:00 1970 PST
+ 989 | 509 | 00989_update9 | Tue Mar 31 00:00:00 1970 PST
+ 990 | 0 | 00990 | Wed Apr 01 00:00:00 1970 PST
+ 991 | 1 | 00991 | Thu Apr 02 00:00:00 1970 PST
+ 993 | 303 | 00993_update3 | Sat Apr 04 00:00:00 1970 PST
+ 994 | 4 | 00994 | Sun Apr 05 00:00:00 1970 PST
+ 996 | 6 | 00996 | Tue Apr 07 00:00:00 1970 PST
+ 997 | 407 | 00997_update7 | Wed Apr 08 00:00:00 1970 PST
+ 998 | 8 | 00998 | Thu Apr 09 00:00:00 1970 PST
+ 999 | 509 | 00999_update9 | Fri Apr 10 00:00:00 1970 PST
+ 1000 | 0 | 01000 | Thu Jan 01 00:00:00 1970 PST
+ 1001 | 101 | 0000100001 |
+ 1003 | 403 | 0000300003_update3 |
+ 1004 | 104 | 0000400004 |
+ 1006 | 106 | 0000600006 |
+ 1007 | 507 | 0000700007_update7 |
+ 1008 | 108 | 0000800008 |
+ 1009 | 609 | 0000900009_update9 |
+ 1010 | 100 | 0001000010 |
+ 1011 | 101 | 0001100011 |
+ 1013 | 403 | 0001300013_update3 |
+ 1014 | 104 | 0001400014 |
+ 1016 | 106 | 0001600016 |
+ 1017 | 507 | 0001700017_update7 |
+ 1018 | 108 | 0001800018 |
+ 1019 | 609 | 0001900019_update9 |
+ 1020 | 100 | 0002000020 |
+ 1101 | 201 | aaa |
+ 1103 | 503 | ccc_update3 |
+ 1104 | 204 | ddd |
+(819 rows)
+
+EXPLAIN (verbose, costs off)
+INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Insert on public.ft2
+ Output: (ft2.tableoid)::regclass
+ Remote SQL: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ Batch Size: 1
+ -> Result
+ Output: 1200, 999, NULL::integer, 'foo'::text, NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft2 '::character(10), NULL::user_enum
+(6 rows)
+
+INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass;
+ tableoid
+----------
+ ft2
+(1 row)
+
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Update on public.ft2
+ Output: (tableoid)::regclass
+ -> Foreign Update on public.ft2
+ Remote SQL: UPDATE "S 1"."T 1" SET c3 = 'bar'::text WHERE (("C 1" = 1200))
+(4 rows)
+
+UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass;
+ tableoid
+----------
+ ft2
+(1 row)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down
+ QUERY PLAN
+--------------------------------------------------------------------
+ Delete on public.ft2
+ Output: (tableoid)::regclass
+ -> Foreign Delete on public.ft2
+ Remote SQL: DELETE FROM "S 1"."T 1" WHERE (("C 1" = 1200))
+(4 rows)
+
+DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass;
+ tableoid
+----------
+ ft2
+(1 row)
+
+-- Test UPDATE/DELETE with RETURNING on a three-table join
+INSERT INTO ft2 (c1,c2,c3)
+ SELECT id, id - 1200, to_char(id, 'FM00000') FROM generate_series(1201, 1300) id;
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c3 = 'foo'
+ FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1
+ RETURNING ft2, ft2.*, ft4, ft4.*; -- can be pushed down
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Update on public.ft2
+ Output: ft2.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.*, ft4.c1, ft4.c2, ft4.c3
+ -> Foreign Update
+ Remote SQL: UPDATE "S 1"."T 1" r1 SET c3 = 'foo'::text FROM ("S 1"."T 3" r2 INNER JOIN "S 1"."T 4" r3 ON (TRUE)) WHERE ((r2.c1 = r3.c1)) AND ((r1.c2 = r2.c1)) AND ((r1."C 1" > 1200)) RETURNING r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2.c1, r2.c2, r2.c3) END, r2.c1, r2.c2, r2.c3
+(4 rows)
+
+UPDATE ft2 SET c3 = 'foo'
+ FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1
+ RETURNING ft2, ft2.*, ft4, ft4.*;
+ ft2 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | ft4 | c1 | c2 | c3
+--------------------------------+------+----+-----+----+----+----+------------+----+----------------+----+----+--------
+ (1206,6,foo,,,,"ft2 ",) | 1206 | 6 | foo | | | | ft2 | | (6,7,AAA006) | 6 | 7 | AAA006
+ (1212,12,foo,,,,"ft2 ",) | 1212 | 12 | foo | | | | ft2 | | (12,13,AAA012) | 12 | 13 | AAA012
+ (1218,18,foo,,,,"ft2 ",) | 1218 | 18 | foo | | | | ft2 | | (18,19,AAA018) | 18 | 19 | AAA018
+ (1224,24,foo,,,,"ft2 ",) | 1224 | 24 | foo | | | | ft2 | | (24,25,AAA024) | 24 | 25 | AAA024
+ (1230,30,foo,,,,"ft2 ",) | 1230 | 30 | foo | | | | ft2 | | (30,31,AAA030) | 30 | 31 | AAA030
+ (1236,36,foo,,,,"ft2 ",) | 1236 | 36 | foo | | | | ft2 | | (36,37,AAA036) | 36 | 37 | AAA036
+ (1242,42,foo,,,,"ft2 ",) | 1242 | 42 | foo | | | | ft2 | | (42,43,AAA042) | 42 | 43 | AAA042
+ (1248,48,foo,,,,"ft2 ",) | 1248 | 48 | foo | | | | ft2 | | (48,49,AAA048) | 48 | 49 | AAA048
+ (1254,54,foo,,,,"ft2 ",) | 1254 | 54 | foo | | | | ft2 | | (54,55,AAA054) | 54 | 55 | AAA054
+ (1260,60,foo,,,,"ft2 ",) | 1260 | 60 | foo | | | | ft2 | | (60,61,AAA060) | 60 | 61 | AAA060
+ (1266,66,foo,,,,"ft2 ",) | 1266 | 66 | foo | | | | ft2 | | (66,67,AAA066) | 66 | 67 | AAA066
+ (1272,72,foo,,,,"ft2 ",) | 1272 | 72 | foo | | | | ft2 | | (72,73,AAA072) | 72 | 73 | AAA072
+ (1278,78,foo,,,,"ft2 ",) | 1278 | 78 | foo | | | | ft2 | | (78,79,AAA078) | 78 | 79 | AAA078
+ (1284,84,foo,,,,"ft2 ",) | 1284 | 84 | foo | | | | ft2 | | (84,85,AAA084) | 84 | 85 | AAA084
+ (1290,90,foo,,,,"ft2 ",) | 1290 | 90 | foo | | | | ft2 | | (90,91,AAA090) | 90 | 91 | AAA090
+ (1296,96,foo,,,,"ft2 ",) | 1296 | 96 | foo | | | | ft2 | | (96,97,AAA096) | 96 | 97 | AAA096
+(16 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM ft2
+ USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1
+ RETURNING 100; -- can be pushed down
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Delete on public.ft2
+ Output: 100
+ -> Foreign Delete
+ Remote SQL: DELETE FROM "S 1"."T 1" r1 USING ("S 1"."T 3" r2 LEFT JOIN "S 1"."T 4" r3 ON (((r2.c1 = r3.c1)))) WHERE ((r1.c2 = r2.c1)) AND ((r1."C 1" > 1200)) AND (((r1."C 1" % 10) = 0))
+(4 rows)
+
+DELETE FROM ft2
+ USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1
+ RETURNING 100;
+ ?column?
+----------
+ 100
+ 100
+ 100
+ 100
+ 100
+ 100
+ 100
+ 100
+ 100
+ 100
+(10 rows)
+
+DELETE FROM ft2 WHERE ft2.c1 > 1200;
+-- Test UPDATE with a MULTIEXPR sub-select
+-- (maybe someday this'll be remotely executable, but not today)
+EXPLAIN (verbose, costs off)
+UPDATE ft2 AS target SET (c2, c7) = (
+ SELECT c2 * 10, c7
+ FROM ft2 AS src
+ WHERE target.c1 = src.c1
+) WHERE c1 > 1100;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------
+ Update on public.ft2 target
+ Remote SQL: UPDATE "S 1"."T 1" SET c2 = $2, c7 = $3 WHERE ctid = $1
+ -> Foreign Scan on public.ft2 target
+ Output: $1, $2, (SubPlan 1 (returns $1,$2)), target.ctid, target.*
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" WHERE (("C 1" > 1100)) FOR UPDATE
+ SubPlan 1 (returns $1,$2)
+ -> Foreign Scan on public.ft2 src
+ Output: (src.c2 * 10), src.c7
+ Remote SQL: SELECT c2, c7 FROM "S 1"."T 1" WHERE (($1::integer = "C 1"))
+(9 rows)
+
+UPDATE ft2 AS target SET (c2, c7) = (
+ SELECT c2 * 10, c7
+ FROM ft2 AS src
+ WHERE target.c1 = src.c1
+) WHERE c1 > 1100;
+UPDATE ft2 AS target SET (c2) = (
+ SELECT c2 / 10
+ FROM ft2 AS src
+ WHERE target.c1 = src.c1
+) WHERE c1 > 1100;
+-- Test UPDATE involving a join that can be pushed down,
+-- but a SET clause that can't be
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE ft2 d SET c2 = CASE WHEN random() >= 0 THEN d.c2 ELSE 0 END
+ FROM ft2 AS t WHERE d.c1 = t.c1 AND d.c1 > 1000;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Update on public.ft2 d
+ Remote SQL: UPDATE "S 1"."T 1" SET c2 = $2 WHERE ctid = $1
+ -> Foreign Scan
+ Output: CASE WHEN (random() >= '0'::double precision) THEN d.c2 ELSE 0 END, d.ctid, d.*, t.*
+ Relations: (public.ft2 d) INNER JOIN (public.ft2 t)
+ Remote SQL: SELECT r1.c2, r1.ctid, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")) AND ((r1."C 1" > 1000)))) FOR UPDATE OF r1
+ -> Hash Join
+ Output: d.c2, d.ctid, d.*, t.*
+ Hash Cond: (d.c1 = t.c1)
+ -> Foreign Scan on public.ft2 d
+ Output: d.c2, d.ctid, d.*, d.c1
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" WHERE (("C 1" > 1000)) ORDER BY "C 1" ASC NULLS LAST FOR UPDATE
+ -> Hash
+ Output: t.*, t.c1
+ -> Foreign Scan on public.ft2 t
+ Output: t.*, t.c1
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(17 rows)
+
+UPDATE ft2 d SET c2 = CASE WHEN random() >= 0 THEN d.c2 ELSE 0 END
+ FROM ft2 AS t WHERE d.c1 = t.c1 AND d.c1 > 1000;
+-- Test UPDATE/DELETE with WHERE or JOIN/ON conditions containing
+-- user-defined operators/functions
+ALTER SERVER loopback OPTIONS (DROP extensions);
+INSERT INTO ft2 (c1,c2,c3)
+ SELECT id, id % 10, to_char(id, 'FM00000') FROM generate_series(2001, 2010) id;
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c3 = 'bar' WHERE postgres_fdw_abs(c1) > 2000 RETURNING *; -- can't be pushed down
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Update on public.ft2
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: UPDATE "S 1"."T 1" SET c3 = $2 WHERE ctid = $1 RETURNING "C 1", c2, c3, c4, c5, c6, c7, c8
+ -> Foreign Scan on public.ft2
+ Output: 'bar'::text, ctid, ft2.*
+ Filter: (postgres_fdw_abs(ft2.c1) > 2000)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" FOR UPDATE
+(7 rows)
+
+UPDATE ft2 SET c3 = 'bar' WHERE postgres_fdw_abs(c1) > 2000 RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+----+-----+----+----+----+------------+----
+ 2001 | 1 | bar | | | | ft2 |
+ 2002 | 2 | bar | | | | ft2 |
+ 2003 | 3 | bar | | | | ft2 |
+ 2004 | 4 | bar | | | | ft2 |
+ 2005 | 5 | bar | | | | ft2 |
+ 2006 | 6 | bar | | | | ft2 |
+ 2007 | 7 | bar | | | | ft2 |
+ 2008 | 8 | bar | | | | ft2 |
+ 2009 | 9 | bar | | | | ft2 |
+ 2010 | 0 | bar | | | | ft2 |
+(10 rows)
+
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c3 = 'baz'
+ FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 2000 AND ft2.c2 === ft4.c1
+ RETURNING ft2.*, ft4.*, ft5.*; -- can't be pushed down
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Update on public.ft2
+ Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3
+ Remote SQL: UPDATE "S 1"."T 1" SET c3 = $2 WHERE ctid = $1 RETURNING "C 1", c2, c3, c4, c5, c6, c7, c8
+ -> Nested Loop
+ Output: 'baz'::text, ft2.ctid, ft2.*, ft4.*, ft5.*, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3
+ Join Filter: (ft2.c2 === ft4.c1)
+ -> Foreign Scan on public.ft2
+ Output: ft2.ctid, ft2.*, ft2.c2
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" WHERE (("C 1" > 2000)) FOR UPDATE
+ -> Foreign Scan
+ Output: ft4.*, ft4.c1, ft4.c2, ft4.c3, ft5.*, ft5.c1, ft5.c2, ft5.c3
+ Relations: (public.ft4) INNER JOIN (public.ft5)
+ Remote SQL: SELECT CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2.c1, r2.c2, r2.c3) END, r2.c1, r2.c2, r2.c3, CASE WHEN (r3.*)::text IS NOT NULL THEN ROW(r3.c1, r3.c2, r3.c3) END, r3.c1, r3.c2, r3.c3 FROM ("S 1"."T 3" r2 INNER JOIN "S 1"."T 4" r3 ON (((r2.c1 = r3.c1))))
+ -> Hash Join
+ Output: ft4.*, ft4.c1, ft4.c2, ft4.c3, ft5.*, ft5.c1, ft5.c2, ft5.c3
+ Hash Cond: (ft4.c1 = ft5.c1)
+ -> Foreign Scan on public.ft4
+ Output: ft4.*, ft4.c1, ft4.c2, ft4.c3
+ Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3"
+ -> Hash
+ Output: ft5.*, ft5.c1, ft5.c2, ft5.c3
+ -> Foreign Scan on public.ft5
+ Output: ft5.*, ft5.c1, ft5.c2, ft5.c3
+ Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4"
+(24 rows)
+
+UPDATE ft2 SET c3 = 'baz'
+ FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 2000 AND ft2.c2 === ft4.c1
+ RETURNING ft2.*, ft4.*, ft5.*;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c1 | c2 | c3
+------+----+-----+----+----+----+------------+----+----+----+--------+----+----+--------
+ 2006 | 6 | baz | | | | ft2 | | 6 | 7 | AAA006 | 6 | 7 | AAA006
+(1 row)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM ft2
+ USING ft4 INNER JOIN ft5 ON (ft4.c1 === ft5.c1)
+ WHERE ft2.c1 > 2000 AND ft2.c2 = ft4.c1
+ RETURNING ft2.c1, ft2.c2, ft2.c3; -- can't be pushed down
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Delete on public.ft2
+ Output: ft2.c1, ft2.c2, ft2.c3
+ Remote SQL: DELETE FROM "S 1"."T 1" WHERE ctid = $1 RETURNING "C 1", c2, c3
+ -> Foreign Scan
+ Output: ft2.ctid, ft4.*, ft5.*
+ Filter: (ft4.c1 === ft5.c1)
+ Relations: ((public.ft2) INNER JOIN (public.ft4)) INNER JOIN (public.ft5)
+ Remote SQL: SELECT r1.ctid, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2.c1, r2.c2, r2.c3) END, CASE WHEN (r3.*)::text IS NOT NULL THEN ROW(r3.c1, r3.c2, r3.c3) END, r2.c1, r3.c1 FROM (("S 1"."T 1" r1 INNER JOIN "S 1"."T 3" r2 ON (((r1.c2 = r2.c1)) AND ((r1."C 1" > 2000)))) INNER JOIN "S 1"."T 4" r3 ON (TRUE)) FOR UPDATE OF r1
+ -> Nested Loop
+ Output: ft2.ctid, ft4.*, ft5.*, ft4.c1, ft5.c1
+ -> Nested Loop
+ Output: ft2.ctid, ft4.*, ft4.c1
+ Join Filter: (ft2.c2 = ft4.c1)
+ -> Foreign Scan on public.ft2
+ Output: ft2.ctid, ft2.c2
+ Remote SQL: SELECT c2, ctid FROM "S 1"."T 1" WHERE (("C 1" > 2000)) FOR UPDATE
+ -> Foreign Scan on public.ft4
+ Output: ft4.*, ft4.c1
+ Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3"
+ -> Foreign Scan on public.ft5
+ Output: ft5.*, ft5.c1
+ Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4"
+(22 rows)
+
+DELETE FROM ft2
+ USING ft4 INNER JOIN ft5 ON (ft4.c1 === ft5.c1)
+ WHERE ft2.c1 > 2000 AND ft2.c2 = ft4.c1
+ RETURNING ft2.c1, ft2.c2, ft2.c3;
+ c1 | c2 | c3
+------+----+-----
+ 2006 | 6 | baz
+(1 row)
+
+DELETE FROM ft2 WHERE ft2.c1 > 2000;
+ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw');
+-- Test that trigger on remote table works as expected
+CREATE OR REPLACE FUNCTION "S 1".F_BRTRIG() RETURNS trigger AS $$
+BEGIN
+ NEW.c3 = NEW.c3 || '_trig_update';
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER t1_br_insert BEFORE INSERT OR UPDATE
+ ON "S 1"."T 1" FOR EACH ROW EXECUTE PROCEDURE "S 1".F_BRTRIG();
+INSERT INTO ft2 (c1,c2,c3) VALUES (1208, 818, 'fff') RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+-----------------+----+----+----+------------+----
+ 1208 | 818 | fff_trig_update | | | | ft2 |
+(1 row)
+
+INSERT INTO ft2 (c1,c2,c3,c6) VALUES (1218, 818, 'ggg', '(--;') RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+-----------------+----+----+------+------------+----
+ 1218 | 818 | ggg_trig_update | | | (--; | ft2 |
+(1 row)
+
+UPDATE ft2 SET c2 = c2 + 600 WHERE c1 % 10 = 8 AND c1 < 1200 RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+------------------------+------------------------------+--------------------------+----+------------+-----
+ 8 | 608 | 00008_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 18 | 608 | 00018_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 28 | 608 | 00028_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 38 | 608 | 00038_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 48 | 608 | 00048_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 58 | 608 | 00058_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 68 | 608 | 00068_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 78 | 608 | 00078_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 88 | 608 | 00088_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 98 | 608 | 00098_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 108 | 608 | 00108_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 118 | 608 | 00118_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 128 | 608 | 00128_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 138 | 608 | 00138_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 148 | 608 | 00148_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 158 | 608 | 00158_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 168 | 608 | 00168_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 178 | 608 | 00178_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 188 | 608 | 00188_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 198 | 608 | 00198_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 208 | 608 | 00208_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 218 | 608 | 00218_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 228 | 608 | 00228_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 238 | 608 | 00238_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 248 | 608 | 00248_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 258 | 608 | 00258_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 268 | 608 | 00268_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 278 | 608 | 00278_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 288 | 608 | 00288_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 298 | 608 | 00298_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 308 | 608 | 00308_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 318 | 608 | 00318_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 328 | 608 | 00328_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 338 | 608 | 00338_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 348 | 608 | 00348_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 358 | 608 | 00358_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 368 | 608 | 00368_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 378 | 608 | 00378_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 388 | 608 | 00388_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 398 | 608 | 00398_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 408 | 608 | 00408_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 418 | 608 | 00418_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 428 | 608 | 00428_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 438 | 608 | 00438_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 448 | 608 | 00448_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 458 | 608 | 00458_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 468 | 608 | 00468_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 478 | 608 | 00478_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 488 | 608 | 00488_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 498 | 608 | 00498_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 508 | 608 | 00508_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 518 | 608 | 00518_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 528 | 608 | 00528_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 538 | 608 | 00538_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 548 | 608 | 00548_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 558 | 608 | 00558_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 568 | 608 | 00568_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 578 | 608 | 00578_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 588 | 608 | 00588_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 598 | 608 | 00598_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 608 | 608 | 00608_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 618 | 608 | 00618_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 628 | 608 | 00628_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 638 | 608 | 00638_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 648 | 608 | 00648_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 658 | 608 | 00658_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 668 | 608 | 00668_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 678 | 608 | 00678_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 688 | 608 | 00688_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 698 | 608 | 00698_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 708 | 608 | 00708_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 718 | 608 | 00718_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 728 | 608 | 00728_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 738 | 608 | 00738_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 748 | 608 | 00748_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 758 | 608 | 00758_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 768 | 608 | 00768_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 778 | 608 | 00778_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 788 | 608 | 00788_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 798 | 608 | 00798_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 808 | 608 | 00808_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 818 | 608 | 00818_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 828 | 608 | 00828_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 838 | 608 | 00838_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 848 | 608 | 00848_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 858 | 608 | 00858_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 868 | 608 | 00868_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 878 | 608 | 00878_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 888 | 608 | 00888_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 898 | 608 | 00898_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 908 | 608 | 00908_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 918 | 608 | 00918_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 928 | 608 | 00928_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 938 | 608 | 00938_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 948 | 608 | 00948_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 958 | 608 | 00958_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 968 | 608 | 00968_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 978 | 608 | 00978_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 988 | 608 | 00988_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 998 | 608 | 00998_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 1008 | 708 | 0000800008_trig_update | | | | ft2 |
+ 1018 | 708 | 0001800018_trig_update | | | | ft2 |
+(102 rows)
+
+-- Test errors thrown on remote side during update
+ALTER TABLE "S 1"."T 1" ADD CONSTRAINT c2positive CHECK (c2 >= 0);
+INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key
+ERROR: duplicate key value violates unique constraint "t1_pkey"
+DETAIL: Key ("C 1")=(11) already exists.
+CONTEXT: remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT DO NOTHING; -- works
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO NOTHING; -- unsupported
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO UPDATE SET c3 = 'ffg'; -- unsupported
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
+ERROR: new row for relation "T 1" violates check constraint "c2positive"
+DETAIL: Failing row contains (1111, -2, null, null, null, null, ft1 , null).
+CONTEXT: remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
+ERROR: new row for relation "T 1" violates check constraint "c2positive"
+DETAIL: Failing row contains (1, -1, 00001_trig_update, 1970-01-02 08:00:00+00, 1970-01-02 00:00:00, 1, 1 , foo).
+CONTEXT: remote SQL command: UPDATE "S 1"."T 1" SET c2 = (- c2) WHERE (("C 1" = 1))
+-- Test savepoint/rollback behavior
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 0 | 100
+ 1 | 100
+ 4 | 100
+ 6 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 0 | 100
+ 1 | 100
+ 4 | 100
+ 6 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+begin;
+update ft2 set c2 = 42 where c2 = 0;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 4 | 100
+ 6 | 100
+ 42 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+savepoint s1;
+update ft2 set c2 = 44 where c2 = 4;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+release savepoint s1;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+savepoint s2;
+update ft2 set c2 = 46 where c2 = 6;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 42 | 100
+ 44 | 100
+ 46 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+rollback to savepoint s2;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+release savepoint s2;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+savepoint s3;
+update ft2 set c2 = -2 where c2 = 42 and c1 = 10; -- fail on remote side
+ERROR: new row for relation "T 1" violates check constraint "c2positive"
+DETAIL: Failing row contains (10, -2, 00010_trig_update_trig_update, 1970-01-11 08:00:00+00, 1970-01-11 00:00:00, 0, 0 , foo).
+CONTEXT: remote SQL command: UPDATE "S 1"."T 1" SET c2 = (-2) WHERE ((c2 = 42)) AND (("C 1" = 10))
+rollback to savepoint s3;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+release savepoint s3;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+-- none of the above is committed yet remotely
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 0 | 100
+ 1 | 100
+ 4 | 100
+ 6 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+commit;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+VACUUM ANALYZE "S 1"."T 1";
+-- Above DMLs add data with c6 as NULL in ft1, so test ORDER BY NULLS LAST and NULLs
+-- FIRST behavior here.
+-- ORDER BY DESC NULLS LAST options
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 ORDER BY c6 DESC NULLS LAST, c1 OFFSET 795 LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c6 DESC NULLS LAST, "C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 795::bigint
+(3 rows)
+
+SELECT * FROM ft1 ORDER BY c6 DESC NULLS LAST, c1 OFFSET 795 LIMIT 10;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+--------------------+------------------------------+--------------------------+------+------------+-----
+ 960 | 42 | 00960_trig_update | Mon Mar 02 00:00:00 1970 PST | Mon Mar 02 00:00:00 1970 | 0 | 0 | foo
+ 970 | 42 | 00970_trig_update | Thu Mar 12 00:00:00 1970 PST | Thu Mar 12 00:00:00 1970 | 0 | 0 | foo
+ 980 | 42 | 00980_trig_update | Sun Mar 22 00:00:00 1970 PST | Sun Mar 22 00:00:00 1970 | 0 | 0 | foo
+ 990 | 42 | 00990_trig_update | Wed Apr 01 00:00:00 1970 PST | Wed Apr 01 00:00:00 1970 | 0 | 0 | foo
+ 1000 | 42 | 01000_trig_update | Thu Jan 01 00:00:00 1970 PST | Thu Jan 01 00:00:00 1970 | 0 | 0 | foo
+ 1218 | 818 | ggg_trig_update | | | (--; | ft2 |
+ 1001 | 101 | 0000100001 | | | | ft2 |
+ 1003 | 403 | 0000300003_update3 | | | | ft2 |
+ 1004 | 104 | 0000400004 | | | | ft2 |
+ 1006 | 106 | 0000600006 | | | | ft2 |
+(10 rows)
+
+-- ORDER BY DESC NULLS FIRST options
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 ORDER BY c6 DESC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c6 DESC NULLS FIRST, "C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 15::bigint
+(3 rows)
+
+SELECT * FROM ft1 ORDER BY c6 DESC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+-----------------+------------------------------+--------------------------+----+------------+-----
+ 1020 | 100 | 0002000020 | | | | ft2 |
+ 1101 | 201 | aaa | | | | ft2 |
+ 1103 | 503 | ccc_update3 | | | | ft2 |
+ 1104 | 204 | ddd | | | | ft2 |
+ 1208 | 818 | fff_trig_update | | | | ft2 |
+ 9 | 509 | 00009_update9 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | ft2 | foo
+ 19 | 509 | 00019_update9 | Tue Jan 20 00:00:00 1970 PST | Tue Jan 20 00:00:00 1970 | 9 | ft2 | foo
+ 29 | 509 | 00029_update9 | Fri Jan 30 00:00:00 1970 PST | Fri Jan 30 00:00:00 1970 | 9 | ft2 | foo
+ 39 | 509 | 00039_update9 | Mon Feb 09 00:00:00 1970 PST | Mon Feb 09 00:00:00 1970 | 9 | ft2 | foo
+ 49 | 509 | 00049_update9 | Thu Feb 19 00:00:00 1970 PST | Thu Feb 19 00:00:00 1970 | 9 | ft2 | foo
+(10 rows)
+
+-- ORDER BY ASC NULLS FIRST options
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 ORDER BY c6 ASC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" ORDER BY c6 ASC NULLS FIRST, "C 1" ASC NULLS LAST LIMIT 10::bigint OFFSET 15::bigint
+(3 rows)
+
+SELECT * FROM ft1 ORDER BY c6 ASC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+-------------------+------------------------------+--------------------------+------+------------+-----
+ 1020 | 100 | 0002000020 | | | | ft2 |
+ 1101 | 201 | aaa | | | | ft2 |
+ 1103 | 503 | ccc_update3 | | | | ft2 |
+ 1104 | 204 | ddd | | | | ft2 |
+ 1208 | 818 | fff_trig_update | | | | ft2 |
+ 1218 | 818 | ggg_trig_update | | | (--; | ft2 |
+ 10 | 42 | 00010_trig_update | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
+ 20 | 42 | 00020_trig_update | Wed Jan 21 00:00:00 1970 PST | Wed Jan 21 00:00:00 1970 | 0 | 0 | foo
+ 30 | 42 | 00030_trig_update | Sat Jan 31 00:00:00 1970 PST | Sat Jan 31 00:00:00 1970 | 0 | 0 | foo
+ 40 | 42 | 00040_trig_update | Tue Feb 10 00:00:00 1970 PST | Tue Feb 10 00:00:00 1970 | 0 | 0 | foo
+(10 rows)
+
+-- ===================================================================
+-- test check constraints
+-- ===================================================================
+-- Consistent check constraints provide consistent results
+ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c2positive CHECK (c2 >= 0);
+EXPLAIN (VERBOSE, COSTS OFF) SELECT count(*) FROM ft1 WHERE c2 < 0;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Foreign Scan
+ Output: (count(*))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT count(*) FROM "S 1"."T 1" WHERE ((c2 < 0))
+(4 rows)
+
+SELECT count(*) FROM ft1 WHERE c2 < 0;
+ count
+-------
+ 0
+(1 row)
+
+SET constraint_exclusion = 'on';
+EXPLAIN (VERBOSE, COSTS OFF) SELECT count(*) FROM ft1 WHERE c2 < 0;
+ QUERY PLAN
+--------------------------------
+ Aggregate
+ Output: count(*)
+ -> Result
+ One-Time Filter: false
+(4 rows)
+
+SELECT count(*) FROM ft1 WHERE c2 < 0;
+ count
+-------
+ 0
+(1 row)
+
+RESET constraint_exclusion;
+-- check constraint is enforced on the remote side, not locally
+INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
+ERROR: new row for relation "T 1" violates check constraint "c2positive"
+DETAIL: Failing row contains (1111, -2, null, null, null, null, ft1 , null).
+CONTEXT: remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
+ERROR: new row for relation "T 1" violates check constraint "c2positive"
+DETAIL: Failing row contains (1, -1, 00001_trig_update, 1970-01-02 08:00:00+00, 1970-01-02 00:00:00, 1, 1 , foo).
+CONTEXT: remote SQL command: UPDATE "S 1"."T 1" SET c2 = (- c2) WHERE (("C 1" = 1))
+ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2positive;
+-- But inconsistent check constraints provide inconsistent results
+ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c2negative CHECK (c2 < 0);
+EXPLAIN (VERBOSE, COSTS OFF) SELECT count(*) FROM ft1 WHERE c2 >= 0;
+ QUERY PLAN
+------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(*))
+ Relations: Aggregate on (public.ft1)
+ Remote SQL: SELECT count(*) FROM "S 1"."T 1" WHERE ((c2 >= 0))
+(4 rows)
+
+SELECT count(*) FROM ft1 WHERE c2 >= 0;
+ count
+-------
+ 821
+(1 row)
+
+SET constraint_exclusion = 'on';
+EXPLAIN (VERBOSE, COSTS OFF) SELECT count(*) FROM ft1 WHERE c2 >= 0;
+ QUERY PLAN
+--------------------------------
+ Aggregate
+ Output: count(*)
+ -> Result
+ One-Time Filter: false
+(4 rows)
+
+SELECT count(*) FROM ft1 WHERE c2 >= 0;
+ count
+-------
+ 0
+(1 row)
+
+RESET constraint_exclusion;
+-- local check constraint is not actually enforced
+INSERT INTO ft1(c1, c2) VALUES(1111, 2);
+UPDATE ft1 SET c2 = c2 + 1 WHERE c1 = 1;
+ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2negative;
+-- ===================================================================
+-- test WITH CHECK OPTION constraints
+-- ===================================================================
+CREATE FUNCTION row_before_insupd_trigfunc() RETURNS trigger AS $$BEGIN NEW.a := NEW.a + 10; RETURN NEW; END$$ LANGUAGE plpgsql;
+CREATE TABLE base_tbl (a int, b int);
+ALTER TABLE base_tbl SET (autovacuum_enabled = 'false');
+CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON base_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc();
+CREATE FOREIGN TABLE foreign_tbl (a int, b int)
+ SERVER loopback OPTIONS (table_name 'base_tbl');
+CREATE VIEW rw_view AS SELECT * FROM foreign_tbl
+ WHERE a < b WITH CHECK OPTION;
+\d+ rw_view
+ View "public.rw_view"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------+-----------+----------+---------+---------+-------------
+ a | integer | | | | plain |
+ b | integer | | | | plain |
+View definition:
+ SELECT foreign_tbl.a,
+ foreign_tbl.b
+ FROM foreign_tbl
+ WHERE foreign_tbl.a < foreign_tbl.b;
+Options: check_option=cascaded
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 5);
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Insert on public.foreign_tbl
+ Remote SQL: INSERT INTO public.base_tbl(a, b) VALUES ($1, $2) RETURNING a, b
+ Batch Size: 1
+ -> Result
+ Output: 0, 5
+(5 rows)
+
+INSERT INTO rw_view VALUES (0, 5); -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (10, 5).
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15);
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Insert on public.foreign_tbl
+ Remote SQL: INSERT INTO public.base_tbl(a, b) VALUES ($1, $2) RETURNING a, b
+ Batch Size: 1
+ -> Result
+ Output: 0, 15
+(5 rows)
+
+INSERT INTO rw_view VALUES (0, 15); -- ok
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 10 | 15
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 5;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Update on public.foreign_tbl
+ Remote SQL: UPDATE public.base_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b
+ -> Foreign Scan on public.foreign_tbl
+ Output: (foreign_tbl.b + 5), foreign_tbl.ctid, foreign_tbl.*
+ Remote SQL: SELECT a, b, ctid FROM public.base_tbl WHERE ((a < b)) FOR UPDATE
+(5 rows)
+
+UPDATE rw_view SET b = b + 5; -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (20, 20).
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 15;
+ QUERY PLAN
+---------------------------------------------------------------------------------------
+ Update on public.foreign_tbl
+ Remote SQL: UPDATE public.base_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b
+ -> Foreign Scan on public.foreign_tbl
+ Output: (foreign_tbl.b + 15), foreign_tbl.ctid, foreign_tbl.*
+ Remote SQL: SELECT a, b, ctid FROM public.base_tbl WHERE ((a < b)) FOR UPDATE
+(5 rows)
+
+UPDATE rw_view SET b = b + 15; -- ok
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 20 | 30
+(1 row)
+
+-- We don't allow batch insert when there are any WCO constraints
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15), (0, 5);
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Insert on public.foreign_tbl
+ Remote SQL: INSERT INTO public.base_tbl(a, b) VALUES ($1, $2) RETURNING a, b
+ Batch Size: 1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1, "*VALUES*".column2
+(5 rows)
+
+INSERT INTO rw_view VALUES (0, 15), (0, 5); -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (10, 5).
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 20 | 30
+(1 row)
+
+ALTER SERVER loopback OPTIONS (DROP batch_size);
+DROP FOREIGN TABLE foreign_tbl CASCADE;
+NOTICE: drop cascades to view rw_view
+DROP TRIGGER row_before_insupd_trigger ON base_tbl;
+DROP TABLE base_tbl;
+-- test WCO for partitions
+CREATE TABLE child_tbl (a int, b int);
+ALTER TABLE child_tbl SET (autovacuum_enabled = 'false');
+CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON child_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc();
+CREATE FOREIGN TABLE foreign_tbl (a int, b int)
+ SERVER loopback OPTIONS (table_name 'child_tbl');
+CREATE TABLE parent_tbl (a int, b int) PARTITION BY RANGE(a);
+ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES FROM (0) TO (100);
+-- Detach and re-attach once, to stress the concurrent detach case.
+ALTER TABLE parent_tbl DETACH PARTITION foreign_tbl CONCURRENTLY;
+ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES FROM (0) TO (100);
+CREATE VIEW rw_view AS SELECT * FROM parent_tbl
+ WHERE a < b WITH CHECK OPTION;
+\d+ rw_view
+ View "public.rw_view"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------+-----------+----------+---------+---------+-------------
+ a | integer | | | | plain |
+ b | integer | | | | plain |
+View definition:
+ SELECT parent_tbl.a,
+ parent_tbl.b
+ FROM parent_tbl
+ WHERE parent_tbl.a < parent_tbl.b;
+Options: check_option=cascaded
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 5);
+ QUERY PLAN
+-----------------------------
+ Insert on public.parent_tbl
+ -> Result
+ Output: 0, 5
+(3 rows)
+
+INSERT INTO rw_view VALUES (0, 5); -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (10, 5).
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15);
+ QUERY PLAN
+-----------------------------
+ Insert on public.parent_tbl
+ -> Result
+ Output: 0, 15
+(3 rows)
+
+INSERT INTO rw_view VALUES (0, 15); -- ok
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 10 | 15
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 5;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------
+ Update on public.parent_tbl
+ Foreign Update on public.foreign_tbl parent_tbl_1
+ Remote SQL: UPDATE public.child_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b
+ -> Foreign Scan on public.foreign_tbl parent_tbl_1
+ Output: (parent_tbl_1.b + 5), parent_tbl_1.tableoid, parent_tbl_1.ctid, parent_tbl_1.*
+ Remote SQL: SELECT a, b, ctid FROM public.child_tbl WHERE ((a < b)) FOR UPDATE
+(6 rows)
+
+UPDATE rw_view SET b = b + 5; -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (20, 20).
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 15;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Update on public.parent_tbl
+ Foreign Update on public.foreign_tbl parent_tbl_1
+ Remote SQL: UPDATE public.child_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b
+ -> Foreign Scan on public.foreign_tbl parent_tbl_1
+ Output: (parent_tbl_1.b + 15), parent_tbl_1.tableoid, parent_tbl_1.ctid, parent_tbl_1.*
+ Remote SQL: SELECT a, b, ctid FROM public.child_tbl WHERE ((a < b)) FOR UPDATE
+(6 rows)
+
+UPDATE rw_view SET b = b + 15; -- ok
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 20 | 30
+(1 row)
+
+-- We don't allow batch insert when there are any WCO constraints
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15), (0, 5);
+ QUERY PLAN
+--------------------------------------------------------
+ Insert on public.parent_tbl
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1, "*VALUES*".column2
+(3 rows)
+
+INSERT INTO rw_view VALUES (0, 15), (0, 5); -- should fail
+ERROR: new row violates check option for view "rw_view"
+DETAIL: Failing row contains (10, 5).
+SELECT * FROM foreign_tbl;
+ a | b
+----+----
+ 20 | 30
+(1 row)
+
+ALTER SERVER loopback OPTIONS (DROP batch_size);
+DROP FOREIGN TABLE foreign_tbl CASCADE;
+DROP TRIGGER row_before_insupd_trigger ON child_tbl;
+DROP TABLE parent_tbl CASCADE;
+NOTICE: drop cascades to view rw_view
+DROP FUNCTION row_before_insupd_trigfunc;
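+-- Illustrative sketch, not executed as part of this test: the check option is
+-- evaluated locally against the row the remote server actually stored (as
+-- returned by the RETURNING clause in the remote SQL), which is why the
+-- remote BEFORE trigger's adjustment of "a" can make an otherwise-valid
+-- INSERT fail.  The views above rely on the default CASCADED behavior; the
+-- same thing could be spelled out explicitly, e.g.:
+--   CREATE VIEW rw_view AS SELECT * FROM foreign_tbl
+--     WHERE a < b WITH CASCADED CHECK OPTION;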
+-- ===================================================================
+-- test serial columns (ie, sequence-based defaults)
+-- ===================================================================
+create table loc1 (f1 serial, f2 text);
+alter table loc1 set (autovacuum_enabled = 'false');
+create foreign table rem1 (f1 serial, f2 text)
+ server loopback options(table_name 'loc1');
+select pg_catalog.setval('rem1_f1_seq', 10, false);
+ setval
+--------
+ 10
+(1 row)
+
+insert into loc1(f2) values('hi');
+insert into rem1(f2) values('hi remote');
+insert into loc1(f2) values('bye');
+insert into rem1(f2) values('bye remote');
+select * from loc1;
+ f1 | f2
+----+------------
+ 1 | hi
+ 10 | hi remote
+ 2 | bye
+ 11 | bye remote
+(4 rows)
+
+select * from rem1;
+ f1 | f2
+----+------------
+ 1 | hi
+ 10 | hi remote
+ 2 | bye
+ 11 | bye remote
+(4 rows)
+
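+-- Illustrative sketch, not executed as part of this test: the serial default
+-- on the foreign table rem1 comes from the local sequence rem1_f1_seq and the
+-- computed value is shipped to the remote side, while rows inserted directly
+-- into loc1 draw from loc1_f1_seq, so the two counters advance independently
+-- (1, 2 versus 10, 11 above).  If overlapping values were a concern, the
+-- local sequence could be moved to a disjoint range, for example:
+--   ALTER SEQUENCE rem1_f1_seq MINVALUE 1000 RESTART WITH 1000;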
+-- ===================================================================
+-- test generated columns
+-- ===================================================================
+create table gloc1 (
+ a int,
+ b int generated always as (a * 2) stored);
+alter table gloc1 set (autovacuum_enabled = 'false');
+create foreign table grem1 (
+ a int,
+ b int generated always as (a * 2) stored)
+ server loopback options(table_name 'gloc1');
+explain (verbose, costs off)
+insert into grem1 (a) values (1), (2);
+ QUERY PLAN
+-------------------------------------------------------------------
+ Insert on public.grem1
+ Remote SQL: INSERT INTO public.gloc1(a, b) VALUES ($1, DEFAULT)
+ Batch Size: 1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1, NULL::integer
+(5 rows)
+
+insert into grem1 (a) values (1), (2);
+explain (verbose, costs off)
+update grem1 set a = 22 where a = 2;
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Update on public.grem1
+ Remote SQL: UPDATE public.gloc1 SET a = $2, b = DEFAULT WHERE ctid = $1
+ -> Foreign Scan on public.grem1
+ Output: 22, ctid, grem1.*
+ Remote SQL: SELECT a, b, ctid FROM public.gloc1 WHERE ((a = 2)) FOR UPDATE
+(5 rows)
+
+update grem1 set a = 22 where a = 2;
+select * from gloc1;
+ a | b
+----+----
+ 1 | 2
+ 22 | 44
+(2 rows)
+
+select * from grem1;
+ a | b
+----+----
+ 1 | 2
+ 22 | 44
+(2 rows)
+
+delete from grem1;
+-- test copy from
+copy grem1 from stdin;
+select * from gloc1;
+ a | b
+---+---
+ 1 | 2
+ 2 | 4
+(2 rows)
+
+select * from grem1;
+ a | b
+---+---
+ 1 | 2
+ 2 | 4
+(2 rows)
+
+delete from grem1;
+-- test batch insert
+alter server loopback options (add batch_size '10');
+explain (verbose, costs off)
+insert into grem1 (a) values (1), (2);
+ QUERY PLAN
+-------------------------------------------------------------------
+ Insert on public.grem1
+ Remote SQL: INSERT INTO public.gloc1(a, b) VALUES ($1, DEFAULT)
+ Batch Size: 10
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1, NULL::integer
+(5 rows)
+
+insert into grem1 (a) values (1), (2);
+select * from gloc1;
+ a | b
+---+---
+ 1 | 2
+ 2 | 4
+(2 rows)
+
+select * from grem1;
+ a | b
+---+---
+ 1 | 2
+ 2 | 4
+(2 rows)
+
+delete from grem1;
+alter server loopback options (drop batch_size);
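+-- Illustrative sketch, not executed as part of this test: batch_size can also
+-- be set on an individual foreign table, overriding the server-level setting;
+-- the effective value appears as "Batch Size" in EXPLAIN (VERBOSE) output,
+-- as seen above.  For example:
+--   ALTER FOREIGN TABLE grem1 OPTIONS (ADD batch_size '50');
+--   ALTER FOREIGN TABLE grem1 OPTIONS (DROP batch_size);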
+-- ===================================================================
+-- test local triggers
+-- ===================================================================
+-- Trigger functions "borrowed" from triggers regress test.
+CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS $$
+BEGIN
+ RAISE NOTICE 'trigger_func(%) called: action = %, when = %, level = %',
+ TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL;
+ RETURN NULL;
+END;$$;
+CREATE TRIGGER trig_stmt_before BEFORE DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE TRIGGER trig_stmt_after AFTER DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger
+LANGUAGE plpgsql AS $$
+
+declare
+ oldnew text[];
+ relid text;
+ argstr text;
+begin
+
+ relid := TG_relid::regclass;
+ argstr := '';
+ for i in 0 .. TG_nargs - 1 loop
+ if i > 0 then
+ argstr := argstr || ', ';
+ end if;
+ argstr := argstr || TG_argv[i];
+ end loop;
+
+ RAISE NOTICE '%(%) % % % ON %',
+ tg_name, argstr, TG_when, TG_level, TG_OP, relid;
+ oldnew := '{}'::text[];
+ if TG_OP != 'INSERT' then
+ oldnew := array_append(oldnew, format('OLD: %s', OLD));
+ end if;
+
+ if TG_OP != 'DELETE' then
+ oldnew := array_append(oldnew, format('NEW: %s', NEW));
+ end if;
+
+ RAISE NOTICE '%', array_to_string(oldnew, ',');
+
+ if TG_OP = 'DELETE' then
+ return OLD;
+ else
+ return NEW;
+ end if;
+end;
+$$;
+-- Test basic functionality
+CREATE TRIGGER trig_row_before
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_row_after
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+delete from rem1;
+NOTICE: trigger_func(<NULL>) called: action = DELETE, when = BEFORE, level = STATEMENT
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW DELETE ON rem1
+NOTICE: OLD: (1,hi)
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW DELETE ON rem1
+NOTICE: OLD: (10,"hi remote")
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW DELETE ON rem1
+NOTICE: OLD: (2,bye)
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW DELETE ON rem1
+NOTICE: OLD: (11,"bye remote")
+NOTICE: trig_row_after(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (1,hi)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (10,"hi remote")
+NOTICE: trig_row_after(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (2,bye)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (11,"bye remote")
+NOTICE: trigger_func(<NULL>) called: action = DELETE, when = AFTER, level = STATEMENT
+insert into rem1 values(1,'insert');
+NOTICE: trigger_func(<NULL>) called: action = INSERT, when = BEFORE, level = STATEMENT
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem1
+NOTICE: NEW: (1,insert)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem1
+NOTICE: NEW: (1,insert)
+NOTICE: trigger_func(<NULL>) called: action = INSERT, when = AFTER, level = STATEMENT
+update rem1 set f2 = 'update' where f1 = 1;
+NOTICE: trigger_func(<NULL>) called: action = UPDATE, when = BEFORE, level = STATEMENT
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON rem1
+NOTICE: OLD: (1,insert),NEW: (1,update)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (1,insert),NEW: (1,update)
+NOTICE: trigger_func(<NULL>) called: action = UPDATE, when = AFTER, level = STATEMENT
+update rem1 set f2 = f2 || f2;
+NOTICE: trigger_func(<NULL>) called: action = UPDATE, when = BEFORE, level = STATEMENT
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON rem1
+NOTICE: OLD: (1,update),NEW: (1,updateupdate)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (1,update),NEW: (1,updateupdate)
+NOTICE: trigger_func(<NULL>) called: action = UPDATE, when = AFTER, level = STATEMENT
+-- cleanup
+DROP TRIGGER trig_row_before ON rem1;
+DROP TRIGGER trig_row_after ON rem1;
+DROP TRIGGER trig_stmt_before ON rem1;
+DROP TRIGGER trig_stmt_after ON rem1;
+DELETE from rem1;
+-- Test multiple AFTER ROW triggers on a foreign table
+CREATE TRIGGER trig_row_after1
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_row_after2
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+insert into rem1 values(1,'insert');
+NOTICE: trig_row_after1(23, skidoo) AFTER ROW INSERT ON rem1
+NOTICE: NEW: (1,insert)
+NOTICE: trig_row_after2(23, skidoo) AFTER ROW INSERT ON rem1
+NOTICE: NEW: (1,insert)
+update rem1 set f2 = 'update' where f1 = 1;
+NOTICE: trig_row_after1(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (1,insert),NEW: (1,update)
+NOTICE: trig_row_after2(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (1,insert),NEW: (1,update)
+update rem1 set f2 = f2 || f2;
+NOTICE: trig_row_after1(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (1,update),NEW: (1,updateupdate)
+NOTICE: trig_row_after2(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (1,update),NEW: (1,updateupdate)
+delete from rem1;
+NOTICE: trig_row_after1(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (1,updateupdate)
+NOTICE: trig_row_after2(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (1,updateupdate)
+-- cleanup
+DROP TRIGGER trig_row_after1 ON rem1;
+DROP TRIGGER trig_row_after2 ON rem1;
+-- Test WHEN conditions
+CREATE TRIGGER trig_row_before_insupd
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW
+WHEN (NEW.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_row_after_insupd
+AFTER INSERT OR UPDATE ON rem1
+FOR EACH ROW
+WHEN (NEW.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+-- Insert or update not matching: nothing happens
+INSERT INTO rem1 values(1, 'insert');
+UPDATE rem1 set f2 = 'test';
+-- Insert or update matching: triggers are fired
+INSERT INTO rem1 values(2, 'update');
+NOTICE: trig_row_before_insupd(23, skidoo) BEFORE ROW INSERT ON rem1
+NOTICE: NEW: (2,update)
+NOTICE: trig_row_after_insupd(23, skidoo) AFTER ROW INSERT ON rem1
+NOTICE: NEW: (2,update)
+UPDATE rem1 set f2 = 'update update' where f1 = '2';
+NOTICE: trig_row_before_insupd(23, skidoo) BEFORE ROW UPDATE ON rem1
+NOTICE: OLD: (2,update),NEW: (2,"update update")
+NOTICE: trig_row_after_insupd(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (2,update),NEW: (2,"update update")
+CREATE TRIGGER trig_row_before_delete
+BEFORE DELETE ON rem1
+FOR EACH ROW
+WHEN (OLD.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_row_after_delete
+AFTER DELETE ON rem1
+FOR EACH ROW
+WHEN (OLD.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+-- Trigger is fired for f1=2, not for f1=1
+DELETE FROM rem1;
+NOTICE: trig_row_before_delete(23, skidoo) BEFORE ROW DELETE ON rem1
+NOTICE: OLD: (2,"update update")
+NOTICE: trig_row_after_delete(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (2,"update update")
+-- cleanup
+DROP TRIGGER trig_row_before_insupd ON rem1;
+DROP TRIGGER trig_row_after_insupd ON rem1;
+DROP TRIGGER trig_row_before_delete ON rem1;
+DROP TRIGGER trig_row_after_delete ON rem1;
+-- Test various RETURN statements in BEFORE triggers.
+CREATE FUNCTION trig_row_before_insupdate() RETURNS TRIGGER AS $$
+ BEGIN
+ NEW.f2 := NEW.f2 || ' triggered !';
+ RETURN NEW;
+ END
+$$ language plpgsql;
+CREATE TRIGGER trig_row_before_insupd
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+-- The new values should have 'triggered' appended
+INSERT INTO rem1 values(1, 'insert');
+SELECT * from loc1;
+ f1 | f2
+----+--------------------
+ 1 | insert triggered !
+(1 row)
+
+INSERT INTO rem1 values(2, 'insert') RETURNING f2;
+ f2
+--------------------
+ insert triggered !
+(1 row)
+
+SELECT * from loc1;
+ f1 | f2
+----+--------------------
+ 1 | insert triggered !
+ 2 | insert triggered !
+(2 rows)
+
+UPDATE rem1 set f2 = '';
+SELECT * from loc1;
+ f1 | f2
+----+--------------
+ 1 | triggered !
+ 2 | triggered !
+(2 rows)
+
+UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
+ f2
+--------------------
+ skidoo triggered !
+ skidoo triggered !
+(2 rows)
+
+SELECT * from loc1;
+ f1 | f2
+----+--------------------
+ 1 | skidoo triggered !
+ 2 | skidoo triggered !
+(2 rows)
+
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f1 = 10; -- all columns should be transmitted
+ QUERY PLAN
+-----------------------------------------------------------------------
+ Update on public.rem1
+ Remote SQL: UPDATE public.loc1 SET f1 = $2, f2 = $3 WHERE ctid = $1
+ -> Foreign Scan on public.rem1
+ Output: 10, ctid, rem1.*
+ Remote SQL: SELECT f1, f2, ctid FROM public.loc1 FOR UPDATE
+(5 rows)
+
+UPDATE rem1 set f1 = 10;
+SELECT * from loc1;
+ f1 | f2
+----+--------------------------------
+ 10 | skidoo triggered ! triggered !
+ 10 | skidoo triggered ! triggered !
+(2 rows)
+
+DELETE FROM rem1;
+-- Add a second trigger, to check that the changes are propagated correctly
+-- from trigger to trigger
+CREATE TRIGGER trig_row_before_insupd2
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+INSERT INTO rem1 values(1, 'insert');
+SELECT * from loc1;
+ f1 | f2
+----+--------------------------------
+ 1 | insert triggered ! triggered !
+(1 row)
+
+INSERT INTO rem1 values(2, 'insert') RETURNING f2;
+ f2
+--------------------------------
+ insert triggered ! triggered !
+(1 row)
+
+SELECT * from loc1;
+ f1 | f2
+----+--------------------------------
+ 1 | insert triggered ! triggered !
+ 2 | insert triggered ! triggered !
+(2 rows)
+
+UPDATE rem1 set f2 = '';
+SELECT * from loc1;
+ f1 | f2
+----+--------------------------
+ 1 | triggered ! triggered !
+ 2 | triggered ! triggered !
+(2 rows)
+
+UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
+ f2
+--------------------------------
+ skidoo triggered ! triggered !
+ skidoo triggered ! triggered !
+(2 rows)
+
+SELECT * from loc1;
+ f1 | f2
+----+--------------------------------
+ 1 | skidoo triggered ! triggered !
+ 2 | skidoo triggered ! triggered !
+(2 rows)
+
+DROP TRIGGER trig_row_before_insupd ON rem1;
+DROP TRIGGER trig_row_before_insupd2 ON rem1;
+DELETE from rem1;
+INSERT INTO rem1 VALUES (1, 'test');
+-- Test with a trigger returning NULL
+CREATE FUNCTION trig_null() RETURNS TRIGGER AS $$
+ BEGIN
+ RETURN NULL;
+ END
+$$ language plpgsql;
+CREATE TRIGGER trig_null
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_null();
+-- Nothing should have changed.
+INSERT INTO rem1 VALUES (2, 'test2');
+SELECT * from loc1;
+ f1 | f2
+----+------
+ 1 | test
+(1 row)
+
+UPDATE rem1 SET f2 = 'test2';
+SELECT * from loc1;
+ f1 | f2
+----+------
+ 1 | test
+(1 row)
+
+DELETE from rem1;
+SELECT * from loc1;
+ f1 | f2
+----+------
+ 1 | test
+(1 row)
+
+DROP TRIGGER trig_null ON rem1;
+DELETE from rem1;
+-- Test a combination of local and remote triggers
+CREATE TRIGGER trig_row_before
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_row_after
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_local_before BEFORE INSERT OR UPDATE ON loc1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+INSERT INTO rem1(f2) VALUES ('test');
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem1
+NOTICE: NEW: (12,test)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem1
+NOTICE: NEW: (12,"test triggered !")
+UPDATE rem1 SET f2 = 'testo';
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON rem1
+NOTICE: OLD: (12,"test triggered !"),NEW: (12,testo)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (12,"test triggered !"),NEW: (12,"testo triggered !")
+-- Test returning a system attribute
+INSERT INTO rem1(f2) VALUES ('test') RETURNING ctid;
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem1
+NOTICE: NEW: (13,test)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem1
+NOTICE: NEW: (13,"test triggered !")
+ ctid
+--------
+ (0,32)
+(1 row)
+
+-- cleanup
+DROP TRIGGER trig_row_before ON rem1;
+DROP TRIGGER trig_row_after ON rem1;
+DROP TRIGGER trig_local_before ON loc1;
+-- Test direct foreign table modification functionality
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+ QUERY PLAN
+---------------------------------------------
+ Delete on public.rem1
+ -> Foreign Delete on public.rem1
+ Remote SQL: DELETE FROM public.loc1
+(3 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1 WHERE false; -- currently can't be pushed down
+ QUERY PLAN
+-------------------------------------------------------
+ Delete on public.rem1
+ Remote SQL: DELETE FROM public.loc1 WHERE ctid = $1
+ -> Result
+ Output: ctid
+ One-Time Filter: false
+(5 rows)
+
+-- Test with statement-level triggers
+CREATE TRIGGER trig_stmt_before
+ BEFORE DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+ QUERY PLAN
+----------------------------------------------------------
+ Update on public.rem1
+ -> Foreign Update on public.rem1
+ Remote SQL: UPDATE public.loc1 SET f2 = ''::text
+(3 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+ QUERY PLAN
+---------------------------------------------
+ Delete on public.rem1
+ -> Foreign Delete on public.rem1
+ Remote SQL: DELETE FROM public.loc1
+(3 rows)
+
+DROP TRIGGER trig_stmt_before ON rem1;
+CREATE TRIGGER trig_stmt_after
+ AFTER DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+ QUERY PLAN
+----------------------------------------------------------
+ Update on public.rem1
+ -> Foreign Update on public.rem1
+ Remote SQL: UPDATE public.loc1 SET f2 = ''::text
+(3 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+ QUERY PLAN
+---------------------------------------------
+ Delete on public.rem1
+ -> Foreign Delete on public.rem1
+ Remote SQL: DELETE FROM public.loc1
+(3 rows)
+
+DROP TRIGGER trig_stmt_after ON rem1;
+-- Test with row-level ON INSERT triggers
+CREATE TRIGGER trig_row_before_insert
+BEFORE INSERT ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+ QUERY PLAN
+----------------------------------------------------------
+ Update on public.rem1
+ -> Foreign Update on public.rem1
+ Remote SQL: UPDATE public.loc1 SET f2 = ''::text
+(3 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+ QUERY PLAN
+---------------------------------------------
+ Delete on public.rem1
+ -> Foreign Delete on public.rem1
+ Remote SQL: DELETE FROM public.loc1
+(3 rows)
+
+DROP TRIGGER trig_row_before_insert ON rem1;
+CREATE TRIGGER trig_row_after_insert
+AFTER INSERT ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+ QUERY PLAN
+----------------------------------------------------------
+ Update on public.rem1
+ -> Foreign Update on public.rem1
+ Remote SQL: UPDATE public.loc1 SET f2 = ''::text
+(3 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+ QUERY PLAN
+---------------------------------------------
+ Delete on public.rem1
+ -> Foreign Delete on public.rem1
+ Remote SQL: DELETE FROM public.loc1
+(3 rows)
+
+DROP TRIGGER trig_row_after_insert ON rem1;
+-- Test with row-level ON UPDATE triggers
+CREATE TRIGGER trig_row_before_update
+BEFORE UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can't be pushed down
+ QUERY PLAN
+-----------------------------------------------------------------------
+ Update on public.rem1
+ Remote SQL: UPDATE public.loc1 SET f1 = $2, f2 = $3 WHERE ctid = $1
+ -> Foreign Scan on public.rem1
+ Output: ''::text, ctid, rem1.*
+ Remote SQL: SELECT f1, f2, ctid FROM public.loc1 FOR UPDATE
+(5 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+ QUERY PLAN
+---------------------------------------------
+ Delete on public.rem1
+ -> Foreign Delete on public.rem1
+ Remote SQL: DELETE FROM public.loc1
+(3 rows)
+
+DROP TRIGGER trig_row_before_update ON rem1;
+CREATE TRIGGER trig_row_after_update
+AFTER UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can't be pushed down
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Update on public.rem1
+ Remote SQL: UPDATE public.loc1 SET f2 = $2 WHERE ctid = $1 RETURNING f1, f2
+ -> Foreign Scan on public.rem1
+ Output: ''::text, ctid, rem1.*
+ Remote SQL: SELECT f1, f2, ctid FROM public.loc1 FOR UPDATE
+(5 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+ QUERY PLAN
+---------------------------------------------
+ Delete on public.rem1
+ -> Foreign Delete on public.rem1
+ Remote SQL: DELETE FROM public.loc1
+(3 rows)
+
+DROP TRIGGER trig_row_after_update ON rem1;
+-- Test with row-level ON DELETE triggers
+CREATE TRIGGER trig_row_before_delete
+BEFORE DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+ QUERY PLAN
+----------------------------------------------------------
+ Update on public.rem1
+ -> Foreign Update on public.rem1
+ Remote SQL: UPDATE public.loc1 SET f2 = ''::text
+(3 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can't be pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Delete on public.rem1
+ Remote SQL: DELETE FROM public.loc1 WHERE ctid = $1
+ -> Foreign Scan on public.rem1
+ Output: ctid, rem1.*
+ Remote SQL: SELECT f1, f2, ctid FROM public.loc1 FOR UPDATE
+(5 rows)
+
+DROP TRIGGER trig_row_before_delete ON rem1;
+CREATE TRIGGER trig_row_after_delete
+AFTER DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+ QUERY PLAN
+----------------------------------------------------------
+ Update on public.rem1
+ -> Foreign Update on public.rem1
+ Remote SQL: UPDATE public.loc1 SET f2 = ''::text
+(3 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can't be pushed down
+ QUERY PLAN
+------------------------------------------------------------------------
+ Delete on public.rem1
+ Remote SQL: DELETE FROM public.loc1 WHERE ctid = $1 RETURNING f1, f2
+ -> Foreign Scan on public.rem1
+ Output: ctid, rem1.*
+ Remote SQL: SELECT f1, f2, ctid FROM public.loc1 FOR UPDATE
+(5 rows)
+
+DROP TRIGGER trig_row_after_delete ON rem1;
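+-- Illustrative sketch, not executed as part of this test: as the plans above
+-- show, a direct "Foreign Update"/"Foreign Delete" is chosen only when no
+-- local per-row processing (such as a row-level trigger on the affected
+-- operation) is required; otherwise rows are fetched and modified one at a
+-- time through parameterized remote statements.  Writes can also be disabled
+-- entirely with the updatable option, e.g.:
+--   ALTER FOREIGN TABLE rem1 OPTIONS (ADD updatable 'false');
+--   ALTER FOREIGN TABLE rem1 OPTIONS (DROP updatable);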
+-- ===================================================================
+-- test inheritance features
+-- ===================================================================
+CREATE TABLE a (aa TEXT);
+CREATE TABLE loct (aa TEXT, bb TEXT);
+ALTER TABLE a SET (autovacuum_enabled = 'false');
+ALTER TABLE loct SET (autovacuum_enabled = 'false');
+CREATE FOREIGN TABLE b (bb TEXT) INHERITS (a)
+ SERVER loopback OPTIONS (table_name 'loct');
+INSERT INTO a(aa) VALUES('aaa');
+INSERT INTO a(aa) VALUES('aaaa');
+INSERT INTO a(aa) VALUES('aaaaa');
+INSERT INTO b(aa) VALUES('bbb');
+INSERT INTO b(aa) VALUES('bbbb');
+INSERT INTO b(aa) VALUES('bbbbb');
+SELECT tableoid::regclass, * FROM a;
+ tableoid | aa
+----------+-------
+ a | aaa
+ a | aaaa
+ a | aaaaa
+ b | bbb
+ b | bbbb
+ b | bbbbb
+(6 rows)
+
+SELECT tableoid::regclass, * FROM b;
+ tableoid | aa | bb
+----------+-------+----
+ b | bbb |
+ b | bbbb |
+ b | bbbbb |
+(3 rows)
+
+SELECT tableoid::regclass, * FROM ONLY a;
+ tableoid | aa
+----------+-------
+ a | aaa
+ a | aaaa
+ a | aaaaa
+(3 rows)
+
+UPDATE a SET aa = 'zzzzzz' WHERE aa LIKE 'aaaa%';
+SELECT tableoid::regclass, * FROM a;
+ tableoid | aa
+----------+--------
+ a | aaa
+ a | zzzzzz
+ a | zzzzzz
+ b | bbb
+ b | bbbb
+ b | bbbbb
+(6 rows)
+
+SELECT tableoid::regclass, * FROM b;
+ tableoid | aa | bb
+----------+-------+----
+ b | bbb |
+ b | bbbb |
+ b | bbbbb |
+(3 rows)
+
+SELECT tableoid::regclass, * FROM ONLY a;
+ tableoid | aa
+----------+--------
+ a | aaa
+ a | zzzzzz
+ a | zzzzzz
+(3 rows)
+
+UPDATE b SET aa = 'new';
+SELECT tableoid::regclass, * FROM a;
+ tableoid | aa
+----------+--------
+ a | aaa
+ a | zzzzzz
+ a | zzzzzz
+ b | new
+ b | new
+ b | new
+(6 rows)
+
+SELECT tableoid::regclass, * FROM b;
+ tableoid | aa | bb
+----------+-----+----
+ b | new |
+ b | new |
+ b | new |
+(3 rows)
+
+SELECT tableoid::regclass, * FROM ONLY a;
+ tableoid | aa
+----------+--------
+ a | aaa
+ a | zzzzzz
+ a | zzzzzz
+(3 rows)
+
+UPDATE a SET aa = 'newtoo';
+SELECT tableoid::regclass, * FROM a;
+ tableoid | aa
+----------+--------
+ a | newtoo
+ a | newtoo
+ a | newtoo
+ b | newtoo
+ b | newtoo
+ b | newtoo
+(6 rows)
+
+SELECT tableoid::regclass, * FROM b;
+ tableoid | aa | bb
+----------+--------+----
+ b | newtoo |
+ b | newtoo |
+ b | newtoo |
+(3 rows)
+
+SELECT tableoid::regclass, * FROM ONLY a;
+ tableoid | aa
+----------+--------
+ a | newtoo
+ a | newtoo
+ a | newtoo
+(3 rows)
+
+DELETE FROM a;
+SELECT tableoid::regclass, * FROM a;
+ tableoid | aa
+----------+----
+(0 rows)
+
+SELECT tableoid::regclass, * FROM b;
+ tableoid | aa | bb
+----------+----+----
+(0 rows)
+
+SELECT tableoid::regclass, * FROM ONLY a;
+ tableoid | aa
+----------+----
+(0 rows)
+
+DROP TABLE a CASCADE;
+NOTICE: drop cascades to foreign table b
+DROP TABLE loct;
+-- Check SELECT FOR UPDATE/SHARE with an inherited source table
+create table loct1 (f1 int, f2 int, f3 int);
+create table loct2 (f1 int, f2 int, f3 int);
+alter table loct1 set (autovacuum_enabled = 'false');
+alter table loct2 set (autovacuum_enabled = 'false');
+create table foo (f1 int, f2 int);
+create foreign table foo2 (f3 int) inherits (foo)
+ server loopback options (table_name 'loct1');
+create table bar (f1 int, f2 int);
+create foreign table bar2 (f3 int) inherits (bar)
+ server loopback options (table_name 'loct2');
+alter table foo set (autovacuum_enabled = 'false');
+alter table bar set (autovacuum_enabled = 'false');
+insert into foo values(1,1);
+insert into foo values(3,3);
+insert into foo2 values(2,2,2);
+insert into foo2 values(4,4,4);
+insert into bar values(1,11);
+insert into bar values(2,22);
+insert into bar values(6,66);
+insert into bar2 values(3,33,33);
+insert into bar2 values(4,44,44);
+insert into bar2 values(7,77,77);
+explain (verbose, costs off)
+select * from bar where f1 in (select f1 from foo) for update;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------
+ LockRows
+ Output: bar.f1, bar.f2, bar.ctid, foo.ctid, bar.*, bar.tableoid, foo.*, foo.tableoid
+ -> Hash Join
+ Output: bar.f1, bar.f2, bar.ctid, foo.ctid, bar.*, bar.tableoid, foo.*, foo.tableoid
+ Inner Unique: true
+ Hash Cond: (bar.f1 = foo.f1)
+ -> Append
+ -> Seq Scan on public.bar bar_1
+ Output: bar_1.f1, bar_1.f2, bar_1.ctid, bar_1.*, bar_1.tableoid
+ -> Foreign Scan on public.bar2 bar_2
+ Output: bar_2.f1, bar_2.f2, bar_2.ctid, bar_2.*, bar_2.tableoid
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE
+ -> Hash
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
+ -> HashAggregate
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
+ Group Key: foo.f1
+ -> Append
+ -> Seq Scan on public.foo foo_1
+ Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
+ -> Foreign Scan on public.foo2 foo_2
+ Output: foo_2.ctid, foo_2.f1, foo_2.*, foo_2.tableoid
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
+(23 rows)
+
+select * from bar where f1 in (select f1 from foo) for update;
+ f1 | f2
+----+----
+ 1 | 11
+ 2 | 22
+ 3 | 33
+ 4 | 44
+(4 rows)
+
+explain (verbose, costs off)
+select * from bar where f1 in (select f1 from foo) for share;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------
+ LockRows
+ Output: bar.f1, bar.f2, bar.ctid, foo.ctid, bar.*, bar.tableoid, foo.*, foo.tableoid
+ -> Hash Join
+ Output: bar.f1, bar.f2, bar.ctid, foo.ctid, bar.*, bar.tableoid, foo.*, foo.tableoid
+ Inner Unique: true
+ Hash Cond: (bar.f1 = foo.f1)
+ -> Append
+ -> Seq Scan on public.bar bar_1
+ Output: bar_1.f1, bar_1.f2, bar_1.ctid, bar_1.*, bar_1.tableoid
+ -> Foreign Scan on public.bar2 bar_2
+ Output: bar_2.f1, bar_2.f2, bar_2.ctid, bar_2.*, bar_2.tableoid
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR SHARE
+ -> Hash
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
+ -> HashAggregate
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
+ Group Key: foo.f1
+ -> Append
+ -> Seq Scan on public.foo foo_1
+ Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
+ -> Foreign Scan on public.foo2 foo_2
+ Output: foo_2.ctid, foo_2.f1, foo_2.*, foo_2.tableoid
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
+(23 rows)
+
+select * from bar where f1 in (select f1 from foo) for share;
+ f1 | f2
+----+----
+ 1 | 11
+ 2 | 22
+ 3 | 33
+ 4 | 44
+(4 rows)
+
+-- Now check SELECT FOR UPDATE/SHARE with an inherited source table,
+-- where the parent is itself a foreign table
+create table loct4 (f1 int, f2 int, f3 int);
+create foreign table foo2child (f3 int) inherits (foo2)
+ server loopback options (table_name 'loct4');
+NOTICE: moving and merging column "f3" with inherited definition
+DETAIL: User-specified column moved to the position of the inherited column.
+explain (verbose, costs off)
+select * from bar where f1 in (select f1 from foo2) for share;
+ QUERY PLAN
+--------------------------------------------------------------------------------------
+ LockRows
+ Output: bar.f1, bar.f2, bar.ctid, foo2.*, bar.*, bar.tableoid, foo2.tableoid
+ -> Hash Join
+ Output: bar.f1, bar.f2, bar.ctid, foo2.*, bar.*, bar.tableoid, foo2.tableoid
+ Inner Unique: true
+ Hash Cond: (bar.f1 = foo2.f1)
+ -> Append
+ -> Seq Scan on public.bar bar_1
+ Output: bar_1.f1, bar_1.f2, bar_1.ctid, bar_1.*, bar_1.tableoid
+ -> Foreign Scan on public.bar2 bar_2
+ Output: bar_2.f1, bar_2.f2, bar_2.ctid, bar_2.*, bar_2.tableoid
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR SHARE
+ -> Hash
+ Output: foo2.*, foo2.f1, foo2.tableoid
+ -> HashAggregate
+ Output: foo2.*, foo2.f1, foo2.tableoid
+ Group Key: foo2.f1
+ -> Append
+ -> Foreign Scan on public.foo2 foo2_1
+ Output: foo2_1.*, foo2_1.f1, foo2_1.tableoid
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct1
+ -> Foreign Scan on public.foo2child foo2_2
+ Output: foo2_2.*, foo2_2.f1, foo2_2.tableoid
+ Remote SQL: SELECT f1, f2, f3 FROM public.loct4
+(24 rows)
+
+select * from bar where f1 in (select f1 from foo2) for share;
+ f1 | f2
+----+----
+ 2 | 22
+ 4 | 44
+(2 rows)
+
+drop foreign table foo2child;
+-- And with a local child relation of the foreign table parent
+create table foo2child (f3 int) inherits (foo2);
+NOTICE: moving and merging column "f3" with inherited definition
+DETAIL: User-specified column moved to the position of the inherited column.
+explain (verbose, costs off)
+select * from bar where f1 in (select f1 from foo2) for share;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ LockRows
+ Output: bar.f1, bar.f2, bar.ctid, foo2.*, bar.*, bar.tableoid, foo2.ctid, foo2.tableoid
+ -> Hash Join
+ Output: bar.f1, bar.f2, bar.ctid, foo2.*, bar.*, bar.tableoid, foo2.ctid, foo2.tableoid
+ Inner Unique: true
+ Hash Cond: (bar.f1 = foo2.f1)
+ -> Append
+ -> Seq Scan on public.bar bar_1
+ Output: bar_1.f1, bar_1.f2, bar_1.ctid, bar_1.*, bar_1.tableoid
+ -> Foreign Scan on public.bar2 bar_2
+ Output: bar_2.f1, bar_2.f2, bar_2.ctid, bar_2.*, bar_2.tableoid
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR SHARE
+ -> Hash
+ Output: foo2.*, foo2.f1, foo2.ctid, foo2.tableoid
+ -> HashAggregate
+ Output: foo2.*, foo2.f1, foo2.ctid, foo2.tableoid
+ Group Key: foo2.f1
+ -> Append
+ -> Foreign Scan on public.foo2 foo2_1
+ Output: foo2_1.*, foo2_1.f1, foo2_1.ctid, foo2_1.tableoid
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
+ -> Seq Scan on public.foo2child foo2_2
+ Output: foo2_2.*, foo2_2.f1, foo2_2.ctid, foo2_2.tableoid
+(23 rows)
+
+select * from bar where f1 in (select f1 from foo2) for share;
+ f1 | f2
+----+----
+ 2 | 22
+ 4 | 44
+(2 rows)
+
+drop table foo2child;
+-- Check UPDATE with inherited target and an inherited source table
+explain (verbose, costs off)
+update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Update on public.bar
+ Update on public.bar bar_1
+ Foreign Update on public.bar2 bar_2
+ Remote SQL: UPDATE public.loct2 SET f2 = $2 WHERE ctid = $1
+ -> Hash Join
+ Output: (bar.f2 + 100), foo.ctid, bar.tableoid, bar.ctid, (NULL::record), foo.*, foo.tableoid
+ Inner Unique: true
+ Hash Cond: (bar.f1 = foo.f1)
+ -> Append
+ -> Seq Scan on public.bar bar_1
+ Output: bar_1.f2, bar_1.f1, bar_1.tableoid, bar_1.ctid, NULL::record
+ -> Foreign Scan on public.bar2 bar_2
+ Output: bar_2.f2, bar_2.f1, bar_2.tableoid, bar_2.ctid, bar_2.*
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE
+ -> Hash
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
+ -> HashAggregate
+ Output: foo.ctid, foo.f1, foo.*, foo.tableoid
+ Group Key: foo.f1
+ -> Append
+ -> Seq Scan on public.foo foo_1
+ Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
+ -> Foreign Scan on public.foo2 foo_2
+ Output: foo_2.ctid, foo_2.f1, foo_2.*, foo_2.tableoid
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
+(25 rows)
+
+update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
+select tableoid::regclass, * from bar order by 1,2;
+ tableoid | f1 | f2
+----------+----+-----
+ bar | 1 | 111
+ bar | 2 | 122
+ bar | 6 | 66
+ bar2 | 3 | 133
+ bar2 | 4 | 144
+ bar2 | 7 | 77
+(6 rows)
+
+-- Check UPDATE with inherited target and an appendrel subquery
+explain (verbose, costs off)
+update bar set f2 = f2 + 100
+from
+ ( select f1 from foo union all select f1+3 from foo ) ss
+where bar.f1 = ss.f1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------
+ Update on public.bar
+ Update on public.bar bar_1
+ Foreign Update on public.bar2 bar_2
+ Remote SQL: UPDATE public.loct2 SET f2 = $2 WHERE ctid = $1
+ -> Merge Join
+ Output: (bar.f2 + 100), (ROW(foo.f1)), bar.tableoid, bar.ctid, (NULL::record)
+ Merge Cond: (bar.f1 = foo.f1)
+ -> Sort
+ Output: bar.f2, bar.f1, bar.tableoid, bar.ctid, (NULL::record)
+ Sort Key: bar.f1
+ -> Append
+ -> Seq Scan on public.bar bar_1
+ Output: bar_1.f2, bar_1.f1, bar_1.tableoid, bar_1.ctid, NULL::record
+ -> Foreign Scan on public.bar2 bar_2
+ Output: bar_2.f2, bar_2.f1, bar_2.tableoid, bar_2.ctid, bar_2.*
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE
+ -> Sort
+ Output: (ROW(foo.f1)), foo.f1
+ Sort Key: foo.f1
+ -> Append
+ -> Seq Scan on public.foo
+ Output: ROW(foo.f1), foo.f1
+ -> Foreign Scan on public.foo2 foo_1
+ Output: ROW(foo_1.f1), foo_1.f1
+ Remote SQL: SELECT f1 FROM public.loct1
+ -> Seq Scan on public.foo foo_2
+ Output: ROW((foo_2.f1 + 3)), (foo_2.f1 + 3)
+ -> Foreign Scan on public.foo2 foo_3
+ Output: ROW((foo_3.f1 + 3)), (foo_3.f1 + 3)
+ Remote SQL: SELECT f1 FROM public.loct1
+(30 rows)
+
+update bar set f2 = f2 + 100
+from
+ ( select f1 from foo union all select f1+3 from foo ) ss
+where bar.f1 = ss.f1;
+select tableoid::regclass, * from bar order by 1,2;
+ tableoid | f1 | f2
+----------+----+-----
+ bar | 1 | 211
+ bar | 2 | 222
+ bar | 6 | 166
+ bar2 | 3 | 233
+ bar2 | 4 | 244
+ bar2 | 7 | 177
+(6 rows)
+
+-- Test forcing the remote server to produce sorted data for a merge join
+-- when the foreign table is an inheritance child.
+truncate table loct1;
+truncate table only foo;
+\set num_rows_foo 2000
+insert into loct1 select generate_series(0, :num_rows_foo, 2), generate_series(0, :num_rows_foo, 2), generate_series(0, :num_rows_foo, 2);
+insert into foo select generate_series(1, :num_rows_foo, 2), generate_series(1, :num_rows_foo, 2);
+SET enable_hashjoin to false;
+SET enable_nestloop to false;
+alter foreign table foo2 options (use_remote_estimate 'true');
+create index i_loct1_f1 on loct1(f1);
+create index i_foo_f1 on foo(f1);
+analyze foo;
+analyze loct1;
+-- inner join; expressions in the clauses appear in the equivalence class list
+explain (verbose, costs off)
+ select foo.f1, loct1.f1 from foo join loct1 on (foo.f1 = loct1.f1) order by foo.f2 offset 10 limit 10;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------
+ Limit
+ Output: foo.f1, loct1.f1, foo.f2
+ -> Sort
+ Output: foo.f1, loct1.f1, foo.f2
+ Sort Key: foo.f2
+ -> Merge Join
+ Output: foo.f1, loct1.f1, foo.f2
+ Merge Cond: (foo.f1 = loct1.f1)
+ -> Merge Append
+ Sort Key: foo.f1
+ -> Index Scan using i_foo_f1 on public.foo foo_1
+ Output: foo_1.f1, foo_1.f2
+ -> Foreign Scan on public.foo2 foo_2
+ Output: foo_2.f1, foo_2.f2
+ Remote SQL: SELECT f1, f2 FROM public.loct1 ORDER BY f1 ASC NULLS LAST
+ -> Index Only Scan using i_loct1_f1 on public.loct1
+ Output: loct1.f1
+(17 rows)
+
+select foo.f1, loct1.f1 from foo join loct1 on (foo.f1 = loct1.f1) order by foo.f2 offset 10 limit 10;
+ f1 | f1
+----+----
+ 20 | 20
+ 22 | 22
+ 24 | 24
+ 26 | 26
+ 28 | 28
+ 30 | 30
+ 32 | 32
+ 34 | 34
+ 36 | 36
+ 38 | 38
+(10 rows)
+
+-- outer join; expressions in the clauses do not appear in the equivalence-class
+-- list, but the plan shape does not change compared to the previous query
+explain (verbose, costs off)
+ select foo.f1, loct1.f1 from foo left join loct1 on (foo.f1 = loct1.f1) order by foo.f2 offset 10 limit 10;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------
+ Limit
+ Output: foo.f1, loct1.f1, foo.f2
+ -> Sort
+ Output: foo.f1, loct1.f1, foo.f2
+ Sort Key: foo.f2
+ -> Merge Left Join
+ Output: foo.f1, loct1.f1, foo.f2
+ Merge Cond: (foo.f1 = loct1.f1)
+ -> Merge Append
+ Sort Key: foo.f1
+ -> Index Scan using i_foo_f1 on public.foo foo_1
+ Output: foo_1.f1, foo_1.f2
+ -> Foreign Scan on public.foo2 foo_2
+ Output: foo_2.f1, foo_2.f2
+ Remote SQL: SELECT f1, f2 FROM public.loct1 ORDER BY f1 ASC NULLS LAST
+ -> Index Only Scan using i_loct1_f1 on public.loct1
+ Output: loct1.f1
+(17 rows)
+
+select foo.f1, loct1.f1 from foo left join loct1 on (foo.f1 = loct1.f1) order by foo.f2 offset 10 limit 10;
+ f1 | f1
+----+----
+ 10 | 10
+ 11 |
+ 12 | 12
+ 13 |
+ 14 | 14
+ 15 |
+ 16 | 16
+ 17 |
+ 18 | 18
+ 19 |
+(10 rows)
+
+RESET enable_hashjoin;
+RESET enable_nestloop;
+-- Test that WHERE CURRENT OF is not supported
+begin;
+declare c cursor for select * from bar where f1 = 7;
+fetch from c;
+ f1 | f2
+----+-----
+ 7 | 177
+(1 row)
+
+update bar set f2 = null where current of c;
+ERROR: WHERE CURRENT OF is not supported for this table type
+rollback;
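+-- Illustrative sketch, not executed as part of this test: since WHERE CURRENT
+-- OF is not supported for foreign tables, one workaround is to fetch a key
+-- from the cursor and issue an ordinary keyed UPDATE instead, e.g.:
+--   begin;
+--   declare c cursor for select * from bar where f1 = 7;
+--   fetch from c;
+--   update bar set f2 = null where f1 = 7;
+--   commit;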
+explain (verbose, costs off)
+delete from foo where f1 < 5 returning *;
+ QUERY PLAN
+--------------------------------------------------------------------------------------
+ Delete on public.foo
+ Output: foo_1.f1, foo_1.f2
+ Delete on public.foo foo_1
+ Foreign Delete on public.foo2 foo_2
+ -> Append
+ -> Index Scan using i_foo_f1 on public.foo foo_1
+ Output: foo_1.tableoid, foo_1.ctid
+ Index Cond: (foo_1.f1 < 5)
+ -> Foreign Delete on public.foo2 foo_2
+ Remote SQL: DELETE FROM public.loct1 WHERE ((f1 < 5)) RETURNING f1, f2
+(10 rows)
+
+delete from foo where f1 < 5 returning *;
+ f1 | f2
+----+----
+ 1 | 1
+ 3 | 3
+ 0 | 0
+ 2 | 2
+ 4 | 4
+(5 rows)
+
+explain (verbose, costs off)
+update bar set f2 = f2 + 100 returning *;
+ QUERY PLAN
+------------------------------------------------------------------------------------------
+ Update on public.bar
+ Output: bar_1.f1, bar_1.f2
+ Update on public.bar bar_1
+ Foreign Update on public.bar2 bar_2
+ -> Result
+ Output: (bar.f2 + 100), bar.tableoid, bar.ctid, (NULL::record)
+ -> Append
+ -> Seq Scan on public.bar bar_1
+ Output: bar_1.f2, bar_1.tableoid, bar_1.ctid, NULL::record
+ -> Foreign Update on public.bar2 bar_2
+ Remote SQL: UPDATE public.loct2 SET f2 = (f2 + 100) RETURNING f1, f2
+(11 rows)
+
+update bar set f2 = f2 + 100 returning *;
+ f1 | f2
+----+-----
+ 1 | 311
+ 2 | 322
+ 6 | 266
+ 3 | 333
+ 4 | 344
+ 7 | 277
+(6 rows)
+
+-- Test that UPDATE/DELETE with inherited target works with row-level triggers
+CREATE TRIGGER trig_row_before
+BEFORE UPDATE OR DELETE ON bar2
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_row_after
+AFTER UPDATE OR DELETE ON bar2
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+explain (verbose, costs off)
+update bar set f2 = f2 + 100;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------
+ Update on public.bar
+ Update on public.bar bar_1
+ Foreign Update on public.bar2 bar_2
+ Remote SQL: UPDATE public.loct2 SET f1 = $2, f2 = $3, f3 = $4 WHERE ctid = $1 RETURNING f1, f2, f3
+ -> Result
+ Output: (bar.f2 + 100), bar.tableoid, bar.ctid, (NULL::record)
+ -> Append
+ -> Seq Scan on public.bar bar_1
+ Output: bar_1.f2, bar_1.tableoid, bar_1.ctid, NULL::record
+ -> Foreign Scan on public.bar2 bar_2
+ Output: bar_2.f2, bar_2.tableoid, bar_2.ctid, bar_2.*
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE
+(12 rows)
+
+update bar set f2 = f2 + 100;
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON bar2
+NOTICE: OLD: (3,333,33),NEW: (3,433,33)
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON bar2
+NOTICE: OLD: (4,344,44),NEW: (4,444,44)
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON bar2
+NOTICE: OLD: (7,277,77),NEW: (7,377,77)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON bar2
+NOTICE: OLD: (3,333,33),NEW: (3,433,33)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON bar2
+NOTICE: OLD: (4,344,44),NEW: (4,444,44)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON bar2
+NOTICE: OLD: (7,277,77),NEW: (7,377,77)
+explain (verbose, costs off)
+delete from bar where f2 < 400;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------
+ Delete on public.bar
+ Delete on public.bar bar_1
+ Foreign Delete on public.bar2 bar_2
+ Remote SQL: DELETE FROM public.loct2 WHERE ctid = $1 RETURNING f1, f2, f3
+ -> Append
+ -> Seq Scan on public.bar bar_1
+ Output: bar_1.tableoid, bar_1.ctid, NULL::record
+ Filter: (bar_1.f2 < 400)
+ -> Foreign Scan on public.bar2 bar_2
+ Output: bar_2.tableoid, bar_2.ctid, bar_2.*
+ Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 WHERE ((f2 < 400)) FOR UPDATE
+(11 rows)
+
+delete from bar where f2 < 400;
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW DELETE ON bar2
+NOTICE: OLD: (7,377,77)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW DELETE ON bar2
+NOTICE: OLD: (7,377,77)
+-- cleanup
+drop table foo cascade;
+NOTICE: drop cascades to foreign table foo2
+drop table bar cascade;
+NOTICE: drop cascades to foreign table bar2
+drop table loct1;
+drop table loct2;
+-- Test pushing down UPDATE/DELETE joins to the remote server
+create table parent (a int, b text);
+create table loct1 (a int, b text);
+create table loct2 (a int, b text);
+create foreign table remt1 (a int, b text)
+ server loopback options (table_name 'loct1');
+create foreign table remt2 (a int, b text)
+ server loopback options (table_name 'loct2');
+alter foreign table remt1 inherit parent;
+insert into remt1 values (1, 'foo');
+insert into remt1 values (2, 'bar');
+insert into remt2 values (1, 'foo');
+insert into remt2 values (2, 'bar');
+analyze remt1;
+analyze remt2;
+explain (verbose, costs off)
+update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------
+ Update on public.parent
+ Output: parent_1.a, parent_1.b, remt2.a, remt2.b
+ Update on public.parent parent_1
+ Foreign Update on public.remt1 parent_2
+ Remote SQL: UPDATE public.loct1 SET b = $2 WHERE ctid = $1 RETURNING a, b
+ -> Nested Loop
+ Output: (parent.b || remt2.b), remt2.*, remt2.a, remt2.b, parent.tableoid, parent.ctid, (NULL::record)
+ Join Filter: (parent.a = remt2.a)
+ -> Append
+ -> Seq Scan on public.parent parent_1
+ Output: parent_1.b, parent_1.a, parent_1.tableoid, parent_1.ctid, NULL::record
+ -> Foreign Scan on public.remt1 parent_2
+ Output: parent_2.b, parent_2.a, parent_2.tableoid, parent_2.ctid, parent_2.*
+ Remote SQL: SELECT a, b, ctid FROM public.loct1 FOR UPDATE
+ -> Materialize
+ Output: remt2.b, remt2.*, remt2.a
+ -> Foreign Scan on public.remt2
+ Output: remt2.b, remt2.*, remt2.a
+ Remote SQL: SELECT a, b FROM public.loct2
+(19 rows)
+
+update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *;
+ a | b | a | b
+---+--------+---+-----
+ 1 | foofoo | 1 | foo
+ 2 | barbar | 2 | bar
+(2 rows)
+
+explain (verbose, costs off)
+delete from parent using remt2 where parent.a = remt2.a returning parent;
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Delete on public.parent
+ Output: parent_1.*
+ Delete on public.parent parent_1
+ Foreign Delete on public.remt1 parent_2
+ Remote SQL: DELETE FROM public.loct1 WHERE ctid = $1 RETURNING a, b
+ -> Nested Loop
+ Output: remt2.*, parent.tableoid, parent.ctid
+ Join Filter: (parent.a = remt2.a)
+ -> Append
+ -> Seq Scan on public.parent parent_1
+ Output: parent_1.a, parent_1.tableoid, parent_1.ctid
+ -> Foreign Scan on public.remt1 parent_2
+ Output: parent_2.a, parent_2.tableoid, parent_2.ctid
+ Remote SQL: SELECT a, ctid FROM public.loct1 FOR UPDATE
+ -> Materialize
+ Output: remt2.*, remt2.a
+ -> Foreign Scan on public.remt2
+ Output: remt2.*, remt2.a
+ Remote SQL: SELECT a, b FROM public.loct2
+(19 rows)
+
+delete from parent using remt2 where parent.a = remt2.a returning parent;
+ parent
+------------
+ (1,foofoo)
+ (2,barbar)
+(2 rows)
+
+-- cleanup
+drop foreign table remt1;
+drop foreign table remt2;
+drop table loct1;
+drop table loct2;
+drop table parent;
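+-- Illustrative sketch, not executed as part of this test: when an UPDATE or
+-- DELETE joins two foreign tables on the same server, targets the foreign
+-- table directly, and needs no local row processing, postgres_fdw can ship
+-- the whole join as a single remote statement.  With definitions like remt1
+-- and remt2 above (minus the inheritance), that might look like:
+--   EXPLAIN (VERBOSE, COSTS OFF)
+--   UPDATE remt1 SET b = remt1.b || remt2.b FROM remt2 WHERE remt1.a = remt2.a;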
+-- ===================================================================
+-- test tuple routing for foreign-table partitions
+-- ===================================================================
+-- Test insert tuple routing
+create table itrtest (a int, b text) partition by list (a);
+create table loct1 (a int check (a in (1)), b text);
+create foreign table remp1 (a int check (a in (1)), b text) server loopback options (table_name 'loct1');
+create table loct2 (a int check (a in (2)), b text);
+create foreign table remp2 (b text, a int check (a in (2))) server loopback options (table_name 'loct2');
+alter table itrtest attach partition remp1 for values in (1);
+alter table itrtest attach partition remp2 for values in (2);
+insert into itrtest values (1, 'foo');
+insert into itrtest values (1, 'bar') returning *;
+ a | b
+---+-----
+ 1 | bar
+(1 row)
+
+insert into itrtest values (2, 'baz');
+insert into itrtest values (2, 'qux') returning *;
+ a | b
+---+-----
+ 2 | qux
+(1 row)
+
+insert into itrtest values (1, 'test1'), (2, 'test2') returning *;
+ a | b
+---+-------
+ 1 | test1
+ 2 | test2
+(2 rows)
+
+select tableoid::regclass, * FROM itrtest;
+ tableoid | a | b
+----------+---+-------
+ remp1 | 1 | foo
+ remp1 | 1 | bar
+ remp1 | 1 | test1
+ remp2 | 2 | baz
+ remp2 | 2 | qux
+ remp2 | 2 | test2
+(6 rows)
+
+select tableoid::regclass, * FROM remp1;
+ tableoid | a | b
+----------+---+-------
+ remp1 | 1 | foo
+ remp1 | 1 | bar
+ remp1 | 1 | test1
+(3 rows)
+
+select tableoid::regclass, * FROM remp2;
+ tableoid | b | a
+----------+-------+---
+ remp2 | baz | 2
+ remp2 | qux | 2
+ remp2 | test2 | 2
+(3 rows)
+
+delete from itrtest;
+create unique index loct1_idx on loct1 (a);
+-- DO NOTHING without an inference specification is supported
+insert into itrtest values (1, 'foo') on conflict do nothing returning *;
+ a | b
+---+-----
+ 1 | foo
+(1 row)
+
+insert into itrtest values (1, 'foo') on conflict do nothing returning *;
+ a | b
+---+---
+(0 rows)
+
+-- But other cases are not supported
+insert into itrtest values (1, 'bar') on conflict (a) do nothing;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into itrtest values (1, 'bar') on conflict (a) do update set b = excluded.b;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+select tableoid::regclass, * FROM itrtest;
+ tableoid | a | b
+----------+---+-----
+ remp1 | 1 | foo
+(1 row)
+
+delete from itrtest;
+drop index loct1_idx;
+-- Test that remote triggers work with insert tuple routing
+create function br_insert_trigfunc() returns trigger as $$
+begin
+ new.b := new.b || ' triggered !';
+ return new;
+end
+$$ language plpgsql;
+create trigger loct1_br_insert_trigger before insert on loct1
+ for each row execute procedure br_insert_trigfunc();
+create trigger loct2_br_insert_trigger before insert on loct2
+ for each row execute procedure br_insert_trigfunc();
+-- The new values are concatenated with ' triggered !'
+insert into itrtest values (1, 'foo') returning *;
+ a | b
+---+-----------------
+ 1 | foo triggered !
+(1 row)
+
+insert into itrtest values (2, 'qux') returning *;
+ a | b
+---+-----------------
+ 2 | qux triggered !
+(1 row)
+
+insert into itrtest values (1, 'test1'), (2, 'test2') returning *;
+ a | b
+---+-------------------
+ 1 | test1 triggered !
+ 2 | test2 triggered !
+(2 rows)
+
+with result as (insert into itrtest values (1, 'test1'), (2, 'test2') returning *) select * from result;
+ a | b
+---+-------------------
+ 1 | test1 triggered !
+ 2 | test2 triggered !
+(2 rows)
+
+drop trigger loct1_br_insert_trigger on loct1;
+drop trigger loct2_br_insert_trigger on loct2;
+drop table itrtest;
+drop table loct1;
+drop table loct2;
+-- Test update tuple routing
+create table utrtest (a int, b text) partition by list (a);
+create table loct (a int check (a in (1)), b text);
+create foreign table remp (a int check (a in (1)), b text) server loopback options (table_name 'loct');
+create table locp (a int check (a in (2)), b text);
+alter table utrtest attach partition remp for values in (1);
+alter table utrtest attach partition locp for values in (2);
+insert into utrtest values (1, 'foo');
+insert into utrtest values (2, 'qux');
+select tableoid::regclass, * FROM utrtest;
+ tableoid | a | b
+----------+---+-----
+ remp | 1 | foo
+ locp | 2 | qux
+(2 rows)
+
+select tableoid::regclass, * FROM remp;
+ tableoid | a | b
+----------+---+-----
+ remp | 1 | foo
+(1 row)
+
+select tableoid::regclass, * FROM locp;
+ tableoid | a | b
+----------+---+-----
+ locp | 2 | qux
+(1 row)
+
+-- It's not allowed to move a row from a foreign partition to another partition
+update utrtest set a = 2 where b = 'foo' returning *;
+ERROR: new row for relation "loct" violates check constraint "loct_a_check"
+DETAIL: Failing row contains (2, foo).
+CONTEXT: remote SQL command: UPDATE public.loct SET a = 2 WHERE ((b = 'foo'::text)) RETURNING a, b
+-- But the reverse is allowed
+update utrtest set a = 1 where b = 'qux' returning *;
+ERROR: cannot route tuples into foreign table to be updated "remp"
+select tableoid::regclass, * FROM utrtest;
+ tableoid | a | b
+----------+---+-----
+ remp | 1 | foo
+ locp | 2 | qux
+(2 rows)
+
+select tableoid::regclass, * FROM remp;
+ tableoid | a | b
+----------+---+-----
+ remp | 1 | foo
+(1 row)
+
+select tableoid::regclass, * FROM locp;
+ tableoid | a | b
+----------+---+-----
+ locp | 2 | qux
+(1 row)
+
+-- The executor should not let unexercised FDWs shut down
+update utrtest set a = 1 where b = 'foo';
+-- Test that remote triggers work with update tuple routing
+create trigger loct_br_insert_trigger before insert on loct
+ for each row execute procedure br_insert_trigfunc();
+delete from utrtest;
+insert into utrtest values (2, 'qux');
+-- Check case where the foreign partition is a subplan target rel
+explain (verbose, costs off)
+update utrtest set a = 1 where a = 1 or a = 2 returning *;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------
+ Update on public.utrtest
+ Output: utrtest_1.a, utrtest_1.b
+ Foreign Update on public.remp utrtest_1
+ Update on public.locp utrtest_2
+ -> Append
+ -> Foreign Update on public.remp utrtest_1
+ Remote SQL: UPDATE public.loct SET a = 1 WHERE (((a = 1) OR (a = 2))) RETURNING a, b
+ -> Seq Scan on public.locp utrtest_2
+ Output: 1, utrtest_2.tableoid, utrtest_2.ctid, NULL::record
+ Filter: ((utrtest_2.a = 1) OR (utrtest_2.a = 2))
+(10 rows)
+
+-- The new values are concatenated with ' triggered !'
+update utrtest set a = 1 where a = 1 or a = 2 returning *;
+ERROR: cannot route tuples into foreign table to be updated "remp"
+delete from utrtest;
+insert into utrtest values (2, 'qux');
+-- Check case where the foreign partition isn't a subplan target rel
+explain (verbose, costs off)
+update utrtest set a = 1 where a = 2 returning *;
+ QUERY PLAN
+-------------------------------------------------------
+ Update on public.utrtest
+ Output: utrtest_1.a, utrtest_1.b
+ Update on public.locp utrtest_1
+ -> Seq Scan on public.locp utrtest_1
+ Output: 1, utrtest_1.tableoid, utrtest_1.ctid
+ Filter: (utrtest_1.a = 2)
+(6 rows)
+
+-- The new values are concatenated with ' triggered !'
+update utrtest set a = 1 where a = 2 returning *;
+ a | b
+---+-----------------
+ 1 | qux triggered !
+(1 row)
+
+drop trigger loct_br_insert_trigger on loct;
+-- We can move rows to a foreign partition that has been updated already,
+-- but can't move rows to a foreign partition that hasn't been updated yet
+delete from utrtest;
+insert into utrtest values (1, 'foo');
+insert into utrtest values (2, 'qux');
+-- Test the former case:
+-- with a direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 1 returning *;
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Update on public.utrtest
+ Output: utrtest_1.a, utrtest_1.b
+ Foreign Update on public.remp utrtest_1
+ Update on public.locp utrtest_2
+ -> Append
+ -> Foreign Update on public.remp utrtest_1
+ Remote SQL: UPDATE public.loct SET a = 1 RETURNING a, b
+ -> Seq Scan on public.locp utrtest_2
+ Output: 1, utrtest_2.tableoid, utrtest_2.ctid, NULL::record
+(9 rows)
+
+update utrtest set a = 1 returning *;
+ERROR: cannot route tuples into foreign table to be updated "remp"
+delete from utrtest;
+insert into utrtest values (1, 'foo');
+insert into utrtest values (2, 'qux');
+-- with a non-direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 1 from (values (1), (2)) s(x) where a = s.x returning *;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------
+ Update on public.utrtest
+ Output: utrtest_1.a, utrtest_1.b, "*VALUES*".column1
+ Foreign Update on public.remp utrtest_1
+ Remote SQL: UPDATE public.loct SET a = $2 WHERE ctid = $1 RETURNING a, b
+ Update on public.locp utrtest_2
+ -> Hash Join
+ Output: 1, "*VALUES*".*, "*VALUES*".column1, utrtest.tableoid, utrtest.ctid, utrtest.*
+ Hash Cond: (utrtest.a = "*VALUES*".column1)
+ -> Append
+ -> Foreign Scan on public.remp utrtest_1
+ Output: utrtest_1.a, utrtest_1.tableoid, utrtest_1.ctid, utrtest_1.*
+ Remote SQL: SELECT a, b, ctid FROM public.loct FOR UPDATE
+ -> Seq Scan on public.locp utrtest_2
+ Output: utrtest_2.a, utrtest_2.tableoid, utrtest_2.ctid, NULL::record
+ -> Hash
+ Output: "*VALUES*".*, "*VALUES*".column1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".*, "*VALUES*".column1
+(18 rows)
+
+update utrtest set a = 1 from (values (1), (2)) s(x) where a = s.x returning *;
+ERROR: cannot route tuples into foreign table to be updated "remp"
+-- Change the definition of utrtest so that the foreign partition gets updated
+-- after the local partition
+delete from utrtest;
+alter table utrtest detach partition remp;
+drop foreign table remp;
+alter table loct drop constraint loct_a_check;
+alter table loct add check (a in (3));
+create foreign table remp (a int check (a in (3)), b text) server loopback options (table_name 'loct');
+alter table utrtest attach partition remp for values in (3);
+insert into utrtest values (2, 'qux');
+insert into utrtest values (3, 'xyzzy');
+-- Test the latter case:
+-- with a direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 3 returning *;
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Update on public.utrtest
+ Output: utrtest_1.a, utrtest_1.b
+ Update on public.locp utrtest_1
+ Foreign Update on public.remp utrtest_2
+ -> Append
+ -> Seq Scan on public.locp utrtest_1
+ Output: 3, utrtest_1.tableoid, utrtest_1.ctid, NULL::record
+ -> Foreign Update on public.remp utrtest_2
+ Remote SQL: UPDATE public.loct SET a = 3 RETURNING a, b
+(9 rows)
+
+update utrtest set a = 3 returning *; -- ERROR
+ERROR: cannot route tuples into foreign table to be updated "remp"
+-- with a non-direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 3 from (values (2), (3)) s(x) where a = s.x returning *;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Update on public.utrtest
+ Output: utrtest_1.a, utrtest_1.b, "*VALUES*".column1
+ Update on public.locp utrtest_1
+ Foreign Update on public.remp utrtest_2
+ Remote SQL: UPDATE public.loct SET a = $2 WHERE ctid = $1 RETURNING a, b
+ -> Hash Join
+ Output: 3, "*VALUES*".*, "*VALUES*".column1, utrtest.tableoid, utrtest.ctid, (NULL::record)
+ Hash Cond: (utrtest.a = "*VALUES*".column1)
+ -> Append
+ -> Seq Scan on public.locp utrtest_1
+ Output: utrtest_1.a, utrtest_1.tableoid, utrtest_1.ctid, NULL::record
+ -> Foreign Scan on public.remp utrtest_2
+ Output: utrtest_2.a, utrtest_2.tableoid, utrtest_2.ctid, utrtest_2.*
+ Remote SQL: SELECT a, b, ctid FROM public.loct FOR UPDATE
+ -> Hash
+ Output: "*VALUES*".*, "*VALUES*".column1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".*, "*VALUES*".column1
+(18 rows)
+
+update utrtest set a = 3 from (values (2), (3)) s(x) where a = s.x returning *; -- ERROR
+ERROR: cannot route tuples into foreign table to be updated "remp"
+drop table utrtest;
+drop table loct;
+-- Test copy tuple routing
+create table ctrtest (a int, b text) partition by list (a);
+create table loct1 (a int check (a in (1)), b text);
+create foreign table remp1 (a int check (a in (1)), b text) server loopback options (table_name 'loct1');
+create table loct2 (a int check (a in (2)), b text);
+create foreign table remp2 (b text, a int check (a in (2))) server loopback options (table_name 'loct2');
+alter table ctrtest attach partition remp1 for values in (1);
+alter table ctrtest attach partition remp2 for values in (2);
+copy ctrtest from stdin;
+select tableoid::regclass, * FROM ctrtest;
+ tableoid | a | b
+----------+---+-----
+ remp1 | 1 | foo
+ remp2 | 2 | qux
+(2 rows)
+
+select tableoid::regclass, * FROM remp1;
+ tableoid | a | b
+----------+---+-----
+ remp1 | 1 | foo
+(1 row)
+
+select tableoid::regclass, * FROM remp2;
+ tableoid | b | a
+----------+-----+---
+ remp2 | qux | 2
+(1 row)
+
+-- Copying into foreign partitions directly should work as well
+copy remp1 from stdin;
+select tableoid::regclass, * FROM remp1;
+ tableoid | a | b
+----------+---+-----
+ remp1 | 1 | foo
+ remp1 | 1 | bar
+(2 rows)
+
+drop table ctrtest;
+drop table loct1;
+drop table loct2;
+-- ===================================================================
+-- test COPY FROM
+-- ===================================================================
+create table loc2 (f1 int, f2 text);
+alter table loc2 set (autovacuum_enabled = 'false');
+create foreign table rem2 (f1 int, f2 text) server loopback options(table_name 'loc2');
+-- Test basic functionality
+copy rem2 from stdin;
+select * from rem2;
+ f1 | f2
+----+-----
+ 1 | foo
+ 2 | bar
+(2 rows)
+
+delete from rem2;
+-- Test check constraints
+alter table loc2 add constraint loc2_f1positive check (f1 >= 0);
+alter foreign table rem2 add constraint rem2_f1positive check (f1 >= 0);
+-- check constraint is enforced on the remote side, not locally
+copy rem2 from stdin;
+copy rem2 from stdin; -- ERROR
+ERROR: new row for relation "loc2" violates check constraint "loc2_f1positive"
+DETAIL: Failing row contains (-1, xyzzy).
+CONTEXT: remote SQL command: INSERT INTO public.loc2(f1, f2) VALUES ($1, $2)
+COPY rem2, line 1: "-1 xyzzy"
+select * from rem2;
+ f1 | f2
+----+-----
+ 1 | foo
+ 2 | bar
+(2 rows)
+
+alter foreign table rem2 drop constraint rem2_f1positive;
+alter table loc2 drop constraint loc2_f1positive;
+delete from rem2;
+-- Test local triggers
+create trigger trig_stmt_before before insert on rem2
+ for each statement execute procedure trigger_func();
+create trigger trig_stmt_after after insert on rem2
+ for each statement execute procedure trigger_func();
+create trigger trig_row_before before insert on rem2
+ for each row execute procedure trigger_data(23,'skidoo');
+create trigger trig_row_after after insert on rem2
+ for each row execute procedure trigger_data(23,'skidoo');
+copy rem2 from stdin;
+NOTICE: trigger_func(<NULL>) called: action = INSERT, when = BEFORE, level = STATEMENT
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem2
+NOTICE: NEW: (1,foo)
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem2
+NOTICE: NEW: (2,bar)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem2
+NOTICE: NEW: (1,foo)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem2
+NOTICE: NEW: (2,bar)
+NOTICE: trigger_func(<NULL>) called: action = INSERT, when = AFTER, level = STATEMENT
+select * from rem2;
+ f1 | f2
+----+-----
+ 1 | foo
+ 2 | bar
+(2 rows)
+
+drop trigger trig_row_before on rem2;
+drop trigger trig_row_after on rem2;
+drop trigger trig_stmt_before on rem2;
+drop trigger trig_stmt_after on rem2;
+delete from rem2;
+create trigger trig_row_before_insert before insert on rem2
+ for each row execute procedure trig_row_before_insupdate();
+-- The new values are concatenated with ' triggered !'
+copy rem2 from stdin;
+select * from rem2;
+ f1 | f2
+----+-----------------
+ 1 | foo triggered !
+ 2 | bar triggered !
+(2 rows)
+
+drop trigger trig_row_before_insert on rem2;
+delete from rem2;
+create trigger trig_null before insert on rem2
+ for each row execute procedure trig_null();
+-- Nothing happens
+copy rem2 from stdin;
+select * from rem2;
+ f1 | f2
+----+----
+(0 rows)
+
+drop trigger trig_null on rem2;
+delete from rem2;
+-- Test remote triggers
+create trigger trig_row_before_insert before insert on loc2
+ for each row execute procedure trig_row_before_insupdate();
+-- The new values are concatenated with ' triggered !'
+copy rem2 from stdin;
+select * from rem2;
+ f1 | f2
+----+-----------------
+ 1 | foo triggered !
+ 2 | bar triggered !
+(2 rows)
+
+drop trigger trig_row_before_insert on loc2;
+delete from rem2;
+create trigger trig_null before insert on loc2
+ for each row execute procedure trig_null();
+-- Nothing happens
+copy rem2 from stdin;
+select * from rem2;
+ f1 | f2
+----+----
+(0 rows)
+
+drop trigger trig_null on loc2;
+delete from rem2;
+-- Test a combination of local and remote triggers
+create trigger rem2_trig_row_before before insert on rem2
+ for each row execute procedure trigger_data(23,'skidoo');
+create trigger rem2_trig_row_after after insert on rem2
+ for each row execute procedure trigger_data(23,'skidoo');
+create trigger loc2_trig_row_before_insert before insert on loc2
+ for each row execute procedure trig_row_before_insupdate();
+copy rem2 from stdin;
+NOTICE: rem2_trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem2
+NOTICE: NEW: (1,foo)
+NOTICE: rem2_trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem2
+NOTICE: NEW: (2,bar)
+NOTICE: rem2_trig_row_after(23, skidoo) AFTER ROW INSERT ON rem2
+NOTICE: NEW: (1,"foo triggered !")
+NOTICE: rem2_trig_row_after(23, skidoo) AFTER ROW INSERT ON rem2
+NOTICE: NEW: (2,"bar triggered !")
+select * from rem2;
+ f1 | f2
+----+-----------------
+ 1 | foo triggered !
+ 2 | bar triggered !
+(2 rows)
+
+drop trigger rem2_trig_row_before on rem2;
+drop trigger rem2_trig_row_after on rem2;
+drop trigger loc2_trig_row_before_insert on loc2;
+delete from rem2;
+-- test COPY FROM with a foreign table created in the same transaction
+create table loc3 (f1 int, f2 text);
+begin;
+create foreign table rem3 (f1 int, f2 text)
+ server loopback options(table_name 'loc3');
+copy rem3 from stdin;
+commit;
+select * from rem3;
+ f1 | f2
+----+-----
+ 1 | foo
+ 2 | bar
+(2 rows)
+
+drop foreign table rem3;
+drop table loc3;
+-- ===================================================================
+-- test for TRUNCATE
+-- ===================================================================
+CREATE TABLE tru_rtable0 (id int primary key);
+CREATE FOREIGN TABLE tru_ftable (id int)
+ SERVER loopback OPTIONS (table_name 'tru_rtable0');
+INSERT INTO tru_rtable0 (SELECT x FROM generate_series(1,10) x);
+CREATE TABLE tru_ptable (id int) PARTITION BY HASH(id);
+CREATE TABLE tru_ptable__p0 PARTITION OF tru_ptable
+ FOR VALUES WITH (MODULUS 2, REMAINDER 0);
+CREATE TABLE tru_rtable1 (id int primary key);
+CREATE FOREIGN TABLE tru_ftable__p1 PARTITION OF tru_ptable
+ FOR VALUES WITH (MODULUS 2, REMAINDER 1)
+ SERVER loopback OPTIONS (table_name 'tru_rtable1');
+INSERT INTO tru_ptable (SELECT x FROM generate_series(11,20) x);
+CREATE TABLE tru_pk_table(id int primary key);
+CREATE TABLE tru_fk_table(fkey int references tru_pk_table(id));
+INSERT INTO tru_pk_table (SELECT x FROM generate_series(1,10) x);
+INSERT INTO tru_fk_table (SELECT x % 10 + 1 FROM generate_series(5,25) x);
+CREATE FOREIGN TABLE tru_pk_ftable (id int)
+ SERVER loopback OPTIONS (table_name 'tru_pk_table');
+CREATE TABLE tru_rtable_parent (id int);
+CREATE TABLE tru_rtable_child (id int);
+CREATE FOREIGN TABLE tru_ftable_parent (id int)
+ SERVER loopback OPTIONS (table_name 'tru_rtable_parent');
+CREATE FOREIGN TABLE tru_ftable_child () INHERITS (tru_ftable_parent)
+ SERVER loopback OPTIONS (table_name 'tru_rtable_child');
+INSERT INTO tru_rtable_parent (SELECT x FROM generate_series(1,8) x);
+INSERT INTO tru_rtable_child (SELECT x FROM generate_series(10, 18) x);
+-- normal truncate
+SELECT sum(id) FROM tru_ftable; -- 55
+ sum
+-----
+ 55
+(1 row)
+
+TRUNCATE tru_ftable;
+SELECT count(*) FROM tru_rtable0; -- 0
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) FROM tru_ftable; -- 0
+ count
+-------
+ 0
+(1 row)
+
+-- 'truncatable' option
+ALTER SERVER loopback OPTIONS (ADD truncatable 'false');
+TRUNCATE tru_ftable; -- error
+ERROR: foreign table "tru_ftable" does not allow truncates
+ALTER FOREIGN TABLE tru_ftable OPTIONS (ADD truncatable 'true');
+TRUNCATE tru_ftable; -- accepted
+ALTER FOREIGN TABLE tru_ftable OPTIONS (SET truncatable 'false');
+TRUNCATE tru_ftable; -- error
+ERROR: foreign table "tru_ftable" does not allow truncates
+ALTER SERVER loopback OPTIONS (DROP truncatable);
+ALTER FOREIGN TABLE tru_ftable OPTIONS (SET truncatable 'false');
+TRUNCATE tru_ftable; -- error
+ERROR: foreign table "tru_ftable" does not allow truncates
+ALTER FOREIGN TABLE tru_ftable OPTIONS (SET truncatable 'true');
+TRUNCATE tru_ftable; -- accepted
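+-- Illustrative summary (not exercised by this test): when both levels set the
+-- option, the table-level value wins, so a hypothetical server-wide default
+-- could be overridden per table, e.g.
+--   ALTER SERVER loopback OPTIONS (ADD truncatable 'false');
+--   ALTER FOREIGN TABLE tru_ftable OPTIONS (SET truncatable 'true');
+-- would leave tru_ftable truncatable while other tables on the server are not.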
+-- partitioned table with both local and foreign tables as partitions
+SELECT sum(id) FROM tru_ptable; -- 155
+ sum
+-----
+ 155
+(1 row)
+
+TRUNCATE tru_ptable;
+SELECT count(*) FROM tru_ptable; -- 0
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) FROM tru_ptable__p0; -- 0
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) FROM tru_ftable__p1; -- 0
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) FROM tru_rtable1; -- 0
+ count
+-------
+ 0
+(1 row)
+
+-- 'CASCADE' option
+SELECT sum(id) FROM tru_pk_ftable; -- 55
+ sum
+-----
+ 55
+(1 row)
+
+TRUNCATE tru_pk_ftable; -- fails due to the FK reference
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "tru_fk_table" references "tru_pk_table".
+HINT: Truncate table "tru_fk_table" at the same time, or use TRUNCATE ... CASCADE.
+CONTEXT: remote SQL command: TRUNCATE public.tru_pk_table CONTINUE IDENTITY RESTRICT
+TRUNCATE tru_pk_ftable CASCADE;
+SELECT count(*) FROM tru_pk_ftable; -- 0
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) FROM tru_fk_table; -- also truncated, 0
+ count
+-------
+ 0
+(1 row)
+
+-- truncate two tables in one command
+INSERT INTO tru_ftable (SELECT x FROM generate_series(1,8) x);
+INSERT INTO tru_pk_ftable (SELECT x FROM generate_series(3,10) x);
+SELECT count(*) from tru_ftable; -- 8
+ count
+-------
+ 8
+(1 row)
+
+SELECT count(*) from tru_pk_ftable; -- 8
+ count
+-------
+ 8
+(1 row)
+
+TRUNCATE tru_ftable, tru_pk_ftable CASCADE;
+SELECT count(*) from tru_ftable; -- 0
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) from tru_pk_ftable; -- 0
+ count
+-------
+ 0
+(1 row)
+
+-- truncate with ONLY clause
+-- Since ONLY is specified, tru_ftable_child, which locally inherits from
+-- tru_ftable_parent, is not truncated.
+TRUNCATE ONLY tru_ftable_parent;
+SELECT sum(id) FROM tru_ftable_parent; -- 126
+ sum
+-----
+ 126
+(1 row)
+
+TRUNCATE tru_ftable_parent;
+SELECT count(*) FROM tru_ftable_parent; -- 0
+ count
+-------
+ 0
+(1 row)
+
+-- case where the remote table has inherited children
+CREATE TABLE tru_rtable0_child () INHERITS (tru_rtable0);
+INSERT INTO tru_rtable0 (SELECT x FROM generate_series(5,9) x);
+INSERT INTO tru_rtable0_child (SELECT x FROM generate_series(10,14) x);
+SELECT sum(id) FROM tru_ftable; -- 95
+ sum
+-----
+ 95
+(1 row)
+
+-- Both parent and child tables in the foreign server are truncated
+-- even though ONLY is specified, because ONLY has no effect
+-- when truncating a foreign table.
+TRUNCATE ONLY tru_ftable;
+SELECT count(*) FROM tru_ftable; -- 0
+ count
+-------
+ 0
+(1 row)
+
+INSERT INTO tru_rtable0 (SELECT x FROM generate_series(21,25) x);
+INSERT INTO tru_rtable0_child (SELECT x FROM generate_series(26,30) x);
+SELECT sum(id) FROM tru_ftable; -- 255
+ sum
+-----
+ 255
+(1 row)
+
+TRUNCATE tru_ftable; -- truncate both parent and child
+SELECT count(*) FROM tru_ftable; -- 0
+ count
+-------
+ 0
+(1 row)
+
+-- cleanup
+DROP FOREIGN TABLE tru_ftable_parent, tru_ftable_child, tru_pk_ftable, tru_ftable__p1, tru_ftable;
+DROP TABLE tru_rtable0, tru_rtable1, tru_ptable, tru_ptable__p0, tru_pk_table, tru_fk_table,
+  tru_rtable_parent, tru_rtable_child, tru_rtable0_child;
+-- ===================================================================
+-- test IMPORT FOREIGN SCHEMA
+-- ===================================================================
+CREATE SCHEMA import_source;
+CREATE TABLE import_source.t1 (c1 int, c2 varchar NOT NULL);
+CREATE TABLE import_source.t2 (c1 int default 42, c2 varchar NULL, c3 text collate "POSIX");
+CREATE TYPE typ1 AS (m1 int, m2 varchar);
+CREATE TABLE import_source.t3 (c1 timestamptz default now(), c2 typ1);
+CREATE TABLE import_source."x 4" (c1 float8, "C 2" text, c3 varchar(42));
+CREATE TABLE import_source."x 5" (c1 float8);
+ALTER TABLE import_source."x 5" DROP COLUMN c1;
+CREATE TABLE import_source."x 6" (c1 int, c2 int generated always as (c1 * 2) stored);
+CREATE TABLE import_source.t4 (c1 int) PARTITION BY RANGE (c1);
+CREATE TABLE import_source.t4_part PARTITION OF import_source.t4
+ FOR VALUES FROM (1) TO (100);
+CREATE TABLE import_source.t4_part2 PARTITION OF import_source.t4
+ FOR VALUES FROM (100) TO (200);
+CREATE SCHEMA import_dest1;
+IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest1;
+\det+ import_dest1.*
+ List of foreign tables
+ Schema | Table | Server | FDW options | Description
+--------------+-------+----------+-------------------------------------------------+-------------
+ import_dest1 | t1 | loopback | (schema_name 'import_source', table_name 't1') |
+ import_dest1 | t2 | loopback | (schema_name 'import_source', table_name 't2') |
+ import_dest1 | t3 | loopback | (schema_name 'import_source', table_name 't3') |
+ import_dest1 | t4 | loopback | (schema_name 'import_source', table_name 't4') |
+ import_dest1 | x 4 | loopback | (schema_name 'import_source', table_name 'x 4') |
+ import_dest1 | x 5 | loopback | (schema_name 'import_source', table_name 'x 5') |
+ import_dest1 | x 6 | loopback | (schema_name 'import_source', table_name 'x 6') |
+(7 rows)
+
+\d import_dest1.*
+ Foreign table "import_dest1.t1"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+-------------------+-----------+----------+---------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+ c2 | character varying | | not null | | (column_name 'c2')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't1')
+
+ Foreign table "import_dest1.t2"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+-------------------+-----------+----------+---------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+ c2 | character varying | | | | (column_name 'c2')
+ c3 | text | POSIX | | | (column_name 'c3')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't2')
+
+ Foreign table "import_dest1.t3"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+--------------------------+-----------+----------+---------+--------------------
+ c1 | timestamp with time zone | | | | (column_name 'c1')
+ c2 | typ1 | | | | (column_name 'c2')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't3')
+
+ Foreign table "import_dest1.t4"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+---------+-----------+----------+---------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't4')
+
+ Foreign table "import_dest1.x 4"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+-----------------------+-----------+----------+---------+---------------------
+ c1 | double precision | | | | (column_name 'c1')
+ C 2 | text | | | | (column_name 'C 2')
+ c3 | character varying(42) | | | | (column_name 'c3')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 'x 4')
+
+ Foreign table "import_dest1.x 5"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+------+-----------+----------+---------+-------------
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 'x 5')
+
+ Foreign table "import_dest1.x 6"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+---------+-----------+----------+-------------------------------------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+ c2 | integer | | | generated always as (c1 * 2) stored | (column_name 'c2')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 'x 6')
+
+-- Options
+CREATE SCHEMA import_dest2;
+IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest2
+ OPTIONS (import_default 'true');
+\det+ import_dest2.*
+ List of foreign tables
+ Schema | Table | Server | FDW options | Description
+--------------+-------+----------+-------------------------------------------------+-------------
+ import_dest2 | t1 | loopback | (schema_name 'import_source', table_name 't1') |
+ import_dest2 | t2 | loopback | (schema_name 'import_source', table_name 't2') |
+ import_dest2 | t3 | loopback | (schema_name 'import_source', table_name 't3') |
+ import_dest2 | t4 | loopback | (schema_name 'import_source', table_name 't4') |
+ import_dest2 | x 4 | loopback | (schema_name 'import_source', table_name 'x 4') |
+ import_dest2 | x 5 | loopback | (schema_name 'import_source', table_name 'x 5') |
+ import_dest2 | x 6 | loopback | (schema_name 'import_source', table_name 'x 6') |
+(7 rows)
+
+\d import_dest2.*
+ Foreign table "import_dest2.t1"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+-------------------+-----------+----------+---------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+ c2 | character varying | | not null | | (column_name 'c2')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't1')
+
+ Foreign table "import_dest2.t2"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+-------------------+-----------+----------+---------+--------------------
+ c1 | integer | | | 42 | (column_name 'c1')
+ c2 | character varying | | | | (column_name 'c2')
+ c3 | text | POSIX | | | (column_name 'c3')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't2')
+
+ Foreign table "import_dest2.t3"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+--------------------------+-----------+----------+---------+--------------------
+ c1 | timestamp with time zone | | | now() | (column_name 'c1')
+ c2 | typ1 | | | | (column_name 'c2')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't3')
+
+ Foreign table "import_dest2.t4"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+---------+-----------+----------+---------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't4')
+
+ Foreign table "import_dest2.x 4"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+-----------------------+-----------+----------+---------+---------------------
+ c1 | double precision | | | | (column_name 'c1')
+ C 2 | text | | | | (column_name 'C 2')
+ c3 | character varying(42) | | | | (column_name 'c3')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 'x 4')
+
+ Foreign table "import_dest2.x 5"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+------+-----------+----------+---------+-------------
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 'x 5')
+
+ Foreign table "import_dest2.x 6"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+---------+-----------+----------+-------------------------------------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+ c2 | integer | | | generated always as (c1 * 2) stored | (column_name 'c2')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 'x 6')
+
+CREATE SCHEMA import_dest3;
+IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3
+ OPTIONS (import_collate 'false', import_generated 'false', import_not_null 'false');
+\det+ import_dest3.*
+ List of foreign tables
+ Schema | Table | Server | FDW options | Description
+--------------+-------+----------+-------------------------------------------------+-------------
+ import_dest3 | t1 | loopback | (schema_name 'import_source', table_name 't1') |
+ import_dest3 | t2 | loopback | (schema_name 'import_source', table_name 't2') |
+ import_dest3 | t3 | loopback | (schema_name 'import_source', table_name 't3') |
+ import_dest3 | t4 | loopback | (schema_name 'import_source', table_name 't4') |
+ import_dest3 | x 4 | loopback | (schema_name 'import_source', table_name 'x 4') |
+ import_dest3 | x 5 | loopback | (schema_name 'import_source', table_name 'x 5') |
+ import_dest3 | x 6 | loopback | (schema_name 'import_source', table_name 'x 6') |
+(7 rows)
+
+\d import_dest3.*
+ Foreign table "import_dest3.t1"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+-------------------+-----------+----------+---------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+ c2 | character varying | | | | (column_name 'c2')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't1')
+
+ Foreign table "import_dest3.t2"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+-------------------+-----------+----------+---------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+ c2 | character varying | | | | (column_name 'c2')
+ c3 | text | | | | (column_name 'c3')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't2')
+
+ Foreign table "import_dest3.t3"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+--------------------------+-----------+----------+---------+--------------------
+ c1 | timestamp with time zone | | | | (column_name 'c1')
+ c2 | typ1 | | | | (column_name 'c2')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't3')
+
+ Foreign table "import_dest3.t4"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+---------+-----------+----------+---------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 't4')
+
+ Foreign table "import_dest3.x 4"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+-----------------------+-----------+----------+---------+---------------------
+ c1 | double precision | | | | (column_name 'c1')
+ C 2 | text | | | | (column_name 'C 2')
+ c3 | character varying(42) | | | | (column_name 'c3')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 'x 4')
+
+ Foreign table "import_dest3.x 5"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+------+-----------+----------+---------+-------------
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 'x 5')
+
+ Foreign table "import_dest3.x 6"
+ Column | Type | Collation | Nullable | Default | FDW options
+--------+---------+-----------+----------+---------+--------------------
+ c1 | integer | | | | (column_name 'c1')
+ c2 | integer | | | | (column_name 'c2')
+Server: loopback
+FDW options: (schema_name 'import_source', table_name 'x 6')
+
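+-- Illustrative sketch (not part of this test): the import_* options may also be
+-- combined in a single command, e.g. a hypothetical
+--   IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest_x
+--     OPTIONS (import_default 'true', import_not_null 'false');
+-- would import column defaults while skipping NOT NULL constraints
+-- (import_dest_x being a hypothetical destination schema).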
+-- Check LIMIT TO and EXCEPT
+CREATE SCHEMA import_dest4;
+IMPORT FOREIGN SCHEMA import_source LIMIT TO (t1, nonesuch, t4_part)
+ FROM SERVER loopback INTO import_dest4;
+\det+ import_dest4.*
+ List of foreign tables
+ Schema | Table | Server | FDW options | Description
+--------------+---------+----------+-----------------------------------------------------+-------------
+ import_dest4 | t1 | loopback | (schema_name 'import_source', table_name 't1') |
+ import_dest4 | t4_part | loopback | (schema_name 'import_source', table_name 't4_part') |
+(2 rows)
+
+IMPORT FOREIGN SCHEMA import_source EXCEPT (t1, "x 4", nonesuch, t4_part)
+ FROM SERVER loopback INTO import_dest4;
+\det+ import_dest4.*
+ List of foreign tables
+ Schema | Table | Server | FDW options | Description
+--------------+---------+----------+-----------------------------------------------------+-------------
+ import_dest4 | t1 | loopback | (schema_name 'import_source', table_name 't1') |
+ import_dest4 | t2 | loopback | (schema_name 'import_source', table_name 't2') |
+ import_dest4 | t3 | loopback | (schema_name 'import_source', table_name 't3') |
+ import_dest4 | t4 | loopback | (schema_name 'import_source', table_name 't4') |
+ import_dest4 | t4_part | loopback | (schema_name 'import_source', table_name 't4_part') |
+ import_dest4 | x 5 | loopback | (schema_name 'import_source', table_name 'x 5') |
+ import_dest4 | x 6 | loopback | (schema_name 'import_source', table_name 'x 6') |
+(7 rows)
+
+-- Assorted error cases
+IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest4;
+ERROR: relation "t1" already exists
+CONTEXT: importing foreign table "t1"
+IMPORT FOREIGN SCHEMA nonesuch FROM SERVER loopback INTO import_dest4;
+ERROR: schema "nonesuch" is not present on foreign server "loopback"
+IMPORT FOREIGN SCHEMA nonesuch FROM SERVER loopback INTO notthere;
+ERROR: schema "notthere" does not exist
+IMPORT FOREIGN SCHEMA nonesuch FROM SERVER nowhere INTO notthere;
+ERROR: server "nowhere" does not exist
+-- Check case of a type present only on the remote server.
+-- We can fake this by dropping the type locally in our transaction.
+CREATE TYPE "Colors" AS ENUM ('red', 'green', 'blue');
+CREATE TABLE import_source.t5 (c1 int, c2 text collate "C", "Col" "Colors");
+CREATE SCHEMA import_dest5;
+BEGIN;
+DROP TYPE "Colors" CASCADE;
+NOTICE: drop cascades to column Col of table import_source.t5
+IMPORT FOREIGN SCHEMA import_source LIMIT TO (t5)
+ FROM SERVER loopback INTO import_dest5; -- ERROR
+ERROR: type "public.Colors" does not exist
+LINE 4: "Col" public."Colors" OPTIONS (column_name 'Col')
+ ^
+QUERY: CREATE FOREIGN TABLE t5 (
+ c1 integer OPTIONS (column_name 'c1'),
+ c2 text OPTIONS (column_name 'c2') COLLATE pg_catalog."C",
+ "Col" public."Colors" OPTIONS (column_name 'Col')
+) SERVER loopback
+OPTIONS (schema_name 'import_source', table_name 't5');
+CONTEXT: importing foreign table "t5"
+ROLLBACK;
+BEGIN;
+CREATE SERVER fetch101 FOREIGN DATA WRAPPER postgres_fdw OPTIONS( fetch_size '101' );
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'fetch101'
+AND srvoptions @> array['fetch_size=101'];
+ count
+-------
+ 1
+(1 row)
+
+ALTER SERVER fetch101 OPTIONS( SET fetch_size '202' );
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'fetch101'
+AND srvoptions @> array['fetch_size=101'];
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'fetch101'
+AND srvoptions @> array['fetch_size=202'];
+ count
+-------
+ 1
+(1 row)
+
+CREATE FOREIGN TABLE table30000 ( x int ) SERVER fetch101 OPTIONS ( fetch_size '30000' );
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30000'::regclass
+AND ftoptions @> array['fetch_size=30000'];
+ count
+-------
+ 1
+(1 row)
+
+ALTER FOREIGN TABLE table30000 OPTIONS ( SET fetch_size '60000');
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30000'::regclass
+AND ftoptions @> array['fetch_size=30000'];
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30000'::regclass
+AND ftoptions @> array['fetch_size=60000'];
+ count
+-------
+ 1
+(1 row)
+
+ROLLBACK;
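+-- Illustrative alternative (not exercised here): instead of matching the raw
+-- srvoptions/ftoptions arrays, options can be listed with the built-in
+-- pg_options_to_table() function, e.g.
+--   SELECT option_name, option_value
+--   FROM pg_options_to_table((SELECT srvoptions FROM pg_foreign_server
+--                             WHERE srvname = 'fetch101'));
+-- which returns one row per option, such as (fetch_size, 101).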
+-- ===================================================================
+-- test partitionwise joins
+-- ===================================================================
+SET enable_partitionwise_join=on;
+CREATE TABLE fprt1 (a int, b int, c varchar) PARTITION BY RANGE(a);
+CREATE TABLE fprt1_p1 (LIKE fprt1);
+CREATE TABLE fprt1_p2 (LIKE fprt1);
+ALTER TABLE fprt1_p1 SET (autovacuum_enabled = 'false');
+ALTER TABLE fprt1_p2 SET (autovacuum_enabled = 'false');
+INSERT INTO fprt1_p1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 249, 2) i;
+INSERT INTO fprt1_p2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(250, 499, 2) i;
+CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (0) TO (250)
+ SERVER loopback OPTIONS (table_name 'fprt1_p1', use_remote_estimate 'true');
+CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (250) TO (500)
+ SERVER loopback OPTIONS (TABLE_NAME 'fprt1_p2');
+ANALYZE fprt1;
+ANALYZE fprt1_p1;
+ANALYZE fprt1_p2;
+CREATE TABLE fprt2 (a int, b int, c varchar) PARTITION BY RANGE(b);
+CREATE TABLE fprt2_p1 (LIKE fprt2);
+CREATE TABLE fprt2_p2 (LIKE fprt2);
+ALTER TABLE fprt2_p1 SET (autovacuum_enabled = 'false');
+ALTER TABLE fprt2_p2 SET (autovacuum_enabled = 'false');
+INSERT INTO fprt2_p1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 249, 3) i;
+INSERT INTO fprt2_p2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(250, 499, 3) i;
+CREATE FOREIGN TABLE ftprt2_p1 (b int, c varchar, a int)
+ SERVER loopback OPTIONS (table_name 'fprt2_p1', use_remote_estimate 'true');
+ALTER TABLE fprt2 ATTACH PARTITION ftprt2_p1 FOR VALUES FROM (0) TO (250);
+CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (250) TO (500)
+ SERVER loopback OPTIONS (table_name 'fprt2_p2', use_remote_estimate 'true');
+ANALYZE fprt2;
+ANALYZE fprt2_p1;
+ANALYZE fprt2_p2;
+-- inner join three tables
+EXPLAIN (COSTS OFF)
+SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER JOIN fprt1 t3 ON (t2.b = t3.a) WHERE t1.a % 25 =0 ORDER BY 1,2,3;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Sort
+ Sort Key: t1.a, t3.c
+ -> Append
+ -> Foreign Scan
+ Relations: ((ftprt1_p1 t1_1) INNER JOIN (ftprt2_p1 t2_1)) INNER JOIN (ftprt1_p1 t3_1)
+ -> Foreign Scan
+ Relations: ((ftprt1_p2 t1_2) INNER JOIN (ftprt2_p2 t2_2)) INNER JOIN (ftprt1_p2 t3_2)
+(7 rows)
+
+SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER JOIN fprt1 t3 ON (t2.b = t3.a) WHERE t1.a % 25 =0 ORDER BY 1,2,3;
+ a | b | c
+-----+-----+------
+ 0 | 0 | 0000
+ 150 | 150 | 0003
+ 250 | 250 | 0005
+ 400 | 400 | 0008
+(4 rows)
+
+-- left outer join + nullable clause
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: t1.a, fprt2.b, fprt2.c
+ Relations: (public.ftprt1_p1 t1) LEFT JOIN (public.ftprt2_p1 fprt2)
+ Remote SQL: SELECT r5.a, r6.b, r6.c FROM (public.fprt1_p1 r5 LEFT JOIN public.fprt2_p1 r6 ON (((r5.a = r6.b)) AND ((r5.b = r6.a)) AND ((r6.a < 10)))) WHERE ((r5.a < 10)) ORDER BY r5.a ASC NULLS LAST, r6.b ASC NULLS LAST, r6.c ASC NULLS LAST
+(4 rows)
+
+SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3;
+ a | b | c
+---+---+------
+ 0 | 0 | 0000
+ 2 | |
+ 4 | |
+ 6 | 6 | 0000
+ 8 | |
+(5 rows)
+
+-- with whole-row reference; partitionwise join does not apply
+EXPLAIN (COSTS OFF)
+SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2;
+ QUERY PLAN
+--------------------------------------------------------
+ Sort
+ Sort Key: ((t1.*)::fprt1), ((t2.*)::fprt2)
+ -> Hash Full Join
+ Hash Cond: (t1.a = t2.b)
+ -> Append
+ -> Foreign Scan on ftprt1_p1 t1_1
+ -> Foreign Scan on ftprt1_p2 t1_2
+ -> Hash
+ -> Append
+ -> Foreign Scan on ftprt2_p1 t2_1
+ -> Foreign Scan on ftprt2_p2 t2_2
+(11 rows)
+
+SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2;
+ wr | wr
+----------------+----------------
+ (0,0,0000) | (0,0,0000)
+ (50,50,0001) |
+ (100,100,0002) |
+ (150,150,0003) | (150,150,0003)
+ (200,200,0004) |
+ (250,250,0005) | (250,250,0005)
+ (300,300,0006) |
+ (350,350,0007) |
+ (400,400,0008) | (400,400,0008)
+ (450,450,0009) |
+ | (75,75,0001)
+ | (225,225,0004)
+ | (325,325,0006)
+ | (475,475,0009)
+(14 rows)
+
+-- join with lateral reference
+EXPLAIN (COSTS OFF)
+SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2;
+ QUERY PLAN
+-----------------------------------------------------------------------
+ Sort
+ Sort Key: t1.a, t1.b
+ -> Append
+ -> Foreign Scan
+ Relations: (ftprt1_p1 t1_1) INNER JOIN (ftprt2_p1 t2_1)
+ -> Foreign Scan
+ Relations: (ftprt1_p2 t1_2) INNER JOIN (ftprt2_p2 t2_2)
+(7 rows)
+
+SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2;
+ a | b
+-----+-----
+ 0 | 0
+ 150 | 150
+ 250 | 250
+ 400 | 400
+(4 rows)
+
+-- with PHVs, partitionwise join selected but no join pushdown
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b;
+ QUERY PLAN
+-----------------------------------------------------------
+ Sort
+ Sort Key: fprt1.a, fprt2.b
+ -> Append
+ -> Hash Full Join
+ Hash Cond: (fprt1_1.a = fprt2_1.b)
+ -> Foreign Scan on ftprt1_p1 fprt1_1
+ -> Hash
+ -> Foreign Scan on ftprt2_p1 fprt2_1
+ -> Hash Full Join
+ Hash Cond: (fprt1_2.a = fprt2_2.b)
+ -> Foreign Scan on ftprt1_p2 fprt1_2
+ -> Hash
+ -> Foreign Scan on ftprt2_p2 fprt2_2
+(13 rows)
+
+SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b;
+ a | phv | b | phv
+-----+--------+-----+--------
+ 0 | t1_phv | 0 | t2_phv
+ 50 | t1_phv | |
+ 100 | t1_phv | |
+ 150 | t1_phv | 150 | t2_phv
+ 200 | t1_phv | |
+ 250 | t1_phv | 250 | t2_phv
+ 300 | t1_phv | |
+ 350 | t1_phv | |
+ 400 | t1_phv | 400 | t2_phv
+ 450 | t1_phv | |
+ | | 75 | t2_phv
+ | | 225 | t2_phv
+ | | 325 | t2_phv
+ | | 475 | t2_phv
+(14 rows)
+
+-- test FOR UPDATE; partitionwise join does not apply
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1;
+ QUERY PLAN
+--------------------------------------------------------------
+ LockRows
+ -> Sort
+ Sort Key: t1.a
+ -> Hash Join
+ Hash Cond: (t2.b = t1.a)
+ -> Append
+ -> Foreign Scan on ftprt2_p1 t2_1
+ -> Foreign Scan on ftprt2_p2 t2_2
+ -> Hash
+ -> Append
+ -> Foreign Scan on ftprt1_p1 t1_1
+ -> Foreign Scan on ftprt1_p2 t1_2
+(12 rows)
+
+SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1;
+ a | b
+-----+-----
+ 0 | 0
+ 150 | 150
+ 250 | 250
+ 400 | 400
+(4 rows)
+
+RESET enable_partitionwise_join;
+-- ===================================================================
+-- test partitionwise aggregates
+-- ===================================================================
+CREATE TABLE pagg_tab (a int, b int, c text) PARTITION BY RANGE(a);
+CREATE TABLE pagg_tab_p1 (LIKE pagg_tab);
+CREATE TABLE pagg_tab_p2 (LIKE pagg_tab);
+CREATE TABLE pagg_tab_p3 (LIKE pagg_tab);
+INSERT INTO pagg_tab_p1 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 10;
+INSERT INTO pagg_tab_p2 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 20 and (i % 30) >= 10;
+INSERT INTO pagg_tab_p3 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 30 and (i % 30) >= 20;
+-- Create foreign partitions
+CREATE FOREIGN TABLE fpagg_tab_p1 PARTITION OF pagg_tab FOR VALUES FROM (0) TO (10) SERVER loopback OPTIONS (table_name 'pagg_tab_p1');
+CREATE FOREIGN TABLE fpagg_tab_p2 PARTITION OF pagg_tab FOR VALUES FROM (10) TO (20) SERVER loopback OPTIONS (table_name 'pagg_tab_p2');
+CREATE FOREIGN TABLE fpagg_tab_p3 PARTITION OF pagg_tab FOR VALUES FROM (20) TO (30) SERVER loopback OPTIONS (table_name 'pagg_tab_p3');
+ANALYZE pagg_tab;
+ANALYZE fpagg_tab_p1;
+ANALYZE fpagg_tab_p2;
+ANALYZE fpagg_tab_p3;
+-- When the GROUP BY clause matches the PARTITION KEY.
+-- Plan with partitionwise aggregates is disabled
+SET enable_partitionwise_aggregate TO false;
+EXPLAIN (COSTS OFF)
+SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
+ QUERY PLAN
+-----------------------------------------------------------
+ Sort
+ Sort Key: pagg_tab.a
+ -> HashAggregate
+ Group Key: pagg_tab.a
+ Filter: (avg(pagg_tab.b) < '22'::numeric)
+ -> Append
+ -> Foreign Scan on fpagg_tab_p1 pagg_tab_1
+ -> Foreign Scan on fpagg_tab_p2 pagg_tab_2
+ -> Foreign Scan on fpagg_tab_p3 pagg_tab_3
+(9 rows)
+
+-- Plan with partitionwise aggregates is enabled
+SET enable_partitionwise_aggregate TO true;
+EXPLAIN (COSTS OFF)
+SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Sort
+ Sort Key: pagg_tab.a
+ -> Append
+ -> Foreign Scan
+ Relations: Aggregate on (fpagg_tab_p1 pagg_tab)
+ -> Foreign Scan
+ Relations: Aggregate on (fpagg_tab_p2 pagg_tab_1)
+ -> Foreign Scan
+ Relations: Aggregate on (fpagg_tab_p3 pagg_tab_2)
+(9 rows)
+
+SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
+ a | sum | min | count
+----+------+-----+-------
+ 0 | 2000 | 0 | 100
+ 1 | 2100 | 1 | 100
+ 10 | 2000 | 0 | 100
+ 11 | 2100 | 1 | 100
+ 20 | 2000 | 0 | 100
+ 21 | 2100 | 1 | 100
+(6 rows)
+
+-- Check with whole-row reference
+-- Should have all the columns in the target list for the given relation
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
+ QUERY PLAN
+------------------------------------------------------------------------
+ Sort
+ Output: t1.a, (count(((t1.*)::pagg_tab)))
+ Sort Key: t1.a
+ -> Append
+ -> HashAggregate
+ Output: t1.a, count(((t1.*)::pagg_tab))
+ Group Key: t1.a
+ Filter: (avg(t1.b) < '22'::numeric)
+ -> Foreign Scan on public.fpagg_tab_p1 t1
+ Output: t1.a, t1.*, t1.b
+ Remote SQL: SELECT a, b, c FROM public.pagg_tab_p1
+ -> HashAggregate
+ Output: t1_1.a, count(((t1_1.*)::pagg_tab))
+ Group Key: t1_1.a
+ Filter: (avg(t1_1.b) < '22'::numeric)
+ -> Foreign Scan on public.fpagg_tab_p2 t1_1
+ Output: t1_1.a, t1_1.*, t1_1.b
+ Remote SQL: SELECT a, b, c FROM public.pagg_tab_p2
+ -> HashAggregate
+ Output: t1_2.a, count(((t1_2.*)::pagg_tab))
+ Group Key: t1_2.a
+ Filter: (avg(t1_2.b) < '22'::numeric)
+ -> Foreign Scan on public.fpagg_tab_p3 t1_2
+ Output: t1_2.a, t1_2.*, t1_2.b
+ Remote SQL: SELECT a, b, c FROM public.pagg_tab_p3
+(25 rows)
+
+SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
+ a | count
+----+-------
+ 0 | 100
+ 1 | 100
+ 10 | 100
+ 11 | 100
+ 20 | 100
+ 21 | 100
+(6 rows)
+
+-- When the GROUP BY clause does not match the PARTITION KEY.
+EXPLAIN (COSTS OFF)
+SELECT b, avg(a), max(a), count(*) FROM pagg_tab GROUP BY b HAVING sum(a) < 700 ORDER BY 1;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Sort
+ Sort Key: pagg_tab.b
+ -> Finalize HashAggregate
+ Group Key: pagg_tab.b
+ Filter: (sum(pagg_tab.a) < 700)
+ -> Append
+ -> Partial HashAggregate
+ Group Key: pagg_tab.b
+ -> Foreign Scan on fpagg_tab_p1 pagg_tab
+ -> Partial HashAggregate
+ Group Key: pagg_tab_1.b
+ -> Foreign Scan on fpagg_tab_p2 pagg_tab_1
+ -> Partial HashAggregate
+ Group Key: pagg_tab_2.b
+ -> Foreign Scan on fpagg_tab_p3 pagg_tab_2
+(15 rows)
+
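+-- Illustrative remark (not exercised here): since b is not the partition key,
+-- only partial aggregation runs above each Foreign Scan and the aggregate is
+-- not pushed down; a hypothetical
+--   EXPLAIN (VERBOSE, COSTS OFF) SELECT b, sum(a) FROM pagg_tab GROUP BY b;
+-- would be expected to show plain per-partition Remote SQL such as
+--   SELECT a, b FROM public.pagg_tab_p1
+-- with the grouped HAVING filter of the query above applied only at the
+-- Finalize step.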
+-- ===================================================================
+-- access rights and superuser
+-- ===================================================================
+-- Non-superusers cannot establish an FDW connection without a password in the connstr
+CREATE ROLE regress_nosuper NOSUPERUSER;
+GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO regress_nosuper;
+SET ROLE regress_nosuper;
+SHOW is_superuser;
+ is_superuser
+--------------
+ off
+(1 row)
+
+-- This will be OK, we can create the FDW
+DO $d$
+ BEGIN
+ EXECUTE $$CREATE SERVER loopback_nopw FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (dbname '$$||current_database()||$$',
+ port '$$||current_setting('port')||$$'
+ )$$;
+ END;
+$d$;
+-- Passwordless user mappings can still be created, but non-superusers cannot use them
+CREATE USER MAPPING FOR public SERVER loopback_nopw;
+CREATE USER MAPPING FOR CURRENT_USER SERVER loopback_nopw;
+CREATE FOREIGN TABLE ft1_nopw (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10) default 'ft1',
+ c8 user_enum
+) SERVER loopback_nopw OPTIONS (schema_name 'public', table_name 'ft1');
+SELECT 1 FROM ft1_nopw LIMIT 1;
+ERROR: password is required
+DETAIL: Non-superusers must provide a password in the user mapping.
+-- If we add a password to the connstr it'll fail, because we don't allow passwords
+-- in connstrs, only in user mappings.
+DO $d$
+ BEGIN
+ EXECUTE $$ALTER SERVER loopback_nopw OPTIONS (ADD password 'dummypw')$$;
+ END;
+$d$;
+ERROR: invalid option "password"
+HINT: Valid options in this context are: service, passfile, channel_binding, connect_timeout, dbname, host, hostaddr, port, options, application_name, keepalives, keepalives_idle, keepalives_interval, keepalives_count, tcp_user_timeout, sslmode, sslcompression, sslcert, sslkey, sslrootcert, sslcrl, sslcrldir, sslsni, requirepeer, ssl_min_protocol_version, ssl_max_protocol_version, gssencmode, krbsrvname, gsslib, target_session_attrs, use_remote_estimate, fdw_startup_cost, fdw_tuple_cost, extensions, updatable, truncatable, fetch_size, batch_size, async_capable, keep_connections
+CONTEXT: SQL statement "ALTER SERVER loopback_nopw OPTIONS (ADD password 'dummypw')"
+PL/pgSQL function inline_code_block line 3 at EXECUTE
+-- If we add a password for our user mapping instead, we should get a different
+-- error because the password wasn't actually *used* when we run with trust auth.
+--
+-- This won't work with installcheck, but neither will most of the FDW checks.
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password 'dummypw');
+SELECT 1 FROM ft1_nopw LIMIT 1;
+ERROR: password is required
+DETAIL: Non-superuser cannot connect if the server does not request a password.
+HINT: Target server's authentication method must be changed or password_required=false set in the user mapping attributes.
+-- Unpriv user cannot make the mapping passwordless
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password_required 'false');
+ERROR: password_required=false is superuser-only
+HINT: User mappings with the password_required option set to false may only be created or modified by the superuser
+SELECT 1 FROM ft1_nopw LIMIT 1;
+ERROR: password is required
+DETAIL: Non-superuser cannot connect if the server does not request a password.
+HINT: Target server's authentication method must be changed or password_required=false set in the user mapping attributes.
+RESET ROLE;
+-- But the superuser can
+ALTER USER MAPPING FOR regress_nosuper SERVER loopback_nopw OPTIONS (ADD password_required 'false');
+SET ROLE regress_nosuper;
+-- Should finally work now
+SELECT 1 FROM ft1_nopw LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+-- An unprivileged user also cannot set sslcert / sslkey on the user mapping.
+-- First set password_required so that we see the right error messages.
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (SET password_required 'true');
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD sslcert 'foo.crt');
+ERROR: sslcert and sslkey are superuser-only
+HINT: User mappings with the sslcert or sslkey options set may only be created or modified by the superuser
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD sslkey 'foo.key');
+ERROR: sslcert and sslkey are superuser-only
+HINT: User mappings with the sslcert or sslkey options set may only be created or modified by the superuser
+-- We're done with the role named after a specific user and need to check the
+-- changes to the public mapping.
+DROP USER MAPPING FOR CURRENT_USER SERVER loopback_nopw;
+-- This will fail again as it'll resolve the user mapping for public, which
+-- lacks password_required=false
+SELECT 1 FROM ft1_nopw LIMIT 1;
+ERROR: password is required
+DETAIL: Non-superusers must provide a password in the user mapping.
+RESET ROLE;
+-- The user mapping for public is passwordless and lacks the password_required=false
+-- mapping option, but will work because the current user is a superuser.
+SELECT 1 FROM ft1_nopw LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+-- cleanup
+DROP USER MAPPING FOR public SERVER loopback_nopw;
+DROP OWNED BY regress_nosuper;
+DROP ROLE regress_nosuper;
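+-- Illustrative note (not part of this test): a superuser could likewise relax
+-- the check for every user of a server by setting the option on the PUBLIC
+-- mapping, e.g. a hypothetical
+--   ALTER USER MAPPING FOR public SERVER loopback_nopw
+--     OPTIONS (ADD password_required 'false');
+-- would let non-superusers use the passwordless server as well.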
+-- Clean-up
+RESET enable_partitionwise_aggregate;
+-- Two-phase transactions are not supported.
+BEGIN;
+SELECT count(*) FROM ft1;
+ count
+-------
+ 822
+(1 row)
+
+-- error here
+PREPARE TRANSACTION 'fdw_tpc';
+ERROR: cannot PREPARE a transaction that has operated on postgres_fdw foreign tables
+ROLLBACK;
+WARNING: there is no transaction in progress
+-- ===================================================================
+-- reestablish new connection
+-- ===================================================================
+-- Change the application_name of the remote connection to a special one
+-- so that we can easily terminate the connection later.
+ALTER SERVER loopback OPTIONS (application_name 'fdw_retry_check');
+-- If debug_discard_caches is active, it results in
+-- dropping remote connections after every transaction, making it
+-- impossible to test termination meaningfully. So turn that off
+-- for this test.
+SET debug_discard_caches = 0;
+-- Make sure we have a remote connection.
+SELECT 1 FROM ft1 LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+-- Terminate the remote connection and wait for the termination to complete.
+SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity
+ WHERE application_name = 'fdw_retry_check';
+ pg_terminate_backend
+----------------------
+ t
+(1 row)
+
+-- This query should detect the broken connection when starting a new remote
+-- transaction, reestablish the connection, and then succeed.
+BEGIN;
+SELECT 1 FROM ft1 LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+-- If we detect the broken connection when starting a new remote
+-- subtransaction, we should fail instead of establishing a new connection.
+-- Terminate the remote connection and wait for the termination to complete.
+SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity
+ WHERE application_name = 'fdw_retry_check';
+ pg_terminate_backend
+----------------------
+ t
+(1 row)
+
+SAVEPOINT s;
+-- The text of the error might vary across platforms, so only show SQLSTATE.
+\set VERBOSITY sqlstate
+SELECT 1 FROM ft1 LIMIT 1; -- should fail
+ERROR: 08006
+\set VERBOSITY default
+COMMIT;
+RESET debug_discard_caches;
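+-- Illustrative check (not part of this test): the remote backend spawned by
+-- the retry can be observed from the same database, e.g. a hypothetical
+--   SELECT application_name, state FROM pg_stat_activity
+--   WHERE application_name = 'fdw_retry_check';
+-- would list the re-established session while the connection stays cached.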
+-- =============================================================================
+-- test connection invalidation cases and postgres_fdw_get_connections function
+-- =============================================================================
+-- Make sure all the existing cached connections are closed.
+SELECT 1 FROM postgres_fdw_disconnect_all();
+ ?column?
+----------
+ 1
+(1 row)
+
+-- No cached connections, so no records should be output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+ server_name
+-------------
+(0 rows)
+
+-- This test case is for closing the connection in pgfdw_xact_callback
+BEGIN;
+-- Connection xact depth becomes 1, i.e. the connection is in the midst of the xact.
+SELECT 1 FROM ft1 LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+SELECT 1 FROM ft7 LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+-- List all the existing cached connections. loopback and loopback3 should be
+-- output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+ server_name
+-------------
+ loopback
+ loopback3
+(2 rows)
+
+-- Connections are not closed at the end of the alter and drop statements.
+-- That's because the connections are in the midst of this xact;
+-- they are just marked as invalid in pgfdw_inval_callback.
+ALTER SERVER loopback OPTIONS (ADD use_remote_estimate 'off');
+DROP SERVER loopback3 CASCADE;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to user mapping for public on server loopback3
+drop cascades to foreign table ft7
+-- List all the existing cached connections. loopback and loopback3
+-- should be output as invalid connections. Also the server name for
+-- loopback3 should be NULL because the server was dropped.
+SELECT * FROM postgres_fdw_get_connections() ORDER BY 1;
+ server_name | valid
+-------------+-------
+ loopback | f
+ | f
+(2 rows)
+
+-- The invalid connections get closed in pgfdw_xact_callback during commit.
+COMMIT;
+-- All cached connections were closed while committing the above xact, so no
+-- records should be output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+ server_name
+-------------
+(0 rows)
+
+-- =======================================================================
+-- test postgres_fdw_disconnect and postgres_fdw_disconnect_all functions
+-- =======================================================================
+BEGIN;
+-- Make sure the loopback connection is cached.
+SELECT 1 FROM ft1 LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+-- Make sure the loopback2 connection is cached.
+SELECT 1 FROM ft6 LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+-- List all the existing cached connections. loopback and loopback2 should be
+-- output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+ server_name
+-------------
+ loopback
+ loopback2
+(2 rows)
+
+-- Issue a warning and return false, as the loopback connection is still in use
+-- and cannot be closed.
+SELECT postgres_fdw_disconnect('loopback');
+WARNING: cannot close connection for server "loopback" because it is still in use
+ postgres_fdw_disconnect
+-------------------------
+ f
+(1 row)
+
+-- List all the existing cached connections. loopback and loopback2 should be
+-- output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+ server_name
+-------------
+ loopback
+ loopback2
+(2 rows)
+
+-- Return false as the connections are still in use; warnings are issued.
+-- But disable the warnings temporarily, because their order is not stable.
+SET client_min_messages = 'ERROR';
+SELECT postgres_fdw_disconnect_all();
+ postgres_fdw_disconnect_all
+-----------------------------
+ f
+(1 row)
+
+RESET client_min_messages;
+COMMIT;
+-- Ensure that loopback2 connection is closed.
+SELECT 1 FROM postgres_fdw_disconnect('loopback2');
+ ?column?
+----------
+ 1
+(1 row)
+
+SELECT server_name FROM postgres_fdw_get_connections() WHERE server_name = 'loopback2';
+ server_name
+-------------
+(0 rows)
+
+-- Return false as loopback2 connection is closed already.
+SELECT postgres_fdw_disconnect('loopback2');
+ postgres_fdw_disconnect
+-------------------------
+ f
+(1 row)
+
+-- Return an error as there is no foreign server with given name.
+SELECT postgres_fdw_disconnect('unknownserver');
+ERROR: server "unknownserver" does not exist
+-- Let's ensure to close all the existing cached connections.
+SELECT 1 FROM postgres_fdw_disconnect_all();
+ ?column?
+----------
+ 1
+(1 row)
+
+-- No cached connections, so no records should be output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+ server_name
+-------------
+(0 rows)
+
+-- =============================================================================
+-- test case for having multiple cached connections for a foreign server
+-- =============================================================================
+CREATE ROLE regress_multi_conn_user1 SUPERUSER;
+CREATE ROLE regress_multi_conn_user2 SUPERUSER;
+CREATE USER MAPPING FOR regress_multi_conn_user1 SERVER loopback;
+CREATE USER MAPPING FOR regress_multi_conn_user2 SERVER loopback;
+BEGIN;
+-- Will cache loopback connection with user mapping for regress_multi_conn_user1
+SET ROLE regress_multi_conn_user1;
+SELECT 1 FROM ft1 LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+RESET ROLE;
+-- Will cache loopback connection with user mapping for regress_multi_conn_user2
+SET ROLE regress_multi_conn_user2;
+SELECT 1 FROM ft1 LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+RESET ROLE;
+-- Should output two connections for loopback server
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+ server_name
+-------------
+ loopback
+ loopback
+(2 rows)
+
+COMMIT;
+-- Let's ensure to close all the existing cached connections.
+SELECT 1 FROM postgres_fdw_disconnect_all();
+ ?column?
+----------
+ 1
+(1 row)
+
+-- No cached connections, so no records should be output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+ server_name
+-------------
+(0 rows)
+
+-- Clean up
+DROP USER MAPPING FOR regress_multi_conn_user1 SERVER loopback;
+DROP USER MAPPING FOR regress_multi_conn_user2 SERVER loopback;
+DROP ROLE regress_multi_conn_user1;
+DROP ROLE regress_multi_conn_user2;
+-- ===================================================================
+-- Test foreign server level option keep_connections
+-- ===================================================================
+-- By default, the connections associated with a foreign server are cached,
+-- i.e. the keep_connections option is on. Set it to off.
+ALTER SERVER loopback OPTIONS (keep_connections 'off');
+-- The connection to the loopback server is closed at the end of the xact,
+-- as keep_connections was set to off.
+SELECT 1 FROM ft1 LIMIT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+-- No cached connections, so no records should be output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+ server_name
+-------------
+(0 rows)
+
+ALTER SERVER loopback OPTIONS (SET keep_connections 'on');
+-- ===================================================================
+-- batch insert
+-- ===================================================================
+BEGIN;
+CREATE SERVER batch10 FOREIGN DATA WRAPPER postgres_fdw OPTIONS( batch_size '10' );
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'batch10'
+AND srvoptions @> array['batch_size=10'];
+ count
+-------
+ 1
+(1 row)
+
+ALTER SERVER batch10 OPTIONS( SET batch_size '20' );
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'batch10'
+AND srvoptions @> array['batch_size=10'];
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'batch10'
+AND srvoptions @> array['batch_size=20'];
+ count
+-------
+ 1
+(1 row)
+
+CREATE FOREIGN TABLE table30 ( x int ) SERVER batch10 OPTIONS ( batch_size '30' );
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30'::regclass
+AND ftoptions @> array['batch_size=30'];
+ count
+-------
+ 1
+(1 row)
+
+ALTER FOREIGN TABLE table30 OPTIONS ( SET batch_size '40');
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30'::regclass
+AND ftoptions @> array['batch_size=30'];
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30'::regclass
+AND ftoptions @> array['batch_size=40'];
+ count
+-------
+ 1
+(1 row)
+
+ROLLBACK;
+CREATE TABLE batch_table ( x int );
+CREATE FOREIGN TABLE ftable ( x int ) SERVER loopback OPTIONS ( table_name 'batch_table', batch_size '10' );
+EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO ftable SELECT * FROM generate_series(1, 10) i;
+ QUERY PLAN
+-------------------------------------------------------------
+ Insert on public.ftable
+ Remote SQL: INSERT INTO public.batch_table(x) VALUES ($1)
+ Batch Size: 10
+ -> Function Scan on pg_catalog.generate_series i
+ Output: i.i
+ Function Call: generate_series(1, 10)
+(6 rows)
+
+INSERT INTO ftable SELECT * FROM generate_series(1, 10) i;
+INSERT INTO ftable SELECT * FROM generate_series(11, 31) i;
+INSERT INTO ftable VALUES (32);
+INSERT INTO ftable VALUES (33), (34);
+SELECT COUNT(*) FROM ftable;
+ count
+-------
+ 34
+(1 row)
+
+TRUNCATE batch_table;
+DROP FOREIGN TABLE ftable;
+-- check that batches large enough to exceed the max number of bind parameters work
+CREATE FOREIGN TABLE ftable ( x int ) SERVER loopback OPTIONS ( table_name 'batch_table', batch_size '100000' );
+INSERT INTO ftable SELECT * FROM generate_series(1, 70000) i;
+SELECT COUNT(*) FROM ftable;
+ count
+-------
+ 70000
+(1 row)
+
+TRUNCATE batch_table;
+DROP FOREIGN TABLE ftable;
+-- Disable batch insert
+CREATE FOREIGN TABLE ftable ( x int ) SERVER loopback OPTIONS ( table_name 'batch_table', batch_size '1' );
+EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO ftable VALUES (1), (2);
+ QUERY PLAN
+-------------------------------------------------------------
+ Insert on public.ftable
+ Remote SQL: INSERT INTO public.batch_table(x) VALUES ($1)
+ Batch Size: 1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1
+(5 rows)
+
+INSERT INTO ftable VALUES (1), (2);
+SELECT COUNT(*) FROM ftable;
+ count
+-------
+ 2
+(1 row)
+
+-- Disable batch inserting into foreign tables with BEFORE ROW INSERT triggers
+-- even if the batch_size option is enabled.
+ALTER FOREIGN TABLE ftable OPTIONS ( SET batch_size '10' );
+CREATE TRIGGER trig_row_before BEFORE INSERT ON ftable
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO ftable VALUES (3), (4);
+ QUERY PLAN
+-------------------------------------------------------------
+ Insert on public.ftable
+ Remote SQL: INSERT INTO public.batch_table(x) VALUES ($1)
+ Batch Size: 1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1
+(5 rows)
+
+INSERT INTO ftable VALUES (3), (4);
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON ftable
+NOTICE: NEW: (3)
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON ftable
+NOTICE: NEW: (4)
+SELECT COUNT(*) FROM ftable;
+ count
+-------
+ 4
+(1 row)
+
+-- Clean up
+DROP TRIGGER trig_row_before ON ftable;
+DROP FOREIGN TABLE ftable;
+DROP TABLE batch_table;
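+-- Illustrative note (not part of the test; names below are only examples):
+-- batch_size may be given at either the server or the table level, and a
+-- table-level setting overrides the server-level one for that table, e.g.
+--   ALTER SERVER loopback OPTIONS (ADD batch_size '100');
+--   ALTER FOREIGN TABLE some_ftable OPTIONS (ADD batch_size '10');  -- wins for some_ftable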
+-- Use partitioning
+CREATE TABLE batch_table ( x int ) PARTITION BY HASH (x);
+CREATE TABLE batch_table_p0 (LIKE batch_table);
+CREATE FOREIGN TABLE batch_table_p0f
+ PARTITION OF batch_table
+ FOR VALUES WITH (MODULUS 3, REMAINDER 0)
+ SERVER loopback
+ OPTIONS (table_name 'batch_table_p0', batch_size '10');
+CREATE TABLE batch_table_p1 (LIKE batch_table);
+CREATE FOREIGN TABLE batch_table_p1f
+ PARTITION OF batch_table
+ FOR VALUES WITH (MODULUS 3, REMAINDER 1)
+ SERVER loopback
+ OPTIONS (table_name 'batch_table_p1', batch_size '1');
+CREATE TABLE batch_table_p2
+ PARTITION OF batch_table
+ FOR VALUES WITH (MODULUS 3, REMAINDER 2);
+INSERT INTO batch_table SELECT * FROM generate_series(1, 66) i;
+SELECT COUNT(*) FROM batch_table;
+ count
+-------
+ 66
+(1 row)
+
+-- Check that enabling batched inserts doesn't interfere with cross-partition
+-- updates
+CREATE TABLE batch_cp_upd_test (a int) PARTITION BY LIST (a);
+CREATE TABLE batch_cp_upd_test1 (LIKE batch_cp_upd_test);
+CREATE FOREIGN TABLE batch_cp_upd_test1_f
+ PARTITION OF batch_cp_upd_test
+ FOR VALUES IN (1)
+ SERVER loopback
+ OPTIONS (table_name 'batch_cp_upd_test1', batch_size '10');
+CREATE TABLE batch_cp_up_test1 PARTITION OF batch_cp_upd_test
+ FOR VALUES IN (2);
+INSERT INTO batch_cp_upd_test VALUES (1), (2);
+-- The following tries to move a row from the local partition to the foreign one
+UPDATE batch_cp_upd_test t SET a = 1 FROM (VALUES (1), (2)) s(a) WHERE t.a = s.a;
+ERROR: cannot route tuples into foreign table to be updated "batch_cp_upd_test1_f"
+SELECT tableoid::regclass, * FROM batch_cp_upd_test;
+ tableoid | a
+----------------------+---
+ batch_cp_upd_test1_f | 1
+ batch_cp_up_test1 | 2
+(2 rows)
+
+-- Clean up
+DROP TABLE batch_table, batch_cp_upd_test, batch_table_p0, batch_table_p1 CASCADE;
+-- Use partitioning
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+CREATE TABLE batch_table ( x int, field1 text, field2 text) PARTITION BY HASH (x);
+CREATE TABLE batch_table_p0 (LIKE batch_table);
+ALTER TABLE batch_table_p0 ADD CONSTRAINT p0_pkey PRIMARY KEY (x);
+CREATE FOREIGN TABLE batch_table_p0f
+ PARTITION OF batch_table
+ FOR VALUES WITH (MODULUS 2, REMAINDER 0)
+ SERVER loopback
+ OPTIONS (table_name 'batch_table_p0');
+CREATE TABLE batch_table_p1 (LIKE batch_table);
+ALTER TABLE batch_table_p1 ADD CONSTRAINT p1_pkey PRIMARY KEY (x);
+CREATE FOREIGN TABLE batch_table_p1f
+ PARTITION OF batch_table
+ FOR VALUES WITH (MODULUS 2, REMAINDER 1)
+ SERVER loopback
+ OPTIONS (table_name 'batch_table_p1');
+INSERT INTO batch_table SELECT i, 'test'||i, 'test'|| i FROM generate_series(1, 50) i;
+SELECT COUNT(*) FROM batch_table;
+ count
+-------
+ 50
+(1 row)
+
+SELECT * FROM batch_table ORDER BY x;
+ x | field1 | field2
+----+--------+--------
+ 1 | test1 | test1
+ 2 | test2 | test2
+ 3 | test3 | test3
+ 4 | test4 | test4
+ 5 | test5 | test5
+ 6 | test6 | test6
+ 7 | test7 | test7
+ 8 | test8 | test8
+ 9 | test9 | test9
+ 10 | test10 | test10
+ 11 | test11 | test11
+ 12 | test12 | test12
+ 13 | test13 | test13
+ 14 | test14 | test14
+ 15 | test15 | test15
+ 16 | test16 | test16
+ 17 | test17 | test17
+ 18 | test18 | test18
+ 19 | test19 | test19
+ 20 | test20 | test20
+ 21 | test21 | test21
+ 22 | test22 | test22
+ 23 | test23 | test23
+ 24 | test24 | test24
+ 25 | test25 | test25
+ 26 | test26 | test26
+ 27 | test27 | test27
+ 28 | test28 | test28
+ 29 | test29 | test29
+ 30 | test30 | test30
+ 31 | test31 | test31
+ 32 | test32 | test32
+ 33 | test33 | test33
+ 34 | test34 | test34
+ 35 | test35 | test35
+ 36 | test36 | test36
+ 37 | test37 | test37
+ 38 | test38 | test38
+ 39 | test39 | test39
+ 40 | test40 | test40
+ 41 | test41 | test41
+ 42 | test42 | test42
+ 43 | test43 | test43
+ 44 | test44 | test44
+ 45 | test45 | test45
+ 46 | test46 | test46
+ 47 | test47 | test47
+ 48 | test48 | test48
+ 49 | test49 | test49
+ 50 | test50 | test50
+(50 rows)
+
+ALTER SERVER loopback OPTIONS (DROP batch_size);
+-- ===================================================================
+-- test asynchronous execution
+-- ===================================================================
+ALTER SERVER loopback OPTIONS (DROP extensions);
+ALTER SERVER loopback OPTIONS (ADD async_capable 'true');
+ALTER SERVER loopback2 OPTIONS (ADD async_capable 'true');
+CREATE TABLE async_pt (a int, b int, c text) PARTITION BY RANGE (a);
+CREATE TABLE base_tbl1 (a int, b int, c text);
+CREATE TABLE base_tbl2 (a int, b int, c text);
+CREATE FOREIGN TABLE async_p1 PARTITION OF async_pt FOR VALUES FROM (1000) TO (2000)
+ SERVER loopback OPTIONS (table_name 'base_tbl1');
+CREATE FOREIGN TABLE async_p2 PARTITION OF async_pt FOR VALUES FROM (2000) TO (3000)
+ SERVER loopback2 OPTIONS (table_name 'base_tbl2');
+INSERT INTO async_p1 SELECT 1000 + i, i, to_char(i, 'FM0000') FROM generate_series(0, 999, 5) i;
+INSERT INTO async_p2 SELECT 2000 + i, i, to_char(i, 'FM0000') FROM generate_series(0, 999, 5) i;
+ANALYZE async_pt;
+-- simple queries
+CREATE TABLE result_tbl (a int, b int, c text);
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b % 100 = 0;
+ QUERY PLAN
+----------------------------------------------------------------------------------------
+ Insert on public.result_tbl
+ -> Append
+ -> Async Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1 WHERE (((b % 100) = 0))
+ -> Async Foreign Scan on public.async_p2 async_pt_2
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2 WHERE (((b % 100) = 0))
+(8 rows)
+
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b % 100 = 0;
+SELECT * FROM result_tbl ORDER BY a;
+ a | b | c
+------+-----+------
+ 1000 | 0 | 0000
+ 1100 | 100 | 0100
+ 1200 | 200 | 0200
+ 1300 | 300 | 0300
+ 1400 | 400 | 0400
+ 1500 | 500 | 0500
+ 1600 | 600 | 0600
+ 1700 | 700 | 0700
+ 1800 | 800 | 0800
+ 1900 | 900 | 0900
+ 2000 | 0 | 0000
+ 2100 | 100 | 0100
+ 2200 | 200 | 0200
+ 2300 | 300 | 0300
+ 2400 | 400 | 0400
+ 2500 | 500 | 0500
+ 2600 | 600 | 0600
+ 2700 | 700 | 0700
+ 2800 | 800 | 0800
+ 2900 | 900 | 0900
+(20 rows)
+
+DELETE FROM result_tbl;
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+ QUERY PLAN
+----------------------------------------------------------------
+ Insert on public.result_tbl
+ -> Append
+ -> Async Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Filter: (async_pt_1.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1
+ -> Async Foreign Scan on public.async_p2 async_pt_2
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c
+ Filter: (async_pt_2.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2
+(10 rows)
+
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+SELECT * FROM result_tbl ORDER BY a;
+ a | b | c
+------+-----+------
+ 1505 | 505 | 0505
+ 2505 | 505 | 0505
+(2 rows)
+
+DELETE FROM result_tbl;
+-- Check case where multiple partitions use the same connection
+CREATE TABLE base_tbl3 (a int, b int, c text);
+CREATE FOREIGN TABLE async_p3 PARTITION OF async_pt FOR VALUES FROM (3000) TO (4000)
+ SERVER loopback2 OPTIONS (table_name 'base_tbl3');
+INSERT INTO async_p3 SELECT 3000 + i, i, to_char(i, 'FM0000') FROM generate_series(0, 999, 5) i;
+ANALYZE async_pt;
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+ QUERY PLAN
+----------------------------------------------------------------
+ Insert on public.result_tbl
+ -> Append
+ -> Async Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Filter: (async_pt_1.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1
+ -> Async Foreign Scan on public.async_p2 async_pt_2
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c
+ Filter: (async_pt_2.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2
+ -> Async Foreign Scan on public.async_p3 async_pt_3
+ Output: async_pt_3.a, async_pt_3.b, async_pt_3.c
+ Filter: (async_pt_3.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl3
+(14 rows)
+
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+SELECT * FROM result_tbl ORDER BY a;
+ a | b | c
+------+-----+------
+ 1505 | 505 | 0505
+ 2505 | 505 | 0505
+ 3505 | 505 | 0505
+(3 rows)
+
+DELETE FROM result_tbl;
+DROP FOREIGN TABLE async_p3;
+DROP TABLE base_tbl3;
+-- Check case where the partitioned table has local/remote partitions
+CREATE TABLE async_p3 PARTITION OF async_pt FOR VALUES FROM (3000) TO (4000);
+INSERT INTO async_p3 SELECT 3000 + i, i, to_char(i, 'FM0000') FROM generate_series(0, 999, 5) i;
+ANALYZE async_pt;
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+ QUERY PLAN
+----------------------------------------------------------------
+ Insert on public.result_tbl
+ -> Append
+ -> Async Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Filter: (async_pt_1.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1
+ -> Async Foreign Scan on public.async_p2 async_pt_2
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c
+ Filter: (async_pt_2.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2
+ -> Seq Scan on public.async_p3 async_pt_3
+ Output: async_pt_3.a, async_pt_3.b, async_pt_3.c
+ Filter: (async_pt_3.b === 505)
+(13 rows)
+
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+SELECT * FROM result_tbl ORDER BY a;
+ a | b | c
+------+-----+------
+ 1505 | 505 | 0505
+ 2505 | 505 | 0505
+ 3505 | 505 | 0505
+(3 rows)
+
+DELETE FROM result_tbl;
+-- partitionwise joins
+SET enable_partitionwise_join TO true;
+CREATE TABLE join_tbl (a1 int, b1 int, c1 text, a2 int, b2 int, c2 text);
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO join_tbl SELECT * FROM async_pt t1, async_pt t2 WHERE t1.a = t2.a AND t1.b = t2.b AND t1.b % 100 = 0;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Insert on public.join_tbl
+ -> Append
+ -> Async Foreign Scan
+ Output: t1_1.a, t1_1.b, t1_1.c, t2_1.a, t2_1.b, t2_1.c
+ Relations: (public.async_p1 t1_1) INNER JOIN (public.async_p1 t2_1)
+ Remote SQL: SELECT r5.a, r5.b, r5.c, r8.a, r8.b, r8.c FROM (public.base_tbl1 r5 INNER JOIN public.base_tbl1 r8 ON (((r5.a = r8.a)) AND ((r5.b = r8.b)) AND (((r5.b % 100) = 0))))
+ -> Async Foreign Scan
+ Output: t1_2.a, t1_2.b, t1_2.c, t2_2.a, t2_2.b, t2_2.c
+ Relations: (public.async_p2 t1_2) INNER JOIN (public.async_p2 t2_2)
+ Remote SQL: SELECT r6.a, r6.b, r6.c, r9.a, r9.b, r9.c FROM (public.base_tbl2 r6 INNER JOIN public.base_tbl2 r9 ON (((r6.a = r9.a)) AND ((r6.b = r9.b)) AND (((r6.b % 100) = 0))))
+ -> Hash Join
+ Output: t1_3.a, t1_3.b, t1_3.c, t2_3.a, t2_3.b, t2_3.c
+ Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.b = t1_3.b))
+ -> Seq Scan on public.async_p3 t2_3
+ Output: t2_3.a, t2_3.b, t2_3.c
+ -> Hash
+ Output: t1_3.a, t1_3.b, t1_3.c
+ -> Seq Scan on public.async_p3 t1_3
+ Output: t1_3.a, t1_3.b, t1_3.c
+ Filter: ((t1_3.b % 100) = 0)
+(20 rows)
+
+INSERT INTO join_tbl SELECT * FROM async_pt t1, async_pt t2 WHERE t1.a = t2.a AND t1.b = t2.b AND t1.b % 100 = 0;
+SELECT * FROM join_tbl ORDER BY a1;
+ a1 | b1 | c1 | a2 | b2 | c2
+------+-----+------+------+-----+------
+ 1000 | 0 | 0000 | 1000 | 0 | 0000
+ 1100 | 100 | 0100 | 1100 | 100 | 0100
+ 1200 | 200 | 0200 | 1200 | 200 | 0200
+ 1300 | 300 | 0300 | 1300 | 300 | 0300
+ 1400 | 400 | 0400 | 1400 | 400 | 0400
+ 1500 | 500 | 0500 | 1500 | 500 | 0500
+ 1600 | 600 | 0600 | 1600 | 600 | 0600
+ 1700 | 700 | 0700 | 1700 | 700 | 0700
+ 1800 | 800 | 0800 | 1800 | 800 | 0800
+ 1900 | 900 | 0900 | 1900 | 900 | 0900
+ 2000 | 0 | 0000 | 2000 | 0 | 0000
+ 2100 | 100 | 0100 | 2100 | 100 | 0100
+ 2200 | 200 | 0200 | 2200 | 200 | 0200
+ 2300 | 300 | 0300 | 2300 | 300 | 0300
+ 2400 | 400 | 0400 | 2400 | 400 | 0400
+ 2500 | 500 | 0500 | 2500 | 500 | 0500
+ 2600 | 600 | 0600 | 2600 | 600 | 0600
+ 2700 | 700 | 0700 | 2700 | 700 | 0700
+ 2800 | 800 | 0800 | 2800 | 800 | 0800
+ 2900 | 900 | 0900 | 2900 | 900 | 0900
+ 3000 | 0 | 0000 | 3000 | 0 | 0000
+ 3100 | 100 | 0100 | 3100 | 100 | 0100
+ 3200 | 200 | 0200 | 3200 | 200 | 0200
+ 3300 | 300 | 0300 | 3300 | 300 | 0300
+ 3400 | 400 | 0400 | 3400 | 400 | 0400
+ 3500 | 500 | 0500 | 3500 | 500 | 0500
+ 3600 | 600 | 0600 | 3600 | 600 | 0600
+ 3700 | 700 | 0700 | 3700 | 700 | 0700
+ 3800 | 800 | 0800 | 3800 | 800 | 0800
+ 3900 | 900 | 0900 | 3900 | 900 | 0900
+(30 rows)
+
+DELETE FROM join_tbl;
+RESET enable_partitionwise_join;
+-- Test rescan of an async Append node with do_exec_prune=false
+SET enable_hashjoin TO false;
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO join_tbl SELECT * FROM async_p1 t1, async_pt t2 WHERE t1.a = t2.a AND t1.b = t2.b AND t1.b % 100 = 0;
+ QUERY PLAN
+----------------------------------------------------------------------------------------
+ Insert on public.join_tbl
+ -> Nested Loop
+ Output: t1.a, t1.b, t1.c, t2.a, t2.b, t2.c
+ Join Filter: ((t1.a = t2.a) AND (t1.b = t2.b))
+ -> Foreign Scan on public.async_p1 t1
+ Output: t1.a, t1.b, t1.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1 WHERE (((b % 100) = 0))
+ -> Append
+ -> Async Foreign Scan on public.async_p1 t2_1
+ Output: t2_1.a, t2_1.b, t2_1.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1
+ -> Async Foreign Scan on public.async_p2 t2_2
+ Output: t2_2.a, t2_2.b, t2_2.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2
+ -> Seq Scan on public.async_p3 t2_3
+ Output: t2_3.a, t2_3.b, t2_3.c
+(16 rows)
+
+INSERT INTO join_tbl SELECT * FROM async_p1 t1, async_pt t2 WHERE t1.a = t2.a AND t1.b = t2.b AND t1.b % 100 = 0;
+SELECT * FROM join_tbl ORDER BY a1;
+ a1 | b1 | c1 | a2 | b2 | c2
+------+-----+------+------+-----+------
+ 1000 | 0 | 0000 | 1000 | 0 | 0000
+ 1100 | 100 | 0100 | 1100 | 100 | 0100
+ 1200 | 200 | 0200 | 1200 | 200 | 0200
+ 1300 | 300 | 0300 | 1300 | 300 | 0300
+ 1400 | 400 | 0400 | 1400 | 400 | 0400
+ 1500 | 500 | 0500 | 1500 | 500 | 0500
+ 1600 | 600 | 0600 | 1600 | 600 | 0600
+ 1700 | 700 | 0700 | 1700 | 700 | 0700
+ 1800 | 800 | 0800 | 1800 | 800 | 0800
+ 1900 | 900 | 0900 | 1900 | 900 | 0900
+(10 rows)
+
+DELETE FROM join_tbl;
+RESET enable_hashjoin;
+-- Test interaction of async execution with plan-time partition pruning
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM async_pt WHERE a < 3000;
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Append
+ -> Async Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1 WHERE ((a < 3000))
+ -> Async Foreign Scan on public.async_p2 async_pt_2
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2 WHERE ((a < 3000))
+(7 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM async_pt WHERE a < 2000;
+ QUERY PLAN
+-----------------------------------------------------------------------
+ Foreign Scan on public.async_p1 async_pt
+ Output: async_pt.a, async_pt.b, async_pt.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1 WHERE ((a < 2000))
+(3 rows)
+
+-- Test interaction of async execution with run-time partition pruning
+SET plan_cache_mode TO force_generic_plan;
+PREPARE async_pt_query (int, int) AS
+ INSERT INTO result_tbl SELECT * FROM async_pt WHERE a < $1 AND b === $2;
+EXPLAIN (VERBOSE, COSTS OFF)
+EXECUTE async_pt_query (3000, 505);
+ QUERY PLAN
+------------------------------------------------------------------------------------------
+ Insert on public.result_tbl
+ -> Append
+ Subplans Removed: 1
+ -> Async Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Filter: (async_pt_1.b === $2)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1 WHERE ((a < $1::integer))
+ -> Async Foreign Scan on public.async_p2 async_pt_2
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c
+ Filter: (async_pt_2.b === $2)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2 WHERE ((a < $1::integer))
+(11 rows)
+
+EXECUTE async_pt_query (3000, 505);
+SELECT * FROM result_tbl ORDER BY a;
+ a | b | c
+------+-----+------
+ 1505 | 505 | 0505
+ 2505 | 505 | 0505
+(2 rows)
+
+DELETE FROM result_tbl;
+EXPLAIN (VERBOSE, COSTS OFF)
+EXECUTE async_pt_query (2000, 505);
+ QUERY PLAN
+------------------------------------------------------------------------------------------
+ Insert on public.result_tbl
+ -> Append
+ Subplans Removed: 2
+ -> Async Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Filter: (async_pt_1.b === $2)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1 WHERE ((a < $1::integer))
+(7 rows)
+
+EXECUTE async_pt_query (2000, 505);
+SELECT * FROM result_tbl ORDER BY a;
+ a | b | c
+------+-----+------
+ 1505 | 505 | 0505
+(1 row)
+
+DELETE FROM result_tbl;
+RESET plan_cache_mode;
+CREATE TABLE local_tbl(a int, b int, c text);
+INSERT INTO local_tbl VALUES (1505, 505, 'foo'), (2505, 505, 'bar');
+ANALYZE local_tbl;
+CREATE INDEX base_tbl1_idx ON base_tbl1 (a);
+CREATE INDEX base_tbl2_idx ON base_tbl2 (a);
+CREATE INDEX async_p3_idx ON async_p3 (a);
+ANALYZE base_tbl1;
+ANALYZE base_tbl2;
+ANALYZE async_p3;
+ALTER FOREIGN TABLE async_p1 OPTIONS (use_remote_estimate 'true');
+ALTER FOREIGN TABLE async_p2 OPTIONS (use_remote_estimate 'true');
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM local_tbl, async_pt WHERE local_tbl.a = async_pt.a AND local_tbl.c = 'bar';
+ QUERY PLAN
+------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: local_tbl.a, local_tbl.b, local_tbl.c, async_pt.a, async_pt.b, async_pt.c
+ -> Seq Scan on public.local_tbl
+ Output: local_tbl.a, local_tbl.b, local_tbl.c
+ Filter: (local_tbl.c = 'bar'::text)
+ -> Append
+ -> Async Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1 WHERE (($1::integer = a))
+ -> Async Foreign Scan on public.async_p2 async_pt_2
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2 WHERE (($1::integer = a))
+ -> Seq Scan on public.async_p3 async_pt_3
+ Output: async_pt_3.a, async_pt_3.b, async_pt_3.c
+ Filter: (local_tbl.a = async_pt_3.a)
+(15 rows)
+
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+SELECT * FROM local_tbl, async_pt WHERE local_tbl.a = async_pt.a AND local_tbl.c = 'bar';
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Nested Loop (actual rows=1 loops=1)
+ -> Seq Scan on local_tbl (actual rows=1 loops=1)
+ Filter: (c = 'bar'::text)
+ Rows Removed by Filter: 1
+ -> Append (actual rows=1 loops=1)
+ -> Async Foreign Scan on async_p1 async_pt_1 (never executed)
+ -> Async Foreign Scan on async_p2 async_pt_2 (actual rows=1 loops=1)
+ -> Seq Scan on async_p3 async_pt_3 (never executed)
+ Filter: (local_tbl.a = a)
+(9 rows)
+
+SELECT * FROM local_tbl, async_pt WHERE local_tbl.a = async_pt.a AND local_tbl.c = 'bar';
+ a | b | c | a | b | c
+------+-----+-----+------+-----+------
+ 2505 | 505 | bar | 2505 | 505 | 0505
+(1 row)
+
+ALTER FOREIGN TABLE async_p1 OPTIONS (DROP use_remote_estimate);
+ALTER FOREIGN TABLE async_p2 OPTIONS (DROP use_remote_estimate);
+DROP TABLE local_tbl;
+DROP INDEX base_tbl1_idx;
+DROP INDEX base_tbl2_idx;
+DROP INDEX async_p3_idx;
+-- Disable async execution if we use gating Result nodes for pseudoconstant
+-- quals
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM async_pt WHERE CURRENT_USER = SESSION_USER;
+ QUERY PLAN
+----------------------------------------------------------------
+ Append
+ -> Result
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ One-Time Filter: (CURRENT_USER = SESSION_USER)
+ -> Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1
+ -> Result
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c
+ One-Time Filter: (CURRENT_USER = SESSION_USER)
+ -> Foreign Scan on public.async_p2 async_pt_2
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2
+ -> Result
+ Output: async_pt_3.a, async_pt_3.b, async_pt_3.c
+ One-Time Filter: (CURRENT_USER = SESSION_USER)
+ -> Seq Scan on public.async_p3 async_pt_3
+ Output: async_pt_3.a, async_pt_3.b, async_pt_3.c
+(18 rows)
+
+-- Test that pending requests are processed properly
+SET enable_mergejoin TO false;
+SET enable_hashjoin TO false;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505;
+ QUERY PLAN
+----------------------------------------------------------------
+ Nested Loop
+ Output: t1.a, t1.b, t1.c, t2.a, t2.b, t2.c
+ Join Filter: (t1.a = t2.a)
+ -> Append
+ -> Async Foreign Scan on public.async_p1 t1_1
+ Output: t1_1.a, t1_1.b, t1_1.c
+ Filter: (t1_1.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1
+ -> Async Foreign Scan on public.async_p2 t1_2
+ Output: t1_2.a, t1_2.b, t1_2.c
+ Filter: (t1_2.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2
+ -> Seq Scan on public.async_p3 t1_3
+ Output: t1_3.a, t1_3.b, t1_3.c
+ Filter: (t1_3.b === 505)
+ -> Materialize
+ Output: t2.a, t2.b, t2.c
+ -> Foreign Scan on public.async_p2 t2
+ Output: t2.a, t2.b, t2.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2
+(20 rows)
+
+SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505;
+ a | b | c | a | b | c
+------+-----+------+------+-----+------
+ 2505 | 505 | 0505 | 2505 | 505 | 0505
+(1 row)
+
+CREATE TABLE local_tbl (a int, b int, c text);
+INSERT INTO local_tbl VALUES (1505, 505, 'foo');
+ANALYZE local_tbl;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a;
+ QUERY PLAN
+----------------------------------------------------------------------------------------
+ Nested Loop Left Join
+ Output: t1.a, t1.b, t1.c, async_pt.a, async_pt.b, async_pt.c, ($0)
+ Join Filter: (t1.a = async_pt.a)
+ InitPlan 1 (returns $0)
+ -> Aggregate
+ Output: count(*)
+ -> Append
+ -> Async Foreign Scan on public.async_p1 async_pt_4
+ Remote SQL: SELECT NULL FROM public.base_tbl1 WHERE ((a < 3000))
+ -> Async Foreign Scan on public.async_p2 async_pt_5
+ Remote SQL: SELECT NULL FROM public.base_tbl2 WHERE ((a < 3000))
+ -> Seq Scan on public.local_tbl t1
+ Output: t1.a, t1.b, t1.c
+ -> Append
+ -> Async Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c, $0
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1 WHERE ((a < 3000))
+ -> Async Foreign Scan on public.async_p2 async_pt_2
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c, $0
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2 WHERE ((a < 3000))
+(20 rows)
+
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
+ Nested Loop Left Join (actual rows=1 loops=1)
+ Join Filter: (t1.a = async_pt.a)
+ Rows Removed by Join Filter: 399
+ InitPlan 1 (returns $0)
+ -> Aggregate (actual rows=1 loops=1)
+ -> Append (actual rows=400 loops=1)
+ -> Async Foreign Scan on async_p1 async_pt_4 (actual rows=200 loops=1)
+ -> Async Foreign Scan on async_p2 async_pt_5 (actual rows=200 loops=1)
+ -> Seq Scan on local_tbl t1 (actual rows=1 loops=1)
+ -> Append (actual rows=400 loops=1)
+ -> Async Foreign Scan on async_p1 async_pt_1 (actual rows=200 loops=1)
+ -> Async Foreign Scan on async_p2 async_pt_2 (actual rows=200 loops=1)
+(12 rows)
+
+SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a;
+ a | b | c | a | b | c | count
+------+-----+-----+------+-----+------+-------
+ 1505 | 505 | foo | 1505 | 505 | 0505 | 400
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1;
+ QUERY PLAN
+----------------------------------------------------------------
+ Limit
+ Output: t1.a, t1.b, t1.c
+ -> Append
+ -> Async Foreign Scan on public.async_p1 t1_1
+ Output: t1_1.a, t1_1.b, t1_1.c
+ Filter: (t1_1.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1
+ -> Async Foreign Scan on public.async_p2 t1_2
+ Output: t1_2.a, t1_2.b, t1_2.c
+ Filter: (t1_2.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2
+ -> Seq Scan on public.async_p3 t1_3
+ Output: t1_3.a, t1_3.b, t1_3.c
+ Filter: (t1_3.b === 505)
+(14 rows)
+
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1;
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Limit (actual rows=1 loops=1)
+ -> Append (actual rows=1 loops=1)
+ -> Async Foreign Scan on async_p1 t1_1 (actual rows=0 loops=1)
+ Filter: (b === 505)
+ -> Async Foreign Scan on async_p2 t1_2 (actual rows=0 loops=1)
+ Filter: (b === 505)
+ -> Seq Scan on async_p3 t1_3 (actual rows=1 loops=1)
+ Filter: (b === 505)
+ Rows Removed by Filter: 101
+(9 rows)
+
+SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1;
+ a | b | c
+------+-----+------
+ 3505 | 505 | 0505
+(1 row)
+
+-- Check with foreign modify
+CREATE TABLE base_tbl3 (a int, b int, c text);
+CREATE FOREIGN TABLE remote_tbl (a int, b int, c text)
+ SERVER loopback OPTIONS (table_name 'base_tbl3');
+INSERT INTO remote_tbl VALUES (2505, 505, 'bar');
+CREATE TABLE base_tbl4 (a int, b int, c text);
+CREATE FOREIGN TABLE insert_tbl (a int, b int, c text)
+ SERVER loopback OPTIONS (table_name 'base_tbl4');
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO insert_tbl (SELECT * FROM local_tbl UNION ALL SELECT * FROM remote_tbl);
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Insert on public.insert_tbl
+ Remote SQL: INSERT INTO public.base_tbl4(a, b, c) VALUES ($1, $2, $3)
+ Batch Size: 1
+ -> Append
+ -> Seq Scan on public.local_tbl
+ Output: local_tbl.a, local_tbl.b, local_tbl.c
+ -> Async Foreign Scan on public.remote_tbl
+ Output: remote_tbl.a, remote_tbl.b, remote_tbl.c
+ Remote SQL: SELECT a, b, c FROM public.base_tbl3
+(9 rows)
+
+INSERT INTO insert_tbl (SELECT * FROM local_tbl UNION ALL SELECT * FROM remote_tbl);
+SELECT * FROM insert_tbl ORDER BY a;
+ a | b | c
+------+-----+-----
+ 1505 | 505 | foo
+ 2505 | 505 | bar
+(2 rows)
+
+-- Check with direct modify
+EXPLAIN (VERBOSE, COSTS OFF)
+WITH t AS (UPDATE remote_tbl SET c = c || c RETURNING *)
+INSERT INTO join_tbl SELECT * FROM async_pt LEFT JOIN t ON (async_pt.a = t.a AND async_pt.b = t.b) WHERE async_pt.b === 505;
+ QUERY PLAN
+----------------------------------------------------------------------------------------
+ Insert on public.join_tbl
+ CTE t
+ -> Update on public.remote_tbl
+ Output: remote_tbl.a, remote_tbl.b, remote_tbl.c
+ -> Foreign Update on public.remote_tbl
+ Remote SQL: UPDATE public.base_tbl3 SET c = (c || c) RETURNING a, b, c
+ -> Nested Loop Left Join
+ Output: async_pt.a, async_pt.b, async_pt.c, t.a, t.b, t.c
+ Join Filter: ((async_pt.a = t.a) AND (async_pt.b = t.b))
+ -> Append
+ -> Async Foreign Scan on public.async_p1 async_pt_1
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Filter: (async_pt_1.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl1
+ -> Async Foreign Scan on public.async_p2 async_pt_2
+ Output: async_pt_2.a, async_pt_2.b, async_pt_2.c
+ Filter: (async_pt_2.b === 505)
+ Remote SQL: SELECT a, b, c FROM public.base_tbl2
+ -> Seq Scan on public.async_p3 async_pt_3
+ Output: async_pt_3.a, async_pt_3.b, async_pt_3.c
+ Filter: (async_pt_3.b === 505)
+ -> CTE Scan on t
+ Output: t.a, t.b, t.c
+(23 rows)
+
+WITH t AS (UPDATE remote_tbl SET c = c || c RETURNING *)
+INSERT INTO join_tbl SELECT * FROM async_pt LEFT JOIN t ON (async_pt.a = t.a AND async_pt.b = t.b) WHERE async_pt.b === 505;
+SELECT * FROM join_tbl ORDER BY a1;
+ a1 | b1 | c1 | a2 | b2 | c2
+------+-----+------+------+-----+--------
+ 1505 | 505 | 0505 | | |
+ 2505 | 505 | 0505 | 2505 | 505 | barbar
+ 3505 | 505 | 0505 | | |
+(3 rows)
+
+DELETE FROM join_tbl;
+DROP TABLE local_tbl;
+DROP FOREIGN TABLE remote_tbl;
+DROP FOREIGN TABLE insert_tbl;
+DROP TABLE base_tbl3;
+DROP TABLE base_tbl4;
+RESET enable_mergejoin;
+RESET enable_hashjoin;
+-- Test that UPDATE/DELETE with inherited target works with async_capable enabled
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE async_pt SET c = c || c WHERE b = 0 RETURNING *;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Update on public.async_pt
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Foreign Update on public.async_p1 async_pt_1
+ Foreign Update on public.async_p2 async_pt_2
+ Update on public.async_p3 async_pt_3
+ -> Append
+ -> Foreign Update on public.async_p1 async_pt_1
+ Remote SQL: UPDATE public.base_tbl1 SET c = (c || c) WHERE ((b = 0)) RETURNING a, b, c
+ -> Foreign Update on public.async_p2 async_pt_2
+ Remote SQL: UPDATE public.base_tbl2 SET c = (c || c) WHERE ((b = 0)) RETURNING a, b, c
+ -> Seq Scan on public.async_p3 async_pt_3
+ Output: (async_pt_3.c || async_pt_3.c), async_pt_3.tableoid, async_pt_3.ctid, NULL::record
+ Filter: (async_pt_3.b = 0)
+(13 rows)
+
+UPDATE async_pt SET c = c || c WHERE b = 0 RETURNING *;
+ a | b | c
+------+---+----------
+ 1000 | 0 | 00000000
+ 2000 | 0 | 00000000
+ 3000 | 0 | 00000000
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+DELETE FROM async_pt WHERE b = 0 RETURNING *;
+ QUERY PLAN
+------------------------------------------------------------------------------------------
+ Delete on public.async_pt
+ Output: async_pt_1.a, async_pt_1.b, async_pt_1.c
+ Foreign Delete on public.async_p1 async_pt_1
+ Foreign Delete on public.async_p2 async_pt_2
+ Delete on public.async_p3 async_pt_3
+ -> Append
+ -> Foreign Delete on public.async_p1 async_pt_1
+ Remote SQL: DELETE FROM public.base_tbl1 WHERE ((b = 0)) RETURNING a, b, c
+ -> Foreign Delete on public.async_p2 async_pt_2
+ Remote SQL: DELETE FROM public.base_tbl2 WHERE ((b = 0)) RETURNING a, b, c
+ -> Seq Scan on public.async_p3 async_pt_3
+ Output: async_pt_3.tableoid, async_pt_3.ctid
+ Filter: (async_pt_3.b = 0)
+(13 rows)
+
+DELETE FROM async_pt WHERE b = 0 RETURNING *;
+ a | b | c
+------+---+----------
+ 1000 | 0 | 00000000
+ 2000 | 0 | 00000000
+ 3000 | 0 | 00000000
+(3 rows)
+
+-- Check EXPLAIN ANALYZE for a query that scans empty partitions asynchronously
+DELETE FROM async_p1;
+DELETE FROM async_p2;
+DELETE FROM async_p3;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+SELECT * FROM async_pt;
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Append (actual rows=0 loops=1)
+ -> Async Foreign Scan on async_p1 async_pt_1 (actual rows=0 loops=1)
+ -> Async Foreign Scan on async_p2 async_pt_2 (actual rows=0 loops=1)
+ -> Seq Scan on async_p3 async_pt_3 (actual rows=0 loops=1)
+(4 rows)
+
+-- Clean up
+DROP TABLE async_pt;
+DROP TABLE base_tbl1;
+DROP TABLE base_tbl2;
+DROP TABLE result_tbl;
+DROP TABLE join_tbl;
+-- Test that an asynchronous fetch is processed before restarting the scan in
+-- ReScanForeignScan
+CREATE TABLE base_tbl (a int, b int);
+INSERT INTO base_tbl VALUES (1, 11), (2, 22), (3, 33);
+CREATE FOREIGN TABLE foreign_tbl (b int)
+ SERVER loopback OPTIONS (table_name 'base_tbl');
+CREATE FOREIGN TABLE foreign_tbl2 () INHERITS (foreign_tbl)
+ SERVER loopback OPTIONS (table_name 'base_tbl');
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT a FROM base_tbl WHERE a IN (SELECT a FROM foreign_tbl);
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Seq Scan on public.base_tbl
+ Output: base_tbl.a
+ Filter: (SubPlan 1)
+ SubPlan 1
+ -> Result
+ Output: base_tbl.a
+ -> Append
+ -> Async Foreign Scan on public.foreign_tbl foreign_tbl_1
+ Remote SQL: SELECT NULL FROM public.base_tbl
+ -> Async Foreign Scan on public.foreign_tbl2 foreign_tbl_2
+ Remote SQL: SELECT NULL FROM public.base_tbl
+(11 rows)
+
+SELECT a FROM base_tbl WHERE a IN (SELECT a FROM foreign_tbl);
+ a
+---
+ 1
+ 2
+ 3
+(3 rows)
+
+-- Clean up
+DROP FOREIGN TABLE foreign_tbl CASCADE;
+NOTICE: drop cascades to foreign table foreign_tbl2
+DROP TABLE base_tbl;
+ALTER SERVER loopback OPTIONS (DROP async_capable);
+ALTER SERVER loopback2 OPTIONS (DROP async_capable);
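+-- Illustrative note (not part of the test): async_capable is also accepted at
+-- the foreign-table level, so asynchronous execution can be enabled for
+-- individual tables instead of a whole server, e.g. for a hypothetical table:
+--   ALTER FOREIGN TABLE some_ft OPTIONS (ADD async_capable 'true');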
+-- ===================================================================
+-- test invalid server and foreign table options
+-- ===================================================================
+-- Invalid fdw_startup_cost option
+CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS(fdw_startup_cost '100$%$#$#');
+ERROR: invalid value for floating point option "fdw_startup_cost": 100$%$#$#
+-- Invalid fdw_tuple_cost option
+CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS(fdw_tuple_cost '100$%$#$#');
+ERROR: invalid value for floating point option "fdw_tuple_cost": 100$%$#$#
+-- Invalid fetch_size option
+CREATE FOREIGN TABLE inv_fsz (c1 int )
+ SERVER loopback OPTIONS (fetch_size '100$%$#$#');
+ERROR: invalid value for integer option "fetch_size": 100$%$#$#
+-- Invalid batch_size option
+CREATE FOREIGN TABLE inv_bsz (c1 int )
+ SERVER loopback OPTIONS (batch_size '100$%$#$#');
+ERROR: invalid value for integer option "batch_size": 100$%$#$#
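+-- For contrast (illustrative, not part of the test): the same options are
+-- accepted with well-formed values; fetch_size and batch_size must be integers
+-- greater than zero, and the cost options floating point values >= 0, e.g.
+--   CREATE SERVER ok_costs FOREIGN DATA WRAPPER postgres_fdw
+--     OPTIONS (fdw_startup_cost '25.0', fdw_tuple_cost '0.02');   -- hypothetical server
+--   CREATE FOREIGN TABLE ok_fsz (c1 int) SERVER loopback OPTIONS (fetch_size '500');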
diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c
new file mode 100644
index 0000000..c574ca2
--- /dev/null
+++ b/contrib/postgres_fdw/option.c
@@ -0,0 +1,437 @@
+/*-------------------------------------------------------------------------
+ *
+ * option.c
+ * FDW option handling for postgres_fdw
+ *
+ * Portions Copyright (c) 2012-2021, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/option.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/reloptions.h"
+#include "catalog/pg_foreign_server.h"
+#include "catalog/pg_foreign_table.h"
+#include "catalog/pg_user_mapping.h"
+#include "commands/defrem.h"
+#include "commands/extension.h"
+#include "postgres_fdw.h"
+#include "utils/builtins.h"
+#include "utils/guc.h"
+#include "utils/varlena.h"
+
+/*
+ * Describes the valid options for objects that this wrapper uses.
+ */
+typedef struct PgFdwOption
+{
+ const char *keyword;
+ Oid optcontext; /* OID of catalog in which option may appear */
+ bool is_libpq_opt; /* true if it's used in libpq */
+} PgFdwOption;
+
+/*
+ * Valid options for postgres_fdw.
+ * Allocated and filled in InitPgFdwOptions.
+ */
+static PgFdwOption *postgres_fdw_options;
+
+/*
+ * Valid options for libpq.
+ * Allocated and filled in InitPgFdwOptions.
+ */
+static PQconninfoOption *libpq_options;
+
+/*
+ * Helper functions
+ */
+static void InitPgFdwOptions(void);
+static bool is_valid_option(const char *keyword, Oid context);
+static bool is_libpq_option(const char *keyword);
+
+/*
+ * Validate the generic options given to a FOREIGN DATA WRAPPER, SERVER,
+ * USER MAPPING or FOREIGN TABLE that uses postgres_fdw.
+ *
+ * Raise an ERROR if the option or its value is considered invalid.
+ */
+PG_FUNCTION_INFO_V1(postgres_fdw_validator);
+
+Datum
+postgres_fdw_validator(PG_FUNCTION_ARGS)
+{
+ List *options_list = untransformRelOptions(PG_GETARG_DATUM(0));
+ Oid catalog = PG_GETARG_OID(1);
+ ListCell *cell;
+
+ /* Build our options lists if we didn't yet. */
+ InitPgFdwOptions();
+
+ /*
+ * Check that only options supported by postgres_fdw, and allowed for the
+ * current object type, are given.
+ */
+ foreach(cell, options_list)
+ {
+ DefElem *def = (DefElem *) lfirst(cell);
+
+ if (!is_valid_option(def->defname, catalog))
+ {
+ /*
+ * Unknown option specified, complain about it. Provide a hint
+ * with list of valid options for the object.
+ */
+ PgFdwOption *opt;
+ StringInfoData buf;
+
+ initStringInfo(&buf);
+ for (opt = postgres_fdw_options; opt->keyword; opt++)
+ {
+ if (catalog == opt->optcontext)
+ appendStringInfo(&buf, "%s%s", (buf.len > 0) ? ", " : "",
+ opt->keyword);
+ }
+
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_INVALID_OPTION_NAME),
+ errmsg("invalid option \"%s\"", def->defname),
+ errhint("Valid options in this context are: %s",
+ buf.data)));
+ }
+
+ /*
+ * Validate option value, when we can do so without any context.
+ */
+ if (strcmp(def->defname, "use_remote_estimate") == 0 ||
+ strcmp(def->defname, "updatable") == 0 ||
+ strcmp(def->defname, "truncatable") == 0 ||
+ strcmp(def->defname, "async_capable") == 0 ||
+ strcmp(def->defname, "keep_connections") == 0)
+ {
+ /* these accept only boolean values */
+ (void) defGetBoolean(def);
+ }
+ else if (strcmp(def->defname, "fdw_startup_cost") == 0 ||
+ strcmp(def->defname, "fdw_tuple_cost") == 0)
+ {
+ /*
+ * These must have a floating point value greater than or equal to
+ * zero.
+ */
+ char *value;
+ double real_val;
+ bool is_parsed;
+
+ value = defGetString(def);
+ is_parsed = parse_real(value, &real_val, 0, NULL);
+
+ if (!is_parsed)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid value for floating point option \"%s\": %s",
+ def->defname, value)));
+
+ if (real_val < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("\"%s\" must be a floating point value greater than or equal to zero",
+ def->defname)));
+ }
+ else if (strcmp(def->defname, "extensions") == 0)
+ {
+ /* check list syntax, warn about uninstalled extensions */
+ (void) ExtractExtensionList(defGetString(def), true);
+ }
+ else if (strcmp(def->defname, "fetch_size") == 0 ||
+ strcmp(def->defname, "batch_size") == 0)
+ {
+ char *value;
+ int int_val;
+ bool is_parsed;
+
+ value = defGetString(def);
+ is_parsed = parse_int(value, &int_val, 0, NULL);
+
+ if (!is_parsed)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid value for integer option \"%s\": %s",
+ def->defname, value)));
+
+ if (int_val <= 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("\"%s\" must be an integer value greater than zero",
+ def->defname)));
+ }
+ else if (strcmp(def->defname, "password_required") == 0)
+ {
+ bool pw_required = defGetBoolean(def);
+
+ /*
+ * Only the superuser may set this option on a user mapping, or
+ * alter a user mapping on which this option is set. We allow a
+ * user to clear this option if it's set - in fact, we don't have
+ * a choice since we can't see the old mapping when validating an
+ * alter.
+ */
+ if (!superuser() && !pw_required)
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("password_required=false is superuser-only"),
+ errhint("User mappings with the password_required option set to false may only be created or modified by the superuser")));
+ }
+ else if (strcmp(def->defname, "sslcert") == 0 ||
+ strcmp(def->defname, "sslkey") == 0)
+ {
+ /* similarly for sslcert / sslkey on user mapping */
+ if (catalog == UserMappingRelationId && !superuser())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("sslcert and sslkey are superuser-only"),
+ errhint("User mappings with the sslcert or sslkey options set may only be created or modified by the superuser")));
+ }
+ }
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Initialize option lists.
+ */
+static void
+InitPgFdwOptions(void)
+{
+ int num_libpq_opts;
+ PQconninfoOption *lopt;
+ PgFdwOption *popt;
+
+ /* non-libpq FDW-specific FDW options */
+ static const PgFdwOption non_libpq_options[] = {
+ {"schema_name", ForeignTableRelationId, false},
+ {"table_name", ForeignTableRelationId, false},
+ {"column_name", AttributeRelationId, false},
+ /* use_remote_estimate is available on both server and table */
+ {"use_remote_estimate", ForeignServerRelationId, false},
+ {"use_remote_estimate", ForeignTableRelationId, false},
+ /* cost factors */
+ {"fdw_startup_cost", ForeignServerRelationId, false},
+ {"fdw_tuple_cost", ForeignServerRelationId, false},
+ /* shippable extensions */
+ {"extensions", ForeignServerRelationId, false},
+ /* updatable is available on both server and table */
+ {"updatable", ForeignServerRelationId, false},
+ {"updatable", ForeignTableRelationId, false},
+ /* truncatable is available on both server and table */
+ {"truncatable", ForeignServerRelationId, false},
+ {"truncatable", ForeignTableRelationId, false},
+ /* fetch_size is available on both server and table */
+ {"fetch_size", ForeignServerRelationId, false},
+ {"fetch_size", ForeignTableRelationId, false},
+ /* batch_size is available on both server and table */
+ {"batch_size", ForeignServerRelationId, false},
+ {"batch_size", ForeignTableRelationId, false},
+ /* async_capable is available on both server and table */
+ {"async_capable", ForeignServerRelationId, false},
+ {"async_capable", ForeignTableRelationId, false},
+ {"keep_connections", ForeignServerRelationId, false},
+ {"password_required", UserMappingRelationId, false},
+
+ /*
+ * sslcert and sslkey are in fact libpq options, but we repeat them
+ * here to allow them to appear in both foreign server context (when
+ * we generate libpq options) and user mapping context (from here).
+ */
+ {"sslcert", UserMappingRelationId, true},
+ {"sslkey", UserMappingRelationId, true},
+
+ {NULL, InvalidOid, false}
+ };
+
+ /* Prevent redundant initialization. */
+ if (postgres_fdw_options)
+ return;
+
+ /*
+ * Get list of valid libpq options.
+ *
+ * To avoid unnecessary work, we get the list once and use it throughout
+ * the lifetime of this backend process. We don't need to care about
+ * memory context issues, because PQconndefaults allocates with malloc.
+ */
+ libpq_options = PQconndefaults();
+ if (!libpq_options) /* assume reason for failure is OOM */
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_OUT_OF_MEMORY),
+ errmsg("out of memory"),
+ errdetail("Could not get libpq's default connection options.")));
+
+ /* Count how many libpq options are available. */
+ num_libpq_opts = 0;
+ for (lopt = libpq_options; lopt->keyword; lopt++)
+ num_libpq_opts++;
+
+ /*
+ * Construct an array which consists of all valid options for
+ * postgres_fdw, by appending FDW-specific options to libpq options.
+ *
+ * We use plain malloc here to allocate postgres_fdw_options because it
+ * lives as long as the backend process does. Besides, keeping
+ * libpq_options in memory allows us to avoid copying every keyword
+ * string.
+ */
+ postgres_fdw_options = (PgFdwOption *)
+ malloc(sizeof(PgFdwOption) * num_libpq_opts +
+ sizeof(non_libpq_options));
+ if (postgres_fdw_options == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_OUT_OF_MEMORY),
+ errmsg("out of memory")));
+
+ popt = postgres_fdw_options;
+ for (lopt = libpq_options; lopt->keyword; lopt++)
+ {
+ /* Hide debug options, as well as settings we override internally. */
+ if (strchr(lopt->dispchar, 'D') ||
+ strcmp(lopt->keyword, "fallback_application_name") == 0 ||
+ strcmp(lopt->keyword, "client_encoding") == 0)
+ continue;
+
+ /* We don't have to copy keyword string, as described above. */
+ popt->keyword = lopt->keyword;
+
+ /*
+ * "user" and any secret options are allowed only on user mappings.
+ * Everything else is a server option.
+ */
+ if (strcmp(lopt->keyword, "user") == 0 || strchr(lopt->dispchar, '*'))
+ popt->optcontext = UserMappingRelationId;
+ else
+ popt->optcontext = ForeignServerRelationId;
+ popt->is_libpq_opt = true;
+
+ popt++;
+ }
+
+ /* Append FDW-specific options and dummy terminator. */
+ memcpy(popt, non_libpq_options, sizeof(non_libpq_options));
+}
+
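+/*
+ * Illustrative note: adding a new postgres_fdw option means listing it in
+ * non_libpq_options above, once per catalog it may appear in, and (if its
+ * value needs checking) adding a branch for it in postgres_fdw_validator().
+ * For example, a hypothetical boolean table-level option would be declared as
+ *     {"my_new_option", ForeignTableRelationId, false},
+ * and validated with defGetBoolean().
+ */
+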
+/*
+ * Check whether the given option is one of the valid postgres_fdw options.
+ * context is the Oid of the catalog holding the object the option is for.
+ */
+static bool
+is_valid_option(const char *keyword, Oid context)
+{
+ PgFdwOption *opt;
+
+ Assert(postgres_fdw_options); /* must be initialized already */
+
+ for (opt = postgres_fdw_options; opt->keyword; opt++)
+ {
+ if (context == opt->optcontext && strcmp(opt->keyword, keyword) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Check whether the given option is one of the valid libpq options.
+ */
+static bool
+is_libpq_option(const char *keyword)
+{
+ PgFdwOption *opt;
+
+ Assert(postgres_fdw_options); /* must be initialized already */
+
+ for (opt = postgres_fdw_options; opt->keyword; opt++)
+ {
+ if (opt->is_libpq_opt && strcmp(opt->keyword, keyword) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Generate key-value arrays which include only libpq options from the
+ * given list (which can contain any kind of options). Caller must have
+ * allocated large-enough arrays. Returns number of options found.
+ */
+int
+ExtractConnectionOptions(List *defelems, const char **keywords,
+ const char **values)
+{
+ ListCell *lc;
+ int i;
+
+ /* Build our options lists if we didn't yet. */
+ InitPgFdwOptions();
+
+ i = 0;
+ foreach(lc, defelems)
+ {
+ DefElem *d = (DefElem *) lfirst(lc);
+
+ if (is_libpq_option(d->defname))
+ {
+ keywords[i] = d->defname;
+ values[i] = defGetString(d);
+ i++;
+ }
+ }
+ return i;
+}
+
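+/*
+ * Hypothetical caller sketch (the real connection-establishment logic lives in
+ * connection.c): size the arrays from the option lists, leaving room for a
+ * terminating NULL, then extract the libpq options from each list, roughly
+ *
+ *     n = list_length(server->options) + list_length(user->options) + 1;
+ *     keywords = palloc(n * sizeof(char *));
+ *     values = palloc(n * sizeof(char *));
+ *     n = ExtractConnectionOptions(server->options, keywords, values);
+ *     n += ExtractConnectionOptions(user->options, keywords + n, values + n);
+ *     keywords[n] = values[n] = NULL;
+ */
+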
+/*
+ * Parse a comma-separated string and return a List of the OIDs of the
+ * extensions named in the string. If any names in the list cannot be
+ * found, report a warning if warnOnMissing is true, else just silently
+ * ignore them.
+ */
+List *
+ExtractExtensionList(const char *extensionsString, bool warnOnMissing)
+{
+ List *extensionOids = NIL;
+ List *extlist;
+ ListCell *lc;
+
+ /* SplitIdentifierString scribbles on its input, so pstrdup first */
+ if (!SplitIdentifierString(pstrdup(extensionsString), ',', &extlist))
+ {
+ /* syntax error in name list */
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("parameter \"%s\" must be a list of extension names",
+ "extensions")));
+ }
+
+ foreach(lc, extlist)
+ {
+ const char *extension_name = (const char *) lfirst(lc);
+ Oid extension_oid = get_extension_oid(extension_name, true);
+
+ if (OidIsValid(extension_oid))
+ {
+ extensionOids = lappend_oid(extensionOids, extension_oid);
+ }
+ else if (warnOnMissing)
+ {
+ ereport(WARNING,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("extension \"%s\" is not installed",
+ extension_name)));
+ }
+ }
+
+ list_free(extlist);
+ return extensionOids;
+}
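+
+/*
+ * Illustrative usage (hypothetical server name): the OID list built here backs
+ * the "extensions" server option, e.g.
+ *     ALTER SERVER some_server OPTIONS (ADD extensions 'cube, seg');
+ * shippable.c later consults the listed extensions when deciding whether an
+ * operator or function can be sent to the remote server.
+ */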
diff --git a/contrib/postgres_fdw/postgres_fdw--1.0--1.1.sql b/contrib/postgres_fdw/postgres_fdw--1.0--1.1.sql
new file mode 100644
index 0000000..ed4ca37
--- /dev/null
+++ b/contrib/postgres_fdw/postgres_fdw--1.0--1.1.sql
@@ -0,0 +1,20 @@
+/* contrib/postgres_fdw/postgres_fdw--1.0--1.1.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION postgres_fdw UPDATE TO '1.1'" to load this file. \quit
+
+CREATE FUNCTION postgres_fdw_get_connections (OUT server_name text,
+ OUT valid boolean)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT PARALLEL RESTRICTED;
+
+CREATE FUNCTION postgres_fdw_disconnect (text)
+RETURNS bool
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT PARALLEL RESTRICTED;
+
+CREATE FUNCTION postgres_fdw_disconnect_all ()
+RETURNS bool
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT PARALLEL RESTRICTED;
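+
+-- Illustrative usage of the functions added in 1.1 (hypothetical server name):
+--   SELECT server_name, valid FROM postgres_fdw_get_connections() ORDER BY 1;
+--   SELECT postgres_fdw_disconnect('some_server');
+--   SELECT postgres_fdw_disconnect_all();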
diff --git a/contrib/postgres_fdw/postgres_fdw--1.0.sql b/contrib/postgres_fdw/postgres_fdw--1.0.sql
new file mode 100644
index 0000000..a0f0fc1
--- /dev/null
+++ b/contrib/postgres_fdw/postgres_fdw--1.0.sql
@@ -0,0 +1,18 @@
+/* contrib/postgres_fdw/postgres_fdw--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION postgres_fdw" to load this file. \quit
+
+CREATE FUNCTION postgres_fdw_handler()
+RETURNS fdw_handler
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION postgres_fdw_validator(text[], oid)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FOREIGN DATA WRAPPER postgres_fdw
+ HANDLER postgres_fdw_handler
+ VALIDATOR postgres_fdw_validator;
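+
+-- Illustrative setup once the extension is installed (all names hypothetical):
+--   CREATE SERVER remote_srv FOREIGN DATA WRAPPER postgres_fdw
+--     OPTIONS (host 'remote.example.com', dbname 'appdb');
+--   CREATE USER MAPPING FOR CURRENT_USER SERVER remote_srv
+--     OPTIONS (user 'appuser', password 'apppass');
+--   CREATE FOREIGN TABLE remote_items (id int, note text)
+--     SERVER remote_srv OPTIONS (schema_name 'public', table_name 'items');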
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
new file mode 100644
index 0000000..63c578b
--- /dev/null
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -0,0 +1,7558 @@
+/*-------------------------------------------------------------------------
+ *
+ * postgres_fdw.c
+ * Foreign-data wrapper for remote PostgreSQL servers
+ *
+ * Portions Copyright (c) 2012-2021, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/postgres_fdw.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <limits.h>
+
+#include "access/htup_details.h"
+#include "access/sysattr.h"
+#include "access/table.h"
+#include "catalog/pg_class.h"
+#include "catalog/pg_opfamily.h"
+#include "commands/defrem.h"
+#include "commands/explain.h"
+#include "commands/vacuum.h"
+#include "executor/execAsync.h"
+#include "foreign/fdwapi.h"
+#include "funcapi.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/appendinfo.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/planmain.h"
+#include "optimizer/prep.h"
+#include "optimizer/restrictinfo.h"
+#include "optimizer/tlist.h"
+#include "parser/parsetree.h"
+#include "postgres_fdw.h"
+#include "storage/latch.h"
+#include "utils/builtins.h"
+#include "utils/float.h"
+#include "utils/guc.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/rel.h"
+#include "utils/sampling.h"
+#include "utils/selfuncs.h"
+
+PG_MODULE_MAGIC;
+
+/* Default CPU cost to start up a foreign query. */
+#define DEFAULT_FDW_STARTUP_COST 100.0
+
+/* Default CPU cost to process 1 row (above and beyond cpu_tuple_cost). */
+#define DEFAULT_FDW_TUPLE_COST 0.01
+
+/* If no remote estimates, assume a sort costs 20% extra */
+#define DEFAULT_FDW_SORT_MULTIPLIER 1.2
+
+/*
+ * Indexes of FDW-private information stored in fdw_private lists.
+ *
+ * These items are indexed with the enum FdwScanPrivateIndex, so an item
+ * can be fetched with list_nth(). For example, to get the SELECT statement:
+ * sql = strVal(list_nth(fdw_private, FdwScanPrivateSelectSql));
+ */
+enum FdwScanPrivateIndex
+{
+ /* SQL statement to execute remotely (as a String node) */
+ FdwScanPrivateSelectSql,
+ /* Integer list of attribute numbers retrieved by the SELECT */
+ FdwScanPrivateRetrievedAttrs,
+ /* Integer representing the desired fetch_size */
+ FdwScanPrivateFetchSize,
+
+ /*
+ * String describing the join, i.e. the names of the relations being joined
+ * and the join types; added only when the scan is a join
+ */
+ FdwScanPrivateRelations
+};
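+
+/*
+ * For illustration: postgresGetForeignPlan below assembles the list in
+ * exactly this order,
+ *
+ * fdw_private = list_make3(makeString(sql.data),
+ * retrieved_attrs,
+ * makeInteger(fpinfo->fetch_size));
+ *
+ * appending the relation-description string only for join and upper
+ * relations.
+ */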
+
+/*
+ * Similarly, this enum describes what's kept in the fdw_private list for
+ * a ModifyTable node referencing a postgres_fdw foreign table. We store:
+ *
+ * 1) INSERT/UPDATE/DELETE statement text to be sent to the remote server
+ * 2) Integer list of target attribute numbers for INSERT/UPDATE
+ * (NIL for a DELETE)
+ * 3) Length up to the end of the VALUES clause for INSERT
+ * (-1 for a DELETE/UPDATE)
+ * 4) Boolean flag showing if the remote query has a RETURNING clause
+ * 5) Integer list of attribute numbers retrieved by RETURNING, if any
+ */
+enum FdwModifyPrivateIndex
+{
+ /* SQL statement to execute remotely (as a String node) */
+ FdwModifyPrivateUpdateSql,
+ /* Integer list of target attribute numbers for INSERT/UPDATE */
+ FdwModifyPrivateTargetAttnums,
+ /* Length up to the end of the VALUES clause (as an integer Value node) */
+ FdwModifyPrivateLen,
+ /* has-returning flag (as an integer Value node) */
+ FdwModifyPrivateHasReturning,
+ /* Integer list of attribute numbers retrieved by RETURNING */
+ FdwModifyPrivateRetrievedAttrs
+};
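+
+/*
+ * Illustrative sketch (not verbatim from this module): postgresPlanForeignModify
+ * is expected to pack the list in this order, roughly as
+ *
+ * return list_make5(makeString(sql.data),
+ * targetAttrs,
+ * makeInteger(values_end_len),
+ * makeInteger((retrieved_attrs != NIL)),
+ * retrieved_attrs);
+ *
+ * and postgresBeginForeignModify unpacks it again with list_nth().
+ */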
+
+/*
+ * Similarly, this enum describes what's kept in the fdw_private list for
+ * a ForeignScan node that modifies a foreign table directly. We store:
+ *
+ * 1) UPDATE/DELETE statement text to be sent to the remote server
+ * 2) Boolean flag showing if the remote query has a RETURNING clause
+ * 3) Integer list of attribute numbers retrieved by RETURNING, if any
+ * 4) Boolean flag showing if we set the command es_processed
+ */
+enum FdwDirectModifyPrivateIndex
+{
+ /* SQL statement to execute remotely (as a String node) */
+ FdwDirectModifyPrivateUpdateSql,
+ /* has-returning flag (as an integer Value node) */
+ FdwDirectModifyPrivateHasReturning,
+ /* Integer list of attribute numbers retrieved by RETURNING */
+ FdwDirectModifyPrivateRetrievedAttrs,
+ /* set-processed flag (as an integer Value node) */
+ FdwDirectModifyPrivateSetProcessed
+};
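+
+/*
+ * Illustrative sketch (not verbatim from this module): postgresPlanDirectModify
+ * is expected to fill the list in this order, roughly as
+ *
+ * fscan->fdw_private = list_make4(makeString(sql.data),
+ * makeInteger((retrieved_attrs != NIL)),
+ * retrieved_attrs,
+ * makeInteger(plan->canSetTag));
+ */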
+
+/*
+ * Execution state of a foreign scan using postgres_fdw.
+ */
+typedef struct PgFdwScanState
+{
+ Relation rel; /* relcache entry for the foreign table. NULL
+ * for a foreign join scan. */
+ TupleDesc tupdesc; /* tuple descriptor of scan */
+ AttInMetadata *attinmeta; /* attribute datatype conversion metadata */
+
+ /* extracted fdw_private data */
+ char *query; /* text of SELECT command */
+ List *retrieved_attrs; /* list of retrieved attribute numbers */
+
+ /* for remote query execution */
+ PGconn *conn; /* connection for the scan */
+ PgFdwConnState *conn_state; /* extra per-connection state */
+ unsigned int cursor_number; /* quasi-unique ID for my cursor */
+ bool cursor_exists; /* have we created the cursor? */
+ int numParams; /* number of parameters passed to query */
+ FmgrInfo *param_flinfo; /* output conversion functions for them */
+ List *param_exprs; /* executable expressions for param values */
+ const char **param_values; /* textual values of query parameters */
+
+ /* for storing result tuples */
+ HeapTuple *tuples; /* array of currently-retrieved tuples */
+ int num_tuples; /* # of tuples in array */
+ int next_tuple; /* index of next one to return */
+
+ /* batch-level state, for optimizing rewinds and avoiding useless fetches */
+ int fetch_ct_2; /* Min(# of fetches done, 2) */
+ bool eof_reached; /* true if last fetch reached EOF */
+
+ /* for asynchronous execution */
+ bool async_capable; /* engage asynchronous-capable logic? */
+
+ /* working memory contexts */
+ MemoryContext batch_cxt; /* context holding current batch of tuples */
+ MemoryContext temp_cxt; /* context for per-tuple temporary data */
+
+ int fetch_size; /* number of tuples per fetch */
+} PgFdwScanState;
+
+/*
+ * Execution state of a foreign insert/update/delete operation.
+ */
+typedef struct PgFdwModifyState
+{
+ Relation rel; /* relcache entry for the foreign table */
+ AttInMetadata *attinmeta; /* attribute datatype conversion metadata */
+
+ /* for remote query execution */
+ PGconn *conn; /* connection for the scan */
+ PgFdwConnState *conn_state; /* extra per-connection state */
+ char *p_name; /* name of prepared statement, if created */
+
+ /* extracted fdw_private data */
+ char *query; /* text of INSERT/UPDATE/DELETE command */
+ char *orig_query; /* original text of INSERT command */
+ List *target_attrs; /* list of target attribute numbers */
+ int values_end; /* length up to the end of VALUES */
+ int batch_size; /* value of FDW option "batch_size" */
+ bool has_returning; /* is there a RETURNING clause? */
+ List *retrieved_attrs; /* attr numbers retrieved by RETURNING */
+
+ /* info about parameters for prepared statement */
+ AttrNumber ctidAttno; /* attnum of input resjunk ctid column */
+ int p_nums; /* number of parameters to transmit */
+ FmgrInfo *p_flinfo; /* output conversion functions for them */
+
+ /* batch operation stuff */
+ int num_slots; /* number of slots to insert */
+
+ /* working memory context */
+ MemoryContext temp_cxt; /* context for per-tuple temporary data */
+
+ /* for update row movement if subplan result rel */
+ struct PgFdwModifyState *aux_fmstate; /* foreign-insert state, if
+ * created */
+} PgFdwModifyState;
+
+/*
+ * Execution state of a foreign scan that modifies a foreign table directly.
+ */
+typedef struct PgFdwDirectModifyState
+{
+ Relation rel; /* relcache entry for the foreign table */
+ AttInMetadata *attinmeta; /* attribute datatype conversion metadata */
+
+ /* extracted fdw_private data */
+ char *query; /* text of UPDATE/DELETE command */
+ bool has_returning; /* is there a RETURNING clause? */
+ List *retrieved_attrs; /* attr numbers retrieved by RETURNING */
+ bool set_processed; /* do we set the command es_processed? */
+
+ /* for remote query execution */
+ PGconn *conn; /* connection for the update */
+ PgFdwConnState *conn_state; /* extra per-connection state */
+ int numParams; /* number of parameters passed to query */
+ FmgrInfo *param_flinfo; /* output conversion functions for them */
+ List *param_exprs; /* executable expressions for param values */
+ const char **param_values; /* textual values of query parameters */
+
+ /* for storing result tuples */
+ PGresult *result; /* result for query */
+ int num_tuples; /* # of result tuples */
+ int next_tuple; /* index of next one to return */
+ Relation resultRel; /* relcache entry for the target relation */
+ AttrNumber *attnoMap; /* array of attnums of input user columns */
+ AttrNumber ctidAttno; /* attnum of input ctid column */
+ AttrNumber oidAttno; /* attnum of input oid column */
+ bool hasSystemCols; /* are there system columns of resultRel? */
+
+ /* working memory context */
+ MemoryContext temp_cxt; /* context for per-tuple temporary data */
+} PgFdwDirectModifyState;
+
+/*
+ * Workspace for analyzing a foreign table.
+ */
+typedef struct PgFdwAnalyzeState
+{
+ Relation rel; /* relcache entry for the foreign table */
+ AttInMetadata *attinmeta; /* attribute datatype conversion metadata */
+ List *retrieved_attrs; /* attr numbers retrieved by query */
+
+ /* collected sample rows */
+ HeapTuple *rows; /* array of size targrows */
+ int targrows; /* target # of sample rows */
+ int numrows; /* # of sample rows collected */
+
+ /* for random sampling */
+ double samplerows; /* # of rows fetched */
+ double rowstoskip; /* # of rows to skip before next sample */
+ ReservoirStateData rstate; /* state for reservoir sampling */
+
+ /* working memory contexts */
+ MemoryContext anl_cxt; /* context for per-analyze lifespan data */
+ MemoryContext temp_cxt; /* context for per-tuple temporary data */
+} PgFdwAnalyzeState;
+
+/*
+ * This enum describes what's kept in the fdw_private list for a ForeignPath.
+ * We store:
+ *
+ * 1) Boolean flag showing if the remote query has the final sort
+ * 2) Boolean flag showing if the remote query has the LIMIT clause
+ */
+enum FdwPathPrivateIndex
+{
+ /* has-final-sort flag (as an integer Value node) */
+ FdwPathPrivateHasFinalSort,
+ /* has-limit flag (as an integer Value node) */
+ FdwPathPrivateHasLimit
+};
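+
+/*
+ * Illustrative sketch (not verbatim from this module): the upper-path code is
+ * expected to build this list roughly as
+ *
+ * fdw_private = list_make2(makeInteger(has_final_sort),
+ * makeInteger(extra->limit_needed));
+ *
+ * and postgresGetForeignPlan reads it back with intVal(list_nth(...)).
+ */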
+
+/* Struct for extra information passed to estimate_path_cost_size() */
+typedef struct
+{
+ PathTarget *target;
+ bool has_final_sort;
+ bool has_limit;
+ double limit_tuples;
+ int64 count_est;
+ int64 offset_est;
+} PgFdwPathExtraData;
+
+/*
+ * Identify the attribute where data conversion fails.
+ */
+typedef struct ConversionLocation
+{
+ AttrNumber cur_attno; /* attribute number being processed, or 0 */
+ Relation rel; /* foreign table being processed, or NULL */
+ ForeignScanState *fsstate; /* plan node being processed, or NULL */
+} ConversionLocation;
+
+/* Callback argument for ec_member_matches_foreign */
+typedef struct
+{
+ Expr *current; /* current expr, or NULL if not yet found */
+ List *already_used; /* expressions already dealt with */
+} ec_member_foreign_arg;
+
+/*
+ * SQL functions
+ */
+PG_FUNCTION_INFO_V1(postgres_fdw_handler);
+
+/*
+ * FDW callback routines
+ */
+static void postgresGetForeignRelSize(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Oid foreigntableid);
+static void postgresGetForeignPaths(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Oid foreigntableid);
+static ForeignScan *postgresGetForeignPlan(PlannerInfo *root,
+ RelOptInfo *foreignrel,
+ Oid foreigntableid,
+ ForeignPath *best_path,
+ List *tlist,
+ List *scan_clauses,
+ Plan *outer_plan);
+static void postgresBeginForeignScan(ForeignScanState *node, int eflags);
+static TupleTableSlot *postgresIterateForeignScan(ForeignScanState *node);
+static void postgresReScanForeignScan(ForeignScanState *node);
+static void postgresEndForeignScan(ForeignScanState *node);
+static void postgresAddForeignUpdateTargets(PlannerInfo *root,
+ Index rtindex,
+ RangeTblEntry *target_rte,
+ Relation target_relation);
+static List *postgresPlanForeignModify(PlannerInfo *root,
+ ModifyTable *plan,
+ Index resultRelation,
+ int subplan_index);
+static void postgresBeginForeignModify(ModifyTableState *mtstate,
+ ResultRelInfo *resultRelInfo,
+ List *fdw_private,
+ int subplan_index,
+ int eflags);
+static TupleTableSlot *postgresExecForeignInsert(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot);
+static TupleTableSlot **postgresExecForeignBatchInsert(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int *numSlots);
+static int postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo);
+static TupleTableSlot *postgresExecForeignUpdate(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot);
+static TupleTableSlot *postgresExecForeignDelete(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot);
+static void postgresEndForeignModify(EState *estate,
+ ResultRelInfo *resultRelInfo);
+static void postgresBeginForeignInsert(ModifyTableState *mtstate,
+ ResultRelInfo *resultRelInfo);
+static void postgresEndForeignInsert(EState *estate,
+ ResultRelInfo *resultRelInfo);
+static int postgresIsForeignRelUpdatable(Relation rel);
+static bool postgresPlanDirectModify(PlannerInfo *root,
+ ModifyTable *plan,
+ Index resultRelation,
+ int subplan_index);
+static void postgresBeginDirectModify(ForeignScanState *node, int eflags);
+static TupleTableSlot *postgresIterateDirectModify(ForeignScanState *node);
+static void postgresEndDirectModify(ForeignScanState *node);
+static void postgresExplainForeignScan(ForeignScanState *node,
+ ExplainState *es);
+static void postgresExplainForeignModify(ModifyTableState *mtstate,
+ ResultRelInfo *rinfo,
+ List *fdw_private,
+ int subplan_index,
+ ExplainState *es);
+static void postgresExplainDirectModify(ForeignScanState *node,
+ ExplainState *es);
+static void postgresExecForeignTruncate(List *rels,
+ DropBehavior behavior,
+ bool restart_seqs);
+static bool postgresAnalyzeForeignTable(Relation relation,
+ AcquireSampleRowsFunc *func,
+ BlockNumber *totalpages);
+static List *postgresImportForeignSchema(ImportForeignSchemaStmt *stmt,
+ Oid serverOid);
+static void postgresGetForeignJoinPaths(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra);
+static bool postgresRecheckForeignScan(ForeignScanState *node,
+ TupleTableSlot *slot);
+static void postgresGetForeignUpperPaths(PlannerInfo *root,
+ UpperRelationKind stage,
+ RelOptInfo *input_rel,
+ RelOptInfo *output_rel,
+ void *extra);
+static bool postgresIsForeignPathAsyncCapable(ForeignPath *path);
+static void postgresForeignAsyncRequest(AsyncRequest *areq);
+static void postgresForeignAsyncConfigureWait(AsyncRequest *areq);
+static void postgresForeignAsyncNotify(AsyncRequest *areq);
+
+/*
+ * Helper functions
+ */
+static void estimate_path_cost_size(PlannerInfo *root,
+ RelOptInfo *foreignrel,
+ List *param_join_conds,
+ List *pathkeys,
+ PgFdwPathExtraData *fpextra,
+ double *p_rows, int *p_width,
+ Cost *p_startup_cost, Cost *p_total_cost);
+static void get_remote_estimate(const char *sql,
+ PGconn *conn,
+ double *rows,
+ int *width,
+ Cost *startup_cost,
+ Cost *total_cost);
+static void adjust_foreign_grouping_path_cost(PlannerInfo *root,
+ List *pathkeys,
+ double retrieved_rows,
+ double width,
+ double limit_tuples,
+ Cost *p_startup_cost,
+ Cost *p_run_cost);
+static bool ec_member_matches_foreign(PlannerInfo *root, RelOptInfo *rel,
+ EquivalenceClass *ec, EquivalenceMember *em,
+ void *arg);
+static void create_cursor(ForeignScanState *node);
+static void fetch_more_data(ForeignScanState *node);
+static void close_cursor(PGconn *conn, unsigned int cursor_number,
+ PgFdwConnState *conn_state);
+static PgFdwModifyState *create_foreign_modify(EState *estate,
+ RangeTblEntry *rte,
+ ResultRelInfo *resultRelInfo,
+ CmdType operation,
+ Plan *subplan,
+ char *query,
+ List *target_attrs,
+ int len,
+ bool has_returning,
+ List *retrieved_attrs);
+static TupleTableSlot **execute_foreign_modify(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ CmdType operation,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int *numSlots);
+static void prepare_foreign_modify(PgFdwModifyState *fmstate);
+static const char **convert_prep_stmt_params(PgFdwModifyState *fmstate,
+ ItemPointer tupleid,
+ TupleTableSlot **slots,
+ int numSlots);
+static void store_returning_result(PgFdwModifyState *fmstate,
+ TupleTableSlot *slot, PGresult *res);
+static void finish_foreign_modify(PgFdwModifyState *fmstate);
+static void deallocate_query(PgFdwModifyState *fmstate);
+static List *build_remote_returning(Index rtindex, Relation rel,
+ List *returningList);
+static void rebuild_fdw_scan_tlist(ForeignScan *fscan, List *tlist);
+static void execute_dml_stmt(ForeignScanState *node);
+static TupleTableSlot *get_returning_data(ForeignScanState *node);
+static void init_returning_filter(PgFdwDirectModifyState *dmstate,
+ List *fdw_scan_tlist,
+ Index rtindex);
+static TupleTableSlot *apply_returning_filter(PgFdwDirectModifyState *dmstate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ EState *estate);
+static void prepare_query_params(PlanState *node,
+ List *fdw_exprs,
+ int numParams,
+ FmgrInfo **param_flinfo,
+ List **param_exprs,
+ const char ***param_values);
+static void process_query_params(ExprContext *econtext,
+ FmgrInfo *param_flinfo,
+ List *param_exprs,
+ const char **param_values);
+static int postgresAcquireSampleRowsFunc(Relation relation, int elevel,
+ HeapTuple *rows, int targrows,
+ double *totalrows,
+ double *totaldeadrows);
+static void analyze_row_processor(PGresult *res, int row,
+ PgFdwAnalyzeState *astate);
+static void produce_tuple_asynchronously(AsyncRequest *areq, bool fetch);
+static void fetch_more_data_begin(AsyncRequest *areq);
+static void complete_pending_request(AsyncRequest *areq);
+static HeapTuple make_tuple_from_result_row(PGresult *res,
+ int row,
+ Relation rel,
+ AttInMetadata *attinmeta,
+ List *retrieved_attrs,
+ ForeignScanState *fsstate,
+ MemoryContext temp_context);
+static void conversion_error_callback(void *arg);
+static bool foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel,
+ JoinType jointype, RelOptInfo *outerrel, RelOptInfo *innerrel,
+ JoinPathExtraData *extra);
+static bool foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
+ Node *havingQual);
+static List *get_useful_pathkeys_for_relation(PlannerInfo *root,
+ RelOptInfo *rel);
+static List *get_useful_ecs_for_relation(PlannerInfo *root, RelOptInfo *rel);
+static void add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel,
+ Path *epq_path);
+static void add_foreign_grouping_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel,
+ GroupPathExtraData *extra);
+static void add_foreign_ordered_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *ordered_rel);
+static void add_foreign_final_paths(PlannerInfo *root,
+ RelOptInfo *input_rel,
+ RelOptInfo *final_rel,
+ FinalPathExtraData *extra);
+static void apply_server_options(PgFdwRelationInfo *fpinfo);
+static void apply_table_options(PgFdwRelationInfo *fpinfo);
+static void merge_fdw_options(PgFdwRelationInfo *fpinfo,
+ const PgFdwRelationInfo *fpinfo_o,
+ const PgFdwRelationInfo *fpinfo_i);
+static int get_batch_size_option(Relation rel);
+
+
+/*
+ * Foreign-data wrapper handler function: return a struct with pointers
+ * to my callback routines.
+ */
+Datum
+postgres_fdw_handler(PG_FUNCTION_ARGS)
+{
+ FdwRoutine *routine = makeNode(FdwRoutine);
+
+ /* Functions for scanning foreign tables */
+ routine->GetForeignRelSize = postgresGetForeignRelSize;
+ routine->GetForeignPaths = postgresGetForeignPaths;
+ routine->GetForeignPlan = postgresGetForeignPlan;
+ routine->BeginForeignScan = postgresBeginForeignScan;
+ routine->IterateForeignScan = postgresIterateForeignScan;
+ routine->ReScanForeignScan = postgresReScanForeignScan;
+ routine->EndForeignScan = postgresEndForeignScan;
+
+ /* Functions for updating foreign tables */
+ routine->AddForeignUpdateTargets = postgresAddForeignUpdateTargets;
+ routine->PlanForeignModify = postgresPlanForeignModify;
+ routine->BeginForeignModify = postgresBeginForeignModify;
+ routine->ExecForeignInsert = postgresExecForeignInsert;
+ routine->ExecForeignBatchInsert = postgresExecForeignBatchInsert;
+ routine->GetForeignModifyBatchSize = postgresGetForeignModifyBatchSize;
+ routine->ExecForeignUpdate = postgresExecForeignUpdate;
+ routine->ExecForeignDelete = postgresExecForeignDelete;
+ routine->EndForeignModify = postgresEndForeignModify;
+ routine->BeginForeignInsert = postgresBeginForeignInsert;
+ routine->EndForeignInsert = postgresEndForeignInsert;
+ routine->IsForeignRelUpdatable = postgresIsForeignRelUpdatable;
+ routine->PlanDirectModify = postgresPlanDirectModify;
+ routine->BeginDirectModify = postgresBeginDirectModify;
+ routine->IterateDirectModify = postgresIterateDirectModify;
+ routine->EndDirectModify = postgresEndDirectModify;
+
+ /* Function for EvalPlanQual rechecks */
+ routine->RecheckForeignScan = postgresRecheckForeignScan;
+ /* Support functions for EXPLAIN */
+ routine->ExplainForeignScan = postgresExplainForeignScan;
+ routine->ExplainForeignModify = postgresExplainForeignModify;
+ routine->ExplainDirectModify = postgresExplainDirectModify;
+
+ /* Support function for TRUNCATE */
+ routine->ExecForeignTruncate = postgresExecForeignTruncate;
+
+ /* Support functions for ANALYZE */
+ routine->AnalyzeForeignTable = postgresAnalyzeForeignTable;
+
+ /* Support functions for IMPORT FOREIGN SCHEMA */
+ routine->ImportForeignSchema = postgresImportForeignSchema;
+
+ /* Support functions for join push-down */
+ routine->GetForeignJoinPaths = postgresGetForeignJoinPaths;
+
+ /* Support functions for upper relation push-down */
+ routine->GetForeignUpperPaths = postgresGetForeignUpperPaths;
+
+ /* Support functions for asynchronous execution */
+ routine->IsForeignPathAsyncCapable = postgresIsForeignPathAsyncCapable;
+ routine->ForeignAsyncRequest = postgresForeignAsyncRequest;
+ routine->ForeignAsyncConfigureWait = postgresForeignAsyncConfigureWait;
+ routine->ForeignAsyncNotify = postgresForeignAsyncNotify;
+
+ PG_RETURN_POINTER(routine);
+}
+
+/*
+ * postgresGetForeignRelSize
+ * Estimate # of rows and width of the result of the scan
+ *
+ * We should consider the effect of all baserestrictinfo clauses here, but
+ * not any join clauses.
+ */
+static void
+postgresGetForeignRelSize(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Oid foreigntableid)
+{
+ PgFdwRelationInfo *fpinfo;
+ ListCell *lc;
+ RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
+
+ /*
+ * We use PgFdwRelationInfo to pass various information to subsequent
+ * functions.
+ */
+ fpinfo = (PgFdwRelationInfo *) palloc0(sizeof(PgFdwRelationInfo));
+ baserel->fdw_private = (void *) fpinfo;
+
+ /* Base foreign tables need to be pushed down always. */
+ fpinfo->pushdown_safe = true;
+
+ /* Look up foreign-table catalog info. */
+ fpinfo->table = GetForeignTable(foreigntableid);
+ fpinfo->server = GetForeignServer(fpinfo->table->serverid);
+
+ /*
+ * Extract user-settable option values. Note that per-table settings of
+ * use_remote_estimate, fetch_size and async_capable override the
+ * corresponding per-server settings.
+ */
+ fpinfo->use_remote_estimate = false;
+ fpinfo->fdw_startup_cost = DEFAULT_FDW_STARTUP_COST;
+ fpinfo->fdw_tuple_cost = DEFAULT_FDW_TUPLE_COST;
+ fpinfo->shippable_extensions = NIL;
+ fpinfo->fetch_size = 100;
+ fpinfo->async_capable = false;
+
+ apply_server_options(fpinfo);
+ apply_table_options(fpinfo);
+
+ /*
+ * If the table or the server is configured to use remote estimates,
+ * identify which user to do remote access as during planning. This
+ * should match what ExecCheckRTEPerms() does. If we fail due to lack of
+ * permissions, the query would have failed at runtime anyway.
+ */
+ if (fpinfo->use_remote_estimate)
+ {
+ Oid userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
+
+ fpinfo->user = GetUserMapping(userid, fpinfo->server->serverid);
+ }
+ else
+ fpinfo->user = NULL;
+
+ /*
+ * Identify which baserestrictinfo clauses can be sent to the remote
+ * server and which can't.
+ */
+ classifyConditions(root, baserel, baserel->baserestrictinfo,
+ &fpinfo->remote_conds, &fpinfo->local_conds);
+
+ /*
+ * Identify which attributes will need to be retrieved from the remote
+ * server. These include all attrs needed for joins or final output, plus
+ * all attrs used in the local_conds. (Note: if we end up using a
+ * parameterized scan, it's possible that some of the join clauses will be
+ * sent to the remote and thus we wouldn't really need to retrieve the
+ * columns used in them. Doesn't seem worth detecting that case though.)
+ */
+ fpinfo->attrs_used = NULL;
+ pull_varattnos((Node *) baserel->reltarget->exprs, baserel->relid,
+ &fpinfo->attrs_used);
+ foreach(lc, fpinfo->local_conds)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+
+ pull_varattnos((Node *) rinfo->clause, baserel->relid,
+ &fpinfo->attrs_used);
+ }
+
+ /*
+ * Compute the selectivity and cost of the local_conds, so we don't have
+ * to do it over again for each path. The best we can do for these
+ * conditions is to estimate selectivity on the basis of local statistics.
+ */
+ fpinfo->local_conds_sel = clauselist_selectivity(root,
+ fpinfo->local_conds,
+ baserel->relid,
+ JOIN_INNER,
+ NULL);
+
+ cost_qual_eval(&fpinfo->local_conds_cost, fpinfo->local_conds, root);
+
+ /*
+ * Set # of retrieved rows and cached relation costs to some negative
+ * value, so that we can detect when they are set to some sensible values,
+ * during one (usually the first) of the calls to estimate_path_cost_size.
+ */
+ fpinfo->retrieved_rows = -1;
+ fpinfo->rel_startup_cost = -1;
+ fpinfo->rel_total_cost = -1;
+
+ /*
+ * If the table or the server is configured to use remote estimates,
+ * connect to the foreign server and execute EXPLAIN to estimate the
+ * number of rows selected by the restriction clauses, as well as the
+ * average row width. Otherwise, estimate using whatever statistics we
+ * have locally, in a way similar to ordinary tables.
+ */
+ if (fpinfo->use_remote_estimate)
+ {
+ /*
+ * Get cost/size estimates with help of remote server. Save the
+ * values in fpinfo so we don't need to do it again to generate the
+ * basic foreign path.
+ */
+ estimate_path_cost_size(root, baserel, NIL, NIL, NULL,
+ &fpinfo->rows, &fpinfo->width,
+ &fpinfo->startup_cost, &fpinfo->total_cost);
+
+ /* Report estimated baserel size to planner. */
+ baserel->rows = fpinfo->rows;
+ baserel->reltarget->width = fpinfo->width;
+ }
+ else
+ {
+ /*
+ * If the foreign table has never been ANALYZEd, it will have
+ * reltuples < 0, meaning "unknown". We can't do much if we're not
+ * allowed to consult the remote server, but we can use a hack similar
+ * to plancat.c's treatment of empty relations: use a minimum size
+ * estimate of 10 pages, and divide by the column-datatype-based width
+ * estimate to get the corresponding number of tuples.
+ */
+ if (baserel->tuples < 0)
+ {
+ baserel->pages = 10;
+ baserel->tuples =
+ (10 * BLCKSZ) / (baserel->reltarget->width +
+ MAXALIGN(SizeofHeapTupleHeader));
+ }
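+
+ /*
+ * Worked example: with the default BLCKSZ of 8192, a width estimate of
+ * 40 bytes, and MAXALIGN(SizeofHeapTupleHeader) = 24 (as on typical
+ * 8-byte-aligned builds), this gives (10 * 8192) / (40 + 24) = 1280
+ * tuples.
+ */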
+
+ /* Estimate baserel size as best we can with local statistics. */
+ set_baserel_size_estimates(root, baserel);
+
+ /* Fill in basically-bogus cost estimates for use later. */
+ estimate_path_cost_size(root, baserel, NIL, NIL, NULL,
+ &fpinfo->rows, &fpinfo->width,
+ &fpinfo->startup_cost, &fpinfo->total_cost);
+ }
+
+ /*
+ * fpinfo->relation_name gets the numeric rangetable index of the foreign
+ * table RTE. (If this query gets EXPLAIN'd, we'll convert that to a
+ * human-readable string at that time.)
+ */
+ fpinfo->relation_name = psprintf("%u", baserel->relid);
+
+ /* No outer and inner relations. */
+ fpinfo->make_outerrel_subquery = false;
+ fpinfo->make_innerrel_subquery = false;
+ fpinfo->lower_subquery_rels = NULL;
+ /* Set the relation index. */
+ fpinfo->relation_index = baserel->relid;
+}
+
+/*
+ * get_useful_ecs_for_relation
+ * Determine which EquivalenceClasses might be involved in useful
+ * orderings of this relation.
+ *
+ * This function is in some respects a mirror image of the core function
+ * pathkeys_useful_for_merging: for a regular table, we know what indexes
+ * we have and want to test whether any of them are useful. For a foreign
+ * table, we don't know what indexes are present on the remote side but
+ * want to speculate about which ones we'd like to use if they existed.
+ *
+ * This function returns a list of potentially-useful equivalence classes,
+ * but it does not guarantee that an EquivalenceMember exists which contains
+ * Vars only from the given relation. For example, given ft1 JOIN t1 ON
+ * ft1.x + t1.x = 0, this function will say that the equivalence class
+ * containing ft1.x + t1.x is potentially useful. Supposing ft1 is remote and
+ * t1 is local (or on a different server), it will turn out that no useful
+ * ORDER BY clause can be generated. It's not our job to figure that out
+ * here; we're only interested in identifying relevant ECs.
+ */
+static List *
+get_useful_ecs_for_relation(PlannerInfo *root, RelOptInfo *rel)
+{
+ List *useful_eclass_list = NIL;
+ ListCell *lc;
+ Relids relids;
+
+ /*
+ * First, consider whether any active EC is potentially useful for a merge
+ * join against this relation.
+ */
+ if (rel->has_eclass_joins)
+ {
+ foreach(lc, root->eq_classes)
+ {
+ EquivalenceClass *cur_ec = (EquivalenceClass *) lfirst(lc);
+
+ if (eclass_useful_for_merging(root, cur_ec, rel))
+ useful_eclass_list = lappend(useful_eclass_list, cur_ec);
+ }
+ }
+
+ /*
+ * Next, consider whether there are any non-EC derivable join clauses that
+ * are merge-joinable. If the joininfo list is empty, we can exit
+ * quickly.
+ */
+ if (rel->joininfo == NIL)
+ return useful_eclass_list;
+
+ /* If this is a child rel, we must use the topmost parent rel to search. */
+ if (IS_OTHER_REL(rel))
+ {
+ Assert(!bms_is_empty(rel->top_parent_relids));
+ relids = rel->top_parent_relids;
+ }
+ else
+ relids = rel->relids;
+
+ /* Check each join clause in turn. */
+ foreach(lc, rel->joininfo)
+ {
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(lc);
+
+ /* Consider only mergejoinable clauses */
+ if (restrictinfo->mergeopfamilies == NIL)
+ continue;
+
+ /* Make sure we've got canonical ECs. */
+ update_mergeclause_eclasses(root, restrictinfo);
+
+ /*
+ * restrictinfo->mergeopfamilies != NIL is sufficient to guarantee
+ * that left_ec and right_ec will be initialized, per comments in
+ * distribute_qual_to_rels.
+ *
+ * We want to identify which side of this merge-joinable clause
+ * contains columns from the relation produced by this RelOptInfo. We
+ * test for overlap, not containment, because there could be extra
+ * relations on either side. For example, suppose we've got something
+ * like ((A JOIN B ON A.x = B.x) JOIN C ON A.y = C.y) LEFT JOIN D ON
+ * A.y = D.y. The input rel might be the joinrel between A and B, and
+ * we'll consider the join clause A.y = D.y. relids contains a
+ * relation not involved in the join clause (B), and the equivalence
+ * class for the left-hand side of the clause contains a relation not
+ * involved in the input rel (C). Despite the fact that we have only
+ * overlap and not containment in either direction, A.y is potentially
+ * useful as a sort column.
+ *
+ * Note that it's even possible that relids overlaps neither side of
+ * the join clause. For example, consider A LEFT JOIN B ON A.x = B.x
+ * AND A.x = 1. The clause A.x = 1 will appear in B's joininfo list,
+ * but overlaps neither side of B. In that case, we just skip this
+ * join clause, since it doesn't suggest a useful sort order for this
+ * relation.
+ */
+ if (bms_overlap(relids, restrictinfo->right_ec->ec_relids))
+ useful_eclass_list = list_append_unique_ptr(useful_eclass_list,
+ restrictinfo->right_ec);
+ else if (bms_overlap(relids, restrictinfo->left_ec->ec_relids))
+ useful_eclass_list = list_append_unique_ptr(useful_eclass_list,
+ restrictinfo->left_ec);
+ }
+
+ return useful_eclass_list;
+}
+
+/*
+ * get_useful_pathkeys_for_relation
+ * Determine which orderings of a relation might be useful.
+ *
+ * Getting data in sorted order can be useful either because the requested
+ * order matches the final output ordering for the overall query we're
+ * planning, or because it enables an efficient merge join. Here, we try
+ * to figure out which pathkeys to consider.
+ */
+static List *
+get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
+{
+ List *useful_pathkeys_list = NIL;
+ List *useful_eclass_list;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) rel->fdw_private;
+ EquivalenceClass *query_ec = NULL;
+ ListCell *lc;
+
+ /*
+ * Pushing the query_pathkeys to the remote server is always worth
+ * considering, because it might let us avoid a local sort.
+ */
+ fpinfo->qp_is_pushdown_safe = false;
+ if (root->query_pathkeys)
+ {
+ bool query_pathkeys_ok = true;
+
+ foreach(lc, root->query_pathkeys)
+ {
+ PathKey *pathkey = (PathKey *) lfirst(lc);
+
+ /*
+ * The planner and executor don't have any clever strategy for
+ * taking data sorted by a prefix of the query's pathkeys and
+ * getting it to be sorted by all of those pathkeys. We'll just
+ * end up resorting the entire data set. So, unless we can push
+ * down all of the query pathkeys, forget it.
+ */
+ if (!is_foreign_pathkey(root, rel, pathkey))
+ {
+ query_pathkeys_ok = false;
+ break;
+ }
+ }
+
+ if (query_pathkeys_ok)
+ {
+ useful_pathkeys_list = list_make1(list_copy(root->query_pathkeys));
+ fpinfo->qp_is_pushdown_safe = true;
+ }
+ }
+
+ /*
+ * Even if we're not using remote estimates, having the remote side do the
+ * sort generally won't be any worse than doing it locally, and it might
+ * be much better if the remote side can generate data in the right order
+ * without needing a sort at all. However, what we're going to do next is
+ * try to generate pathkeys that seem promising for possible merge joins,
+ * and that's more speculative. A wrong choice might hurt quite a bit, so
+ * bail out if we can't use remote estimates.
+ */
+ if (!fpinfo->use_remote_estimate)
+ return useful_pathkeys_list;
+
+ /* Get the list of interesting EquivalenceClasses. */
+ useful_eclass_list = get_useful_ecs_for_relation(root, rel);
+
+ /* Extract unique EC for query, if any, so we don't consider it again. */
+ if (list_length(root->query_pathkeys) == 1)
+ {
+ PathKey *query_pathkey = linitial(root->query_pathkeys);
+
+ query_ec = query_pathkey->pk_eclass;
+ }
+
+ /*
+ * As a heuristic, the only pathkeys we consider here are those of length
+ * one. It's surely possible to consider more, but since each one we
+ * choose to consider will generate a round-trip to the remote side, we
+ * need to be a bit cautious here. It would sure be nice to have a local
+ * cache of information about remote index definitions...
+ */
+ foreach(lc, useful_eclass_list)
+ {
+ EquivalenceClass *cur_ec = lfirst(lc);
+ PathKey *pathkey;
+
+ /* If redundant with what we did above, skip it. */
+ if (cur_ec == query_ec)
+ continue;
+
+ /* Can't push down the sort if the EC's opfamily is not shippable. */
+ if (!is_shippable(linitial_oid(cur_ec->ec_opfamilies),
+ OperatorFamilyRelationId, fpinfo))
+ continue;
+
+ /* If no pushable expression for this rel, skip it. */
+ if (find_em_for_rel(root, cur_ec, rel) == NULL)
+ continue;
+
+ /* Looks like we can generate a pathkey, so let's do it. */
+ pathkey = make_canonical_pathkey(root, cur_ec,
+ linitial_oid(cur_ec->ec_opfamilies),
+ BTLessStrategyNumber,
+ false);
+ useful_pathkeys_list = lappend(useful_pathkeys_list,
+ list_make1(pathkey));
+ }
+
+ return useful_pathkeys_list;
+}
+
+/*
+ * postgresGetForeignPaths
+ * Create possible scan paths for a scan on the foreign table
+ */
+static void
+postgresGetForeignPaths(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Oid foreigntableid)
+{
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) baserel->fdw_private;
+ ForeignPath *path;
+ List *ppi_list;
+ ListCell *lc;
+
+ /*
+ * Create simplest ForeignScan path node and add it to baserel. This path
+ * corresponds to SeqScan path of regular tables (though depending on what
+ * baserestrict conditions we were able to send to remote, there might
+ * actually be an indexscan happening there). We already did all the work
+ * to estimate cost and size of this path.
+ *
+ * Although this path uses no join clauses, it could still have required
+ * parameterization due to LATERAL refs in its tlist.
+ */
+ path = create_foreignscan_path(root, baserel,
+ NULL, /* default pathtarget */
+ fpinfo->rows,
+ fpinfo->startup_cost,
+ fpinfo->total_cost,
+ NIL, /* no pathkeys */
+ baserel->lateral_relids,
+ NULL, /* no extra plan */
+ NIL); /* no fdw_private list */
+ add_path(baserel, (Path *) path);
+
+ /* Add paths with pathkeys */
+ add_paths_with_pathkeys_for_rel(root, baserel, NULL);
+
+ /*
+ * If we're not using remote estimates, stop here. We have no way to
+ * estimate whether any join clauses would be worth sending across, so
+ * don't bother building parameterized paths.
+ */
+ if (!fpinfo->use_remote_estimate)
+ return;
+
+ /*
+ * Thumb through all join clauses for the rel to identify which outer
+ * relations could supply one or more safe-to-send-to-remote join clauses.
+ * We'll build a parameterized path for each such outer relation.
+ *
+ * It's convenient to manage this by representing each candidate outer
+ * relation by the ParamPathInfo node for it. We can then use the
+ * ppi_clauses list in the ParamPathInfo node directly as a list of the
+ * interesting join clauses for that rel. This takes care of the
+ * possibility that there are multiple safe join clauses for such a rel,
+ * and also ensures that we account for unsafe join clauses that we'll
+ * still have to enforce locally (since the parameterized-path machinery
+ * insists that we handle all movable clauses).
+ */
+ ppi_list = NIL;
+ foreach(lc, baserel->joininfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ Relids required_outer;
+ ParamPathInfo *param_info;
+
+ /* Check if clause can be moved to this rel */
+ if (!join_clause_is_movable_to(rinfo, baserel))
+ continue;
+
+ /* See if it is safe to send to remote */
+ if (!is_foreign_expr(root, baserel, rinfo->clause))
+ continue;
+
+ /* Calculate required outer rels for the resulting path */
+ required_outer = bms_union(rinfo->clause_relids,
+ baserel->lateral_relids);
+ /* We do not want the foreign rel itself listed in required_outer */
+ required_outer = bms_del_member(required_outer, baserel->relid);
+
+ /*
+ * required_outer probably can't be empty here, but if it were, we
+ * couldn't make a parameterized path.
+ */
+ if (bms_is_empty(required_outer))
+ continue;
+
+ /* Get the ParamPathInfo */
+ param_info = get_baserel_parampathinfo(root, baserel,
+ required_outer);
+ Assert(param_info != NULL);
+
+ /*
+ * Add it to list unless we already have it. Testing pointer equality
+ * is OK since get_baserel_parampathinfo won't make duplicates.
+ */
+ ppi_list = list_append_unique_ptr(ppi_list, param_info);
+ }
+
+ /*
+ * The above scan examined only "generic" join clauses, not those that
+ * were absorbed into EquivalenceClasses. See if we can make anything out
+ * of those EquivalenceClasses.
+ */
+ if (baserel->has_eclass_joins)
+ {
+ /*
+ * We repeatedly scan the eclass list looking for column references
+ * (or expressions) belonging to the foreign rel. Each time we find
+ * one, we generate a list of equivalence joinclauses for it, and then
+ * see if any are safe to send to the remote. Repeat till there are
+ * no more candidate EC members.
+ */
+ ec_member_foreign_arg arg;
+
+ arg.already_used = NIL;
+ for (;;)
+ {
+ List *clauses;
+
+ /* Make clauses, skipping any that join to lateral_referencers */
+ arg.current = NULL;
+ clauses = generate_implied_equalities_for_column(root,
+ baserel,
+ ec_member_matches_foreign,
+ (void *) &arg,
+ baserel->lateral_referencers);
+
+ /* Done if there are no more expressions in the foreign rel */
+ if (arg.current == NULL)
+ {
+ Assert(clauses == NIL);
+ break;
+ }
+
+ /* Scan the extracted join clauses */
+ foreach(lc, clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ Relids required_outer;
+ ParamPathInfo *param_info;
+
+ /* Check if clause can be moved to this rel */
+ if (!join_clause_is_movable_to(rinfo, baserel))
+ continue;
+
+ /* See if it is safe to send to remote */
+ if (!is_foreign_expr(root, baserel, rinfo->clause))
+ continue;
+
+ /* Calculate required outer rels for the resulting path */
+ required_outer = bms_union(rinfo->clause_relids,
+ baserel->lateral_relids);
+ required_outer = bms_del_member(required_outer, baserel->relid);
+ if (bms_is_empty(required_outer))
+ continue;
+
+ /* Get the ParamPathInfo */
+ param_info = get_baserel_parampathinfo(root, baserel,
+ required_outer);
+ Assert(param_info != NULL);
+
+ /* Add it to list unless we already have it */
+ ppi_list = list_append_unique_ptr(ppi_list, param_info);
+ }
+
+ /* Try again, now ignoring the expression we found this time */
+ arg.already_used = lappend(arg.already_used, arg.current);
+ }
+ }
+
+ /*
+ * Now build a path for each useful outer relation.
+ */
+ foreach(lc, ppi_list)
+ {
+ ParamPathInfo *param_info = (ParamPathInfo *) lfirst(lc);
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+
+ /* Get a cost estimate from the remote */
+ estimate_path_cost_size(root, baserel,
+ param_info->ppi_clauses, NIL, NULL,
+ &rows, &width,
+ &startup_cost, &total_cost);
+
+ /*
+ * ppi_rows currently won't get looked at by anything, but still we
+ * may as well ensure that it matches our idea of the rowcount.
+ */
+ param_info->ppi_rows = rows;
+
+ /* Make the path */
+ path = create_foreignscan_path(root, baserel,
+ NULL, /* default pathtarget */
+ rows,
+ startup_cost,
+ total_cost,
+ NIL, /* no pathkeys */
+ param_info->ppi_req_outer,
+ NULL,
+ NIL); /* no fdw_private list */
+ add_path(baserel, (Path *) path);
+ }
+}
+
+/*
+ * postgresGetForeignPlan
+ * Create ForeignScan plan node which implements selected best path
+ */
+static ForeignScan *
+postgresGetForeignPlan(PlannerInfo *root,
+ RelOptInfo *foreignrel,
+ Oid foreigntableid,
+ ForeignPath *best_path,
+ List *tlist,
+ List *scan_clauses,
+ Plan *outer_plan)
+{
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private;
+ Index scan_relid;
+ List *fdw_private;
+ List *remote_exprs = NIL;
+ List *local_exprs = NIL;
+ List *params_list = NIL;
+ List *fdw_scan_tlist = NIL;
+ List *fdw_recheck_quals = NIL;
+ List *retrieved_attrs;
+ StringInfoData sql;
+ bool has_final_sort = false;
+ bool has_limit = false;
+ ListCell *lc;
+
+ /*
+ * Get FDW private data created by postgresGetForeignUpperPaths(), if any.
+ */
+ if (best_path->fdw_private)
+ {
+ has_final_sort = intVal(list_nth(best_path->fdw_private,
+ FdwPathPrivateHasFinalSort));
+ has_limit = intVal(list_nth(best_path->fdw_private,
+ FdwPathPrivateHasLimit));
+ }
+
+ if (IS_SIMPLE_REL(foreignrel))
+ {
+ /*
+ * For base relations, set scan_relid as the relid of the relation.
+ */
+ scan_relid = foreignrel->relid;
+
+ /*
+ * In a base-relation scan, we must apply the given scan_clauses.
+ *
+ * Separate the scan_clauses into those that can be executed remotely
+ * and those that can't. baserestrictinfo clauses that were
+ * previously determined to be safe or unsafe by classifyConditions
+ * are found in fpinfo->remote_conds and fpinfo->local_conds. Anything
+ * else in the scan_clauses list will be a join clause, which we have
+ * to check for remote-safety.
+ *
+ * Note: the join clauses we see here should be the exact same ones
+ * previously examined by postgresGetForeignPaths. Possibly it'd be
+ * worth passing forward the classification work done then, rather
+ * than repeating it here.
+ *
+ * This code must match "extract_actual_clauses(scan_clauses, false)"
+ * except for the additional decision about remote versus local
+ * execution.
+ */
+ foreach(lc, scan_clauses)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+
+ /* Ignore any pseudoconstants, they're dealt with elsewhere */
+ if (rinfo->pseudoconstant)
+ continue;
+
+ if (list_member_ptr(fpinfo->remote_conds, rinfo))
+ remote_exprs = lappend(remote_exprs, rinfo->clause);
+ else if (list_member_ptr(fpinfo->local_conds, rinfo))
+ local_exprs = lappend(local_exprs, rinfo->clause);
+ else if (is_foreign_expr(root, foreignrel, rinfo->clause))
+ remote_exprs = lappend(remote_exprs, rinfo->clause);
+ else
+ local_exprs = lappend(local_exprs, rinfo->clause);
+ }
+
+ /*
+ * For a base-relation scan, we have to support EPQ recheck, which
+ * should recheck all the remote quals.
+ */
+ fdw_recheck_quals = remote_exprs;
+ }
+ else
+ {
+ /*
+ * Join relation or upper relation - set scan_relid to 0.
+ */
+ scan_relid = 0;
+
+ /*
+ * For a join rel, baserestrictinfo is NIL and we are not considering
+ * parameterization right now, so there should be no scan_clauses for
+ * a joinrel or an upper rel either.
+ */
+ Assert(!scan_clauses);
+
+ /*
+ * Instead we get the conditions to apply from the fdw_private
+ * structure.
+ */
+ remote_exprs = extract_actual_clauses(fpinfo->remote_conds, false);
+ local_exprs = extract_actual_clauses(fpinfo->local_conds, false);
+
+ /*
+ * We leave fdw_recheck_quals empty in this case, since we never need
+ * to apply EPQ recheck clauses. In the case of a joinrel, EPQ
+ * recheck is handled elsewhere --- see postgresGetForeignJoinPaths().
+ * If we're planning an upperrel (ie, remote grouping or aggregation)
+ * then there's no EPQ to do because SELECT FOR UPDATE wouldn't be
+ * allowed, and indeed we *can't* put the remote clauses into
+ * fdw_recheck_quals because the unaggregated Vars won't be available
+ * locally.
+ */
+
+ /* Build the list of columns to be fetched from the foreign server. */
+ fdw_scan_tlist = build_tlist_to_deparse(foreignrel);
+
+ /*
+ * Ensure that the outer plan produces a tuple whose descriptor
+ * matches our scan tuple slot. Also, remove the local conditions
+ * from outer plan's quals, lest they be evaluated twice, once by the
+ * local plan and once by the scan.
+ */
+ if (outer_plan)
+ {
+ ListCell *lc;
+
+ /*
+ * Right now, we only consider grouping and aggregation beyond
+ * joins. Queries involving aggregates or grouping do not require the
+ * EPQ mechanism, and hence should not have an outer plan here.
+ */
+ Assert(!IS_UPPER_REL(foreignrel));
+
+ /*
+ * First, update the plan's qual list if possible. In some cases
+ * the quals might be enforced below the topmost plan level, in
+ * which case we'll fail to remove them; it's not worth working
+ * harder than this.
+ */
+ foreach(lc, local_exprs)
+ {
+ Node *qual = lfirst(lc);
+
+ outer_plan->qual = list_delete(outer_plan->qual, qual);
+
+ /*
+ * For an inner join the local conditions of foreign scan plan
+ * can be part of the joinquals as well. (They might also be
+ * in the mergequals or hashquals, but we can't touch those
+ * without breaking the plan.)
+ */
+ if (IsA(outer_plan, NestLoop) ||
+ IsA(outer_plan, MergeJoin) ||
+ IsA(outer_plan, HashJoin))
+ {
+ Join *join_plan = (Join *) outer_plan;
+
+ if (join_plan->jointype == JOIN_INNER)
+ join_plan->joinqual = list_delete(join_plan->joinqual,
+ qual);
+ }
+ }
+
+ /*
+ * Now fix the subplan's tlist --- this might result in inserting
+ * a Result node atop the plan tree.
+ */
+ outer_plan = change_plan_targetlist(outer_plan, fdw_scan_tlist,
+ best_path->path.parallel_safe);
+ }
+ }
+
+ /*
+ * Build the query string to be sent for execution, and identify
+ * expressions to be sent as parameters.
+ */
+ initStringInfo(&sql);
+ deparseSelectStmtForRel(&sql, root, foreignrel, fdw_scan_tlist,
+ remote_exprs, best_path->path.pathkeys,
+ has_final_sort, has_limit, false,
+ &retrieved_attrs, &params_list);
+
+ /* Remember remote_exprs for possible use by postgresPlanDirectModify */
+ fpinfo->final_remote_exprs = remote_exprs;
+
+ /*
+ * Build the fdw_private list that will be available to the executor.
+ * Items in the list must match order in enum FdwScanPrivateIndex.
+ */
+ fdw_private = list_make3(makeString(sql.data),
+ retrieved_attrs,
+ makeInteger(fpinfo->fetch_size));
+ if (IS_JOIN_REL(foreignrel) || IS_UPPER_REL(foreignrel))
+ fdw_private = lappend(fdw_private,
+ makeString(fpinfo->relation_name));
+
+ /*
+ * Create the ForeignScan node for the given relation.
+ *
+ * Note that the remote parameter expressions are stored in the fdw_exprs
+ * field of the finished plan node; we can't keep them in private state
+ * because then they wouldn't be subject to later planner processing.
+ */
+ return make_foreignscan(tlist,
+ local_exprs,
+ scan_relid,
+ params_list,
+ fdw_private,
+ fdw_scan_tlist,
+ fdw_recheck_quals,
+ outer_plan);
+}
+
+/*
+ * Construct a tuple descriptor for the scan tuples handled by a foreign join.
+ */
+static TupleDesc
+get_tupdesc_for_join_scan_tuples(ForeignScanState *node)
+{
+ ForeignScan *fsplan = (ForeignScan *) node->ss.ps.plan;
+ EState *estate = node->ss.ps.state;
+ TupleDesc tupdesc;
+
+ /*
+ * The core code has already set up a scan tuple slot based on
+ * fsplan->fdw_scan_tlist, and this slot's tupdesc is mostly good enough,
+ * but there's one case where it isn't. If we have any whole-row row
+ * identifier Vars, they may have vartype RECORD, and we need to replace
+ * that with the associated table's actual composite type. This ensures
+ * that when we read those ROW() expression values from the remote server,
+ * we can convert them to a composite type the local server knows.
+ */
+ tupdesc = CreateTupleDescCopy(node->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
+ for (int i = 0; i < tupdesc->natts; i++)
+ {
+ Form_pg_attribute att = TupleDescAttr(tupdesc, i);
+ Var *var;
+ RangeTblEntry *rte;
+ Oid reltype;
+
+ /* Nothing to do if it's not a generic RECORD attribute */
+ if (att->atttypid != RECORDOID || att->atttypmod >= 0)
+ continue;
+
+ /*
+ * If we can't identify the referenced table, do nothing. This'll
+ * likely lead to failure later, but perhaps we can muddle through.
+ */
+ var = (Var *) list_nth_node(TargetEntry, fsplan->fdw_scan_tlist,
+ i)->expr;
+ if (!IsA(var, Var) || var->varattno != 0)
+ continue;
+ rte = list_nth(estate->es_range_table, var->varno - 1);
+ if (rte->rtekind != RTE_RELATION)
+ continue;
+ reltype = get_rel_type_id(rte->relid);
+ if (!OidIsValid(reltype))
+ continue;
+ att->atttypid = reltype;
+ /* shouldn't need to change anything else */
+ }
+ return tupdesc;
+}
+
+/*
+ * postgresBeginForeignScan
+ * Initiate an executor scan of a foreign PostgreSQL table.
+ */
+static void
+postgresBeginForeignScan(ForeignScanState *node, int eflags)
+{
+ ForeignScan *fsplan = (ForeignScan *) node->ss.ps.plan;
+ EState *estate = node->ss.ps.state;
+ PgFdwScanState *fsstate;
+ RangeTblEntry *rte;
+ Oid userid;
+ ForeignTable *table;
+ UserMapping *user;
+ int rtindex;
+ int numParams;
+
+ /*
+ * Do nothing in EXPLAIN (no ANALYZE) case. node->fdw_state stays NULL.
+ */
+ if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
+ return;
+
+ /*
+ * We'll save private state in node->fdw_state.
+ */
+ fsstate = (PgFdwScanState *) palloc0(sizeof(PgFdwScanState));
+ node->fdw_state = (void *) fsstate;
+
+ /*
+ * Identify which user to do the remote access as. This should match what
+ * ExecCheckRTEPerms() does. In case of a join or aggregate, use the
+ * lowest-numbered member RTE as a representative; we would get the same
+ * result from any.
+ */
+ if (fsplan->scan.scanrelid > 0)
+ rtindex = fsplan->scan.scanrelid;
+ else
+ rtindex = bms_next_member(fsplan->fs_relids, -1);
+ rte = exec_rt_fetch(rtindex, estate);
+ userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
+
+ /* Get info about foreign table. */
+ table = GetForeignTable(rte->relid);
+ user = GetUserMapping(userid, table->serverid);
+
+ /*
+ * Get connection to the foreign server. Connection manager will
+ * establish new connection if necessary.
+ */
+ fsstate->conn = GetConnection(user, false, &fsstate->conn_state);
+
+ /* Assign a unique ID for my cursor */
+ fsstate->cursor_number = GetCursorNumber(fsstate->conn);
+ fsstate->cursor_exists = false;
+
+ /* Get private info created by planner functions. */
+ fsstate->query = strVal(list_nth(fsplan->fdw_private,
+ FdwScanPrivateSelectSql));
+ fsstate->retrieved_attrs = (List *) list_nth(fsplan->fdw_private,
+ FdwScanPrivateRetrievedAttrs);
+ fsstate->fetch_size = intVal(list_nth(fsplan->fdw_private,
+ FdwScanPrivateFetchSize));
+
+ /* Create contexts for batches of tuples and per-tuple temp workspace. */
+ fsstate->batch_cxt = AllocSetContextCreate(estate->es_query_cxt,
+ "postgres_fdw tuple data",
+ ALLOCSET_DEFAULT_SIZES);
+ fsstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt,
+ "postgres_fdw temporary data",
+ ALLOCSET_SMALL_SIZES);
+
+ /*
+ * Get info we'll need for converting data fetched from the foreign server
+ * into local representation and error reporting during that process.
+ */
+ if (fsplan->scan.scanrelid > 0)
+ {
+ fsstate->rel = node->ss.ss_currentRelation;
+ fsstate->tupdesc = RelationGetDescr(fsstate->rel);
+ }
+ else
+ {
+ fsstate->rel = NULL;
+ fsstate->tupdesc = get_tupdesc_for_join_scan_tuples(node);
+ }
+
+ fsstate->attinmeta = TupleDescGetAttInMetadata(fsstate->tupdesc);
+
+ /*
+ * Prepare for processing of parameters used in remote query, if any.
+ */
+ numParams = list_length(fsplan->fdw_exprs);
+ fsstate->numParams = numParams;
+ if (numParams > 0)
+ prepare_query_params((PlanState *) node,
+ fsplan->fdw_exprs,
+ numParams,
+ &fsstate->param_flinfo,
+ &fsstate->param_exprs,
+ &fsstate->param_values);
+
+ /* Set the async-capable flag */
+ fsstate->async_capable = node->ss.ps.async_capable;
+}
+
+/*
+ * postgresIterateForeignScan
+ * Retrieve next row from the result set, or clear tuple slot to indicate
+ * EOF.
+ */
+static TupleTableSlot *
+postgresIterateForeignScan(ForeignScanState *node)
+{
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ TupleTableSlot *slot = node->ss.ss_ScanTupleSlot;
+
+ /*
+ * In sync mode, if this is the first call after Begin or ReScan, we need
+ * to create the cursor on the remote side. In async mode, we would have
+ * already created the cursor before we get here, even if this is the
+ * first call after Begin or ReScan.
+ */
+ if (!fsstate->cursor_exists)
+ create_cursor(node);
+
+ /*
+ * Get some more tuples, if we've run out.
+ */
+ if (fsstate->next_tuple >= fsstate->num_tuples)
+ {
+ /* In async mode, just clear tuple slot. */
+ if (fsstate->async_capable)
+ return ExecClearTuple(slot);
+ /* No point in another fetch if we already detected EOF, though. */
+ if (!fsstate->eof_reached)
+ fetch_more_data(node);
+ /* If we didn't get any tuples, must be end of data. */
+ if (fsstate->next_tuple >= fsstate->num_tuples)
+ return ExecClearTuple(slot);
+ }
+
+ /*
+ * Return the next tuple.
+ */
+ ExecStoreHeapTuple(fsstate->tuples[fsstate->next_tuple++],
+ slot,
+ false);
+
+ return slot;
+}
+
+/*
+ * postgresReScanForeignScan
+ * Restart the scan.
+ */
+static void
+postgresReScanForeignScan(ForeignScanState *node)
+{
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ char sql[64];
+ PGresult *res;
+
+ /* If we haven't created the cursor yet, nothing to do. */
+ if (!fsstate->cursor_exists)
+ return;
+
+ /*
+ * If the node is async-capable and an asynchronous fetch for it is still
+ * in progress, complete that fetch before restarting the scan.
+ */
+ if (fsstate->async_capable &&
+ fsstate->conn_state->pendingAreq &&
+ fsstate->conn_state->pendingAreq->requestee == (PlanState *) node)
+ fetch_more_data(node);
+
+ /*
+ * If any internal parameters affecting this node have changed, we'd
+ * better destroy and recreate the cursor. Otherwise, rewinding it should
+ * be good enough. If we've only fetched zero or one batch, we needn't
+ * even rewind the cursor, just rescan what we have.
+ */
+ if (node->ss.ps.chgParam != NULL)
+ {
+ fsstate->cursor_exists = false;
+ snprintf(sql, sizeof(sql), "CLOSE c%u",
+ fsstate->cursor_number);
+ }
+ else if (fsstate->fetch_ct_2 > 1)
+ {
+ snprintf(sql, sizeof(sql), "MOVE BACKWARD ALL IN c%u",
+ fsstate->cursor_number);
+ }
+ else
+ {
+ /* Easy: just rescan what we already have in memory, if anything */
+ fsstate->next_tuple = 0;
+ return;
+ }
+
+ /*
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = pgfdw_exec_query(fsstate->conn, sql, fsstate->conn_state);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, fsstate->conn, true, sql);
+ PQclear(res);
+
+ /* Now force a fresh FETCH. */
+ fsstate->tuples = NULL;
+ fsstate->num_tuples = 0;
+ fsstate->next_tuple = 0;
+ fsstate->fetch_ct_2 = 0;
+ fsstate->eof_reached = false;
+}
+
+/*
+ * postgresEndForeignScan
+ * Finish scanning foreign table and dispose objects used for this scan
+ */
+static void
+postgresEndForeignScan(ForeignScanState *node)
+{
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+
+ /* if fsstate is NULL, we are in EXPLAIN; nothing to do */
+ if (fsstate == NULL)
+ return;
+
+ /* Close the cursor if open, to prevent accumulation of cursors */
+ if (fsstate->cursor_exists)
+ close_cursor(fsstate->conn, fsstate->cursor_number,
+ fsstate->conn_state);
+
+ /* Release remote connection */
+ ReleaseConnection(fsstate->conn);
+ fsstate->conn = NULL;
+
+ /* MemoryContexts will be deleted automatically. */
+}
+
+/*
+ * postgresAddForeignUpdateTargets
+ * Add resjunk column(s) needed for update/delete on a foreign table
+ */
+static void
+postgresAddForeignUpdateTargets(PlannerInfo *root,
+ Index rtindex,
+ RangeTblEntry *target_rte,
+ Relation target_relation)
+{
+ Var *var;
+
+ /*
+ * In postgres_fdw, what we need is the ctid, same as for a regular table.
+ */
+
+ /* Make a Var representing the desired value */
+ var = makeVar(rtindex,
+ SelfItemPointerAttributeNumber,
+ TIDOID,
+ -1,
+ InvalidOid,
+ 0);
+
+ /* Register it as a row-identity column needed by this target rel */
+ add_row_identity_var(root, var, rtindex, "ctid");
+}
+
+/*
+ * postgresPlanForeignModify
+ * Plan an insert/update/delete operation on a foreign table
+ */
+static List *
+postgresPlanForeignModify(PlannerInfo *root,
+ ModifyTable *plan,
+ Index resultRelation,
+ int subplan_index)
+{
+ CmdType operation = plan->operation;
+ RangeTblEntry *rte = planner_rt_fetch(resultRelation, root);
+ Relation rel;
+ StringInfoData sql;
+ List *targetAttrs = NIL;
+ List *withCheckOptionList = NIL;
+ List *returningList = NIL;
+ List *retrieved_attrs = NIL;
+ bool doNothing = false;
+ int values_end_len = -1;
+
+ initStringInfo(&sql);
+
+ /*
+ * Core code already has some lock on each rel being planned, so we can
+ * use NoLock here.
+ */
+ rel = table_open(rte->relid, NoLock);
+
+ /*
+ * In an INSERT, we transmit all columns that are defined in the foreign
+ * table. In an UPDATE, if there are BEFORE ROW UPDATE triggers on the
+ * foreign table, we transmit all columns like INSERT; else we transmit
+ * only columns that were explicitly targets of the UPDATE, so as to avoid
+ * unnecessary data transmission. (We can't do that for INSERT since we
+ * would miss sending default values for columns not listed in the source
+ * statement, and for UPDATE if there are BEFORE ROW UPDATE triggers since
+ * those triggers might change values for non-target columns, in which
+ * case we would miss sending changed values for those columns.)
+ */
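+ /*
+ * For illustration: given a hypothetical "UPDATE ft SET b = 1", only
+ * column b would normally be transmitted; but if ft has a BEFORE ROW
+ * UPDATE trigger, all non-dropped columns are sent instead.
+ */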
+ if (operation == CMD_INSERT ||
+ (operation == CMD_UPDATE &&
+ rel->trigdesc &&
+ rel->trigdesc->trig_update_before_row))
+ {
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ int attnum;
+
+ for (attnum = 1; attnum <= tupdesc->natts; attnum++)
+ {
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1);
+
+ if (!attr->attisdropped)
+ targetAttrs = lappend_int(targetAttrs, attnum);
+ }
+ }
+ else if (operation == CMD_UPDATE)
+ {
+ int col;
+ Bitmapset *allUpdatedCols = bms_union(rte->updatedCols, rte->extraUpdatedCols);
+
+ col = -1;
+ while ((col = bms_next_member(allUpdatedCols, col)) >= 0)
+ {
+ /* bit numbers are offset by FirstLowInvalidHeapAttributeNumber */
+ AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
+
+ if (attno <= InvalidAttrNumber) /* shouldn't happen */
+ elog(ERROR, "system-column update is not supported");
+ targetAttrs = lappend_int(targetAttrs, attno);
+ }
+ }
+
+ /*
+ * Extract the relevant WITH CHECK OPTION list if any.
+ */
+ if (plan->withCheckOptionLists)
+ withCheckOptionList = (List *) list_nth(plan->withCheckOptionLists,
+ subplan_index);
+
+ /*
+ * Extract the relevant RETURNING list if any.
+ */
+ if (plan->returningLists)
+ returningList = (List *) list_nth(plan->returningLists, subplan_index);
+
+ /*
+ * The ON CONFLICT DO UPDATE and DO NOTHING cases with an inference
+ * specification should have already been rejected in the optimizer, as
+ * presently there is no way to recognize an arbiter index on a foreign
+ * table. Only DO NOTHING is supported without an inference specification.
+ */
+ if (plan->onConflictAction == ONCONFLICT_NOTHING)
+ doNothing = true;
+ else if (plan->onConflictAction != ONCONFLICT_NONE)
+ elog(ERROR, "unexpected ON CONFLICT specification: %d",
+ (int) plan->onConflictAction);
+
+ /*
+ * Construct the SQL command string.
+ */
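+ /*
+ * As a rough illustration (the exact text is produced by the deparse
+ * functions), the generated remote statements look like:
+ *   INSERT INTO public.ft(a, b) VALUES ($1, $2) [ON CONFLICT DO NOTHING]
+ *   UPDATE public.ft SET a = $2 WHERE ctid = $1
+ *   DELETE FROM public.ft WHERE ctid = $1
+ */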
+ switch (operation)
+ {
+ case CMD_INSERT:
+ deparseInsertSql(&sql, rte, resultRelation, rel,
+ targetAttrs, doNothing,
+ withCheckOptionList, returningList,
+ &retrieved_attrs, &values_end_len);
+ break;
+ case CMD_UPDATE:
+ deparseUpdateSql(&sql, rte, resultRelation, rel,
+ targetAttrs,
+ withCheckOptionList, returningList,
+ &retrieved_attrs);
+ break;
+ case CMD_DELETE:
+ deparseDeleteSql(&sql, rte, resultRelation, rel,
+ returningList,
+ &retrieved_attrs);
+ break;
+ default:
+ elog(ERROR, "unexpected operation: %d", (int) operation);
+ break;
+ }
+
+ table_close(rel, NoLock);
+
+ /*
+ * Build the fdw_private list that will be available to the executor.
+ * Items in the list must match enum FdwModifyPrivateIndex, above.
+ */
+ return list_make5(makeString(sql.data),
+ targetAttrs,
+ makeInteger(values_end_len),
+ makeInteger((retrieved_attrs != NIL)),
+ retrieved_attrs);
+}
+
+/*
+ * postgresBeginForeignModify
+ * Begin an insert/update/delete operation on a foreign table
+ */
+static void
+postgresBeginForeignModify(ModifyTableState *mtstate,
+ ResultRelInfo *resultRelInfo,
+ List *fdw_private,
+ int subplan_index,
+ int eflags)
+{
+ PgFdwModifyState *fmstate;
+ char *query;
+ List *target_attrs;
+ bool has_returning;
+ int values_end_len;
+ List *retrieved_attrs;
+ RangeTblEntry *rte;
+
+ /*
+ * Do nothing in EXPLAIN (no ANALYZE) case. resultRelInfo->ri_FdwState
+ * stays NULL.
+ */
+ if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
+ return;
+
+ /* Deconstruct fdw_private data. */
+ query = strVal(list_nth(fdw_private,
+ FdwModifyPrivateUpdateSql));
+ target_attrs = (List *) list_nth(fdw_private,
+ FdwModifyPrivateTargetAttnums);
+ values_end_len = intVal(list_nth(fdw_private,
+ FdwModifyPrivateLen));
+ has_returning = intVal(list_nth(fdw_private,
+ FdwModifyPrivateHasReturning));
+ retrieved_attrs = (List *) list_nth(fdw_private,
+ FdwModifyPrivateRetrievedAttrs);
+
+ /* Find RTE. */
+ rte = exec_rt_fetch(resultRelInfo->ri_RangeTableIndex,
+ mtstate->ps.state);
+
+ /* Construct an execution state. */
+ fmstate = create_foreign_modify(mtstate->ps.state,
+ rte,
+ resultRelInfo,
+ mtstate->operation,
+ outerPlanState(mtstate)->plan,
+ query,
+ target_attrs,
+ values_end_len,
+ has_returning,
+ retrieved_attrs);
+
+ resultRelInfo->ri_FdwState = fmstate;
+}
+
+/*
+ * postgresExecForeignInsert
+ * Insert one row into a foreign table
+ */
+static TupleTableSlot *
+postgresExecForeignInsert(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot)
+{
+ PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
+ TupleTableSlot **rslot;
+ int numSlots = 1;
+
+ /*
+ * If the fmstate has aux_fmstate set, use the aux_fmstate (see
+ * postgresBeginForeignInsert())
+ */
+ if (fmstate->aux_fmstate)
+ resultRelInfo->ri_FdwState = fmstate->aux_fmstate;
+ rslot = execute_foreign_modify(estate, resultRelInfo, CMD_INSERT,
+ &slot, &planSlot, &numSlots);
+ /* Revert that change */
+ if (fmstate->aux_fmstate)
+ resultRelInfo->ri_FdwState = fmstate;
+
+ return rslot ? *rslot : NULL;
+}
+
+/*
+ * postgresExecForeignBatchInsert
+ * Insert multiple rows into a foreign table
+ */
+static TupleTableSlot **
+postgresExecForeignBatchInsert(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int *numSlots)
+{
+ PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
+ TupleTableSlot **rslot;
+
+ /*
+ * If the fmstate has aux_fmstate set, use the aux_fmstate (see
+ * postgresBeginForeignInsert())
+ */
+ if (fmstate->aux_fmstate)
+ resultRelInfo->ri_FdwState = fmstate->aux_fmstate;
+ rslot = execute_foreign_modify(estate, resultRelInfo, CMD_INSERT,
+ slots, planSlots, numSlots);
+ /* Revert that change */
+ if (fmstate->aux_fmstate)
+ resultRelInfo->ri_FdwState = fmstate;
+
+ return rslot;
+}
+
+/*
+ * postgresGetForeignModifyBatchSize
+ * Determine the maximum number of tuples that can be inserted in bulk
+ *
+ * Returns the batch size specified for server or table. When batching is not
+ * allowed (e.g. for tables with BEFORE/AFTER ROW triggers or with RETURNING
+ * clause), returns 1.
+ */
+static int
+postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
+{
+ int batch_size;
+ PgFdwModifyState *fmstate = resultRelInfo->ri_FdwState ?
+ (PgFdwModifyState *) resultRelInfo->ri_FdwState :
+ NULL;
+
+ /* should be called only once */
+ Assert(resultRelInfo->ri_BatchSize == 0);
+
+ /*
+ * Should never get called when the insert is being performed as part of a
+ * row movement operation.
+ */
+ Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
+
+ /*
+ * In EXPLAIN without ANALYZE, ri_FdwState is NULL, so we have to look up
+ * the option directly in server/table options. Otherwise just use the
+ * value we determined earlier.
+ */
+ if (fmstate)
+ batch_size = fmstate->batch_size;
+ else
+ batch_size = get_batch_size_option(resultRelInfo->ri_RelationDesc);
+
+ /*
+ * Disable batching when we have to use RETURNING, there are any
+ * BEFORE/AFTER ROW INSERT triggers on the foreign table, or there are any
+ * WITH CHECK OPTION constraints from parent views.
+ *
+ * When there are any BEFORE ROW INSERT triggers on the table, we can't
+ * support it, because such triggers might query the table we're inserting
+ * into and act differently if the tuples that have already been processed
+ * and prepared for insertion are not there.
+ */
+ if (resultRelInfo->ri_projectReturning != NULL ||
+ resultRelInfo->ri_WithCheckOptions != NIL ||
+ (resultRelInfo->ri_TrigDesc &&
+ (resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
+ resultRelInfo->ri_TrigDesc->trig_insert_after_row)))
+ return 1;
+
+ /*
+ * Otherwise use the batch size specified for server/table. The number of
+ * parameters in a batch is limited to 65535 (uint16), so make sure we
+ * don't exceed this limit by using the maximum batch_size possible.
+ */
+ if (fmstate && fmstate->p_nums > 0)
+ batch_size = Min(batch_size, PQ_QUERY_PARAM_MAX_LIMIT / fmstate->p_nums);
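+ /*
+ * Illustration with made-up numbers: with batch_size = 1000 and
+ * p_nums = 100 parameters per row, the clamp above limits the batch to
+ * 65535 / 100 = 655 rows per remote INSERT.
+ */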
+
+ return batch_size;
+}
+
+/*
+ * postgresExecForeignUpdate
+ * Update one row in a foreign table
+ */
+static TupleTableSlot *
+postgresExecForeignUpdate(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot)
+{
+ TupleTableSlot **rslot;
+ int numSlots = 1;
+
+ rslot = execute_foreign_modify(estate, resultRelInfo, CMD_UPDATE,
+ &slot, &planSlot, &numSlots);
+
+ return rslot ? rslot[0] : NULL;
+}
+
+/*
+ * postgresExecForeignDelete
+ * Delete one row from a foreign table
+ */
+static TupleTableSlot *
+postgresExecForeignDelete(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot)
+{
+ TupleTableSlot **rslot;
+ int numSlots = 1;
+
+ rslot = execute_foreign_modify(estate, resultRelInfo, CMD_DELETE,
+ &slot, &planSlot, &numSlots);
+
+ return rslot ? rslot[0] : NULL;
+}
+
+/*
+ * postgresEndForeignModify
+ * Finish an insert/update/delete operation on a foreign table
+ */
+static void
+postgresEndForeignModify(EState *estate,
+ ResultRelInfo *resultRelInfo)
+{
+ PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
+
+ /* If fmstate is NULL, we are in EXPLAIN; nothing to do */
+ if (fmstate == NULL)
+ return;
+
+ /* Destroy the execution state */
+ finish_foreign_modify(fmstate);
+}
+
+/*
+ * postgresBeginForeignInsert
+ * Begin an insert operation on a foreign table
+ */
+static void
+postgresBeginForeignInsert(ModifyTableState *mtstate,
+ ResultRelInfo *resultRelInfo)
+{
+ PgFdwModifyState *fmstate;
+ ModifyTable *plan = castNode(ModifyTable, mtstate->ps.plan);
+ EState *estate = mtstate->ps.state;
+ Index resultRelation;
+ Relation rel = resultRelInfo->ri_RelationDesc;
+ RangeTblEntry *rte;
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ int attnum;
+ int values_end_len;
+ StringInfoData sql;
+ List *targetAttrs = NIL;
+ List *retrieved_attrs = NIL;
+ bool doNothing = false;
+
+ /*
+ * If the foreign table we are about to insert routed rows into is also an
+ * UPDATE subplan result rel that will be updated later, proceeding with
+ * the INSERT will result in the later UPDATE incorrectly modifying those
+ * routed rows, so prevent the INSERT --- it would be nice if we could
+ * handle this case; but for now, throw an error for safety.
+ */
+ if (plan && plan->operation == CMD_UPDATE &&
+ (resultRelInfo->ri_usesFdwDirectModify ||
+ resultRelInfo->ri_FdwState))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot route tuples into foreign table to be updated \"%s\"",
+ RelationGetRelationName(rel))));
+
+ initStringInfo(&sql);
+
+ /* We transmit all columns that are defined in the foreign table. */
+ for (attnum = 1; attnum <= tupdesc->natts; attnum++)
+ {
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1);
+
+ if (!attr->attisdropped)
+ targetAttrs = lappend_int(targetAttrs, attnum);
+ }
+
+ /* Check whether to add an ON CONFLICT clause to the remote query. */
+ if (plan)
+ {
+ OnConflictAction onConflictAction = plan->onConflictAction;
+
+ /* We only support DO NOTHING without an inference specification. */
+ if (onConflictAction == ONCONFLICT_NOTHING)
+ doNothing = true;
+ else if (onConflictAction != ONCONFLICT_NONE)
+ elog(ERROR, "unexpected ON CONFLICT specification: %d",
+ (int) onConflictAction);
+ }
+
+ /*
+ * If the foreign table is a partition that doesn't have a corresponding
+ * RTE entry, we need to create a new RTE describing the foreign table for
+ * use by deparseInsertSql and create_foreign_modify() below, after first
+ * copying the parent's RTE and modifying some fields to describe the
+ * foreign partition to work on. However, if this is invoked by UPDATE,
+ * the existing RTE may already correspond to this partition if it is one
+ * of the UPDATE subplan target rels; in that case, we can just use the
+ * existing RTE as-is.
+ */
+ if (resultRelInfo->ri_RangeTableIndex == 0)
+ {
+ ResultRelInfo *rootResultRelInfo = resultRelInfo->ri_RootResultRelInfo;
+
+ rte = exec_rt_fetch(rootResultRelInfo->ri_RangeTableIndex, estate);
+ rte = copyObject(rte);
+ rte->relid = RelationGetRelid(rel);
+ rte->relkind = RELKIND_FOREIGN_TABLE;
+
+ /*
+ * For UPDATE, we must use the RT index of the first subplan target
+ * rel's RTE, because the core code would have built expressions for
+ * the partition, such as RETURNING, using that RT index as varno of
+ * Vars contained in those expressions.
+ */
+ if (plan && plan->operation == CMD_UPDATE &&
+ rootResultRelInfo->ri_RangeTableIndex == plan->rootRelation)
+ resultRelation = mtstate->resultRelInfo[0].ri_RangeTableIndex;
+ else
+ resultRelation = rootResultRelInfo->ri_RangeTableIndex;
+ }
+ else
+ {
+ resultRelation = resultRelInfo->ri_RangeTableIndex;
+ rte = exec_rt_fetch(resultRelation, estate);
+ }
+
+ /* Construct the SQL command string. */
+ deparseInsertSql(&sql, rte, resultRelation, rel, targetAttrs, doNothing,
+ resultRelInfo->ri_WithCheckOptions,
+ resultRelInfo->ri_returningList,
+ &retrieved_attrs, &values_end_len);
+
+ /* Construct an execution state. */
+ fmstate = create_foreign_modify(mtstate->ps.state,
+ rte,
+ resultRelInfo,
+ CMD_INSERT,
+ NULL,
+ sql.data,
+ targetAttrs,
+ values_end_len,
+ retrieved_attrs != NIL,
+ retrieved_attrs);
+
+ /*
+ * If the given resultRelInfo already has PgFdwModifyState set, it means
+ * the foreign table is an UPDATE subplan result rel; in which case, store
+ * the resulting state into the aux_fmstate of the PgFdwModifyState.
+ */
+ if (resultRelInfo->ri_FdwState)
+ {
+ Assert(plan && plan->operation == CMD_UPDATE);
+ Assert(resultRelInfo->ri_usesFdwDirectModify == false);
+ ((PgFdwModifyState *) resultRelInfo->ri_FdwState)->aux_fmstate = fmstate;
+ }
+ else
+ resultRelInfo->ri_FdwState = fmstate;
+}
+
+/*
+ * postgresEndForeignInsert
+ * Finish an insert operation on a foreign table
+ */
+static void
+postgresEndForeignInsert(EState *estate,
+ ResultRelInfo *resultRelInfo)
+{
+ PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
+
+ Assert(fmstate != NULL);
+
+ /*
+ * If the fmstate has aux_fmstate set, get the aux_fmstate (see
+ * postgresBeginForeignInsert())
+ */
+ if (fmstate->aux_fmstate)
+ fmstate = fmstate->aux_fmstate;
+
+ /* Destroy the execution state */
+ finish_foreign_modify(fmstate);
+}
+
+/*
+ * postgresIsForeignRelUpdatable
+ * Determine whether a foreign table supports INSERT, UPDATE and/or
+ * DELETE.
+ */
+static int
+postgresIsForeignRelUpdatable(Relation rel)
+{
+ bool updatable;
+ ForeignTable *table;
+ ForeignServer *server;
+ ListCell *lc;
+
+ /*
+ * By default, all postgres_fdw foreign tables are assumed updatable. This
+ * can be overridden by a per-server setting, which in turn can be
+ * overridden by a per-table setting.
+ */
+ updatable = true;
+
+ table = GetForeignTable(RelationGetRelid(rel));
+ server = GetForeignServer(table->serverid);
+
+ foreach(lc, server->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "updatable") == 0)
+ updatable = defGetBoolean(def);
+ }
+ foreach(lc, table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "updatable") == 0)
+ updatable = defGetBoolean(def);
+ }
+
+ /*
+ * Currently "updatable" means support for INSERT, UPDATE and DELETE.
+ */
+ return updatable ?
+ (1 << CMD_INSERT) | (1 << CMD_UPDATE) | (1 << CMD_DELETE) : 0;
+}
+
+/*
+ * postgresRecheckForeignScan
+ * Execute a local join execution plan for a foreign join
+ */
+static bool
+postgresRecheckForeignScan(ForeignScanState *node, TupleTableSlot *slot)
+{
+ Index scanrelid = ((Scan *) node->ss.ps.plan)->scanrelid;
+ PlanState *outerPlan = outerPlanState(node);
+ TupleTableSlot *result;
+
+ /* For base foreign relations, it suffices to set fdw_recheck_quals */
+ if (scanrelid > 0)
+ return true;
+
+ Assert(outerPlan != NULL);
+
+ /* Execute a local join execution plan */
+ result = ExecProcNode(outerPlan);
+ if (TupIsNull(result))
+ return false;
+
+ /* Store result in the given slot */
+ ExecCopySlot(slot, result);
+
+ return true;
+}
+
+/*
+ * find_modifytable_subplan
+ * Helper routine for postgresPlanDirectModify to find the
+ * ModifyTable subplan node that scans the specified RTI.
+ *
+ * Returns NULL if the subplan couldn't be identified. That's not a fatal
+ * error condition, we just abandon trying to do the update directly.
+ */
+static ForeignScan *
+find_modifytable_subplan(PlannerInfo *root,
+ ModifyTable *plan,
+ Index rtindex,
+ int subplan_index)
+{
+ Plan *subplan = outerPlan(plan);
+
+ /*
+ * The cases we support are (1) the desired ForeignScan is the immediate
+ * child of ModifyTable, or (2) it is the subplan_index'th child of an
+ * Append node that is the immediate child of ModifyTable. There is no
+ * point in looking further down, as that would mean that local joins are
+ * involved, so we can't do the update directly.
+ *
+ * There could be a Result atop the Append too, acting to compute the
+ * UPDATE targetlist values. We ignore that here; the tlist will be
+ * checked by our caller.
+ *
+ * In principle we could examine all the children of the Append, but it's
+ * currently unlikely that the core planner would generate such a plan
+ * with the children out-of-order. Moreover, such a search risks costing
+ * O(N^2) time when there are a lot of children.
+ */
+ if (IsA(subplan, Append))
+ {
+ Append *appendplan = (Append *) subplan;
+
+ if (subplan_index < list_length(appendplan->appendplans))
+ subplan = (Plan *) list_nth(appendplan->appendplans, subplan_index);
+ }
+ else if (IsA(subplan, Result) &&
+ outerPlan(subplan) != NULL &&
+ IsA(outerPlan(subplan), Append))
+ {
+ Append *appendplan = (Append *) outerPlan(subplan);
+
+ if (subplan_index < list_length(appendplan->appendplans))
+ subplan = (Plan *) list_nth(appendplan->appendplans, subplan_index);
+ }
+
+ /* Now, have we got a ForeignScan on the desired rel? */
+ if (IsA(subplan, ForeignScan))
+ {
+ ForeignScan *fscan = (ForeignScan *) subplan;
+
+ if (bms_is_member(rtindex, fscan->fs_relids))
+ return fscan;
+ }
+
+ return NULL;
+}
+
+/*
+ * postgresPlanDirectModify
+ * Consider a direct foreign table modification
+ *
+ * Decide whether it is safe to modify a foreign table directly, and if so,
+ * rewrite subplan accordingly.
+ */
+static bool
+postgresPlanDirectModify(PlannerInfo *root,
+ ModifyTable *plan,
+ Index resultRelation,
+ int subplan_index)
+{
+ CmdType operation = plan->operation;
+ RelOptInfo *foreignrel;
+ RangeTblEntry *rte;
+ PgFdwRelationInfo *fpinfo;
+ Relation rel;
+ StringInfoData sql;
+ ForeignScan *fscan;
+ List *processed_tlist = NIL;
+ List *targetAttrs = NIL;
+ List *remote_exprs;
+ List *params_list = NIL;
+ List *returningList = NIL;
+ List *retrieved_attrs = NIL;
+
+ /*
+ * Decide whether it is safe to modify a foreign table directly.
+ */
+
+ /*
+ * The table modification must be an UPDATE or DELETE.
+ */
+ if (operation != CMD_UPDATE && operation != CMD_DELETE)
+ return false;
+
+ /*
+ * Try to locate the ForeignScan subplan that's scanning resultRelation.
+ */
+ fscan = find_modifytable_subplan(root, plan, resultRelation, subplan_index);
+ if (!fscan)
+ return false;
+
+ /*
+ * It's unsafe to modify a foreign table directly if there are any quals
+ * that should be evaluated locally.
+ */
+ if (fscan->scan.plan.qual != NIL)
+ return false;
+
+ /* Safe to fetch data about the target foreign rel */
+ if (fscan->scan.scanrelid == 0)
+ {
+ foreignrel = find_join_rel(root, fscan->fs_relids);
+ /* We should have a rel for this foreign join. */
+ Assert(foreignrel);
+ }
+ else
+ foreignrel = root->simple_rel_array[resultRelation];
+ rte = root->simple_rte_array[resultRelation];
+ fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private;
+
+ /*
+ * It's unsafe to update a foreign table directly, if any expressions to
+ * assign to the target columns are unsafe to evaluate remotely.
+ */
+ if (operation == CMD_UPDATE)
+ {
+ ListCell *lc,
+ *lc2;
+
+ /*
+ * The expressions of concern are the first N columns of the processed
+ * targetlist, where N is the length of the rel's update_colnos.
+ */
+ get_translated_update_targetlist(root, resultRelation,
+ &processed_tlist, &targetAttrs);
+ forboth(lc, processed_tlist, lc2, targetAttrs)
+ {
+ TargetEntry *tle = lfirst_node(TargetEntry, lc);
+ AttrNumber attno = lfirst_int(lc2);
+
+ /* update's new-value expressions shouldn't be resjunk */
+ Assert(!tle->resjunk);
+
+ if (attno <= InvalidAttrNumber) /* shouldn't happen */
+ elog(ERROR, "system-column update is not supported");
+
+ if (!is_foreign_expr(root, foreignrel, (Expr *) tle->expr))
+ return false;
+ }
+ }
+
+ /*
+ * Ok, rewrite subplan so as to modify the foreign table directly.
+ */
+ initStringInfo(&sql);
+
+ /*
+ * Core code already has some lock on each rel being planned, so we can
+ * use NoLock here.
+ */
+ rel = table_open(rte->relid, NoLock);
+
+ /*
+ * Recall the qual clauses that must be evaluated remotely. (These are
+ * bare clauses not RestrictInfos, but deparse.c's appendConditions()
+ * doesn't care.)
+ */
+ remote_exprs = fpinfo->final_remote_exprs;
+
+ /*
+ * Extract the relevant RETURNING list if any.
+ */
+ if (plan->returningLists)
+ {
+ returningList = (List *) list_nth(plan->returningLists, subplan_index);
+
+ /*
+ * When performing an UPDATE/DELETE .. RETURNING on a join directly,
+ * we fetch from the foreign server any Vars specified in RETURNING
+ * that refer not only to the target relation but to non-target
+ * relations. So we'll deparse them into the RETURNING clause of the
+ * remote query; use a targetlist consisting of them instead, which
+ * will be adjusted to be the new fdw_scan_tlist of the foreign-scan plan
+ * node below.
+ */
+ if (fscan->scan.scanrelid == 0)
+ returningList = build_remote_returning(resultRelation, rel,
+ returningList);
+ }
+
+ /*
+ * Construct the SQL command string.
+ */
+ switch (operation)
+ {
+ case CMD_UPDATE:
+ deparseDirectUpdateSql(&sql, root, resultRelation, rel,
+ foreignrel,
+ processed_tlist,
+ targetAttrs,
+ remote_exprs, &params_list,
+ returningList, &retrieved_attrs);
+ break;
+ case CMD_DELETE:
+ deparseDirectDeleteSql(&sql, root, resultRelation, rel,
+ foreignrel,
+ remote_exprs, &params_list,
+ returningList, &retrieved_attrs);
+ break;
+ default:
+ elog(ERROR, "unexpected operation: %d", (int) operation);
+ break;
+ }
+
+ /*
+ * Update the operation and target relation info.
+ */
+ fscan->operation = operation;
+ fscan->resultRelation = resultRelation;
+
+ /*
+ * Update the fdw_exprs list that will be available to the executor.
+ */
+ fscan->fdw_exprs = params_list;
+
+ /*
+ * Update the fdw_private list that will be available to the executor.
+ * Items in the list must match enum FdwDirectModifyPrivateIndex, above.
+ */
+ fscan->fdw_private = list_make4(makeString(sql.data),
+ makeInteger((retrieved_attrs != NIL)),
+ retrieved_attrs,
+ makeInteger(plan->canSetTag));
+
+ /*
+ * Update the foreign-join-related fields.
+ */
+ if (fscan->scan.scanrelid == 0)
+ {
+ /* No need for the outer subplan. */
+ fscan->scan.plan.lefttree = NULL;
+
+ /* Build new fdw_scan_tlist if UPDATE/DELETE .. RETURNING. */
+ if (returningList)
+ rebuild_fdw_scan_tlist(fscan, returningList);
+ }
+
+ /*
+ * Finally, unset the async-capable flag if it is set, as we currently
+ * don't support asynchronous execution of direct modifications.
+ */
+ if (fscan->scan.plan.async_capable)
+ fscan->scan.plan.async_capable = false;
+
+ table_close(rel, NoLock);
+ return true;
+}
+
+/*
+ * postgresBeginDirectModify
+ * Prepare a direct foreign table modification
+ */
+static void
+postgresBeginDirectModify(ForeignScanState *node, int eflags)
+{
+ ForeignScan *fsplan = (ForeignScan *) node->ss.ps.plan;
+ EState *estate = node->ss.ps.state;
+ PgFdwDirectModifyState *dmstate;
+ Index rtindex;
+ RangeTblEntry *rte;
+ Oid userid;
+ ForeignTable *table;
+ UserMapping *user;
+ int numParams;
+
+ /*
+ * Do nothing in EXPLAIN (no ANALYZE) case. node->fdw_state stays NULL.
+ */
+ if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
+ return;
+
+ /*
+ * We'll save private state in node->fdw_state.
+ */
+ dmstate = (PgFdwDirectModifyState *) palloc0(sizeof(PgFdwDirectModifyState));
+ node->fdw_state = (void *) dmstate;
+
+ /*
+ * Identify which user to do the remote access as. This should match what
+ * ExecCheckRTEPerms() does.
+ */
+ rtindex = node->resultRelInfo->ri_RangeTableIndex;
+ rte = exec_rt_fetch(rtindex, estate);
+ userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
+
+ /* Get info about foreign table. */
+ if (fsplan->scan.scanrelid == 0)
+ dmstate->rel = ExecOpenScanRelation(estate, rtindex, eflags);
+ else
+ dmstate->rel = node->ss.ss_currentRelation;
+ table = GetForeignTable(RelationGetRelid(dmstate->rel));
+ user = GetUserMapping(userid, table->serverid);
+
+ /*
+ * Get connection to the foreign server. Connection manager will
+ * establish new connection if necessary.
+ */
+ dmstate->conn = GetConnection(user, false, &dmstate->conn_state);
+
+ /* Update the foreign-join-related fields. */
+ if (fsplan->scan.scanrelid == 0)
+ {
+ /* Save info about foreign table. */
+ dmstate->resultRel = dmstate->rel;
+
+ /*
+ * Set dmstate->rel to NULL to teach get_returning_data() and
+ * make_tuple_from_result_row() that columns fetched from the remote
+ * server are described by fdw_scan_tlist of the foreign-scan plan
+ * node, not the tuple descriptor for the target relation.
+ */
+ dmstate->rel = NULL;
+ }
+
+ /* Initialize state variable */
+ dmstate->num_tuples = -1; /* -1 means not set yet */
+
+ /* Get private info created by planner functions. */
+ dmstate->query = strVal(list_nth(fsplan->fdw_private,
+ FdwDirectModifyPrivateUpdateSql));
+ dmstate->has_returning = intVal(list_nth(fsplan->fdw_private,
+ FdwDirectModifyPrivateHasReturning));
+ dmstate->retrieved_attrs = (List *) list_nth(fsplan->fdw_private,
+ FdwDirectModifyPrivateRetrievedAttrs);
+ dmstate->set_processed = intVal(list_nth(fsplan->fdw_private,
+ FdwDirectModifyPrivateSetProcessed));
+
+ /* Create context for per-tuple temp workspace. */
+ dmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt,
+ "postgres_fdw temporary data",
+ ALLOCSET_SMALL_SIZES);
+
+ /* Prepare for input conversion of RETURNING results. */
+ if (dmstate->has_returning)
+ {
+ TupleDesc tupdesc;
+
+ if (fsplan->scan.scanrelid == 0)
+ tupdesc = get_tupdesc_for_join_scan_tuples(node);
+ else
+ tupdesc = RelationGetDescr(dmstate->rel);
+
+ dmstate->attinmeta = TupleDescGetAttInMetadata(tupdesc);
+
+ /*
+ * When performing an UPDATE/DELETE .. RETURNING on a join directly,
+ * initialize a filter to extract an updated/deleted tuple from a scan
+ * tuple.
+ */
+ if (fsplan->scan.scanrelid == 0)
+ init_returning_filter(dmstate, fsplan->fdw_scan_tlist, rtindex);
+ }
+
+ /*
+ * Prepare for processing of parameters used in remote query, if any.
+ */
+ numParams = list_length(fsplan->fdw_exprs);
+ dmstate->numParams = numParams;
+ if (numParams > 0)
+ prepare_query_params((PlanState *) node,
+ fsplan->fdw_exprs,
+ numParams,
+ &dmstate->param_flinfo,
+ &dmstate->param_exprs,
+ &dmstate->param_values);
+}
+
+/*
+ * postgresIterateDirectModify
+ * Execute a direct foreign table modification
+ */
+static TupleTableSlot *
+postgresIterateDirectModify(ForeignScanState *node)
+{
+ PgFdwDirectModifyState *dmstate = (PgFdwDirectModifyState *) node->fdw_state;
+ EState *estate = node->ss.ps.state;
+ ResultRelInfo *resultRelInfo = node->resultRelInfo;
+
+ /*
+ * If this is the first call after Begin, execute the statement.
+ */
+ if (dmstate->num_tuples == -1)
+ execute_dml_stmt(node);
+
+ /*
+ * If the local query doesn't specify RETURNING, just clear tuple slot.
+ */
+ if (!resultRelInfo->ri_projectReturning)
+ {
+ TupleTableSlot *slot = node->ss.ss_ScanTupleSlot;
+ Instrumentation *instr = node->ss.ps.instrument;
+
+ Assert(!dmstate->has_returning);
+
+ /* Increment the command es_processed count if necessary. */
+ if (dmstate->set_processed)
+ estate->es_processed += dmstate->num_tuples;
+
+ /* Increment the tuple count for EXPLAIN ANALYZE if necessary. */
+ if (instr)
+ instr->tuplecount += dmstate->num_tuples;
+
+ return ExecClearTuple(slot);
+ }
+
+ /*
+ * Get the next RETURNING tuple.
+ */
+ return get_returning_data(node);
+}
+
+/*
+ * postgresEndDirectModify
+ * Finish a direct foreign table modification
+ */
+static void
+postgresEndDirectModify(ForeignScanState *node)
+{
+ PgFdwDirectModifyState *dmstate = (PgFdwDirectModifyState *) node->fdw_state;
+
+ /* if dmstate is NULL, we are in EXPLAIN; nothing to do */
+ if (dmstate == NULL)
+ return;
+
+ /* Release PGresult */
+ if (dmstate->result)
+ PQclear(dmstate->result);
+
+ /* Release remote connection */
+ ReleaseConnection(dmstate->conn);
+ dmstate->conn = NULL;
+
+ /* MemoryContext will be deleted automatically. */
+}
+
+/*
+ * postgresExplainForeignScan
+ * Produce extra output for EXPLAIN of a ForeignScan on a foreign table
+ */
+static void
+postgresExplainForeignScan(ForeignScanState *node, ExplainState *es)
+{
+ ForeignScan *plan = castNode(ForeignScan, node->ss.ps.plan);
+ List *fdw_private = plan->fdw_private;
+
+ /*
+ * Identify foreign scans that are really joins or upper relations. The
+ * input looks something like "(1) LEFT JOIN (2)", and we must replace the
+ * digit string(s), which are RT indexes, with the correct relation names.
+ * We do that here, not when the plan is created, because we can't know
+ * what aliases ruleutils.c will assign at plan creation time.
+ */
+ if (list_length(fdw_private) > FdwScanPrivateRelations)
+ {
+ StringInfo relations;
+ char *rawrelations;
+ char *ptr;
+ int minrti,
+ rtoffset;
+
+ rawrelations = strVal(list_nth(fdw_private, FdwScanPrivateRelations));
+
+ /*
+ * A difficulty with using a string representation of RT indexes is
+ * that setrefs.c won't update the string when flattening the
+ * rangetable. To find out what rtoffset was applied, identify the
+ * minimum RT index appearing in the string and compare it to the
+ * minimum member of plan->fs_relids. (We expect all the relids in
+ * the join will have been offset by the same amount; the Asserts
+ * below should catch it if that ever changes.)
+ */
+ minrti = INT_MAX;
+ ptr = rawrelations;
+ while (*ptr)
+ {
+ if (isdigit((unsigned char) *ptr))
+ {
+ int rti = strtol(ptr, &ptr, 10);
+
+ if (rti < minrti)
+ minrti = rti;
+ }
+ else
+ ptr++;
+ }
+ rtoffset = bms_next_member(plan->fs_relids, -1) - minrti;
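+ /*
+ * For example (hypothetical values): if rawrelations is
+ * "(2) LEFT JOIN (3)" and fs_relids is {6, 7}, then minrti = 2 and
+ * rtoffset = 6 - 2 = 4.
+ */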
+
+ /* Now we can translate the string */
+ relations = makeStringInfo();
+ ptr = rawrelations;
+ while (*ptr)
+ {
+ if (isdigit((unsigned char) *ptr))
+ {
+ int rti = strtol(ptr, &ptr, 10);
+ RangeTblEntry *rte;
+ char *relname;
+ char *refname;
+
+ rti += rtoffset;
+ Assert(bms_is_member(rti, plan->fs_relids));
+ rte = rt_fetch(rti, es->rtable);
+ Assert(rte->rtekind == RTE_RELATION);
+ /* This logic should agree with explain.c's ExplainTargetRel */
+ relname = get_rel_name(rte->relid);
+ if (es->verbose)
+ {
+ char *namespace;
+
+ namespace = get_namespace_name(get_rel_namespace(rte->relid));
+ appendStringInfo(relations, "%s.%s",
+ quote_identifier(namespace),
+ quote_identifier(relname));
+ }
+ else
+ appendStringInfoString(relations,
+ quote_identifier(relname));
+ refname = (char *) list_nth(es->rtable_names, rti - 1);
+ if (refname == NULL)
+ refname = rte->eref->aliasname;
+ if (strcmp(refname, relname) != 0)
+ appendStringInfo(relations, " %s",
+ quote_identifier(refname));
+ }
+ else
+ appendStringInfoChar(relations, *ptr++);
+ }
+ ExplainPropertyText("Relations", relations->data, es);
+ }
+
+ /*
+ * Add remote query, when VERBOSE option is specified.
+ */
+ if (es->verbose)
+ {
+ char *sql;
+
+ sql = strVal(list_nth(fdw_private, FdwScanPrivateSelectSql));
+ ExplainPropertyText("Remote SQL", sql, es);
+ }
+}
+
+/*
+ * postgresExplainForeignModify
+ * Produce extra output for EXPLAIN of a ModifyTable on a foreign table
+ */
+static void
+postgresExplainForeignModify(ModifyTableState *mtstate,
+ ResultRelInfo *rinfo,
+ List *fdw_private,
+ int subplan_index,
+ ExplainState *es)
+{
+ if (es->verbose)
+ {
+ char *sql = strVal(list_nth(fdw_private,
+ FdwModifyPrivateUpdateSql));
+
+ ExplainPropertyText("Remote SQL", sql, es);
+
+ /*
+ * For INSERT we should always have batch size >= 1, but UPDATE and
+ * DELETE don't support batching, so don't show the property.
+ */
+ if (rinfo->ri_BatchSize > 0)
+ ExplainPropertyInteger("Batch Size", NULL, rinfo->ri_BatchSize, es);
+ }
+}
+
+/*
+ * postgresExplainDirectModify
+ * Produce extra output for EXPLAIN of a ForeignScan that modifies a
+ * foreign table directly
+ */
+static void
+postgresExplainDirectModify(ForeignScanState *node, ExplainState *es)
+{
+ List *fdw_private;
+ char *sql;
+
+ if (es->verbose)
+ {
+ fdw_private = ((ForeignScan *) node->ss.ps.plan)->fdw_private;
+ sql = strVal(list_nth(fdw_private, FdwDirectModifyPrivateUpdateSql));
+ ExplainPropertyText("Remote SQL", sql, es);
+ }
+}
+
+/*
+ * postgresExecForeignTruncate
+ * Truncate one or more foreign tables
+ */
+static void
+postgresExecForeignTruncate(List *rels,
+ DropBehavior behavior,
+ bool restart_seqs)
+{
+ Oid serverid = InvalidOid;
+ UserMapping *user = NULL;
+ PGconn *conn = NULL;
+ StringInfoData sql;
+ ListCell *lc;
+ bool server_truncatable = true;
+
+ /*
+ * By default, all postgres_fdw foreign tables are assumed truncatable.
+ * This can be overridden by a per-server setting, which in turn can be
+ * overridden by a per-table setting.
+ */
+ foreach(lc, rels)
+ {
+ ForeignServer *server = NULL;
+ Relation rel = lfirst(lc);
+ ForeignTable *table = GetForeignTable(RelationGetRelid(rel));
+ ListCell *cell;
+ bool truncatable;
+
+ /*
+ * First time through, determine whether the foreign server allows
+ * truncates. Since all specified foreign tables are assumed to belong
+ * to the same foreign server, this result can be used for other
+ * foreign tables.
+ */
+ if (!OidIsValid(serverid))
+ {
+ serverid = table->serverid;
+ server = GetForeignServer(serverid);
+
+ foreach(cell, server->options)
+ {
+ DefElem *defel = (DefElem *) lfirst(cell);
+
+ if (strcmp(defel->defname, "truncatable") == 0)
+ {
+ server_truncatable = defGetBoolean(defel);
+ break;
+ }
+ }
+ }
+
+ /*
+ * Confirm that all specified foreign tables belong to the same
+ * foreign server.
+ */
+ Assert(table->serverid == serverid);
+
+ /* Determine whether this foreign table allows truncations */
+ truncatable = server_truncatable;
+ foreach(cell, table->options)
+ {
+ DefElem *defel = (DefElem *) lfirst(cell);
+
+ if (strcmp(defel->defname, "truncatable") == 0)
+ {
+ truncatable = defGetBoolean(defel);
+ break;
+ }
+ }
+
+ if (!truncatable)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("foreign table \"%s\" does not allow truncates",
+ RelationGetRelationName(rel))));
+ }
+ Assert(OidIsValid(serverid));
+
+ /*
+ * Get connection to the foreign server. Connection manager will
+ * establish new connection if necessary.
+ */
+ user = GetUserMapping(GetUserId(), serverid);
+ conn = GetConnection(user, false, NULL);
+
+ /* Construct the TRUNCATE command string */
+ initStringInfo(&sql);
+ deparseTruncateSql(&sql, rels, behavior, restart_seqs);
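+ /*
+ * The command built here is a single remote TRUNCATE naming all the
+ * relations, with identity-restart and cascade behavior mirroring the
+ * local command (the exact text is produced by deparseTruncateSql).
+ */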
+
+ /* Issue the TRUNCATE command to remote server */
+ do_sql_command(conn, sql.data);
+
+ pfree(sql.data);
+}
+
+/*
+ * estimate_path_cost_size
+ * Get cost and size estimates for a foreign scan on the given foreign
+ * relation, which is either a base relation, a join between foreign
+ * relations, or an upper relation containing foreign relations.
+ *
+ * param_join_conds are the parameterization clauses with outer relations.
+ * pathkeys specify the expected sort order if any for given path being costed.
+ * fpextra specifies additional post-scan/join-processing steps such as the
+ * final sort and the LIMIT restriction.
+ *
+ * The function returns the cost and size estimates in the p_rows, p_width,
+ * p_startup_cost and p_total_cost output parameters.
+ */
+static void
+estimate_path_cost_size(PlannerInfo *root,
+ RelOptInfo *foreignrel,
+ List *param_join_conds,
+ List *pathkeys,
+ PgFdwPathExtraData *fpextra,
+ double *p_rows, int *p_width,
+ Cost *p_startup_cost, Cost *p_total_cost)
+{
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private;
+ double rows;
+ double retrieved_rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+
+ /* Make sure the core code has set up the relation's reltarget */
+ Assert(foreignrel->reltarget);
+
+ /*
+ * If the table or the server is configured to use remote estimates,
+ * connect to the foreign server and execute EXPLAIN to estimate the
+ * number of rows selected by the restriction+join clauses. Otherwise,
+ * estimate rows using whatever statistics we have locally, in a way
+ * similar to ordinary tables.
+ */
+ if (fpinfo->use_remote_estimate)
+ {
+ List *remote_param_join_conds;
+ List *local_param_join_conds;
+ StringInfoData sql;
+ PGconn *conn;
+ Selectivity local_sel;
+ QualCost local_cost;
+ List *fdw_scan_tlist = NIL;
+ List *remote_conds;
+
+ /* Required only to be passed to deparseSelectStmtForRel */
+ List *retrieved_attrs;
+
+ /*
+ * param_join_conds might contain both clauses that are safe to send
+ * across, and clauses that aren't.
+ */
+ classifyConditions(root, foreignrel, param_join_conds,
+ &remote_param_join_conds, &local_param_join_conds);
+
+ /* Build the list of columns to be fetched from the foreign server. */
+ if (IS_JOIN_REL(foreignrel) || IS_UPPER_REL(foreignrel))
+ fdw_scan_tlist = build_tlist_to_deparse(foreignrel);
+ else
+ fdw_scan_tlist = NIL;
+
+ /*
+ * The complete list of remote conditions includes everything from
+ * baserestrictinfo plus any extra join_conds relevant to this
+ * particular path.
+ */
+ remote_conds = list_concat(remote_param_join_conds,
+ fpinfo->remote_conds);
+
+ /*
+ * Construct EXPLAIN query including the desired SELECT, FROM, and
+ * WHERE clauses. Params and other-relation Vars are replaced by dummy
+ * values, so don't request params_list.
+ */
+ initStringInfo(&sql);
+ appendStringInfoString(&sql, "EXPLAIN ");
+ deparseSelectStmtForRel(&sql, root, foreignrel, fdw_scan_tlist,
+ remote_conds, pathkeys,
+ fpextra ? fpextra->has_final_sort : false,
+ fpextra ? fpextra->has_limit : false,
+ false, &retrieved_attrs, NULL);
+
+ /* Get the remote estimate */
+ conn = GetConnection(fpinfo->user, false, NULL);
+ get_remote_estimate(sql.data, conn, &rows, &width,
+ &startup_cost, &total_cost);
+ ReleaseConnection(conn);
+
+ retrieved_rows = rows;
+
+ /* Factor in the selectivity of the locally-checked quals */
+ local_sel = clauselist_selectivity(root,
+ local_param_join_conds,
+ foreignrel->relid,
+ JOIN_INNER,
+ NULL);
+ local_sel *= fpinfo->local_conds_sel;
+
+ rows = clamp_row_est(rows * local_sel);
+
+ /* Add in the eval cost of the locally-checked quals */
+ startup_cost += fpinfo->local_conds_cost.startup;
+ total_cost += fpinfo->local_conds_cost.per_tuple * retrieved_rows;
+ cost_qual_eval(&local_cost, local_param_join_conds, root);
+ startup_cost += local_cost.startup;
+ total_cost += local_cost.per_tuple * retrieved_rows;
+
+ /*
+ * Add in tlist eval cost for each output row. In case of an
+ * aggregate, some of the tlist expressions such as grouping
+ * expressions will be evaluated remotely, so adjust the costs.
+ */
+ startup_cost += foreignrel->reltarget->cost.startup;
+ total_cost += foreignrel->reltarget->cost.startup;
+ total_cost += foreignrel->reltarget->cost.per_tuple * rows;
+ if (IS_UPPER_REL(foreignrel))
+ {
+ QualCost tlist_cost;
+
+ cost_qual_eval(&tlist_cost, fdw_scan_tlist, root);
+ startup_cost -= tlist_cost.startup;
+ total_cost -= tlist_cost.startup;
+ total_cost -= tlist_cost.per_tuple * rows;
+ }
+ }
+ else
+ {
+ Cost run_cost = 0;
+
+ /*
+ * We don't support join conditions in this mode (hence, no
+ * parameterized paths can be made).
+ */
+ Assert(param_join_conds == NIL);
+
+ /*
+ * We will come here again and again with a different set of pathkeys or
+ * additional post-scan/join-processing steps that the caller wants to
+ * cost. We don't need to calculate the cost/size estimates for the
+ * underlying scan, join, or grouping each time. Instead, use those
+ * estimates if we have cached them already.
+ */
+ if (fpinfo->rel_startup_cost >= 0 && fpinfo->rel_total_cost >= 0)
+ {
+ Assert(fpinfo->retrieved_rows >= 0);
+
+ rows = fpinfo->rows;
+ retrieved_rows = fpinfo->retrieved_rows;
+ width = fpinfo->width;
+ startup_cost = fpinfo->rel_startup_cost;
+ run_cost = fpinfo->rel_total_cost - fpinfo->rel_startup_cost;
+
+ /*
+ * If we estimate the costs of a foreign scan or a foreign join
+ * with additional post-scan/join-processing steps, the scan or
+ * join costs obtained from the cache wouldn't yet contain the
+ * eval costs for the final scan/join target, which would've been
+ * updated by apply_scanjoin_target_to_paths(); add the eval costs
+ * now.
+ */
+ if (fpextra && !IS_UPPER_REL(foreignrel))
+ {
+ /* Shouldn't get here unless we have LIMIT */
+ Assert(fpextra->has_limit);
+ Assert(foreignrel->reloptkind == RELOPT_BASEREL ||
+ foreignrel->reloptkind == RELOPT_JOINREL);
+ startup_cost += foreignrel->reltarget->cost.startup;
+ run_cost += foreignrel->reltarget->cost.per_tuple * rows;
+ }
+ }
+ else if (IS_JOIN_REL(foreignrel))
+ {
+ PgFdwRelationInfo *fpinfo_i;
+ PgFdwRelationInfo *fpinfo_o;
+ QualCost join_cost;
+ QualCost remote_conds_cost;
+ double nrows;
+
+ /* Use rows/width estimates made by the core code. */
+ rows = foreignrel->rows;
+ width = foreignrel->reltarget->width;
+
+ /* For join we expect inner and outer relations set */
+ Assert(fpinfo->innerrel && fpinfo->outerrel);
+
+ fpinfo_i = (PgFdwRelationInfo *) fpinfo->innerrel->fdw_private;
+ fpinfo_o = (PgFdwRelationInfo *) fpinfo->outerrel->fdw_private;
+
+ /* Estimate of number of rows in cross product */
+ nrows = fpinfo_i->rows * fpinfo_o->rows;
+
+ /*
+ * Back into an estimate of the number of retrieved rows. Just in
+ * case this is nuts, clamp to at most nrows.
+ */
+ retrieved_rows = clamp_row_est(rows / fpinfo->local_conds_sel);
+ retrieved_rows = Min(retrieved_rows, nrows);
+
+ /*
+ * The cost of foreign join is estimated as cost of generating
+ * rows for the joining relations + cost for applying quals on the
+ * rows.
+ */
+
+ /*
+ * Calculate the cost of clauses pushed down to the foreign server
+ */
+ cost_qual_eval(&remote_conds_cost, fpinfo->remote_conds, root);
+ /* Calculate the cost of applying join clauses */
+ cost_qual_eval(&join_cost, fpinfo->joinclauses, root);
+
+ /*
+ * Startup cost includes startup cost of joining relations and the
+ * startup cost for join and other clauses. We do not include the
+ * startup cost specific to join strategy (e.g. setting up hash
+ * tables) since we do not know what strategy the foreign server
+ * is going to use.
+ */
+ startup_cost = fpinfo_i->rel_startup_cost + fpinfo_o->rel_startup_cost;
+ startup_cost += join_cost.startup;
+ startup_cost += remote_conds_cost.startup;
+ startup_cost += fpinfo->local_conds_cost.startup;
+
+ /*
+ * Run time cost includes:
+ *
+ * 1. Run time cost (total_cost - startup_cost) of relations being
+ * joined
+ *
+ * 2. Run time cost of applying join clauses on the cross product
+ * of the joining relations.
+ *
+ * 3. Run time cost of applying pushed down other clauses on the
+ * result of join
+ *
+ * 4. Run time cost of applying nonpushable other clauses locally
+ * on the result fetched from the foreign server.
+ */
+ run_cost = fpinfo_i->rel_total_cost - fpinfo_i->rel_startup_cost;
+ run_cost += fpinfo_o->rel_total_cost - fpinfo_o->rel_startup_cost;
+ run_cost += nrows * join_cost.per_tuple;
+ nrows = clamp_row_est(nrows * fpinfo->joinclause_sel);
+ run_cost += nrows * remote_conds_cost.per_tuple;
+ run_cost += fpinfo->local_conds_cost.per_tuple * retrieved_rows;
+
+ /* Add in tlist eval cost for each output row */
+ startup_cost += foreignrel->reltarget->cost.startup;
+ run_cost += foreignrel->reltarget->cost.per_tuple * rows;
+ }
+ else if (IS_UPPER_REL(foreignrel))
+ {
+ RelOptInfo *outerrel = fpinfo->outerrel;
+ PgFdwRelationInfo *ofpinfo;
+ AggClauseCosts aggcosts;
+ double input_rows;
+ int numGroupCols;
+ double numGroups = 1;
+
+ /* The upper relation should have its outer relation set */
+ Assert(outerrel);
+ /* and that outer relation should have its reltarget set */
+ Assert(outerrel->reltarget);
+
+ /*
+ * This cost model is a mixture of the costing done for sorted and
+ * hashed aggregates in cost_agg(). We don't know which strategy
+ * the remote side will choose, so for simplicity we put all
+ * startup-related costs in startup_cost and all finalization and
+ * run costs in total_cost.
+ */
+
+ ofpinfo = (PgFdwRelationInfo *) outerrel->fdw_private;
+
+ /* Get rows from input rel */
+ input_rows = ofpinfo->rows;
+
+ /* Collect statistics about aggregates for estimating costs. */
+ MemSet(&aggcosts, 0, sizeof(AggClauseCosts));
+ if (root->parse->hasAggs)
+ {
+ get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &aggcosts);
+ }
+
+ /* Get number of grouping columns and possible number of groups */
+ numGroupCols = list_length(root->parse->groupClause);
+ numGroups = estimate_num_groups(root,
+ get_sortgrouplist_exprs(root->parse->groupClause,
+ fpinfo->grouped_tlist),
+ input_rows, NULL, NULL);
+
+ /*
+ * Get the retrieved_rows and rows estimates. If there are HAVING
+ * quals, account for their selectivity.
+ */
+ if (root->parse->havingQual)
+ {
+ /* Factor in the selectivity of the remotely-checked quals */
+ retrieved_rows =
+ clamp_row_est(numGroups *
+ clauselist_selectivity(root,
+ fpinfo->remote_conds,
+ 0,
+ JOIN_INNER,
+ NULL));
+ /* Factor in the selectivity of the locally-checked quals */
+ rows = clamp_row_est(retrieved_rows * fpinfo->local_conds_sel);
+ }
+ else
+ {
+ rows = retrieved_rows = numGroups;
+ }
+
+ /* Use width estimate made by the core code. */
+ width = foreignrel->reltarget->width;
+
+ /*-----
+ * Startup cost includes:
+ * 1. Startup cost of the underlying input relation, adjusted for
+ * tlist replacement by apply_scanjoin_target_to_paths()
+ * 2. Cost of performing aggregation, per cost_agg()
+ *-----
+ */
+ startup_cost = ofpinfo->rel_startup_cost;
+ startup_cost += outerrel->reltarget->cost.startup;
+ startup_cost += aggcosts.transCost.startup;
+ startup_cost += aggcosts.transCost.per_tuple * input_rows;
+ startup_cost += aggcosts.finalCost.startup;
+ startup_cost += (cpu_operator_cost * numGroupCols) * input_rows;
+
+ /*-----
+ * Run time cost includes:
+ * 1. Run time cost of the underlying input relation, adjusted for
+ * tlist replacement by apply_scanjoin_target_to_paths()
+ * 2. Run time cost of performing aggregation, per cost_agg()
+ *-----
+ */
+ run_cost = ofpinfo->rel_total_cost - ofpinfo->rel_startup_cost;
+ run_cost += outerrel->reltarget->cost.per_tuple * input_rows;
+ run_cost += aggcosts.finalCost.per_tuple * numGroups;
+ run_cost += cpu_tuple_cost * numGroups;
+
+ /* Account for the eval cost of HAVING quals, if any */
+ if (root->parse->havingQual)
+ {
+ QualCost remote_cost;
+
+ /* Add in the eval cost of the remotely-checked quals */
+ cost_qual_eval(&remote_cost, fpinfo->remote_conds, root);
+ startup_cost += remote_cost.startup;
+ run_cost += remote_cost.per_tuple * numGroups;
+ /* Add in the eval cost of the locally-checked quals */
+ startup_cost += fpinfo->local_conds_cost.startup;
+ run_cost += fpinfo->local_conds_cost.per_tuple * retrieved_rows;
+ }
+
+ /* Add in tlist eval cost for each output row */
+ startup_cost += foreignrel->reltarget->cost.startup;
+ run_cost += foreignrel->reltarget->cost.per_tuple * rows;
+ }
+ else
+ {
+ Cost cpu_per_tuple;
+
+ /* Use rows/width estimates made by set_baserel_size_estimates. */
+ rows = foreignrel->rows;
+ width = foreignrel->reltarget->width;
+
+ /*
+ * Back into an estimate of the number of retrieved rows. Just in
+ * case this is nuts, clamp to at most foreignrel->tuples.
+ */
+ retrieved_rows = clamp_row_est(rows / fpinfo->local_conds_sel);
+ retrieved_rows = Min(retrieved_rows, foreignrel->tuples);
+
+ /*
+ * Cost as though this were a seqscan, which is pessimistic. We
+ * effectively imagine the local_conds are being evaluated
+ * remotely, too.
+ */
+ startup_cost = 0;
+ run_cost = 0;
+ run_cost += seq_page_cost * foreignrel->pages;
+
+ startup_cost += foreignrel->baserestrictcost.startup;
+ cpu_per_tuple = cpu_tuple_cost + foreignrel->baserestrictcost.per_tuple;
+ run_cost += cpu_per_tuple * foreignrel->tuples;
+
+ /* Add in tlist eval cost for each output row */
+ startup_cost += foreignrel->reltarget->cost.startup;
+ run_cost += foreignrel->reltarget->cost.per_tuple * rows;
+ }
+
+ /*
+ * Without remote estimates, we have no real way to estimate the cost
+ * of generating sorted output. It could be free if the query plan
+ * the remote side would have chosen generates properly-sorted output
+ * anyway, but in most cases it will cost something. Estimate a value
+ * high enough that we won't pick the sorted path when the ordering
+ * isn't locally useful, but low enough that we'll err on the side of
+ * pushing down the ORDER BY clause when it's useful to do so.
+ */
+ if (pathkeys != NIL)
+ {
+ if (IS_UPPER_REL(foreignrel))
+ {
+ Assert(foreignrel->reloptkind == RELOPT_UPPER_REL &&
+ fpinfo->stage == UPPERREL_GROUP_AGG);
+ adjust_foreign_grouping_path_cost(root, pathkeys,
+ retrieved_rows, width,
+ fpextra->limit_tuples,
+ &startup_cost, &run_cost);
+ }
+ else
+ {
+ startup_cost *= DEFAULT_FDW_SORT_MULTIPLIER;
+ run_cost *= DEFAULT_FDW_SORT_MULTIPLIER;
+ }
+ }
+
+ total_cost = startup_cost + run_cost;
+
+ /* Adjust the cost estimates if we have LIMIT */
+ if (fpextra && fpextra->has_limit)
+ {
+ adjust_limit_rows_costs(&rows, &startup_cost, &total_cost,
+ fpextra->offset_est, fpextra->count_est);
+ retrieved_rows = rows;
+ }
+ }
+
+ /*
+ * If this includes the final sort step, the given target, which will be
+ * applied to the resulting path, might have different expressions from
+ * the foreignrel's reltarget (see make_sort_input_target()); adjust tlist
+ * eval costs.
+ */
+ if (fpextra && fpextra->has_final_sort &&
+ fpextra->target != foreignrel->reltarget)
+ {
+ QualCost oldcost = foreignrel->reltarget->cost;
+ QualCost newcost = fpextra->target->cost;
+
+ startup_cost += newcost.startup - oldcost.startup;
+ total_cost += newcost.startup - oldcost.startup;
+ total_cost += (newcost.per_tuple - oldcost.per_tuple) * rows;
+ }
+
+ /*
+ * Cache the retrieved rows and cost estimates for scans, joins, or
+ * groupings without any parameterization, pathkeys, or additional
+ * post-scan/join-processing steps, before adding the costs for
+ * transferring data from the foreign server. These estimates are useful
+ * for costing remote joins involving this relation or costing other
+ * remote operations on this relation such as remote sorts and remote
+ * LIMIT restrictions, when the costs cannot be obtained from the foreign
+ * server. This function will be called at least once for every foreign
+ * relation without any parameterization, pathkeys, or additional
+ * post-scan/join-processing steps.
+ */
+ if (pathkeys == NIL && param_join_conds == NIL && fpextra == NULL)
+ {
+ fpinfo->retrieved_rows = retrieved_rows;
+ fpinfo->rel_startup_cost = startup_cost;
+ fpinfo->rel_total_cost = total_cost;
+ }
+
+ /*
+ * Add some additional cost factors to account for connection overhead
+ * (fdw_startup_cost), transferring data across the network
+ * (fdw_tuple_cost per retrieved row), and local manipulation of the data
+ * (cpu_tuple_cost per retrieved row).
+ */
+ startup_cost += fpinfo->fdw_startup_cost;
+ total_cost += fpinfo->fdw_startup_cost;
+ total_cost += fpinfo->fdw_tuple_cost * retrieved_rows;
+ total_cost += cpu_tuple_cost * retrieved_rows;
+
+ /*
+ * If we have LIMIT, we should prefer performing the restriction remotely
+ * rather than locally, as the former avoids extra row fetches from the
+ * remote that the latter might cause. But since the core code doesn't
+ * account for such fetches when estimating the costs of the local
+ * restriction (see create_limit_path()), there would be no difference
+ * between the costs of the local restriction and the costs of the remote
+ * restriction estimated above if we don't use remote estimates (except
+ * for the case where the foreignrel is a grouping relation, the given
+ * pathkeys is not NIL, and the effects of a bounded sort for that rel is
+ * accounted for in costing the remote restriction). Tweak the costs of
+ * the remote restriction to ensure we'll prefer it when the LIMIT is
+ * actually useful.
+ */
+ if (!fpinfo->use_remote_estimate &&
+ fpextra && fpextra->has_limit &&
+ fpextra->limit_tuples > 0 &&
+ fpextra->limit_tuples < fpinfo->rows)
+ {
+ Assert(fpinfo->rows > 0);
+ total_cost -= (total_cost - startup_cost) * 0.05 *
+ (fpinfo->rows - fpextra->limit_tuples) / fpinfo->rows;
+ }
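+ /*
+ * Illustration with made-up numbers: with fpinfo->rows = 1000 and
+ * limit_tuples = 10, the run-cost portion (total_cost - startup_cost)
+ * is reduced by 0.05 * 990 / 1000, i.e. about 5%.
+ */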
+
+ /* Return results. */
+ *p_rows = rows;
+ *p_width = width;
+ *p_startup_cost = startup_cost;
+ *p_total_cost = total_cost;
+}
+
+/*
+ * Estimate costs of executing a SQL statement remotely.
+ * The given "sql" must be an EXPLAIN command.
+ */
+static void
+get_remote_estimate(const char *sql, PGconn *conn,
+ double *rows, int *width,
+ Cost *startup_cost, Cost *total_cost)
+{
+ PGresult *volatile res = NULL;
+
+ /* PGresult must be released before leaving this function. */
+ PG_TRY();
+ {
+ char *line;
+ char *p;
+ int n;
+
+ /*
+ * Execute EXPLAIN remotely.
+ */
+ res = pgfdw_exec_query(conn, sql, NULL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, sql);
+
+ /*
+ * Extract cost numbers for topmost plan node. Note we search for a
+ * left paren from the end of the line to avoid being confused by
+ * other uses of parentheses.
+ */
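+		/*
+		 * For example, a first EXPLAIN line such as
+		 *   "Seq Scan on t1  (cost=0.00..35.50 rows=2550 width=4)"
+		 * would yield startup_cost = 0.00, total_cost = 35.50, rows = 2550
+		 * and width = 4 below.
+		 */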
+ line = PQgetvalue(res, 0, 0);
+ p = strrchr(line, '(');
+ if (p == NULL)
+ elog(ERROR, "could not interpret EXPLAIN output: \"%s\"", line);
+ n = sscanf(p, "(cost=%lf..%lf rows=%lf width=%d)",
+ startup_cost, total_cost, rows, width);
+ if (n != 4)
+ elog(ERROR, "could not interpret EXPLAIN output: \"%s\"", line);
+ }
+ PG_FINALLY();
+ {
+ if (res)
+ PQclear(res);
+ }
+ PG_END_TRY();
+}
+
+/*
+ * Adjust the cost estimates of a foreign grouping path to include the cost of
+ * generating properly-sorted output.
+ */
+static void
+adjust_foreign_grouping_path_cost(PlannerInfo *root,
+ List *pathkeys,
+ double retrieved_rows,
+ double width,
+ double limit_tuples,
+ Cost *p_startup_cost,
+ Cost *p_run_cost)
+{
+ /*
+ * If the GROUP BY clause isn't sort-able, the plan chosen by the remote
+ * side is unlikely to generate properly-sorted output, so it would need
+ * an explicit sort; adjust the given costs with cost_sort(). Likewise,
+ * if the GROUP BY clause is sort-able but isn't a superset of the given
+ * pathkeys, adjust the costs with that function. Otherwise, adjust the
+ * costs by applying the same heuristic as for the scan or join case.
+ */
+ if (!grouping_is_sortable(root->parse->groupClause) ||
+ !pathkeys_contained_in(pathkeys, root->group_pathkeys))
+ {
+ Path sort_path; /* dummy for result of cost_sort */
+
+ cost_sort(&sort_path,
+ root,
+ pathkeys,
+ *p_startup_cost + *p_run_cost,
+ retrieved_rows,
+ width,
+ 0.0,
+ work_mem,
+ limit_tuples);
+
+ *p_startup_cost = sort_path.startup_cost;
+ *p_run_cost = sort_path.total_cost - sort_path.startup_cost;
+ }
+ else
+ {
+ /*
+ * The default extra cost seems too large for foreign-grouping cases;
+ * add 1/4th of that default.
+ */
+ double sort_multiplier = 1.0 + (DEFAULT_FDW_SORT_MULTIPLIER
+ - 1.0) * 0.25;
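+
+		/*
+		 * Assuming DEFAULT_FDW_SORT_MULTIPLIER is 1.2 (its value in
+		 * postgres_fdw.h), this works out to a multiplier of 1.05.
+		 */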
+
+ *p_startup_cost *= sort_multiplier;
+ *p_run_cost *= sort_multiplier;
+ }
+}
+
+/*
+ * Detect whether we want to process an EquivalenceClass member.
+ *
+ * This is a callback for use by generate_implied_equalities_for_column.
+ */
+static bool
+ec_member_matches_foreign(PlannerInfo *root, RelOptInfo *rel,
+ EquivalenceClass *ec, EquivalenceMember *em,
+ void *arg)
+{
+ ec_member_foreign_arg *state = (ec_member_foreign_arg *) arg;
+ Expr *expr = em->em_expr;
+
+ /*
+ * If we've identified what we're processing in the current scan, we only
+ * want to match that expression.
+ */
+ if (state->current != NULL)
+ return equal(expr, state->current);
+
+ /*
+ * Otherwise, ignore anything we've already processed.
+ */
+ if (list_member(state->already_used, expr))
+ return false;
+
+ /* This is the new target to process. */
+ state->current = expr;
+ return true;
+}
+
+/*
+ * Create cursor for node's query with current parameter values.
+ */
+static void
+create_cursor(ForeignScanState *node)
+{
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ ExprContext *econtext = node->ss.ps.ps_ExprContext;
+ int numParams = fsstate->numParams;
+ const char **values = fsstate->param_values;
+ PGconn *conn = fsstate->conn;
+ StringInfoData buf;
+ PGresult *res;
+
+ /* First, process a pending asynchronous request, if any. */
+ if (fsstate->conn_state->pendingAreq)
+ process_pending_request(fsstate->conn_state->pendingAreq);
+
+ /*
+ * Construct array of query parameter values in text format. We do the
+ * conversions in the short-lived per-tuple context, so as not to cause a
+ * memory leak over repeated scans.
+ */
+ if (numParams > 0)
+ {
+ MemoryContext oldcontext;
+
+ oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
+
+ process_query_params(econtext,
+ fsstate->param_flinfo,
+ fsstate->param_exprs,
+ values);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+
+ /* Construct the DECLARE CURSOR command */
+ initStringInfo(&buf);
+ appendStringInfo(&buf, "DECLARE c%u CURSOR FOR\n%s",
+ fsstate->cursor_number, fsstate->query);
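+
+	/*
+	 * For example, with cursor_number 1 and a deparsed query against a
+	 * remote table t1 (names assumed), buf might now contain something like
+	 *   DECLARE c1 CURSOR FOR
+	 *   SELECT id FROM public.t1 WHERE ((id = $1::integer))
+	 */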
+
+ /*
+ * Notice that we pass NULL for paramTypes, thus forcing the remote server
+ * to infer types for all parameters. Since we explicitly cast every
+ * parameter (see deparse.c), the "inference" is trivial and will produce
+ * the desired result. This allows us to avoid assuming that the remote
+ * server has the same OIDs we do for the parameters' types.
+ */
+ if (!PQsendQueryParams(conn, buf.data, numParams,
+ NULL, values, NULL, NULL, 0))
+ pgfdw_report_error(ERROR, NULL, conn, false, buf.data);
+
+ /*
+ * Get the result, and check for success.
+ *
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = pgfdw_get_result(conn, buf.data);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, conn, true, fsstate->query);
+ PQclear(res);
+
+ /* Mark the cursor as created, and show no tuples have been retrieved */
+ fsstate->cursor_exists = true;
+ fsstate->tuples = NULL;
+ fsstate->num_tuples = 0;
+ fsstate->next_tuple = 0;
+ fsstate->fetch_ct_2 = 0;
+ fsstate->eof_reached = false;
+
+ /* Clean up */
+ pfree(buf.data);
+}
+
+/*
+ * Fetch some more rows from the node's cursor.
+ */
+static void
+fetch_more_data(ForeignScanState *node)
+{
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ PGresult *volatile res = NULL;
+ MemoryContext oldcontext;
+
+ /*
+ * We'll store the tuples in the batch_cxt. First, flush the previous
+ * batch.
+ */
+ fsstate->tuples = NULL;
+ MemoryContextReset(fsstate->batch_cxt);
+ oldcontext = MemoryContextSwitchTo(fsstate->batch_cxt);
+
+ /* PGresult must be released before leaving this function. */
+ PG_TRY();
+ {
+ PGconn *conn = fsstate->conn;
+ int numrows;
+ int i;
+
+ if (fsstate->async_capable)
+ {
+ Assert(fsstate->conn_state->pendingAreq);
+
+ /*
+ * The query was already sent by an earlier call to
+ * fetch_more_data_begin. So now we just fetch the result.
+ */
+ res = pgfdw_get_result(conn, fsstate->query);
+ /* On error, report the original query, not the FETCH. */
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, fsstate->query);
+
+ /* Reset per-connection state */
+ fsstate->conn_state->pendingAreq = NULL;
+ }
+ else
+ {
+ char sql[64];
+
+ /* This is a regular synchronous fetch. */
+ snprintf(sql, sizeof(sql), "FETCH %d FROM c%u",
+ fsstate->fetch_size, fsstate->cursor_number);
+
+ res = pgfdw_exec_query(conn, sql, fsstate->conn_state);
+ /* On error, report the original query, not the FETCH. */
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, fsstate->query);
+ }
+
+ /* Convert the data into HeapTuples */
+ numrows = PQntuples(res);
+ fsstate->tuples = (HeapTuple *) palloc0(numrows * sizeof(HeapTuple));
+ fsstate->num_tuples = numrows;
+ fsstate->next_tuple = 0;
+
+ for (i = 0; i < numrows; i++)
+ {
+ Assert(IsA(node->ss.ps.plan, ForeignScan));
+
+ fsstate->tuples[i] =
+ make_tuple_from_result_row(res, i,
+ fsstate->rel,
+ fsstate->attinmeta,
+ fsstate->retrieved_attrs,
+ node,
+ fsstate->temp_cxt);
+ }
+
+ /* Update fetch_ct_2 */
+ if (fsstate->fetch_ct_2 < 2)
+ fsstate->fetch_ct_2++;
+
+ /* Must be EOF if we didn't get as many tuples as we asked for. */
+ fsstate->eof_reached = (numrows < fsstate->fetch_size);
+ }
+ PG_FINALLY();
+ {
+ if (res)
+ PQclear(res);
+ }
+ PG_END_TRY();
+
+ MemoryContextSwitchTo(oldcontext);
+}
+
+/*
+ * Force assorted GUC parameters to settings that ensure that we'll output
+ * data values in a form that is unambiguous to the remote server.
+ *
+ * This is rather expensive and annoying to do once per row, but there's
+ * little choice if we want to be sure values are transmitted accurately;
+ * we can't leave the settings in place between rows for fear of affecting
+ * user-visible computations.
+ *
+ * We use the equivalent of a function SET option to allow the settings to
+ * persist only until the caller calls reset_transmission_modes(). If an
+ * error is thrown in between, guc.c will take care of undoing the settings.
+ *
+ * The return value is the nestlevel that must be passed to
+ * reset_transmission_modes() to undo things.
+ */
+int
+set_transmission_modes(void)
+{
+ int nestlevel = NewGUCNestLevel();
+
+ /*
+ * The values set here should match what pg_dump does. See also
+ * configure_remote_session in connection.c.
+ */
+ if (DateStyle != USE_ISO_DATES)
+ (void) set_config_option("datestyle", "ISO",
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0, false);
+ if (IntervalStyle != INTSTYLE_POSTGRES)
+ (void) set_config_option("intervalstyle", "postgres",
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0, false);
+ if (extra_float_digits < 3)
+ (void) set_config_option("extra_float_digits", "3",
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0, false);
+
+ /*
+ * In addition force restrictive search_path, in case there are any
+ * regproc or similar constants to be printed.
+ */
+ (void) set_config_option("search_path", "pg_catalog",
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0, false);
+
+ return nestlevel;
+}
+
+/*
+ * Undo the effects of set_transmission_modes().
+ */
+void
+reset_transmission_modes(int nestlevel)
+{
+ AtEOXact_GUC(true, nestlevel);
+}
+
+/*
+ * Utility routine to close a cursor.
+ */
+static void
+close_cursor(PGconn *conn, unsigned int cursor_number,
+ PgFdwConnState *conn_state)
+{
+ char sql[64];
+ PGresult *res;
+
+ snprintf(sql, sizeof(sql), "CLOSE c%u", cursor_number);
+
+ /*
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = pgfdw_exec_query(conn, sql, conn_state);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, conn, true, sql);
+ PQclear(res);
+}
+
+/*
+ * create_foreign_modify
+ * Construct an execution state of a foreign insert/update/delete
+ * operation
+ */
+static PgFdwModifyState *
+create_foreign_modify(EState *estate,
+ RangeTblEntry *rte,
+ ResultRelInfo *resultRelInfo,
+ CmdType operation,
+ Plan *subplan,
+ char *query,
+ List *target_attrs,
+ int values_end,
+ bool has_returning,
+ List *retrieved_attrs)
+{
+ PgFdwModifyState *fmstate;
+ Relation rel = resultRelInfo->ri_RelationDesc;
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ Oid userid;
+ ForeignTable *table;
+ UserMapping *user;
+ AttrNumber n_params;
+ Oid typefnoid;
+ bool isvarlena;
+ ListCell *lc;
+
+ /* Begin constructing PgFdwModifyState. */
+ fmstate = (PgFdwModifyState *) palloc0(sizeof(PgFdwModifyState));
+ fmstate->rel = rel;
+
+ /*
+ * Identify which user to do the remote access as. This should match what
+ * ExecCheckRTEPerms() does.
+ */
+ userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
+
+ /* Get info about foreign table. */
+ table = GetForeignTable(RelationGetRelid(rel));
+ user = GetUserMapping(userid, table->serverid);
+
+ /* Open connection; report that we'll create a prepared statement. */
+ fmstate->conn = GetConnection(user, true, &fmstate->conn_state);
+ fmstate->p_name = NULL; /* prepared statement not made yet */
+
+ /* Set up remote query information. */
+ fmstate->query = query;
+ if (operation == CMD_INSERT)
+ {
+ fmstate->query = pstrdup(fmstate->query);
+ fmstate->orig_query = pstrdup(fmstate->query);
+ }
+ fmstate->target_attrs = target_attrs;
+ fmstate->values_end = values_end;
+ fmstate->has_returning = has_returning;
+ fmstate->retrieved_attrs = retrieved_attrs;
+
+ /* Create context for per-tuple temp workspace. */
+ fmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt,
+ "postgres_fdw temporary data",
+ ALLOCSET_SMALL_SIZES);
+
+ /* Prepare for input conversion of RETURNING results. */
+ if (fmstate->has_returning)
+ fmstate->attinmeta = TupleDescGetAttInMetadata(tupdesc);
+
+ /* Prepare for output conversion of parameters used in prepared stmt. */
+ n_params = list_length(fmstate->target_attrs) + 1;
+ fmstate->p_flinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * n_params);
+ fmstate->p_nums = 0;
+
+ if (operation == CMD_UPDATE || operation == CMD_DELETE)
+ {
+ Assert(subplan != NULL);
+
+ /* Find the ctid resjunk column in the subplan's result */
+ fmstate->ctidAttno = ExecFindJunkAttributeInTlist(subplan->targetlist,
+ "ctid");
+ if (!AttributeNumberIsValid(fmstate->ctidAttno))
+ elog(ERROR, "could not find junk ctid column");
+
+ /* First transmittable parameter will be ctid */
+ getTypeOutputInfo(TIDOID, &typefnoid, &isvarlena);
+ fmgr_info(typefnoid, &fmstate->p_flinfo[fmstate->p_nums]);
+ fmstate->p_nums++;
+ }
+
+ if (operation == CMD_INSERT || operation == CMD_UPDATE)
+ {
+ /* Set up for remaining transmittable parameters */
+ foreach(lc, fmstate->target_attrs)
+ {
+ int attnum = lfirst_int(lc);
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1);
+
+ Assert(!attr->attisdropped);
+
+ /* Ignore generated columns; they are set to DEFAULT */
+ if (attr->attgenerated)
+ continue;
+ getTypeOutputInfo(attr->atttypid, &typefnoid, &isvarlena);
+ fmgr_info(typefnoid, &fmstate->p_flinfo[fmstate->p_nums]);
+ fmstate->p_nums++;
+ }
+ }
+
+ Assert(fmstate->p_nums <= n_params);
+
+ /* Set batch_size from foreign server/table options. */
+ if (operation == CMD_INSERT)
+ fmstate->batch_size = get_batch_size_option(rel);
+
+ fmstate->num_slots = 1;
+
+ /* Initialize auxiliary state */
+ fmstate->aux_fmstate = NULL;
+
+ return fmstate;
+}
+
+/*
+ * execute_foreign_modify
+ * Perform foreign-table modification as required, and fetch RETURNING
+ * result if any. (This is the shared guts of postgresExecForeignInsert,
+ * postgresExecForeignBatchInsert, postgresExecForeignUpdate, and
+ * postgresExecForeignDelete.)
+ */
+static TupleTableSlot **
+execute_foreign_modify(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ CmdType operation,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int *numSlots)
+{
+ PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
+ ItemPointer ctid = NULL;
+ const char **p_values;
+ PGresult *res;
+ int n_rows;
+ StringInfoData sql;
+
+ /* The operation should be INSERT, UPDATE, or DELETE */
+ Assert(operation == CMD_INSERT ||
+ operation == CMD_UPDATE ||
+ operation == CMD_DELETE);
+
+ /* First, process a pending asynchronous request, if any. */
+ if (fmstate->conn_state->pendingAreq)
+ process_pending_request(fmstate->conn_state->pendingAreq);
+
+ /*
+ * If the existing query was deparsed and prepared for a different number
+ * of rows, rebuild it for the proper number.
+ */
+ if (operation == CMD_INSERT && fmstate->num_slots != *numSlots)
+ {
+ /* Destroy the prepared statement created previously */
+ if (fmstate->p_name)
+ deallocate_query(fmstate);
+
+ /* Build INSERT string with numSlots records in its VALUES clause. */
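+		/*
+		 * For example, a query originally deparsed with a single VALUES list
+		 * ($1, $2) is rebuilt for *numSlots = 3 with three lists:
+		 * ($1, $2), ($3, $4), ($5, $6).
+		 */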
+ initStringInfo(&sql);
+ rebuildInsertSql(&sql, fmstate->rel,
+ fmstate->orig_query, fmstate->target_attrs,
+ fmstate->values_end, fmstate->p_nums,
+ *numSlots - 1);
+ pfree(fmstate->query);
+ fmstate->query = sql.data;
+ fmstate->num_slots = *numSlots;
+ }
+
+ /* Set up the prepared statement on the remote server, if we didn't yet */
+ if (!fmstate->p_name)
+ prepare_foreign_modify(fmstate);
+
+ /*
+ * For UPDATE/DELETE, get the ctid that was passed up as a resjunk column
+ */
+ if (operation == CMD_UPDATE || operation == CMD_DELETE)
+ {
+ Datum datum;
+ bool isNull;
+
+ datum = ExecGetJunkAttribute(planSlots[0],
+ fmstate->ctidAttno,
+ &isNull);
+ /* shouldn't ever get a null result... */
+ if (isNull)
+ elog(ERROR, "ctid is NULL");
+ ctid = (ItemPointer) DatumGetPointer(datum);
+ }
+
+ /* Convert parameters needed by prepared statement to text form */
+ p_values = convert_prep_stmt_params(fmstate, ctid, slots, *numSlots);
+
+ /*
+ * Execute the prepared statement.
+ */
+ if (!PQsendQueryPrepared(fmstate->conn,
+ fmstate->p_name,
+ fmstate->p_nums * (*numSlots),
+ p_values,
+ NULL,
+ NULL,
+ 0))
+ pgfdw_report_error(ERROR, NULL, fmstate->conn, false, fmstate->query);
+
+ /*
+ * Get the result, and check for success.
+ *
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = pgfdw_get_result(fmstate->conn, fmstate->query);
+ if (PQresultStatus(res) !=
+ (fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
+ pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
+
+ /* Check number of rows affected, and fetch RETURNING tuple if any */
+ if (fmstate->has_returning)
+ {
+ Assert(*numSlots == 1);
+ n_rows = PQntuples(res);
+ if (n_rows > 0)
+ store_returning_result(fmstate, slots[0], res);
+ }
+ else
+ n_rows = atoi(PQcmdTuples(res));
+
+ /* And clean up */
+ PQclear(res);
+
+ MemoryContextReset(fmstate->temp_cxt);
+
+ *numSlots = n_rows;
+
+ /*
+ * Return NULL if nothing was inserted/updated/deleted on the remote end
+ */
+ return (n_rows > 0) ? slots : NULL;
+}
+
+/*
+ * prepare_foreign_modify
+ * Establish a prepared statement for execution of INSERT/UPDATE/DELETE
+ */
+static void
+prepare_foreign_modify(PgFdwModifyState *fmstate)
+{
+ char prep_name[NAMEDATALEN];
+ char *p_name;
+ PGresult *res;
+
+ /*
+ * The caller would already have processed a pending asynchronous request
+ * if any, so no need to do it here.
+ */
+
+ /* Construct name we'll use for the prepared statement. */
+ snprintf(prep_name, sizeof(prep_name), "pgsql_fdw_prep_%u",
+ GetPrepStmtNumber(fmstate->conn));
+ p_name = pstrdup(prep_name);
+
+ /*
+ * We intentionally do not specify parameter types here, but leave the
+ * remote server to derive them by default. This avoids possible problems
+ * with the remote server using different type OIDs than we do. All of
+ * the prepared statements we use in this module are simple enough that
+ * the remote server will make the right choices.
+ */
+ if (!PQsendPrepare(fmstate->conn,
+ p_name,
+ fmstate->query,
+ 0,
+ NULL))
+ pgfdw_report_error(ERROR, NULL, fmstate->conn, false, fmstate->query);
+
+ /*
+ * Get the result, and check for success.
+ *
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = pgfdw_get_result(fmstate->conn, fmstate->query);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
+ PQclear(res);
+
+ /* This action shows that the prepare has been done. */
+ fmstate->p_name = p_name;
+}
+
+/*
+ * convert_prep_stmt_params
+ * Create array of text strings representing parameter values
+ *
+ * tupleid is ctid to send, or NULL if none
+ * slot is slot to get remaining parameters from, or NULL if none
+ *
+ * Data is constructed in temp_cxt; caller should reset that after use.
+ */
+static const char **
+convert_prep_stmt_params(PgFdwModifyState *fmstate,
+ ItemPointer tupleid,
+ TupleTableSlot **slots,
+ int numSlots)
+{
+ const char **p_values;
+ int i;
+ int j;
+ int pindex = 0;
+ MemoryContext oldcontext;
+
+ oldcontext = MemoryContextSwitchTo(fmstate->temp_cxt);
+
+ p_values = (const char **) palloc(sizeof(char *) * fmstate->p_nums * numSlots);
+
+ /* ctid is provided only for UPDATE/DELETE, which don't allow batching */
+ Assert(!(tupleid != NULL && numSlots > 1));
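+
+	/*
+	 * For a batched INSERT (tupleid == NULL), p_values is filled in
+	 * slot-major order: e.g. with p_nums = 2 and numSlots = 3 it holds the
+	 * six strings for $1..$6 as {slot0.col1, slot0.col2, slot1.col1,
+	 * slot1.col2, slot2.col1, slot2.col2}.
+	 */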
+
+ /* 1st parameter should be ctid, if it's in use */
+ if (tupleid != NULL)
+ {
+ Assert(numSlots == 1);
+ /* don't need set_transmission_modes for TID output */
+ p_values[pindex] = OutputFunctionCall(&fmstate->p_flinfo[pindex],
+ PointerGetDatum(tupleid));
+ pindex++;
+ }
+
+ /* get following parameters from slots */
+ if (slots != NULL && fmstate->target_attrs != NIL)
+ {
+ TupleDesc tupdesc = RelationGetDescr(fmstate->rel);
+ int nestlevel;
+ ListCell *lc;
+
+ nestlevel = set_transmission_modes();
+
+ for (i = 0; i < numSlots; i++)
+ {
+ j = (tupleid != NULL) ? 1 : 0;
+ foreach(lc, fmstate->target_attrs)
+ {
+ int attnum = lfirst_int(lc);
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1);
+ Datum value;
+ bool isnull;
+
+ /* Ignore generated columns; they are set to DEFAULT */
+ if (attr->attgenerated)
+ continue;
+ value = slot_getattr(slots[i], attnum, &isnull);
+ if (isnull)
+ p_values[pindex] = NULL;
+ else
+ p_values[pindex] = OutputFunctionCall(&fmstate->p_flinfo[j],
+ value);
+ pindex++;
+ j++;
+ }
+ }
+
+ reset_transmission_modes(nestlevel);
+ }
+
+ Assert(pindex == fmstate->p_nums * numSlots);
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return p_values;
+}
+
+/*
+ * store_returning_result
+ * Store the result of a RETURNING clause
+ *
+ * On error, be sure to release the PGresult on the way out. Callers do not
+ * have PG_TRY blocks to ensure this happens.
+ */
+static void
+store_returning_result(PgFdwModifyState *fmstate,
+ TupleTableSlot *slot, PGresult *res)
+{
+ PG_TRY();
+ {
+ HeapTuple newtup;
+
+ newtup = make_tuple_from_result_row(res, 0,
+ fmstate->rel,
+ fmstate->attinmeta,
+ fmstate->retrieved_attrs,
+ NULL,
+ fmstate->temp_cxt);
+
+ /*
+ * The returning slot will not necessarily be suitable to store
+ * heaptuples directly, so allow for conversion.
+ */
+ ExecForceStoreHeapTuple(newtup, slot, true);
+ }
+ PG_CATCH();
+ {
+ if (res)
+ PQclear(res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+}
+
+/*
+ * finish_foreign_modify
+ * Release resources for a foreign insert/update/delete operation
+ */
+static void
+finish_foreign_modify(PgFdwModifyState *fmstate)
+{
+ Assert(fmstate != NULL);
+
+ /* If we created a prepared statement, destroy it */
+ deallocate_query(fmstate);
+
+ /* Release remote connection */
+ ReleaseConnection(fmstate->conn);
+ fmstate->conn = NULL;
+}
+
+/*
+ * deallocate_query
+ * Deallocate a prepared statement for a foreign insert/update/delete
+ * operation
+ */
+static void
+deallocate_query(PgFdwModifyState *fmstate)
+{
+ char sql[64];
+ PGresult *res;
+
+ /* do nothing if the query is not allocated */
+ if (!fmstate->p_name)
+ return;
+
+ snprintf(sql, sizeof(sql), "DEALLOCATE %s", fmstate->p_name);
+
+ /*
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = pgfdw_exec_query(fmstate->conn, sql, fmstate->conn_state);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, fmstate->conn, true, sql);
+ PQclear(res);
+ pfree(fmstate->p_name);
+ fmstate->p_name = NULL;
+}
+
+/*
+ * build_remote_returning
+ * Build a RETURNING targetlist of a remote query for performing an
+ * UPDATE/DELETE .. RETURNING on a join directly
+ */
+static List *
+build_remote_returning(Index rtindex, Relation rel, List *returningList)
+{
+ bool have_wholerow = false;
+ List *tlist = NIL;
+ List *vars;
+ ListCell *lc;
+
+ Assert(returningList);
+
+ vars = pull_var_clause((Node *) returningList, PVC_INCLUDE_PLACEHOLDERS);
+
+ /*
+ * If there's a whole-row reference to the target relation, then we'll
+ * need all the columns of the relation.
+ */
+ foreach(lc, vars)
+ {
+ Var *var = (Var *) lfirst(lc);
+
+ if (IsA(var, Var) &&
+ var->varno == rtindex &&
+ var->varattno == InvalidAttrNumber)
+ {
+ have_wholerow = true;
+ break;
+ }
+ }
+
+ if (have_wholerow)
+ {
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ int i;
+
+ for (i = 1; i <= tupdesc->natts; i++)
+ {
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, i - 1);
+ Var *var;
+
+ /* Ignore dropped attributes. */
+ if (attr->attisdropped)
+ continue;
+
+ var = makeVar(rtindex,
+ i,
+ attr->atttypid,
+ attr->atttypmod,
+ attr->attcollation,
+ 0);
+
+ tlist = lappend(tlist,
+ makeTargetEntry((Expr *) var,
+ list_length(tlist) + 1,
+ NULL,
+ false));
+ }
+ }
+
+ /* Now add any remaining columns to tlist. */
+ foreach(lc, vars)
+ {
+ Var *var = (Var *) lfirst(lc);
+
+ /*
+ * No need for whole-row references to the target relation. We don't
+ * need system columns other than ctid and oid either, since those are
+ * set locally.
+ */
+ if (IsA(var, Var) &&
+ var->varno == rtindex &&
+ var->varattno <= InvalidAttrNumber &&
+ var->varattno != SelfItemPointerAttributeNumber)
+ continue; /* don't need it */
+
+ if (tlist_member((Expr *) var, tlist))
+ continue; /* already got it */
+
+ tlist = lappend(tlist,
+ makeTargetEntry((Expr *) var,
+ list_length(tlist) + 1,
+ NULL,
+ false));
+ }
+
+ list_free(vars);
+
+ return tlist;
+}
+
+/*
+ * rebuild_fdw_scan_tlist
+ * Build new fdw_scan_tlist of given foreign-scan plan node from given
+ * tlist
+ *
+ * The given tlist might lack some columns that the fdw_scan_tlist of the
+ * given foreign-scan plan node contains; for example, the fdw_scan_tlist
+ * would have contained resjunk columns such as 'ctid' of the target relation
+ * and 'wholerow' of non-target relations, which the tlist might not contain.
+ * So, adjust the tlist so it contains all the columns specified in the
+ * fdw_scan_tlist; else setrefs.c will get confused.
+ */
+static void
+rebuild_fdw_scan_tlist(ForeignScan *fscan, List *tlist)
+{
+ List *new_tlist = tlist;
+ List *old_tlist = fscan->fdw_scan_tlist;
+ ListCell *lc;
+
+ foreach(lc, old_tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+
+ if (tlist_member(tle->expr, new_tlist))
+ continue; /* already got it */
+
+ new_tlist = lappend(new_tlist,
+ makeTargetEntry(tle->expr,
+ list_length(new_tlist) + 1,
+ NULL,
+ false));
+ }
+ fscan->fdw_scan_tlist = new_tlist;
+}
+
+/*
+ * Execute a direct UPDATE/DELETE statement.
+ */
+static void
+execute_dml_stmt(ForeignScanState *node)
+{
+ PgFdwDirectModifyState *dmstate = (PgFdwDirectModifyState *) node->fdw_state;
+ ExprContext *econtext = node->ss.ps.ps_ExprContext;
+ int numParams = dmstate->numParams;
+ const char **values = dmstate->param_values;
+
+ /* First, process a pending asynchronous request, if any. */
+ if (dmstate->conn_state->pendingAreq)
+ process_pending_request(dmstate->conn_state->pendingAreq);
+
+ /*
+ * Construct array of query parameter values in text format.
+ */
+ if (numParams > 0)
+ process_query_params(econtext,
+ dmstate->param_flinfo,
+ dmstate->param_exprs,
+ values);
+
+ /*
+ * Notice that we pass NULL for paramTypes, thus forcing the remote server
+ * to infer types for all parameters. Since we explicitly cast every
+ * parameter (see deparse.c), the "inference" is trivial and will produce
+ * the desired result. This allows us to avoid assuming that the remote
+ * server has the same OIDs we do for the parameters' types.
+ */
+ if (!PQsendQueryParams(dmstate->conn, dmstate->query, numParams,
+ NULL, values, NULL, NULL, 0))
+ pgfdw_report_error(ERROR, NULL, dmstate->conn, false, dmstate->query);
+
+ /*
+ * Get the result, and check for success.
+ *
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ dmstate->result = pgfdw_get_result(dmstate->conn, dmstate->query);
+ if (PQresultStatus(dmstate->result) !=
+ (dmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
+ pgfdw_report_error(ERROR, dmstate->result, dmstate->conn, true,
+ dmstate->query);
+
+ /* Get the number of rows affected. */
+ if (dmstate->has_returning)
+ dmstate->num_tuples = PQntuples(dmstate->result);
+ else
+ dmstate->num_tuples = atoi(PQcmdTuples(dmstate->result));
+}
+
+/*
+ * Get the result of a RETURNING clause.
+ */
+static TupleTableSlot *
+get_returning_data(ForeignScanState *node)
+{
+ PgFdwDirectModifyState *dmstate = (PgFdwDirectModifyState *) node->fdw_state;
+ EState *estate = node->ss.ps.state;
+ ResultRelInfo *resultRelInfo = node->resultRelInfo;
+ TupleTableSlot *slot = node->ss.ss_ScanTupleSlot;
+ TupleTableSlot *resultSlot;
+
+ Assert(resultRelInfo->ri_projectReturning);
+
+ /* If we didn't get any tuples, must be end of data. */
+ if (dmstate->next_tuple >= dmstate->num_tuples)
+ return ExecClearTuple(slot);
+
+ /* Increment the command es_processed count if necessary. */
+ if (dmstate->set_processed)
+ estate->es_processed += 1;
+
+ /*
+ * Store a RETURNING tuple. If has_returning is false, just emit a dummy
+ * tuple. (has_returning is false when the local query is of the form
+ * "UPDATE/DELETE .. RETURNING 1" for example.)
+ */
+ if (!dmstate->has_returning)
+ {
+ ExecStoreAllNullTuple(slot);
+ resultSlot = slot;
+ }
+ else
+ {
+ /*
+ * On error, be sure to release the PGresult on the way out. Callers
+ * do not have PG_TRY blocks to ensure this happens.
+ */
+ PG_TRY();
+ {
+ HeapTuple newtup;
+
+ newtup = make_tuple_from_result_row(dmstate->result,
+ dmstate->next_tuple,
+ dmstate->rel,
+ dmstate->attinmeta,
+ dmstate->retrieved_attrs,
+ node,
+ dmstate->temp_cxt);
+ ExecStoreHeapTuple(newtup, slot, false);
+ }
+ PG_CATCH();
+ {
+ if (dmstate->result)
+ PQclear(dmstate->result);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ /* Get the updated/deleted tuple. */
+ if (dmstate->rel)
+ resultSlot = slot;
+ else
+ resultSlot = apply_returning_filter(dmstate, resultRelInfo, slot, estate);
+ }
+ dmstate->next_tuple++;
+
+ /* Make slot available for evaluation of the local query RETURNING list. */
+ resultRelInfo->ri_projectReturning->pi_exprContext->ecxt_scantuple =
+ resultSlot;
+
+ return slot;
+}
+
+/*
+ * Initialize a filter to extract an updated/deleted tuple from a scan tuple.
+ */
+static void
+init_returning_filter(PgFdwDirectModifyState *dmstate,
+ List *fdw_scan_tlist,
+ Index rtindex)
+{
+ TupleDesc resultTupType = RelationGetDescr(dmstate->resultRel);
+ ListCell *lc;
+ int i;
+
+ /*
+ * Calculate the mapping between the fdw_scan_tlist's entries and the
+ * result tuple's attributes.
+ *
+ * The "map" is an array of indexes of the result tuple's attributes in
+ * fdw_scan_tlist, i.e., one entry for every attribute of the result
+ * tuple. We store zero for any attributes that don't have the
+ * corresponding entries in that list, marking that a NULL is needed in
+ * the result tuple.
+ *
+ * Also get the indexes of the entries for ctid and oid if any.
+ */
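+	/*
+	 * For example, if the result relation has columns (a, b, c) and
+	 * fdw_scan_tlist is (b, ctid, a), with all three entries retrieved from
+	 * the remote, we end up with attnoMap = {3, 1, 0} and ctidAttno = 2;
+	 * column c then comes back as NULL in the result tuple.
+	 */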
+ dmstate->attnoMap = (AttrNumber *)
+ palloc0(resultTupType->natts * sizeof(AttrNumber));
+
+ dmstate->ctidAttno = dmstate->oidAttno = 0;
+
+ i = 1;
+ dmstate->hasSystemCols = false;
+ foreach(lc, fdw_scan_tlist)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+ Var *var = (Var *) tle->expr;
+
+ Assert(IsA(var, Var));
+
+ /*
+ * If the Var is a column of the target relation to be retrieved from
+ * the foreign server, get the index of the entry.
+ */
+ if (var->varno == rtindex &&
+ list_member_int(dmstate->retrieved_attrs, i))
+ {
+ int attrno = var->varattno;
+
+ if (attrno < 0)
+ {
+ /*
+ * We don't retrieve system columns other than ctid and oid.
+ */
+ if (attrno == SelfItemPointerAttributeNumber)
+ dmstate->ctidAttno = i;
+ else
+ Assert(false);
+ dmstate->hasSystemCols = true;
+ }
+ else
+ {
+ /*
+ * We don't retrieve whole-row references to the target
+ * relation either.
+ */
+ Assert(attrno > 0);
+
+ dmstate->attnoMap[attrno - 1] = i;
+ }
+ }
+ i++;
+ }
+}
+
+/*
+ * Extract and return an updated/deleted tuple from a scan tuple.
+ */
+static TupleTableSlot *
+apply_returning_filter(PgFdwDirectModifyState *dmstate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ EState *estate)
+{
+ TupleDesc resultTupType = RelationGetDescr(dmstate->resultRel);
+ TupleTableSlot *resultSlot;
+ Datum *values;
+ bool *isnull;
+ Datum *old_values;
+ bool *old_isnull;
+ int i;
+
+ /*
+ * Use the return tuple slot as a place to store the result tuple.
+ */
+ resultSlot = ExecGetReturningSlot(estate, resultRelInfo);
+
+ /*
+ * Extract all the values of the scan tuple.
+ */
+ slot_getallattrs(slot);
+ old_values = slot->tts_values;
+ old_isnull = slot->tts_isnull;
+
+ /*
+ * Prepare to build the result tuple.
+ */
+ ExecClearTuple(resultSlot);
+ values = resultSlot->tts_values;
+ isnull = resultSlot->tts_isnull;
+
+ /*
+ * Transpose data into proper fields of the result tuple.
+ */
+ for (i = 0; i < resultTupType->natts; i++)
+ {
+ int j = dmstate->attnoMap[i];
+
+ if (j == 0)
+ {
+ values[i] = (Datum) 0;
+ isnull[i] = true;
+ }
+ else
+ {
+ values[i] = old_values[j - 1];
+ isnull[i] = old_isnull[j - 1];
+ }
+ }
+
+ /*
+ * Build the virtual tuple.
+ */
+ ExecStoreVirtualTuple(resultSlot);
+
+ /*
+ * If we have any system columns to return, materialize a heap tuple in
+ * the slot from column values set above and install system columns in
+ * that tuple.
+ */
+ if (dmstate->hasSystemCols)
+ {
+ HeapTuple resultTup = ExecFetchSlotHeapTuple(resultSlot, true, NULL);
+
+ /* ctid */
+ if (dmstate->ctidAttno)
+ {
+ ItemPointer ctid = NULL;
+
+ ctid = (ItemPointer) DatumGetPointer(old_values[dmstate->ctidAttno - 1]);
+ resultTup->t_self = *ctid;
+ }
+
+ /*
+ * And remaining columns
+ *
+ * Note: since we currently don't allow the target relation to appear
+ * on the nullable side of an outer join, any system columns wouldn't
+ * go to NULL.
+ *
+ * Note: no need to care about tableoid here because it will be
+ * initialized in ExecProcessReturning().
+ */
+ HeapTupleHeaderSetXmin(resultTup->t_data, InvalidTransactionId);
+ HeapTupleHeaderSetXmax(resultTup->t_data, InvalidTransactionId);
+ HeapTupleHeaderSetCmin(resultTup->t_data, InvalidTransactionId);
+ }
+
+ /*
+ * And return the result tuple.
+ */
+ return resultSlot;
+}
+
+/*
+ * Prepare for processing of parameters used in remote query.
+ */
+static void
+prepare_query_params(PlanState *node,
+ List *fdw_exprs,
+ int numParams,
+ FmgrInfo **param_flinfo,
+ List **param_exprs,
+ const char ***param_values)
+{
+ int i;
+ ListCell *lc;
+
+ Assert(numParams > 0);
+
+ /* Prepare for output conversion of parameters used in remote query. */
+ *param_flinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * numParams);
+
+ i = 0;
+ foreach(lc, fdw_exprs)
+ {
+ Node *param_expr = (Node *) lfirst(lc);
+ Oid typefnoid;
+ bool isvarlena;
+
+ getTypeOutputInfo(exprType(param_expr), &typefnoid, &isvarlena);
+ fmgr_info(typefnoid, &(*param_flinfo)[i]);
+ i++;
+ }
+
+ /*
+ * Prepare remote-parameter expressions for evaluation. (Note: in
+ * practice, we expect that all these expressions will be just Params, so
+ * we could possibly do something more efficient than using the full
+ * expression-eval machinery for this. But probably there would be little
+ * benefit, and it'd require postgres_fdw to know more than is desirable
+ * about Param evaluation.)
+ */
+ *param_exprs = ExecInitExprList(fdw_exprs, node);
+
+ /* Allocate buffer for text form of query parameters. */
+ *param_values = (const char **) palloc0(numParams * sizeof(char *));
+}
+
+/*
+ * Construct array of query parameter values in text format.
+ */
+static void
+process_query_params(ExprContext *econtext,
+ FmgrInfo *param_flinfo,
+ List *param_exprs,
+ const char **param_values)
+{
+ int nestlevel;
+ int i;
+ ListCell *lc;
+
+ nestlevel = set_transmission_modes();
+
+ i = 0;
+ foreach(lc, param_exprs)
+ {
+ ExprState *expr_state = (ExprState *) lfirst(lc);
+ Datum expr_value;
+ bool isNull;
+
+ /* Evaluate the parameter expression */
+ expr_value = ExecEvalExpr(expr_state, econtext, &isNull);
+
+ /*
+ * Get string representation of each parameter value by invoking
+ * type-specific output function, unless the value is null.
+ */
+ if (isNull)
+ param_values[i] = NULL;
+ else
+ param_values[i] = OutputFunctionCall(&param_flinfo[i], expr_value);
+
+ i++;
+ }
+
+ reset_transmission_modes(nestlevel);
+}
+
+/*
+ * postgresAnalyzeForeignTable
+ * Test whether analyzing this foreign table is supported
+ */
+static bool
+postgresAnalyzeForeignTable(Relation relation,
+ AcquireSampleRowsFunc *func,
+ BlockNumber *totalpages)
+{
+ ForeignTable *table;
+ UserMapping *user;
+ PGconn *conn;
+ StringInfoData sql;
+ PGresult *volatile res = NULL;
+
+ /* Return the row-analysis function pointer */
+ *func = postgresAcquireSampleRowsFunc;
+
+ /*
+ * Now we have to get the number of pages. It's annoying that the ANALYZE
+ * API requires us to return that now, because it forces some duplication
+ * of effort between this routine and postgresAcquireSampleRowsFunc. But
+ * it's probably not worth redefining that API at this point.
+ */
+
+ /*
+ * Get the connection to use. We do the remote access as the table's
+ * owner, even if the ANALYZE was started by some other user.
+ */
+ table = GetForeignTable(RelationGetRelid(relation));
+ user = GetUserMapping(relation->rd_rel->relowner, table->serverid);
+ conn = GetConnection(user, false, NULL);
+
+ /*
+ * Construct command to get page count for relation.
+ */
+ initStringInfo(&sql);
+ deparseAnalyzeSizeSql(&sql, relation);
+
+ /* In what follows, do not risk leaking any PGresults. */
+ PG_TRY();
+ {
+ res = pgfdw_exec_query(conn, sql.data, NULL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, sql.data);
+
+ if (PQntuples(res) != 1 || PQnfields(res) != 1)
+ elog(ERROR, "unexpected result from deparseAnalyzeSizeSql query");
+ *totalpages = strtoul(PQgetvalue(res, 0, 0), NULL, 10);
+ }
+ PG_FINALLY();
+ {
+ if (res)
+ PQclear(res);
+ }
+ PG_END_TRY();
+
+ ReleaseConnection(conn);
+
+ return true;
+}
+
+/*
+ * Acquire a random sample of rows from foreign table managed by postgres_fdw.
+ *
+ * We fetch the whole table from the remote side and pick out some sample rows.
+ *
+ * Selected rows are returned in the caller-allocated array rows[],
+ * which must have at least targrows entries.
+ * The actual number of rows selected is returned as the function result.
+ * We also count the total number of rows in the table and return it into
+ * *totalrows. Note that *totaldeadrows is always set to 0.
+ *
+ * Note that the returned list of rows is not always in order by physical
+ * position in the table. Therefore, correlation estimates derived later
+ * may be meaningless, but it's OK because we don't use the estimates
+ * currently (the planner only pays attention to correlation for indexscans).
+ */
+static int
+postgresAcquireSampleRowsFunc(Relation relation, int elevel,
+ HeapTuple *rows, int targrows,
+ double *totalrows,
+ double *totaldeadrows)
+{
+ PgFdwAnalyzeState astate;
+ ForeignTable *table;
+ ForeignServer *server;
+ UserMapping *user;
+ PGconn *conn;
+ unsigned int cursor_number;
+ StringInfoData sql;
+ PGresult *volatile res = NULL;
+
+ /* Initialize workspace state */
+ astate.rel = relation;
+ astate.attinmeta = TupleDescGetAttInMetadata(RelationGetDescr(relation));
+
+ astate.rows = rows;
+ astate.targrows = targrows;
+ astate.numrows = 0;
+ astate.samplerows = 0;
+ astate.rowstoskip = -1; /* -1 means not set yet */
+ reservoir_init_selection_state(&astate.rstate, targrows);
+
+ /* Remember ANALYZE context, and create a per-tuple temp context */
+ astate.anl_cxt = CurrentMemoryContext;
+ astate.temp_cxt = AllocSetContextCreate(CurrentMemoryContext,
+ "postgres_fdw temporary data",
+ ALLOCSET_SMALL_SIZES);
+
+ /*
+ * Get the connection to use. We do the remote access as the table's
+ * owner, even if the ANALYZE was started by some other user.
+ */
+ table = GetForeignTable(RelationGetRelid(relation));
+ server = GetForeignServer(table->serverid);
+ user = GetUserMapping(relation->rd_rel->relowner, table->serverid);
+ conn = GetConnection(user, false, NULL);
+
+ /*
+ * Construct cursor that retrieves whole rows from remote.
+ */
+ cursor_number = GetCursorNumber(conn);
+ initStringInfo(&sql);
+ appendStringInfo(&sql, "DECLARE c%u CURSOR FOR ", cursor_number);
+ deparseAnalyzeSql(&sql, relation, &astate.retrieved_attrs);
+
+ /* In what follows, do not risk leaking any PGresults. */
+ PG_TRY();
+ {
+ char fetch_sql[64];
+ int fetch_size;
+ ListCell *lc;
+
+ res = pgfdw_exec_query(conn, sql.data, NULL);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, conn, false, sql.data);
+ PQclear(res);
+ res = NULL;
+
+ /*
+ * Determine the fetch size. The default is arbitrary, but shouldn't
+ * be enormous.
+ */
+ fetch_size = 100;
+ foreach(lc, server->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "fetch_size") == 0)
+ {
+ (void) parse_int(defGetString(def), &fetch_size, 0, NULL);
+ break;
+ }
+ }
+ foreach(lc, table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "fetch_size") == 0)
+ {
+ (void) parse_int(defGetString(def), &fetch_size, 0, NULL);
+ break;
+ }
+ }
+
+ /* Construct command to fetch rows from remote. */
+ snprintf(fetch_sql, sizeof(fetch_sql), "FETCH %d FROM c%u",
+ fetch_size, cursor_number);
+
+ /* Retrieve and process rows a batch at a time. */
+ for (;;)
+ {
+ int numrows;
+ int i;
+
+ /* Allow users to cancel long query */
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * XXX possible future improvement: if rowstoskip is large, we
+ * could issue a MOVE rather than physically fetching the rows,
+ * then just adjust rowstoskip and samplerows appropriately.
+ */
+
+ /* Fetch some rows */
+ res = pgfdw_exec_query(conn, fetch_sql, NULL);
+ /* On error, report the original query, not the FETCH. */
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, sql.data);
+
+ /* Process whatever we got. */
+ numrows = PQntuples(res);
+ for (i = 0; i < numrows; i++)
+ analyze_row_processor(res, i, &astate);
+
+ PQclear(res);
+ res = NULL;
+
+ /* Must be EOF if we didn't get all the rows requested. */
+ if (numrows < fetch_size)
+ break;
+ }
+
+ /* Close the cursor, just to be tidy. */
+ close_cursor(conn, cursor_number, NULL);
+ }
+ PG_CATCH();
+ {
+ if (res)
+ PQclear(res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ ReleaseConnection(conn);
+
+	/* We assume that we have no dead tuples. */
+ *totaldeadrows = 0.0;
+
+ /* We've retrieved all living tuples from foreign server. */
+ *totalrows = astate.samplerows;
+
+ /*
+ * Emit some interesting relation info
+ */
+ ereport(elevel,
+ (errmsg("\"%s\": table contains %.0f rows, %d rows in sample",
+ RelationGetRelationName(relation),
+ astate.samplerows, astate.numrows)));
+
+ return astate.numrows;
+}
+
+/*
+ * Collect sample rows from the result of query.
+ * - Use all tuples in sample until target # of samples are collected.
+ * - Subsequently, replace already-sampled tuples randomly.
+ */
+static void
+analyze_row_processor(PGresult *res, int row, PgFdwAnalyzeState *astate)
+{
+ int targrows = astate->targrows;
+ int pos; /* array index to store tuple in */
+ MemoryContext oldcontext;
+
+ /* Always increment sample row counter. */
+ astate->samplerows += 1;
+
+ /*
+	 * Determine the slot where this sample row should be stored.  Set pos to
+	 * a negative value to indicate the row should be skipped.
+ */
+ if (astate->numrows < targrows)
+ {
+ /* First targrows rows are always included into the sample */
+ pos = astate->numrows++;
+ }
+ else
+ {
+ /*
+ * Now we start replacing tuples in the sample until we reach the end
+ * of the relation. Same algorithm as in acquire_sample_rows in
+ * analyze.c; see Jeff Vitter's paper.
+ */
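+		/*
+		 * (The effect is that, by the end of the scan, each remote row has
+		 * an equal probability of targrows / samplerows of being in the
+		 * sample.)
+		 */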
+ if (astate->rowstoskip < 0)
+ astate->rowstoskip = reservoir_get_next_S(&astate->rstate, astate->samplerows, targrows);
+
+ if (astate->rowstoskip <= 0)
+ {
+ /* Choose a random reservoir element to replace. */
+ pos = (int) (targrows * sampler_random_fract(astate->rstate.randstate));
+ Assert(pos >= 0 && pos < targrows);
+ heap_freetuple(astate->rows[pos]);
+ }
+ else
+ {
+ /* Skip this tuple. */
+ pos = -1;
+ }
+
+ astate->rowstoskip -= 1;
+ }
+
+ if (pos >= 0)
+ {
+ /*
+ * Create sample tuple from current result row, and store it in the
+ * position determined above. The tuple has to be created in anl_cxt.
+ */
+ oldcontext = MemoryContextSwitchTo(astate->anl_cxt);
+
+ astate->rows[pos] = make_tuple_from_result_row(res, row,
+ astate->rel,
+ astate->attinmeta,
+ astate->retrieved_attrs,
+ NULL,
+ astate->temp_cxt);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+}
+
+/*
+ * Import a foreign schema
+ */
+static List *
+postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
+{
+ List *commands = NIL;
+ bool import_collate = true;
+ bool import_default = false;
+ bool import_generated = true;
+ bool import_not_null = true;
+ ForeignServer *server;
+ UserMapping *mapping;
+ PGconn *conn;
+ StringInfoData buf;
+ PGresult *volatile res = NULL;
+ int numrows,
+ i;
+ ListCell *lc;
+
+ /* Parse statement options */
+ foreach(lc, stmt->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "import_collate") == 0)
+ import_collate = defGetBoolean(def);
+ else if (strcmp(def->defname, "import_default") == 0)
+ import_default = defGetBoolean(def);
+ else if (strcmp(def->defname, "import_generated") == 0)
+ import_generated = defGetBoolean(def);
+ else if (strcmp(def->defname, "import_not_null") == 0)
+ import_not_null = defGetBoolean(def);
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_INVALID_OPTION_NAME),
+ errmsg("invalid option \"%s\"", def->defname)));
+ }
+
+ /*
+ * Get connection to the foreign server. Connection manager will
+ * establish new connection if necessary.
+ */
+ server = GetForeignServer(serverOid);
+ mapping = GetUserMapping(GetUserId(), server->serverid);
+ conn = GetConnection(mapping, false, NULL);
+
+ /* Don't attempt to import collation if remote server hasn't got it */
+ if (PQserverVersion(conn) < 90100)
+ import_collate = false;
+
+ /* Create workspace for strings */
+ initStringInfo(&buf);
+
+ /* In what follows, do not risk leaking any PGresults. */
+ PG_TRY();
+ {
+ /* Check that the schema really exists */
+ appendStringInfoString(&buf, "SELECT 1 FROM pg_catalog.pg_namespace WHERE nspname = ");
+ deparseStringLiteral(&buf, stmt->remote_schema);
+
+ res = pgfdw_exec_query(conn, buf.data, NULL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, buf.data);
+
+ if (PQntuples(res) != 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_SCHEMA_NOT_FOUND),
+ errmsg("schema \"%s\" is not present on foreign server \"%s\"",
+ stmt->remote_schema, server->servername)));
+
+ PQclear(res);
+ res = NULL;
+ resetStringInfo(&buf);
+
+ /*
+ * Fetch all table data from this schema, possibly restricted by
+ * EXCEPT or LIMIT TO. (We don't actually need to pay any attention
+ * to EXCEPT/LIMIT TO here, because the core code will filter the
+ * statements we return according to those lists anyway. But it
+ * should save a few cycles to not process excluded tables in the
+ * first place.)
+ *
+		 * Import table data for partitions only when they are explicitly
+		 * specified in the LIMIT TO clause.  Otherwise ignore them and only
+		 * include the definitions of the root partitioned tables, so that
+		 * the imported schema still gives access to the complete remote
+		 * data set locally.
+ *
+ * Note: because we run the connection with search_path restricted to
+ * pg_catalog, the format_type() and pg_get_expr() outputs will always
+ * include a schema name for types/functions in other schemas, which
+ * is what we want.
+ */
+ appendStringInfoString(&buf,
+ "SELECT relname, "
+ " attname, "
+ " format_type(atttypid, atttypmod), "
+ " attnotnull, ");
+
+ /* Generated columns are supported since Postgres 12 */
+ if (PQserverVersion(conn) >= 120000)
+ appendStringInfoString(&buf,
+ " attgenerated, "
+ " pg_get_expr(adbin, adrelid), ");
+ else
+ appendStringInfoString(&buf,
+ " NULL, "
+ " pg_get_expr(adbin, adrelid), ");
+
+ if (import_collate)
+ appendStringInfoString(&buf,
+ " collname, "
+ " collnsp.nspname "
+ "FROM pg_class c "
+ " JOIN pg_namespace n ON "
+ " relnamespace = n.oid "
+ " LEFT JOIN pg_attribute a ON "
+ " attrelid = c.oid AND attnum > 0 "
+ " AND NOT attisdropped "
+ " LEFT JOIN pg_attrdef ad ON "
+ " adrelid = c.oid AND adnum = attnum "
+ " LEFT JOIN pg_collation coll ON "
+ " coll.oid = attcollation "
+ " LEFT JOIN pg_namespace collnsp ON "
+ " collnsp.oid = collnamespace ");
+ else
+ appendStringInfoString(&buf,
+ " NULL, NULL "
+ "FROM pg_class c "
+ " JOIN pg_namespace n ON "
+ " relnamespace = n.oid "
+ " LEFT JOIN pg_attribute a ON "
+ " attrelid = c.oid AND attnum > 0 "
+ " AND NOT attisdropped "
+ " LEFT JOIN pg_attrdef ad ON "
+ " adrelid = c.oid AND adnum = attnum ");
+
+ appendStringInfoString(&buf,
+ "WHERE c.relkind IN ("
+ CppAsString2(RELKIND_RELATION) ","
+ CppAsString2(RELKIND_VIEW) ","
+ CppAsString2(RELKIND_FOREIGN_TABLE) ","
+ CppAsString2(RELKIND_MATVIEW) ","
+ CppAsString2(RELKIND_PARTITIONED_TABLE) ") "
+ " AND n.nspname = ");
+ deparseStringLiteral(&buf, stmt->remote_schema);
+
+ /* Partitions are supported since Postgres 10 */
+ if (PQserverVersion(conn) >= 100000 &&
+ stmt->list_type != FDW_IMPORT_SCHEMA_LIMIT_TO)
+ appendStringInfoString(&buf, " AND NOT c.relispartition ");
+
+ /* Apply restrictions for LIMIT TO and EXCEPT */
+ if (stmt->list_type == FDW_IMPORT_SCHEMA_LIMIT_TO ||
+ stmt->list_type == FDW_IMPORT_SCHEMA_EXCEPT)
+ {
+ bool first_item = true;
+
+ appendStringInfoString(&buf, " AND c.relname ");
+ if (stmt->list_type == FDW_IMPORT_SCHEMA_EXCEPT)
+ appendStringInfoString(&buf, "NOT ");
+ appendStringInfoString(&buf, "IN (");
+
+ /* Append list of table names within IN clause */
+ foreach(lc, stmt->table_list)
+ {
+ RangeVar *rv = (RangeVar *) lfirst(lc);
+
+ if (first_item)
+ first_item = false;
+ else
+ appendStringInfoString(&buf, ", ");
+ deparseStringLiteral(&buf, rv->relname);
+ }
+ appendStringInfoChar(&buf, ')');
+ }
+
+ /* Append ORDER BY at the end of query to ensure output ordering */
+ appendStringInfoString(&buf, " ORDER BY c.relname, a.attnum");
+
+ /* Fetch the data */
+ res = pgfdw_exec_query(conn, buf.data, NULL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, buf.data);
+
+ /* Process results */
+ numrows = PQntuples(res);
+		/* note: i is incremented in the inner loop's while() test */
+ for (i = 0; i < numrows;)
+ {
+ char *tablename = PQgetvalue(res, i, 0);
+ bool first_item = true;
+
+ resetStringInfo(&buf);
+ appendStringInfo(&buf, "CREATE FOREIGN TABLE %s (\n",
+ quote_identifier(tablename));
+
+ /* Scan all rows for this table */
+ do
+ {
+ char *attname;
+ char *typename;
+ char *attnotnull;
+ char *attgenerated;
+ char *attdefault;
+ char *collname;
+ char *collnamespace;
+
+ /* If table has no columns, we'll see nulls here */
+ if (PQgetisnull(res, i, 1))
+ continue;
+
+ attname = PQgetvalue(res, i, 1);
+ typename = PQgetvalue(res, i, 2);
+ attnotnull = PQgetvalue(res, i, 3);
+ attgenerated = PQgetisnull(res, i, 4) ? (char *) NULL :
+ PQgetvalue(res, i, 4);
+ attdefault = PQgetisnull(res, i, 5) ? (char *) NULL :
+ PQgetvalue(res, i, 5);
+ collname = PQgetisnull(res, i, 6) ? (char *) NULL :
+ PQgetvalue(res, i, 6);
+ collnamespace = PQgetisnull(res, i, 7) ? (char *) NULL :
+ PQgetvalue(res, i, 7);
+
+ if (first_item)
+ first_item = false;
+ else
+ appendStringInfoString(&buf, ",\n");
+
+ /* Print column name and type */
+ appendStringInfo(&buf, " %s %s",
+ quote_identifier(attname),
+ typename);
+
+ /*
+ * Add column_name option so that renaming the foreign table's
+ * column doesn't break the association to the underlying
+ * column.
+ */
+ appendStringInfoString(&buf, " OPTIONS (column_name ");
+ deparseStringLiteral(&buf, attname);
+ appendStringInfoChar(&buf, ')');
+
+ /* Add COLLATE if needed */
+ if (import_collate && collname != NULL && collnamespace != NULL)
+ appendStringInfo(&buf, " COLLATE %s.%s",
+ quote_identifier(collnamespace),
+ quote_identifier(collname));
+
+ /* Add DEFAULT if needed */
+ if (import_default && attdefault != NULL &&
+ (!attgenerated || !attgenerated[0]))
+ appendStringInfo(&buf, " DEFAULT %s", attdefault);
+
+ /* Add GENERATED if needed */
+ if (import_generated && attgenerated != NULL &&
+ attgenerated[0] == ATTRIBUTE_GENERATED_STORED)
+ {
+ Assert(attdefault != NULL);
+ appendStringInfo(&buf,
+ " GENERATED ALWAYS AS (%s) STORED",
+ attdefault);
+ }
+
+ /* Add NOT NULL if needed */
+ if (import_not_null && attnotnull[0] == 't')
+ appendStringInfoString(&buf, " NOT NULL");
+ }
+ while (++i < numrows &&
+ strcmp(PQgetvalue(res, i, 0), tablename) == 0);
+
+ /*
+ * Add server name and table-level options. We specify remote
+ * schema and table name as options (the latter to ensure that
+ * renaming the foreign table doesn't break the association).
+ */
+ appendStringInfo(&buf, "\n) SERVER %s\nOPTIONS (",
+ quote_identifier(server->servername));
+
+ appendStringInfoString(&buf, "schema_name ");
+ deparseStringLiteral(&buf, stmt->remote_schema);
+ appendStringInfoString(&buf, ", table_name ");
+ deparseStringLiteral(&buf, tablename);
+
+ appendStringInfoString(&buf, ");");
+
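+			/*
+			 * At this point buf holds one complete command, e.g. (names
+			 * assumed):
+			 *
+			 *   CREATE FOREIGN TABLE t1 (
+			 *     id integer OPTIONS (column_name 'id') NOT NULL
+			 *   ) SERVER remote_srv
+			 *   OPTIONS (schema_name 'public', table_name 't1');
+			 */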
+ commands = lappend(commands, pstrdup(buf.data));
+ }
+ }
+ PG_FINALLY();
+ {
+ if (res)
+ PQclear(res);
+ }
+ PG_END_TRY();
+
+ ReleaseConnection(conn);
+
+ return commands;
+}
+
+/*
+ * Assess whether the join between inner and outer relations can be pushed down
+ * to the foreign server.  As a side effect, save the information we obtain in
+ * this function in the PgFdwRelationInfo passed in.
+ */
+static bool
+foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ JoinPathExtraData *extra)
+{
+ PgFdwRelationInfo *fpinfo;
+ PgFdwRelationInfo *fpinfo_o;
+ PgFdwRelationInfo *fpinfo_i;
+ ListCell *lc;
+ List *joinclauses;
+
+ /*
+ * We support pushing down INNER, LEFT, RIGHT and FULL OUTER joins.
+ * Constructing queries representing SEMI and ANTI joins is hard, hence
+ * not considered right now.
+ */
+ if (jointype != JOIN_INNER && jointype != JOIN_LEFT &&
+ jointype != JOIN_RIGHT && jointype != JOIN_FULL)
+ return false;
+
+ /*
+ * If either of the joining relations is marked as unsafe to pushdown, the
+ * join can not be pushed down.
+ */
+ fpinfo = (PgFdwRelationInfo *) joinrel->fdw_private;
+ fpinfo_o = (PgFdwRelationInfo *) outerrel->fdw_private;
+ fpinfo_i = (PgFdwRelationInfo *) innerrel->fdw_private;
+ if (!fpinfo_o || !fpinfo_o->pushdown_safe ||
+ !fpinfo_i || !fpinfo_i->pushdown_safe)
+ return false;
+
+ /*
+ * If joining relations have local conditions, those conditions are
+ * required to be applied before joining the relations. Hence the join can
+ * not be pushed down.
+ */
+ if (fpinfo_o->local_conds || fpinfo_i->local_conds)
+ return false;
+
+ /*
+ * Merge FDW options. We might be tempted to do this after we have deemed
+ * the foreign join to be OK. But we must do this beforehand so that we
+ * know which quals can be evaluated on the foreign server, which might
+ * depend on shippable_extensions.
+ */
+ fpinfo->server = fpinfo_o->server;
+ merge_fdw_options(fpinfo, fpinfo_o, fpinfo_i);
+
+ /*
+ * Separate restrict list into join quals and pushed-down (other) quals.
+ *
+ * Join quals belonging to an outer join must all be shippable, else we
+ * cannot execute the join remotely. Add such quals to 'joinclauses'.
+ *
+ * Add other quals to fpinfo->remote_conds if they are shippable, else to
+ * fpinfo->local_conds. In an inner join it's okay to execute conditions
+ * either locally or remotely; the same is true for pushed-down conditions
+ * at an outer join.
+ *
+ * Note we might return failure after having already scribbled on
+ * fpinfo->remote_conds and fpinfo->local_conds. That's okay because we
+ * won't consult those lists again if we deem the join unshippable.
+ */
+ joinclauses = NIL;
+ foreach(lc, extra->restrictlist)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+ bool is_remote_clause = is_foreign_expr(root, joinrel,
+ rinfo->clause);
+
+ if (IS_OUTER_JOIN(jointype) &&
+ !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
+ {
+ if (!is_remote_clause)
+ return false;
+ joinclauses = lappend(joinclauses, rinfo);
+ }
+ else
+ {
+ if (is_remote_clause)
+ fpinfo->remote_conds = lappend(fpinfo->remote_conds, rinfo);
+ else
+ fpinfo->local_conds = lappend(fpinfo->local_conds, rinfo);
+ }
+ }
+
+ /*
+ * deparseExplicitTargetList() isn't smart enough to handle anything other
+ * than a Var. In particular, if there's some PlaceHolderVar that would
+ * need to be evaluated within this join tree (because there's an upper
+ * reference to a quantity that may go to NULL as a result of an outer
+ * join), then we can't try to push the join down because we'll fail when
+ * we get to deparseExplicitTargetList(). However, a PlaceHolderVar that
+ * needs to be evaluated *at the top* of this join tree is OK, because we
+ * can do that locally after fetching the results from the remote side.
+ */
+ foreach(lc, root->placeholder_list)
+ {
+ PlaceHolderInfo *phinfo = lfirst(lc);
+ Relids relids;
+
+ /* PlaceHolderInfo refers to parent relids, not child relids. */
+ relids = IS_OTHER_REL(joinrel) ?
+ joinrel->top_parent_relids : joinrel->relids;
+
+ if (bms_is_subset(phinfo->ph_eval_at, relids) &&
+ bms_nonempty_difference(relids, phinfo->ph_eval_at))
+ return false;
+ }
+
+ /* Save the join clauses, for later use. */
+ fpinfo->joinclauses = joinclauses;
+
+ fpinfo->outerrel = outerrel;
+ fpinfo->innerrel = innerrel;
+ fpinfo->jointype = jointype;
+
+ /*
+ * By default, both the input relations are not required to be deparsed as
+ * subqueries, but there might be some relations covered by the input
+ * relations that are required to be deparsed as subqueries, so save the
+ * relids of those relations for later use by the deparser.
+ */
+ fpinfo->make_outerrel_subquery = false;
+ fpinfo->make_innerrel_subquery = false;
+ Assert(bms_is_subset(fpinfo_o->lower_subquery_rels, outerrel->relids));
+ Assert(bms_is_subset(fpinfo_i->lower_subquery_rels, innerrel->relids));
+ fpinfo->lower_subquery_rels = bms_union(fpinfo_o->lower_subquery_rels,
+ fpinfo_i->lower_subquery_rels);
+
+ /*
+ * Pull the other remote conditions from the joining relations into join
+ * clauses or other remote clauses (remote_conds) of this relation
+ * wherever possible. This avoids building subqueries at every join step.
+ *
+ * For an inner join, clauses from both the relations are added to the
+ * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from
+ * the outer side are added to remote_conds since those can be evaluated
+ * after the join is evaluated. The clauses from inner side are added to
+ * the joinclauses, since they need to be evaluated while constructing the
+ * join.
+ *
+ * For a FULL OUTER JOIN, the other clauses from either relation cannot be
+ * added to the joinclauses or remote_conds, since each relation acts as
+ * an outer relation for the other.
+ *
+ * The joining sides cannot have local conditions, so there is no need to
+ * test shippability of the clauses being pulled up.
+ */
+ switch (jointype)
+ {
+ case JOIN_INNER:
+ fpinfo->remote_conds = list_concat(fpinfo->remote_conds,
+ fpinfo_i->remote_conds);
+ fpinfo->remote_conds = list_concat(fpinfo->remote_conds,
+ fpinfo_o->remote_conds);
+ break;
+
+ case JOIN_LEFT:
+ fpinfo->joinclauses = list_concat(fpinfo->joinclauses,
+ fpinfo_i->remote_conds);
+ fpinfo->remote_conds = list_concat(fpinfo->remote_conds,
+ fpinfo_o->remote_conds);
+ break;
+
+ case JOIN_RIGHT:
+ fpinfo->joinclauses = list_concat(fpinfo->joinclauses,
+ fpinfo_o->remote_conds);
+ fpinfo->remote_conds = list_concat(fpinfo->remote_conds,
+ fpinfo_i->remote_conds);
+ break;
+
+ case JOIN_FULL:
+
+ /*
+ * In this case, if any of the input relations has conditions, we
+ * need to deparse that relation as a subquery so that the
+ * conditions can be evaluated before the join. Remember it in
+ * the fpinfo of this relation so that the deparser can take
+ * appropriate action. Also, save the relids of base relations
+ * covered by that relation for later use by the deparser.
+ */
+ if (fpinfo_o->remote_conds)
+ {
+ fpinfo->make_outerrel_subquery = true;
+ fpinfo->lower_subquery_rels =
+ bms_add_members(fpinfo->lower_subquery_rels,
+ outerrel->relids);
+ }
+ if (fpinfo_i->remote_conds)
+ {
+ fpinfo->make_innerrel_subquery = true;
+ fpinfo->lower_subquery_rels =
+ bms_add_members(fpinfo->lower_subquery_rels,
+ innerrel->relids);
+ }
+ break;
+
+ default:
+ /* Should not happen, we have just checked this above */
+ elog(ERROR, "unsupported join type %d", jointype);
+ }
+
+ /*
+ * For an inner join, all restrictions can be treated alike. Treating the
+ * pushed down conditions as join conditions allows a top level full outer
+ * join to be deparsed without requiring subqueries.
+ */
+ if (jointype == JOIN_INNER)
+ {
+ Assert(!fpinfo->joinclauses);
+ fpinfo->joinclauses = fpinfo->remote_conds;
+ fpinfo->remote_conds = NIL;
+ }
+
+ /* Mark that this join can be pushed down safely */
+ fpinfo->pushdown_safe = true;
+
+ /* Get user mapping */
+ if (fpinfo->use_remote_estimate)
+ {
+ if (fpinfo_o->use_remote_estimate)
+ fpinfo->user = fpinfo_o->user;
+ else
+ fpinfo->user = fpinfo_i->user;
+ }
+ else
+ fpinfo->user = NULL;
+
+ /*
+ * Set # of retrieved rows and cached relation costs to some negative
+ * value, so that we can detect when they are set to some sensible values,
+ * during one (usually the first) of the calls to estimate_path_cost_size.
+ */
+ fpinfo->retrieved_rows = -1;
+ fpinfo->rel_startup_cost = -1;
+ fpinfo->rel_total_cost = -1;
+
+ /*
+ * Set the string describing this join relation to be used in EXPLAIN
+ * output of corresponding ForeignScan. Note that the decoration we add
+ * to the base relation names mustn't include any digits, or it'll confuse
+ * postgresExplainForeignScan.
+ */
+ fpinfo->relation_name = psprintf("(%s) %s JOIN (%s)",
+ fpinfo_o->relation_name,
+ get_jointype_name(fpinfo->jointype),
+ fpinfo_i->relation_name);
+
+ /*
+ * Set the relation index. This is defined as the position of this
+ * joinrel in the join_rel_list list plus the length of the rtable list.
+ * Note that since this joinrel is at the end of the join_rel_list list
+ * when we are called, we can get the position by list_length.
+ */
+ Assert(fpinfo->relation_index == 0); /* shouldn't be set yet */
+ fpinfo->relation_index =
+ list_length(root->parse->rtable) + list_length(root->join_rel_list);
+
+ return true;
+}
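+
+/*
+ * For illustration only: when foreign_join_ok() succeeds for a query such as
+ *
+ *     SELECT * FROM ft1 LEFT JOIN ft2 ON ft1.id = ft2.id;
+ *
+ * (ft1 and ft2 being hypothetical foreign tables on the same server, joined
+ * on a shippable condition), the whole join can be deparsed and executed on
+ * the remote server instead of fetching and joining both tables locally.
+ */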
+
+static void
+add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel,
+ Path *epq_path)
+{
+ List *useful_pathkeys_list = NIL; /* List of all pathkeys */
+ ListCell *lc;
+
+ useful_pathkeys_list = get_useful_pathkeys_for_relation(root, rel);
+
+ /* Create one path for each set of pathkeys we found above. */
+ foreach(lc, useful_pathkeys_list)
+ {
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+ List *useful_pathkeys = lfirst(lc);
+ Path *sorted_epq_path;
+
+ estimate_path_cost_size(root, rel, NIL, useful_pathkeys, NULL,
+ &rows, &width, &startup_cost, &total_cost);
+
+ /*
+ * The EPQ path must be at least as well sorted as the path itself, in
+ * case it gets used as input to a mergejoin.
+ */
+ sorted_epq_path = epq_path;
+ if (sorted_epq_path != NULL &&
+ !pathkeys_contained_in(useful_pathkeys,
+ sorted_epq_path->pathkeys))
+ sorted_epq_path = (Path *)
+ create_sort_path(root,
+ rel,
+ sorted_epq_path,
+ useful_pathkeys,
+ -1.0);
+
+ if (IS_SIMPLE_REL(rel))
+ add_path(rel, (Path *)
+ create_foreignscan_path(root, rel,
+ NULL,
+ rows,
+ startup_cost,
+ total_cost,
+ useful_pathkeys,
+ rel->lateral_relids,
+ sorted_epq_path,
+ NIL));
+ else
+ add_path(rel, (Path *)
+ create_foreign_join_path(root, rel,
+ NULL,
+ rows,
+ startup_cost,
+ total_cost,
+ useful_pathkeys,
+ rel->lateral_relids,
+ sorted_epq_path,
+ NIL));
+ }
+}
+
+/*
+ * Parse options from foreign server and apply them to fpinfo.
+ *
+ * New options might also require tweaking merge_fdw_options().
+ */
+static void
+apply_server_options(PgFdwRelationInfo *fpinfo)
+{
+ ListCell *lc;
+
+ foreach(lc, fpinfo->server->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "use_remote_estimate") == 0)
+ fpinfo->use_remote_estimate = defGetBoolean(def);
+ else if (strcmp(def->defname, "fdw_startup_cost") == 0)
+ (void) parse_real(defGetString(def), &fpinfo->fdw_startup_cost, 0,
+ NULL);
+ else if (strcmp(def->defname, "fdw_tuple_cost") == 0)
+ (void) parse_real(defGetString(def), &fpinfo->fdw_tuple_cost, 0,
+ NULL);
+ else if (strcmp(def->defname, "extensions") == 0)
+ fpinfo->shippable_extensions =
+ ExtractExtensionList(defGetString(def), false);
+ else if (strcmp(def->defname, "fetch_size") == 0)
+ (void) parse_int(defGetString(def), &fpinfo->fetch_size, 0, NULL);
+ else if (strcmp(def->defname, "async_capable") == 0)
+ fpinfo->async_capable = defGetBoolean(def);
+ }
+}
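+
+/*
+ * For illustration only: the server-level options consumed above are
+ * typically declared with SQL such as
+ *
+ *     CREATE SERVER myserver FOREIGN DATA WRAPPER postgres_fdw
+ *         OPTIONS (use_remote_estimate 'true', fdw_startup_cost '25',
+ *                  fetch_size '500', async_capable 'true');
+ *
+ * where "myserver" and the option values are just examples.
+ */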
+
+/*
+ * Parse options from foreign table and apply them to fpinfo.
+ *
+ * New options might also require tweaking merge_fdw_options().
+ */
+static void
+apply_table_options(PgFdwRelationInfo *fpinfo)
+{
+ ListCell *lc;
+
+ foreach(lc, fpinfo->table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "use_remote_estimate") == 0)
+ fpinfo->use_remote_estimate = defGetBoolean(def);
+ else if (strcmp(def->defname, "fetch_size") == 0)
+ (void) parse_int(defGetString(def), &fpinfo->fetch_size, 0, NULL);
+ else if (strcmp(def->defname, "async_capable") == 0)
+ fpinfo->async_capable = defGetBoolean(def);
+ }
+}
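+
+/*
+ * For illustration only: the table-level options consumed above are
+ * typically declared with SQL such as
+ *
+ *     ALTER FOREIGN TABLE my_ft OPTIONS (ADD use_remote_estimate 'true',
+ *                                        ADD fetch_size '1000');
+ *
+ * where "my_ft" is a hypothetical foreign table; a table-level setting
+ * overrides the corresponding server-level setting because these options
+ * are applied after apply_server_options().
+ */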
+
+/*
+ * Merge FDW options from input relations into a new set of options for a join
+ * or an upper rel.
+ *
+ * For a join relation, FDW-specific information about the inner and outer
+ * relations is provided using fpinfo_i and fpinfo_o. For an upper relation,
+ * fpinfo_o provides the information for the input relation; fpinfo_i is
+ * expected to be NULL.
+ */
+static void
+merge_fdw_options(PgFdwRelationInfo *fpinfo,
+ const PgFdwRelationInfo *fpinfo_o,
+ const PgFdwRelationInfo *fpinfo_i)
+{
+ /* We must always have fpinfo_o. */
+ Assert(fpinfo_o);
+
+ /* fpinfo_i may be NULL, but if present the servers must both match. */
+ Assert(!fpinfo_i ||
+ fpinfo_i->server->serverid == fpinfo_o->server->serverid);
+
+ /*
+ * Copy the server specific FDW options. (For a join, both relations come
+ * from the same server, so the server options should have the same value
+ * for both relations.)
+ */
+ fpinfo->fdw_startup_cost = fpinfo_o->fdw_startup_cost;
+ fpinfo->fdw_tuple_cost = fpinfo_o->fdw_tuple_cost;
+ fpinfo->shippable_extensions = fpinfo_o->shippable_extensions;
+ fpinfo->use_remote_estimate = fpinfo_o->use_remote_estimate;
+ fpinfo->fetch_size = fpinfo_o->fetch_size;
+ fpinfo->async_capable = fpinfo_o->async_capable;
+
+ /* Merge the table level options from either side of the join. */
+ if (fpinfo_i)
+ {
+ /*
+ * We'll prefer to use remote estimates for this join if any table
+ * from either side of the join is using remote estimates. This is
+ * most likely going to be preferred since they're already willing to
+ * pay the price of a round trip to get the remote EXPLAIN. In any
+ * case it's not entirely clear how we might otherwise handle this
+ * best.
+ */
+ fpinfo->use_remote_estimate = fpinfo_o->use_remote_estimate ||
+ fpinfo_i->use_remote_estimate;
+
+ /*
+ * Set fetch size to maximum of the joining sides, since we are
+ * expecting the rows returned by the join to be proportional to the
+ * relation sizes.
+ */
+ fpinfo->fetch_size = Max(fpinfo_o->fetch_size, fpinfo_i->fetch_size);
+
+ /*
+ * We'll prefer to consider this join async-capable if any table from
+ * either side of the join is considered async-capable. This would be
+ * reasonable because in that case the foreign server would have its
+ * own resources to scan that table asynchronously, and the join could
+ * also be computed asynchronously using those resources.
+ */
+ fpinfo->async_capable = fpinfo_o->async_capable ||
+ fpinfo_i->async_capable;
+ }
+}
+
+/*
+ * postgresGetForeignJoinPaths
+ * Add possible ForeignPath to joinrel, if join is safe to push down.
+ */
+static void
+postgresGetForeignJoinPaths(PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra)
+{
+ PgFdwRelationInfo *fpinfo;
+ ForeignPath *joinpath;
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+ Path *epq_path; /* Path to create plan to be executed when
+ * EvalPlanQual gets triggered. */
+
+ /*
+ * Skip if this join combination has been considered already.
+ */
+ if (joinrel->fdw_private)
+ return;
+
+ /*
+ * This code does not work for joins with lateral references, since those
+ * must have parameterized paths, which we don't generate yet.
+ */
+ if (!bms_is_empty(joinrel->lateral_relids))
+ return;
+
+ /*
+ * Create an unfinished PgFdwRelationInfo entry to indicate that the join
+ * relation has already been considered, so that we won't waste time
+ * judging the safety of join pushdown and adding the same paths again if
+ * it is found safe. Once we know that this join can be pushed down, we
+ * fill in the entry.
+ */
+ fpinfo = (PgFdwRelationInfo *) palloc0(sizeof(PgFdwRelationInfo));
+ fpinfo->pushdown_safe = false;
+ joinrel->fdw_private = fpinfo;
+ /* attrs_used is only for base relations. */
+ fpinfo->attrs_used = NULL;
+
+ /*
+ * If there is a possibility that EvalPlanQual will be executed, we need
+ * to be able to reconstruct the row using scans of the base relations.
+ * GetExistingLocalJoinPath will find a suitable path for this purpose in
+ * the path list of the joinrel, if one exists. We must be careful to
+ * call it before adding any ForeignPath, since the ForeignPath might
+ * dominate the only suitable local path available. We also do it before
+ * calling foreign_join_ok(), since that function updates fpinfo and marks
+ * it as pushable if the join is found to be pushable.
+ */
+ if (root->parse->commandType == CMD_DELETE ||
+ root->parse->commandType == CMD_UPDATE ||
+ root->rowMarks)
+ {
+ epq_path = GetExistingLocalJoinPath(joinrel);
+ if (!epq_path)
+ {
+ elog(DEBUG3, "could not push down foreign join because a local path suitable for EPQ checks was not found");
+ return;
+ }
+ }
+ else
+ epq_path = NULL;
+
+ if (!foreign_join_ok(root, joinrel, jointype, outerrel, innerrel, extra))
+ {
+ /* Free path required for EPQ if we copied one; we don't need it now */
+ if (epq_path)
+ pfree(epq_path);
+ return;
+ }
+
+ /*
+ * Compute the selectivity and cost of the local_conds, so we don't have
+ * to do it over again for each path. The best we can do for these
+ * conditions is to estimate selectivity on the basis of local statistics.
+ * The local conditions are applied after the join has been computed on
+ * the remote side like quals in WHERE clause, so pass jointype as
+ * JOIN_INNER.
+ */
+ fpinfo->local_conds_sel = clauselist_selectivity(root,
+ fpinfo->local_conds,
+ 0,
+ JOIN_INNER,
+ NULL);
+ cost_qual_eval(&fpinfo->local_conds_cost, fpinfo->local_conds, root);
+
+ /*
+ * If we are going to estimate costs locally, estimate the join clause
+ * selectivity here while we have special join info.
+ */
+ if (!fpinfo->use_remote_estimate)
+ fpinfo->joinclause_sel = clauselist_selectivity(root, fpinfo->joinclauses,
+ 0, fpinfo->jointype,
+ extra->sjinfo);
+
+ /* Estimate costs for bare join relation */
+ estimate_path_cost_size(root, joinrel, NIL, NIL, NULL,
+ &rows, &width, &startup_cost, &total_cost);
+ /* Now update this information in the joinrel */
+ joinrel->rows = rows;
+ joinrel->reltarget->width = width;
+ fpinfo->rows = rows;
+ fpinfo->width = width;
+ fpinfo->startup_cost = startup_cost;
+ fpinfo->total_cost = total_cost;
+
+ /*
+ * Create a new join path and add it to the joinrel which represents a
+ * join between foreign tables.
+ */
+ joinpath = create_foreign_join_path(root,
+ joinrel,
+ NULL, /* default pathtarget */
+ rows,
+ startup_cost,
+ total_cost,
+ NIL, /* no pathkeys */
+ joinrel->lateral_relids,
+ epq_path,
+ NIL); /* no fdw_private */
+
+ /* Add generated path into joinrel by add_path(). */
+ add_path(joinrel, (Path *) joinpath);
+
+ /* Consider pathkeys for the join relation */
+ add_paths_with_pathkeys_for_rel(root, joinrel, epq_path);
+
+ /* XXX Consider parameterized paths for the join relation */
+}
+
+/*
+ * Assess whether the aggregation, grouping and having operations can be pushed
+ * down to the foreign server. As a side effect, save information we obtain in
+ * this function to PgFdwRelationInfo of the input relation.
+ */
+static bool
+foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel,
+ Node *havingQual)
+{
+ Query *query = root->parse;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) grouped_rel->fdw_private;
+ PathTarget *grouping_target = grouped_rel->reltarget;
+ PgFdwRelationInfo *ofpinfo;
+ ListCell *lc;
+ int i;
+ List *tlist = NIL;
+
+ /* We currently don't support pushing Grouping Sets. */
+ if (query->groupingSets)
+ return false;
+
+ /* Get the fpinfo of the underlying scan relation. */
+ ofpinfo = (PgFdwRelationInfo *) fpinfo->outerrel->fdw_private;
+
+ /*
+ * If underlying scan relation has any local conditions, those conditions
+ * are required to be applied before performing aggregation. Hence the
+ * aggregate cannot be pushed down.
+ */
+ if (ofpinfo->local_conds)
+ return false;
+
+ /*
+ * Examine grouping expressions, as well as other expressions we'd need to
+ * compute, and check whether they are safe to push down to the foreign
+ * server. All GROUP BY expressions will be part of the grouping target
+ * and thus there is no need to search for them separately. Add grouping
+ * expressions into target list which will be passed to foreign server.
+ *
+ * A tricky fine point is that we must not put any expression into the
+ * target list that is just a foreign param (that is, something that
+ * deparse.c would conclude has to be sent to the foreign server). If we
+ * do, the expression will also appear in the fdw_exprs list of the plan
+ * node, and setrefs.c will get confused and decide that the fdw_exprs
+ * entry is actually a reference to the fdw_scan_tlist entry, resulting in
+ * a broken plan. Somewhat oddly, it's OK if the expression contains such
+ * a node, as long as it's not at top level; then no match is possible.
+ */
+ i = 0;
+ foreach(lc, grouping_target->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+ Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
+ ListCell *l;
+
+ /* Check whether this expression is part of GROUP BY clause */
+ if (sgref && get_sortgroupref_clause_noerr(sgref, query->groupClause))
+ {
+ TargetEntry *tle;
+
+ /*
+ * If any GROUP BY expression is not shippable, then we cannot
+ * push down aggregation to the foreign server.
+ */
+ if (!is_foreign_expr(root, grouped_rel, expr))
+ return false;
+
+ /*
+ * If it would be a foreign param, we can't put it into the tlist,
+ * so we have to fail.
+ */
+ if (is_foreign_param(root, grouped_rel, expr))
+ return false;
+
+ /*
+ * Pushable, so add to tlist. We need to create a TLE for this
+ * expression and apply the sortgroupref to it. We cannot use
+ * add_to_flat_tlist() here because that avoids making duplicate
+ * entries in the tlist. If there are duplicate entries with
+ * distinct sortgrouprefs, we have to duplicate that situation in
+ * the output tlist.
+ */
+ tle = makeTargetEntry(expr, list_length(tlist) + 1, NULL, false);
+ tle->ressortgroupref = sgref;
+ tlist = lappend(tlist, tle);
+ }
+ else
+ {
+ /*
+ * Non-grouping expression we need to compute. Can we ship it
+ * as-is to the foreign server?
+ */
+ if (is_foreign_expr(root, grouped_rel, expr) &&
+ !is_foreign_param(root, grouped_rel, expr))
+ {
+ /* Yes, so add to tlist as-is; OK to suppress duplicates */
+ tlist = add_to_flat_tlist(tlist, list_make1(expr));
+ }
+ else
+ {
+ /* Not pushable as a whole; extract its Vars and aggregates */
+ List *aggvars;
+
+ aggvars = pull_var_clause((Node *) expr,
+ PVC_INCLUDE_AGGREGATES);
+
+ /*
+ * If any aggregate expression is not shippable, then we
+ * cannot push down aggregation to the foreign server. (We
+ * don't have to check is_foreign_param, since that certainly
+ * won't return true for any such expression.)
+ */
+ if (!is_foreign_expr(root, grouped_rel, (Expr *) aggvars))
+ return false;
+
+ /*
+ * Add aggregates, if any, into the targetlist. Plain Vars
+ * outside an aggregate can be ignored, because they should be
+ * either same as some GROUP BY column or part of some GROUP
+ * BY expression. In either case, they are already part of
+ * the targetlist and thus no need to add them again. In fact
+ * including plain Vars in the tlist when they do not match a
+ * GROUP BY column would cause the foreign server to complain
+ * that the shipped query is invalid.
+ */
+ foreach(l, aggvars)
+ {
+ Expr *expr = (Expr *) lfirst(l);
+
+ if (IsA(expr, Aggref))
+ tlist = add_to_flat_tlist(tlist, list_make1(expr));
+ }
+ }
+ }
+
+ i++;
+ }
+
+ /*
+ * Classify the pushable and non-pushable HAVING clauses and save them in
+ * remote_conds and local_conds of the grouped rel's fpinfo.
+ */
+ if (havingQual)
+ {
+ ListCell *lc;
+
+ foreach(lc, (List *) havingQual)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+ RestrictInfo *rinfo;
+
+ /*
+ * Currently, the core code doesn't wrap havingQuals in
+ * RestrictInfos, so we must make our own.
+ */
+ Assert(!IsA(expr, RestrictInfo));
+ rinfo = make_restrictinfo(root,
+ expr,
+ true,
+ false,
+ false,
+ root->qual_security_level,
+ grouped_rel->relids,
+ NULL,
+ NULL);
+ if (is_foreign_expr(root, grouped_rel, expr))
+ fpinfo->remote_conds = lappend(fpinfo->remote_conds, rinfo);
+ else
+ fpinfo->local_conds = lappend(fpinfo->local_conds, rinfo);
+ }
+ }
+
+ /*
+ * If there are any local conditions, pull Vars and aggregates from them
+ * and check whether they are safe to push down.
+ */
+ if (fpinfo->local_conds)
+ {
+ List *aggvars = NIL;
+ ListCell *lc;
+
+ foreach(lc, fpinfo->local_conds)
+ {
+ RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
+
+ aggvars = list_concat(aggvars,
+ pull_var_clause((Node *) rinfo->clause,
+ PVC_INCLUDE_AGGREGATES));
+ }
+
+ foreach(lc, aggvars)
+ {
+ Expr *expr = (Expr *) lfirst(lc);
+
+ /*
+ * If aggregates within local conditions are not safe to push
+ * down, then we cannot push down the query. Vars are already
+ * part of the GROUP BY clause, which was checked above, so there
+ * is no need to check them again here. Again, we need not check
+ * is_foreign_param for a foreign aggregate.
+ */
+ if (IsA(expr, Aggref))
+ {
+ if (!is_foreign_expr(root, grouped_rel, expr))
+ return false;
+
+ tlist = add_to_flat_tlist(tlist, list_make1(expr));
+ }
+ }
+ }
+
+ /* Store generated targetlist */
+ fpinfo->grouped_tlist = tlist;
+
+ /* Safe to push down */
+ fpinfo->pushdown_safe = true;
+
+ /*
+ * Set # of retrieved rows and cached relation costs to some negative
+ * value, so that we can detect when they are set to some sensible values,
+ * during one (usually the first) of the calls to estimate_path_cost_size.
+ */
+ fpinfo->retrieved_rows = -1;
+ fpinfo->rel_startup_cost = -1;
+ fpinfo->rel_total_cost = -1;
+
+ /*
+ * Set the string describing this grouped relation to be used in EXPLAIN
+ * output of corresponding ForeignScan. Note that the decoration we add
+ * to the base relation name mustn't include any digits, or it'll confuse
+ * postgresExplainForeignScan.
+ */
+ fpinfo->relation_name = psprintf("Aggregate on (%s)",
+ ofpinfo->relation_name);
+
+ return true;
+}
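+
+/*
+ * For illustration only: when the checks above succeed, an aggregate query
+ * such as
+ *
+ *     SELECT x, count(*) FROM my_ft GROUP BY x HAVING count(*) > 1;
+ *
+ * ("my_ft" being a hypothetical foreign table with a column "x") can have
+ * its grouping, aggregation and HAVING clause evaluated entirely on the
+ * remote server.
+ */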
+
+/*
+ * postgresGetForeignUpperPaths
+ * Add paths for post-join operations like aggregation, grouping etc. if
+ * corresponding operations are safe to push down.
+ */
+static void
+postgresGetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage,
+ RelOptInfo *input_rel, RelOptInfo *output_rel,
+ void *extra)
+{
+ PgFdwRelationInfo *fpinfo;
+
+ /*
+ * If the input rel is not safe to push down, simply return, as we cannot
+ * perform any post-join operations on the foreign server.
+ */
+ if (!input_rel->fdw_private ||
+ !((PgFdwRelationInfo *) input_rel->fdw_private)->pushdown_safe)
+ return;
+
+ /* Ignore stages we don't support; and skip any duplicate calls. */
+ if ((stage != UPPERREL_GROUP_AGG &&
+ stage != UPPERREL_ORDERED &&
+ stage != UPPERREL_FINAL) ||
+ output_rel->fdw_private)
+ return;
+
+ fpinfo = (PgFdwRelationInfo *) palloc0(sizeof(PgFdwRelationInfo));
+ fpinfo->pushdown_safe = false;
+ fpinfo->stage = stage;
+ output_rel->fdw_private = fpinfo;
+
+ switch (stage)
+ {
+ case UPPERREL_GROUP_AGG:
+ add_foreign_grouping_paths(root, input_rel, output_rel,
+ (GroupPathExtraData *) extra);
+ break;
+ case UPPERREL_ORDERED:
+ add_foreign_ordered_paths(root, input_rel, output_rel);
+ break;
+ case UPPERREL_FINAL:
+ add_foreign_final_paths(root, input_rel, output_rel,
+ (FinalPathExtraData *) extra);
+ break;
+ default:
+ elog(ERROR, "unexpected upper relation: %d", (int) stage);
+ break;
+ }
+}
+
+/*
+ * add_foreign_grouping_paths
+ * Add foreign path for grouping and/or aggregation.
+ *
+ * The given input_rel represents the underlying scan. The new paths are
+ * added to the given grouped_rel.
+ */
+static void
+add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *grouped_rel,
+ GroupPathExtraData *extra)
+{
+ Query *parse = root->parse;
+ PgFdwRelationInfo *ifpinfo = input_rel->fdw_private;
+ PgFdwRelationInfo *fpinfo = grouped_rel->fdw_private;
+ ForeignPath *grouppath;
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+
+ /* Nothing to be done, if there is no grouping or aggregation required. */
+ if (!parse->groupClause && !parse->groupingSets && !parse->hasAggs &&
+ !root->hasHavingQual)
+ return;
+
+ Assert(extra->patype == PARTITIONWISE_AGGREGATE_NONE ||
+ extra->patype == PARTITIONWISE_AGGREGATE_FULL);
+
+ /* save the input_rel as outerrel in fpinfo */
+ fpinfo->outerrel = input_rel;
+
+ /*
+ * Copy foreign table, foreign server, user mapping, FDW options etc.
+ * details from the input relation's fpinfo.
+ */
+ fpinfo->table = ifpinfo->table;
+ fpinfo->server = ifpinfo->server;
+ fpinfo->user = ifpinfo->user;
+ merge_fdw_options(fpinfo, ifpinfo, NULL);
+
+ /*
+ * Assess if it is safe to push down aggregation and grouping.
+ *
+ * Use HAVING qual from extra. In case of child partition, it will have
+ * translated Vars.
+ */
+ if (!foreign_grouping_ok(root, grouped_rel, extra->havingQual))
+ return;
+
+ /*
+ * Compute the selectivity and cost of the local_conds, so we don't have
+ * to do it over again for each path. (Currently we create just a single
+ * path here, but in future it would be possible that we build more paths
+ * such as pre-sorted paths as in postgresGetForeignPaths and
+ * postgresGetForeignJoinPaths.) The best we can do for these conditions
+ * is to estimate selectivity on the basis of local statistics.
+ */
+ fpinfo->local_conds_sel = clauselist_selectivity(root,
+ fpinfo->local_conds,
+ 0,
+ JOIN_INNER,
+ NULL);
+
+ cost_qual_eval(&fpinfo->local_conds_cost, fpinfo->local_conds, root);
+
+ /* Estimate the cost of push down */
+ estimate_path_cost_size(root, grouped_rel, NIL, NIL, NULL,
+ &rows, &width, &startup_cost, &total_cost);
+
+ /* Now update this information in the fpinfo */
+ fpinfo->rows = rows;
+ fpinfo->width = width;
+ fpinfo->startup_cost = startup_cost;
+ fpinfo->total_cost = total_cost;
+
+ /* Create and add foreign path to the grouping relation. */
+ grouppath = create_foreign_upper_path(root,
+ grouped_rel,
+ grouped_rel->reltarget,
+ rows,
+ startup_cost,
+ total_cost,
+ NIL, /* no pathkeys */
+ NULL,
+ NIL); /* no fdw_private */
+
+ /* Add generated path into grouped_rel by add_path(). */
+ add_path(grouped_rel, (Path *) grouppath);
+}
+
+/*
+ * add_foreign_ordered_paths
+ * Add foreign paths for performing the final sort remotely.
+ *
+ * The given input_rel contains the source-data Paths. The new paths are
+ * added to the given ordered_rel.
+ */
+static void
+add_foreign_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *ordered_rel)
+{
+ Query *parse = root->parse;
+ PgFdwRelationInfo *ifpinfo = input_rel->fdw_private;
+ PgFdwRelationInfo *fpinfo = ordered_rel->fdw_private;
+ PgFdwPathExtraData *fpextra;
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+ List *fdw_private;
+ ForeignPath *ordered_path;
+ ListCell *lc;
+
+ /* Shouldn't get here unless the query has ORDER BY */
+ Assert(parse->sortClause);
+
+ /* We don't support cases where there are any SRFs in the targetlist */
+ if (parse->hasTargetSRFs)
+ return;
+
+ /* Save the input_rel as outerrel in fpinfo */
+ fpinfo->outerrel = input_rel;
+
+ /*
+ * Copy foreign table, foreign server, user mapping, FDW options etc.
+ * details from the input relation's fpinfo.
+ */
+ fpinfo->table = ifpinfo->table;
+ fpinfo->server = ifpinfo->server;
+ fpinfo->user = ifpinfo->user;
+ merge_fdw_options(fpinfo, ifpinfo, NULL);
+
+ /*
+ * If the input_rel is a base or join relation, we would already have
+ * considered pushing down the final sort to the remote server when
+ * creating pre-sorted foreign paths for that relation, because the
+ * query_pathkeys is set to the root->sort_pathkeys in that case (see
+ * standard_qp_callback()).
+ */
+ if (input_rel->reloptkind == RELOPT_BASEREL ||
+ input_rel->reloptkind == RELOPT_JOINREL)
+ {
+ Assert(root->query_pathkeys == root->sort_pathkeys);
+
+ /* Safe to push down if the query_pathkeys is safe to push down */
+ fpinfo->pushdown_safe = ifpinfo->qp_is_pushdown_safe;
+
+ return;
+ }
+
+ /* The input_rel should be a grouping relation */
+ Assert(input_rel->reloptkind == RELOPT_UPPER_REL &&
+ ifpinfo->stage == UPPERREL_GROUP_AGG);
+
+ /*
+ * We try to create a path below by extending a simple foreign path for
+ * the underlying grouping relation to perform the final sort remotely,
+ * which is stored into the fdw_private list of the resulting path.
+ */
+
+ /* Assess if it is safe to push down the final sort */
+ foreach(lc, root->sort_pathkeys)
+ {
+ PathKey *pathkey = (PathKey *) lfirst(lc);
+ EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
+
+ /*
+ * is_foreign_expr would detect volatile expressions as well, but
+ * checking ec_has_volatile here saves some cycles.
+ */
+ if (pathkey_ec->ec_has_volatile)
+ return;
+
+ /*
+ * Can't push down the sort if pathkey's opfamily is not shippable.
+ */
+ if (!is_shippable(pathkey->pk_opfamily, OperatorFamilyRelationId,
+ fpinfo))
+ return;
+
+ /*
+ * The EC must contain a shippable EM that is computed in input_rel's
+ * reltarget, else we can't push down the sort.
+ */
+ if (find_em_for_rel_target(root,
+ pathkey_ec,
+ input_rel) == NULL)
+ return;
+ }
+
+ /* Safe to push down */
+ fpinfo->pushdown_safe = true;
+
+ /* Construct PgFdwPathExtraData */
+ fpextra = (PgFdwPathExtraData *) palloc0(sizeof(PgFdwPathExtraData));
+ fpextra->target = root->upper_targets[UPPERREL_ORDERED];
+ fpextra->has_final_sort = true;
+
+ /* Estimate the costs of performing the final sort remotely */
+ estimate_path_cost_size(root, input_rel, NIL, root->sort_pathkeys, fpextra,
+ &rows, &width, &startup_cost, &total_cost);
+
+ /*
+ * Build the fdw_private list that will be used by postgresGetForeignPlan.
+ * Items in the list must match order in enum FdwPathPrivateIndex.
+ */
+ fdw_private = list_make2(makeInteger(true), makeInteger(false));
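+ /* i.e. final sort requested remotely = true, LIMIT needed = false */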
+
+ /* Create foreign ordering path */
+ ordered_path = create_foreign_upper_path(root,
+ input_rel,
+ root->upper_targets[UPPERREL_ORDERED],
+ rows,
+ startup_cost,
+ total_cost,
+ root->sort_pathkeys,
+ NULL, /* no extra plan */
+ fdw_private);
+
+ /* and add it to the ordered_rel */
+ add_path(ordered_rel, (Path *) ordered_path);
+}
+
+/*
+ * add_foreign_final_paths
+ * Add foreign paths for performing the final processing remotely.
+ *
+ * The given input_rel contains the source-data Paths. The new paths are
+ * added to the given final_rel.
+ */
+static void
+add_foreign_final_paths(PlannerInfo *root, RelOptInfo *input_rel,
+ RelOptInfo *final_rel,
+ FinalPathExtraData *extra)
+{
+ Query *parse = root->parse;
+ PgFdwRelationInfo *ifpinfo = (PgFdwRelationInfo *) input_rel->fdw_private;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) final_rel->fdw_private;
+ bool has_final_sort = false;
+ List *pathkeys = NIL;
+ PgFdwPathExtraData *fpextra;
+ bool save_use_remote_estimate = false;
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+ List *fdw_private;
+ ForeignPath *final_path;
+
+ /*
+ * Currently, we only support this for SELECT commands
+ */
+ if (parse->commandType != CMD_SELECT)
+ return;
+
+ /*
+ * No work if there is no FOR UPDATE/SHARE clause and if there is no need
+ * to add a LIMIT node
+ */
+ if (!parse->rowMarks && !extra->limit_needed)
+ return;
+
+ /* We don't support cases where there are any SRFs in the targetlist */
+ if (parse->hasTargetSRFs)
+ return;
+
+ /* Save the input_rel as outerrel in fpinfo */
+ fpinfo->outerrel = input_rel;
+
+ /*
+ * Copy foreign table, foreign server, user mapping, FDW options etc.
+ * details from the input relation's fpinfo.
+ */
+ fpinfo->table = ifpinfo->table;
+ fpinfo->server = ifpinfo->server;
+ fpinfo->user = ifpinfo->user;
+ merge_fdw_options(fpinfo, ifpinfo, NULL);
+
+ /*
+ * If there is no need to add a LIMIT node, there might be a ForeignPath
+ * in the input_rel's pathlist that implements all behavior of the query.
+ * Note: we would already have accounted for the query's FOR UPDATE/SHARE
+ * (if any) before we get here.
+ */
+ if (!extra->limit_needed)
+ {
+ ListCell *lc;
+
+ Assert(parse->rowMarks);
+
+ /*
+ * Grouping and aggregation are not supported with FOR UPDATE/SHARE,
+ * so the input_rel should be a base, join, or ordered relation; and
+ * if it's an ordered relation, its input relation should be a base or
+ * join relation.
+ */
+ Assert(input_rel->reloptkind == RELOPT_BASEREL ||
+ input_rel->reloptkind == RELOPT_JOINREL ||
+ (input_rel->reloptkind == RELOPT_UPPER_REL &&
+ ifpinfo->stage == UPPERREL_ORDERED &&
+ (ifpinfo->outerrel->reloptkind == RELOPT_BASEREL ||
+ ifpinfo->outerrel->reloptkind == RELOPT_JOINREL)));
+
+ foreach(lc, input_rel->pathlist)
+ {
+ Path *path = (Path *) lfirst(lc);
+
+ /*
+ * apply_scanjoin_target_to_paths() uses create_projection_path()
+ * to adjust each of its input paths if needed, whereas
+ * create_ordered_paths() uses apply_projection_to_path() to do
+ * that. So the former might have put a ProjectionPath on top of
+ * the ForeignPath; look through ProjectionPath and see if the
+ * path underneath it is ForeignPath.
+ */
+ if (IsA(path, ForeignPath) ||
+ (IsA(path, ProjectionPath) &&
+ IsA(((ProjectionPath *) path)->subpath, ForeignPath)))
+ {
+ /*
+ * Create foreign final path; this gets rid of a
+ * no-longer-needed outer plan (if any), which makes the
+ * EXPLAIN output look cleaner
+ */
+ final_path = create_foreign_upper_path(root,
+ path->parent,
+ path->pathtarget,
+ path->rows,
+ path->startup_cost,
+ path->total_cost,
+ path->pathkeys,
+ NULL, /* no extra plan */
+ NULL); /* no fdw_private */
+
+ /* and add it to the final_rel */
+ add_path(final_rel, (Path *) final_path);
+
+ /* Safe to push down */
+ fpinfo->pushdown_safe = true;
+
+ return;
+ }
+ }
+
+ /*
+ * If we get here it means no ForeignPaths; since we would already
+ * have considered pushing down all operations for the query to the
+ * remote server, give up on it.
+ */
+ return;
+ }
+
+ Assert(extra->limit_needed);
+
+ /*
+ * If the input_rel is an ordered relation, replace the input_rel with its
+ * input relation
+ */
+ if (input_rel->reloptkind == RELOPT_UPPER_REL &&
+ ifpinfo->stage == UPPERREL_ORDERED)
+ {
+ input_rel = ifpinfo->outerrel;
+ ifpinfo = (PgFdwRelationInfo *) input_rel->fdw_private;
+ has_final_sort = true;
+ pathkeys = root->sort_pathkeys;
+ }
+
+ /* The input_rel should be a base, join, or grouping relation */
+ Assert(input_rel->reloptkind == RELOPT_BASEREL ||
+ input_rel->reloptkind == RELOPT_JOINREL ||
+ (input_rel->reloptkind == RELOPT_UPPER_REL &&
+ ifpinfo->stage == UPPERREL_GROUP_AGG));
+
+ /*
+ * We try to create a path below by extending a simple foreign path for
+ * the underlying base, join, or grouping relation to perform the final
+ * sort (if has_final_sort) and the LIMIT restriction remotely, which is
+ * stored into the fdw_private list of the resulting path. (We
+ * re-estimate the costs of sorting the underlying relation, if
+ * has_final_sort.)
+ */
+
+ /*
+ * Assess if it is safe to push down the LIMIT and OFFSET to the remote
+ * server
+ */
+
+ /*
+ * If the underlying relation has any local conditions, the LIMIT/OFFSET
+ * cannot be pushed down.
+ */
+ if (ifpinfo->local_conds)
+ return;
+
+ /*
+ * Also, the LIMIT/OFFSET cannot be pushed down if their expressions are
+ * not safe to evaluate on the remote server.
+ */
+ if (!is_foreign_expr(root, input_rel, (Expr *) parse->limitOffset) ||
+ !is_foreign_expr(root, input_rel, (Expr *) parse->limitCount))
+ return;
+
+ /* Safe to push down */
+ fpinfo->pushdown_safe = true;
+
+ /* Construct PgFdwPathExtraData */
+ fpextra = (PgFdwPathExtraData *) palloc0(sizeof(PgFdwPathExtraData));
+ fpextra->target = root->upper_targets[UPPERREL_FINAL];
+ fpextra->has_final_sort = has_final_sort;
+ fpextra->has_limit = extra->limit_needed;
+ fpextra->limit_tuples = extra->limit_tuples;
+ fpextra->count_est = extra->count_est;
+ fpextra->offset_est = extra->offset_est;
+
+ /*
+ * Estimate the costs of performing the final sort and the LIMIT
+ * restriction remotely. If has_final_sort is false, we wouldn't need to
+ * execute EXPLAIN anymore if use_remote_estimate, since the costs can be
+ * roughly estimated using the costs we already have for the underlying
+ * relation, in the same way as when use_remote_estimate is false. Since
+ * it's pretty expensive to execute EXPLAIN, force use_remote_estimate to
+ * false in that case.
+ */
+ if (!fpextra->has_final_sort)
+ {
+ save_use_remote_estimate = ifpinfo->use_remote_estimate;
+ ifpinfo->use_remote_estimate = false;
+ }
+ estimate_path_cost_size(root, input_rel, NIL, pathkeys, fpextra,
+ &rows, &width, &startup_cost, &total_cost);
+ if (!fpextra->has_final_sort)
+ ifpinfo->use_remote_estimate = save_use_remote_estimate;
+
+ /*
+ * Build the fdw_private list that will be used by postgresGetForeignPlan.
+ * Items in the list must match order in enum FdwPathPrivateIndex.
+ */
+ fdw_private = list_make2(makeInteger(has_final_sort),
+ makeInteger(extra->limit_needed));
+
+ /*
+ * Create foreign final path; this gets rid of a no-longer-needed outer
+ * plan (if any), which makes the EXPLAIN output look cleaner
+ */
+ final_path = create_foreign_upper_path(root,
+ input_rel,
+ root->upper_targets[UPPERREL_FINAL],
+ rows,
+ startup_cost,
+ total_cost,
+ pathkeys,
+ NULL, /* no extra plan */
+ fdw_private);
+
+ /* and add it to the final_rel */
+ add_path(final_rel, (Path *) final_path);
+}
+
+/*
+ * postgresIsForeignPathAsyncCapable
+ * Check whether a given ForeignPath node is async-capable.
+ */
+static bool
+postgresIsForeignPathAsyncCapable(ForeignPath *path)
+{
+ RelOptInfo *rel = ((Path *) path)->parent;
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) rel->fdw_private;
+
+ return fpinfo->async_capable;
+}
+
+/*
+ * postgresForeignAsyncRequest
+ * Asynchronously request next tuple from a foreign PostgreSQL table.
+ */
+static void
+postgresForeignAsyncRequest(AsyncRequest *areq)
+{
+ produce_tuple_asynchronously(areq, true);
+}
+
+/*
+ * postgresForeignAsyncConfigureWait
+ * Configure a file descriptor event for which we wish to wait.
+ */
+static void
+postgresForeignAsyncConfigureWait(AsyncRequest *areq)
+{
+ ForeignScanState *node = (ForeignScanState *) areq->requestee;
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ AsyncRequest *pendingAreq = fsstate->conn_state->pendingAreq;
+ AppendState *requestor = (AppendState *) areq->requestor;
+ WaitEventSet *set = requestor->as_eventset;
+
+ /* This should not be called unless callback_pending */
+ Assert(areq->callback_pending);
+
+ /*
+ * If process_pending_request() has been invoked on the given request
+ * before we get here, we might have some tuples already; in which case
+ * complete the request
+ */
+ if (fsstate->next_tuple < fsstate->num_tuples)
+ {
+ complete_pending_request(areq);
+ if (areq->request_complete)
+ return;
+ Assert(areq->callback_pending);
+ }
+
+ /* We must have run out of tuples */
+ Assert(fsstate->next_tuple >= fsstate->num_tuples);
+
+ /* The core code would have registered postmaster death event */
+ Assert(GetNumRegisteredWaitEvents(set) >= 1);
+
+ /* Begin an asynchronous data fetch if not already done */
+ if (!pendingAreq)
+ fetch_more_data_begin(areq);
+ else if (pendingAreq->requestor != areq->requestor)
+ {
+ /*
+ * This is the case when the in-process request was made by another
+ * Append. Note that it might be useless to process the request,
+ * because the query might not need tuples from that Append anymore.
+ * If there are any child subplans of the same parent that are ready
+ * for new requests, skip the given request. Likewise, if there are
+ * any configured events other than the postmaster death event, skip
+ * it. Otherwise, process the in-process request, then begin a fetch
+ * to configure the event below, because we might otherwise end up
+ * with no configured events other than the postmaster death event.
+ */
+ if (!bms_is_empty(requestor->as_needrequest))
+ return;
+ if (GetNumRegisteredWaitEvents(set) > 1)
+ return;
+ process_pending_request(pendingAreq);
+ fetch_more_data_begin(areq);
+ }
+ else if (pendingAreq->requestee != areq->requestee)
+ {
+ /*
+ * This is the case when the in-process request was made by the same
+ * parent but for a different child. Since we configure only the
+ * event for the request made for that child, skip the given request.
+ */
+ return;
+ }
+ else
+ Assert(pendingAreq == areq);
+
+ AddWaitEventToSet(set, WL_SOCKET_READABLE, PQsocket(fsstate->conn),
+ NULL, areq);
+}
+
+/*
+ * postgresForeignAsyncNotify
+ * Fetch some more tuples from a file descriptor that has become ready,
+ * and request the next tuple.
+ */
+static void
+postgresForeignAsyncNotify(AsyncRequest *areq)
+{
+ ForeignScanState *node = (ForeignScanState *) areq->requestee;
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+
+ /* The core code would have initialized the callback_pending flag */
+ Assert(!areq->callback_pending);
+
+ /*
+ * If process_pending_request() has been invoked on the given request
+ * before we get here, we might have some tuples already; in which case
+ * produce the next tuple
+ */
+ if (fsstate->next_tuple < fsstate->num_tuples)
+ {
+ produce_tuple_asynchronously(areq, true);
+ return;
+ }
+
+ /* We must have run out of tuples */
+ Assert(fsstate->next_tuple >= fsstate->num_tuples);
+
+ /* The request should be currently in-process */
+ Assert(fsstate->conn_state->pendingAreq == areq);
+
+ /* On error, report the original query, not the FETCH. */
+ if (!PQconsumeInput(fsstate->conn))
+ pgfdw_report_error(ERROR, NULL, fsstate->conn, false, fsstate->query);
+
+ fetch_more_data(node);
+
+ produce_tuple_asynchronously(areq, true);
+}
+
+/*
+ * Asynchronously produce next tuple from a foreign PostgreSQL table.
+ */
+static void
+produce_tuple_asynchronously(AsyncRequest *areq, bool fetch)
+{
+ ForeignScanState *node = (ForeignScanState *) areq->requestee;
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ AsyncRequest *pendingAreq = fsstate->conn_state->pendingAreq;
+ TupleTableSlot *result;
+
+ /* This should not be called if the request is currently in-process */
+ Assert(areq != pendingAreq);
+
+ /* Fetch some more tuples, if we've run out */
+ if (fsstate->next_tuple >= fsstate->num_tuples)
+ {
+ /* No point in another fetch if we already detected EOF, though */
+ if (!fsstate->eof_reached)
+ {
+ /* Mark the request as pending for a callback */
+ ExecAsyncRequestPending(areq);
+ /* Begin another fetch if requested and if no pending request */
+ if (fetch && !pendingAreq)
+ fetch_more_data_begin(areq);
+ }
+ else
+ {
+ /* There's nothing more to do; just return a NULL pointer */
+ result = NULL;
+ /* Mark the request as complete */
+ ExecAsyncRequestDone(areq, result);
+ }
+ return;
+ }
+
+ /* Get a tuple from the ForeignScan node */
+ result = areq->requestee->ExecProcNodeReal(areq->requestee);
+ if (!TupIsNull(result))
+ {
+ /* Mark the request as complete */
+ ExecAsyncRequestDone(areq, result);
+ return;
+ }
+
+ /* We must have run out of tuples */
+ Assert(fsstate->next_tuple >= fsstate->num_tuples);
+
+ /* Fetch some more tuples, if we've not detected EOF yet */
+ if (!fsstate->eof_reached)
+ {
+ /* Mark the request as pending for a callback */
+ ExecAsyncRequestPending(areq);
+ /* Begin another fetch if requested and if no pending request */
+ if (fetch && !pendingAreq)
+ fetch_more_data_begin(areq);
+ }
+ else
+ {
+ /* There's nothing more to do; just return a NULL pointer */
+ result = NULL;
+ /* Mark the request as complete */
+ ExecAsyncRequestDone(areq, result);
+ }
+}
+
+/*
+ * Begin an asynchronous data fetch.
+ *
+ * Note: this function assumes there is no currently-in-progress asynchronous
+ * data fetch.
+ *
+ * Note: fetch_more_data must be called to fetch the result.
+ */
+static void
+fetch_more_data_begin(AsyncRequest *areq)
+{
+ ForeignScanState *node = (ForeignScanState *) areq->requestee;
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ char sql[64];
+
+ Assert(!fsstate->conn_state->pendingAreq);
+
+ /* Create the cursor synchronously. */
+ if (!fsstate->cursor_exists)
+ create_cursor(node);
+
+ /* We will send this query, but not wait for the response. */
+ snprintf(sql, sizeof(sql), "FETCH %d FROM c%u",
+ fsstate->fetch_size, fsstate->cursor_number);
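+ /* e.g. fetch_size = 100, cursor_number = 3 yields "FETCH 100 FROM c3" */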
+
+ if (!PQsendQuery(fsstate->conn, sql))
+ pgfdw_report_error(ERROR, NULL, fsstate->conn, false, fsstate->query);
+
+ /* Remember that the request is in process */
+ fsstate->conn_state->pendingAreq = areq;
+}
+
+/*
+ * Process a pending asynchronous request.
+ */
+void
+process_pending_request(AsyncRequest *areq)
+{
+ ForeignScanState *node = (ForeignScanState *) areq->requestee;
+ PgFdwScanState *fsstate PG_USED_FOR_ASSERTS_ONLY = (PgFdwScanState *) node->fdw_state;
+
+ /* The request would have been pending for a callback */
+ Assert(areq->callback_pending);
+
+ /* The request should be currently in-process */
+ Assert(fsstate->conn_state->pendingAreq == areq);
+
+ fetch_more_data(node);
+
+ /*
+ * If we didn't get any tuples, must be end of data; complete the request
+ * now. Otherwise, we postpone completing the request until we are called
+ * from postgresForeignAsyncConfigureWait()/postgresForeignAsyncNotify().
+ */
+ if (fsstate->next_tuple >= fsstate->num_tuples)
+ {
+ /* Unlike AsyncNotify, we unset callback_pending ourselves */
+ areq->callback_pending = false;
+ /* Mark the request as complete */
+ ExecAsyncRequestDone(areq, NULL);
+ /* Unlike AsyncNotify, we call ExecAsyncResponse ourselves */
+ ExecAsyncResponse(areq);
+ }
+}
+
+/*
+ * Complete a pending asynchronous request.
+ */
+static void
+complete_pending_request(AsyncRequest *areq)
+{
+ /* The request would have been pending for a callback */
+ Assert(areq->callback_pending);
+
+ /* Unlike AsyncNotify, we unset callback_pending ourselves */
+ areq->callback_pending = false;
+
+ /* We begin a fetch afterwards if necessary; don't fetch */
+ produce_tuple_asynchronously(areq, false);
+
+ /* Unlike AsyncNotify, we call ExecAsyncResponse ourselves */
+ ExecAsyncResponse(areq);
+
+ /* Also, we do instrumentation ourselves, if required */
+ if (areq->requestee->instrument)
+ InstrUpdateTupleCount(areq->requestee->instrument,
+ TupIsNull(areq->result) ? 0.0 : 1.0);
+}
+
+/*
+ * Create a tuple from the specified row of the PGresult.
+ *
+ * rel is the local representation of the foreign table, attinmeta is
+ * conversion data for the rel's tupdesc, and retrieved_attrs is an
+ * integer list of the table column numbers present in the PGresult.
+ * fsstate is the ForeignScan plan node's execution state.
+ * temp_context is a working context that can be reset after each tuple.
+ *
+ * Note: either rel or fsstate, but not both, can be NULL. rel is NULL
+ * if we're processing a remote join, while fsstate is NULL in a non-query
+ * context such as ANALYZE, or if we're processing a non-scan query node.
+ */
+static HeapTuple
+make_tuple_from_result_row(PGresult *res,
+ int row,
+ Relation rel,
+ AttInMetadata *attinmeta,
+ List *retrieved_attrs,
+ ForeignScanState *fsstate,
+ MemoryContext temp_context)
+{
+ HeapTuple tuple;
+ TupleDesc tupdesc;
+ Datum *values;
+ bool *nulls;
+ ItemPointer ctid = NULL;
+ ConversionLocation errpos;
+ ErrorContextCallback errcallback;
+ MemoryContext oldcontext;
+ ListCell *lc;
+ int j;
+
+ Assert(row < PQntuples(res));
+
+ /*
+ * Do the following work in a temp context that we reset after each tuple.
+ * This cleans up not only the data we have direct access to, but any
+ * cruft the I/O functions might leak.
+ */
+ oldcontext = MemoryContextSwitchTo(temp_context);
+
+ /*
+ * Get the tuple descriptor for the row. Use the rel's tupdesc if rel is
+ * provided, otherwise look to the scan node's ScanTupleSlot.
+ */
+ if (rel)
+ tupdesc = RelationGetDescr(rel);
+ else
+ {
+ Assert(fsstate);
+ tupdesc = fsstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor;
+ }
+
+ values = (Datum *) palloc0(tupdesc->natts * sizeof(Datum));
+ nulls = (bool *) palloc(tupdesc->natts * sizeof(bool));
+ /* Initialize to nulls for any columns not present in result */
+ memset(nulls, true, tupdesc->natts * sizeof(bool));
+
+ /*
+ * Set up and install a callback to report where a conversion error occurs.
+ */
+ errpos.cur_attno = 0;
+ errpos.rel = rel;
+ errpos.fsstate = fsstate;
+ errcallback.callback = conversion_error_callback;
+ errcallback.arg = (void *) &errpos;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /*
+ * i indexes columns in the relation, j indexes columns in the PGresult.
+ */
+ j = 0;
+ foreach(lc, retrieved_attrs)
+ {
+ int i = lfirst_int(lc);
+ char *valstr;
+
+ /* fetch next column's textual value */
+ if (PQgetisnull(res, row, j))
+ valstr = NULL;
+ else
+ valstr = PQgetvalue(res, row, j);
+
+ /*
+ * convert value to internal representation
+ *
+ * Note: we ignore system columns other than ctid and oid in result
+ */
+ errpos.cur_attno = i;
+ if (i > 0)
+ {
+ /* ordinary column */
+ Assert(i <= tupdesc->natts);
+ nulls[i - 1] = (valstr == NULL);
+ /* Apply the input function even to nulls, to support domains */
+ values[i - 1] = InputFunctionCall(&attinmeta->attinfuncs[i - 1],
+ valstr,
+ attinmeta->attioparams[i - 1],
+ attinmeta->atttypmods[i - 1]);
+ }
+ else if (i == SelfItemPointerAttributeNumber)
+ {
+ /* ctid */
+ if (valstr != NULL)
+ {
+ Datum datum;
+
+ datum = DirectFunctionCall1(tidin, CStringGetDatum(valstr));
+ ctid = (ItemPointer) DatumGetPointer(datum);
+ }
+ }
+ errpos.cur_attno = 0;
+
+ j++;
+ }
+
+ /* Uninstall error context callback. */
+ error_context_stack = errcallback.previous;
+
+ /*
+ * Check we got the expected number of columns. Note: j == 0 and
+ * PQnfields == 1 is expected, since deparse emits a NULL if no columns.
+ */
+ if (j > 0 && j != PQnfields(res))
+ elog(ERROR, "remote query result does not match the foreign table");
+
+ /*
+ * Build the result tuple in caller's memory context.
+ */
+ MemoryContextSwitchTo(oldcontext);
+
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+
+ /*
+ * If we have a CTID to return, install it in both t_self and t_ctid.
+ * t_self is the normal place, but if the tuple is converted to a
+ * composite Datum, t_self will be lost; setting t_ctid allows CTID to be
+ * preserved during EvalPlanQual re-evaluations (see ROW_MARK_COPY code).
+ */
+ if (ctid)
+ tuple->t_self = tuple->t_data->t_ctid = *ctid;
+
+ /*
+ * Stomp on the xmin, xmax, and cmin fields from the tuple created by
+ * heap_form_tuple. heap_form_tuple actually creates the tuple with
+ * DatumTupleFields, not HeapTupleFields, but the executor expects
+ * HeapTupleFields and will happily extract system columns on that
+ * assumption. If we don't do this then, for example, the tuple length
+ * ends up in the xmin field, which isn't what we want.
+ */
+ HeapTupleHeaderSetXmax(tuple->t_data, InvalidTransactionId);
+ HeapTupleHeaderSetXmin(tuple->t_data, InvalidTransactionId);
+ HeapTupleHeaderSetCmin(tuple->t_data, InvalidTransactionId);
+
+ /* Clean up */
+ MemoryContextReset(temp_context);
+
+ return tuple;
+}
+
+/*
+ * Callback function which is called when an error occurs during column
+ * value conversion. It reports the names of the column and relation.
+ *
+ * Note that this function mustn't do any catalog lookups, since we are in
+ * an already-failed transaction. Fortunately, we can get the needed info
+ * from the relation or the query's rangetable instead.
+ */
+static void
+conversion_error_callback(void *arg)
+{
+ ConversionLocation *errpos = (ConversionLocation *) arg;
+ Relation rel = errpos->rel;
+ ForeignScanState *fsstate = errpos->fsstate;
+ const char *attname = NULL;
+ const char *relname = NULL;
+ bool is_wholerow = false;
+
+ /*
+ * If we're in a scan node, always use aliases from the rangetable, for
+ * consistency between the simple-relation and remote-join cases. Look at
+ * the relation's tupdesc only if we're not in a scan node.
+ */
+ if (fsstate)
+ {
+ /* ForeignScan case */
+ ForeignScan *fsplan = castNode(ForeignScan, fsstate->ss.ps.plan);
+ int varno = 0;
+ AttrNumber colno = 0;
+
+ if (fsplan->scan.scanrelid > 0)
+ {
+ /* error occurred in a scan against a foreign table */
+ varno = fsplan->scan.scanrelid;
+ colno = errpos->cur_attno;
+ }
+ else
+ {
+ /* error occurred in a scan against a foreign join */
+ TargetEntry *tle;
+
+ tle = list_nth_node(TargetEntry, fsplan->fdw_scan_tlist,
+ errpos->cur_attno - 1);
+
+ /*
+ * Target list can have Vars and expressions. For Vars, we can
+ * get some information, however for expressions we can't. Thus
+ * for expressions, just show generic context message.
+ */
+ if (IsA(tle->expr, Var))
+ {
+ Var *var = (Var *) tle->expr;
+
+ varno = var->varno;
+ colno = var->varattno;
+ }
+ }
+
+ if (varno > 0)
+ {
+ EState *estate = fsstate->ss.ps.state;
+ RangeTblEntry *rte = exec_rt_fetch(varno, estate);
+
+ relname = rte->eref->aliasname;
+
+ if (colno == 0)
+ is_wholerow = true;
+ else if (colno > 0 && colno <= list_length(rte->eref->colnames))
+ attname = strVal(list_nth(rte->eref->colnames, colno - 1));
+ else if (colno == SelfItemPointerAttributeNumber)
+ attname = "ctid";
+ }
+ }
+ else if (rel)
+ {
+ /* Non-ForeignScan case (we should always have a rel here) */
+ TupleDesc tupdesc = RelationGetDescr(rel);
+
+ relname = RelationGetRelationName(rel);
+ if (errpos->cur_attno > 0 && errpos->cur_attno <= tupdesc->natts)
+ {
+ Form_pg_attribute attr = TupleDescAttr(tupdesc,
+ errpos->cur_attno - 1);
+
+ attname = NameStr(attr->attname);
+ }
+ else if (errpos->cur_attno == SelfItemPointerAttributeNumber)
+ attname = "ctid";
+ }
+
+ if (relname && is_wholerow)
+ errcontext("whole-row reference to foreign table \"%s\"", relname);
+ else if (relname && attname)
+ errcontext("column \"%s\" of foreign table \"%s\"", attname, relname);
+ else
+ errcontext("processing expression at position %d in select list",
+ errpos->cur_attno);
+}
+
+/*
+ * Given an EquivalenceClass and a foreign relation, find an EC member
+ * that can be used to sort the relation remotely according to a pathkey
+ * using this EC.
+ *
+ * If there is more than one suitable candidate, return an arbitrary
+ * one of them. If there is none, return NULL.
+ *
+ * This checks that the EC member expression uses only Vars from the given
+ * rel and is shippable. Caller must separately verify that the pathkey's
+ * ordering operator is shippable.
+ */
+EquivalenceMember *
+find_em_for_rel(PlannerInfo *root, EquivalenceClass *ec, RelOptInfo *rel)
+{
+ ListCell *lc;
+
+ foreach(lc, ec->ec_members)
+ {
+ EquivalenceMember *em = (EquivalenceMember *) lfirst(lc);
+
+ /*
+ * Note we require !bms_is_empty, else we'd accept constant
+ * expressions which are not suitable for the purpose.
+ */
+ if (bms_is_subset(em->em_relids, rel->relids) &&
+ !bms_is_empty(em->em_relids) &&
+ is_foreign_expr(root, rel, em->em_expr))
+ return em;
+ }
+
+ return NULL;
+}
+
+/*
+ * Find an EquivalenceClass member that is to be computed as a sort column
+ * in the given rel's reltarget, and is shippable.
+ *
+ * If there is more than one suitable candidate, return an arbitrary
+ * one of them. If there is none, return NULL.
+ *
+ * This checks that the EC member expression uses only Vars from the given
+ * rel and is shippable. Caller must separately verify that the pathkey's
+ * ordering operator is shippable.
+ */
+EquivalenceMember *
+find_em_for_rel_target(PlannerInfo *root, EquivalenceClass *ec,
+ RelOptInfo *rel)
+{
+ PathTarget *target = rel->reltarget;
+ ListCell *lc1;
+ int i;
+
+ i = 0;
+ foreach(lc1, target->exprs)
+ {
+ Expr *expr = (Expr *) lfirst(lc1);
+ Index sgref = get_pathtarget_sortgroupref(target, i);
+ ListCell *lc2;
+
+ /* Ignore non-sort expressions */
+ if (sgref == 0 ||
+ get_sortgroupref_clause_noerr(sgref,
+ root->parse->sortClause) == NULL)
+ {
+ i++;
+ continue;
+ }
+
+ /* We ignore binary-compatible relabeling on both ends */
+ while (expr && IsA(expr, RelabelType))
+ expr = ((RelabelType *) expr)->arg;
+
+ /* Locate an EquivalenceClass member matching this expr, if any */
+ foreach(lc2, ec->ec_members)
+ {
+ EquivalenceMember *em = (EquivalenceMember *) lfirst(lc2);
+ Expr *em_expr;
+
+ /* Don't match constants */
+ if (em->em_is_const)
+ continue;
+
+ /* Ignore child members */
+ if (em->em_is_child)
+ continue;
+
+ /* Match if same expression (after stripping relabel) */
+ em_expr = em->em_expr;
+ while (em_expr && IsA(em_expr, RelabelType))
+ em_expr = ((RelabelType *) em_expr)->arg;
+
+ if (!equal(em_expr, expr))
+ continue;
+
+ /* Check that expression (including relabels!) is shippable */
+ if (is_foreign_expr(root, rel, em->em_expr))
+ return em;
+ }
+
+ i++;
+ }
+
+ return NULL;
+}
+
+/*
+ * Determine batch size for a given foreign table. The option specified for
+ * a table has precedence.
+ */
+static int
+get_batch_size_option(Relation rel)
+{
+ Oid foreigntableid = RelationGetRelid(rel);
+ ForeignTable *table;
+ ForeignServer *server;
+ List *options;
+ ListCell *lc;
+
+ /* we use 1 by default, which means "no batching" */
+ int batch_size = 1;
+
+ /*
+ * Load options for table and server. We append server options after table
+ * options, because table options take precedence.
+ */
+ table = GetForeignTable(foreigntableid);
+ server = GetForeignServer(table->serverid);
+
+ options = NIL;
+ options = list_concat(options, table->options);
+ options = list_concat(options, server->options);
+
+ /* See if either table or server specifies batch_size. */
+ foreach(lc, options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "batch_size") == 0)
+ {
+ (void) parse_int(defGetString(def), &batch_size, 0, NULL);
+ break;
+ }
+ }
+
+ return batch_size;
+}
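The batch_size option read here controls how many rows postgres_fdw ships per INSERT when bulk-loading, with the table-level setting overriding the server-level one and a default of 1 (no batching) when neither is set. A minimal SQL sketch of the precedence, using the loopback server and ft1 foreign table created in the regression script below; the values are only illustrative:

    ALTER SERVER loopback OPTIONS (ADD batch_size '10');      -- server-wide default
    ALTER FOREIGN TABLE ft1 OPTIONS (ADD batch_size '100');   -- per-table override wins for ft1
    -- With neither option set, get_batch_size_option() falls back to 1, i.e. no batching.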
diff --git a/contrib/postgres_fdw/postgres_fdw.control b/contrib/postgres_fdw/postgres_fdw.control
new file mode 100644
index 0000000..d489382
--- /dev/null
+++ b/contrib/postgres_fdw/postgres_fdw.control
@@ -0,0 +1,5 @@
+# postgres_fdw extension
+comment = 'foreign-data wrapper for remote PostgreSQL servers'
+default_version = '1.1'
+module_pathname = '$libdir/postgres_fdw'
+relocatable = true
diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h
new file mode 100644
index 0000000..bd4e592
--- /dev/null
+++ b/contrib/postgres_fdw/postgres_fdw.h
@@ -0,0 +1,238 @@
+/*-------------------------------------------------------------------------
+ *
+ * postgres_fdw.h
+ * Foreign-data wrapper for remote PostgreSQL servers
+ *
+ * Portions Copyright (c) 2012-2021, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/postgres_fdw.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef POSTGRES_FDW_H
+#define POSTGRES_FDW_H
+
+#include "foreign/foreign.h"
+#include "lib/stringinfo.h"
+#include "libpq-fe.h"
+#include "nodes/execnodes.h"
+#include "nodes/pathnodes.h"
+#include "utils/relcache.h"
+
+/*
+ * FDW-specific planner information kept in RelOptInfo.fdw_private for a
+ * postgres_fdw foreign table. For a baserel, this struct is created by
+ * postgresGetForeignRelSize, although some fields are not filled till later.
+ * postgresGetForeignJoinPaths creates it for a joinrel, and
+ * postgresGetForeignUpperPaths creates it for an upperrel.
+ */
+typedef struct PgFdwRelationInfo
+{
+ /*
+ * True means that the relation can be pushed down. Always true for simple
+ * foreign scan.
+ */
+ bool pushdown_safe;
+
+ /*
+ * Restriction clauses, divided into safe and unsafe to pushdown subsets.
+ * All entries in these lists should have RestrictInfo wrappers; that
+ * improves efficiency of selectivity and cost estimation.
+ */
+ List *remote_conds;
+ List *local_conds;
+
+ /* Actual remote restriction clauses for scan (sans RestrictInfos) */
+ List *final_remote_exprs;
+
+ /* Bitmap of attr numbers we need to fetch from the remote server. */
+ Bitmapset *attrs_used;
+
+ /* True means that the query_pathkeys is safe to push down */
+ bool qp_is_pushdown_safe;
+
+ /* Cost and selectivity of local_conds. */
+ QualCost local_conds_cost;
+ Selectivity local_conds_sel;
+
+ /* Selectivity of join conditions */
+ Selectivity joinclause_sel;
+
+ /* Estimated size and cost for a scan, join, or grouping/aggregation. */
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+
+ /*
+ * Estimated number of rows fetched from the foreign server, and costs
+ * excluding costs for transferring those rows from the foreign server.
+ * These are only used by estimate_path_cost_size().
+ */
+ double retrieved_rows;
+ Cost rel_startup_cost;
+ Cost rel_total_cost;
+
+ /* Options extracted from catalogs. */
+ bool use_remote_estimate;
+ Cost fdw_startup_cost;
+ Cost fdw_tuple_cost;
+ List *shippable_extensions; /* OIDs of shippable extensions */
+ bool async_capable;
+
+ /* Cached catalog information. */
+ ForeignTable *table;
+ ForeignServer *server;
+ UserMapping *user; /* only set in use_remote_estimate mode */
+
+ int fetch_size; /* fetch size for this remote table */
+
+ /*
+ * Name of the relation, for use while EXPLAINing ForeignScan. It is used
+ * for join and upper relations but is set for all relations. For a base
+ * relation, this is really just the RT index as a string; we convert that
+ * while producing EXPLAIN output. For join and upper relations, the name
+ * indicates which base foreign tables are included and the join type or
+ * aggregation type used.
+ */
+ char *relation_name;
+
+ /* Join information */
+ RelOptInfo *outerrel;
+ RelOptInfo *innerrel;
+ JoinType jointype;
+ /* joinclauses contains only JOIN/ON conditions for an outer join */
+ List *joinclauses; /* List of RestrictInfo */
+
+ /* Upper relation information */
+ UpperRelationKind stage;
+
+ /* Grouping information */
+ List *grouped_tlist;
+
+ /* Subquery information */
+ bool make_outerrel_subquery; /* do we deparse outerrel as a
+ * subquery? */
+ bool make_innerrel_subquery; /* do we deparse innerrel as a
+ * subquery? */
+ Relids lower_subquery_rels; /* all relids appearing in lower
+ * subqueries */
+
+ /*
+ * Index of the relation. It is used to create an alias to a subquery
+ * representing the relation.
+ */
+ int relation_index;
+} PgFdwRelationInfo;
+
+/*
+ * Extra control information relating to a connection.
+ */
+typedef struct PgFdwConnState
+{
+ AsyncRequest *pendingAreq; /* pending async request */
+} PgFdwConnState;
+
+/* in postgres_fdw.c */
+extern int set_transmission_modes(void);
+extern void reset_transmission_modes(int nestlevel);
+extern void process_pending_request(AsyncRequest *areq);
+
+/* in connection.c */
+extern PGconn *GetConnection(UserMapping *user, bool will_prep_stmt,
+ PgFdwConnState **state);
+extern void ReleaseConnection(PGconn *conn);
+extern unsigned int GetCursorNumber(PGconn *conn);
+extern unsigned int GetPrepStmtNumber(PGconn *conn);
+extern void do_sql_command(PGconn *conn, const char *sql);
+extern PGresult *pgfdw_get_result(PGconn *conn, const char *query);
+extern PGresult *pgfdw_exec_query(PGconn *conn, const char *query,
+ PgFdwConnState *state);
+extern void pgfdw_report_error(int elevel, PGresult *res, PGconn *conn,
+ bool clear, const char *sql);
+
+/* in option.c */
+extern int ExtractConnectionOptions(List *defelems,
+ const char **keywords,
+ const char **values);
+extern List *ExtractExtensionList(const char *extensionsString,
+ bool warnOnMissing);
+
+/* in deparse.c */
+extern void classifyConditions(PlannerInfo *root,
+ RelOptInfo *baserel,
+ List *input_conds,
+ List **remote_conds,
+ List **local_conds);
+extern bool is_foreign_expr(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Expr *expr);
+extern bool is_foreign_param(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Expr *expr);
+extern bool is_foreign_pathkey(PlannerInfo *root,
+ RelOptInfo *baserel,
+ PathKey *pathkey);
+extern void deparseInsertSql(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ List *targetAttrs, bool doNothing,
+ List *withCheckOptionList, List *returningList,
+ List **retrieved_attrs, int *values_end_len);
+extern void rebuildInsertSql(StringInfo buf, Relation rel,
+ char *orig_query, List *target_attrs,
+ int values_end_len, int num_params,
+ int num_rows);
+extern void deparseUpdateSql(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ List *targetAttrs,
+ List *withCheckOptionList, List *returningList,
+ List **retrieved_attrs);
+extern void deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ RelOptInfo *foreignrel,
+ List *targetlist,
+ List *targetAttrs,
+ List *remote_conds,
+ List **params_list,
+ List *returningList,
+ List **retrieved_attrs);
+extern void deparseDeleteSql(StringInfo buf, RangeTblEntry *rte,
+ Index rtindex, Relation rel,
+ List *returningList,
+ List **retrieved_attrs);
+extern void deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ RelOptInfo *foreignrel,
+ List *remote_conds,
+ List **params_list,
+ List *returningList,
+ List **retrieved_attrs);
+extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel);
+extern void deparseAnalyzeSql(StringInfo buf, Relation rel,
+ List **retrieved_attrs);
+extern void deparseTruncateSql(StringInfo buf,
+ List *rels,
+ DropBehavior behavior,
+ bool restart_seqs);
+extern void deparseStringLiteral(StringInfo buf, const char *val);
+extern EquivalenceMember *find_em_for_rel(PlannerInfo *root,
+ EquivalenceClass *ec,
+ RelOptInfo *rel);
+extern EquivalenceMember *find_em_for_rel_target(PlannerInfo *root,
+ EquivalenceClass *ec,
+ RelOptInfo *rel);
+extern List *build_tlist_to_deparse(RelOptInfo *foreignrel);
+extern void deparseSelectStmtForRel(StringInfo buf, PlannerInfo *root,
+ RelOptInfo *foreignrel, List *tlist,
+ List *remote_conds, List *pathkeys,
+ bool has_final_sort, bool has_limit,
+ bool is_subquery,
+ List **retrieved_attrs, List **params_list);
+extern const char *get_jointype_name(JoinType jointype);
+
+/* in shippable.c */
+extern bool is_builtin(Oid objectId);
+extern bool is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo);
+
+#endif /* POSTGRES_FDW_H */
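The "Options extracted from catalogs" fields above (use_remote_estimate, fdw_startup_cost, fdw_tuple_cost, shippable_extensions, fetch_size, async_capable) are filled in from FDW options attached to the foreign server and, for the per-table ones, optionally overridden on the foreign table. A short SQL sketch of how these knobs are normally supplied; the server and table names follow the regression script below and the values are only illustrative:

    ALTER SERVER loopback OPTIONS (ADD use_remote_estimate 'true',
                                   ADD fdw_startup_cost '25.0',
                                   ADD fdw_tuple_cost '0.02',
                                   ADD fetch_size '500');
    -- use_remote_estimate and fetch_size can also be overridden per table:
    ALTER FOREIGN TABLE ft2 OPTIONS (ADD use_remote_estimate 'false',
                                     ADD fetch_size '100');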
diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c
new file mode 100644
index 0000000..b27f82e
--- /dev/null
+++ b/contrib/postgres_fdw/shippable.c
@@ -0,0 +1,211 @@
+/*-------------------------------------------------------------------------
+ *
+ * shippable.c
+ * Determine which database objects are shippable to a remote server.
+ *
+ * We need to determine whether particular functions, operators, and indeed
+ * data types are shippable to a remote server for execution --- that is,
+ * do they exist and have the same behavior remotely as they do locally?
+ * Built-in objects are generally considered shippable. Other objects can
+ * be shipped if they are declared as such by the user.
+ *
+ * Note: there are additional filter rules that prevent shipping mutable
+ * functions or functions using nonportable collations. Those considerations
+ * need not be accounted for here.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/shippable.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/transam.h"
+#include "catalog/dependency.h"
+#include "postgres_fdw.h"
+#include "utils/hsearch.h"
+#include "utils/inval.h"
+#include "utils/syscache.h"
+
+/* Hash table for caching the results of shippability lookups */
+static HTAB *ShippableCacheHash = NULL;
+
+/*
+ * Hash key for shippability lookups. We include the FDW server OID because
+ * decisions may differ per-server. Otherwise, objects are identified by
+ * their (local!) OID and catalog OID.
+ */
+typedef struct
+{
+ /* XXX we assume this struct contains no padding bytes */
+ Oid objid; /* function/operator/type OID */
+ Oid classid; /* OID of its catalog (pg_proc, etc) */
+ Oid serverid; /* FDW server we are concerned with */
+} ShippableCacheKey;
+
+typedef struct
+{
+ ShippableCacheKey key; /* hash key - must be first */
+ bool shippable;
+} ShippableCacheEntry;
+
+
+/*
+ * Flush cache entries when pg_foreign_server is updated.
+ *
+ * We do this because of the possibility of ALTER SERVER being used to change
+ * a server's extensions option. We do not currently bother to check whether
+ * objects' extension membership changes once a shippability decision has been
+ * made for them, however.
+ */
+static void
+InvalidateShippableCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
+{
+ HASH_SEQ_STATUS status;
+ ShippableCacheEntry *entry;
+
+ /*
+ * In principle we could flush only cache entries relating to the
+ * pg_foreign_server entry being outdated; but that would be more
+ * complicated, and it's probably not worth the trouble. So for now, just
+ * flush all entries.
+ */
+ hash_seq_init(&status, ShippableCacheHash);
+ while ((entry = (ShippableCacheEntry *) hash_seq_search(&status)) != NULL)
+ {
+ if (hash_search(ShippableCacheHash,
+ (void *) &entry->key,
+ HASH_REMOVE,
+ NULL) == NULL)
+ elog(ERROR, "hash table corrupted");
+ }
+}
+
+/*
+ * Initialize the backend-lifespan cache of shippability decisions.
+ */
+static void
+InitializeShippableCache(void)
+{
+ HASHCTL ctl;
+
+ /* Create the hash table. */
+ ctl.keysize = sizeof(ShippableCacheKey);
+ ctl.entrysize = sizeof(ShippableCacheEntry);
+ ShippableCacheHash =
+ hash_create("Shippability cache", 256, &ctl, HASH_ELEM | HASH_BLOBS);
+
+ /* Set up invalidation callback on pg_foreign_server. */
+ CacheRegisterSyscacheCallback(FOREIGNSERVEROID,
+ InvalidateShippableCacheCallback,
+ (Datum) 0);
+}
+
+/*
+ * Returns true if given object (operator/function/type) is shippable
+ * according to the server options.
+ *
+ * Right now "shippability" is exclusively a function of whether the object
+ * belongs to an extension declared by the user. In the future we could
+ * additionally have a list of functions/operators declared one at a time.
+ */
+static bool
+lookup_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
+{
+ Oid extensionOid;
+
+ /*
+ * Is object a member of some extension? (Note: this is a fairly
+ * expensive lookup, which is why we try to cache the results.)
+ */
+ extensionOid = getExtensionOfObject(classId, objectId);
+
+ /* If so, is that extension in fpinfo->shippable_extensions? */
+ if (OidIsValid(extensionOid) &&
+ list_member_oid(fpinfo->shippable_extensions, extensionOid))
+ return true;
+
+ return false;
+}
+
+/*
+ * Return true if given object is one of PostgreSQL's built-in objects.
+ *
+ * We use FirstGenbkiObjectId as the cutoff, so that we only consider
+ * objects with hand-assigned OIDs to be "built in", not for instance any
+ * function or type defined in the information_schema.
+ *
+ * Our constraints for dealing with types are tighter than they are for
+ * functions or operators: we want to accept only types that are in pg_catalog,
+ * else deparse_type_name might incorrectly fail to schema-qualify their names.
+ * Thus we must exclude information_schema types.
+ *
+ * XXX there is a problem with this, which is that the set of built-in
+ * objects expands over time. Something that is built-in to us might not
+ * be known to the remote server, if it's of an older version. But keeping
+ * track of that would be a huge exercise.
+ */
+bool
+is_builtin(Oid objectId)
+{
+ return (objectId < FirstGenbkiObjectId);
+}
+
+/*
+ * is_shippable
+ * Is this object (function/operator/type) shippable to foreign server?
+ */
+bool
+is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
+{
+ ShippableCacheKey key;
+ ShippableCacheEntry *entry;
+
+ /* Built-in objects are presumed shippable. */
+ if (is_builtin(objectId))
+ return true;
+
+ /* Otherwise, give up if user hasn't specified any shippable extensions. */
+ if (fpinfo->shippable_extensions == NIL)
+ return false;
+
+ /* Initialize cache if first time through. */
+ if (!ShippableCacheHash)
+ InitializeShippableCache();
+
+ /* Set up cache hash key */
+ key.objid = objectId;
+ key.classid = classId;
+ key.serverid = fpinfo->server->serverid;
+
+ /* See if we already cached the result. */
+ entry = (ShippableCacheEntry *)
+ hash_search(ShippableCacheHash,
+ (void *) &key,
+ HASH_FIND,
+ NULL);
+
+ if (!entry)
+ {
+ /* Not found in cache, so perform shippability lookup. */
+ bool shippable = lookup_shippable(objectId, classId, fpinfo);
+
+ /*
+ * Don't create a new hash entry until *after* we have the shippable
+ * result in hand, as the underlying catalog lookups might trigger a
+ * cache invalidation.
+ */
+ entry = (ShippableCacheEntry *)
+ hash_search(ShippableCacheHash,
+ (void *) &key,
+ HASH_ENTER,
+ NULL);
+
+ entry->shippable = shippable;
+ }
+
+ return entry->shippable;
+}
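In user-facing terms, shippability of non-built-in objects is controlled entirely by the server's extensions option, which is what populates the shippable_extensions list consulted by lookup_shippable(). A minimal sketch, using the contrib cube extension purely as an example (the regression script below exercises the same mechanism by adding test objects to the postgres_fdw extension itself):

    CREATE EXTENSION cube;
    ALTER SERVER loopback OPTIONS (ADD extensions 'cube');
    -- Quals using cube's functions and operators may now be sent to the remote
    -- server; the same extension must of course be installed there as well.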
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
new file mode 100644
index 0000000..2378194
--- /dev/null
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -0,0 +1,3466 @@
+-- ===================================================================
+-- create FDW objects
+-- ===================================================================
+
+CREATE EXTENSION postgres_fdw;
+
+CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw;
+DO $d$
+ BEGIN
+ EXECUTE $$CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (dbname '$$||current_database()||$$',
+ port '$$||current_setting('port')||$$'
+ )$$;
+ EXECUTE $$CREATE SERVER loopback2 FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (dbname '$$||current_database()||$$',
+ port '$$||current_setting('port')||$$'
+ )$$;
+ EXECUTE $$CREATE SERVER loopback3 FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (dbname '$$||current_database()||$$',
+ port '$$||current_setting('port')||$$'
+ )$$;
+ END;
+$d$;
+
+CREATE USER MAPPING FOR public SERVER testserver1
+ OPTIONS (user 'value', password 'value');
+CREATE USER MAPPING FOR CURRENT_USER SERVER loopback;
+CREATE USER MAPPING FOR CURRENT_USER SERVER loopback2;
+CREATE USER MAPPING FOR public SERVER loopback3;
+
+-- ===================================================================
+-- create objects used through FDW loopback server
+-- ===================================================================
+CREATE TYPE user_enum AS ENUM ('foo', 'bar', 'buz');
+CREATE SCHEMA "S 1";
+CREATE TABLE "S 1"."T 1" (
+ "C 1" int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10),
+ c8 user_enum,
+ CONSTRAINT t1_pkey PRIMARY KEY ("C 1")
+);
+CREATE TABLE "S 1"."T 2" (
+ c1 int NOT NULL,
+ c2 text,
+ CONSTRAINT t2_pkey PRIMARY KEY (c1)
+);
+CREATE TABLE "S 1"."T 3" (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ CONSTRAINT t3_pkey PRIMARY KEY (c1)
+);
+CREATE TABLE "S 1"."T 4" (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ CONSTRAINT t4_pkey PRIMARY KEY (c1)
+);
+
+-- Disable autovacuum for these tables to avoid unexpected effects of background vacuuming
+ALTER TABLE "S 1"."T 1" SET (autovacuum_enabled = 'false');
+ALTER TABLE "S 1"."T 2" SET (autovacuum_enabled = 'false');
+ALTER TABLE "S 1"."T 3" SET (autovacuum_enabled = 'false');
+ALTER TABLE "S 1"."T 4" SET (autovacuum_enabled = 'false');
+
+INSERT INTO "S 1"."T 1"
+ SELECT id,
+ id % 10,
+ to_char(id, 'FM00000'),
+ '1970-01-01'::timestamptz + ((id % 100) || ' days')::interval,
+ '1970-01-01'::timestamp + ((id % 100) || ' days')::interval,
+ id % 10,
+ id % 10,
+ 'foo'::user_enum
+ FROM generate_series(1, 1000) id;
+INSERT INTO "S 1"."T 2"
+ SELECT id,
+ 'AAA' || to_char(id, 'FM000')
+ FROM generate_series(1, 100) id;
+INSERT INTO "S 1"."T 3"
+ SELECT id,
+ id + 1,
+ 'AAA' || to_char(id, 'FM000')
+ FROM generate_series(1, 100) id;
+DELETE FROM "S 1"."T 3" WHERE c1 % 2 != 0; -- delete for outer join tests
+INSERT INTO "S 1"."T 4"
+ SELECT id,
+ id + 1,
+ 'AAA' || to_char(id, 'FM000')
+ FROM generate_series(1, 100) id;
+DELETE FROM "S 1"."T 4" WHERE c1 % 3 != 0; -- delete for outer join tests
+
+ANALYZE "S 1"."T 1";
+ANALYZE "S 1"."T 2";
+ANALYZE "S 1"."T 3";
+ANALYZE "S 1"."T 4";
+
+-- ===================================================================
+-- create foreign tables
+-- ===================================================================
+CREATE FOREIGN TABLE ft1 (
+ c0 int,
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10) default 'ft1',
+ c8 user_enum
+) SERVER loopback;
+ALTER FOREIGN TABLE ft1 DROP COLUMN c0;
+
+CREATE FOREIGN TABLE ft2 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ cx int,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10) default 'ft2',
+ c8 user_enum
+) SERVER loopback;
+ALTER FOREIGN TABLE ft2 DROP COLUMN cx;
+
+CREATE FOREIGN TABLE ft4 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text
+) SERVER loopback OPTIONS (schema_name 'S 1', table_name 'T 3');
+
+CREATE FOREIGN TABLE ft5 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text
+) SERVER loopback OPTIONS (schema_name 'S 1', table_name 'T 4');
+
+CREATE FOREIGN TABLE ft6 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text
+) SERVER loopback2 OPTIONS (schema_name 'S 1', table_name 'T 4');
+
+CREATE FOREIGN TABLE ft7 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text
+) SERVER loopback3 OPTIONS (schema_name 'S 1', table_name 'T 4');
+
+-- ===================================================================
+-- tests for validator
+-- ===================================================================
+-- requiressl and some other parameters are omitted because
+-- valid values for them depend on configure options
+ALTER SERVER testserver1 OPTIONS (
+ use_remote_estimate 'false',
+ updatable 'true',
+ fdw_startup_cost '123.456',
+ fdw_tuple_cost '0.123',
+ service 'value',
+ connect_timeout 'value',
+ dbname 'value',
+ host 'value',
+ hostaddr 'value',
+ port 'value',
+ --client_encoding 'value',
+ application_name 'value',
+ --fallback_application_name 'value',
+ keepalives 'value',
+ keepalives_idle 'value',
+ keepalives_interval 'value',
+ tcp_user_timeout 'value',
+ -- requiressl 'value',
+ sslcompression 'value',
+ sslmode 'value',
+ sslcert 'value',
+ sslkey 'value',
+ sslrootcert 'value',
+ sslcrl 'value',
+ --requirepeer 'value',
+ krbsrvname 'value',
+ gsslib 'value'
+ --replication 'value'
+);
+
+-- Error, invalid list syntax
+ALTER SERVER testserver1 OPTIONS (ADD extensions 'foo; bar');
+
+-- OK but gets a warning
+ALTER SERVER testserver1 OPTIONS (ADD extensions 'foo, bar');
+ALTER SERVER testserver1 OPTIONS (DROP extensions);
+
+ALTER USER MAPPING FOR public SERVER testserver1
+ OPTIONS (DROP user, DROP password);
+
+-- Attempt to add a valid option that's not allowed in a user mapping
+ALTER USER MAPPING FOR public SERVER testserver1
+ OPTIONS (ADD sslmode 'require');
+
+-- But we can add valid ones fine
+ALTER USER MAPPING FOR public SERVER testserver1
+ OPTIONS (ADD sslpassword 'dummy');
+
+-- Ensure that valid options we haven't used in a user mapping yet are
+-- accepted, to exercise the validator.
+ALTER USER MAPPING FOR public SERVER testserver1
+ OPTIONS (ADD sslkey 'value', ADD sslcert 'value');
+
+ALTER FOREIGN TABLE ft1 OPTIONS (schema_name 'S 1', table_name 'T 1');
+ALTER FOREIGN TABLE ft2 OPTIONS (schema_name 'S 1', table_name 'T 1');
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
+ALTER FOREIGN TABLE ft2 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
+\det+
+
+-- Test that alteration of server options causes reconnection
+-- Remote's errors might be non-English, so hide them to ensure stable results
+\set VERBOSITY terse
+SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work
+ALTER SERVER loopback OPTIONS (SET dbname 'no such database');
+SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail
+DO $d$
+ BEGIN
+ EXECUTE $$ALTER SERVER loopback
+ OPTIONS (SET dbname '$$||current_database()||$$')$$;
+ END;
+$d$;
+SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again
+
+-- Test that alteration of user mapping options causes reconnection
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback
+ OPTIONS (ADD user 'no such user');
+SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should fail
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback
+ OPTIONS (DROP user);
+SELECT c3, c4 FROM ft1 ORDER BY c3, c1 LIMIT 1; -- should work again
+\set VERBOSITY default
+
+-- Now we should be able to run ANALYZE.
+-- To exercise multiple code paths, we use local stats on ft1
+-- and remote-estimate mode on ft2.
+ANALYZE ft1;
+ALTER FOREIGN TABLE ft2 OPTIONS (use_remote_estimate 'true');
+
+-- ===================================================================
+-- simple queries
+-- ===================================================================
+-- single table without alias
+EXPLAIN (COSTS OFF) SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
+SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
+-- single table with alias - also test that tableoid sort is not pushed to remote side
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1, t1.tableoid OFFSET 100 LIMIT 10;
+SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1, t1.tableoid OFFSET 100 LIMIT 10;
+-- whole-row reference
+EXPLAIN (VERBOSE, COSTS OFF) SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+-- empty result
+SELECT * FROM ft1 WHERE false;
+-- with WHERE clause
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
+SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
+-- with FOR UPDATE/SHARE
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
+SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE;
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE;
+SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE;
+-- aggregate
+SELECT COUNT(*) FROM ft1 t1;
+-- subquery
+SELECT * FROM ft1 t1 WHERE t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 <= 10) ORDER BY c1;
+-- subquery+MAX
+SELECT * FROM ft1 t1 WHERE t1.c3 = (SELECT MAX(c3) FROM ft2 t2) ORDER BY c1;
+-- used in CTE
+WITH t1 AS (SELECT * FROM ft1 WHERE c1 <= 10) SELECT t2.c1, t2.c2, t2.c3, t2.c4 FROM t1, ft2 t2 WHERE t1.c1 = t2.c1 ORDER BY t1.c1;
+-- fixed values
+SELECT 'fixed', NULL FROM ft1 t1 WHERE c1 = 1;
+-- Test forcing the remote server to produce sorted data for a merge join.
+SET enable_hashjoin TO false;
+SET enable_nestloop TO false;
+-- inner join; expressions in the clauses appear in the equivalence class list
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT t1.c1, t2."C 1" FROM ft2 t1 JOIN "S 1"."T 1" t2 ON (t1.c1 = t2."C 1") OFFSET 100 LIMIT 10;
+SELECT t1.c1, t2."C 1" FROM ft2 t1 JOIN "S 1"."T 1" t2 ON (t1.c1 = t2."C 1") OFFSET 100 LIMIT 10;
+-- outer join; expressions in the clauses do not appear in the equivalence
+-- class list, but the output is unchanged compared to the previous query
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT t1.c1, t2."C 1" FROM ft2 t1 LEFT JOIN "S 1"."T 1" t2 ON (t1.c1 = t2."C 1") OFFSET 100 LIMIT 10;
+SELECT t1.c1, t2."C 1" FROM ft2 t1 LEFT JOIN "S 1"."T 1" t2 ON (t1.c1 = t2."C 1") OFFSET 100 LIMIT 10;
+-- A join between local table and foreign join. ORDER BY clause is added to the
+-- foreign join so that the local table can be joined using merge join strategy.
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT t1."C 1" FROM "S 1"."T 1" t1 left join ft1 t2 join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+SELECT t1."C 1" FROM "S 1"."T 1" t1 left join ft1 t2 join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+-- Test similar to above, except that the full join prevents any equivalence
+-- classes from being merged. This produces single relation equivalence classes
+-- included in join restrictions.
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT t1."C 1", t2.c1, t3.c1 FROM "S 1"."T 1" t1 left join ft1 t2 full join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+SELECT t1."C 1", t2.c1, t3.c1 FROM "S 1"."T 1" t1 left join ft1 t2 full join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+-- Test similar to above with all full outer joins
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT t1."C 1", t2.c1, t3.c1 FROM "S 1"."T 1" t1 full join ft1 t2 full join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+SELECT t1."C 1", t2.c1, t3.c1 FROM "S 1"."T 1" t1 full join ft1 t2 full join ft2 t3 on (t2.c1 = t3.c1) on (t3.c1 = t1."C 1") OFFSET 100 LIMIT 10;
+RESET enable_hashjoin;
+RESET enable_nestloop;
+
+-- Test executing assertion in estimate_path_cost_size() that makes sure that
+-- retrieved_rows for foreign rel re-used to cost pre-sorted foreign paths is
+-- a sensible value even when the rel has tuples=0
+CREATE TABLE loct_empty (c1 int NOT NULL, c2 text);
+CREATE FOREIGN TABLE ft_empty (c1 int NOT NULL, c2 text)
+ SERVER loopback OPTIONS (table_name 'loct_empty');
+INSERT INTO loct_empty
+ SELECT id, 'AAA' || to_char(id, 'FM000') FROM generate_series(1, 100) id;
+DELETE FROM loct_empty;
+ANALYZE ft_empty;
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft_empty ORDER BY c1;
+
+-- ===================================================================
+-- WHERE with remotely-executable conditions
+-- ===================================================================
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE t1.c1 = 1; -- Var, OpExpr(b), Const
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE t1.c1 = 100 AND t1.c2 = 0; -- BoolExpr
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 IS NULL; -- NullTest
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 IS NOT NULL; -- NullTest
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE round(abs(c1), 0) = 1; -- FuncExpr
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- OpExpr(l)
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1 + 0]); -- ScalarArrayOpExpr
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- SubscriptingRef
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c6 = E'foo''s\\bar'; -- check special chars
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c8 = 'foo'; -- can't be sent to remote
+-- parameterized remote path for foreign table
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM "S 1"."T 1" a, ft2 b WHERE a."C 1" = 47 AND b.c1 = a.c2;
+SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
+
+-- check both safe and unsafe join conditions
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft2 a, ft2 b
+ WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
+SELECT * FROM ft2 a, ft2 b
+WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
+-- bug before 9.3.5 due to sloppy handling of remote-estimate parameters
+SELECT * FROM ft1 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft2 WHERE c1 < 5));
+SELECT * FROM ft2 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft1 WHERE c1 < 5));
+-- we should not push order by clause with volatile expressions or unsafe
+-- collations
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft2 ORDER BY ft2.c1, random();
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft2 ORDER BY ft2.c1, ft2.c3 collate "C";
+
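+-- For contrast, a minimal sketch of a shippable sort key (a plain remote
+-- column with a built-in ordering operator); here the planner is free to
+-- push the sort down, so an ORDER BY can appear in the Remote SQL of EXPLAIN
+-- instead of a local Sort node being added:
+EXPLAIN (VERBOSE, COSTS OFF)
+  SELECT * FROM ft2 ORDER BY ft2.c1;
+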
+-- user-defined operator/function
+CREATE FUNCTION postgres_fdw_abs(int) RETURNS int AS $$
+BEGIN
+RETURN abs($1);
+END
+$$ LANGUAGE plpgsql IMMUTABLE;
+CREATE OPERATOR === (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ===
+);
+
+-- built-in operators and functions can be shipped for remote execution
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = abs(t1.c2);
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = abs(t1.c2);
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = t1.c2;
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = t1.c2;
+
+-- by default, user-defined ones cannot
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+
+-- ORDER BY can be shipped, though
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+
+-- but let's put them in an extension ...
+ALTER EXTENSION postgres_fdw ADD FUNCTION postgres_fdw_abs(int);
+ALTER EXTENSION postgres_fdw ADD OPERATOR === (int, int);
+ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw');
+
+-- ... now they can be shipped
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
+
+-- and both ORDER BY and LIMIT can be shipped
+EXPLAIN (VERBOSE, COSTS OFF)
+ SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2 order by t1.c2 limit 1;
+
+-- check schema-qualification of regconfig constant
+CREATE TEXT SEARCH CONFIGURATION public.custom_search
+ (COPY = pg_catalog.english);
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT c1, to_tsvector('custom_search'::regconfig, c3) FROM ft1
+WHERE c1 = 642 AND length(to_tsvector('custom_search'::regconfig, c3)) > 0;
+SELECT c1, to_tsvector('custom_search'::regconfig, c3) FROM ft1
+WHERE c1 = 642 AND length(to_tsvector('custom_search'::regconfig, c3)) > 0;
+
+-- ===================================================================
+-- JOIN queries
+-- ===================================================================
+-- Analyze ft4 and ft5 so that we have better statistics. These tables do not
+-- have use_remote_estimate set.
+ANALYZE ft4;
+ANALYZE ft5;
+
+-- join two tables
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+-- join three tables
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) JOIN ft4 t3 ON (t3.c1 = t1.c1) ORDER BY t1.c3, t1.c1 OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c2, t3.c3 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) JOIN ft4 t3 ON (t3.c1 = t1.c1) ORDER BY t1.c3, t1.c1 OFFSET 10 LIMIT 10;
+-- left outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c1 FROM ft4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+-- left outer join three tables
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+-- left outer join + placement of clauses.
+-- clauses within the nullable side are not pulled up, but a top-level clause
+-- on the non-nullable side is pushed down into the non-nullable side
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE c1 < 10) t2 ON (t1.c1 = t2.c1) WHERE t1.c1 < 10;
+SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE c1 < 10) t2 ON (t1.c1 = t2.c1) WHERE t1.c1 < 10;
+-- clauses within the nullable side are not pulled up, but the top-level
+-- clause on the nullable side is not pushed down into the nullable side
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE c1 < 10) t2 ON (t1.c1 = t2.c1)
+ WHERE (t2.c1 < 10 OR t2.c1 IS NULL) AND t1.c1 < 10;
+SELECT t1.c1, t1.c2, t2.c1, t2.c2 FROM ft4 t1 LEFT JOIN (SELECT * FROM ft5 WHERE c1 < 10) t2 ON (t1.c1 = t2.c1)
+ WHERE (t2.c1 < 10 OR t2.c1 IS NULL) AND t1.c1 < 10;
+-- right outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft5 t1 RIGHT JOIN ft4 t2 ON (t1.c1 = t2.c1) ORDER BY t2.c1, t1.c1 OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c1 FROM ft5 t1 RIGHT JOIN ft4 t2 ON (t1.c1 = t2.c1) ORDER BY t2.c1, t1.c1 OFFSET 10 LIMIT 10;
+-- right outer join three tables
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+-- full outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 45 LIMIT 10;
+SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 45 LIMIT 10;
+-- full outer join with restrictions on the joining relations
+-- a. the joining relations are both base relations
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1;
+SELECT t1.c1, t2.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT 1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t2 ON (TRUE) OFFSET 10 LIMIT 10;
+SELECT 1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t2 ON (TRUE) OFFSET 10 LIMIT 10;
+-- b. one of the joining relations is a base relation and the other is a join
+-- relation
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT t2.c1, t3.c1 FROM ft4 t2 LEFT JOIN ft5 t3 ON (t2.c1 = t3.c1) WHERE (t2.c1 between 50 and 60)) ss(a, b) ON (t1.c1 = ss.a) ORDER BY t1.c1, ss.a, ss.b;
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT t2.c1, t3.c1 FROM ft4 t2 LEFT JOIN ft5 t3 ON (t2.c1 = t3.c1) WHERE (t2.c1 between 50 and 60)) ss(a, b) ON (t1.c1 = ss.a) ORDER BY t1.c1, ss.a, ss.b;
+-- c. test deparsing the remote query as nested subqueries
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT t2.c1, t3.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t2 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t3 ON (t2.c1 = t3.c1) WHERE t2.c1 IS NULL OR t2.c1 IS NOT NULL) ss(a, b) ON (t1.c1 = ss.a) ORDER BY t1.c1, ss.a, ss.b;
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t1 FULL JOIN (SELECT t2.c1, t3.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t2 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t3 ON (t2.c1 = t3.c1) WHERE t2.c1 IS NULL OR t2.c1 IS NOT NULL) ss(a, b) ON (t1.c1 = ss.a) ORDER BY t1.c1, ss.a, ss.b;
+-- d. test deparsing rowmarked relations as subqueries
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM "S 1"."T 3" WHERE c1 = 50) t1 INNER JOIN (SELECT t2.c1, t3.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t2 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t3 ON (t2.c1 = t3.c1) WHERE t2.c1 IS NULL OR t2.c1 IS NOT NULL) ss(a, b) ON (TRUE) ORDER BY t1.c1, ss.a, ss.b FOR UPDATE OF t1;
+SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM "S 1"."T 3" WHERE c1 = 50) t1 INNER JOIN (SELECT t2.c1, t3.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t2 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t3 ON (t2.c1 = t3.c1) WHERE t2.c1 IS NULL OR t2.c1 IS NOT NULL) ss(a, b) ON (TRUE) ORDER BY t1.c1, ss.a, ss.b FOR UPDATE OF t1;
+-- full outer join + inner join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1, t3.c1 FROM ft4 t1 INNER JOIN ft5 t2 ON (t1.c1 = t2.c1 + 1 and t1.c1 between 50 and 60) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) ORDER BY t1.c1, t2.c1, t3.c1 LIMIT 10;
+SELECT t1.c1, t2.c1, t3.c1 FROM ft4 t1 INNER JOIN ft5 t2 ON (t1.c1 = t2.c1 + 1 and t1.c1 between 50 and 60) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) ORDER BY t1.c1, t2.c1, t3.c1 LIMIT 10;
+-- full outer join three tables
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+-- full outer join + right outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+-- right outer join + full outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+-- full outer join + left outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+-- left outer join + full outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+SET enable_memoize TO off;
+-- right outer join + left outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+RESET enable_memoize;
+-- left outer join + right outer join
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
+-- full outer join + WHERE clause, only matched rows
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) WHERE (t1.c1 = t2.c1 OR t1.c1 IS NULL) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+SELECT t1.c1, t2.c1 FROM ft4 t1 FULL JOIN ft5 t2 ON (t1.c1 = t2.c1) WHERE (t1.c1 = t2.c1 OR t1.c1 IS NULL) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+-- full outer join + WHERE clause with shippable extensions set
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t1.c3 FROM ft1 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE postgres_fdw_abs(t1.c1) > 0 OFFSET 10 LIMIT 10;
+ALTER SERVER loopback OPTIONS (DROP extensions);
+-- full outer join + WHERE clause with shippable extensions not set
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2, t1.c3 FROM ft1 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE postgres_fdw_abs(t1.c1) > 0 OFFSET 10 LIMIT 10;
+ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw');
+-- join two tables with FOR UPDATE clause
+-- tests whole-row reference for row marks
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE OF t1;
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE OF t1;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE;
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE;
+-- join two tables with FOR SHARE clause
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE OF t1;
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE OF t1;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE;
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE;
+-- join in CTE
+EXPLAIN (VERBOSE, COSTS OFF)
+WITH t (c1_1, c1_3, c2_1) AS MATERIALIZED (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
+WITH t (c1_1, c1_3, c2_1) AS MATERIALIZED (SELECT t1.c1, t1.c3, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1 OFFSET 100 LIMIT 10;
+-- ctid with whole-row reference
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.ctid, t1, t2, t1.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+-- SEMI JOIN, not pushed down
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1 FROM ft1 t1 WHERE EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c1) ORDER BY t1.c1 OFFSET 100 LIMIT 10;
+SELECT t1.c1 FROM ft1 t1 WHERE EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c1) ORDER BY t1.c1 OFFSET 100 LIMIT 10;
+-- ANTI JOIN, not pushed down
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1 FROM ft1 t1 WHERE NOT EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c2) ORDER BY t1.c1 OFFSET 100 LIMIT 10;
+SELECT t1.c1 FROM ft1 t1 WHERE NOT EXISTS (SELECT 1 FROM ft2 t2 WHERE t1.c1 = t2.c2) ORDER BY t1.c1 OFFSET 100 LIMIT 10;
+-- CROSS JOIN can be pushed down
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 CROSS JOIN ft2 t2 ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+SELECT t1.c1, t2.c1 FROM ft1 t1 CROSS JOIN ft2 t2 ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+-- different server, not pushed down. No result expected.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft5 t1 JOIN ft6 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+SELECT t1.c1, t2.c1 FROM ft5 t1 JOIN ft6 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+-- unsafe join conditions (c8 has a UDT), not pushed down. Practically a CROSS
+-- JOIN since c8 in both tables has the same value.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 LEFT JOIN ft2 t2 ON (t1.c8 = t2.c8) ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+SELECT t1.c1, t2.c1 FROM ft1 t1 LEFT JOIN ft2 t2 ON (t1.c8 = t2.c8) ORDER BY t1.c1, t2.c1 OFFSET 100 LIMIT 10;
+-- unsafe conditions on one side (c8 has a UDT), not pushed down.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE t1.c8 = 'foo' ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+SELECT t1.c1, t2.c1 FROM ft1 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE t1.c8 = 'foo' ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+-- join where a condition in the WHERE clause that is unsafe to push down
+-- references a column not in the SELECT list. In this test the unsafe clause
+-- needs column references from both joining sides so that it is not pushed
+-- down into either side of the join.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE t1.c8 = t2.c8 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE t1.c8 = t2.c8 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+-- Aggregate after UNION, for testing setrefs
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) UNION SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) AS t (t1c1, t2c1) GROUP BY t1c1 ORDER BY t1c1 OFFSET 100 LIMIT 10;
+SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) UNION SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) AS t (t1c1, t2c1) GROUP BY t1c1 ORDER BY t1c1 OFFSET 100 LIMIT 10;
+-- join with lateral reference
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
+SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
+
+-- non-Var items in targetlist of the nullable rel of a join preventing
+-- push-down in some cases
+-- unable to push {ft1, ft2}
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT q.a, ft2.c1 FROM (SELECT 13 FROM ft1 WHERE c1 = 13) q(a) RIGHT JOIN ft2 ON (q.a = ft2.c1) WHERE ft2.c1 BETWEEN 10 AND 15;
+SELECT q.a, ft2.c1 FROM (SELECT 13 FROM ft1 WHERE c1 = 13) q(a) RIGHT JOIN ft2 ON (q.a = ft2.c1) WHERE ft2.c1 BETWEEN 10 AND 15;
+
+-- ok to push {ft1, ft2} but not {ft1, ft2, ft4}
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT ft4.c1, q.* FROM ft4 LEFT JOIN (SELECT 13, ft1.c1, ft2.c1 FROM ft1 RIGHT JOIN ft2 ON (ft1.c1 = ft2.c1) WHERE ft1.c1 = 12) q(a, b, c) ON (ft4.c1 = q.b) WHERE ft4.c1 BETWEEN 10 AND 15;
+SELECT ft4.c1, q.* FROM ft4 LEFT JOIN (SELECT 13, ft1.c1, ft2.c1 FROM ft1 RIGHT JOIN ft2 ON (ft1.c1 = ft2.c1) WHERE ft1.c1 = 12) q(a, b, c) ON (ft4.c1 = q.b) WHERE ft4.c1 BETWEEN 10 AND 15;
+
+-- join whose nullable side has some columns with null values
+UPDATE ft5 SET c3 = null where c1 % 9 = 0;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT ft5, ft5.c1, ft5.c2, ft5.c3, ft4.c1, ft4.c2 FROM ft5 left join ft4 on ft5.c1 = ft4.c1 WHERE ft4.c1 BETWEEN 10 and 30 ORDER BY ft5.c1, ft4.c1;
+SELECT ft5, ft5.c1, ft5.c2, ft5.c3, ft4.c1, ft4.c2 FROM ft5 left join ft4 on ft5.c1 = ft4.c1 WHERE ft4.c1 BETWEEN 10 and 30 ORDER BY ft5.c1, ft4.c1;
+
+-- multi-way join involving multiple merge joins
+-- (this case used to have EPQ-related planning problems)
+CREATE TABLE local_tbl (c1 int NOT NULL, c2 int NOT NULL, c3 text, CONSTRAINT local_tbl_pkey PRIMARY KEY (c1));
+INSERT INTO local_tbl SELECT id, id % 10, to_char(id, 'FM0000') FROM generate_series(1, 1000) id;
+ANALYZE local_tbl;
+SET enable_nestloop TO false;
+SET enable_hashjoin TO false;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
+ AND ft1.c2 = ft5.c1 AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
+SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
+ AND ft1.c2 = ft5.c1 AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
+RESET enable_nestloop;
+RESET enable_hashjoin;
+DROP TABLE local_tbl;
+
+-- check join pushdown in situations where multiple userids are involved
+CREATE ROLE regress_view_owner SUPERUSER;
+CREATE USER MAPPING FOR regress_view_owner SERVER loopback;
+GRANT SELECT ON ft4 TO regress_view_owner;
+GRANT SELECT ON ft5 TO regress_view_owner;
+
+CREATE VIEW v4 AS SELECT * FROM ft4;
+CREATE VIEW v5 AS SELECT * FROM ft5;
+ALTER VIEW v5 OWNER TO regress_view_owner;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10; -- can't be pushed down, different view owners
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ALTER VIEW v4 OWNER TO regress_view_owner;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10; -- can be pushed down
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN v5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10; -- can't be pushed down, view owner not current user
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ALTER VIEW v4 OWNER TO CURRENT_USER;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10; -- can be pushed down
+SELECT t1.c1, t2.c2 FROM v4 t1 LEFT JOIN ft5 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c1, t2.c1 OFFSET 10 LIMIT 10;
+ALTER VIEW v4 OWNER TO regress_view_owner;
+
+-- cleanup
+DROP OWNED BY regress_view_owner;
+DROP ROLE regress_view_owner;
+
+
+-- ===================================================================
+-- Aggregate and grouping queries
+-- ===================================================================
+
+-- Simple aggregates
+explain (verbose, costs off)
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2;
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2;
+
+explain (verbose, costs off)
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2 limit 1;
+select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2 limit 1;
+
+-- Aggregate is not pushed down as aggregation contains random()
+explain (verbose, costs off)
+select sum(c1 * (random() <= 1)::int) as sum, avg(c1) from ft1;
+
+-- Aggregate over join query
+explain (verbose, costs off)
+select count(*), sum(t1.c1), avg(t2.c1) from ft1 t1 inner join ft1 t2 on (t1.c2 = t2.c2) where t1.c2 = 6;
+select count(*), sum(t1.c1), avg(t2.c1) from ft1 t1 inner join ft1 t2 on (t1.c2 = t2.c2) where t1.c2 = 6;
+
+-- Not pushed down due to local conditions present in the underlying input rel
+explain (verbose, costs off)
+select sum(t1.c1), count(t2.c1) from ft1 t1 inner join ft2 t2 on (t1.c1 = t2.c1) where ((t1.c1 * t2.c1)/(t1.c1 * t2.c1)) * random() <= 1;
+
+-- GROUP BY clause having expressions
+explain (verbose, costs off)
+select c2/2, sum(c2) * (c2/2) from ft1 group by c2/2 order by c2/2;
+select c2/2, sum(c2) * (c2/2) from ft1 group by c2/2 order by c2/2;
+
+-- Aggregates in subquery are pushed down.
+explain (verbose, costs off)
+select count(x.a), sum(x.a) from (select c2 a, sum(c1) b from ft1 group by c2, sqrt(c1) order by 1, 2) x;
+select count(x.a), sum(x.a) from (select c2 a, sum(c1) b from ft1 group by c2, sqrt(c1) order by 1, 2) x;
+
+-- Aggregate is still pushed down by computing the unshippable expression locally
+explain (verbose, costs off)
+select c2 * (random() <= 1)::int as sum1, sum(c1) * c2 as sum2 from ft1 group by c2 order by 1, 2;
+select c2 * (random() <= 1)::int as sum1, sum(c1) * c2 as sum2 from ft1 group by c2 order by 1, 2;
+
+-- Aggregates with an unshippable GROUP BY clause are not pushed down
+explain (verbose, costs off)
+select c2 * (random() <= 1)::int as c2 from ft2 group by c2 * (random() <= 1)::int order by 1;
+
+-- GROUP BY clause in various forms: ordinal position, alias and constant expression
+explain (verbose, costs off)
+select count(c2) w, c2 x, 5 y, 7.0 z from ft1 group by 2, y, 9.0::int order by 2;
+select count(c2) w, c2 x, 5 y, 7.0 z from ft1 group by 2, y, 9.0::int order by 2;
+
+-- GROUP BY clause referring to same column multiple times
+-- Also, ORDER BY contains an aggregate function
+explain (verbose, costs off)
+select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1);
+select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1);
+
+-- Testing HAVING clause shippability
+explain (verbose, costs off)
+select c2, sum(c1) from ft2 group by c2 having avg(c1) < 500 and sum(c1) < 49800 order by c2;
+select c2, sum(c1) from ft2 group by c2 having avg(c1) < 500 and sum(c1) < 49800 order by c2;
+
+-- Unshippable HAVING clause will be evaluated locally, and the other qual in the HAVING clause is pushed down
+explain (verbose, costs off)
+select count(*) from (select c5, count(c1) from ft1 group by c5, sqrt(c2) having (avg(c1) / avg(c1)) * random() <= 1 and avg(c1) < 500) x;
+select count(*) from (select c5, count(c1) from ft1 group by c5, sqrt(c2) having (avg(c1) / avg(c1)) * random() <= 1 and avg(c1) < 500) x;
+
+-- Aggregate in HAVING clause is not pushable, and thus aggregation is not pushed down
+explain (verbose, costs off)
+select sum(c1) from ft1 group by c2 having avg(c1 * (random() <= 1)::int) > 100 order by 1;
+
+-- Remote aggregate in combination with a local Param (for the output
+-- of an initplan) can be trouble, per bug #15781
+explain (verbose, costs off)
+select exists(select 1 from pg_enum), sum(c1) from ft1;
+select exists(select 1 from pg_enum), sum(c1) from ft1;
+
+explain (verbose, costs off)
+select exists(select 1 from pg_enum), sum(c1) from ft1 group by 1;
+select exists(select 1 from pg_enum), sum(c1) from ft1 group by 1;
+
+
+-- Testing ORDER BY, DISTINCT, FILTER, Ordered-sets and VARIADIC within aggregates
+
+-- ORDER BY within aggregate, same column used to order
+explain (verbose, costs off)
+select array_agg(c1 order by c1) from ft1 where c1 < 100 group by c2 order by 1;
+select array_agg(c1 order by c1) from ft1 where c1 < 100 group by c2 order by 1;
+
+-- ORDER BY within aggregate, different column used to order also using DESC
+explain (verbose, costs off)
+select array_agg(c5 order by c1 desc) from ft2 where c2 = 6 and c1 < 50;
+select array_agg(c5 order by c1 desc) from ft2 where c2 = 6 and c1 < 50;
+
+-- DISTINCT within aggregate
+explain (verbose, costs off)
+select array_agg(distinct (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+select array_agg(distinct (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+
+-- DISTINCT combined with ORDER BY within aggregate
+explain (verbose, costs off)
+select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+
+explain (verbose, costs off)
+select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5 desc nulls last) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5 desc nulls last) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1;
+
+-- FILTER within aggregate
+explain (verbose, costs off)
+select sum(c1) filter (where c1 < 100 and c2 > 5) from ft1 group by c2 order by 1 nulls last;
+select sum(c1) filter (where c1 < 100 and c2 > 5) from ft1 group by c2 order by 1 nulls last;
+
+-- DISTINCT, ORDER BY and FILTER within aggregate
+explain (verbose, costs off)
+select sum(c1%3), sum(distinct c1%3 order by c1%3) filter (where c1%3 < 2), c2 from ft1 where c2 = 6 group by c2;
+select sum(c1%3), sum(distinct c1%3 order by c1%3) filter (where c1%3 < 2), c2 from ft1 where c2 = 6 group by c2;
+
+-- Outer query is aggregation query
+explain (verbose, costs off)
+select distinct (select count(*) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
+select distinct (select count(*) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
+-- Inner query is aggregation query
+explain (verbose, costs off)
+select distinct (select count(t1.c1) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
+select distinct (select count(t1.c1) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
+
+-- Aggregate not pushed down as FILTER condition is not pushable
+explain (verbose, costs off)
+select sum(c1) filter (where (c1 / c1) * random() <= 1) from ft1 group by c2 order by 1;
+explain (verbose, costs off)
+select sum(c2) filter (where c2 in (select c2 from ft1 where c2 < 5)) from ft1;
+
+-- Ordered-sets within aggregate
+explain (verbose, costs off)
+select c2, rank('10'::varchar) within group (order by c6), percentile_cont(c2/10::numeric) within group (order by c1) from ft1 where c2 < 10 group by c2 having percentile_cont(c2/10::numeric) within group (order by c1) < 500 order by c2;
+select c2, rank('10'::varchar) within group (order by c6), percentile_cont(c2/10::numeric) within group (order by c1) from ft1 where c2 < 10 group by c2 having percentile_cont(c2/10::numeric) within group (order by c1) < 500 order by c2;
+
+-- Using multiple arguments within aggregates
+explain (verbose, costs off)
+select c1, rank(c1, c2) within group (order by c1, c2) from ft1 group by c1, c2 having c1 = 6 order by 1;
+select c1, rank(c1, c2) within group (order by c1, c2) from ft1 group by c1, c2 having c1 = 6 order by 1;
+
+-- User defined function for user defined aggregate, VARIADIC
+create function least_accum(anyelement, variadic anyarray)
+returns anyelement language sql as
+ 'select least($1, min($2[i])) from generate_subscripts($2,1) g(i)';
+create aggregate least_agg(variadic items anyarray) (
+ stype = anyelement, sfunc = least_accum
+);
+
+-- Disable hash aggregation for plan stability.
+set enable_hashagg to false;
+
+-- Not pushed down due to user defined aggregate
+explain (verbose, costs off)
+select c2, least_agg(c1) from ft1 group by c2 order by c2;
+
+-- Add function and aggregate into extension
+alter extension postgres_fdw add function least_accum(anyelement, variadic anyarray);
+alter extension postgres_fdw add aggregate least_agg(variadic items anyarray);
+alter server loopback options (set extensions 'postgres_fdw');
+
+-- Now the aggregate will be pushed down.  The deparsed aggregate will show the VARIADIC argument.
+explain (verbose, costs off)
+select c2, least_agg(c1) from ft1 where c2 < 100 group by c2 order by c2;
+select c2, least_agg(c1) from ft1 where c2 < 100 group by c2 order by c2;
+
+-- Remove function and aggregate from extension
+alter extension postgres_fdw drop function least_accum(anyelement, variadic anyarray);
+alter extension postgres_fdw drop aggregate least_agg(variadic items anyarray);
+alter server loopback options (set extensions 'postgres_fdw');
+
+-- Not pushed down, as the objects have been dropped from the extension.
+explain (verbose, costs off)
+select c2, least_agg(c1) from ft1 group by c2 order by c2;
+
+-- Cleanup
+reset enable_hashagg;
+drop aggregate least_agg(variadic items anyarray);
+drop function least_accum(anyelement, variadic anyarray);
+
+
+-- Testing USING OPERATOR() in ORDER BY within an aggregate.
+-- For this, we need user-defined operators along with an operator family and
+-- operator class.  Create those and then add them to the extension.  Note that
+-- user-defined objects are considered unshippable unless they are part of
+-- the extension.
+create operator public.<^ (
+ leftarg = int4,
+ rightarg = int4,
+ procedure = int4eq
+);
+
+create operator public.=^ (
+ leftarg = int4,
+ rightarg = int4,
+ procedure = int4lt
+);
+
+create operator public.>^ (
+ leftarg = int4,
+ rightarg = int4,
+ procedure = int4gt
+);
+
+create operator family my_op_family using btree;
+
+create function my_op_cmp(a int, b int) returns int as
+ $$begin return btint4cmp(a, b); end $$ language plpgsql;
+
+create operator class my_op_class for type int using btree family my_op_family as
+ operator 1 public.<^,
+ operator 3 public.=^,
+ operator 5 public.>^,
+ function 1 my_op_cmp(int, int);
+
+-- This will not be pushed down, as the user-defined sort operator is not part
+-- of the extension yet.
+explain (verbose, costs off)
+select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2;
+
+-- This should not be pushed either.
+explain (verbose, costs off)
+select * from ft2 order by c1 using operator(public.<^);
+
+-- Update local stats on ft2
+ANALYZE ft2;
+
+-- Add into extension
+alter extension postgres_fdw add operator class my_op_class using btree;
+alter extension postgres_fdw add function my_op_cmp(a int, b int);
+alter extension postgres_fdw add operator family my_op_family using btree;
+alter extension postgres_fdw add operator public.<^(int, int);
+alter extension postgres_fdw add operator public.=^(int, int);
+alter extension postgres_fdw add operator public.>^(int, int);
+alter server loopback options (set extensions 'postgres_fdw');
+
+-- Now this will be pushed down, as the sort operator is part of the extension.
+explain (verbose, costs off)
+select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2;
+select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2;
+
+-- This should be pushed too.
+explain (verbose, costs off)
+select * from ft2 order by c1 using operator(public.<^);
+
+-- Remove from extension
+alter extension postgres_fdw drop operator class my_op_class using btree;
+alter extension postgres_fdw drop function my_op_cmp(a int, b int);
+alter extension postgres_fdw drop operator family my_op_family using btree;
+alter extension postgres_fdw drop operator public.<^(int, int);
+alter extension postgres_fdw drop operator public.=^(int, int);
+alter extension postgres_fdw drop operator public.>^(int, int);
+alter server loopback options (set extensions 'postgres_fdw');
+
+-- This will not be pushed down, as the sort operator has been removed from the extension.
+explain (verbose, costs off)
+select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2;
+
+-- Cleanup
+drop operator class my_op_class using btree;
+drop function my_op_cmp(a int, b int);
+drop operator family my_op_family using btree;
+drop operator public.>^(int, int);
+drop operator public.=^(int, int);
+drop operator public.<^(int, int);
+
+-- The input relation to the aggregate push-down hook is not safe to push down,
+-- and thus the aggregate cannot be pushed down to the foreign server.
+explain (verbose, costs off)
+select count(t1.c3) from ft2 t1 left join ft2 t2 on (t1.c1 = random() * t2.c2);
+
+-- Subquery in FROM clause having aggregate
+explain (verbose, costs off)
+select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x where ft1.c2 = x.a group by x.b order by 1, 2;
+select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x where ft1.c2 = x.a group by x.b order by 1, 2;
+
+-- FULL join with IS NULL check in HAVING
+explain (verbose, costs off)
+select avg(t1.c1), sum(t2.c1) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) group by t2.c1 having (avg(t1.c1) is null and sum(t2.c1) < 10) or sum(t2.c1) is null order by 1 nulls last, 2;
+select avg(t1.c1), sum(t2.c1) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) group by t2.c1 having (avg(t1.c1) is null and sum(t2.c1) < 10) or sum(t2.c1) is null order by 1 nulls last, 2;
+
+-- Aggregate over a FULL join that requires deparsing the joined relations as
+-- subqueries.
+explain (verbose, costs off)
+select count(*), sum(t1.c1), avg(t2.c1) from (select c1 from ft4 where c1 between 50 and 60) t1 full join (select c1 from ft5 where c1 between 50 and 60) t2 on (t1.c1 = t2.c1);
+select count(*), sum(t1.c1), avg(t2.c1) from (select c1 from ft4 where c1 between 50 and 60) t1 full join (select c1 from ft5 where c1 between 50 and 60) t2 on (t1.c1 = t2.c1);
+
+-- The ORDER BY expression is part of the target list but is not pushed down to
+-- the foreign server.
+explain (verbose, costs off)
+select sum(c2) * (random() <= 1)::int as sum from ft1 order by 1;
+select sum(c2) * (random() <= 1)::int as sum from ft1 order by 1;
+
+-- LATERAL join, with parameterization
+set enable_hashagg to false;
+explain (verbose, costs off)
+select c2, sum from "S 1"."T 1" t1, lateral (select sum(t2.c1 + t1."C 1") sum from ft2 t2 group by t2.c1) qry where t1.c2 * 2 = qry.sum and t1.c2 < 3 and t1."C 1" < 100 order by 1;
+select c2, sum from "S 1"."T 1" t1, lateral (select sum(t2.c1 + t1."C 1") sum from ft2 t2 group by t2.c1) qry where t1.c2 * 2 = qry.sum and t1.c2 < 3 and t1."C 1" < 100 order by 1;
+reset enable_hashagg;
+
+-- bug #15613: bad plan for foreign table scan with lateral reference
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT ref_0.c2, subq_1.*
+FROM
+ "S 1"."T 1" AS ref_0,
+ LATERAL (
+ SELECT ref_0."C 1" c1, subq_0.*
+ FROM (SELECT ref_0.c2, ref_1.c3
+ FROM ft1 AS ref_1) AS subq_0
+ RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
+ ) AS subq_1
+WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
+ORDER BY ref_0."C 1";
+
+SELECT ref_0.c2, subq_1.*
+FROM
+ "S 1"."T 1" AS ref_0,
+ LATERAL (
+ SELECT ref_0."C 1" c1, subq_0.*
+ FROM (SELECT ref_0.c2, ref_1.c3
+ FROM ft1 AS ref_1) AS subq_0
+ RIGHT JOIN ft2 AS ref_3 ON (subq_0.c3 = ref_3.c3)
+ ) AS subq_1
+WHERE ref_0."C 1" < 10 AND subq_1.c3 = '00001'
+ORDER BY ref_0."C 1";
+
+-- Check with PlaceHolderVars
+explain (verbose, costs off)
+select sum(q.a), count(q.b) from ft4 left join (select 13, avg(ft1.c1), sum(ft2.c1) from ft1 right join ft2 on (ft1.c1 = ft2.c1)) q(a, b, c) on (ft4.c1 <= q.b);
+select sum(q.a), count(q.b) from ft4 left join (select 13, avg(ft1.c1), sum(ft2.c1) from ft1 right join ft2 on (ft1.c1 = ft2.c1)) q(a, b, c) on (ft4.c1 <= q.b);
+
+
+-- Not supported cases
+-- Grouping sets
+explain (verbose, costs off)
+select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls last;
+select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls last;
+explain (verbose, costs off)
+select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last;
+select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last;
+explain (verbose, costs off)
+select c2, c6, sum(c1) from ft1 where c2 < 3 group by grouping sets(c2, c6) order by 1 nulls last, 2 nulls last;
+select c2, c6, sum(c1) from ft1 where c2 < 3 group by grouping sets(c2, c6) order by 1 nulls last, 2 nulls last;
+explain (verbose, costs off)
+select c2, sum(c1), grouping(c2) from ft1 where c2 < 3 group by c2 order by 1 nulls last;
+select c2, sum(c1), grouping(c2) from ft1 where c2 < 3 group by c2 order by 1 nulls last;
+
+-- DISTINCT itself is not pushed down, whereas the underlying aggregate is pushed down
+explain (verbose, costs off)
+select distinct sum(c1)/1000 s from ft2 where c2 < 6 group by c2 order by 1;
+select distinct sum(c1)/1000 s from ft2 where c2 < 6 group by c2 order by 1;
+
+-- WindowAgg
+explain (verbose, costs off)
+select c2, sum(c2), count(c2) over (partition by c2%2) from ft2 where c2 < 10 group by c2 order by 1;
+select c2, sum(c2), count(c2) over (partition by c2%2) from ft2 where c2 < 10 group by c2 order by 1;
+explain (verbose, costs off)
+select c2, array_agg(c2) over (partition by c2%2 order by c2 desc) from ft1 where c2 < 10 group by c2 order by 1;
+select c2, array_agg(c2) over (partition by c2%2 order by c2 desc) from ft1 where c2 < 10 group by c2 order by 1;
+explain (verbose, costs off)
+select c2, array_agg(c2) over (partition by c2%2 order by c2 range between current row and unbounded following) from ft1 where c2 < 10 group by c2 order by 1;
+select c2, array_agg(c2) over (partition by c2%2 order by c2 range between current row and unbounded following) from ft1 where c2 < 10 group by c2 order by 1;
+
+
+-- ===================================================================
+-- parameterized queries
+-- ===================================================================
+-- simple join
+PREPARE st1(int, int) AS SELECT t1.c3, t2.c3 FROM ft1 t1, ft2 t2 WHERE t1.c1 = $1 AND t2.c1 = $2;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st1(1, 2);
+EXECUTE st1(1, 1);
+EXECUTE st1(101, 101);
+-- subquery using stable function (can't be sent to remote)
+PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c4) = '1970-01-17'::date) ORDER BY c1;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st2(10, 20);
+EXECUTE st2(10, 20);
+EXECUTE st2(101, 121);
+-- subquery using immutable function (can be sent to remote)
+PREPARE st3(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c5) = '1970-01-17'::date) ORDER BY c1;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st3(10, 20);
+EXECUTE st3(10, 20);
+EXECUTE st3(20, 30);
+-- custom plan should be chosen initially
+PREPARE st4(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 = $1;
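+-- (with the default plan_cache_mode, the plancache builds custom plans for the
+-- first five executions before it will consider switching to a generic plan)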
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+-- once we try it enough times, should switch to generic plan
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st4(1);
+-- value of $1 should not be sent to remote
+PREPARE st5(user_enum,int) AS SELECT * FROM ft1 t1 WHERE c8 = $1 and c1 = $2;
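+-- (c8 is of the user-defined enum type user_enum; non-built-in types are not
+-- considered shippable, so the c8 = $1 qual is expected to be evaluated locally)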
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st5('foo', 1);
+EXECUTE st5('foo', 1);
+
+-- altering FDW options requires replanning
+PREPARE st6 AS SELECT * FROM ft1 t1 WHERE t1.c1 = t1.c2;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st6;
+PREPARE st7 AS INSERT INTO ft1 (c1,c2,c3) VALUES (1001,101,'foo');
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st7;
+ALTER TABLE "S 1"."T 1" RENAME TO "T 0";
+ALTER FOREIGN TABLE ft1 OPTIONS (SET table_name 'T 0');
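+-- changing the foreign table's options invalidates the cached plans, so st6 and
+-- st7 below should be replanned against the renamed remote table "T 0"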
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st6;
+EXECUTE st6;
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st7;
+ALTER TABLE "S 1"."T 0" RENAME TO "T 1";
+ALTER FOREIGN TABLE ft1 OPTIONS (SET table_name 'T 1');
+
+PREPARE st8 AS SELECT count(c3) FROM ft1 t1 WHERE t1.c1 === t1.c2;
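+-- (=== is the user-defined operator created earlier in this file and added to the
+-- postgres_fdw extension; it is shippable only while the server's 'extensions'
+-- option lists postgres_fdw)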
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st8;
+ALTER SERVER loopback OPTIONS (DROP extensions);
+EXPLAIN (VERBOSE, COSTS OFF) EXECUTE st8;
+EXECUTE st8;
+ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw');
+
+-- cleanup
+DEALLOCATE st1;
+DEALLOCATE st2;
+DEALLOCATE st3;
+DEALLOCATE st4;
+DEALLOCATE st5;
+DEALLOCATE st6;
+DEALLOCATE st7;
+DEALLOCATE st8;
+
+-- System columns, except ctid and oid, should not be sent to remote
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM ft1 t1 WHERE t1.tableoid = 'pg_class'::regclass LIMIT 1;
+SELECT * FROM ft1 t1 WHERE t1.tableoid = 'ft1'::regclass LIMIT 1;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
+SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
+SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)';
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT ctid, * FROM ft1 t1 LIMIT 1;
+SELECT ctid, * FROM ft1 t1 LIMIT 1;
+
+-- ===================================================================
+-- used in PL/pgSQL function
+-- ===================================================================
+CREATE OR REPLACE FUNCTION f_test(p_c1 int) RETURNS int AS $$
+DECLARE
+ v_c1 int;
+BEGIN
+ SELECT c1 INTO v_c1 FROM ft1 WHERE c1 = p_c1 LIMIT 1;
+ PERFORM c1 FROM ft1 WHERE c1 = p_c1 AND p_c1 = v_c1 LIMIT 1;
+ RETURN v_c1;
+END;
+$$ LANGUAGE plpgsql;
+SELECT f_test(100);
+DROP FUNCTION f_test(int);
+
+-- ===================================================================
+-- REINDEX
+-- ===================================================================
+-- remote table is not created here
+CREATE FOREIGN TABLE reindex_foreign (c1 int, c2 int)
+ SERVER loopback2 OPTIONS (table_name 'reindex_local');
+REINDEX TABLE reindex_foreign; -- error
+REINDEX TABLE CONCURRENTLY reindex_foreign; -- error
+DROP FOREIGN TABLE reindex_foreign;
+-- partitions and foreign tables
+CREATE TABLE reind_fdw_parent (c1 int) PARTITION BY RANGE (c1);
+CREATE TABLE reind_fdw_0_10 PARTITION OF reind_fdw_parent
+ FOR VALUES FROM (0) TO (10);
+CREATE FOREIGN TABLE reind_fdw_10_20 PARTITION OF reind_fdw_parent
+ FOR VALUES FROM (10) TO (20)
+ SERVER loopback OPTIONS (table_name 'reind_local_10_20');
+REINDEX TABLE reind_fdw_parent; -- ok
+REINDEX TABLE CONCURRENTLY reind_fdw_parent; -- ok
+DROP TABLE reind_fdw_parent;
+
+-- ===================================================================
+-- conversion error
+-- ===================================================================
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int;
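+-- the remote column is still of type user_enum, so its values cannot be converted
+-- to the locally declared int type and the queries below fail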
+SELECT * FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8) WHERE x1 = 1; -- ERROR
+SELECT ftx.x1, ft2.c2, ftx.x8 FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2
+ WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR
+SELECT ftx.x1, ft2.c2, ftx FROM ft1 ftx(x1,x2,x3,x4,x5,x6,x7,x8), ft2
+ WHERE ftx.x1 = ft2.c1 AND ftx.x1 = 1; -- ERROR
+SELECT sum(c2), array_agg(c8) FROM ft1 GROUP BY c8; -- ERROR
+ANALYZE ft1; -- ERROR
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE user_enum;
+
+-- ===================================================================
+-- subtransaction
+-- + local/remote error doesn't break cursor
+-- ===================================================================
+BEGIN;
+DECLARE c CURSOR FOR SELECT * FROM ft1 ORDER BY c1;
+FETCH c;
+SAVEPOINT s;
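+-- intentionally invalid statement, raising a local error inside the subtransaction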
+ERROR OUT; -- ERROR
+ROLLBACK TO s;
+FETCH c;
+SAVEPOINT s;
+SELECT * FROM ft1 WHERE 1 / (c1 - 1) > 0; -- ERROR
+ROLLBACK TO s;
+FETCH c;
+SELECT * FROM ft1 ORDER BY c1 LIMIT 1;
+COMMIT;
+
+-- ===================================================================
+-- test handling of collations
+-- ===================================================================
+create table loct3 (f1 text collate "C" unique, f2 text, f3 varchar(10) unique);
+create foreign table ft3 (f1 text collate "C", f2 text, f3 varchar(10))
+ server loopback options (table_name 'loct3', use_remote_estimate 'true');
+
+-- can be sent to remote
+explain (verbose, costs off) select * from ft3 where f1 = 'foo';
+explain (verbose, costs off) select * from ft3 where f1 COLLATE "C" = 'foo';
+explain (verbose, costs off) select * from ft3 where f2 = 'foo';
+explain (verbose, costs off) select * from ft3 where f3 = 'foo';
+explain (verbose, costs off) select * from ft3 f, loct3 l
+ where f.f3 = l.f3 and l.f1 = 'foo';
+-- can't be sent to remote
+explain (verbose, costs off) select * from ft3 where f1 COLLATE "POSIX" = 'foo';
+explain (verbose, costs off) select * from ft3 where f1 = 'foo' COLLATE "C";
+explain (verbose, costs off) select * from ft3 where f2 COLLATE "C" = 'foo';
+explain (verbose, costs off) select * from ft3 where f2 = 'foo' COLLATE "C";
+explain (verbose, costs off) select * from ft3 f, loct3 l
+ where f.f3 = l.f3 COLLATE "POSIX" and l.f1 = 'foo';
+
+-- ===================================================================
+-- test writable foreign table stuff
+-- ===================================================================
+EXPLAIN (verbose, costs off)
+INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
+INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
+INSERT INTO ft2 (c1,c2,c3)
+ VALUES (1101,201,'aaa'), (1102,202,'bbb'), (1103,203,'ccc') RETURNING *;
+INSERT INTO ft2 (c1,c2,c3) VALUES (1104,204,'ddd'), (1105,205,'eee');
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c2 = c2 + 300, c3 = c3 || '_update3' WHERE c1 % 10 = 3; -- can be pushed down
+UPDATE ft2 SET c2 = c2 + 300, c3 = c3 || '_update3' WHERE c1 % 10 = 3;
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING *; -- can be pushed down
+UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING *;
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
+ FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9; -- can be pushed down
+UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
+ FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9;
+EXPLAIN (verbose, costs off)
+ DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4; -- can be pushed down
+DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4;
+EXPLAIN (verbose, costs off)
+DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2; -- can be pushed down
+DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
+SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1;
+EXPLAIN (verbose, costs off)
+INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass;
+INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass;
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down
+UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass;
+EXPLAIN (verbose, costs off)
+DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down
+DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass;
+
+-- Test UPDATE/DELETE with RETURNING on a three-table join
+INSERT INTO ft2 (c1,c2,c3)
+ SELECT id, id - 1200, to_char(id, 'FM00000') FROM generate_series(1201, 1300) id;
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c3 = 'foo'
+ FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1
+ RETURNING ft2, ft2.*, ft4, ft4.*; -- can be pushed down
+UPDATE ft2 SET c3 = 'foo'
+ FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1
+ RETURNING ft2, ft2.*, ft4, ft4.*;
+EXPLAIN (verbose, costs off)
+DELETE FROM ft2
+ USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1
+ RETURNING 100; -- can be pushed down
+DELETE FROM ft2
+ USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1
+ RETURNING 100;
+DELETE FROM ft2 WHERE ft2.c1 > 1200;
+
+-- Test UPDATE with a MULTIEXPR sub-select
+-- (maybe someday this'll be remotely executable, but not today)
+EXPLAIN (verbose, costs off)
+UPDATE ft2 AS target SET (c2, c7) = (
+ SELECT c2 * 10, c7
+ FROM ft2 AS src
+ WHERE target.c1 = src.c1
+) WHERE c1 > 1100;
+UPDATE ft2 AS target SET (c2, c7) = (
+ SELECT c2 * 10, c7
+ FROM ft2 AS src
+ WHERE target.c1 = src.c1
+) WHERE c1 > 1100;
+
+UPDATE ft2 AS target SET (c2) = (
+ SELECT c2 / 10
+ FROM ft2 AS src
+ WHERE target.c1 = src.c1
+) WHERE c1 > 1100;
+
+-- Test UPDATE involving a join that can be pushed down,
+-- but a SET clause that can't be
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE ft2 d SET c2 = CASE WHEN random() >= 0 THEN d.c2 ELSE 0 END
+ FROM ft2 AS t WHERE d.c1 = t.c1 AND d.c1 > 1000;
+UPDATE ft2 d SET c2 = CASE WHEN random() >= 0 THEN d.c2 ELSE 0 END
+ FROM ft2 AS t WHERE d.c1 = t.c1 AND d.c1 > 1000;
+
+-- Test UPDATE/DELETE with WHERE or JOIN/ON conditions containing
+-- user-defined operators/functions
+ALTER SERVER loopback OPTIONS (DROP extensions);
+INSERT INTO ft2 (c1,c2,c3)
+ SELECT id, id % 10, to_char(id, 'FM00000') FROM generate_series(2001, 2010) id;
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c3 = 'bar' WHERE postgres_fdw_abs(c1) > 2000 RETURNING *; -- can't be pushed down
+UPDATE ft2 SET c3 = 'bar' WHERE postgres_fdw_abs(c1) > 2000 RETURNING *;
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c3 = 'baz'
+ FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 2000 AND ft2.c2 === ft4.c1
+ RETURNING ft2.*, ft4.*, ft5.*; -- can't be pushed down
+UPDATE ft2 SET c3 = 'baz'
+ FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1)
+ WHERE ft2.c1 > 2000 AND ft2.c2 === ft4.c1
+ RETURNING ft2.*, ft4.*, ft5.*;
+EXPLAIN (verbose, costs off)
+DELETE FROM ft2
+ USING ft4 INNER JOIN ft5 ON (ft4.c1 === ft5.c1)
+ WHERE ft2.c1 > 2000 AND ft2.c2 = ft4.c1
+ RETURNING ft2.c1, ft2.c2, ft2.c3; -- can't be pushed down
+DELETE FROM ft2
+ USING ft4 INNER JOIN ft5 ON (ft4.c1 === ft5.c1)
+ WHERE ft2.c1 > 2000 AND ft2.c2 = ft4.c1
+ RETURNING ft2.c1, ft2.c2, ft2.c3;
+DELETE FROM ft2 WHERE ft2.c1 > 2000;
+ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw');
+
+-- Test that a trigger on the remote table works as expected
+CREATE OR REPLACE FUNCTION "S 1".F_BRTRIG() RETURNS trigger AS $$
+BEGIN
+ NEW.c3 = NEW.c3 || '_trig_update';
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER t1_br_insert BEFORE INSERT OR UPDATE
+ ON "S 1"."T 1" FOR EACH ROW EXECUTE PROCEDURE "S 1".F_BRTRIG();
+
+INSERT INTO ft2 (c1,c2,c3) VALUES (1208, 818, 'fff') RETURNING *;
+INSERT INTO ft2 (c1,c2,c3,c6) VALUES (1218, 818, 'ggg', '(--;') RETURNING *;
+UPDATE ft2 SET c2 = c2 + 600 WHERE c1 % 10 = 8 AND c1 < 1200 RETURNING *;
+
+-- Test errors thrown on remote side during update
+ALTER TABLE "S 1"."T 1" ADD CONSTRAINT c2positive CHECK (c2 >= 0);
+
+INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT DO NOTHING; -- works
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO NOTHING; -- unsupported
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO UPDATE SET c3 = 'ffg'; -- unsupported
+INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
+UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
+
+-- Test savepoint/rollback behavior
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+begin;
+update ft2 set c2 = 42 where c2 = 0;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+savepoint s1;
+update ft2 set c2 = 44 where c2 = 4;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+release savepoint s1;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+savepoint s2;
+update ft2 set c2 = 46 where c2 = 6;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+rollback to savepoint s2;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+release savepoint s2;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+savepoint s3;
+update ft2 set c2 = -2 where c2 = 42 and c1 = 10; -- fails on the remote side (c2positive)
+rollback to savepoint s3;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+release savepoint s3;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+-- none of the above is committed yet remotely
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+commit;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+
+VACUUM ANALYZE "S 1"."T 1";
+
+-- The DMLs above add rows with c6 NULL in ft1, so test ORDER BY NULLS LAST and
+-- NULLS FIRST behavior here.
+-- ORDER BY DESC NULLS LAST options
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 ORDER BY c6 DESC NULLS LAST, c1 OFFSET 795 LIMIT 10;
+SELECT * FROM ft1 ORDER BY c6 DESC NULLS LAST, c1 OFFSET 795 LIMIT 10;
+-- ORDER BY DESC NULLS FIRST options
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 ORDER BY c6 DESC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
+SELECT * FROM ft1 ORDER BY c6 DESC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
+-- ORDER BY ASC NULLS FIRST options
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 ORDER BY c6 ASC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
+SELECT * FROM ft1 ORDER BY c6 ASC NULLS FIRST, c1 OFFSET 15 LIMIT 10;
+
+-- ===================================================================
+-- test check constraints
+-- ===================================================================
+
+-- Consistent check constraints provide consistent results
+ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c2positive CHECK (c2 >= 0);
+EXPLAIN (VERBOSE, COSTS OFF) SELECT count(*) FROM ft1 WHERE c2 < 0;
+SELECT count(*) FROM ft1 WHERE c2 < 0;
+SET constraint_exclusion = 'on';
+EXPLAIN (VERBOSE, COSTS OFF) SELECT count(*) FROM ft1 WHERE c2 < 0;
+SELECT count(*) FROM ft1 WHERE c2 < 0;
+RESET constraint_exclusion;
+-- check constraint is enforced on the remote side, not locally
+INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
+UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
+ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2positive;
+
+-- But inconsistent check constraints provide inconsistent results
+ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c2negative CHECK (c2 < 0);
+EXPLAIN (VERBOSE, COSTS OFF) SELECT count(*) FROM ft1 WHERE c2 >= 0;
+SELECT count(*) FROM ft1 WHERE c2 >= 0;
+SET constraint_exclusion = 'on';
+EXPLAIN (VERBOSE, COSTS OFF) SELECT count(*) FROM ft1 WHERE c2 >= 0;
+SELECT count(*) FROM ft1 WHERE c2 >= 0;
+RESET constraint_exclusion;
+-- local check constraint is not actually enforced
+INSERT INTO ft1(c1, c2) VALUES(1111, 2);
+UPDATE ft1 SET c2 = c2 + 1 WHERE c1 = 1;
+ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2negative;
+
+-- ===================================================================
+-- test WITH CHECK OPTION constraints
+-- ===================================================================
+
+CREATE FUNCTION row_before_insupd_trigfunc() RETURNS trigger AS $$BEGIN NEW.a := NEW.a + 10; RETURN NEW; END$$ LANGUAGE plpgsql;
+
+CREATE TABLE base_tbl (a int, b int);
+ALTER TABLE base_tbl SET (autovacuum_enabled = 'false');
+CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON base_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc();
+CREATE FOREIGN TABLE foreign_tbl (a int, b int)
+ SERVER loopback OPTIONS (table_name 'base_tbl');
+CREATE VIEW rw_view AS SELECT * FROM foreign_tbl
+ WHERE a < b WITH CHECK OPTION;
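+-- (the remote BEFORE trigger adds 10 to a, so a row that satisfies a < b locally
+-- can still violate the view's CHECK OPTION once it is stored remotely)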
+\d+ rw_view
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 5);
+INSERT INTO rw_view VALUES (0, 5); -- should fail
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15);
+INSERT INTO rw_view VALUES (0, 15); -- ok
+SELECT * FROM foreign_tbl;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 5;
+UPDATE rw_view SET b = b + 5; -- should fail
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 15;
+UPDATE rw_view SET b = b + 15; -- ok
+SELECT * FROM foreign_tbl;
+
+-- We don't allow batch insert when there are any WCO constraints
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15), (0, 5);
+INSERT INTO rw_view VALUES (0, 15), (0, 5); -- should fail
+SELECT * FROM foreign_tbl;
+ALTER SERVER loopback OPTIONS (DROP batch_size);
+
+DROP FOREIGN TABLE foreign_tbl CASCADE;
+DROP TRIGGER row_before_insupd_trigger ON base_tbl;
+DROP TABLE base_tbl;
+
+-- test WCO for partitions
+
+CREATE TABLE child_tbl (a int, b int);
+ALTER TABLE child_tbl SET (autovacuum_enabled = 'false');
+CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON child_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc();
+CREATE FOREIGN TABLE foreign_tbl (a int, b int)
+ SERVER loopback OPTIONS (table_name 'child_tbl');
+
+CREATE TABLE parent_tbl (a int, b int) PARTITION BY RANGE(a);
+ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES FROM (0) TO (100);
+-- Detach and re-attach once, to stress the concurrent detach case.
+ALTER TABLE parent_tbl DETACH PARTITION foreign_tbl CONCURRENTLY;
+ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES FROM (0) TO (100);
+
+CREATE VIEW rw_view AS SELECT * FROM parent_tbl
+ WHERE a < b WITH CHECK OPTION;
+\d+ rw_view
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 5);
+INSERT INTO rw_view VALUES (0, 5); -- should fail
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15);
+INSERT INTO rw_view VALUES (0, 15); -- ok
+SELECT * FROM foreign_tbl;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 5;
+UPDATE rw_view SET b = b + 5; -- should fail
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE rw_view SET b = b + 15;
+UPDATE rw_view SET b = b + 15; -- ok
+SELECT * FROM foreign_tbl;
+
+-- We don't allow batch insert when there are any WCO constraints
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO rw_view VALUES (0, 15), (0, 5);
+INSERT INTO rw_view VALUES (0, 15), (0, 5); -- should fail
+SELECT * FROM foreign_tbl;
+ALTER SERVER loopback OPTIONS (DROP batch_size);
+
+DROP FOREIGN TABLE foreign_tbl CASCADE;
+DROP TRIGGER row_before_insupd_trigger ON child_tbl;
+DROP TABLE parent_tbl CASCADE;
+
+DROP FUNCTION row_before_insupd_trigfunc;
+
+-- ===================================================================
+-- test serial columns (ie, sequence-based defaults)
+-- ===================================================================
+create table loc1 (f1 serial, f2 text);
+alter table loc1 set (autovacuum_enabled = 'false');
+create foreign table rem1 (f1 serial, f2 text)
+ server loopback options(table_name 'loc1');
+select pg_catalog.setval('rem1_f1_seq', 10, false);
+insert into loc1(f2) values('hi');
+insert into rem1(f2) values('hi remote');
+insert into loc1(f2) values('bye');
+insert into rem1(f2) values('bye remote');
+select * from loc1;
+select * from rem1;
+
+-- ===================================================================
+-- test generated columns
+-- ===================================================================
+create table gloc1 (
+ a int,
+ b int generated always as (a * 2) stored);
+alter table gloc1 set (autovacuum_enabled = 'false');
+create foreign table grem1 (
+ a int,
+ b int generated always as (a * 2) stored)
+ server loopback options(table_name 'gloc1');
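+-- the generated column b is expected to be computed on the remote side by
+-- gloc1's generation expression rather than transferred from the local side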
+explain (verbose, costs off)
+insert into grem1 (a) values (1), (2);
+insert into grem1 (a) values (1), (2);
+explain (verbose, costs off)
+update grem1 set a = 22 where a = 2;
+update grem1 set a = 22 where a = 2;
+select * from gloc1;
+select * from grem1;
+delete from grem1;
+
+-- test copy from
+copy grem1 from stdin;
+1
+2
+\.
+select * from gloc1;
+select * from grem1;
+delete from grem1;
+
+-- test batch insert
+alter server loopback options (add batch_size '10');
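+-- with batch_size set, multi-row inserts are sent to the remote server in
+-- batches of up to 10 rows per round trip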
+explain (verbose, costs off)
+insert into grem1 (a) values (1), (2);
+insert into grem1 (a) values (1), (2);
+select * from gloc1;
+select * from grem1;
+delete from grem1;
+alter server loopback options (drop batch_size);
+
+-- ===================================================================
+-- test local triggers
+-- ===================================================================
+
+-- Trigger functions "borrowed" from triggers regress test.
+CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS $$
+BEGIN
+ RAISE NOTICE 'trigger_func(%) called: action = %, when = %, level = %',
+ TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL;
+ RETURN NULL;
+END;$$;
+
+CREATE TRIGGER trig_stmt_before BEFORE DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE TRIGGER trig_stmt_after AFTER DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+
+CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger
+LANGUAGE plpgsql AS $$
+
+declare
+ oldnew text[];
+ relid text;
+ argstr text;
+begin
+
+ relid := TG_relid::regclass;
+ argstr := '';
+ for i in 0 .. TG_nargs - 1 loop
+ if i > 0 then
+ argstr := argstr || ', ';
+ end if;
+ argstr := argstr || TG_argv[i];
+ end loop;
+
+ RAISE NOTICE '%(%) % % % ON %',
+ tg_name, argstr, TG_when, TG_level, TG_OP, relid;
+ oldnew := '{}'::text[];
+ if TG_OP != 'INSERT' then
+ oldnew := array_append(oldnew, format('OLD: %s', OLD));
+ end if;
+
+ if TG_OP != 'DELETE' then
+ oldnew := array_append(oldnew, format('NEW: %s', NEW));
+ end if;
+
+ RAISE NOTICE '%', array_to_string(oldnew, ',');
+
+ if TG_OP = 'DELETE' then
+ return OLD;
+ else
+ return NEW;
+ end if;
+end;
+$$;
+
+-- Test basic functionality
+CREATE TRIGGER trig_row_before
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_row_after
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+delete from rem1;
+insert into rem1 values(1,'insert');
+update rem1 set f2 = 'update' where f1 = 1;
+update rem1 set f2 = f2 || f2;
+
+
+-- cleanup
+DROP TRIGGER trig_row_before ON rem1;
+DROP TRIGGER trig_row_after ON rem1;
+DROP TRIGGER trig_stmt_before ON rem1;
+DROP TRIGGER trig_stmt_after ON rem1;
+
+DELETE from rem1;
+
+-- Test multiple AFTER ROW triggers on a foreign table
+CREATE TRIGGER trig_row_after1
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_row_after2
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+insert into rem1 values(1,'insert');
+update rem1 set f2 = 'update' where f1 = 1;
+update rem1 set f2 = f2 || f2;
+delete from rem1;
+
+-- cleanup
+DROP TRIGGER trig_row_after1 ON rem1;
+DROP TRIGGER trig_row_after2 ON rem1;
+
+-- Test WHEN conditions
+
+CREATE TRIGGER trig_row_before_insupd
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW
+WHEN (NEW.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_row_after_insupd
+AFTER INSERT OR UPDATE ON rem1
+FOR EACH ROW
+WHEN (NEW.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+-- Insert or update not matching: nothing happens
+INSERT INTO rem1 values(1, 'insert');
+UPDATE rem1 set f2 = 'test';
+
+-- Insert or update matching: triggers are fired
+INSERT INTO rem1 values(2, 'update');
+UPDATE rem1 set f2 = 'update update' where f1 = '2';
+
+CREATE TRIGGER trig_row_before_delete
+BEFORE DELETE ON rem1
+FOR EACH ROW
+WHEN (OLD.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_row_after_delete
+AFTER DELETE ON rem1
+FOR EACH ROW
+WHEN (OLD.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+-- Trigger is fired for f1=2, not for f1=1
+DELETE FROM rem1;
+
+-- cleanup
+DROP TRIGGER trig_row_before_insupd ON rem1;
+DROP TRIGGER trig_row_after_insupd ON rem1;
+DROP TRIGGER trig_row_before_delete ON rem1;
+DROP TRIGGER trig_row_after_delete ON rem1;
+
+
+-- Test various RETURN statements in BEFORE triggers.
+
+CREATE FUNCTION trig_row_before_insupdate() RETURNS TRIGGER AS $$
+ BEGIN
+ NEW.f2 := NEW.f2 || ' triggered !';
+ RETURN NEW;
+ END
+$$ language plpgsql;
+
+CREATE TRIGGER trig_row_before_insupd
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+
+-- The new values should have 'triggered' appended
+INSERT INTO rem1 values(1, 'insert');
+SELECT * from loc1;
+INSERT INTO rem1 values(2, 'insert') RETURNING f2;
+SELECT * from loc1;
+UPDATE rem1 set f2 = '';
+SELECT * from loc1;
+UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
+SELECT * from loc1;
+
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f1 = 10; -- all columns should be transmitted
+UPDATE rem1 set f1 = 10;
+SELECT * from loc1;
+
+DELETE FROM rem1;
+
+-- Add a second trigger, to check that the changes are propagated correctly
+-- from trigger to trigger
+CREATE TRIGGER trig_row_before_insupd2
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+
+INSERT INTO rem1 values(1, 'insert');
+SELECT * from loc1;
+INSERT INTO rem1 values(2, 'insert') RETURNING f2;
+SELECT * from loc1;
+UPDATE rem1 set f2 = '';
+SELECT * from loc1;
+UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
+SELECT * from loc1;
+
+DROP TRIGGER trig_row_before_insupd ON rem1;
+DROP TRIGGER trig_row_before_insupd2 ON rem1;
+
+DELETE from rem1;
+
+INSERT INTO rem1 VALUES (1, 'test');
+
+-- Test with a trigger returning NULL
+CREATE FUNCTION trig_null() RETURNS TRIGGER AS $$
+ BEGIN
+ RETURN NULL;
+ END
+$$ language plpgsql;
+
+CREATE TRIGGER trig_null
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_null();
+
+-- Nothing should have changed.
+INSERT INTO rem1 VALUES (2, 'test2');
+
+SELECT * from loc1;
+
+UPDATE rem1 SET f2 = 'test2';
+
+SELECT * from loc1;
+
+DELETE from rem1;
+
+SELECT * from loc1;
+
+DROP TRIGGER trig_null ON rem1;
+DELETE from rem1;
+
+-- Test a combination of local and remote triggers
+CREATE TRIGGER trig_row_before
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_row_after
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_local_before BEFORE INSERT OR UPDATE ON loc1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+
+INSERT INTO rem1(f2) VALUES ('test');
+UPDATE rem1 SET f2 = 'testo';
+
+-- Test returning a system attribute
+INSERT INTO rem1(f2) VALUES ('test') RETURNING ctid;
+
+-- cleanup
+DROP TRIGGER trig_row_before ON rem1;
+DROP TRIGGER trig_row_after ON rem1;
+DROP TRIGGER trig_local_before ON loc1;
+
+
+-- Test direct foreign table modification functionality
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1 WHERE false; -- currently can't be pushed down
+
+-- Test with statement-level triggers
+CREATE TRIGGER trig_stmt_before
+ BEFORE DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+DROP TRIGGER trig_stmt_before ON rem1;
+
+CREATE TRIGGER trig_stmt_after
+ AFTER DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+DROP TRIGGER trig_stmt_after ON rem1;
+
+-- Test with row-level ON INSERT triggers
+CREATE TRIGGER trig_row_before_insert
+BEFORE INSERT ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+DROP TRIGGER trig_row_before_insert ON rem1;
+
+CREATE TRIGGER trig_row_after_insert
+AFTER INSERT ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+DROP TRIGGER trig_row_after_insert ON rem1;
+
+-- Test with row-level ON UPDATE triggers
+CREATE TRIGGER trig_row_before_update
+BEFORE UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can't be pushed down
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+DROP TRIGGER trig_row_before_update ON rem1;
+
+CREATE TRIGGER trig_row_after_update
+AFTER UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can't be pushed down
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can be pushed down
+DROP TRIGGER trig_row_after_update ON rem1;
+
+-- Test with row-level ON DELETE triggers
+CREATE TRIGGER trig_row_before_delete
+BEFORE DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can't be pushed down
+DROP TRIGGER trig_row_before_delete ON rem1;
+
+CREATE TRIGGER trig_row_after_delete
+AFTER DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (verbose, costs off)
+UPDATE rem1 set f2 = ''; -- can be pushed down
+EXPLAIN (verbose, costs off)
+DELETE FROM rem1; -- can't be pushed down
+DROP TRIGGER trig_row_after_delete ON rem1;
+
+-- ===================================================================
+-- test inheritance features
+-- ===================================================================
+
+CREATE TABLE a (aa TEXT);
+CREATE TABLE loct (aa TEXT, bb TEXT);
+ALTER TABLE a SET (autovacuum_enabled = 'false');
+ALTER TABLE loct SET (autovacuum_enabled = 'false');
+CREATE FOREIGN TABLE b (bb TEXT) INHERITS (a)
+ SERVER loopback OPTIONS (table_name 'loct');
+
+INSERT INTO a(aa) VALUES('aaa');
+INSERT INTO a(aa) VALUES('aaaa');
+INSERT INTO a(aa) VALUES('aaaaa');
+
+INSERT INTO b(aa) VALUES('bbb');
+INSERT INTO b(aa) VALUES('bbbb');
+INSERT INTO b(aa) VALUES('bbbbb');
+
+SELECT tableoid::regclass, * FROM a;
+SELECT tableoid::regclass, * FROM b;
+SELECT tableoid::regclass, * FROM ONLY a;
+
+UPDATE a SET aa = 'zzzzzz' WHERE aa LIKE 'aaaa%';
+
+SELECT tableoid::regclass, * FROM a;
+SELECT tableoid::regclass, * FROM b;
+SELECT tableoid::regclass, * FROM ONLY a;
+
+UPDATE b SET aa = 'new';
+
+SELECT tableoid::regclass, * FROM a;
+SELECT tableoid::regclass, * FROM b;
+SELECT tableoid::regclass, * FROM ONLY a;
+
+UPDATE a SET aa = 'newtoo';
+
+SELECT tableoid::regclass, * FROM a;
+SELECT tableoid::regclass, * FROM b;
+SELECT tableoid::regclass, * FROM ONLY a;
+
+DELETE FROM a;
+
+SELECT tableoid::regclass, * FROM a;
+SELECT tableoid::regclass, * FROM b;
+SELECT tableoid::regclass, * FROM ONLY a;
+
+DROP TABLE a CASCADE;
+DROP TABLE loct;
+
+-- Check SELECT FOR UPDATE/SHARE with an inherited source table
+create table loct1 (f1 int, f2 int, f3 int);
+create table loct2 (f1 int, f2 int, f3 int);
+
+alter table loct1 set (autovacuum_enabled = 'false');
+alter table loct2 set (autovacuum_enabled = 'false');
+
+create table foo (f1 int, f2 int);
+create foreign table foo2 (f3 int) inherits (foo)
+ server loopback options (table_name 'loct1');
+create table bar (f1 int, f2 int);
+create foreign table bar2 (f3 int) inherits (bar)
+ server loopback options (table_name 'loct2');
+
+alter table foo set (autovacuum_enabled = 'false');
+alter table bar set (autovacuum_enabled = 'false');
+
+insert into foo values(1,1);
+insert into foo values(3,3);
+insert into foo2 values(2,2,2);
+insert into foo2 values(4,4,4);
+insert into bar values(1,11);
+insert into bar values(2,22);
+insert into bar values(6,66);
+insert into bar2 values(3,33,33);
+insert into bar2 values(4,44,44);
+insert into bar2 values(7,77,77);
+
+explain (verbose, costs off)
+select * from bar where f1 in (select f1 from foo) for update;
+select * from bar where f1 in (select f1 from foo) for update;
+
+explain (verbose, costs off)
+select * from bar where f1 in (select f1 from foo) for share;
+select * from bar where f1 in (select f1 from foo) for share;
+
+-- Now check SELECT FOR UPDATE/SHARE with an inherited source table,
+-- where the parent is itself a foreign table
+create table loct4 (f1 int, f2 int, f3 int);
+create foreign table foo2child (f3 int) inherits (foo2)
+ server loopback options (table_name 'loct4');
+
+explain (verbose, costs off)
+select * from bar where f1 in (select f1 from foo2) for share;
+select * from bar where f1 in (select f1 from foo2) for share;
+
+drop foreign table foo2child;
+
+-- And with a local child relation of the foreign table parent
+create table foo2child (f3 int) inherits (foo2);
+
+explain (verbose, costs off)
+select * from bar where f1 in (select f1 from foo2) for share;
+select * from bar where f1 in (select f1 from foo2) for share;
+
+drop table foo2child;
+
+-- Check UPDATE with inherited target and an inherited source table
+explain (verbose, costs off)
+update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
+update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
+
+select tableoid::regclass, * from bar order by 1,2;
+
+-- Check UPDATE with inherited target and an appendrel subquery
+explain (verbose, costs off)
+update bar set f2 = f2 + 100
+from
+ ( select f1 from foo union all select f1+3 from foo ) ss
+where bar.f1 = ss.f1;
+update bar set f2 = f2 + 100
+from
+ ( select f1 from foo union all select f1+3 from foo ) ss
+where bar.f1 = ss.f1;
+
+select tableoid::regclass, * from bar order by 1,2;
+
+-- Test forcing the remote server to produce sorted data for a merge join
+-- when the foreign table is an inheritance child.
+truncate table loct1;
+truncate table only foo;
+\set num_rows_foo 2000
+insert into loct1 select generate_series(0, :num_rows_foo, 2), generate_series(0, :num_rows_foo, 2), generate_series(0, :num_rows_foo, 2);
+insert into foo select generate_series(1, :num_rows_foo, 2), generate_series(1, :num_rows_foo, 2);
+SET enable_hashjoin to false;
+SET enable_nestloop to false;
+alter foreign table foo2 options (use_remote_estimate 'true');
+create index i_loct1_f1 on loct1(f1);
+create index i_foo_f1 on foo(f1);
+analyze foo;
+analyze loct1;
+-- inner join; expressions in the clauses appear in the equivalence class list
+explain (verbose, costs off)
+ select foo.f1, loct1.f1 from foo join loct1 on (foo.f1 = loct1.f1) order by foo.f2 offset 10 limit 10;
+select foo.f1, loct1.f1 from foo join loct1 on (foo.f1 = loct1.f1) order by foo.f2 offset 10 limit 10;
+-- outer join; expressions in the clauses do not appear in the equivalence-class
+-- list, but the output is unchanged compared to the previous query
+explain (verbose, costs off)
+ select foo.f1, loct1.f1 from foo left join loct1 on (foo.f1 = loct1.f1) order by foo.f2 offset 10 limit 10;
+select foo.f1, loct1.f1 from foo left join loct1 on (foo.f1 = loct1.f1) order by foo.f2 offset 10 limit 10;
+RESET enable_hashjoin;
+RESET enable_nestloop;
+
+-- Test that WHERE CURRENT OF is not supported
+begin;
+declare c cursor for select * from bar where f1 = 7;
+fetch from c;
+update bar set f2 = null where current of c;
+rollback;
+
+explain (verbose, costs off)
+delete from foo where f1 < 5 returning *;
+delete from foo where f1 < 5 returning *;
+explain (verbose, costs off)
+update bar set f2 = f2 + 100 returning *;
+update bar set f2 = f2 + 100 returning *;
+
+-- Test that UPDATE/DELETE with inherited target works with row-level triggers
+CREATE TRIGGER trig_row_before
+BEFORE UPDATE OR DELETE ON bar2
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_row_after
+AFTER UPDATE OR DELETE ON bar2
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+explain (verbose, costs off)
+update bar set f2 = f2 + 100;
+update bar set f2 = f2 + 100;
+
+explain (verbose, costs off)
+delete from bar where f2 < 400;
+delete from bar where f2 < 400;
+
+-- cleanup
+drop table foo cascade;
+drop table bar cascade;
+drop table loct1;
+drop table loct2;
+
+-- Test pushing down UPDATE/DELETE joins to the remote server
+create table parent (a int, b text);
+create table loct1 (a int, b text);
+create table loct2 (a int, b text);
+create foreign table remt1 (a int, b text)
+ server loopback options (table_name 'loct1');
+create foreign table remt2 (a int, b text)
+ server loopback options (table_name 'loct2');
+alter foreign table remt1 inherit parent;
+
+insert into remt1 values (1, 'foo');
+insert into remt1 values (2, 'bar');
+insert into remt2 values (1, 'foo');
+insert into remt2 values (2, 'bar');
+
+analyze remt1;
+analyze remt2;
+
+explain (verbose, costs off)
+update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *;
+update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *;
+explain (verbose, costs off)
+delete from parent using remt2 where parent.a = remt2.a returning parent;
+delete from parent using remt2 where parent.a = remt2.a returning parent;
+
+-- cleanup
+drop foreign table remt1;
+drop foreign table remt2;
+drop table loct1;
+drop table loct2;
+drop table parent;
+
+-- ===================================================================
+-- test tuple routing for foreign-table partitions
+-- ===================================================================
+
+-- Test insert tuple routing
+create table itrtest (a int, b text) partition by list (a);
+create table loct1 (a int check (a in (1)), b text);
+create foreign table remp1 (a int check (a in (1)), b text) server loopback options (table_name 'loct1');
+create table loct2 (a int check (a in (2)), b text);
+create foreign table remp2 (b text, a int check (a in (2))) server loopback options (table_name 'loct2');
+alter table itrtest attach partition remp1 for values in (1);
+alter table itrtest attach partition remp2 for values in (2);
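+-- (remp2 deliberately declares its columns in the opposite order from the parent,
+-- so routed tuples must be converted to the partition's row type)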
+
+insert into itrtest values (1, 'foo');
+insert into itrtest values (1, 'bar') returning *;
+insert into itrtest values (2, 'baz');
+insert into itrtest values (2, 'qux') returning *;
+insert into itrtest values (1, 'test1'), (2, 'test2') returning *;
+
+select tableoid::regclass, * FROM itrtest;
+select tableoid::regclass, * FROM remp1;
+select tableoid::regclass, * FROM remp2;
+
+delete from itrtest;
+
+create unique index loct1_idx on loct1 (a);
+
+-- DO NOTHING without an inference specification is supported
+insert into itrtest values (1, 'foo') on conflict do nothing returning *;
+insert into itrtest values (1, 'foo') on conflict do nothing returning *;
+
+-- But other cases are not supported
+insert into itrtest values (1, 'bar') on conflict (a) do nothing;
+insert into itrtest values (1, 'bar') on conflict (a) do update set b = excluded.b;
+
+select tableoid::regclass, * FROM itrtest;
+
+delete from itrtest;
+
+drop index loct1_idx;
+
+-- Test that remote triggers work with insert tuple routing
+create function br_insert_trigfunc() returns trigger as $$
+begin
+ new.b := new.b || ' triggered !';
+ return new;
+end
+$$ language plpgsql;
+create trigger loct1_br_insert_trigger before insert on loct1
+ for each row execute procedure br_insert_trigfunc();
+create trigger loct2_br_insert_trigger before insert on loct2
+ for each row execute procedure br_insert_trigfunc();
+
+-- The new values are concatenated with ' triggered !'
+insert into itrtest values (1, 'foo') returning *;
+insert into itrtest values (2, 'qux') returning *;
+insert into itrtest values (1, 'test1'), (2, 'test2') returning *;
+with result as (insert into itrtest values (1, 'test1'), (2, 'test2') returning *) select * from result;
+
+drop trigger loct1_br_insert_trigger on loct1;
+drop trigger loct2_br_insert_trigger on loct2;
+
+drop table itrtest;
+drop table loct1;
+drop table loct2;
+
+-- Test update tuple routing
+create table utrtest (a int, b text) partition by list (a);
+create table loct (a int check (a in (1)), b text);
+create foreign table remp (a int check (a in (1)), b text) server loopback options (table_name 'loct');
+create table locp (a int check (a in (2)), b text);
+alter table utrtest attach partition remp for values in (1);
+alter table utrtest attach partition locp for values in (2);
+
+insert into utrtest values (1, 'foo');
+insert into utrtest values (2, 'qux');
+
+select tableoid::regclass, * FROM utrtest;
+select tableoid::regclass, * FROM remp;
+select tableoid::regclass, * FROM locp;
+
+-- It's not allowed to move a row from a foreign partition to another partition
+update utrtest set a = 2 where b = 'foo' returning *;
+
+-- But the reverse is allowed
+update utrtest set a = 1 where b = 'qux' returning *;
+
+select tableoid::regclass, * FROM utrtest;
+select tableoid::regclass, * FROM remp;
+select tableoid::regclass, * FROM locp;
+
+-- The executor should not let unexercised FDWs shut down
+update utrtest set a = 1 where b = 'foo';
+
+-- Test that remote triggers work with update tuple routing
+create trigger loct_br_insert_trigger before insert on loct
+ for each row execute procedure br_insert_trigfunc();
+
+delete from utrtest;
+insert into utrtest values (2, 'qux');
+
+-- Check case where the foreign partition is a subplan target rel
+explain (verbose, costs off)
+update utrtest set a = 1 where a = 1 or a = 2 returning *;
+-- The new values are concatenated with ' triggered !'
+update utrtest set a = 1 where a = 1 or a = 2 returning *;
+
+delete from utrtest;
+insert into utrtest values (2, 'qux');
+
+-- Check case where the foreign partition isn't a subplan target rel
+explain (verbose, costs off)
+update utrtest set a = 1 where a = 2 returning *;
+-- The new values are concatenated with ' triggered !'
+update utrtest set a = 1 where a = 2 returning *;
+
+drop trigger loct_br_insert_trigger on loct;
+
+-- We can move rows to a foreign partition that has been updated already,
+-- but can't move rows to a foreign partition that hasn't been updated yet
+
+delete from utrtest;
+insert into utrtest values (1, 'foo');
+insert into utrtest values (2, 'qux');
+
+-- Test the former case:
+-- with a direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 1 returning *;
+update utrtest set a = 1 returning *;
+
+delete from utrtest;
+insert into utrtest values (1, 'foo');
+insert into utrtest values (2, 'qux');
+
+-- with a non-direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 1 from (values (1), (2)) s(x) where a = s.x returning *;
+update utrtest set a = 1 from (values (1), (2)) s(x) where a = s.x returning *;
+
+-- Change the definition of utrtest so that the foreign partition gets updated
+-- after the local partition
+delete from utrtest;
+alter table utrtest detach partition remp;
+drop foreign table remp;
+alter table loct drop constraint loct_a_check;
+alter table loct add check (a in (3));
+create foreign table remp (a int check (a in (3)), b text) server loopback options (table_name 'loct');
+alter table utrtest attach partition remp for values in (3);
+insert into utrtest values (2, 'qux');
+insert into utrtest values (3, 'xyzzy');
+
+-- Test the latter case:
+-- with a direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 3 returning *;
+update utrtest set a = 3 returning *; -- ERROR
+
+-- with a non-direct modification plan
+explain (verbose, costs off)
+update utrtest set a = 3 from (values (2), (3)) s(x) where a = s.x returning *;
+update utrtest set a = 3 from (values (2), (3)) s(x) where a = s.x returning *; -- ERROR
+
+drop table utrtest;
+drop table loct;
+
+-- Test copy tuple routing
+create table ctrtest (a int, b text) partition by list (a);
+create table loct1 (a int check (a in (1)), b text);
+create foreign table remp1 (a int check (a in (1)), b text) server loopback options (table_name 'loct1');
+create table loct2 (a int check (a in (2)), b text);
+create foreign table remp2 (b text, a int check (a in (2))) server loopback options (table_name 'loct2');
+alter table ctrtest attach partition remp1 for values in (1);
+alter table ctrtest attach partition remp2 for values in (2);
+
+copy ctrtest from stdin;
+1 foo
+2 qux
+\.
+
+select tableoid::regclass, * FROM ctrtest;
+select tableoid::regclass, * FROM remp1;
+select tableoid::regclass, * FROM remp2;
+
+-- Copying into foreign partitions directly should work as well
+copy remp1 from stdin;
+1 bar
+\.
+
+select tableoid::regclass, * FROM remp1;
+
+drop table ctrtest;
+drop table loct1;
+drop table loct2;
+
+-- ===================================================================
+-- test COPY FROM
+-- ===================================================================
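+-- COPY into a foreign table is carried out through the FDW's row-insert path
+-- rather than a remote COPY, so remote constraints and triggers behave as
+-- they do for INSERT.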
+
+create table loc2 (f1 int, f2 text);
+alter table loc2 set (autovacuum_enabled = 'false');
+create foreign table rem2 (f1 int, f2 text) server loopback options(table_name 'loc2');
+
+-- Test basic functionality
+copy rem2 from stdin;
+1 foo
+2 bar
+\.
+select * from rem2;
+
+delete from rem2;
+
+-- Test check constraints
+alter table loc2 add constraint loc2_f1positive check (f1 >= 0);
+alter foreign table rem2 add constraint rem2_f1positive check (f1 >= 0);
+
+-- check constraint is enforced on the remote side, not locally
+copy rem2 from stdin;
+1 foo
+2 bar
+\.
+copy rem2 from stdin; -- ERROR
+-1 xyzzy
+\.
+select * from rem2;
+
+alter foreign table rem2 drop constraint rem2_f1positive;
+alter table loc2 drop constraint loc2_f1positive;
+
+delete from rem2;
+
+-- Test local triggers
+create trigger trig_stmt_before before insert on rem2
+ for each statement execute procedure trigger_func();
+create trigger trig_stmt_after after insert on rem2
+ for each statement execute procedure trigger_func();
+create trigger trig_row_before before insert on rem2
+ for each row execute procedure trigger_data(23,'skidoo');
+create trigger trig_row_after after insert on rem2
+ for each row execute procedure trigger_data(23,'skidoo');
+
+copy rem2 from stdin;
+1 foo
+2 bar
+\.
+select * from rem2;
+
+drop trigger trig_row_before on rem2;
+drop trigger trig_row_after on rem2;
+drop trigger trig_stmt_before on rem2;
+drop trigger trig_stmt_after on rem2;
+
+delete from rem2;
+
+create trigger trig_row_before_insert before insert on rem2
+ for each row execute procedure trig_row_before_insupdate();
+
+-- The new values are concatenated with ' triggered !'
+copy rem2 from stdin;
+1 foo
+2 bar
+\.
+select * from rem2;
+
+drop trigger trig_row_before_insert on rem2;
+
+delete from rem2;
+
+create trigger trig_null before insert on rem2
+ for each row execute procedure trig_null();
+
+-- Nothing happens
+copy rem2 from stdin;
+1 foo
+2 bar
+\.
+select * from rem2;
+
+drop trigger trig_null on rem2;
+
+delete from rem2;
+
+-- Test remote triggers
+create trigger trig_row_before_insert before insert on loc2
+ for each row execute procedure trig_row_before_insupdate();
+
+-- The new values are concatenated with ' triggered !'
+copy rem2 from stdin;
+1 foo
+2 bar
+\.
+select * from rem2;
+
+drop trigger trig_row_before_insert on loc2;
+
+delete from rem2;
+
+create trigger trig_null before insert on loc2
+ for each row execute procedure trig_null();
+
+-- Nothing happens
+copy rem2 from stdin;
+1 foo
+2 bar
+\.
+select * from rem2;
+
+drop trigger trig_null on loc2;
+
+delete from rem2;
+
+-- Test a combination of local and remote triggers
+create trigger rem2_trig_row_before before insert on rem2
+ for each row execute procedure trigger_data(23,'skidoo');
+create trigger rem2_trig_row_after after insert on rem2
+ for each row execute procedure trigger_data(23,'skidoo');
+create trigger loc2_trig_row_before_insert before insert on loc2
+ for each row execute procedure trig_row_before_insupdate();
+
+copy rem2 from stdin;
+1 foo
+2 bar
+\.
+select * from rem2;
+
+drop trigger rem2_trig_row_before on rem2;
+drop trigger rem2_trig_row_after on rem2;
+drop trigger loc2_trig_row_before_insert on loc2;
+
+delete from rem2;
+
+-- test COPY FROM with foreign table created in the same transaction
+create table loc3 (f1 int, f2 text);
+begin;
+create foreign table rem3 (f1 int, f2 text)
+ server loopback options(table_name 'loc3');
+copy rem3 from stdin;
+1 foo
+2 bar
+\.
+commit;
+select * from rem3;
+drop foreign table rem3;
+drop table loc3;
+
+-- ===================================================================
+-- test for TRUNCATE
+-- ===================================================================
+CREATE TABLE tru_rtable0 (id int primary key);
+CREATE FOREIGN TABLE tru_ftable (id int)
+ SERVER loopback OPTIONS (table_name 'tru_rtable0');
+INSERT INTO tru_rtable0 (SELECT x FROM generate_series(1,10) x);
+
+CREATE TABLE tru_ptable (id int) PARTITION BY HASH(id);
+CREATE TABLE tru_ptable__p0 PARTITION OF tru_ptable
+ FOR VALUES WITH (MODULUS 2, REMAINDER 0);
+CREATE TABLE tru_rtable1 (id int primary key);
+CREATE FOREIGN TABLE tru_ftable__p1 PARTITION OF tru_ptable
+ FOR VALUES WITH (MODULUS 2, REMAINDER 1)
+ SERVER loopback OPTIONS (table_name 'tru_rtable1');
+INSERT INTO tru_ptable (SELECT x FROM generate_series(11,20) x);
+
+CREATE TABLE tru_pk_table(id int primary key);
+CREATE TABLE tru_fk_table(fkey int references tru_pk_table(id));
+INSERT INTO tru_pk_table (SELECT x FROM generate_series(1,10) x);
+INSERT INTO tru_fk_table (SELECT x % 10 + 1 FROM generate_series(5,25) x);
+CREATE FOREIGN TABLE tru_pk_ftable (id int)
+ SERVER loopback OPTIONS (table_name 'tru_pk_table');
+
+CREATE TABLE tru_rtable_parent (id int);
+CREATE TABLE tru_rtable_child (id int);
+CREATE FOREIGN TABLE tru_ftable_parent (id int)
+ SERVER loopback OPTIONS (table_name 'tru_rtable_parent');
+CREATE FOREIGN TABLE tru_ftable_child () INHERITS (tru_ftable_parent)
+ SERVER loopback OPTIONS (table_name 'tru_rtable_child');
+INSERT INTO tru_rtable_parent (SELECT x FROM generate_series(1,8) x);
+INSERT INTO tru_rtable_child (SELECT x FROM generate_series(10, 18) x);
+
+-- normal truncate
+SELECT sum(id) FROM tru_ftable; -- 55
+TRUNCATE tru_ftable;
+SELECT count(*) FROM tru_rtable0; -- 0
+SELECT count(*) FROM tru_ftable; -- 0
+
+-- 'truncatable' option
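+-- A truncatable setting on the foreign table overrides the server-level
+-- setting.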
+ALTER SERVER loopback OPTIONS (ADD truncatable 'false');
+TRUNCATE tru_ftable; -- error
+ALTER FOREIGN TABLE tru_ftable OPTIONS (ADD truncatable 'true');
+TRUNCATE tru_ftable; -- accepted
+ALTER FOREIGN TABLE tru_ftable OPTIONS (SET truncatable 'false');
+TRUNCATE tru_ftable; -- error
+ALTER SERVER loopback OPTIONS (DROP truncatable);
+ALTER FOREIGN TABLE tru_ftable OPTIONS (SET truncatable 'false');
+TRUNCATE tru_ftable; -- error
+ALTER FOREIGN TABLE tru_ftable OPTIONS (SET truncatable 'true');
+TRUNCATE tru_ftable; -- accepted
+
+-- partitioned table with both local and foreign tables as partitions
+SELECT sum(id) FROM tru_ptable; -- 155
+TRUNCATE tru_ptable;
+SELECT count(*) FROM tru_ptable; -- 0
+SELECT count(*) FROM tru_ptable__p0; -- 0
+SELECT count(*) FROM tru_ftable__p1; -- 0
+SELECT count(*) FROM tru_rtable1; -- 0
+
+-- 'CASCADE' option
+SELECT sum(id) FROM tru_pk_ftable; -- 55
+TRUNCATE tru_pk_ftable; -- failed by FK reference
+TRUNCATE tru_pk_ftable CASCADE;
+SELECT count(*) FROM tru_pk_ftable; -- 0
+SELECT count(*) FROM tru_fk_table; -- also truncated, 0
+
+-- truncate two tables at a command
+INSERT INTO tru_ftable (SELECT x FROM generate_series(1,8) x);
+INSERT INTO tru_pk_ftable (SELECT x FROM generate_series(3,10) x);
+SELECT count(*) from tru_ftable; -- 8
+SELECT count(*) from tru_pk_ftable; -- 8
+TRUNCATE tru_ftable, tru_pk_ftable CASCADE;
+SELECT count(*) from tru_ftable; -- 0
+SELECT count(*) from tru_pk_ftable; -- 0
+
+-- truncate with ONLY clause
+-- Since ONLY is specified, the table tru_ftable_child that inherits
+-- tru_ftable_parent locally is not truncated.
+TRUNCATE ONLY tru_ftable_parent;
+SELECT sum(id) FROM tru_ftable_parent; -- 126
+TRUNCATE tru_ftable_parent;
+SELECT count(*) FROM tru_ftable_parent; -- 0
+
+-- case where the remote table has inherited children
+CREATE TABLE tru_rtable0_child () INHERITS (tru_rtable0);
+INSERT INTO tru_rtable0 (SELECT x FROM generate_series(5,9) x);
+INSERT INTO tru_rtable0_child (SELECT x FROM generate_series(10,14) x);
+SELECT sum(id) FROM tru_ftable; -- 95
+
+-- Both parent and child tables in the foreign server are truncated
+-- even though ONLY is specified because ONLY has no effect
+-- when truncating a foreign table.
+TRUNCATE ONLY tru_ftable;
+SELECT count(*) FROM tru_ftable; -- 0
+
+INSERT INTO tru_rtable0 (SELECT x FROM generate_series(21,25) x);
+INSERT INTO tru_rtable0_child (SELECT x FROM generate_series(26,30) x);
+SELECT sum(id) FROM tru_ftable; -- 255
+TRUNCATE tru_ftable; -- truncates both the parent and the child
+SELECT count(*) FROM tru_ftable; -- 0
+
+-- cleanup
+DROP FOREIGN TABLE tru_ftable_parent, tru_ftable_child, tru_pk_ftable, tru_ftable__p1, tru_ftable;
+DROP TABLE tru_rtable0, tru_rtable1, tru_ptable, tru_ptable__p0, tru_pk_table, tru_fk_table,
+tru_rtable_parent, tru_rtable_child, tru_rtable0_child;
+
+-- ===================================================================
+-- test IMPORT FOREIGN SCHEMA
+-- ===================================================================
+
+CREATE SCHEMA import_source;
+CREATE TABLE import_source.t1 (c1 int, c2 varchar NOT NULL);
+CREATE TABLE import_source.t2 (c1 int default 42, c2 varchar NULL, c3 text collate "POSIX");
+CREATE TYPE typ1 AS (m1 int, m2 varchar);
+CREATE TABLE import_source.t3 (c1 timestamptz default now(), c2 typ1);
+CREATE TABLE import_source."x 4" (c1 float8, "C 2" text, c3 varchar(42));
+CREATE TABLE import_source."x 5" (c1 float8);
+ALTER TABLE import_source."x 5" DROP COLUMN c1;
+CREATE TABLE import_source."x 6" (c1 int, c2 int generated always as (c1 * 2) stored);
+CREATE TABLE import_source.t4 (c1 int) PARTITION BY RANGE (c1);
+CREATE TABLE import_source.t4_part PARTITION OF import_source.t4
+ FOR VALUES FROM (1) TO (100);
+CREATE TABLE import_source.t4_part2 PARTITION OF import_source.t4
+ FOR VALUES FROM (100) TO (200);
+
+CREATE SCHEMA import_dest1;
+IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest1;
+\det+ import_dest1.*
+\d import_dest1.*
+
+-- Options
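+-- import_default controls whether column DEFAULT expressions are imported,
+-- import_collate whether COLLATE clauses are imported, import_generated
+-- whether GENERATED column expressions are imported, and import_not_null
+-- whether NOT NULL constraints are imported.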
+CREATE SCHEMA import_dest2;
+IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest2
+ OPTIONS (import_default 'true');
+\det+ import_dest2.*
+\d import_dest2.*
+CREATE SCHEMA import_dest3;
+IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3
+ OPTIONS (import_collate 'false', import_generated 'false', import_not_null 'false');
+\det+ import_dest3.*
+\d import_dest3.*
+
+-- Check LIMIT TO and EXCEPT
+CREATE SCHEMA import_dest4;
+IMPORT FOREIGN SCHEMA import_source LIMIT TO (t1, nonesuch, t4_part)
+ FROM SERVER loopback INTO import_dest4;
+\det+ import_dest4.*
+IMPORT FOREIGN SCHEMA import_source EXCEPT (t1, "x 4", nonesuch, t4_part)
+ FROM SERVER loopback INTO import_dest4;
+\det+ import_dest4.*
+
+-- Assorted error cases
+IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest4;
+IMPORT FOREIGN SCHEMA nonesuch FROM SERVER loopback INTO import_dest4;
+IMPORT FOREIGN SCHEMA nonesuch FROM SERVER loopback INTO notthere;
+IMPORT FOREIGN SCHEMA nonesuch FROM SERVER nowhere INTO notthere;
+
+-- Check case of a type present only on the remote server.
+-- We can fake this by dropping the type locally in our transaction.
+CREATE TYPE "Colors" AS ENUM ('red', 'green', 'blue');
+CREATE TABLE import_source.t5 (c1 int, c2 text collate "C", "Col" "Colors");
+
+CREATE SCHEMA import_dest5;
+BEGIN;
+DROP TYPE "Colors" CASCADE;
+IMPORT FOREIGN SCHEMA import_source LIMIT TO (t5)
+ FROM SERVER loopback INTO import_dest5; -- ERROR
+
+ROLLBACK;
+
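+-- ===================================================================
+-- test the fetch_size option at the server and foreign-table level
+-- ===================================================================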
+BEGIN;
+
+
+CREATE SERVER fetch101 FOREIGN DATA WRAPPER postgres_fdw OPTIONS( fetch_size '101' );
+
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'fetch101'
+AND srvoptions @> array['fetch_size=101'];
+
+ALTER SERVER fetch101 OPTIONS( SET fetch_size '202' );
+
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'fetch101'
+AND srvoptions @> array['fetch_size=101'];
+
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'fetch101'
+AND srvoptions @> array['fetch_size=202'];
+
+CREATE FOREIGN TABLE table30000 ( x int ) SERVER fetch101 OPTIONS ( fetch_size '30000' );
+
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30000'::regclass
+AND ftoptions @> array['fetch_size=30000'];
+
+ALTER FOREIGN TABLE table30000 OPTIONS ( SET fetch_size '60000');
+
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30000'::regclass
+AND ftoptions @> array['fetch_size=30000'];
+
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30000'::regclass
+AND ftoptions @> array['fetch_size=60000'];
+
+ROLLBACK;
+
+-- ===================================================================
+-- test partitionwise joins
+-- ===================================================================
+SET enable_partitionwise_join=on;
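+-- With partitionwise join enabled, a join between partitioned tables can be
+-- broken down into joins between matching partitions; when both sides of a
+-- partition pair are foreign tables on the same server, that per-partition
+-- join can be pushed down to the remote server.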
+
+CREATE TABLE fprt1 (a int, b int, c varchar) PARTITION BY RANGE(a);
+CREATE TABLE fprt1_p1 (LIKE fprt1);
+CREATE TABLE fprt1_p2 (LIKE fprt1);
+ALTER TABLE fprt1_p1 SET (autovacuum_enabled = 'false');
+ALTER TABLE fprt1_p2 SET (autovacuum_enabled = 'false');
+INSERT INTO fprt1_p1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 249, 2) i;
+INSERT INTO fprt1_p2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(250, 499, 2) i;
+CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (0) TO (250)
+ SERVER loopback OPTIONS (table_name 'fprt1_p1', use_remote_estimate 'true');
+CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (250) TO (500)
+ SERVER loopback OPTIONS (TABLE_NAME 'fprt1_p2');
+ANALYZE fprt1;
+ANALYZE fprt1_p1;
+ANALYZE fprt1_p2;
+
+CREATE TABLE fprt2 (a int, b int, c varchar) PARTITION BY RANGE(b);
+CREATE TABLE fprt2_p1 (LIKE fprt2);
+CREATE TABLE fprt2_p2 (LIKE fprt2);
+ALTER TABLE fprt2_p1 SET (autovacuum_enabled = 'false');
+ALTER TABLE fprt2_p2 SET (autovacuum_enabled = 'false');
+INSERT INTO fprt2_p1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 249, 3) i;
+INSERT INTO fprt2_p2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(250, 499, 3) i;
+CREATE FOREIGN TABLE ftprt2_p1 (b int, c varchar, a int)
+ SERVER loopback OPTIONS (table_name 'fprt2_p1', use_remote_estimate 'true');
+ALTER TABLE fprt2 ATTACH PARTITION ftprt2_p1 FOR VALUES FROM (0) TO (250);
+CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (250) TO (500)
+ SERVER loopback OPTIONS (table_name 'fprt2_p2', use_remote_estimate 'true');
+ANALYZE fprt2;
+ANALYZE fprt2_p1;
+ANALYZE fprt2_p2;
+
+-- inner join three tables
+EXPLAIN (COSTS OFF)
+SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER JOIN fprt1 t3 ON (t2.b = t3.a) WHERE t1.a % 25 =0 ORDER BY 1,2,3;
+SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER JOIN fprt1 t3 ON (t2.b = t3.a) WHERE t1.a % 25 =0 ORDER BY 1,2,3;
+
+-- left outer join + nullable clause
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3;
+SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3;
+
+-- with whole-row reference; partitionwise join does not apply
+EXPLAIN (COSTS OFF)
+SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2;
+SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2;
+
+-- join with lateral reference
+EXPLAIN (COSTS OFF)
+SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2;
+SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2;
+
+-- with PHVs, partitionwise join selected but no join pushdown
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b;
+SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b;
+
+-- test FOR UPDATE; partitionwise join does not apply
+EXPLAIN (COSTS OFF)
+SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1;
+SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1;
+
+RESET enable_partitionwise_join;
+
+
+-- ===================================================================
+-- test partitionwise aggregates
+-- ===================================================================
+
+CREATE TABLE pagg_tab (a int, b int, c text) PARTITION BY RANGE(a);
+
+CREATE TABLE pagg_tab_p1 (LIKE pagg_tab);
+CREATE TABLE pagg_tab_p2 (LIKE pagg_tab);
+CREATE TABLE pagg_tab_p3 (LIKE pagg_tab);
+
+INSERT INTO pagg_tab_p1 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 10;
+INSERT INTO pagg_tab_p2 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 20 and (i % 30) >= 10;
+INSERT INTO pagg_tab_p3 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 30 and (i % 30) >= 20;
+
+-- Create foreign partitions
+CREATE FOREIGN TABLE fpagg_tab_p1 PARTITION OF pagg_tab FOR VALUES FROM (0) TO (10) SERVER loopback OPTIONS (table_name 'pagg_tab_p1');
+CREATE FOREIGN TABLE fpagg_tab_p2 PARTITION OF pagg_tab FOR VALUES FROM (10) TO (20) SERVER loopback OPTIONS (table_name 'pagg_tab_p2');
+CREATE FOREIGN TABLE fpagg_tab_p3 PARTITION OF pagg_tab FOR VALUES FROM (20) TO (30) SERVER loopback OPTIONS (table_name 'pagg_tab_p3');
+
+ANALYZE pagg_tab;
+ANALYZE fpagg_tab_p1;
+ANALYZE fpagg_tab_p2;
+ANALYZE fpagg_tab_p3;
+
+-- When the GROUP BY clause matches the partition key.
+-- Plan with partitionwise aggregates is disabled
+SET enable_partitionwise_aggregate TO false;
+EXPLAIN (COSTS OFF)
+SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
+
+-- Plan with partitionwise aggregates is enabled
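+-- Since the GROUP BY column is the partition key, the aggregate can be
+-- computed completely within each partition; for foreign partitions the
+-- whole aggregation is pushed down to the remote server and the
+-- per-partition results are appended.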
+SET enable_partitionwise_aggregate TO true;
+EXPLAIN (COSTS OFF)
+SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
+SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
+
+-- Check with whole-row reference
+-- Should have all the columns in the target list for the given relation
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
+SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
+
+-- When the GROUP BY clause does not match the partition key.
+EXPLAIN (COSTS OFF)
+SELECT b, avg(a), max(a), count(*) FROM pagg_tab GROUP BY b HAVING sum(a) < 700 ORDER BY 1;
+
+-- ===================================================================
+-- access rights and superuser
+-- ===================================================================
+
+-- Non-superusers cannot use postgres_fdw connections that lack password
+-- authentication, unless a superuser sets password_required 'false'
+CREATE ROLE regress_nosuper NOSUPERUSER;
+
+GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO regress_nosuper;
+
+SET ROLE regress_nosuper;
+
+SHOW is_superuser;
+
+-- This will be OK, we can create the server
+DO $d$
+ BEGIN
+ EXECUTE $$CREATE SERVER loopback_nopw FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (dbname '$$||current_database()||$$',
+ port '$$||current_setting('port')||$$'
+ )$$;
+ END;
+$d$;
+
+-- But creation of user mappings for non-superusers should fail
+CREATE USER MAPPING FOR public SERVER loopback_nopw;
+CREATE USER MAPPING FOR CURRENT_USER SERVER loopback_nopw;
+
+CREATE FOREIGN TABLE ft1_nopw (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10) default 'ft1',
+ c8 user_enum
+) SERVER loopback_nopw OPTIONS (schema_name 'public', table_name 'ft1');
+
+SELECT 1 FROM ft1_nopw LIMIT 1;
+
+-- If we add a password to the connstr it'll fail, because we don't allow passwords
+-- in connstrs, only in user mappings.
+
+DO $d$
+ BEGIN
+ EXECUTE $$ALTER SERVER loopback_nopw OPTIONS (ADD password 'dummypw')$$;
+ END;
+$d$;
+
+-- If we add a password for our user mapping instead, we should get a different
+-- error because the password wasn't actually *used* when we run with trust auth.
+--
+-- This won't work with installcheck, but neither will most of the FDW checks.
+
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password 'dummypw');
+
+SELECT 1 FROM ft1_nopw LIMIT 1;
+
+-- Unpriv user cannot make the mapping passwordless
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD password_required 'false');
+
+
+SELECT 1 FROM ft1_nopw LIMIT 1;
+
+RESET ROLE;
+
+-- But the superuser can
+ALTER USER MAPPING FOR regress_nosuper SERVER loopback_nopw OPTIONS (ADD password_required 'false');
+
+SET ROLE regress_nosuper;
+
+-- Should finally work now
+SELECT 1 FROM ft1_nopw LIMIT 1;
+
+-- unpriv user also cannot set sslcert / sslkey on the user mapping
+-- first set password_required so we see the right error messages
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (SET password_required 'true');
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD sslcert 'foo.crt');
+ALTER USER MAPPING FOR CURRENT_USER SERVER loopback_nopw OPTIONS (ADD sslkey 'foo.key');
+
+-- We're done with the role named after a specific user and need to check the
+-- changes to the public mapping.
+DROP USER MAPPING FOR CURRENT_USER SERVER loopback_nopw;
+
+-- This will fail again as it'll resolve the user mapping for public, which
+-- lacks password_required=false
+SELECT 1 FROM ft1_nopw LIMIT 1;
+
+RESET ROLE;
+
+-- The user mapping for public is passwordless and lacks the password_required=false
+-- mapping option, but will work because the current user is a superuser.
+SELECT 1 FROM ft1_nopw LIMIT 1;
+
+-- cleanup
+DROP USER MAPPING FOR public SERVER loopback_nopw;
+DROP OWNED BY regress_nosuper;
+DROP ROLE regress_nosuper;
+
+-- Clean-up
+RESET enable_partitionwise_aggregate;
+
+-- Two-phase transactions are not supported.
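+-- postgres_fdw does not implement two-phase commit for its remote
+-- transactions, so PREPARE TRANSACTION is rejected once a foreign table has
+-- been accessed in the current transaction.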
+BEGIN;
+SELECT count(*) FROM ft1;
+-- error here
+PREPARE TRANSACTION 'fdw_tpc';
+ROLLBACK;
+
+-- ===================================================================
+-- reestablish new connection
+-- ===================================================================
+
+-- Change application_name of remote connection to special one
+-- so that we can easily terminate the connection later.
+ALTER SERVER loopback OPTIONS (application_name 'fdw_retry_check');
+
+-- If debug_discard_caches is active, it results in
+-- dropping remote connections after every transaction, making it
+-- impossible to test termination meaningfully. So turn that off
+-- for this test.
+SET debug_discard_caches = 0;
+
+-- Make sure we have a remote connection.
+SELECT 1 FROM ft1 LIMIT 1;
+
+-- Terminate the remote connection and wait for the termination to complete.
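+-- The second argument of pg_terminate_backend() is the time in milliseconds
+-- to wait for the backend to actually terminate before giving up.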
+SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity
+ WHERE application_name = 'fdw_retry_check';
+
+-- This query should detect the broken connection when starting a new remote
+-- transaction, reestablish a new connection, and then succeed.
+BEGIN;
+SELECT 1 FROM ft1 LIMIT 1;
+
+-- If we detect the broken connection when starting a new remote
+-- subtransaction, we should fail instead of establishing a new connection.
+-- Terminate the remote connection and wait for the termination to complete.
+SELECT pg_terminate_backend(pid, 180000) FROM pg_stat_activity
+ WHERE application_name = 'fdw_retry_check';
+SAVEPOINT s;
+-- The text of the error might vary across platforms, so only show SQLSTATE.
+\set VERBOSITY sqlstate
+SELECT 1 FROM ft1 LIMIT 1; -- should fail
+\set VERBOSITY default
+COMMIT;
+
+RESET debug_discard_caches;
+
+-- =============================================================================
+-- test connection invalidation cases and postgres_fdw_get_connections function
+-- =============================================================================
+-- Make sure all the existing cached connections are closed.
+SELECT 1 FROM postgres_fdw_disconnect_all();
+-- No cached connections, so no records should be output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+-- This test case is for closing the connection in pgfdw_xact_callback
+BEGIN;
+-- Connection xact depth becomes 1, i.e. the connection is in the midst of the xact.
+SELECT 1 FROM ft1 LIMIT 1;
+SELECT 1 FROM ft7 LIMIT 1;
+-- List all the existing cached connections. loopback and loopback3 should be
+-- output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+-- Connections are not closed at the end of the alter and drop statements.
+-- That's because the connections are in the midst of this xact;
+-- they are just marked as invalid in pgfdw_inval_callback.
+ALTER SERVER loopback OPTIONS (ADD use_remote_estimate 'off');
+DROP SERVER loopback3 CASCADE;
+-- List all the existing cached connections. loopback and loopback3
+-- should be output as invalid connections. Also the server name for
+-- loopback3 should be NULL because the server was dropped.
+SELECT * FROM postgres_fdw_get_connections() ORDER BY 1;
+-- The invalid connections get closed in pgfdw_xact_callback during commit.
+COMMIT;
+-- All cached connections were closed while committing the above xact, so no
+-- records should be output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+
+-- =======================================================================
+-- test postgres_fdw_disconnect and postgres_fdw_disconnect_all functions
+-- =======================================================================
+BEGIN;
+-- Make sure the loopback connection is cached.
+SELECT 1 FROM ft1 LIMIT 1;
+-- Make sure the loopback2 connection is cached.
+SELECT 1 FROM ft6 LIMIT 1;
+-- List all the existing cached connections. loopback and loopback2 should be
+-- output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+-- Issue a warning and return false as the loopback connection is still in use
+-- and cannot be closed.
+SELECT postgres_fdw_disconnect('loopback');
+-- List all the existing cached connections. loopback and loopback2 should be
+-- output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+-- Return false as connections are still in use; warnings are issued.
+-- But disable warnings temporarily because their order is not stable.
+SET client_min_messages = 'ERROR';
+SELECT postgres_fdw_disconnect_all();
+RESET client_min_messages;
+COMMIT;
+-- Make sure that the loopback2 connection is closed.
+SELECT 1 FROM postgres_fdw_disconnect('loopback2');
+SELECT server_name FROM postgres_fdw_get_connections() WHERE server_name = 'loopback2';
+-- Return false as the loopback2 connection is already closed.
+SELECT postgres_fdw_disconnect('loopback2');
+-- Return an error as there is no foreign server with the given name.
+SELECT postgres_fdw_disconnect('unknownserver');
+-- Make sure all the existing cached connections are closed.
+SELECT 1 FROM postgres_fdw_disconnect_all();
+-- No cached connections, so no records should be output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+
+-- =============================================================================
+-- test case for having multiple cached connections for a foreign server
+-- =============================================================================
+CREATE ROLE regress_multi_conn_user1 SUPERUSER;
+CREATE ROLE regress_multi_conn_user2 SUPERUSER;
+CREATE USER MAPPING FOR regress_multi_conn_user1 SERVER loopback;
+CREATE USER MAPPING FOR regress_multi_conn_user2 SERVER loopback;
+
+BEGIN;
+-- Will cache loopback connection with user mapping for regress_multi_conn_user1
+SET ROLE regress_multi_conn_user1;
+SELECT 1 FROM ft1 LIMIT 1;
+RESET ROLE;
+
+-- Will cache loopback connection with user mapping for regress_multi_conn_user2
+SET ROLE regress_multi_conn_user2;
+SELECT 1 FROM ft1 LIMIT 1;
+RESET ROLE;
+
+-- Should output two connections for loopback server
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+COMMIT;
+-- Make sure all the existing cached connections are closed.
+SELECT 1 FROM postgres_fdw_disconnect_all();
+-- No cached connections, so no records should be output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+
+-- Clean up
+DROP USER MAPPING FOR regress_multi_conn_user1 SERVER loopback;
+DROP USER MAPPING FOR regress_multi_conn_user2 SERVER loopback;
+DROP ROLE regress_multi_conn_user1;
+DROP ROLE regress_multi_conn_user2;
+
+-- ===================================================================
+-- Test foreign server level option keep_connections
+-- ===================================================================
+-- By default, the connections associated with a foreign server are cached, i.e.
+-- the keep_connections option is on. Set it to off.
+ALTER SERVER loopback OPTIONS (keep_connections 'off');
+-- The connection to the loopback server is closed at the end of the xact
+-- as keep_connections was set to off.
+SELECT 1 FROM ft1 LIMIT 1;
+-- No cached connections, so no records should be output.
+SELECT server_name FROM postgres_fdw_get_connections() ORDER BY 1;
+ALTER SERVER loopback OPTIONS (SET keep_connections 'on');
+
+-- ===================================================================
+-- batch insert
+-- ===================================================================
+
+BEGIN;
+
+CREATE SERVER batch10 FOREIGN DATA WRAPPER postgres_fdw OPTIONS( batch_size '10' );
+
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'batch10'
+AND srvoptions @> array['batch_size=10'];
+
+ALTER SERVER batch10 OPTIONS( SET batch_size '20' );
+
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'batch10'
+AND srvoptions @> array['batch_size=10'];
+
+SELECT count(*)
+FROM pg_foreign_server
+WHERE srvname = 'batch10'
+AND srvoptions @> array['batch_size=20'];
+
+CREATE FOREIGN TABLE table30 ( x int ) SERVER batch10 OPTIONS ( batch_size '30' );
+
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30'::regclass
+AND ftoptions @> array['batch_size=30'];
+
+ALTER FOREIGN TABLE table30 OPTIONS ( SET batch_size '40');
+
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30'::regclass
+AND ftoptions @> array['batch_size=30'];
+
+SELECT COUNT(*)
+FROM pg_foreign_table
+WHERE ftrelid = 'table30'::regclass
+AND ftoptions @> array['batch_size=40'];
+
+ROLLBACK;
+
+CREATE TABLE batch_table ( x int );
+
+CREATE FOREIGN TABLE ftable ( x int ) SERVER loopback OPTIONS ( table_name 'batch_table', batch_size '10' );
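+-- With batch_size 10, rows are shipped in groups of up to 10 per remote
+-- INSERT statement (one parameter set per row), reducing round trips.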
+EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO ftable SELECT * FROM generate_series(1, 10) i;
+INSERT INTO ftable SELECT * FROM generate_series(1, 10) i;
+INSERT INTO ftable SELECT * FROM generate_series(11, 31) i;
+INSERT INTO ftable VALUES (32);
+INSERT INTO ftable VALUES (33), (34);
+SELECT COUNT(*) FROM ftable;
+TRUNCATE batch_table;
+DROP FOREIGN TABLE ftable;
+
+-- Check the case where a large batch would exceed the max number of bind parameters
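+-- The effective batch size is capped so that batch size times the number of
+-- inserted columns stays within libpq's limit of 65535 bind parameters per
+-- query; with a single column, the 70000-row insert below is therefore split
+-- across multiple remote batches.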
+CREATE FOREIGN TABLE ftable ( x int ) SERVER loopback OPTIONS ( table_name 'batch_table', batch_size '100000' );
+INSERT INTO ftable SELECT * FROM generate_series(1, 70000) i;
+SELECT COUNT(*) FROM ftable;
+TRUNCATE batch_table;
+DROP FOREIGN TABLE ftable;
+
+-- Disable batch insert
+CREATE FOREIGN TABLE ftable ( x int ) SERVER loopback OPTIONS ( table_name 'batch_table', batch_size '1' );
+EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO ftable VALUES (1), (2);
+INSERT INTO ftable VALUES (1), (2);
+SELECT COUNT(*) FROM ftable;
+
+-- Batch insert is disabled for foreign tables with BEFORE ROW INSERT triggers,
+-- even if the batch_size option is enabled.
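+-- A BEFORE ROW trigger may modify or skip each individual row, so rows are
+-- sent one at a time when such a trigger is present.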
+ALTER FOREIGN TABLE ftable OPTIONS ( SET batch_size '10' );
+CREATE TRIGGER trig_row_before BEFORE INSERT ON ftable
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+EXPLAIN (VERBOSE, COSTS OFF) INSERT INTO ftable VALUES (3), (4);
+INSERT INTO ftable VALUES (3), (4);
+SELECT COUNT(*) FROM ftable;
+
+-- Clean up
+DROP TRIGGER trig_row_before ON ftable;
+DROP FOREIGN TABLE ftable;
+DROP TABLE batch_table;
+
+-- Use partitioning, with batch_size set per foreign-table partition
+CREATE TABLE batch_table ( x int ) PARTITION BY HASH (x);
+
+CREATE TABLE batch_table_p0 (LIKE batch_table);
+CREATE FOREIGN TABLE batch_table_p0f
+ PARTITION OF batch_table
+ FOR VALUES WITH (MODULUS 3, REMAINDER 0)
+ SERVER loopback
+ OPTIONS (table_name 'batch_table_p0', batch_size '10');
+
+CREATE TABLE batch_table_p1 (LIKE batch_table);
+CREATE FOREIGN TABLE batch_table_p1f
+ PARTITION OF batch_table
+ FOR VALUES WITH (MODULUS 3, REMAINDER 1)
+ SERVER loopback
+ OPTIONS (table_name 'batch_table_p1', batch_size '1');
+
+CREATE TABLE batch_table_p2
+ PARTITION OF batch_table
+ FOR VALUES WITH (MODULUS 3, REMAINDER 2);
+
+INSERT INTO batch_table SELECT * FROM generate_series(1, 66) i;
+SELECT COUNT(*) FROM batch_table;
+
+-- Check that enabling batched inserts doesn't interfere with cross-partition
+-- updates
+CREATE TABLE batch_cp_upd_test (a int) PARTITION BY LIST (a);
+CREATE TABLE batch_cp_upd_test1 (LIKE batch_cp_upd_test);
+CREATE FOREIGN TABLE batch_cp_upd_test1_f
+ PARTITION OF batch_cp_upd_test
+ FOR VALUES IN (1)
+ SERVER loopback
+ OPTIONS (table_name 'batch_cp_upd_test1', batch_size '10');
+CREATE TABLE batch_cp_up_test1 PARTITION OF batch_cp_upd_test
+ FOR VALUES IN (2);
+INSERT INTO batch_cp_upd_test VALUES (1), (2);
+
+-- The following moves a row from the local partition to the foreign one
+UPDATE batch_cp_upd_test t SET a = 1 FROM (VALUES (1), (2)) s(a) WHERE t.a = s.a;
+SELECT tableoid::regclass, * FROM batch_cp_upd_test;
+
+-- Clean up
+DROP TABLE batch_table, batch_cp_upd_test, batch_table_p0, batch_table_p1 CASCADE;
+
+-- Use partitioning, with batch_size set at the server level
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+
+CREATE TABLE batch_table ( x int, field1 text, field2 text) PARTITION BY HASH (x);
+
+CREATE TABLE batch_table_p0 (LIKE batch_table);
+ALTER TABLE batch_table_p0 ADD CONSTRAINT p0_pkey PRIMARY KEY (x);
+CREATE FOREIGN TABLE batch_table_p0f
+ PARTITION OF batch_table
+ FOR VALUES WITH (MODULUS 2, REMAINDER 0)
+ SERVER loopback
+ OPTIONS (table_name 'batch_table_p0');
+
+CREATE TABLE batch_table_p1 (LIKE batch_table);
+ALTER TABLE batch_table_p1 ADD CONSTRAINT p1_pkey PRIMARY KEY (x);
+CREATE FOREIGN TABLE batch_table_p1f
+ PARTITION OF batch_table
+ FOR VALUES WITH (MODULUS 2, REMAINDER 1)
+ SERVER loopback
+ OPTIONS (table_name 'batch_table_p1');
+
+INSERT INTO batch_table SELECT i, 'test'||i, 'test'|| i FROM generate_series(1, 50) i;
+SELECT COUNT(*) FROM batch_table;
+SELECT * FROM batch_table ORDER BY x;
+
+ALTER SERVER loopback OPTIONS (DROP batch_size);
+
+-- ===================================================================
+-- test asynchronous execution
+-- ===================================================================
+
+ALTER SERVER loopback OPTIONS (DROP extensions);
+ALTER SERVER loopback OPTIONS (ADD async_capable 'true');
+ALTER SERVER loopback2 OPTIONS (ADD async_capable 'true');
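+-- With async_capable enabled, an Append whose children scan foreign tables
+-- can start the remote queries asynchronously and fetch from whichever
+-- connection has results available, instead of running the child scans
+-- strictly one after another.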
+
+CREATE TABLE async_pt (a int, b int, c text) PARTITION BY RANGE (a);
+CREATE TABLE base_tbl1 (a int, b int, c text);
+CREATE TABLE base_tbl2 (a int, b int, c text);
+CREATE FOREIGN TABLE async_p1 PARTITION OF async_pt FOR VALUES FROM (1000) TO (2000)
+ SERVER loopback OPTIONS (table_name 'base_tbl1');
+CREATE FOREIGN TABLE async_p2 PARTITION OF async_pt FOR VALUES FROM (2000) TO (3000)
+ SERVER loopback2 OPTIONS (table_name 'base_tbl2');
+INSERT INTO async_p1 SELECT 1000 + i, i, to_char(i, 'FM0000') FROM generate_series(0, 999, 5) i;
+INSERT INTO async_p2 SELECT 2000 + i, i, to_char(i, 'FM0000') FROM generate_series(0, 999, 5) i;
+ANALYZE async_pt;
+
+-- simple queries
+CREATE TABLE result_tbl (a int, b int, c text);
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b % 100 = 0;
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b % 100 = 0;
+
+SELECT * FROM result_tbl ORDER BY a;
+DELETE FROM result_tbl;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+
+SELECT * FROM result_tbl ORDER BY a;
+DELETE FROM result_tbl;
+
+-- Check case where multiple partitions use the same connection
+CREATE TABLE base_tbl3 (a int, b int, c text);
+CREATE FOREIGN TABLE async_p3 PARTITION OF async_pt FOR VALUES FROM (3000) TO (4000)
+ SERVER loopback2 OPTIONS (table_name 'base_tbl3');
+INSERT INTO async_p3 SELECT 3000 + i, i, to_char(i, 'FM0000') FROM generate_series(0, 999, 5) i;
+ANALYZE async_pt;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+
+SELECT * FROM result_tbl ORDER BY a;
+DELETE FROM result_tbl;
+
+DROP FOREIGN TABLE async_p3;
+DROP TABLE base_tbl3;
+
+-- Check case where the partitioned table has local/remote partitions
+CREATE TABLE async_p3 PARTITION OF async_pt FOR VALUES FROM (3000) TO (4000);
+INSERT INTO async_p3 SELECT 3000 + i, i, to_char(i, 'FM0000') FROM generate_series(0, 999, 5) i;
+ANALYZE async_pt;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+INSERT INTO result_tbl SELECT * FROM async_pt WHERE b === 505;
+
+SELECT * FROM result_tbl ORDER BY a;
+DELETE FROM result_tbl;
+
+-- partitionwise joins
+SET enable_partitionwise_join TO true;
+
+CREATE TABLE join_tbl (a1 int, b1 int, c1 text, a2 int, b2 int, c2 text);
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO join_tbl SELECT * FROM async_pt t1, async_pt t2 WHERE t1.a = t2.a AND t1.b = t2.b AND t1.b % 100 = 0;
+INSERT INTO join_tbl SELECT * FROM async_pt t1, async_pt t2 WHERE t1.a = t2.a AND t1.b = t2.b AND t1.b % 100 = 0;
+
+SELECT * FROM join_tbl ORDER BY a1;
+DELETE FROM join_tbl;
+
+RESET enable_partitionwise_join;
+
+-- Test rescan of an async Append node with do_exec_prune=false
+SET enable_hashjoin TO false;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO join_tbl SELECT * FROM async_p1 t1, async_pt t2 WHERE t1.a = t2.a AND t1.b = t2.b AND t1.b % 100 = 0;
+INSERT INTO join_tbl SELECT * FROM async_p1 t1, async_pt t2 WHERE t1.a = t2.a AND t1.b = t2.b AND t1.b % 100 = 0;
+
+SELECT * FROM join_tbl ORDER BY a1;
+DELETE FROM join_tbl;
+
+RESET enable_hashjoin;
+
+-- Test interaction of async execution with plan-time partition pruning
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM async_pt WHERE a < 3000;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM async_pt WHERE a < 2000;
+
+-- Test interaction of async execution with run-time partition pruning
+SET plan_cache_mode TO force_generic_plan;
+
+PREPARE async_pt_query (int, int) AS
+ INSERT INTO result_tbl SELECT * FROM async_pt WHERE a < $1 AND b === $2;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+EXECUTE async_pt_query (3000, 505);
+EXECUTE async_pt_query (3000, 505);
+
+SELECT * FROM result_tbl ORDER BY a;
+DELETE FROM result_tbl;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+EXECUTE async_pt_query (2000, 505);
+EXECUTE async_pt_query (2000, 505);
+
+SELECT * FROM result_tbl ORDER BY a;
+DELETE FROM result_tbl;
+
+RESET plan_cache_mode;
+
+CREATE TABLE local_tbl(a int, b int, c text);
+INSERT INTO local_tbl VALUES (1505, 505, 'foo'), (2505, 505, 'bar');
+ANALYZE local_tbl;
+
+CREATE INDEX base_tbl1_idx ON base_tbl1 (a);
+CREATE INDEX base_tbl2_idx ON base_tbl2 (a);
+CREATE INDEX async_p3_idx ON async_p3 (a);
+ANALYZE base_tbl1;
+ANALYZE base_tbl2;
+ANALYZE async_p3;
+
+ALTER FOREIGN TABLE async_p1 OPTIONS (use_remote_estimate 'true');
+ALTER FOREIGN TABLE async_p2 OPTIONS (use_remote_estimate 'true');
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM local_tbl, async_pt WHERE local_tbl.a = async_pt.a AND local_tbl.c = 'bar';
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+SELECT * FROM local_tbl, async_pt WHERE local_tbl.a = async_pt.a AND local_tbl.c = 'bar';
+SELECT * FROM local_tbl, async_pt WHERE local_tbl.a = async_pt.a AND local_tbl.c = 'bar';
+
+ALTER FOREIGN TABLE async_p1 OPTIONS (DROP use_remote_estimate);
+ALTER FOREIGN TABLE async_p2 OPTIONS (DROP use_remote_estimate);
+
+DROP TABLE local_tbl;
+DROP INDEX base_tbl1_idx;
+DROP INDEX base_tbl2_idx;
+DROP INDEX async_p3_idx;
+
+-- Async execution is disabled when gating Result nodes are used for
+-- pseudoconstant quals
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM async_pt WHERE CURRENT_USER = SESSION_USER;
+
+-- Test that pending requests are processed properly
+SET enable_mergejoin TO false;
+SET enable_hashjoin TO false;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505;
+SELECT * FROM async_pt t1, async_p2 t2 WHERE t1.a = t2.a AND t1.b === 505;
+
+CREATE TABLE local_tbl (a int, b int, c text);
+INSERT INTO local_tbl VALUES (1505, 505, 'foo');
+ANALYZE local_tbl;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a;
+SELECT * FROM local_tbl t1 LEFT JOIN (SELECT *, (SELECT count(*) FROM async_pt WHERE a < 3000) FROM async_pt WHERE a < 3000) t2 ON t1.a = t2.a;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1;
+SELECT * FROM async_pt t1 WHERE t1.b === 505 LIMIT 1;
+
+-- Check with foreign modify
+CREATE TABLE base_tbl3 (a int, b int, c text);
+CREATE FOREIGN TABLE remote_tbl (a int, b int, c text)
+ SERVER loopback OPTIONS (table_name 'base_tbl3');
+INSERT INTO remote_tbl VALUES (2505, 505, 'bar');
+
+CREATE TABLE base_tbl4 (a int, b int, c text);
+CREATE FOREIGN TABLE insert_tbl (a int, b int, c text)
+ SERVER loopback OPTIONS (table_name 'base_tbl4');
+
+EXPLAIN (VERBOSE, COSTS OFF)
+INSERT INTO insert_tbl (SELECT * FROM local_tbl UNION ALL SELECT * FROM remote_tbl);
+INSERT INTO insert_tbl (SELECT * FROM local_tbl UNION ALL SELECT * FROM remote_tbl);
+
+SELECT * FROM insert_tbl ORDER BY a;
+
+-- Check with direct modify
+EXPLAIN (VERBOSE, COSTS OFF)
+WITH t AS (UPDATE remote_tbl SET c = c || c RETURNING *)
+INSERT INTO join_tbl SELECT * FROM async_pt LEFT JOIN t ON (async_pt.a = t.a AND async_pt.b = t.b) WHERE async_pt.b === 505;
+WITH t AS (UPDATE remote_tbl SET c = c || c RETURNING *)
+INSERT INTO join_tbl SELECT * FROM async_pt LEFT JOIN t ON (async_pt.a = t.a AND async_pt.b = t.b) WHERE async_pt.b === 505;
+
+SELECT * FROM join_tbl ORDER BY a1;
+DELETE FROM join_tbl;
+
+DROP TABLE local_tbl;
+DROP FOREIGN TABLE remote_tbl;
+DROP FOREIGN TABLE insert_tbl;
+DROP TABLE base_tbl3;
+DROP TABLE base_tbl4;
+
+RESET enable_mergejoin;
+RESET enable_hashjoin;
+
+-- Test that UPDATE/DELETE with inherited target works with async_capable enabled
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE async_pt SET c = c || c WHERE b = 0 RETURNING *;
+UPDATE async_pt SET c = c || c WHERE b = 0 RETURNING *;
+EXPLAIN (VERBOSE, COSTS OFF)
+DELETE FROM async_pt WHERE b = 0 RETURNING *;
+DELETE FROM async_pt WHERE b = 0 RETURNING *;
+
+-- Check EXPLAIN ANALYZE for a query that scans empty partitions asynchronously
+DELETE FROM async_p1;
+DELETE FROM async_p2;
+DELETE FROM async_p3;
+
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+SELECT * FROM async_pt;
+
+-- Clean up
+DROP TABLE async_pt;
+DROP TABLE base_tbl1;
+DROP TABLE base_tbl2;
+DROP TABLE result_tbl;
+DROP TABLE join_tbl;
+
+-- Test that an asynchronous fetch is processed before restarting the scan in
+-- ReScanForeignScan
+CREATE TABLE base_tbl (a int, b int);
+INSERT INTO base_tbl VALUES (1, 11), (2, 22), (3, 33);
+CREATE FOREIGN TABLE foreign_tbl (b int)
+ SERVER loopback OPTIONS (table_name 'base_tbl');
+CREATE FOREIGN TABLE foreign_tbl2 () INHERITS (foreign_tbl)
+ SERVER loopback OPTIONS (table_name 'base_tbl');
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT a FROM base_tbl WHERE a IN (SELECT a FROM foreign_tbl);
+SELECT a FROM base_tbl WHERE a IN (SELECT a FROM foreign_tbl);
+
+-- Clean up
+DROP FOREIGN TABLE foreign_tbl CASCADE;
+DROP TABLE base_tbl;
+
+ALTER SERVER loopback OPTIONS (DROP async_capable);
+ALTER SERVER loopback2 OPTIONS (DROP async_capable);
+
+-- ===================================================================
+-- test invalid server and foreign table options
+-- ===================================================================
+-- Invalid fdw_startup_cost option
+CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS(fdw_startup_cost '100$%$#$#');
+-- Invalid fdw_tuple_cost option
+CREATE SERVER inv_scst FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS(fdw_tuple_cost '100$%$#$#');
+-- Invalid fetch_size option
+CREATE FOREIGN TABLE inv_fsz (c1 int )
+ SERVER loopback OPTIONS (fetch_size '100$%$#$#');
+-- Invalid batch_size option
+CREATE FOREIGN TABLE inv_bsz (c1 int )
+ SERVER loopback OPTIONS (batch_size '100$%$#$#');