Diffstat (limited to 'src/backend/storage/smgr')
-rw-r--r--   src/backend/storage/smgr/Makefile     19
-rw-r--r--   src/backend/storage/smgr/README       52
-rw-r--r--   src/backend/storage/smgr/md.c       1409
-rw-r--r--   src/backend/storage/smgr/smgr.c      695
4 files changed, 2175 insertions(+), 0 deletions(-)
diff --git a/src/backend/storage/smgr/Makefile b/src/backend/storage/smgr/Makefile
new file mode 100644
index 0000000..596b564
--- /dev/null
+++ b/src/backend/storage/smgr/Makefile
@@ -0,0 +1,19 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for storage/smgr
+#
+# IDENTIFICATION
+# src/backend/storage/smgr/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/storage/smgr
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+OBJS = \
+ md.o \
+ smgr.o
+
+include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/storage/smgr/README b/src/backend/storage/smgr/README
new file mode 100644
index 0000000..e1cfc6c
--- /dev/null
+++ b/src/backend/storage/smgr/README
@@ -0,0 +1,52 @@
+src/backend/storage/smgr/README
+
+Storage Managers
+================
+
+In the original Berkeley Postgres system, there were several storage managers,
+of which only the "magnetic disk" manager remains. (At Berkeley there were
+also managers for the Sony WORM optical disk jukebox and persistent main
+memory, but these were never supported in any externally released Postgres,
+nor in any version of PostgreSQL.) The "magnetic disk" manager is itself
+seriously misnamed, because it actually supports any kind of device for
+which the operating system provides standard filesystem operations, which
+these days is pretty much everything of interest. However, we retain the
+notion of a storage manager switch in case anyone ever wants to reintroduce
+other kinds of storage managers. Removing the switch layer would save
+nothing noticeable anyway, since storage-access operations are surely far
+more expensive than one extra layer of C function calls.
+
+In Berkeley Postgres each relation was tagged with the ID of the storage
+manager to use for it. This is gone. It would probably be more reasonable
+to associate storage managers with tablespaces, should we ever re-introduce
+multiple storage managers into the system catalogs.
+
+The files in this directory, and their contents, are
+
+ smgr.c The storage manager switch dispatch code. The routines in
+ this file call the appropriate storage manager to do storage
+ accesses requested by higher-level code. smgr.c also manages
+ the file handle cache (SMgrRelation table).
+
+ md.c The "magnetic disk" storage manager, which is really just
+ an interface to the kernel's filesystem operations.
+
+Note that md.c in turn relies on src/backend/storage/file/fd.c.
+
+
+Relation Forks
+==============
+
+Since 8.4, a single smgr relation can comprise multiple physical
+files, called relation forks. This allows storing additional metadata like
+Free Space information in additional forks, which can be grown and truncated
+independently of the main data file, while still treating it all as a single
+physical relation in system catalogs.
+
+It is assumed that the main fork, fork number 0 or MAIN_FORKNUM, always
+exists. Fork numbers are assigned in src/include/common/relpath.h.
+Functions in smgr.c and md.c take an extra fork number argument, in addition
+to relfilenode and block number, to identify which relation fork you want to
+access. Since most code wants to access the main fork, a shortcut version of
+ReadBuffer that accesses MAIN_FORKNUM is provided in the buffer manager for
+convenience.
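+
+For example, with the standard fork numbers from relpath.h (MAIN_FORKNUM = 0,
+FSM_FORKNUM = 1, VISIBILITYMAP_FORKNUM = 2, INIT_FORKNUM = 3), a relation
+whose main fork is stored in a file named "16384" keeps its free space map
+in "16384_fsm" and its visibility map in "16384_vm". (The relfilenode 16384
+here is purely illustrative.)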
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
new file mode 100644
index 0000000..b4bca7e
--- /dev/null
+++ b/src/backend/storage/smgr/md.c
@@ -0,0 +1,1409 @@
+/*-------------------------------------------------------------------------
+ *
+ * md.c
+ * This code manages relations that reside on magnetic disk.
+ *
+ * Or at least, that was what the Berkeley folk had in mind when they named
+ * this file. In reality, what this code provides is an interface from
+ * the smgr API to Unix-like filesystem APIs, so it will work with any type
+ * of device for which the operating system provides filesystem support.
+ * It doesn't matter whether the bits are on spinning rust or some other
+ * storage technology.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/storage/smgr/md.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/file.h>
+
+#include "access/xlog.h"
+#include "access/xlogutils.h"
+#include "commands/tablespace.h"
+#include "miscadmin.h"
+#include "pg_trace.h"
+#include "pgstat.h"
+#include "postmaster/bgwriter.h"
+#include "storage/bufmgr.h"
+#include "storage/fd.h"
+#include "storage/md.h"
+#include "storage/relfilenode.h"
+#include "storage/smgr.h"
+#include "storage/sync.h"
+#include "utils/hsearch.h"
+#include "utils/memutils.h"
+
+/*
+ * The magnetic disk storage manager keeps track of open file
+ * descriptors in its own descriptor pool. This is done to make it
+ * easier to support relations that are larger than the operating
+ * system's file size limit (often 2GBytes). In order to do that,
+ * we break relations up into "segment" files that are each shorter than
+ * the OS file size limit. The segment size is set by the RELSEG_SIZE
+ * configuration constant in pg_config.h.
+ *
+ * On disk, a relation must consist of consecutively numbered segment
+ * files in the pattern
+ * -- Zero or more full segments of exactly RELSEG_SIZE blocks each
+ * -- Exactly one partial segment of size 0 <= size < RELSEG_SIZE blocks
+ * -- Optionally, any number of inactive segments of size 0 blocks.
+ * The full and partial segments are collectively the "active" segments.
+ * Inactive segments are those that once contained data but are currently
+ * not needed because of an mdtruncate() operation. The reason for leaving
+ * them present at size zero, rather than unlinking them, is that other
+ * backends and/or the checkpointer might be holding open file references to
+ * such segments. If the relation expands again after mdtruncate(), such
+ * that a deactivated segment becomes active again, it is important that
+ * such file references still be valid --- else data might get written
+ * out to an unlinked old copy of a segment file that will eventually
+ * disappear.
+ *
+ * File descriptors are stored in the per-fork md_seg_fds arrays inside
+ * SMgrRelation. The length of these arrays is stored in md_num_open_segs.
+ * Note that a fork's md_num_open_segs having a specific value does not
+ * necessarily mean the relation doesn't have additional segments; we may
+ * just not have opened the next segment yet. (We could not have "all
+ * segments are in the array" as an invariant anyway, since another backend
+ * could extend the relation while we aren't looking.) We do not have
+ * entries for inactive segments, however; as soon as we find a partial
+ * segment, we assume that any subsequent segments are inactive.
+ *
+ * The entire MdfdVec array is palloc'd in the MdCxt memory context.
+ */
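+
+/*
+ * Worked example (illustrative, not part of the original comment): with the
+ * default BLCKSZ of 8192 bytes and RELSEG_SIZE of 131072 blocks (1 GB
+ * segments), block 300000 of a fork lives in segment 300000 / 131072 = 2,
+ * i.e. the file "<relpath>.2", at byte offset
+ * (300000 % 131072) * 8192 = 37856 * 8192 = 310116352 within that file.
+ */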
+
+typedef struct _MdfdVec
+{
+ File mdfd_vfd; /* fd number in fd.c's pool */
+ BlockNumber mdfd_segno; /* segment number, from 0 */
+} MdfdVec;
+
+static MemoryContext MdCxt; /* context for all MdfdVec objects */
+
+
+/* Populate a file tag describing an md.c segment file. */
+#define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \
+( \
+ memset(&(a), 0, sizeof(FileTag)), \
+ (a).handler = SYNC_HANDLER_MD, \
+ (a).rnode = (xx_rnode), \
+ (a).forknum = (xx_forknum), \
+ (a).segno = (xx_segno) \
+)
+
+
+/*** behavior for mdopen & _mdfd_getseg ***/
+/* ereport if segment not present */
+#define EXTENSION_FAIL (1 << 0)
+/* return NULL if segment not present */
+#define EXTENSION_RETURN_NULL (1 << 1)
+/* create new segments as needed */
+#define EXTENSION_CREATE (1 << 2)
+/* create new segments if needed during recovery */
+#define EXTENSION_CREATE_RECOVERY (1 << 3)
+/*
+ * Allow opening segments which are preceded by segments smaller than
+ * RELSEG_SIZE, e.g. inactive segments (see above). Note that this breaks
+ * mdnblocks() and related functionality from then on, which is currently
+ * OK because this flag is only needed by the checkpointer, which never uses
+ * mdnblocks().
+ */
+#define EXTENSION_DONT_CHECK_SIZE (1 << 4)
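+
+/*
+ * Typical combinations, as used below: mdextend() passes EXTENSION_CREATE;
+ * mdread() and mdwrite() pass EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY;
+ * mdwriteback() passes EXTENSION_RETURN_NULL; and mdprefetch() passes
+ * EXTENSION_RETURN_NULL in recovery and EXTENSION_FAIL otherwise.
+ */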
+
+
+/* local routines */
+static void mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum,
+ bool isRedo);
+static MdfdVec *mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior);
+static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
+ MdfdVec *seg);
+static void register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
+ BlockNumber segno);
+static void register_forget_request(RelFileNodeBackend rnode, ForkNumber forknum,
+ BlockNumber segno);
+static void _fdvec_resize(SMgrRelation reln,
+ ForkNumber forknum,
+ int nseg);
+static char *_mdfd_segpath(SMgrRelation reln, ForkNumber forknum,
+ BlockNumber segno);
+static MdfdVec *_mdfd_openseg(SMgrRelation reln, ForkNumber forkno,
+ BlockNumber segno, int oflags);
+static MdfdVec *_mdfd_getseg(SMgrRelation reln, ForkNumber forkno,
+ BlockNumber blkno, bool skipFsync, int behavior);
+static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum,
+ MdfdVec *seg);
+
+
+/*
+ * mdinit() -- Initialize private state for magnetic disk storage manager.
+ */
+void
+mdinit(void)
+{
+ MdCxt = AllocSetContextCreate(TopMemoryContext,
+ "MdSmgr",
+ ALLOCSET_DEFAULT_SIZES);
+}
+
+/*
+ * mdexists() -- Does the physical file exist?
+ *
+ * Note: this will return true for lingering files with pending deletions
+ */
+bool
+mdexists(SMgrRelation reln, ForkNumber forkNum)
+{
+ /*
+ * Close it first, to ensure that we notice if the fork has been unlinked
+ * since we opened it.
+ */
+ mdclose(reln, forkNum);
+
+ return (mdopenfork(reln, forkNum, EXTENSION_RETURN_NULL) != NULL);
+}
+
+/*
+ * mdcreate() -- Create a new relation on magnetic disk.
+ *
+ * If isRedo is true, it's okay for the relation to exist already.
+ */
+void
+mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
+{
+ MdfdVec *mdfd;
+ char *path;
+ File fd;
+
+ if (isRedo && reln->md_num_open_segs[forkNum] > 0)
+ return; /* created and opened already... */
+
+ Assert(reln->md_num_open_segs[forkNum] == 0);
+
+ /*
+ * We may be using the target table space for the first time in this
+ * database, so create a per-database subdirectory if needed.
+ *
+ * XXX this is a fairly ugly violation of module layering, but this seems
+ * to be the best place to put the check. Maybe TablespaceCreateDbspace
+ * should be here and not in commands/tablespace.c? But that would imply
+ * importing a lot of stuff that smgr.c oughtn't know, either.
+ */
+ TablespaceCreateDbspace(reln->smgr_rnode.node.spcNode,
+ reln->smgr_rnode.node.dbNode,
+ isRedo);
+
+ path = relpath(reln->smgr_rnode, forkNum);
+
+ fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY);
+
+ if (fd < 0)
+ {
+ int save_errno = errno;
+
+ if (isRedo)
+ fd = PathNameOpenFile(path, O_RDWR | PG_BINARY);
+ if (fd < 0)
+ {
+ /* be sure to report the error reported by create, not open */
+ errno = save_errno;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not create file \"%s\": %m", path)));
+ }
+ }
+
+ pfree(path);
+
+ _fdvec_resize(reln, forkNum, 1);
+ mdfd = &reln->md_seg_fds[forkNum][0];
+ mdfd->mdfd_vfd = fd;
+ mdfd->mdfd_segno = 0;
+}
+
+/*
+ * mdunlink() -- Unlink a relation.
+ *
+ * Note that we're passed a RelFileNodeBackend --- by the time this is called,
+ * there won't be an SMgrRelation hashtable entry anymore.
+ *
+ * forkNum can be a fork number to delete a specific fork, or InvalidForkNumber
+ * to delete all forks.
+ *
+ * For regular relations, we don't unlink the first segment file of the rel,
+ * but just truncate it to zero length, and record a request to unlink it after
+ * the next checkpoint. Additional segments can be unlinked immediately,
+ * however. Leaving the empty file in place prevents that relfilenode
+ * number from being reused. The scenario this protects us from is:
+ * 1. We delete a relation (and commit, and actually remove its file).
+ * 2. We create a new relation, which by chance gets the same relfilenode as
+ * the just-deleted one (OIDs must've wrapped around for that to happen).
+ * 3. We crash before another checkpoint occurs.
+ * During replay, we would delete the file and then recreate it, which is fine
+ * if the contents of the file were repopulated by subsequent WAL entries.
+ * But if we didn't WAL-log insertions, but instead relied on fsyncing the
+ * file after populating it (as we do at wal_level=minimal), the contents of
+ * the file would be lost forever. By leaving the empty file until after the
+ * next checkpoint, we prevent reassignment of the relfilenode number until
+ * it's safe, because relfilenode assignment skips over any existing file.
+ *
+ * We do not need to go through this dance for temp relations, though, because
+ * we never make WAL entries for temp rels, and so a temp rel poses no threat
+ * to the health of a regular rel that has taken over its relfilenode number.
+ * The fact that temp rels and regular rels have different file naming
+ * patterns provides additional safety.
+ *
+ * All the above applies only to the relation's main fork; other forks can
+ * just be removed immediately, since they are not needed to prevent the
+ * relfilenode number from being recycled. Also, we do not carefully
+ * track whether other forks have been created or not, but just attempt to
+ * unlink them unconditionally; so we should never complain about ENOENT.
+ *
+ * If isRedo is true, it's unsurprising for the relation to be already gone.
+ * Also, we should remove the file immediately instead of queuing a request
+ * for later, since during redo there's no possibility of creating a
+ * conflicting relation.
+ *
+ * Note: any failure should be reported as WARNING not ERROR, because
+ * we are usually not in a transaction anymore when this is called.
+ */
+void
+mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
+{
+ /* Now do the per-fork work */
+ if (forkNum == InvalidForkNumber)
+ {
+ for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
+ mdunlinkfork(rnode, forkNum, isRedo);
+ }
+ else
+ mdunlinkfork(rnode, forkNum, isRedo);
+}
+
+/*
+ * Truncate a file to release disk space.
+ */
+static int
+do_truncate(const char *path)
+{
+ int save_errno;
+ int ret;
+
+ ret = pg_truncate(path, 0);
+
+ /* Log a warning here to avoid repetition in callers. */
+ if (ret < 0 && errno != ENOENT)
+ {
+ save_errno = errno;
+ ereport(WARNING,
+ (errcode_for_file_access(),
+ errmsg("could not truncate file \"%s\": %m", path)));
+ errno = save_errno;
+ }
+
+ return ret;
+}
+
+static void
+mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
+{
+ char *path;
+ int ret;
+
+ path = relpath(rnode, forkNum);
+
+ /*
+ * Delete or truncate the first segment.
+ */
+ if (isRedo || forkNum != MAIN_FORKNUM || RelFileNodeBackendIsTemp(rnode))
+ {
+ if (!RelFileNodeBackendIsTemp(rnode))
+ {
+ /* Prevent other backends' fds from holding on to the disk space */
+ ret = do_truncate(path);
+
+ /* Forget any pending sync requests for the first segment */
+ register_forget_request(rnode, forkNum, 0 /* first seg */ );
+ }
+ else
+ ret = 0;
+
+ /* Next unlink the file, unless it was already found to be missing */
+ if (ret == 0 || errno != ENOENT)
+ {
+ ret = unlink(path);
+ if (ret < 0 && errno != ENOENT)
+ ereport(WARNING,
+ (errcode_for_file_access(),
+ errmsg("could not remove file \"%s\": %m", path)));
+ }
+ }
+ else
+ {
+ /* Prevent other backends' fds from holding on to the disk space */
+ ret = do_truncate(path);
+
+ /* Register request to unlink first segment later */
+ register_unlink_segment(rnode, forkNum, 0 /* first seg */ );
+ }
+
+ /*
+ * Delete any additional segments.
+ */
+ if (ret >= 0)
+ {
+ char *segpath = (char *) palloc(strlen(path) + 12);
+ BlockNumber segno;
+
+ /*
+ * Note that because we loop until getting ENOENT, we will correctly
+ * remove all inactive segments as well as active ones.
+ */
+ for (segno = 1;; segno++)
+ {
+ sprintf(segpath, "%s.%u", path, segno);
+
+ if (!RelFileNodeBackendIsTemp(rnode))
+ {
+ /*
+ * Prevent other backends' fds from holding on to the disk
+ * space.
+ */
+ if (do_truncate(segpath) < 0 && errno == ENOENT)
+ break;
+
+ /*
+ * Forget any pending sync requests for this segment before we
+ * try to unlink.
+ */
+ register_forget_request(rnode, forkNum, segno);
+ }
+
+ if (unlink(segpath) < 0)
+ {
+ /* ENOENT is expected after the last segment... */
+ if (errno != ENOENT)
+ ereport(WARNING,
+ (errcode_for_file_access(),
+ errmsg("could not remove file \"%s\": %m", segpath)));
+ break;
+ }
+ }
+ pfree(segpath);
+ }
+
+ pfree(path);
+}
+
+/*
+ * mdextend() -- Add a block to the specified relation.
+ *
+ * The semantics are nearly the same as mdwrite(): write at the
+ * specified position. However, this is to be used for the case of
+ * extending a relation (i.e., blocknum is at or beyond the current
+ * EOF). Note that we assume writing a block beyond current EOF
+ * causes intervening file space to become filled with zeroes.
+ */
+void
+mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
+ char *buffer, bool skipFsync)
+{
+ off_t seekpos;
+ int nbytes;
+ MdfdVec *v;
+
+ /* This assert is too expensive to have on normally ... */
+#ifdef CHECK_WRITE_VS_EXTEND
+ Assert(blocknum >= mdnblocks(reln, forknum));
+#endif
+
+ /*
+ * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
+ * more --- we mustn't create a block whose number actually is
+ * InvalidBlockNumber. (Note that this failure should be unreachable
+ * because of upstream checks in bufmgr.c.)
+ */
+ if (blocknum == InvalidBlockNumber)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("cannot extend file \"%s\" beyond %u blocks",
+ relpath(reln->smgr_rnode, forknum),
+ InvalidBlockNumber)));
+
+ v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE);
+
+ seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
+
+ Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
+
+ if ((nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_EXTEND)) != BLCKSZ)
+ {
+ if (nbytes < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not extend file \"%s\": %m",
+ FilePathName(v->mdfd_vfd)),
+ errhint("Check free disk space.")));
+ /* short write: complain appropriately */
+ ereport(ERROR,
+ (errcode(ERRCODE_DISK_FULL),
+ errmsg("could not extend file \"%s\": wrote only %d of %d bytes at block %u",
+ FilePathName(v->mdfd_vfd),
+ nbytes, BLCKSZ, blocknum),
+ errhint("Check free disk space.")));
+ }
+
+ if (!skipFsync && !SmgrIsTemp(reln))
+ register_dirty_segment(reln, forknum, v);
+
+ Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
+}
+
+/*
+ * mdopenfork() -- Open one fork of the specified relation.
+ *
+ * Note that we only open the first segment, even when there are multiple segments.
+ *
+ * If first segment is not present, either ereport or return NULL according
+ * to "behavior". We treat EXTENSION_CREATE the same as EXTENSION_FAIL;
+ * EXTENSION_CREATE means it's OK to extend an existing relation, not to
+ * invent one out of whole cloth.
+ */
+static MdfdVec *
+mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
+{
+ MdfdVec *mdfd;
+ char *path;
+ File fd;
+
+ /* No work if already open */
+ if (reln->md_num_open_segs[forknum] > 0)
+ return &reln->md_seg_fds[forknum][0];
+
+ path = relpath(reln->smgr_rnode, forknum);
+
+ fd = PathNameOpenFile(path, O_RDWR | PG_BINARY);
+
+ if (fd < 0)
+ {
+ if ((behavior & EXTENSION_RETURN_NULL) &&
+ FILE_POSSIBLY_DELETED(errno))
+ {
+ pfree(path);
+ return NULL;
+ }
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m", path)));
+ }
+
+ pfree(path);
+
+ _fdvec_resize(reln, forknum, 1);
+ mdfd = &reln->md_seg_fds[forknum][0];
+ mdfd->mdfd_vfd = fd;
+ mdfd->mdfd_segno = 0;
+
+ Assert(_mdnblocks(reln, forknum, mdfd) <= ((BlockNumber) RELSEG_SIZE));
+
+ return mdfd;
+}
+
+/*
+ * mdopen() -- Initialize newly-opened relation.
+ */
+void
+mdopen(SMgrRelation reln)
+{
+ /* mark it not open */
+ for (int forknum = 0; forknum <= MAX_FORKNUM; forknum++)
+ reln->md_num_open_segs[forknum] = 0;
+}
+
+/*
+ * mdclose() -- Close the specified relation, if it isn't closed already.
+ */
+void
+mdclose(SMgrRelation reln, ForkNumber forknum)
+{
+ int nopensegs = reln->md_num_open_segs[forknum];
+
+ /* No work if already closed */
+ if (nopensegs == 0)
+ return;
+
+ /* close segments starting from the end */
+ while (nopensegs > 0)
+ {
+ MdfdVec *v = &reln->md_seg_fds[forknum][nopensegs - 1];
+
+ FileClose(v->mdfd_vfd);
+ _fdvec_resize(reln, forknum, nopensegs - 1);
+ nopensegs--;
+ }
+}
+
+/*
+ * mdprefetch() -- Initiate asynchronous read of the specified block of a relation
+ */
+bool
+mdprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
+{
+#ifdef USE_PREFETCH
+ off_t seekpos;
+ MdfdVec *v;
+
+ v = _mdfd_getseg(reln, forknum, blocknum, false,
+ InRecovery ? EXTENSION_RETURN_NULL : EXTENSION_FAIL);
+ if (v == NULL)
+ return false;
+
+ seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
+
+ Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
+
+ (void) FilePrefetch(v->mdfd_vfd, seekpos, BLCKSZ, WAIT_EVENT_DATA_FILE_PREFETCH);
+#endif /* USE_PREFETCH */
+
+ return true;
+}
+
+/*
+ * mdwriteback() -- Tell the kernel to write pages back to storage.
+ *
+ * This accepts a range of blocks because flushing several pages at once is
+ * considerably more efficient than doing so individually.
+ */
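+/*
+ * Worked example (illustrative): with RELSEG_SIZE = 131072, a request for
+ * blocknum = 131070, nblocks = 4 is split into two FileWriteback() calls:
+ * two blocks at the end of segment 0, then two blocks at the start of
+ * segment 1.
+ */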
+void
+mdwriteback(SMgrRelation reln, ForkNumber forknum,
+ BlockNumber blocknum, BlockNumber nblocks)
+{
+ /*
+ * Issue flush requests in as few requests as possible; have to split at
+ * segment boundaries though, since those are actually separate files.
+ */
+ while (nblocks > 0)
+ {
+ BlockNumber nflush = nblocks;
+ off_t seekpos;
+ MdfdVec *v;
+ int segnum_start,
+ segnum_end;
+
+ v = _mdfd_getseg(reln, forknum, blocknum, true /* not used */ ,
+ EXTENSION_RETURN_NULL);
+
+ /*
+ * We might be flushing buffers of already removed relations, that's
+ * ok, just ignore that case.
+ */
+ if (!v)
+ return;
+
+ /* compute offset inside the current segment */
+ segnum_start = blocknum / RELSEG_SIZE;
+
+ /* compute number of desired writes within the current segment */
+ segnum_end = (blocknum + nblocks - 1) / RELSEG_SIZE;
+ if (segnum_start != segnum_end)
+ nflush = RELSEG_SIZE - (blocknum % ((BlockNumber) RELSEG_SIZE));
+
+ Assert(nflush >= 1);
+ Assert(nflush <= nblocks);
+
+ seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
+
+ FileWriteback(v->mdfd_vfd, seekpos, (off_t) BLCKSZ * nflush, WAIT_EVENT_DATA_FILE_FLUSH);
+
+ nblocks -= nflush;
+ blocknum += nflush;
+ }
+}
+
+/*
+ * mdread() -- Read the specified block from a relation.
+ */
+void
+mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
+ char *buffer)
+{
+ off_t seekpos;
+ int nbytes;
+ MdfdVec *v;
+
+ TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
+ reln->smgr_rnode.node.spcNode,
+ reln->smgr_rnode.node.dbNode,
+ reln->smgr_rnode.node.relNode,
+ reln->smgr_rnode.backend);
+
+ v = _mdfd_getseg(reln, forknum, blocknum, false,
+ EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
+
+ seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
+
+ Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
+
+ nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_READ);
+
+ TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum,
+ reln->smgr_rnode.node.spcNode,
+ reln->smgr_rnode.node.dbNode,
+ reln->smgr_rnode.node.relNode,
+ reln->smgr_rnode.backend,
+ nbytes,
+ BLCKSZ);
+
+ if (nbytes != BLCKSZ)
+ {
+ if (nbytes < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read block %u in file \"%s\": %m",
+ blocknum, FilePathName(v->mdfd_vfd))));
+
+ /*
+ * Short read: we are at or past EOF, or we read a partial block at
+ * EOF. Normally this is an error; upper levels should never try to
+ * read a nonexistent block. However, if zero_damaged_pages is ON or
+ * we are InRecovery, we should instead return zeroes without
+ * complaining. This allows, for example, the case of trying to
+ * update a block that was later truncated away.
+ */
+ if (zero_damaged_pages || InRecovery)
+ MemSet(buffer, 0, BLCKSZ);
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("could not read block %u in file \"%s\": read only %d of %d bytes",
+ blocknum, FilePathName(v->mdfd_vfd),
+ nbytes, BLCKSZ)));
+ }
+}
+
+/*
+ * mdwrite() -- Write the supplied block at the appropriate location.
+ *
+ * This is to be used only for updating already-existing blocks of a
+ * relation (ie, those before the current EOF). To extend a relation,
+ * use mdextend().
+ */
+void
+mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
+ char *buffer, bool skipFsync)
+{
+ off_t seekpos;
+ int nbytes;
+ MdfdVec *v;
+
+ /* This assert is too expensive to have on normally ... */
+#ifdef CHECK_WRITE_VS_EXTEND
+ Assert(blocknum < mdnblocks(reln, forknum));
+#endif
+
+ TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum,
+ reln->smgr_rnode.node.spcNode,
+ reln->smgr_rnode.node.dbNode,
+ reln->smgr_rnode.node.relNode,
+ reln->smgr_rnode.backend);
+
+ v = _mdfd_getseg(reln, forknum, blocknum, skipFsync,
+ EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
+
+ seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE));
+
+ Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
+
+ nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_WRITE);
+
+ TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum,
+ reln->smgr_rnode.node.spcNode,
+ reln->smgr_rnode.node.dbNode,
+ reln->smgr_rnode.node.relNode,
+ reln->smgr_rnode.backend,
+ nbytes,
+ BLCKSZ);
+
+ if (nbytes != BLCKSZ)
+ {
+ if (nbytes < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not write block %u in file \"%s\": %m",
+ blocknum, FilePathName(v->mdfd_vfd))));
+ /* short write: complain appropriately */
+ ereport(ERROR,
+ (errcode(ERRCODE_DISK_FULL),
+ errmsg("could not write block %u in file \"%s\": wrote only %d of %d bytes",
+ blocknum,
+ FilePathName(v->mdfd_vfd),
+ nbytes, BLCKSZ),
+ errhint("Check free disk space.")));
+ }
+
+ if (!skipFsync && !SmgrIsTemp(reln))
+ register_dirty_segment(reln, forknum, v);
+}
+
+/*
+ * mdnblocks() -- Get the number of blocks stored in a relation.
+ *
+ * Important side effect: all active segments of the relation are opened
+ * and added to the md_seg_fds array. If this routine has not been
+ * called, then only segments up to the last one actually touched
+ * are present in the array.
+ */
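+/*
+ * Worked example (illustrative): with RELSEG_SIZE = 131072, a fork stored
+ * as two full segments plus a 1000-block partial segment reports
+ * 2 * 131072 + 1000 = 263144 blocks.
+ */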
+BlockNumber
+mdnblocks(SMgrRelation reln, ForkNumber forknum)
+{
+ MdfdVec *v;
+ BlockNumber nblocks;
+ BlockNumber segno;
+
+ mdopenfork(reln, forknum, EXTENSION_FAIL);
+
+ /* mdopen has opened the first segment */
+ Assert(reln->md_num_open_segs[forknum] > 0);
+
+ /*
+ * Start from the last open segment, to avoid redundant seeks. We have
+ * previously verified that the segments before it are exactly RELSEG_SIZE
+ * long, and it's useless to recheck that each time.
+ *
+ * NOTE: this assumption could only be wrong if another backend has
+ * truncated the relation. We rely on higher code levels to handle that
+ * scenario by closing and re-opening the md fd, which is handled via
+ * relcache flush. (Since the checkpointer doesn't participate in
+ * relcache flush, it could have segment entries for inactive segments;
+ * that's OK because the checkpointer never needs to compute relation
+ * size.)
+ */
+ segno = reln->md_num_open_segs[forknum] - 1;
+ v = &reln->md_seg_fds[forknum][segno];
+
+ for (;;)
+ {
+ nblocks = _mdnblocks(reln, forknum, v);
+ if (nblocks > ((BlockNumber) RELSEG_SIZE))
+ elog(FATAL, "segment too big");
+ if (nblocks < ((BlockNumber) RELSEG_SIZE))
+ return (segno * ((BlockNumber) RELSEG_SIZE)) + nblocks;
+
+ /*
+ * If segment is exactly RELSEG_SIZE, advance to next one.
+ */
+ segno++;
+
+ /*
+ * We used to pass O_CREAT here, but that has the disadvantage that it
+ * might create a segment which has vanished through some operating
+ * system misadventure. In such a case, creating the segment here
+ * undermines _mdfd_getseg's attempts to notice and report an error
+ * upon access to a missing segment.
+ */
+ v = _mdfd_openseg(reln, forknum, segno, 0);
+ if (v == NULL)
+ return segno * ((BlockNumber) RELSEG_SIZE);
+ }
+}
+
+/*
+ * mdtruncate() -- Truncate relation to specified number of blocks.
+ */
+void
+mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
+{
+ BlockNumber curnblk;
+ BlockNumber priorblocks;
+ int curopensegs;
+
+ /*
+ * NOTE: mdnblocks makes sure we have opened all active segments, so that
+ * truncation loop will get them all!
+ */
+ curnblk = mdnblocks(reln, forknum);
+ if (nblocks > curnblk)
+ {
+ /* Bogus request ... but no complaint if InRecovery */
+ if (InRecovery)
+ return;
+ ereport(ERROR,
+ (errmsg("could not truncate file \"%s\" to %u blocks: it's only %u blocks now",
+ relpath(reln->smgr_rnode, forknum),
+ nblocks, curnblk)));
+ }
+ if (nblocks == curnblk)
+ return; /* no work */
+
+ /*
+ * Truncate segments, starting at the last one. Starting at the end makes
+ * managing the memory for the fd array easier, should there be errors.
+ */
+ curopensegs = reln->md_num_open_segs[forknum];
+ while (curopensegs > 0)
+ {
+ MdfdVec *v;
+
+ priorblocks = (curopensegs - 1) * RELSEG_SIZE;
+
+ v = &reln->md_seg_fds[forknum][curopensegs - 1];
+
+ if (priorblocks > nblocks)
+ {
+ /*
+ * This segment is no longer active. We truncate the file, but do
+ * not delete it, for reasons explained in the header comments.
+ */
+ if (FileTruncate(v->mdfd_vfd, 0, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not truncate file \"%s\": %m",
+ FilePathName(v->mdfd_vfd))));
+
+ if (!SmgrIsTemp(reln))
+ register_dirty_segment(reln, forknum, v);
+
+ /* we never drop the 1st segment */
+ Assert(v != &reln->md_seg_fds[forknum][0]);
+
+ FileClose(v->mdfd_vfd);
+ _fdvec_resize(reln, forknum, curopensegs - 1);
+ }
+ else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
+ {
+ /*
+ * This is the last segment we want to keep. Truncate the file to
+ * the right length. NOTE: if nblocks is exactly a multiple K of
+ * RELSEG_SIZE, we will truncate the K+1st segment to 0 length but
+ * keep it. This adheres to the invariant given in the header
+ * comments.
+ */
+ BlockNumber lastsegblocks = nblocks - priorblocks;
+
+ if (FileTruncate(v->mdfd_vfd, (off_t) lastsegblocks * BLCKSZ, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not truncate file \"%s\" to %u blocks: %m",
+ FilePathName(v->mdfd_vfd),
+ nblocks)));
+ if (!SmgrIsTemp(reln))
+ register_dirty_segment(reln, forknum, v);
+ }
+ else
+ {
+ /*
+ * We still need this segment, so nothing to do for this and any
+ * earlier segment.
+ */
+ break;
+ }
+ curopensegs--;
+ }
+}
+
+/*
+ * mdimmedsync() -- Immediately sync a relation to stable storage.
+ *
+ * Note that only writes already issued are synced; this routine knows
+ * nothing of dirty buffers that may exist inside the buffer manager. We
+ * sync active and inactive segments; smgrDoPendingSyncs() relies on this.
+ * Consider a relation skipping WAL. Suppose a checkpoint syncs blocks of
+ * some segment, then mdtruncate() renders that segment inactive. If we
+ * crash before the next checkpoint syncs the newly-inactive segment, that
+ * segment may survive recovery, reintroducing unwanted data into the table.
+ */
+void
+mdimmedsync(SMgrRelation reln, ForkNumber forknum)
+{
+ int segno;
+ int min_inactive_seg;
+
+ /*
+ * NOTE: mdnblocks makes sure we have opened all active segments, so that
+ * fsync loop will get them all!
+ */
+ mdnblocks(reln, forknum);
+
+ min_inactive_seg = segno = reln->md_num_open_segs[forknum];
+
+ /*
+ * Temporarily open inactive segments, then close them after sync. Some
+ * inactive segments may be left open after an fsync() error, but that is
+ * harmless; rather than risk further trouble by cleaning them up here, we
+ * let the next mdclose() close them.
+ */
+ while (_mdfd_openseg(reln, forknum, segno, 0) != NULL)
+ segno++;
+
+ while (segno > 0)
+ {
+ MdfdVec *v = &reln->md_seg_fds[forknum][segno - 1];
+
+ if (FileSync(v->mdfd_vfd, WAIT_EVENT_DATA_FILE_IMMEDIATE_SYNC) < 0)
+ ereport(data_sync_elevel(ERROR),
+ (errcode_for_file_access(),
+ errmsg("could not fsync file \"%s\": %m",
+ FilePathName(v->mdfd_vfd))));
+
+ /* Close inactive segments immediately */
+ if (segno > min_inactive_seg)
+ {
+ FileClose(v->mdfd_vfd);
+ _fdvec_resize(reln, forknum, segno - 1);
+ }
+
+ segno--;
+ }
+}
+
+/*
+ * register_dirty_segment() -- Mark a relation segment as needing fsync
+ *
+ * If there is a local pending-ops table, just make an entry in it for
+ * ProcessSyncRequests to process later. Otherwise, try to pass off the
+ * fsync request to the checkpointer process. If that fails, just do the
+ * fsync locally before returning (we hope this will not happen often
+ * enough to be a performance problem).
+ */
+static void
+register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
+{
+ FileTag tag;
+
+ INIT_MD_FILETAG(tag, reln->smgr_rnode.node, forknum, seg->mdfd_segno);
+
+ /* Temp relations should never be fsync'd */
+ Assert(!SmgrIsTemp(reln));
+
+ if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false /* retryOnError */ ))
+ {
+ ereport(DEBUG1,
+ (errmsg_internal("could not forward fsync request because request queue is full")));
+
+ if (FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) < 0)
+ ereport(data_sync_elevel(ERROR),
+ (errcode_for_file_access(),
+ errmsg("could not fsync file \"%s\": %m",
+ FilePathName(seg->mdfd_vfd))));
+ }
+}
+
+/*
+ * register_unlink_segment() -- Schedule a file to be deleted after next checkpoint
+ */
+static void
+register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
+ BlockNumber segno)
+{
+ FileTag tag;
+
+ INIT_MD_FILETAG(tag, rnode.node, forknum, segno);
+
+ /* Should never be used with temp relations */
+ Assert(!RelFileNodeBackendIsTemp(rnode));
+
+ RegisterSyncRequest(&tag, SYNC_UNLINK_REQUEST, true /* retryOnError */ );
+}
+
+/*
+ * register_forget_request() -- forget any fsyncs for a relation fork's segment
+ */
+static void
+register_forget_request(RelFileNodeBackend rnode, ForkNumber forknum,
+ BlockNumber segno)
+{
+ FileTag tag;
+
+ INIT_MD_FILETAG(tag, rnode.node, forknum, segno);
+
+ RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true /* retryOnError */ );
+}
+
+/*
+ * ForgetDatabaseSyncRequests -- forget any fsyncs and unlinks for a DB
+ */
+void
+ForgetDatabaseSyncRequests(Oid dbid)
+{
+ FileTag tag;
+ RelFileNode rnode;
+
+ rnode.dbNode = dbid;
+ rnode.spcNode = 0;
+ rnode.relNode = 0;
+
+ INIT_MD_FILETAG(tag, rnode, InvalidForkNumber, InvalidBlockNumber);
+
+ RegisterSyncRequest(&tag, SYNC_FILTER_REQUEST, true /* retryOnError */ );
+}
+
+/*
+ * DropRelationFiles -- drop files of all given relations
+ */
+void
+DropRelationFiles(RelFileNode *delrels, int ndelrels, bool isRedo)
+{
+ SMgrRelation *srels;
+ int i;
+
+ srels = palloc(sizeof(SMgrRelation) * ndelrels);
+ for (i = 0; i < ndelrels; i++)
+ {
+ SMgrRelation srel = smgropen(delrels[i], InvalidBackendId);
+
+ if (isRedo)
+ {
+ ForkNumber fork;
+
+ for (fork = 0; fork <= MAX_FORKNUM; fork++)
+ XLogDropRelation(delrels[i], fork);
+ }
+ srels[i] = srel;
+ }
+
+ smgrdounlinkall(srels, ndelrels, isRedo);
+
+ for (i = 0; i < ndelrels; i++)
+ smgrclose(srels[i]);
+ pfree(srels);
+}
+
+
+/*
+ * _fdvec_resize() -- Resize the fork's open segments array
+ */
+static void
+_fdvec_resize(SMgrRelation reln,
+ ForkNumber forknum,
+ int nseg)
+{
+ if (nseg == 0)
+ {
+ if (reln->md_num_open_segs[forknum] > 0)
+ {
+ pfree(reln->md_seg_fds[forknum]);
+ reln->md_seg_fds[forknum] = NULL;
+ }
+ }
+ else if (reln->md_num_open_segs[forknum] == 0)
+ {
+ reln->md_seg_fds[forknum] =
+ MemoryContextAlloc(MdCxt, sizeof(MdfdVec) * nseg);
+ }
+ else
+ {
+ /*
+ * It doesn't seem worthwhile complicating the code to amortize
+ * repalloc() calls. Those are far faster than PathNameOpenFile() or
+ * FileClose(), and the memory context internally will sometimes avoid
+ * doing an actual reallocation.
+ */
+ reln->md_seg_fds[forknum] =
+ repalloc(reln->md_seg_fds[forknum],
+ sizeof(MdfdVec) * nseg);
+ }
+
+ reln->md_num_open_segs[forknum] = nseg;
+}
+
+/*
+ * Return the filename for the specified segment of the relation. The
+ * returned string is palloc'd.
+ */
+static char *
+_mdfd_segpath(SMgrRelation reln, ForkNumber forknum, BlockNumber segno)
+{
+ char *path,
+ *fullpath;
+
+ path = relpath(reln->smgr_rnode, forknum);
+
+ if (segno > 0)
+ {
+ fullpath = psprintf("%s.%u", path, segno);
+ pfree(path);
+ }
+ else
+ fullpath = path;
+
+ return fullpath;
+}
+
+/*
+ * Open the specified segment of the relation,
+ * and make a MdfdVec object for it. Returns NULL on failure.
+ */
+static MdfdVec *
+_mdfd_openseg(SMgrRelation reln, ForkNumber forknum, BlockNumber segno,
+ int oflags)
+{
+ MdfdVec *v;
+ File fd;
+ char *fullpath;
+
+ fullpath = _mdfd_segpath(reln, forknum, segno);
+
+ /* open the file */
+ fd = PathNameOpenFile(fullpath, O_RDWR | PG_BINARY | oflags);
+
+ pfree(fullpath);
+
+ if (fd < 0)
+ return NULL;
+
+ /*
+ * Segments are always opened in order from lowest to highest, so we must
+ * be adding a new one at the end.
+ */
+ Assert(segno == reln->md_num_open_segs[forknum]);
+
+ _fdvec_resize(reln, forknum, segno + 1);
+
+ /* fill the entry */
+ v = &reln->md_seg_fds[forknum][segno];
+ v->mdfd_vfd = fd;
+ v->mdfd_segno = segno;
+
+ Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
+
+ /* all done */
+ return v;
+}
+
+/*
+ * _mdfd_getseg() -- Find the segment of the relation holding the
+ * specified block.
+ *
+ * If the segment doesn't exist, we ereport, return NULL, or create the
+ * segment, according to "behavior". Note: skipFsync is only used in the
+ * EXTENSION_CREATE case.
+ */
+static MdfdVec *
+_mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
+ bool skipFsync, int behavior)
+{
+ MdfdVec *v;
+ BlockNumber targetseg;
+ BlockNumber nextsegno;
+
+ /* some way to handle non-existent segments needs to be specified */
+ Assert(behavior &
+ (EXTENSION_FAIL | EXTENSION_CREATE | EXTENSION_RETURN_NULL));
+
+ targetseg = blkno / ((BlockNumber) RELSEG_SIZE);
+
+ /* if an existing and opened segment, we're done */
+ if (targetseg < reln->md_num_open_segs[forknum])
+ {
+ v = &reln->md_seg_fds[forknum][targetseg];
+ return v;
+ }
+
+ /*
+ * The target segment is not yet open. Iterate over all the segments
+ * between the last opened and the target segment. This way missing
+ * segments either raise an error or get created (according to
+ * 'behavior'). Start with either the last opened segment, or the first
+ * segment if none was opened before.
+ */
+ if (reln->md_num_open_segs[forknum] > 0)
+ v = &reln->md_seg_fds[forknum][reln->md_num_open_segs[forknum] - 1];
+ else
+ {
+ v = mdopenfork(reln, forknum, behavior);
+ if (!v)
+ return NULL; /* if behavior & EXTENSION_RETURN_NULL */
+ }
+
+ for (nextsegno = reln->md_num_open_segs[forknum];
+ nextsegno <= targetseg; nextsegno++)
+ {
+ BlockNumber nblocks = _mdnblocks(reln, forknum, v);
+ int flags = 0;
+
+ Assert(nextsegno == v->mdfd_segno + 1);
+
+ if (nblocks > ((BlockNumber) RELSEG_SIZE))
+ elog(FATAL, "segment too big");
+
+ if ((behavior & EXTENSION_CREATE) ||
+ (InRecovery && (behavior & EXTENSION_CREATE_RECOVERY)))
+ {
+ /*
+ * Normally we will create new segments only if authorized by the
+ * caller (i.e., we are doing mdextend()). But when doing WAL
+ * recovery, create segments anyway; this allows cases such as
+ * replaying WAL data that has a write into a high-numbered
+ * segment of a relation that was later deleted. We want to go
+ * ahead and create the segments so we can finish out the replay.
+ *
+ * We have to maintain the invariant that segments before the last
+ * active segment are of size RELSEG_SIZE; therefore, if
+ * extending, pad them out with zeroes if needed. (This only
+ * matters if in recovery, or if the caller is extending the
+ * relation discontiguously, but that can happen in hash indexes.)
+ */
+ if (nblocks < ((BlockNumber) RELSEG_SIZE))
+ {
+ char *zerobuf = palloc0(BLCKSZ);
+
+ mdextend(reln, forknum,
+ nextsegno * ((BlockNumber) RELSEG_SIZE) - 1,
+ zerobuf, skipFsync);
+ pfree(zerobuf);
+ }
+ flags = O_CREAT;
+ }
+ else if (!(behavior & EXTENSION_DONT_CHECK_SIZE) &&
+ nblocks < ((BlockNumber) RELSEG_SIZE))
+ {
+ /*
+ * When not extending (or explicitly including truncated
+ * segments), only open the next segment if the current one is
+ * exactly RELSEG_SIZE. If not (this branch), either return NULL
+ * or fail.
+ */
+ if (behavior & EXTENSION_RETURN_NULL)
+ {
+ /*
+ * Some callers distinguish between reasons for _mdfd_getseg()
+ * returning NULL based on errno. As there's no failing
+ * syscall involved in this case, explicitly set errno to
+ * ENOENT, as that seems the closest interpretation.
+ */
+ errno = ENOENT;
+ return NULL;
+ }
+
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\" (target block %u): previous segment is only %u blocks",
+ _mdfd_segpath(reln, forknum, nextsegno),
+ blkno, nblocks)));
+ }
+
+ v = _mdfd_openseg(reln, forknum, nextsegno, flags);
+
+ if (v == NULL)
+ {
+ if ((behavior & EXTENSION_RETURN_NULL) &&
+ FILE_POSSIBLY_DELETED(errno))
+ return NULL;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\" (target block %u): %m",
+ _mdfd_segpath(reln, forknum, nextsegno),
+ blkno)));
+ }
+ }
+
+ return v;
+}
+
+/*
+ * Get number of blocks present in a single disk file
+ */
+static BlockNumber
+_mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
+{
+ off_t len;
+
+ len = FileSize(seg->mdfd_vfd);
+ if (len < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not seek to end of file \"%s\": %m",
+ FilePathName(seg->mdfd_vfd))));
+ /* note that this calculation will ignore any partial block at EOF */
+ return (BlockNumber) (len / BLCKSZ);
+}
+
+/*
+ * Sync a file to disk, given a file tag. Write the path into an output
+ * buffer so the caller can use it in error messages.
+ *
+ * Return 0 on success, -1 on failure, with errno set.
+ */
+int
+mdsyncfiletag(const FileTag *ftag, char *path)
+{
+ SMgrRelation reln = smgropen(ftag->rnode, InvalidBackendId);
+ File file;
+ bool need_to_close;
+ int result,
+ save_errno;
+
+ /* See if we already have the file open, or need to open it. */
+ if (ftag->segno < reln->md_num_open_segs[ftag->forknum])
+ {
+ file = reln->md_seg_fds[ftag->forknum][ftag->segno].mdfd_vfd;
+ strlcpy(path, FilePathName(file), MAXPGPATH);
+ need_to_close = false;
+ }
+ else
+ {
+ char *p;
+
+ p = _mdfd_segpath(reln, ftag->forknum, ftag->segno);
+ strlcpy(path, p, MAXPGPATH);
+ pfree(p);
+
+ file = PathNameOpenFile(path, O_RDWR | PG_BINARY);
+ if (file < 0)
+ return -1;
+ need_to_close = true;
+ }
+
+ /* Sync the file. */
+ result = FileSync(file, WAIT_EVENT_DATA_FILE_SYNC);
+ save_errno = errno;
+
+ if (need_to_close)
+ FileClose(file);
+
+ errno = save_errno;
+ return result;
+}
+
+/*
+ * Unlink a file, given a file tag. Write the path into an output
+ * buffer so the caller can use it in error messages.
+ *
+ * Return 0 on success, -1 on failure, with errno set.
+ */
+int
+mdunlinkfiletag(const FileTag *ftag, char *path)
+{
+ char *p;
+
+ /* Compute the path. */
+ p = relpathperm(ftag->rnode, MAIN_FORKNUM);
+ strlcpy(path, p, MAXPGPATH);
+ pfree(p);
+
+ /* Try to unlink the file. */
+ return unlink(path);
+}
+
+/*
+ * Check if a given candidate request matches a given tag, when processing
+ * a SYNC_FILTER_REQUEST request. This will be called for all pending
+ * requests to find out whether to forget them.
+ */
+bool
+mdfiletagmatches(const FileTag *ftag, const FileTag *candidate)
+{
+ /*
+ * For now we only use filter requests as a way to drop all scheduled
+ * callbacks relating to a given database, when dropping the database.
+ * We'll return true for all candidates that have the same database OID as
+ * the ftag from the SYNC_FILTER_REQUEST request, so they're forgotten.
+ */
+ return ftag->rnode.dbNode == candidate->rnode.dbNode;
+}
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
new file mode 100644
index 0000000..4dc2464
--- /dev/null
+++ b/src/backend/storage/smgr/smgr.c
@@ -0,0 +1,695 @@
+/*-------------------------------------------------------------------------
+ *
+ * smgr.c
+ * public interface routines to storage manager switch.
+ *
+ * All file system operations in POSTGRES dispatch through these
+ * routines.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/storage/smgr/smgr.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/xlog.h"
+#include "lib/ilist.h"
+#include "storage/bufmgr.h"
+#include "storage/ipc.h"
+#include "storage/md.h"
+#include "storage/smgr.h"
+#include "utils/hsearch.h"
+#include "utils/inval.h"
+
+
+/*
+ * This struct of function pointers defines the API between smgr.c and
+ * any individual storage manager module. Note that smgr subfunctions are
+ * generally expected to report problems via elog(ERROR). An exception is
+ * that smgr_unlink should use elog(WARNING), rather than erroring out,
+ * because we normally unlink relations during post-commit/abort cleanup,
+ * and so it's too late to raise an error. Also, various conditions that
+ * would normally be errors should be allowed during bootstrap and/or WAL
+ * recovery --- see comments in md.c for details.
+ */
+typedef struct f_smgr
+{
+ void (*smgr_init) (void); /* may be NULL */
+ void (*smgr_shutdown) (void); /* may be NULL */
+ void (*smgr_open) (SMgrRelation reln);
+ void (*smgr_close) (SMgrRelation reln, ForkNumber forknum);
+ void (*smgr_create) (SMgrRelation reln, ForkNumber forknum,
+ bool isRedo);
+ bool (*smgr_exists) (SMgrRelation reln, ForkNumber forknum);
+ void (*smgr_unlink) (RelFileNodeBackend rnode, ForkNumber forknum,
+ bool isRedo);
+ void (*smgr_extend) (SMgrRelation reln, ForkNumber forknum,
+ BlockNumber blocknum, char *buffer, bool skipFsync);
+ bool (*smgr_prefetch) (SMgrRelation reln, ForkNumber forknum,
+ BlockNumber blocknum);
+ void (*smgr_read) (SMgrRelation reln, ForkNumber forknum,
+ BlockNumber blocknum, char *buffer);
+ void (*smgr_write) (SMgrRelation reln, ForkNumber forknum,
+ BlockNumber blocknum, char *buffer, bool skipFsync);
+ void (*smgr_writeback) (SMgrRelation reln, ForkNumber forknum,
+ BlockNumber blocknum, BlockNumber nblocks);
+ BlockNumber (*smgr_nblocks) (SMgrRelation reln, ForkNumber forknum);
+ void (*smgr_truncate) (SMgrRelation reln, ForkNumber forknum,
+ BlockNumber nblocks);
+ void (*smgr_immedsync) (SMgrRelation reln, ForkNumber forknum);
+} f_smgr;
+
+static const f_smgr smgrsw[] = {
+ /* magnetic disk */
+ {
+ .smgr_init = mdinit,
+ .smgr_shutdown = NULL,
+ .smgr_open = mdopen,
+ .smgr_close = mdclose,
+ .smgr_create = mdcreate,
+ .smgr_exists = mdexists,
+ .smgr_unlink = mdunlink,
+ .smgr_extend = mdextend,
+ .smgr_prefetch = mdprefetch,
+ .smgr_read = mdread,
+ .smgr_write = mdwrite,
+ .smgr_writeback = mdwriteback,
+ .smgr_nblocks = mdnblocks,
+ .smgr_truncate = mdtruncate,
+ .smgr_immedsync = mdimmedsync,
+ }
+};
+
+static const int NSmgr = lengthof(smgrsw);
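+
+/*
+ * Hypothetical sketch (not part of this patch): another storage manager
+ * would be added by implementing the f_smgr callbacks and appending an
+ * entry to smgrsw[], e.g.
+ *
+ *	{ .smgr_init = inmem_init, .smgr_open = inmem_open, ... },
+ *
+ * with SMgrRelation.smgr_which set to that entry's index. (The inmem_*
+ * names are invented for illustration only.)
+ */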
+
+/*
+ * Each backend has a hashtable that stores all extant SMgrRelation objects.
+ * In addition, "unowned" SMgrRelation objects are chained together in a list.
+ */
+static HTAB *SMgrRelationHash = NULL;
+
+static dlist_head unowned_relns;
+
+/* local function prototypes */
+static void smgrshutdown(int code, Datum arg);
+
+
+/*
+ * smgrinit(), smgrshutdown() -- Initialize or shut down storage
+ * managers.
+ *
+ * Note: smgrinit is called during backend startup (normal or standalone
+ * case), *not* during postmaster start. Therefore, any resources created
+ * here or destroyed in smgrshutdown are backend-local.
+ */
+void
+smgrinit(void)
+{
+ int i;
+
+ for (i = 0; i < NSmgr; i++)
+ {
+ if (smgrsw[i].smgr_init)
+ smgrsw[i].smgr_init();
+ }
+
+ /* register the shutdown proc */
+ on_proc_exit(smgrshutdown, 0);
+}
+
+/*
+ * on_proc_exit hook for smgr cleanup during backend shutdown
+ */
+static void
+smgrshutdown(int code, Datum arg)
+{
+ int i;
+
+ for (i = 0; i < NSmgr; i++)
+ {
+ if (smgrsw[i].smgr_shutdown)
+ smgrsw[i].smgr_shutdown();
+ }
+}
+
+/*
+ * smgropen() -- Return an SMgrRelation object, creating it if need be.
+ *
+ * This does not attempt to actually open the underlying file.
+ */
+SMgrRelation
+smgropen(RelFileNode rnode, BackendId backend)
+{
+ RelFileNodeBackend brnode;
+ SMgrRelation reln;
+ bool found;
+
+ if (SMgrRelationHash == NULL)
+ {
+ /* First time through: initialize the hash table */
+ HASHCTL ctl;
+
+ ctl.keysize = sizeof(RelFileNodeBackend);
+ ctl.entrysize = sizeof(SMgrRelationData);
+ SMgrRelationHash = hash_create("smgr relation table", 400,
+ &ctl, HASH_ELEM | HASH_BLOBS);
+ dlist_init(&unowned_relns);
+ }
+
+ /* Look up or create an entry */
+ brnode.node = rnode;
+ brnode.backend = backend;
+ reln = (SMgrRelation) hash_search(SMgrRelationHash,
+ (void *) &brnode,
+ HASH_ENTER, &found);
+
+ /* Initialize it if not present before */
+ if (!found)
+ {
+ /* hash_search already filled in the lookup key */
+ reln->smgr_owner = NULL;
+ reln->smgr_targblock = InvalidBlockNumber;
+ for (int i = 0; i <= MAX_FORKNUM; ++i)
+ reln->smgr_cached_nblocks[i] = InvalidBlockNumber;
+ reln->smgr_which = 0; /* we only have md.c at present */
+
+ /* implementation-specific initialization */
+ smgrsw[reln->smgr_which].smgr_open(reln);
+
+ /* it has no owner yet */
+ dlist_push_tail(&unowned_relns, &reln->node);
+ }
+
+ return reln;
+}
+
+/*
+ * smgrsetowner() -- Establish a long-lived reference to an SMgrRelation object
+ *
+ * There can be only one owner at a time; this is sufficient since currently
+ * the only such owners exist in the relcache.
+ */
+void
+smgrsetowner(SMgrRelation *owner, SMgrRelation reln)
+{
+ /* We don't support "disowning" an SMgrRelation here, use smgrclearowner */
+ Assert(owner != NULL);
+
+ /*
+ * First, unhook any old owner. (Normally there shouldn't be any, but it
+ * seems possible that this can happen during swap_relation_files()
+ * depending on the order of processing. It's ok to close the old
+ * relcache entry early in that case.)
+ *
+ * If there isn't an old owner, then the reln should be in the unowned
+ * list, and we need to remove it.
+ */
+ if (reln->smgr_owner)
+ *(reln->smgr_owner) = NULL;
+ else
+ dlist_delete(&reln->node);
+
+ /* Now establish the ownership relationship. */
+ reln->smgr_owner = owner;
+ *owner = reln;
+}
+
+/*
+ * smgrclearowner() -- Remove long-lived reference to an SMgrRelation object
+ * if one exists
+ */
+void
+smgrclearowner(SMgrRelation *owner, SMgrRelation reln)
+{
+ /* Do nothing if the SMgrRelation object is not owned by the owner */
+ if (reln->smgr_owner != owner)
+ return;
+
+ /* unset the owner's reference */
+ *owner = NULL;
+
+ /* unset our reference to the owner */
+ reln->smgr_owner = NULL;
+
+ /* add to list of unowned relations */
+ dlist_push_tail(&unowned_relns, &reln->node);
+}
+
+/*
+ * smgrexists() -- Does the underlying file for a fork exist?
+ */
+bool
+smgrexists(SMgrRelation reln, ForkNumber forknum)
+{
+ return smgrsw[reln->smgr_which].smgr_exists(reln, forknum);
+}
+
+/*
+ * smgrclose() -- Close and delete an SMgrRelation object.
+ */
+void
+smgrclose(SMgrRelation reln)
+{
+ SMgrRelation *owner;
+ ForkNumber forknum;
+
+ for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
+ smgrsw[reln->smgr_which].smgr_close(reln, forknum);
+
+ owner = reln->smgr_owner;
+
+ if (!owner)
+ dlist_delete(&reln->node);
+
+ if (hash_search(SMgrRelationHash,
+ (void *) &(reln->smgr_rnode),
+ HASH_REMOVE, NULL) == NULL)
+ elog(ERROR, "SMgrRelation hashtable corrupted");
+
+ /*
+ * Unhook the owner pointer, if any. We do this last since in the remote
+ * possibility of failure above, the SMgrRelation object will still exist.
+ */
+ if (owner)
+ *owner = NULL;
+}
+
+/*
+ * smgrcloseall() -- Close all existing SMgrRelation objects.
+ */
+void
+smgrcloseall(void)
+{
+ HASH_SEQ_STATUS status;
+ SMgrRelation reln;
+
+ /* Nothing to do if hashtable not set up */
+ if (SMgrRelationHash == NULL)
+ return;
+
+ hash_seq_init(&status, SMgrRelationHash);
+
+ while ((reln = (SMgrRelation) hash_seq_search(&status)) != NULL)
+ smgrclose(reln);
+}
+
+/*
+ * smgrclosenode() -- Close SMgrRelation object for given RelFileNode,
+ * if one exists.
+ *
+ * This has the same effects as smgrclose(smgropen(rnode)), but it avoids
+ * uselessly creating a hashtable entry only to drop it again when no
+ * such entry exists already.
+ */
+void
+smgrclosenode(RelFileNodeBackend rnode)
+{
+ SMgrRelation reln;
+
+ /* Nothing to do if hashtable not set up */
+ if (SMgrRelationHash == NULL)
+ return;
+
+ reln = (SMgrRelation) hash_search(SMgrRelationHash,
+ (void *) &rnode,
+ HASH_FIND, NULL);
+ if (reln != NULL)
+ smgrclose(reln);
+}
+
+/*
+ * smgrcreate() -- Create a new relation.
+ *
+ * Given an already-created (but presumably unused) SMgrRelation,
+ * cause the underlying disk file or other storage for the fork
+ * to be created.
+ */
+void
+smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
+{
+ smgrsw[reln->smgr_which].smgr_create(reln, forknum, isRedo);
+}
+
+/*
+ * smgrdosyncall() -- Immediately sync all forks of all given relations
+ *
+ * All forks of all given relations are synced out to the store.
+ *
+ * This is equivalent to FlushRelationBuffers() for each smgr relation,
+ * then calling smgrimmedsync() for all forks of each relation, but it's
+ * significantly quicker so should be preferred when possible.
+ */
+void
+smgrdosyncall(SMgrRelation *rels, int nrels)
+{
+ int i = 0;
+ ForkNumber forknum;
+
+ if (nrels == 0)
+ return;
+
+ FlushRelationsAllBuffers(rels, nrels);
+
+ /*
+ * Sync the physical file(s).
+ */
+ for (i = 0; i < nrels; i++)
+ {
+ int which = rels[i]->smgr_which;
+
+ for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
+ {
+ if (smgrsw[which].smgr_exists(rels[i], forknum))
+ smgrsw[which].smgr_immedsync(rels[i], forknum);
+ }
+ }
+}
+
+/*
+ * smgrdounlinkall() -- Immediately unlink all forks of all given relations
+ *
+ * All forks of all given relations are removed from the store. This
+ * should not be used during transactional operations, since it can't be
+ * undone.
+ *
+ * If isRedo is true, it is okay for the underlying file(s) to be gone
+ * already.
+ */
+void
+smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
+{
+ int i = 0;
+ RelFileNodeBackend *rnodes;
+ ForkNumber forknum;
+
+ if (nrels == 0)
+ return;
+
+ /*
+ * Get rid of any remaining buffers for the relations. bufmgr will just
+ * drop them without bothering to write the contents.
+ */
+ DropRelFileNodesAllBuffers(rels, nrels);
+
+ /*
+ * create an array which contains all relations to be dropped, and close
+ * each relation's forks at the smgr level while at it
+ */
+ rnodes = palloc(sizeof(RelFileNodeBackend) * nrels);
+ for (i = 0; i < nrels; i++)
+ {
+ RelFileNodeBackend rnode = rels[i]->smgr_rnode;
+ int which = rels[i]->smgr_which;
+
+ rnodes[i] = rnode;
+
+ /* Close the forks at smgr level */
+ for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
+ smgrsw[which].smgr_close(rels[i], forknum);
+ }
+
+ /*
+ * It'd be nice to tell the stats collector to forget them immediately,
+ * too. But we can't because we don't know the OIDs.
+ */
+
+ /*
+ * Send a shared-inval message to force other backends to close any
+ * dangling smgr references they may have for these rels. We should do
+ * this before starting the actual unlinking, in case we fail partway
+ * through that step. Note that the sinval messages will eventually come
+ * back to this backend, too, and thereby provide a backstop that we
+ * closed our own smgr rel.
+ */
+ for (i = 0; i < nrels; i++)
+ CacheInvalidateSmgr(rnodes[i]);
+
+ /*
+ * Delete the physical file(s).
+ *
+ * Note: smgr_unlink must treat deletion failure as a WARNING, not an
+ * ERROR, because we've already decided to commit or abort the current
+ * xact.
+ */
+
+ for (i = 0; i < nrels; i++)
+ {
+ int which = rels[i]->smgr_which;
+
+ for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
+ smgrsw[which].smgr_unlink(rnodes[i], forknum, isRedo);
+ }
+
+ pfree(rnodes);
+}
+
+
+/*
+ * smgrextend() -- Add a new block to a file.
+ *
+ * The semantics are nearly the same as smgrwrite(): write at the
+ * specified position. However, this is to be used for the case of
+ * extending a relation (i.e., blocknum is at or beyond the current
+ * EOF). Note that we assume writing a block beyond current EOF
+ * causes intervening file space to become filled with zeroes.
+ */
+void
+smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
+ char *buffer, bool skipFsync)
+{
+ smgrsw[reln->smgr_which].smgr_extend(reln, forknum, blocknum,
+ buffer, skipFsync);
+
+ /*
+ * Normally we expect this to increase nblocks by one, but if the cached
+ * value isn't as expected, just invalidate it so the next call asks the
+ * kernel.
+ */
+ if (reln->smgr_cached_nblocks[forknum] == blocknum)
+ reln->smgr_cached_nblocks[forknum] = blocknum + 1;
+ else
+ reln->smgr_cached_nblocks[forknum] = InvalidBlockNumber;
+}
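+
+#ifdef SMGR_USAGE_EXAMPLES
+/*
+ * Illustrative sketch, not part of the patch: extend a fork by one
+ * zero-filled block at the current EOF, the typical way callers use
+ * smgrextend().  The helper name and guard macro are hypothetical;
+ * PGAlignedBlock is the usual way to get a suitably aligned page buffer.
+ */
+static void
+example_extend_by_one_block(SMgrRelation reln, ForkNumber forknum)
+{
+	PGAlignedBlock zbuffer;
+	BlockNumber blocknum = smgrnblocks(reln, forknum);
+
+	memset(zbuffer.data, 0, BLCKSZ);
+	/* skipFsync = false: let the checkpointer handle the eventual fsync */
+	smgrextend(reln, forknum, blocknum, zbuffer.data, false);
+}
+#endif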
+
+/*
+ * smgrprefetch() -- Initiate asynchronous read of the specified block of a relation.
+ *
+ * In recovery only, this can return false to indicate that a file
+ * doesn't exist (presumably it has been dropped by a later WAL
+ * record).
+ */
+bool
+smgrprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
+{
+ return smgrsw[reln->smgr_which].smgr_prefetch(reln, forknum, blocknum);
+}
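+
+#ifdef SMGR_USAGE_EXAMPLES
+/*
+ * Illustrative sketch, not part of the patch: a caller that reads blocks
+ * sequentially can hint a few blocks ahead before each synchronous read.
+ * EXAMPLE_PREFETCH_DISTANCE, the helper name, and the guard macro are all
+ * hypothetical.
+ */
+#define EXAMPLE_PREFETCH_DISTANCE 8
+
+static void
+example_read_with_prefetch(SMgrRelation reln, ForkNumber forknum,
+						   BlockNumber blocknum, BlockNumber nblocks,
+						   char *buffer)
+{
+	/* Hint upcoming blocks to the kernel, staying within the fork's length */
+	for (BlockNumber ahead = blocknum + 1;
+		 ahead < nblocks && ahead <= blocknum + EXAMPLE_PREFETCH_DISTANCE;
+		 ahead++)
+		(void) smgrprefetch(reln, forknum, ahead);
+
+	smgrread(reln, forknum, blocknum, buffer);
+}
+#endif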
+
+/*
+ * smgrread() -- Read a particular block from a relation into the supplied
+ *				 buffer.
+ *
+ * This routine is called from the buffer manager in order to
+ * instantiate pages in the shared buffer cache. All storage managers
+ * return pages in the format that POSTGRES expects.
+ */
+void
+smgrread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
+ char *buffer)
+{
+ smgrsw[reln->smgr_which].smgr_read(reln, forknum, blocknum, buffer);
+}
+
+/*
+ * smgrwrite() -- Write the supplied buffer out.
+ *
+ * This is to be used only for updating already-existing blocks of a
+ * relation (ie, those before the current EOF). To extend a relation,
+ * use smgrextend().
+ *
+ * This is not a synchronous write -- the block is not necessarily
+ * on disk at return, only dumped out to the kernel. However,
+ * provisions will be made to fsync the write before the next checkpoint.
+ *
+ * skipFsync indicates that the caller will make other provisions to
+ * fsync the relation, so we needn't bother. Temporary relations also
+ * do not require fsync.
+ */
+void
+smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
+ char *buffer, bool skipFsync)
+{
+ smgrsw[reln->smgr_which].smgr_write(reln, forknum, blocknum,
+ buffer, skipFsync);
+}
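+
+#ifdef SMGR_USAGE_EXAMPLES
+/*
+ * Illustrative sketch, not part of the patch: choose smgrwrite() or
+ * smgrextend() depending on whether the target block lies before the
+ * current EOF, mirroring the rule stated in the header comment above.
+ * The helper name and guard macro are hypothetical.
+ */
+static void
+example_write_block(SMgrRelation reln, ForkNumber forknum,
+					BlockNumber blocknum, char *buffer)
+{
+	if (blocknum < smgrnblocks(reln, forknum))
+		smgrwrite(reln, forknum, blocknum, buffer, false);
+	else
+		smgrextend(reln, forknum, blocknum, buffer, false);
+}
+#endif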
+
+
+/*
+ * smgrwriteback() -- Trigger kernel writeback for the supplied range of
+ * blocks.
+ */
+void
+smgrwriteback(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
+ BlockNumber nblocks)
+{
+ smgrsw[reln->smgr_which].smgr_writeback(reln, forknum, blocknum,
+ nblocks);
+}
+
+/*
+ * smgrnblocks() -- Calculate the number of blocks in the
+ * supplied relation.
+ */
+BlockNumber
+smgrnblocks(SMgrRelation reln, ForkNumber forknum)
+{
+ BlockNumber result;
+
+	/* Return the cached number of blocks, if we have a valid value. */
+ result = smgrnblocks_cached(reln, forknum);
+ if (result != InvalidBlockNumber)
+ return result;
+
+ result = smgrsw[reln->smgr_which].smgr_nblocks(reln, forknum);
+
+ reln->smgr_cached_nblocks[forknum] = result;
+
+ return result;
+}
+
+/*
+ * smgrnblocks_cached() -- Get the cached number of blocks in the supplied
+ * relation.
+ *
+ * Returns InvalidBlockNumber when we are not in recovery, or when the
+ * relation fork size is not cached.
+ */
+BlockNumber
+smgrnblocks_cached(SMgrRelation reln, ForkNumber forknum)
+{
+ /*
+ * For now, we only use cached values in recovery due to lack of a shared
+ * invalidation mechanism for changes in file size.
+ */
+ if (InRecovery && reln->smgr_cached_nblocks[forknum] != InvalidBlockNumber)
+ return reln->smgr_cached_nblocks[forknum];
+
+ return InvalidBlockNumber;
+}
+
+/*
+ * smgrtruncate() -- Truncate the given forks of the supplied relation,
+ *					 each to its specified number of blocks
+ *
+ * The truncation is done immediately, so this can't be rolled back.
+ *
+ * The caller must hold AccessExclusiveLock on the relation, to ensure that
+ * other backends receive the smgr invalidation event that this function sends
+ * before they access any forks of the relation again.
+ */
+void
+smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nblocks)
+{
+ int i;
+
+ /*
+ * Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
+ * just drop them without bothering to write the contents.
+ */
+ DropRelFileNodeBuffers(reln, forknum, nforks, nblocks);
+
+ /*
+ * Send a shared-inval message to force other backends to close any smgr
+ * references they may have for this rel. This is useful because they
+ * might have open file pointers to segments that got removed, and/or
+ * smgr_targblock variables pointing past the new rel end. (The inval
+ * message will come back to our backend, too, causing a
+ * probably-unnecessary local smgr flush. But we don't expect that this
+ * is a performance-critical path.) As in the unlink code, we want to be
+ * sure the message is sent before we start changing things on-disk.
+ */
+ CacheInvalidateSmgr(reln->smgr_rnode);
+
+ /* Do the truncation */
+ for (i = 0; i < nforks; i++)
+ {
+		/* Invalidate the cached size now, so it stays invalid if we error out. */
+ reln->smgr_cached_nblocks[forknum[i]] = InvalidBlockNumber;
+
+ smgrsw[reln->smgr_which].smgr_truncate(reln, forknum[i], nblocks[i]);
+
+		/*
+		 * We might as well update the local smgr_cached_nblocks values. The
+		 * smgr cache inval message sent above will cause other backends (and
+		 * eventually this backend, too) to invalidate their cached sizes at
+		 * the next command boundary; updating ours here just ensures they
+		 * aren't outright wrong until then.
+		 */
+ reln->smgr_cached_nblocks[forknum[i]] = nblocks[i];
+ }
+}
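+
+#ifdef SMGR_USAGE_EXAMPLES
+/*
+ * Illustrative sketch, not part of the patch: truncate just the main fork
+ * to a new length.  Per the header comment above, the caller must already
+ * hold AccessExclusiveLock on the relation.  The helper name and guard
+ * macro are hypothetical.
+ */
+static void
+example_truncate_main_fork(SMgrRelation reln, BlockNumber new_nblocks)
+{
+	ForkNumber	forks[1] = {MAIN_FORKNUM};
+	BlockNumber blocks[1] = {new_nblocks};
+
+	smgrtruncate(reln, forks, 1, blocks);
+}
+#endif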
+
+/*
+ * smgrimmedsync() -- Force the specified relation to stable storage.
+ *
+ * Synchronously force all previous writes to the specified relation
+ * down to disk.
+ *
+ * This is useful for building completely new relations (eg, new
+ * indexes). Instead of incrementally WAL-logging the index build
+ * steps, we can just write completed index pages to disk with smgrwrite
+ * or smgrextend, and then fsync the completed index file before
+ * committing the transaction. (This is sufficient for purposes of
+ * crash recovery, since it effectively duplicates forcing a checkpoint
+ * for the completed index. But it is *not* sufficient if one wishes
+ * to use the WAL log for PITR or replication purposes: in that case
+ * we have to make WAL entries as well.)
+ *
+ * The preceding writes should specify skipFsync = true to avoid
+ * duplicative fsyncs.
+ *
+ * Note that you need to do FlushRelationBuffers() first if there is
+ * any possibility that there are dirty buffers for the relation;
+ * otherwise the sync is not very meaningful.
+ */
+void
+smgrimmedsync(SMgrRelation reln, ForkNumber forknum)
+{
+ smgrsw[reln->smgr_which].smgr_immedsync(reln, forknum);
+}
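+
+#ifdef SMGR_USAGE_EXAMPLES
+/*
+ * Illustrative sketch, not part of the patch: the bulk-build pattern the
+ * header comment describes.  Every page is written with skipFsync = true,
+ * then the whole fork is forced to disk once.  A real index build must also
+ * WAL-log the pages if PITR or replication matters, and must call
+ * FlushRelationBuffers() first if it dirtied shared buffers.  The helper
+ * name and guard macro are hypothetical.
+ */
+static void
+example_bulk_load_then_sync(SMgrRelation reln, char **pages,
+							BlockNumber npages)
+{
+	for (BlockNumber blkno = 0; blkno < npages; blkno++)
+		smgrextend(reln, MAIN_FORKNUM, blkno, pages[blkno], true);
+
+	/* one synchronous fsync instead of a per-page fsync or a checkpoint */
+	smgrimmedsync(reln, MAIN_FORKNUM);
+}
+#endif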
+
+/*
+ * AtEOXact_SMgr
+ *
+ * This routine is called during transaction commit or abort (it doesn't
+ * particularly care which). All transient SMgrRelation objects are closed.
+ *
+ * We do this as a compromise between wanting transient SMgrRelations to
+ * live awhile (to amortize the costs of blind writes of multiple blocks)
+ * and needing them to not live forever (since we're probably holding open
+ * a kernel file descriptor for the underlying file, and we need to ensure
+ * that gets closed reasonably soon if the file gets deleted).
+ */
+void
+AtEOXact_SMgr(void)
+{
+ dlist_mutable_iter iter;
+
+ /*
+ * Zap all unowned SMgrRelations. We rely on smgrclose() to remove each
+ * one from the list.
+ */
+ dlist_foreach_modify(iter, &unowned_relns)
+ {
+ SMgrRelation rel = dlist_container(SMgrRelationData, node,
+ iter.cur);
+
+ Assert(rel->smgr_owner == NULL);
+
+ smgrclose(rel);
+ }
+}