author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 12:15:05 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 12:15:05 +0000
commit    46651ce6fe013220ed397add242004d764fc0153 (patch)
tree      6e5299f990f88e60174a1d3ae6e48eedd2688b2b /src/backend/access/heap
parent    Initial commit. (diff)
download  postgresql-14-46651ce6fe013220ed397add242004d764fc0153.tar.xz
          postgresql-14-46651ce6fe013220ed397add242004d764fc0153.zip
Adding upstream version 14.5. (upstream/14.5, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/backend/access/heap')
-rw-r--r--  src/backend/access/heap/Makefile               26
-rw-r--r--  src/backend/access/heap/README.HOT            499
-rw-r--r--  src/backend/access/heap/README.tuplock        155
-rw-r--r--  src/backend/access/heap/heapam.c             9955
-rw-r--r--  src/backend/access/heap/heapam_handler.c     2608
-rw-r--r--  src/backend/access/heap/heapam_visibility.c  1794
-rw-r--r--  src/backend/access/heap/heaptoast.c           793
-rw-r--r--  src/backend/access/heap/hio.c                 721
-rw-r--r--  src/backend/access/heap/pruneheap.c          1052
-rw-r--r--  src/backend/access/heap/rewriteheap.c        1295
-rw-r--r--  src/backend/access/heap/vacuumlazy.c         4353
-rw-r--r--  src/backend/access/heap/visibilitymap.c       672
12 files changed, 23923 insertions, 0 deletions
diff --git a/src/backend/access/heap/Makefile b/src/backend/access/heap/Makefile
new file mode 100644
index 0000000..af0bd18
--- /dev/null
+++ b/src/backend/access/heap/Makefile
@@ -0,0 +1,26 @@
+#-------------------------------------------------------------------------
+#
+# Makefile--
+# Makefile for access/heap
+#
+# IDENTIFICATION
+# src/backend/access/heap/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/backend/access/heap
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+
+OBJS = \
+ heapam.o \
+ heapam_handler.o \
+ heapam_visibility.o \
+ heaptoast.o \
+ hio.o \
+ pruneheap.o \
+ rewriteheap.o \
+ vacuumlazy.o \
+ visibilitymap.o
+
+include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/access/heap/README.HOT b/src/backend/access/heap/README.HOT
new file mode 100644
index 0000000..68c6709
--- /dev/null
+++ b/src/backend/access/heap/README.HOT
@@ -0,0 +1,499 @@
+src/backend/access/heap/README.HOT
+
+Heap Only Tuples (HOT)
+======================
+
+The Heap Only Tuple (HOT) feature eliminates redundant index entries and
+allows the re-use of space taken by DELETEd or obsoleted UPDATEd tuples
+without performing a table-wide vacuum. It does this by allowing
+single-page vacuuming, also called "defragmentation".
+
+Note: there is a Glossary at the end of this document that may be helpful
+for first-time readers.
+
+
+Technical Challenges
+--------------------
+
+Page-at-a-time vacuuming is normally impractical because of the costs of
+finding and removing the index entries that link to the tuples to be
+reclaimed. Standard vacuuming scans the indexes to ensure all such index
+entries are removed, amortizing the index scan cost across as many dead
+tuples as possible; this approach does not scale down well to the case of
+reclaiming just a few tuples. In principle one could recompute the index
+keys and do standard index searches to find the index entries, but this is
+risky in the presence of possibly-buggy user-defined functions in
+functional indexes. An allegedly immutable function that in fact is not
+immutable might prevent us from re-finding an index entry (and we cannot
+throw an error for not finding it, in view of the fact that dead index
+entries are sometimes reclaimed early). That would lead to a seriously
+corrupt index, in the form of entries pointing to tuple slots that by now
+contain some unrelated content. In any case we would prefer to be able
+to do vacuuming without invoking any user-written code.
+
+HOT solves this problem for a restricted but useful special case:
+where a tuple is repeatedly updated in ways that do not change its
+indexed columns. (Here, "indexed column" means any column referenced
+at all in an index definition, including for example columns that are
+tested in a partial-index predicate but are not stored in the index.)
+
+An additional property of HOT is that it reduces index size by avoiding
+the creation of identically-keyed index entries. This improves search
+speeds.
+
+
+Update Chains With a Single Index Entry
+---------------------------------------
+
+Without HOT, every version of a row in an update chain has its own index
+entries, even if all indexed columns are the same. With HOT, a new tuple
+placed on the same page and with all indexed columns the same as its
+parent row version does not get new index entries. This means there is
+only one index entry for the entire update chain on the heap page.
+An index-entry-less tuple is marked with the HEAP_ONLY_TUPLE flag.
+The prior row version is marked HEAP_HOT_UPDATED, and (as always in an
+update chain) its t_ctid field links forward to the newer version.
+
+For example:
+
+ Index points to 1
+ lp [1] [2]
+
+ [111111111]->[2222222222]
+
+In the above diagram, the index points to line pointer 1, and tuple 1 is
+marked as HEAP_HOT_UPDATED. Tuple 2 is a HOT tuple, meaning it has
+no index entry pointing to it, and is marked as HEAP_ONLY_TUPLE.
+Although tuple 2 is not directly referenced by the index, it can still be
+found by an index search: after traversing from the index to tuple 1,
+the index search proceeds forward to child tuples as long as it sees the
+HEAP_HOT_UPDATED flag set. Since we restrict the HOT chain to lie within
+a single page, this requires no additional page fetches and doesn't
+introduce much performance penalty.
+
+Eventually, tuple 1 will no longer be visible to any transaction.
+At that point its space could be reclaimed, but its line pointer cannot,
+since the index still links to that line pointer and we still need to
+be able to find tuple 2 in an index search. HOT handles this by turning
+line pointer 1 into a "redirecting line pointer", which links to tuple 2
+but has no actual tuple attached. This state of affairs looks like
+
+ Index points to 1
+ lp [1]->[2]
+
+ [2222222222]
+
+If now the row is updated again, to version 3, the page looks like this:
+
+ Index points to 1
+ lp [1]->[2] [3]
+
+ [2222222222]->[3333333333]
+
+At some later time when no transaction can see tuple 2 in its snapshot,
+tuple 2 and its line pointer can be pruned entirely:
+
+ Index points to 1
+ lp [1]------>[3]
+
+ [3333333333]
+
+This is safe because no index entry points to line pointer 2. Subsequent
+insertions into the page can now recycle both line pointer 2 and the
+space formerly used by tuple 2.
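+
+To make the traversal concrete, here is a minimal sketch, loosely modeled
+on heap_hot_search_buffer() in heapam.c, of locating a visible member
+within one page. The is_visible callback stands in for the real snapshot
+test, and the XMIN/XMAX cross-check described under Abort Cases below is
+omitted:
+
+    static OffsetNumber
+    hot_chain_find_visible(Page page, OffsetNumber offnum,
+                           bool (*is_visible) (HeapTupleHeader))
+    {
+        for (;;)
+        {
+            ItemId          lp = PageGetItemId(page, offnum);
+            HeapTupleHeader htup;
+
+            if (ItemIdIsRedirected(lp))
+            {
+                /* root became a redirect; follow it, staying on this page */
+                offnum = ItemIdGetRedirect(lp);
+                continue;
+            }
+            if (!ItemIdIsNormal(lp))
+                return InvalidOffsetNumber;     /* member was pruned away */
+
+            htup = (HeapTupleHeader) PageGetItem(page, lp);
+            if (is_visible(htup))
+                return offnum;
+
+            if (!HeapTupleHeaderIsHotUpdated(htup))
+                return InvalidOffsetNumber;     /* end of the HOT chain */
+
+            /* advance to the next (heap-only) member via t_ctid */
+            offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
+        }
+    }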
+
+If an update changes any indexed column, or there is not room on the
+same page for the new tuple, then the HOT chain ends: the last member
+has a regular t_ctid link to the next version and is not marked
+HEAP_HOT_UPDATED. (In principle we could continue a HOT chain across
+pages, but this would destroy the desired property of being able to
+reclaim space with just page-local manipulations. Anyway, we don't
+want to have to chase through multiple heap pages to get from an index
+entry to the desired tuple, so it seems better to create a new index
+entry for the new tuple.) If further updates occur, the next version
+could become the root of a new HOT chain.
+
+Line pointer 1 has to remain as long as there is any non-dead member of
+the chain on the page. When there is not, it is marked "dead".
+This lets us reclaim the last child line pointer and associated tuple
+immediately. The next regular VACUUM pass can reclaim the index entries
+pointing at the line pointer and then the line pointer itself. Since a
+line pointer is small compared to a tuple, this does not represent an
+undue space cost.
+
+Note: we can use a "dead" line pointer for any DELETEd tuple,
+whether it was part of a HOT chain or not. This allows space reclamation
+in advance of running VACUUM for plain DELETEs as well as HOT updates.
+
+The requirement for doing a HOT update is that none of the indexed
+columns are changed. This is checked at execution time by comparing the
+binary representation of the old and new values. We insist on bitwise
+equality rather than using datatype-specific equality routines. The
+main reason to avoid the latter is that there might be multiple notions
+of equality for a datatype, and we don't know exactly which one is
+relevant for the indexes at hand. We assume that bitwise equality
+guarantees equality for all purposes.
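+
+A minimal sketch of that per-column test (the real logic lives in
+HeapDetermineColumnsInfo() in heapam.c; datumIsEqual() performs the raw
+bitwise comparison, and attribute extraction is omitted here):
+
+    /* unchanged? judged by binary equality only, never by operator "=" */
+    static bool
+    attr_is_binary_equal(Form_pg_attribute att,
+                         Datum oldval, bool oldnull,
+                         Datum newval, bool newnull)
+    {
+        if (oldnull || newnull)
+            return oldnull && newnull;  /* NULL matches only NULL */
+        /* compare raw bytes, honoring pass-by-value vs. pass-by-reference */
+        return datumIsEqual(oldval, newval, att->attbyval, att->attlen);
+    }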
+
+
+Abort Cases
+-----------
+
+If a heap-only tuple's xmin is aborted, then it can be removed immediately:
+it was never visible to any other transaction, and all descendant row
+versions must be aborted as well. Therefore we need not consider it part
+of a HOT chain. By the same token, if a HOT-updated tuple's xmax is
+aborted, there is no need to follow the chain link. However, there is a
+race condition here: the transaction that did the HOT update might abort
+between the time we inspect the HOT-updated tuple and the time we reach
+the descendant heap-only tuple. It is conceivable that someone prunes
+the heap-only tuple before that, and even conceivable that the line pointer
+is re-used for another purpose. Therefore, when following a HOT chain,
+it is always necessary to be prepared for the possibility that the
+linked-to line pointer is unused, dead, or redirected; and if it is a
+normal line pointer, we still have to check that XMIN of the tuple matches
+the XMAX of the tuple we left. Otherwise we should assume that we have
+come to the end of the HOT chain. Note that this sort of XMIN/XMAX
+matching is required when following ordinary update chains anyway.
+
+(Early versions of the HOT code assumed that holding pin on the page
+buffer while following a HOT link would prevent this type of problem,
+but checking XMIN/XMAX matching is a much more robust solution.)
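+
+A sketch of that cross-check as it would appear while walking a chain,
+where prior_xmax is the XMAX of the tuple we just left:
+
+    /* the link is trustworthy only if the child's XMIN matches */
+    if (!TransactionIdEquals(HeapTupleHeaderGetXmin(htup), prior_xmax))
+        return InvalidOffsetNumber;     /* slot was recycled; chain ends */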
+
+
+Index/Sequential Scans
+----------------------
+
+When doing an index scan, whenever we reach a HEAP_HOT_UPDATED tuple whose
+xmax is not aborted, we need to follow its t_ctid link and check that
+entry as well; possibly repeatedly until we reach the end of the HOT
+chain. (When using an MVCC snapshot it is possible to optimize this a
+bit: there can be at most one visible tuple in the chain, so we can stop
+when we find it. This rule does not work for non-MVCC snapshots, though.)
+
+Sequential scans do not need to pay attention to the HOT links because
+they scan every line pointer on the page anyway. The same goes for a
+bitmap heap scan with a lossy bitmap.
+
+
+Pruning
+-------
+
+HOT pruning means updating line pointers so that HOT chains are
+reduced in length, by collapsing out line pointers for intermediate dead
+tuples. Although this makes those line pointers available for re-use,
+it does not immediately make the space occupied by their tuples available.
+
+
+Defragmentation
+---------------
+
+Defragmentation centralizes unused space. After we have converted root
+line pointers to redirected line pointers and pruned away any dead
+intermediate line pointers, the tuples they linked to are free space.
+But unless that space is adjacent to the central "hole" on the page
+(the pd_lower-to-pd_upper area) it cannot be used by tuple insertion.
+Defragmentation moves the surviving tuples to coalesce all the free
+space into one "hole". This is done with the same PageRepairFragmentation
+function that regular VACUUM uses.
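+
+A sketch of the defragmentation step once a buffer cleanup lock is held
+(PageRepairFragmentation() is that same routine; the WAL record that must
+accompany the change is elided):
+
+    START_CRIT_SECTION();
+    PageRepairFragmentation(page);
+    MarkBufferDirty(buffer);
+    /* emit the WAL record for the prune/defrag here, if relation is logged */
+    END_CRIT_SECTION();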
+
+
+When can/should we prune or defragment?
+---------------------------------------
+
+This is the most interesting question in HOT implementation, since there
+is no simple right answer: we must use heuristics to determine when it's
+most efficient to perform pruning and/or defragmenting.
+
+We cannot prune or defragment unless we can get a "buffer cleanup lock"
+on the target page; otherwise, pruning might destroy line pointers that
+other backends have live references to, and defragmenting might move
+tuples that other backends have live pointers to. Thus the general
+approach must be to heuristically decide if we should try to prune
+or defragment, and if so try to acquire the buffer cleanup lock without
+blocking. If we succeed we can proceed with our housekeeping work.
+If we cannot get the lock (which should not happen often, except under
+very heavy contention) then the housekeeping has to be postponed till
+some other time. The worst-case consequence of this is only that an
+UPDATE cannot be made HOT but has to link to a new tuple version placed on
+some other page, for lack of centralized space on the original page.
+
+Ideally we would do defragmenting only when we are about to attempt
+heap_update on a HOT-safe tuple. The difficulty with this approach
+is that the update query has certainly got a pin on the old tuple, and
+therefore our attempt to acquire a buffer cleanup lock will always fail.
+(This corresponds to the idea that we don't want to move the old tuple
+out from under where the query's HeapTuple pointer points. It might
+be possible to finesse that, but it seems fragile.)
+
+Pruning, however, is potentially useful even when we are not about to
+insert a new tuple, since shortening a HOT chain reduces the cost of
+subsequent index searches. However, it is unclear whether this gain is
+large enough to justify any extra maintenance burden.
+
+The currently planned heuristic is to prune and defrag when first accessing
+a page that potentially has prunable tuples (as flagged by the pd_prune_xid
+page hint field) and that either has free space less than MAX(fillfactor
+target free space, BLCKSZ/10) *or* has recently had an UPDATE fail to
+find enough free space to store an updated tuple version. (These rules
+are subject to change.)
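+
+A condensed sketch of that heuristic, following heap_page_prune_opt() in
+pruneheap.c (the check of pd_prune_xid against the visibility horizon is
+elided):
+
+    static void
+    maybe_prune_page(Relation relation, Buffer buffer)
+    {
+        Page    page = BufferGetPage(buffer);
+        Size    minfree;
+
+        /* pd_prune_xid is the page hint: invalid means nothing to prune */
+        if (!TransactionIdIsValid(((PageHeader) page)->pd_prune_xid))
+            return;
+
+        minfree = RelationGetTargetPageFreeSpace(relation,
+                                                 HEAP_DEFAULT_FILLFACTOR);
+        minfree = Max(minfree, BLCKSZ / 10);
+
+        if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
+        {
+            /* never block: skip the housekeeping if the cleanup lock is busy */
+            if (!ConditionalLockBufferForCleanup(buffer))
+                return;
+            /* prune, defragment, mark dirty, WAL-log, release the lock */
+        }
+    }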
+
+We have effectively implemented the "truncate dead tuples to just line
+pointer" idea that has been proposed and rejected before because of fear
+of line pointer bloat: we might end up with huge numbers of line pointers
+and just a few actual tuples on a page. To limit the damage in the worst
+case, and to keep various work arrays as well as the bitmaps in bitmap
+scans reasonably sized, the maximum number of line pointers per page
+is arbitrarily capped at MaxHeapTuplesPerPage (the most tuples that
+could fit without HOT pruning).
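+
+For reference, that cap is the largest number of tuple headers plus line
+pointers that could possibly fit in one block; its definition (cf.
+access/htup_details.h) is:
+
+    #define MaxHeapTuplesPerPage \
+        ((int) ((BLCKSZ - SizeOfPageHeaderData) / \
+                (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))))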
+
+Effectively, space reclamation happens during tuple retrieval when the
+page is nearly full (<10% free) and a buffer cleanup lock can be
+acquired. This means that UPDATE, DELETE, and SELECT can trigger space
+reclamation, but often not during INSERT ... VALUES because it does
+not retrieve a row.
+
+
+VACUUM
+------
+
+There is little change to regular vacuum. It performs pruning to remove
+dead heap-only tuples, and cleans up any dead line pointers as if they were
+regular dead tuples.
+
+
+Statistics
+----------
+
+Currently, we count HOT updates the same as cold updates for statistics
+purposes, though there is an additional per-table counter that counts
+only HOT updates. When a page pruning operation is able to remove a
+physical tuple by eliminating an intermediate heap-only tuple or
+replacing a physical root tuple by a redirect pointer, a decrement in
+the table's number of dead tuples is reported to pgstats, which may
+postpone autovacuuming. Note that we do not count replacing a root tuple
+by a DEAD line pointer as decrementing n_dead_tuples; we still want
+autovacuum to run to clean up the index entries and DEAD item.
+
+This area probably needs further work ...
+
+
+CREATE INDEX
+------------
+
+CREATE INDEX presents a problem for HOT updates. While the existing HOT
+chains all have the same index values for existing indexes, the columns
+in the new index might change within a pre-existing HOT chain, creating
+a "broken" chain that can't be indexed properly.
+
+To address this issue, regular (non-concurrent) CREATE INDEX makes the
+new index usable only by new transactions and transactions that don't
+have snapshots older than the CREATE INDEX command. This prevents
+queries that can see the inconsistent HOT chains from trying to use the
+new index and getting incorrect results. Queries that can see the index
+can only see the rows that were visible after the index was created,
+hence the HOT chains are consistent for them.
+
+Entries in the new index point to root tuples (tuples with current index
+pointers) so that our index uses the same index pointers as all other
+indexes on the table. However the row we want to index is actually at
+the *end* of the chain, ie, the most recent live tuple on the HOT chain.
+That is the one we compute the index entry values for, but the TID
+we put into the index is that of the root tuple. Since queries that
+will be allowed to use the new index cannot see any of the older tuple
+versions in the chain, the fact that they might not match the index entry
+isn't a problem. (Such queries will check the tuple visibility
+information of the older versions and ignore them, without ever looking at
+their contents, so the content inconsistency is OK.) Subsequent updates
+to the live tuple will be allowed to extend the HOT chain only if they are
+HOT-safe for all the indexes.
+
+Because we have ShareLock on the table, any DELETE_IN_PROGRESS or
+INSERT_IN_PROGRESS tuples should have come from our own transaction.
+Therefore we can consider them committed since if the CREATE INDEX
+commits, they will be committed, and if it aborts the index is discarded.
+An exception to this is that early lock release is customary for system
+catalog updates, and so we might find such tuples when reindexing a system
+catalog. In that case we deal with it by waiting for the source
+transaction to commit or roll back. (We could do that for user tables
+too, but since the case is unexpected we prefer to throw an error.)
+
+Practically, we prevent certain transactions from using the new index by
+setting pg_index.indcheckxmin to TRUE. Transactions are allowed to use
+such an index only after pg_index.xmin is below their TransactionXmin
+horizon, thereby ensuring that any incompatible rows in HOT chains are
+dead to them. (pg_index.xmin will be the XID of the CREATE INDEX
+transaction. The reason for using xmin rather than a normal column is
+that the regular vacuum freezing mechanism will take care of converting
+xmin to FrozenTransactionId before it can wrap around.)
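+
+A sketch of the corresponding planner test (cf. get_relation_info() in
+plancat.c; index_tuple here stands in for the index's pg_index catalog
+tuple):
+
+    if (index->indcheckxmin &&
+        !TransactionIdPrecedes(HeapTupleHeaderGetXmin(index_tuple->t_data),
+                               TransactionXmin))
+        continue;       /* index is not yet usable; ignore it for this plan */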
+
+This means in particular that the transaction creating the index will be
+unable to use the index if the transaction has old snapshots. We
+alleviate that problem somewhat by not setting indcheckxmin unless the
+table actually contains HOT chains with RECENTLY_DEAD members.
+
+Another unpleasant consequence is that it is now risky to use SnapshotAny
+in an index scan: if the index was created more recently than the last
+vacuum, it's possible that some of the visited tuples do not match the
+index entry they are linked to. This does not seem to be a fatal
+objection, since there are few users of SnapshotAny and most use seqscans.
+The only exception at this writing is CLUSTER, which is okay because it
+does not require perfect ordering of the indexscan readout (and especially
+so because CLUSTER tends to write recently-dead tuples out of order anyway).
+
+
+CREATE INDEX CONCURRENTLY
+-------------------------
+
+In the concurrent case we must take a different approach. We create the
+pg_index entry immediately, before we scan the table. The pg_index entry
+is marked as "not ready for inserts". Then we commit and wait for any
+transactions which have the table open to finish. This ensures that no
+new HOT updates will change the key value for our new index, because all
+transactions will see the existence of the index and will respect its
+constraint on which updates can be HOT. Other transactions must include
+such an index when determining HOT-safety of updates, even though they
+must ignore it for both insertion and searching purposes.
+
+We must do this to avoid making incorrect index entries. For example,
+suppose we are building an index on column X and we make an index entry for
+a non-HOT tuple with X=1. Then some other backend, unaware that X is an
+indexed column, HOT-updates the row to have X=2, and commits. We now have
+an index entry for X=1 pointing at a HOT chain whose live row has X=2.
+We could make an index entry with X=2 during the validation pass, but
+there is no nice way to get rid of the wrong entry with X=1. So we must
+have the HOT-safety property enforced before we start to build the new
+index.
+
+After waiting for transactions which had the table open, we build the index
+for all rows that are valid in a fresh snapshot. Any tuples visible in the
+snapshot will have only valid forward-growing HOT chains. (They might have
+older HOT updates behind them which are broken, but this is OK for the same
+reason it's OK in a regular index build.) As above, we point the index
+entry at the root of the HOT-update chain but we use the key value from the
+live tuple.
+
+We mark the index open for inserts (but still not ready for reads) then
+we again wait for transactions which have the table open. Then we take
+a second reference snapshot and validate the index. This searches for
+tuples missing from the index, and inserts any missing ones. Again,
+the index entries have to have TIDs equal to HOT-chain root TIDs, but
+the value to be inserted is the one from the live tuple.
+
+Then we wait until every transaction that could have a snapshot older than
+the second reference snapshot is finished. This ensures that nobody is
+alive any longer who could need to see any tuples that might be missing
+from the index, as well as ensuring that no one can see any inconsistent
+rows in a broken HOT chain (the first condition is stronger than the
+second). Finally, we can mark the index valid for searches.
+
+Note that we do not need to set pg_index.indcheckxmin in this code path,
+because we have outwaited any transactions that would need to avoid using
+the index. (indcheckxmin is only needed because non-concurrent CREATE
+INDEX doesn't want to wait; its stronger lock would create too much risk of
+deadlock if it did.)
+
+
+DROP INDEX CONCURRENTLY
+-----------------------
+
+DROP INDEX CONCURRENTLY is sort of the reverse sequence of CREATE INDEX
+CONCURRENTLY. We first mark the index as not indisvalid, and then wait for
+any transactions that could be using it in queries to end. (During this
+time, index updates must still be performed as normal, since such
+transactions might expect freshly inserted tuples to be findable.)
+Then, we clear indisready and indislive, and again wait for transactions
+that could be updating the index to end. Finally we can drop the index
+normally (though taking only ShareUpdateExclusiveLock on its parent table).
+
+The reason we need the pg_index.indislive flag is that after the second
+wait step begins, we don't want transactions to be touching the index at
+all; otherwise they might suffer errors if the DROP finally commits while
+they are reading catalog entries for the index. If we had only indisvalid
+and indisready, this state would be indistinguishable from the first stage
+of CREATE INDEX CONCURRENTLY --- but in that state, we *do* want
+transactions to examine the index, since they must consider it in
+HOT-safety checks.
+
+
+Limitations and Restrictions
+----------------------------
+
+It is worth noting that HOT forever forecloses alternative approaches
+to vacuuming, specifically the recompute-the-index-keys approach alluded
+to in Technical Challenges above. It'll be tough to recompute the index
+keys for a root line pointer you don't have data for anymore ...
+
+
+Glossary
+--------
+
+Broken HOT Chain
+
+ A HOT chain in which the key value for an index has changed.
+
+ This is not allowed to occur normally but if a new index is created
+ it can happen. In that case various strategies are used to ensure
+ that no transaction for which the older tuples are visible can
+ use the index.
+
+Cold update
+
+ A normal, non-HOT update, in which index entries are made for
+ the new version of the tuple.
+
+Dead line pointer
+
+ A stub line pointer that does not point to anything, but cannot
+ be removed or reused yet because there are index pointers to it.
+ Semantically the same as a dead tuple. It has state LP_DEAD.
+
+Heap-only tuple
+
+ A heap tuple with no index pointers, which can only be reached
+ from indexes indirectly through its ancestral root tuple.
+ Marked with HEAP_ONLY_TUPLE flag.
+
+HOT-safe
+
+ A proposed tuple update is said to be HOT-safe if it changes
+ none of the tuple's indexed columns. It will only become an
+ actual HOT update if we can find room on the same page for
+ the new tuple version.
+
+HOT update
+
+ An UPDATE where the new tuple becomes a heap-only tuple, and no
+ new index entries are made.
+
+HOT-updated tuple
+
+ An updated tuple, for which the next tuple in the chain is a
+ heap-only tuple. Marked with HEAP_HOT_UPDATED flag.
+
+Indexed column
+
+ A column used in an index definition. The column might not
+ actually be stored in the index --- it could be used in a
+ functional index's expression, or used in a partial index
+ predicate. HOT treats all these cases alike.
+
+Redirecting line pointer
+
+ A line pointer that points to another line pointer and has no
+ associated tuple. It has the special lp_flags state LP_REDIRECT,
+ and lp_off is the OffsetNumber of the line pointer it links to.
+ This is used when a root tuple becomes dead but we cannot prune
+ the line pointer because there are non-dead heap-only tuples
+ further down the chain.
+
+Root tuple
+
+ The first tuple in a HOT update chain; the one that indexes point to.
+
+Update chain
+
+ A chain of updated tuples, in which each tuple's ctid points to
+ the next tuple in the chain. A HOT update chain is an update chain
+ (or portion of an update chain) that consists of a root tuple and
+ one or more heap-only tuples. A complete update chain can contain
+ both HOT and non-HOT (cold) updated tuples.
diff --git a/src/backend/access/heap/README.tuplock b/src/backend/access/heap/README.tuplock
new file mode 100644
index 0000000..6441e8b
--- /dev/null
+++ b/src/backend/access/heap/README.tuplock
@@ -0,0 +1,155 @@
+Locking tuples
+--------------
+
+Locking tuples is not as easy as locking tables or other database objects.
+The problem is that transactions might want to lock large numbers of tuples at
+any one time, so it's not possible to keep the locks objects in shared memory.
+To work around this limitation, we use a two-level mechanism. The first level
+is implemented by storing locking information in the tuple header: a tuple is
+marked as locked by setting the current transaction's XID as its XMAX, and
+setting additional infomask bits to distinguish this case from the more normal
+case of having deleted the tuple. When multiple transactions concurrently
+lock a tuple, a MultiXact is used; see below. This mechanism can accommodate
+arbitrarily large numbers of tuples being locked simultaneously.
+
+When it is necessary to wait for a tuple-level lock to be released, the basic
+delay is provided by XactLockTableWait or MultiXactIdWait on the contents of
+the tuple's XMAX. However, that mechanism will release all waiters
+concurrently, so there would be a race condition as to which waiter gets the
+tuple, potentially leading to indefinite starvation of some waiters. The
+possibility of share-locking makes the problem much worse --- a steady stream
+of share-lockers can easily block an exclusive locker forever. To provide
+more reliable semantics about who gets a tuple-level lock first, we use the
+standard lock manager, which implements the second level mentioned above. The
+protocol for waiting for a tuple-level lock is really
+
+ LockTuple()
+ XactLockTableWait()
+ mark tuple as locked by me
+ UnlockTuple()
+
+When there are multiple waiters, arbitration of who is to get the lock next
+is provided by LockTuple(). However, at most one tuple-level lock will
+be held or awaited per backend at any time, so we don't risk overflow
+of the lock table. Note that incoming share-lockers are required to
+do LockTuple as well, if there is any conflict, to ensure that they don't
+starve out waiting exclusive-lockers. However, if there is not any active
+conflict for a tuple, we don't incur any extra overhead.
+
+We make an exception to the above rule for those lockers that already hold
+some lock on a tuple and attempt to acquire a stronger one on it. In that
+case, we skip the LockTuple() call even when there are conflicts, provided
+that the target tuple is being locked, updated, or deleted by multiple sessions
+concurrently. Failing to skip the lock would risk a deadlock, e.g., between a
+session that was first to record its weaker lock in the tuple header and would
+be waiting on the LockTuple() call to upgrade to the stronger lock level, and
+another session that has already done LockTuple() and is waiting for the first
+session transaction to release its tuple header-level lock.
+
+We provide four levels of tuple locking strength: SELECT FOR UPDATE obtains an
+exclusive lock which prevents any kind of modification of the tuple. This is
+the lock level that is implicitly taken by DELETE operations, and also by
+UPDATE operations if they modify any of the tuple's key fields. SELECT FOR NO
+KEY UPDATE likewise obtains an exclusive lock, but only prevents tuple removal
+and modifications which might alter the tuple's key. This is the lock that is
+implicitly taken by UPDATE operations which leave all key fields unchanged.
+SELECT FOR SHARE obtains a shared lock which prevents any kind of tuple
+modification. Finally, SELECT FOR KEY SHARE obtains a shared lock which only
+prevents tuple removal and modifications of key fields. This lock level is
+just strong enough to implement RI checks, i.e. it ensures that tuples do not
+go away from under a check, without blocking transactions that want to update
+the tuple without changing its key.
+
+The conflict table is:
+
+                   UPDATE        NO KEY UPDATE   SHARE        KEY SHARE
+UPDATE             conflict      conflict        conflict     conflict
+NO KEY UPDATE      conflict      conflict        conflict
+SHARE              conflict      conflict
+KEY SHARE          conflict
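+
+The same matrix, encoded as C for illustration (this table is not the
+backend's actual representation, which derives conflicts from the
+corresponding heavyweight lock modes; rows and columns are in increasing
+lock strength order):
+
+    static const bool tuplock_conflicts[4][4] = {
+        /*                  KEYSHR  SHARE   NOKEYUPD  UPDATE */
+        /* KEY SHARE     */ {false, false,  false,    true},
+        /* SHARE         */ {false, false,  true,     true},
+        /* NO KEY UPDATE */ {false, true,   true,     true},
+        /* UPDATE        */ {true,  true,   true,     true},
+    };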
+
+When there is a single locker in a tuple, we can just store the locking info
+in the tuple itself. We do this by storing the locker's Xid in XMAX, and
+setting infomask bits specifying the locking strength. There is one exception
+here: since infomask space is limited, we do not provide a separate bit
+for SELECT FOR SHARE, so we have to use the extended info in a MultiXact in
+that case. (The other cases, SELECT FOR UPDATE and SELECT FOR KEY SHARE, are
+presumably more commonly used due to being the standards-mandated locking
+mechanism, or heavily used by the RI code, so we want to provide fast paths
+for those.)
+
+MultiXacts
+----------
+
+A tuple header provides very limited space for storing information about tuple
+locking and updates: there is room only for a single Xid and a small number of
+infomask bits. Whenever we need to store more than one lock, we replace the
+first locker's Xid with a new MultiXactId. Each MultiXact provides extended
+locking data; it comprises an array of Xids plus some flags bits for each one.
+The flags are currently used to store the locking strength of each member
+transaction. (The flags also distinguish a pure locker from an updater.)
+
+In earlier PostgreSQL releases, a MultiXact always meant that the tuple was
+locked in shared mode by multiple transactions. This is no longer the case; a
+MultiXact may contain an update or delete Xid. (Keep in mind that tuple locks
+in a transaction do not conflict with other tuple locks in the same
+transaction, so it's possible to have otherwise conflicting locks in a
+MultiXact if they belong to the same transaction).
+
+Note that each lock is attributed to the subtransaction that acquires it.
+This means that a subtransaction that aborts is seen as though it releases the
+locks it acquired; concurrent transactions can then proceed without having to
+wait for the main transaction to finish. It also means that a subtransaction
+can upgrade to a stronger lock level than an earlier transaction had, and if
+the subxact aborts, the earlier, weaker lock is kept.
+
+The possibility of having an update within a MultiXact means that they must
+persist across crashes and restarts: a future reader of the tuple needs to
+figure out whether the update committed or aborted. So we have a requirement
+that pg_multixact needs to retain pages of its data until we're certain that
+the MultiXacts in them are no longer of interest.
+
+VACUUM is in charge of removing old MultiXacts at the time of tuple freezing.
+The lower bound used by vacuum (that is, the value below which all multixacts
+are removed) is stored as pg_class.relminmxid for each table; the minimum of
+all such values is stored in pg_database.datminmxid. The minimum across
+all databases, in turn, is recorded in checkpoint records, and CHECKPOINT
+removes pg_multixact/ segments older than that value once the checkpoint
+record has been flushed.
+
+Infomask Bits
+-------------
+
+The following infomask bits are applicable:
+
+- HEAP_XMAX_INVALID
+ Any tuple with this bit set does not have a valid value stored in XMAX.
+
+- HEAP_XMAX_IS_MULTI
+ This bit is set if the tuple's Xmax is a MultiXactId (as opposed to a
+ regular TransactionId).
+
+- HEAP_XMAX_LOCK_ONLY
+ This bit is set when the XMAX is a locker only; that is, if it's a
+ multixact, it does not contain an update among its members. It's also set
+ when the XMAX is a plain Xid that locked the tuple.
+
+- HEAP_XMAX_KEYSHR_LOCK
+- HEAP_XMAX_SHR_LOCK
+- HEAP_XMAX_EXCL_LOCK
+ These bits indicate the strength of the lock acquired; they are useful when
+ the XMAX is not a MultiXactId. If it's a multi, the info is to be found in
+ the member flags. If HEAP_XMAX_IS_MULTI is not set and HEAP_XMAX_LOCK_ONLY
+ is set, then one of these *must* be set as well.
+
+ Note that HEAP_XMAX_EXCL_LOCK does not distinguish FOR NO KEY UPDATE from
+ FOR UPDATE; this is implemented by the HEAP_KEYS_UPDATED bit.
+
+- HEAP_KEYS_UPDATED
+ This bit lives in t_infomask2. If set, it indicates that the operation(s)
+ done by the XMAX compromise the tuple key, such as a SELECT FOR UPDATE, an
+ UPDATE that modifies the columns of the key, or a DELETE. It's set regardless of
+ whether the XMAX is a TransactionId or a MultiXactId.
+
+We currently never set the HEAP_XMAX_COMMITTED bit when the HEAP_XMAX_IS_MULTI
+bit is set.
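+
+A sketch of how these bits combine when classifying a tuple's XMAX (the
+HEAP_XMAX_IS_*_LOCKED macros in access/htup_details.h encapsulate the bit
+tests; this helper is illustrative, not backend code):
+
+    static const char *
+    describe_xmax(HeapTupleHeader tup)
+    {
+        uint16  infomask = tup->t_infomask;
+
+        if (infomask & HEAP_XMAX_INVALID)
+            return "no valid xmax";
+        if (infomask & HEAP_XMAX_IS_MULTI)
+            return "MultiXactId: consult the member flags";
+        if (!HEAP_XMAX_IS_LOCKED_ONLY(infomask))
+            return "plain Xid that updated or deleted the tuple";
+        if (HEAP_XMAX_IS_SHR_LOCKED(infomask))
+            return "FOR SHARE locker";
+        if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
+            return "FOR KEY SHARE locker";
+        /* exclusive: HEAP_KEYS_UPDATED tells FOR UPDATE from NO KEY UPDATE */
+        return (tup->t_infomask2 & HEAP_KEYS_UPDATED) ?
+            "FOR UPDATE locker" : "FOR NO KEY UPDATE locker";
+    }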
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
new file mode 100644
index 0000000..64b9ec0
--- /dev/null
+++ b/src/backend/access/heap/heapam.c
@@ -0,0 +1,9955 @@
+/*-------------------------------------------------------------------------
+ *
+ * heapam.c
+ * heap access method code
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/access/heap/heapam.c
+ *
+ *
+ * INTERFACE ROUTINES
+ * heap_beginscan - begin relation scan
+ * heap_rescan - restart a relation scan
+ * heap_endscan - end relation scan
+ * heap_getnext - retrieve next tuple in scan
+ * heap_fetch - retrieve tuple with given tid
+ * heap_insert - insert tuple into a relation
+ * heap_multi_insert - insert multiple tuples into a relation
+ * heap_delete - delete a tuple from a relation
+ * heap_update - replace a tuple in a relation with another tuple
+ *
+ * NOTES
+ * This file contains the heap_ routines which implement
+ * the POSTGRES heap access method used for all POSTGRES
+ * relations.
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/bufmask.h"
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "access/heapam_xlog.h"
+#include "access/heaptoast.h"
+#include "access/hio.h"
+#include "access/multixact.h"
+#include "access/parallel.h"
+#include "access/relscan.h"
+#include "access/subtrans.h"
+#include "access/syncscan.h"
+#include "access/sysattr.h"
+#include "access/tableam.h"
+#include "access/transam.h"
+#include "access/valid.h"
+#include "access/visibilitymap.h"
+#include "access/xact.h"
+#include "access/xlog.h"
+#include "access/xloginsert.h"
+#include "access/xlogutils.h"
+#include "catalog/catalog.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "port/atomics.h"
+#include "port/pg_bitutils.h"
+#include "storage/bufmgr.h"
+#include "storage/freespace.h"
+#include "storage/lmgr.h"
+#include "storage/predicate.h"
+#include "storage/procarray.h"
+#include "storage/smgr.h"
+#include "storage/spin.h"
+#include "storage/standby.h"
+#include "utils/datum.h"
+#include "utils/inval.h"
+#include "utils/lsyscache.h"
+#include "utils/relcache.h"
+#include "utils/snapmgr.h"
+#include "utils/spccache.h"
+
+
+static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
+ TransactionId xid, CommandId cid, int options);
+static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
+ Buffer newbuf, HeapTuple oldtup,
+ HeapTuple newtup, HeapTuple old_key_tuple,
+ bool all_visible_cleared, bool new_all_visible_cleared);
+static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
+ Bitmapset *interesting_cols,
+ Bitmapset *external_cols,
+ HeapTuple oldtup, HeapTuple newtup,
+ bool *has_external);
+static bool heap_acquire_tuplock(Relation relation, ItemPointer tid,
+ LockTupleMode mode, LockWaitPolicy wait_policy,
+ bool *have_tuple_lock);
+static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
+ uint16 old_infomask2, TransactionId add_to_xmax,
+ LockTupleMode mode, bool is_update,
+ TransactionId *result_xmax, uint16 *result_infomask,
+ uint16 *result_infomask2);
+static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
+ ItemPointer ctid, TransactionId xid,
+ LockTupleMode mode);
+static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
+ uint16 *new_infomask2);
+static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
+ uint16 t_infomask);
+static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
+ LockTupleMode lockmode, bool *current_is_member);
+static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
+ Relation rel, ItemPointer ctid, XLTW_Oper oper,
+ int *remaining);
+static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
+ uint16 infomask, Relation rel, int *remaining);
+static void index_delete_sort(TM_IndexDeleteOp *delstate);
+static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
+static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
+static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_required,
+ bool *copy);
+
+
+/*
+ * Each tuple lock mode has a corresponding heavyweight lock, and one or two
+ * corresponding MultiXactStatuses (one to merely lock tuples, another one to
+ * update them). This table (and the macros below) helps us determine the
+ * heavyweight lock mode and MultiXactStatus values to use for any particular
+ * tuple lock strength.
+ *
+ * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
+ * instead.
+ */
+static const struct
+{
+ LOCKMODE hwlock;
+ int lockstatus;
+ int updstatus;
+} tupleLockExtraInfo[MaxLockTupleMode + 1] =
+{
+ { /* LockTupleKeyShare */
+ AccessShareLock,
+ MultiXactStatusForKeyShare,
+ -1 /* KeyShare does not allow updating tuples */
+ },
+ { /* LockTupleShare */
+ RowShareLock,
+ MultiXactStatusForShare,
+ -1 /* Share does not allow updating tuples */
+ },
+ { /* LockTupleNoKeyExclusive */
+ ExclusiveLock,
+ MultiXactStatusForNoKeyUpdate,
+ MultiXactStatusNoKeyUpdate
+ },
+ { /* LockTupleExclusive */
+ AccessExclusiveLock,
+ MultiXactStatusForUpdate,
+ MultiXactStatusUpdate
+ }
+};
+
+/* Get the LOCKMODE for a given MultiXactStatus */
+#define LOCKMODE_from_mxstatus(status) \
+ (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
+
+/*
+ * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
+ * This is more readable than having every caller translate it to lock.h's
+ * LOCKMODE.
+ */
+#define LockTupleTuplock(rel, tup, mode) \
+ LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
+#define UnlockTupleTuplock(rel, tup, mode) \
+ UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
+#define ConditionalLockTupleTuplock(rel, tup, mode) \
+ ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
+
+#ifdef USE_PREFETCH
+/*
+ * heap_index_delete_tuples and index_delete_prefetch_buffer use this
+ * structure to coordinate prefetching activity
+ */
+typedef struct
+{
+ BlockNumber cur_hblkno;
+ int next_item;
+ int ndeltids;
+ TM_IndexDelete *deltids;
+} IndexDeletePrefetchState;
+#endif
+
+/* heap_index_delete_tuples bottom-up index deletion costing constants */
+#define BOTTOMUP_MAX_NBLOCKS 6
+#define BOTTOMUP_TOLERANCE_NBLOCKS 3
+
+/*
+ * heap_index_delete_tuples uses this when determining which heap blocks it
+ * must visit to help its bottom-up index deletion caller
+ */
+typedef struct IndexDeleteCounts
+{
+ int16 npromisingtids; /* Number of "promising" TIDs in group */
+ int16 ntids; /* Number of TIDs in group */
+ int16 ifirsttid; /* Offset to group's first deltid */
+} IndexDeleteCounts;
+
+/*
+ * This table maps tuple lock strength values for each particular
+ * MultiXactStatus value.
+ */
+static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
+{
+ LockTupleKeyShare, /* ForKeyShare */
+ LockTupleShare, /* ForShare */
+ LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
+ LockTupleExclusive, /* ForUpdate */
+ LockTupleNoKeyExclusive, /* NoKeyUpdate */
+ LockTupleExclusive /* Update */
+};
+
+/* Get the LockTupleMode for a given MultiXactStatus */
+#define TUPLOCK_from_mxstatus(status) \
+ (MultiXactStatusLock[(status)])
+
+/* ----------------------------------------------------------------
+ * heap support routines
+ * ----------------------------------------------------------------
+ */
+
+/* ----------------
+ * initscan - scan code common to heap_beginscan and heap_rescan
+ * ----------------
+ */
+static void
+initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
+{
+ ParallelBlockTableScanDesc bpscan = NULL;
+ bool allow_strat;
+ bool allow_sync;
+
+ /*
+ * Determine the number of blocks we have to scan.
+ *
+ * It is sufficient to do this once at scan start, since any tuples added
+ * while the scan is in progress will be invisible to my snapshot anyway.
+ * (That is not true when using a non-MVCC snapshot. However, we couldn't
+ * guarantee to return tuples added after scan start anyway, since they
+ * might go into pages we already scanned. To guarantee consistent
+ * results for a non-MVCC snapshot, the caller must hold some higher-level
+ * lock that ensures the interesting tuple(s) won't change.)
+ */
+ if (scan->rs_base.rs_parallel != NULL)
+ {
+ bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
+ scan->rs_nblocks = bpscan->phs_nblocks;
+ }
+ else
+ scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);
+
+ /*
+ * If the table is large relative to NBuffers, use a bulk-read access
+ * strategy and enable synchronized scanning (see syncscan.c). Although
+ * the thresholds for these features could be different, we make them the
+ * same so that there are only two behaviors to tune rather than four.
+ * (However, some callers need to be able to disable one or both of these
+ * behaviors, independently of the size of the table; also there is a GUC
+ * variable that can disable synchronized scanning.)
+ *
+ * Note that table_block_parallelscan_initialize has a very similar test;
+ * if you change this, consider changing that one, too.
+ */
+ if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
+ scan->rs_nblocks > NBuffers / 4)
+ {
+ allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
+ allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
+ }
+ else
+ allow_strat = allow_sync = false;
+
+ if (allow_strat)
+ {
+ /* During a rescan, keep the previous strategy object. */
+ if (scan->rs_strategy == NULL)
+ scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
+ }
+ else
+ {
+ if (scan->rs_strategy != NULL)
+ FreeAccessStrategy(scan->rs_strategy);
+ scan->rs_strategy = NULL;
+ }
+
+ if (scan->rs_base.rs_parallel != NULL)
+ {
+ /* For parallel scan, believe whatever ParallelTableScanDesc says. */
+ if (scan->rs_base.rs_parallel->phs_syncscan)
+ scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
+ else
+ scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
+ }
+ else if (keep_startblock)
+ {
+ /*
+ * When rescanning, we want to keep the previous startblock setting,
+ * so that rewinding a cursor doesn't generate surprising results.
+ * Reset the active syncscan setting, though.
+ */
+ if (allow_sync && synchronize_seqscans)
+ scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
+ else
+ scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
+ }
+ else if (allow_sync && synchronize_seqscans)
+ {
+ scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
+ scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
+ }
+ else
+ {
+ scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
+ scan->rs_startblock = 0;
+ }
+
+ scan->rs_numblocks = InvalidBlockNumber;
+ scan->rs_inited = false;
+ scan->rs_ctup.t_data = NULL;
+ ItemPointerSetInvalid(&scan->rs_ctup.t_self);
+ scan->rs_cbuf = InvalidBuffer;
+ scan->rs_cblock = InvalidBlockNumber;
+
+ /* page-at-a-time fields are always invalid when not rs_inited */
+
+ /*
+ * copy the scan key, if appropriate
+ */
+ if (key != NULL && scan->rs_base.rs_nkeys > 0)
+ memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
+
+ /*
+ * Currently, we only have a stats counter for sequential heap scans (but
+ * e.g. for bitmap scans the underlying bitmap index scans will be counted,
+ * and for sample scans we update stats for tuple fetches).
+ */
+ if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
+ pgstat_count_heap_scan(scan->rs_base.rs_rd);
+}
+
+/*
+ * heap_setscanlimits - restrict range of a heapscan
+ *
+ * startBlk is the page to start at
+ * numBlks is number of pages to scan (InvalidBlockNumber means "all")
+ */
+void
+heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
+{
+ HeapScanDesc scan = (HeapScanDesc) sscan;
+
+ Assert(!scan->rs_inited); /* else too late to change */
+ /* else rs_startblock is significant */
+ Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));
+
+ /* Check startBlk is valid (but allow case of zero blocks...) */
+ Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
+
+ scan->rs_startblock = startBlk;
+ scan->rs_numblocks = numBlks;
+}
+
+/*
+ * heapgetpage - subroutine for heapgettup()
+ *
+ * This routine reads and pins the specified page of the relation.
+ * In page-at-a-time mode it performs additional work, namely determining
+ * which tuples on the page are visible.
+ */
+void
+heapgetpage(TableScanDesc sscan, BlockNumber page)
+{
+ HeapScanDesc scan = (HeapScanDesc) sscan;
+ Buffer buffer;
+ Snapshot snapshot;
+ Page dp;
+ int lines;
+ int ntup;
+ OffsetNumber lineoff;
+ ItemId lpp;
+ bool all_visible;
+
+ Assert(page < scan->rs_nblocks);
+
+ /* release previous scan buffer, if any */
+ if (BufferIsValid(scan->rs_cbuf))
+ {
+ ReleaseBuffer(scan->rs_cbuf);
+ scan->rs_cbuf = InvalidBuffer;
+ }
+
+ /*
+ * Be sure to check for interrupts at least once per page. Checks at
+ * higher code levels won't be able to stop a seqscan that encounters many
+ * pages' worth of consecutive dead tuples.
+ */
+ CHECK_FOR_INTERRUPTS();
+
+ /* read page using selected strategy */
+ scan->rs_cbuf = ReadBufferExtended(scan->rs_base.rs_rd, MAIN_FORKNUM, page,
+ RBM_NORMAL, scan->rs_strategy);
+ scan->rs_cblock = page;
+
+ if (!(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE))
+ return;
+
+ buffer = scan->rs_cbuf;
+ snapshot = scan->rs_base.rs_snapshot;
+
+ /*
+ * Prune and repair fragmentation for the whole page, if possible.
+ */
+ heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
+
+ /*
+ * We must hold share lock on the buffer content while examining tuple
+ * visibility. Afterwards, however, the tuples we have found to be
+ * visible are guaranteed good as long as we hold the buffer pin.
+ */
+ LockBuffer(buffer, BUFFER_LOCK_SHARE);
+
+ dp = BufferGetPage(buffer);
+ TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
+ lines = PageGetMaxOffsetNumber(dp);
+ ntup = 0;
+
+ /*
+ * If the all-visible flag indicates that all tuples on the page are
+ * visible to everyone, we can skip the per-tuple visibility tests.
+ *
+ * Note: In hot standby, a tuple that's already visible to all
+ * transactions on the primary might still be invisible to a read-only
+ * transaction in the standby. We partly handle this problem by tracking
+ * the minimum xmin of visible tuples as the cut-off XID while marking a
+ * page all-visible on the primary and WAL log that along with the
+ * visibility map SET operation. In hot standby, we wait for (or abort)
+ * all transactions that might not see one or more tuples on the page.
+ * That's how index-only scans work fine in hot standby. A crucial
+ * difference between index-only scans and heap scans is that the
+ * index-only scan completely relies on the visibility map whereas heap
+ * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
+ * the page-level flag can be trusted in the same way, because it might
+ * get propagated somehow without being explicitly WAL-logged, e.g. via a
+ * full page write. Until we can prove that beyond doubt, let's check each
+ * tuple for visibility the hard way.
+ */
+ all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
+
+ for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
+ lineoff <= lines;
+ lineoff++, lpp++)
+ {
+ if (ItemIdIsNormal(lpp))
+ {
+ HeapTupleData loctup;
+ bool valid;
+
+ loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
+ loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
+ loctup.t_len = ItemIdGetLength(lpp);
+ ItemPointerSet(&(loctup.t_self), page, lineoff);
+
+ if (all_visible)
+ valid = true;
+ else
+ valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
+
+ HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
+ &loctup, buffer, snapshot);
+
+ if (valid)
+ scan->rs_vistuples[ntup++] = lineoff;
+ }
+ }
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ Assert(ntup <= MaxHeapTuplesPerPage);
+ scan->rs_ntuples = ntup;
+}
+
+/* ----------------
+ * heapgettup - fetch next heap tuple
+ *
+ * Initialize the scan if not already done; then advance to the next
+ * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
+ * or set scan->rs_ctup.t_data = NULL if no more tuples.
+ *
+ * dir == NoMovementScanDirection means "re-fetch the tuple indicated
+ * by scan->rs_ctup".
+ *
+ * Note: the reason nkeys/key are passed separately, even though they are
+ * kept in the scan descriptor, is that the caller may not want us to check
+ * the scankeys.
+ *
+ * Note: when we fall off the end of the scan in either direction, we
+ * reset rs_inited. This means that a further request with the same
+ * scan direction will restart the scan, which is a bit odd, but a
+ * request with the opposite scan direction will start a fresh scan
+ * in the proper direction. The latter is required behavior for cursors,
+ * while the former case is generally undefined behavior in Postgres
+ * so we don't care too much.
+ * ----------------
+ */
+static void
+heapgettup(HeapScanDesc scan,
+ ScanDirection dir,
+ int nkeys,
+ ScanKey key)
+{
+ HeapTuple tuple = &(scan->rs_ctup);
+ Snapshot snapshot = scan->rs_base.rs_snapshot;
+ bool backward = ScanDirectionIsBackward(dir);
+ BlockNumber page;
+ bool finished;
+ Page dp;
+ int lines;
+ OffsetNumber lineoff;
+ int linesleft;
+ ItemId lpp;
+
+ /*
+ * calculate next starting lineoff, given scan direction
+ */
+ if (ScanDirectionIsForward(dir))
+ {
+ if (!scan->rs_inited)
+ {
+ /*
+ * return null immediately if relation is empty
+ */
+ if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+ {
+ Assert(!BufferIsValid(scan->rs_cbuf));
+ tuple->t_data = NULL;
+ return;
+ }
+ if (scan->rs_base.rs_parallel != NULL)
+ {
+ ParallelBlockTableScanDesc pbscan =
+ (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
+ ParallelBlockTableScanWorker pbscanwork =
+ scan->rs_parallelworkerdata;
+
+ table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
+ pbscanwork, pbscan);
+
+ page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+ pbscanwork, pbscan);
+
+ /* Other processes might have already finished the scan. */
+ if (page == InvalidBlockNumber)
+ {
+ Assert(!BufferIsValid(scan->rs_cbuf));
+ tuple->t_data = NULL;
+ return;
+ }
+ }
+ else
+ page = scan->rs_startblock; /* first page */
+ heapgetpage((TableScanDesc) scan, page);
+ lineoff = FirstOffsetNumber; /* first offnum */
+ scan->rs_inited = true;
+ }
+ else
+ {
+ /* continue from previously returned page/tuple */
+ page = scan->rs_cblock; /* current page */
+ lineoff = /* next offnum */
+ OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
+ }
+
+ LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+
+ dp = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
+ lines = PageGetMaxOffsetNumber(dp);
+ /* page and lineoff now reference the physically next tid */
+
+ linesleft = lines - lineoff + 1;
+ }
+ else if (backward)
+ {
+ /* backward parallel scan not supported */
+ Assert(scan->rs_base.rs_parallel == NULL);
+
+ if (!scan->rs_inited)
+ {
+ /*
+ * return null immediately if relation is empty
+ */
+ if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+ {
+ Assert(!BufferIsValid(scan->rs_cbuf));
+ tuple->t_data = NULL;
+ return;
+ }
+
+ /*
+ * Disable reporting to syncscan logic in a backwards scan; it's
+ * not very likely anyone else is doing the same thing at the same
+ * time, and much more likely that we'll just bollix things for
+ * forward scanners.
+ */
+ scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
+
+ /*
+ * Start from last page of the scan. Ensure we take into account
+ * rs_numblocks if it's been adjusted by heap_setscanlimits().
+ */
+ if (scan->rs_numblocks != InvalidBlockNumber)
+ page = (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
+ else if (scan->rs_startblock > 0)
+ page = scan->rs_startblock - 1;
+ else
+ page = scan->rs_nblocks - 1;
+ heapgetpage((TableScanDesc) scan, page);
+ }
+ else
+ {
+ /* continue from previously returned page/tuple */
+ page = scan->rs_cblock; /* current page */
+ }
+
+ LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+
+ dp = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
+ lines = PageGetMaxOffsetNumber(dp);
+
+ if (!scan->rs_inited)
+ {
+ lineoff = lines; /* final offnum */
+ scan->rs_inited = true;
+ }
+ else
+ {
+ /*
+ * The previous returned tuple may have been vacuumed since the
+ * previous scan when we use a non-MVCC snapshot, so we must
+ * re-establish the lineoff <= PageGetMaxOffsetNumber(dp)
+ * invariant
+ */
+ lineoff = /* previous offnum */
+ Min(lines,
+ OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self))));
+ }
+ /* page and lineoff now reference the physically previous tid */
+
+ linesleft = lineoff;
+ }
+ else
+ {
+ /*
+ * ``no movement'' scan direction: refetch prior tuple
+ */
+ if (!scan->rs_inited)
+ {
+ Assert(!BufferIsValid(scan->rs_cbuf));
+ tuple->t_data = NULL;
+ return;
+ }
+
+ page = ItemPointerGetBlockNumber(&(tuple->t_self));
+ if (page != scan->rs_cblock)
+ heapgetpage((TableScanDesc) scan, page);
+
+ /* Since the tuple was previously fetched, needn't lock page here */
+ dp = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
+ lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
+ lpp = PageGetItemId(dp, lineoff);
+ Assert(ItemIdIsNormal(lpp));
+
+ tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
+ tuple->t_len = ItemIdGetLength(lpp);
+
+ return;
+ }
+
+ /*
+ * advance the scan until we find a qualifying tuple or run out of stuff
+ * to scan
+ */
+ lpp = PageGetItemId(dp, lineoff);
+ for (;;)
+ {
+ /*
+ * Only continue scanning the page while we have lines left.
+ *
+ * Note that this protects us from accessing line pointers past
+ * PageGetMaxOffsetNumber(); both for forward scans when we resume the
+ * table scan, and for when we start scanning a new page.
+ */
+ while (linesleft > 0)
+ {
+ if (ItemIdIsNormal(lpp))
+ {
+ bool valid;
+
+ tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
+ tuple->t_len = ItemIdGetLength(lpp);
+ ItemPointerSet(&(tuple->t_self), page, lineoff);
+
+ /*
+ * if current tuple qualifies, return it.
+ */
+ valid = HeapTupleSatisfiesVisibility(tuple,
+ snapshot,
+ scan->rs_cbuf);
+
+ HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
+ tuple, scan->rs_cbuf,
+ snapshot);
+
+ if (valid && key != NULL)
+ HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
+ nkeys, key, valid);
+
+ if (valid)
+ {
+ LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+ return;
+ }
+ }
+
+ /*
+ * otherwise move to the next item on the page
+ */
+ --linesleft;
+ if (backward)
+ {
+ --lpp; /* move back in this page's ItemId array */
+ --lineoff;
+ }
+ else
+ {
+ ++lpp; /* move forward in this page's ItemId array */
+ ++lineoff;
+ }
+ }
+
+ /*
+ * if we get here, it means we've exhausted the items on this page and
+ * it's time to move to the next.
+ */
+ LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+
+ /*
+ * advance to next/prior page and detect end of scan
+ */
+ if (backward)
+ {
+ finished = (page == scan->rs_startblock) ||
+ (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
+ if (page == 0)
+ page = scan->rs_nblocks;
+ page--;
+ }
+ else if (scan->rs_base.rs_parallel != NULL)
+ {
+ ParallelBlockTableScanDesc pbscan =
+ (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
+ ParallelBlockTableScanWorker pbscanwork =
+ scan->rs_parallelworkerdata;
+
+ page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+ pbscanwork, pbscan);
+ finished = (page == InvalidBlockNumber);
+ }
+ else
+ {
+ page++;
+ if (page >= scan->rs_nblocks)
+ page = 0;
+ finished = (page == scan->rs_startblock) ||
+ (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
+
+ /*
+ * Report our new scan position for synchronization purposes. We
+ * don't do that when moving backwards, however. That would just
+ * mess up any other forward-moving scanners.
+ *
+ * Note: we do this before checking for end of scan so that the
+ * final state of the position hint is back at the start of the
+ * rel. That's not strictly necessary, but otherwise when you run
+ * the same query multiple times the starting position would shift
+ * a little bit backwards on every invocation, which is confusing.
+ * We don't guarantee any specific ordering in general, though.
+ */
+ if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
+ ss_report_location(scan->rs_base.rs_rd, page);
+ }
+
+ /*
+ * return NULL if we've exhausted all the pages
+ */
+ if (finished)
+ {
+ if (BufferIsValid(scan->rs_cbuf))
+ ReleaseBuffer(scan->rs_cbuf);
+ scan->rs_cbuf = InvalidBuffer;
+ scan->rs_cblock = InvalidBlockNumber;
+ tuple->t_data = NULL;
+ scan->rs_inited = false;
+ return;
+ }
+
+ heapgetpage((TableScanDesc) scan, page);
+
+ LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+
+ dp = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
+ lines = PageGetMaxOffsetNumber((Page) dp);
+ linesleft = lines;
+ if (backward)
+ {
+ lineoff = lines;
+ lpp = PageGetItemId(dp, lines);
+ }
+ else
+ {
+ lineoff = FirstOffsetNumber;
+ lpp = PageGetItemId(dp, FirstOffsetNumber);
+ }
+ }
+}
+
+/* ----------------
+ * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
+ *
+ * Same API as heapgettup, but used in page-at-a-time mode
+ *
+ * The internal logic is much the same as heapgettup's too, but there are some
+ * differences: we do not take the buffer content lock (that only needs to
+ * happen inside heapgetpage), and we iterate through just the tuples listed
+ * in rs_vistuples[] rather than all tuples on the page. Notice that
+ * lineindex is 0-based, where the corresponding loop variable lineoff in
+ * heapgettup is 1-based.
+ * ----------------
+ */
+static void
+heapgettup_pagemode(HeapScanDesc scan,
+ ScanDirection dir,
+ int nkeys,
+ ScanKey key)
+{
+ HeapTuple tuple = &(scan->rs_ctup);
+ bool backward = ScanDirectionIsBackward(dir);
+ BlockNumber page;
+ bool finished;
+ Page dp;
+ int lines;
+ int lineindex;
+ OffsetNumber lineoff;
+ int linesleft;
+ ItemId lpp;
+
+ /*
+ * calculate next starting lineindex, given scan direction
+ */
+ if (ScanDirectionIsForward(dir))
+ {
+ if (!scan->rs_inited)
+ {
+ /*
+ * return null immediately if relation is empty
+ */
+ if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+ {
+ Assert(!BufferIsValid(scan->rs_cbuf));
+ tuple->t_data = NULL;
+ return;
+ }
+ if (scan->rs_base.rs_parallel != NULL)
+ {
+ ParallelBlockTableScanDesc pbscan =
+ (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
+ ParallelBlockTableScanWorker pbscanwork =
+ scan->rs_parallelworkerdata;
+
+ table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
+ pbscanwork, pbscan);
+
+ page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+ pbscanwork, pbscan);
+
+ /* Other processes might have already finished the scan. */
+ if (page == InvalidBlockNumber)
+ {
+ Assert(!BufferIsValid(scan->rs_cbuf));
+ tuple->t_data = NULL;
+ return;
+ }
+ }
+ else
+ page = scan->rs_startblock; /* first page */
+ heapgetpage((TableScanDesc) scan, page);
+ lineindex = 0;
+ scan->rs_inited = true;
+ }
+ else
+ {
+ /* continue from previously returned page/tuple */
+ page = scan->rs_cblock; /* current page */
+ lineindex = scan->rs_cindex + 1;
+ }
+
+ dp = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
+ lines = scan->rs_ntuples;
+ /* page and lineindex now reference the next visible tid */
+
+ linesleft = lines - lineindex;
+ }
+ else if (backward)
+ {
+ /* backward parallel scan not supported */
+ Assert(scan->rs_base.rs_parallel == NULL);
+
+ if (!scan->rs_inited)
+ {
+ /*
+ * return null immediately if relation is empty
+ */
+ if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+ {
+ Assert(!BufferIsValid(scan->rs_cbuf));
+ tuple->t_data = NULL;
+ return;
+ }
+
+ /*
+ * Disable reporting to syncscan logic in a backwards scan; it's
+ * not very likely anyone else is doing the same thing at the same
+ * time, and much more likely that we'll just bollix things for
+ * forward scanners.
+ */
+ scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
+
+ /*
+ * Start from last page of the scan. Ensure we take into account
+ * rs_numblocks if it's been adjusted by heap_setscanlimits().
+ */
+ if (scan->rs_numblocks != InvalidBlockNumber)
+ page = (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
+ else if (scan->rs_startblock > 0)
+ page = scan->rs_startblock - 1;
+ else
+ page = scan->rs_nblocks - 1;
+ heapgetpage((TableScanDesc) scan, page);
+ }
+ else
+ {
+ /* continue from previously returned page/tuple */
+ page = scan->rs_cblock; /* current page */
+ }
+
+ dp = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
+ lines = scan->rs_ntuples;
+
+ if (!scan->rs_inited)
+ {
+ lineindex = lines - 1;
+ scan->rs_inited = true;
+ }
+ else
+ {
+ lineindex = scan->rs_cindex - 1;
+ }
+ /* page and lineindex now reference the previous visible tid */
+
+ linesleft = lineindex + 1;
+ }
+ else
+ {
+ /*
+ * ``no movement'' scan direction: refetch prior tuple
+ */
+ if (!scan->rs_inited)
+ {
+ Assert(!BufferIsValid(scan->rs_cbuf));
+ tuple->t_data = NULL;
+ return;
+ }
+
+ page = ItemPointerGetBlockNumber(&(tuple->t_self));
+ if (page != scan->rs_cblock)
+ heapgetpage((TableScanDesc) scan, page);
+
+ /* Since the tuple was previously fetched, needn't lock page here */
+ dp = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
+ lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
+ lpp = PageGetItemId(dp, lineoff);
+ Assert(ItemIdIsNormal(lpp));
+
+ tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
+ tuple->t_len = ItemIdGetLength(lpp);
+
+ /* check that rs_cindex is in sync */
+ Assert(scan->rs_cindex < scan->rs_ntuples);
+ Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);
+
+ return;
+ }
+
+ /*
+ * advance the scan until we find a qualifying tuple or run out of stuff
+ * to scan
+ */
+ for (;;)
+ {
+ while (linesleft > 0)
+ {
+ lineoff = scan->rs_vistuples[lineindex];
+ lpp = PageGetItemId(dp, lineoff);
+ Assert(ItemIdIsNormal(lpp));
+
+ tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
+ tuple->t_len = ItemIdGetLength(lpp);
+ ItemPointerSet(&(tuple->t_self), page, lineoff);
+
+ /*
+ * if current tuple qualifies, return it.
+ */
+ if (key != NULL)
+ {
+ bool valid;
+
+ HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
+ nkeys, key, valid);
+ if (valid)
+ {
+ scan->rs_cindex = lineindex;
+ return;
+ }
+ }
+ else
+ {
+ scan->rs_cindex = lineindex;
+ return;
+ }
+
+ /*
+ * otherwise move to the next item on the page
+ */
+ --linesleft;
+ if (backward)
+ --lineindex;
+ else
+ ++lineindex;
+ }
+
+ /*
+ * if we get here, it means we've exhausted the items on this page and
+ * it's time to move to the next.
+ */
+ if (backward)
+ {
+ finished = (page == scan->rs_startblock) ||
+ (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
+ if (page == 0)
+ page = scan->rs_nblocks;
+ page--;
+ }
+ else if (scan->rs_base.rs_parallel != NULL)
+ {
+ ParallelBlockTableScanDesc pbscan =
+ (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
+ ParallelBlockTableScanWorker pbscanwork =
+ scan->rs_parallelworkerdata;
+
+ page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+ pbscanwork, pbscan);
+ finished = (page == InvalidBlockNumber);
+ }
+ else
+ {
+ page++;
+ if (page >= scan->rs_nblocks)
+ page = 0;
+ finished = (page == scan->rs_startblock) ||
+ (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
+
+ /*
+ * Report our new scan position for synchronization purposes. We
+ * don't do that when moving backwards, however. That would just
+ * mess up any other forward-moving scanners.
+ *
+ * Note: we do this before checking for end of scan so that the
+ * final state of the position hint is back at the start of the
+ * rel. That's not strictly necessary, but otherwise when you run
+ * the same query multiple times the starting position would shift
+ * a little bit backwards on every invocation, which is confusing.
+ * We don't guarantee any specific ordering in general, though.
+ */
+ if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
+ ss_report_location(scan->rs_base.rs_rd, page);
+ }
+
+ /*
+ * return NULL if we've exhausted all the pages
+ */
+ if (finished)
+ {
+ if (BufferIsValid(scan->rs_cbuf))
+ ReleaseBuffer(scan->rs_cbuf);
+ scan->rs_cbuf = InvalidBuffer;
+ scan->rs_cblock = InvalidBlockNumber;
+ tuple->t_data = NULL;
+ scan->rs_inited = false;
+ return;
+ }
+
+ heapgetpage((TableScanDesc) scan, page);
+
+ dp = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
+ lines = scan->rs_ntuples;
+ linesleft = lines;
+ if (backward)
+ lineindex = lines - 1;
+ else
+ lineindex = 0;
+ }
+}
+
+
+#if defined(DISABLE_COMPLEX_MACRO)
+/*
+ * This is formatted oddly so that the correspondence to the macro
+ * definition in access/htup_details.h is maintained.
+ */
+Datum
+fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
+ bool *isnull)
+{
+ return (
+ (attnum) > 0 ?
+ (
+ (*(isnull) = false),
+ HeapTupleNoNulls(tup) ?
+ (
+ TupleDescAttr((tupleDesc), (attnum) - 1)->attcacheoff >= 0 ?
+ (
+ fetchatt(TupleDescAttr((tupleDesc), (attnum) - 1),
+ (char *) (tup)->t_data + (tup)->t_data->t_hoff +
+ TupleDescAttr((tupleDesc), (attnum) - 1)->attcacheoff)
+ )
+ :
+ nocachegetattr((tup), (attnum), (tupleDesc))
+ )
+ :
+ (
+ att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
+ (
+ (*(isnull) = true),
+ (Datum) NULL
+ )
+ :
+ (
+ nocachegetattr((tup), (attnum), (tupleDesc))
+ )
+ )
+ )
+ :
+ (
+ (Datum) NULL
+ )
+ );
+}
+#endif /* defined(DISABLE_COMPLEX_MACRO) */
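+
+/*
+ * Illustrative sketch (not part of the upstream source): extracting a
+ * column with fastgetattr(), assuming "tup" and "tupleDesc" describe the
+ * same rowtype and attribute 1 is an int4 column.
+ *
+ *		bool	isnull;
+ *		Datum	d = fastgetattr(tup, 1, tupleDesc, &isnull);
+ *
+ *		if (!isnull)
+ *			... DatumGetInt32(d) yields the column value ...
+ */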
+
+
+/* ----------------------------------------------------------------
+ * heap access method interface
+ * ----------------------------------------------------------------
+ */
+
+
+TableScanDesc
+heap_beginscan(Relation relation, Snapshot snapshot,
+ int nkeys, ScanKey key,
+ ParallelTableScanDesc parallel_scan,
+ uint32 flags)
+{
+ HeapScanDesc scan;
+
+ /*
+ * increment relation ref count while scanning relation
+ *
+ * This is just to make really sure the relcache entry won't go away while
+ * the scan has a pointer to it. Caller should be holding the rel open
+ * anyway, so this is redundant in all normal scenarios...
+ */
+ RelationIncrementReferenceCount(relation);
+
+ /*
+ * allocate and initialize scan descriptor
+ */
+ scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
+
+ scan->rs_base.rs_rd = relation;
+ scan->rs_base.rs_snapshot = snapshot;
+ scan->rs_base.rs_nkeys = nkeys;
+ scan->rs_base.rs_flags = flags;
+ scan->rs_base.rs_parallel = parallel_scan;
+ scan->rs_strategy = NULL; /* set in initscan */
+
+ /*
+	 * Disable page-at-a-time mode if it's not an MVCC-safe snapshot.
+ */
+ if (!(snapshot && IsMVCCSnapshot(snapshot)))
+ scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
+
+ /*
+ * For seqscan and sample scans in a serializable transaction, acquire a
+ * predicate lock on the entire relation. This is required not only to
+ * lock all the matching tuples, but also to conflict with new insertions
+ * into the table. In an indexscan, we take page locks on the index pages
+ * covering the range specified in the scan qual, but in a heap scan there
+ * is nothing more fine-grained to lock. A bitmap scan is a different
+ * story, there we have already scanned the index and locked the index
+ * pages covering the predicate. But in that case we still have to lock
+ * any matching heap tuples. For sample scan we could optimize the locking
+ * to be at least page-level granularity, but we'd need to add per-tuple
+ * locking for that.
+ */
+ if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
+ {
+ /*
+ * Ensure a missing snapshot is noticed reliably, even if the
+ * isolation mode means predicate locking isn't performed (and
+ * therefore the snapshot isn't used here).
+ */
+ Assert(snapshot);
+ PredicateLockRelation(relation, snapshot);
+ }
+
+ /* we only need to set this up once */
+ scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
+
+ /*
+ * Allocate memory to keep track of page allocation for parallel workers
+ * when doing a parallel scan.
+ */
+ if (parallel_scan != NULL)
+ scan->rs_parallelworkerdata = palloc(sizeof(ParallelBlockTableScanWorkerData));
+ else
+ scan->rs_parallelworkerdata = NULL;
+
+ /*
+ * we do this here instead of in initscan() because heap_rescan also calls
+ * initscan() and we don't want to allocate memory again
+ */
+ if (nkeys > 0)
+ scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
+ else
+ scan->rs_base.rs_key = NULL;
+
+ initscan(scan, key, false);
+
+ return (TableScanDesc) scan;
+}
+
+void
+heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
+ bool allow_strat, bool allow_sync, bool allow_pagemode)
+{
+ HeapScanDesc scan = (HeapScanDesc) sscan;
+
+ if (set_params)
+ {
+ if (allow_strat)
+ scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
+ else
+ scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
+
+ if (allow_sync)
+ scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
+ else
+ scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
+
+ if (allow_pagemode && scan->rs_base.rs_snapshot &&
+ IsMVCCSnapshot(scan->rs_base.rs_snapshot))
+ scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
+ else
+ scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
+ }
+
+ /*
+ * unpin scan buffers
+ */
+ if (BufferIsValid(scan->rs_cbuf))
+ ReleaseBuffer(scan->rs_cbuf);
+
+ /*
+ * reinitialize scan descriptor
+ */
+ initscan(scan, key, true);
+}
+
+void
+heap_endscan(TableScanDesc sscan)
+{
+ HeapScanDesc scan = (HeapScanDesc) sscan;
+
+ /* Note: no locking manipulations needed */
+
+ /*
+ * unpin scan buffers
+ */
+ if (BufferIsValid(scan->rs_cbuf))
+ ReleaseBuffer(scan->rs_cbuf);
+
+ /*
+ * decrement relation reference count and free scan descriptor storage
+ */
+ RelationDecrementReferenceCount(scan->rs_base.rs_rd);
+
+ if (scan->rs_base.rs_key)
+ pfree(scan->rs_base.rs_key);
+
+ if (scan->rs_strategy != NULL)
+ FreeAccessStrategy(scan->rs_strategy);
+
+ if (scan->rs_parallelworkerdata != NULL)
+ pfree(scan->rs_parallelworkerdata);
+
+ if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
+ UnregisterSnapshot(scan->rs_base.rs_snapshot);
+
+ pfree(scan);
+}
+
+HeapTuple
+heap_getnext(TableScanDesc sscan, ScanDirection direction)
+{
+ HeapScanDesc scan = (HeapScanDesc) sscan;
+
+ /*
+ * This is still widely used directly, without going through table AM, so
+ * add a safety check. It's possible we should, at a later point,
+ * downgrade this to an assert. The reason for checking the AM routine,
+ * rather than the AM oid, is that this allows to write regression tests
+	 * rather than the AM oid, is that this allows writing regression tests
+ */
+ if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg_internal("only heap AM is supported")));
+
+ /*
+ * We don't expect direct calls to heap_getnext with valid CheckXidAlive
+ * for catalog or regular tables. See detailed comments in xact.c where
+	 * these variables are declared.  Normally we have such a check at the
+	 * tableam API level, but this is called from many places, so we need
+	 * to ensure it here.
+ */
+ if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
+ elog(ERROR, "unexpected heap_getnext call during logical decoding");
+
+ /* Note: no locking manipulations needed */
+
+ if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
+ heapgettup_pagemode(scan, direction,
+ scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
+ else
+ heapgettup(scan, direction,
+ scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
+
+ if (scan->rs_ctup.t_data == NULL)
+ return NULL;
+
+ /*
+ * if we get here it means we have a new current scan tuple, so point to
+ * the proper return buffer and return the tuple.
+ */
+
+ pgstat_count_heap_getnext(scan->rs_base.rs_rd);
+
+ return &scan->rs_ctup;
+}
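+
+/*
+ * Illustrative sketch (not part of the upstream source): the classic scan
+ * loop over heap_beginscan()/heap_getnext()/heap_endscan(), assuming
+ * "relation" is an open Relation and "snapshot" a registered snapshot.
+ *
+ *		TableScanDesc scan;
+ *		HeapTuple	tuple;
+ *
+ *		scan = heap_beginscan(relation, snapshot, 0, NULL, NULL,
+ *							  SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
+ *							  SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);
+ *		while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
+ *		{
+ *			... process tuple; it is only valid until the next call ...
+ *		}
+ *		heap_endscan(scan);
+ */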
+
+bool
+heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
+{
+ HeapScanDesc scan = (HeapScanDesc) sscan;
+
+ /* Note: no locking manipulations needed */
+
+ if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
+ heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
+ else
+ heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
+
+ if (scan->rs_ctup.t_data == NULL)
+ {
+ ExecClearTuple(slot);
+ return false;
+ }
+
+ /*
+ * if we get here it means we have a new current scan tuple, so point to
+ * the proper return buffer and return the tuple.
+ */
+
+ pgstat_count_heap_getnext(scan->rs_base.rs_rd);
+
+ ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
+ scan->rs_cbuf);
+ return true;
+}
+
+void
+heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
+ ItemPointer maxtid)
+{
+ HeapScanDesc scan = (HeapScanDesc) sscan;
+ BlockNumber startBlk;
+ BlockNumber numBlks;
+ ItemPointerData highestItem;
+ ItemPointerData lowestItem;
+
+ /*
+ * For relations without any pages, we can simply leave the TID range
+ * unset. There will be no tuples to scan, therefore no tuples outside
+ * the given TID range.
+ */
+ if (scan->rs_nblocks == 0)
+ return;
+
+ /*
+ * Set up some ItemPointers which point to the first and last possible
+ * tuples in the heap.
+ */
+ ItemPointerSet(&highestItem, scan->rs_nblocks - 1, MaxOffsetNumber);
+ ItemPointerSet(&lowestItem, 0, FirstOffsetNumber);
+
+ /*
+ * If the given maximum TID is below the highest possible TID in the
+ * relation, then restrict the range to that, otherwise we scan to the end
+ * of the relation.
+ */
+ if (ItemPointerCompare(maxtid, &highestItem) < 0)
+ ItemPointerCopy(maxtid, &highestItem);
+
+ /*
+ * If the given minimum TID is above the lowest possible TID in the
+ * relation, then restrict the range to only scan for TIDs above that.
+ */
+ if (ItemPointerCompare(mintid, &lowestItem) > 0)
+ ItemPointerCopy(mintid, &lowestItem);
+
+ /*
+	 * Check for an empty range and protect against would-be negative results
+ * from the numBlks calculation below.
+ */
+ if (ItemPointerCompare(&highestItem, &lowestItem) < 0)
+ {
+ /* Set an empty range of blocks to scan */
+ heap_setscanlimits(sscan, 0, 0);
+ return;
+ }
+
+ /*
+ * Calculate the first block and the number of blocks we must scan. We
+ * could be more aggressive here and perform some more validation to try
+ * and further narrow the scope of blocks to scan by checking if the
+	 * lowestItem has an offset above MaxOffsetNumber. In this case, we could
+ * advance startBlk by one. Likewise, if highestItem has an offset of 0
+ * we could scan one fewer blocks. However, such an optimization does not
+ * seem worth troubling over, currently.
+ */
+ startBlk = ItemPointerGetBlockNumberNoCheck(&lowestItem);
+
+ numBlks = ItemPointerGetBlockNumberNoCheck(&highestItem) -
+ ItemPointerGetBlockNumberNoCheck(&lowestItem) + 1;
+
+ /* Set the start block and number of blocks to scan */
+ heap_setscanlimits(sscan, startBlk, numBlks);
+
+ /* Finally, set the TID range in sscan */
+ ItemPointerCopy(&lowestItem, &sscan->rs_mintid);
+ ItemPointerCopy(&highestItem, &sscan->rs_maxtid);
+}
+
+bool
+heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
+ TupleTableSlot *slot)
+{
+ HeapScanDesc scan = (HeapScanDesc) sscan;
+ ItemPointer mintid = &sscan->rs_mintid;
+ ItemPointer maxtid = &sscan->rs_maxtid;
+
+ /* Note: no locking manipulations needed */
+ for (;;)
+ {
+ if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
+ heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
+ else
+ heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
+
+ if (scan->rs_ctup.t_data == NULL)
+ {
+ ExecClearTuple(slot);
+ return false;
+ }
+
+ /*
+ * heap_set_tidrange will have used heap_setscanlimits to limit the
+ * range of pages we scan to only ones that can contain the TID range
+ * we're scanning for. Here we must filter out any tuples from these
+		 * pages that are outside that range.
+ */
+ if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
+ {
+ ExecClearTuple(slot);
+
+ /*
+ * When scanning backwards, the TIDs will be in descending order.
+ * Future tuples in this direction will be lower still, so we can
+ * just return false to indicate there will be no more tuples.
+ */
+ if (ScanDirectionIsBackward(direction))
+ return false;
+
+ continue;
+ }
+
+ /*
+ * Likewise for the final page, we must filter out TIDs greater than
+ * maxtid.
+ */
+ if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
+ {
+ ExecClearTuple(slot);
+
+ /*
+ * When scanning forward, the TIDs will be in ascending order.
+ * Future tuples in this direction will be higher still, so we can
+ * just return false to indicate there will be no more tuples.
+ */
+ if (ScanDirectionIsForward(direction))
+ return false;
+ continue;
+ }
+
+ break;
+ }
+
+ /*
+ * if we get here it means we have a new current scan tuple, so point to
+ * the proper return buffer and return the tuple.
+ */
+ pgstat_count_heap_getnext(scan->rs_base.rs_rd);
+
+ ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
+ return true;
+}
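+
+/*
+ * Illustrative sketch (not part of the upstream source): restricting a
+ * scan to a TID range and fetching the qualifying tuples, assuming "scan"
+ * was begun with the SO_TYPE_TIDRANGESCAN flag and "slot" is a buffer
+ * heap slot for the same relation.
+ *
+ *		heap_set_tidrange(scan, &mintid, &maxtid);
+ *		while (heap_getnextslot_tidrange(scan, ForwardScanDirection, slot))
+ *		{
+ *			... process slot ...
+ *		}
+ */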
+
+/*
+ * heap_fetch - retrieve tuple with given tid
+ *
+ * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
+ * the tuple, fill in the remaining fields of *tuple, and check the tuple
+ * against the specified snapshot.
+ *
+ * If successful (tuple found and passes snapshot time qual), then *userbuf
+ * is set to the buffer holding the tuple and true is returned. The caller
+ * must unpin the buffer when done with the tuple.
+ *
+ * If the tuple is not found (ie, item number references a deleted slot),
+ * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
+ * and false is returned.
+ *
+ * If the tuple is found but fails the time qual check, then false is returned
+ * and *userbuf is set to InvalidBuffer, but tuple->t_data is left pointing
+ * to the tuple. (Note that it is unsafe to dereference tuple->t_data in
+ * this case, but callers might choose to test it for NULL-ness.)
+ *
+ * heap_fetch does not follow HOT chains: only the exact TID requested will
+ * be fetched.
+ *
+ * It is somewhat inconsistent that we ereport() on invalid block number but
+ * return false on invalid item number. There are a couple of reasons though.
+ * One is that the caller can relatively easily check the block number for
+ * validity, but cannot check the item number without reading the page
+ * itself. Another is that when we are following a t_ctid link, we can be
+ * reasonably confident that the page number is valid (since VACUUM shouldn't
+ * truncate off the destination page without having killed the referencing
+ * tuple first), but the item number might well not be good.
+ */
+bool
+heap_fetch(Relation relation,
+ Snapshot snapshot,
+ HeapTuple tuple,
+ Buffer *userbuf)
+{
+ return heap_fetch_extended(relation, snapshot, tuple, userbuf, false);
+}
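+
+/*
+ * Illustrative sketch (not part of the upstream source): fetching a known
+ * TID, assuming "relation" is open and "tid" points at the tuple of
+ * interest.
+ *
+ *		HeapTupleData tuple;
+ *		Buffer		buffer;
+ *
+ *		tuple.t_self = *tid;
+ *		if (heap_fetch(relation, GetActiveSnapshot(), &tuple, &buffer))
+ *		{
+ *			... use tuple.t_data ...
+ *			ReleaseBuffer(buffer);
+ *		}
+ */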
+
+/*
+ * heap_fetch_extended - fetch tuple even if it fails snapshot test
+ *
+ * If keep_buf is true, then upon finding a tuple that is valid but fails
+ * the snapshot check, we return the tuple pointer in tuple->t_data and the
+ * buffer ID in *userbuf, keeping the buffer pin, just as if it had passed
+ * the snapshot. (The function result is still "false" though.)
+ * If keep_buf is false then this behaves identically to heap_fetch().
+ */
+bool
+heap_fetch_extended(Relation relation,
+ Snapshot snapshot,
+ HeapTuple tuple,
+ Buffer *userbuf,
+ bool keep_buf)
+{
+ ItemPointer tid = &(tuple->t_self);
+ ItemId lp;
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ bool valid;
+
+ /*
+ * Fetch and pin the appropriate page of the relation.
+ */
+ buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
+
+ /*
+ * Need share lock on buffer to examine tuple commit status.
+ */
+ LockBuffer(buffer, BUFFER_LOCK_SHARE);
+ page = BufferGetPage(buffer);
+ TestForOldSnapshot(snapshot, relation, page);
+
+ /*
+	 * We'd better check for an out-of-range offnum in case the item has
+	 * been vacuumed away since the TID was obtained.
+ */
+ offnum = ItemPointerGetOffsetNumber(tid);
+ if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
+ {
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ ReleaseBuffer(buffer);
+ *userbuf = InvalidBuffer;
+ tuple->t_data = NULL;
+ return false;
+ }
+
+ /*
+ * get the item line pointer corresponding to the requested tid
+ */
+ lp = PageGetItemId(page, offnum);
+
+ /*
+ * Must check for deleted tuple.
+ */
+ if (!ItemIdIsNormal(lp))
+ {
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ ReleaseBuffer(buffer);
+ *userbuf = InvalidBuffer;
+ tuple->t_data = NULL;
+ return false;
+ }
+
+ /*
+ * fill in *tuple fields
+ */
+ tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
+ tuple->t_len = ItemIdGetLength(lp);
+ tuple->t_tableOid = RelationGetRelid(relation);
+
+ /*
+ * check tuple visibility, then release lock
+ */
+ valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
+
+ if (valid)
+ PredicateLockTID(relation, &(tuple->t_self), snapshot,
+ HeapTupleHeaderGetXmin(tuple->t_data));
+
+ HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ if (valid)
+ {
+ /*
+ * All checks passed, so return the tuple as valid. Caller is now
+ * responsible for releasing the buffer.
+ */
+ *userbuf = buffer;
+
+ return true;
+ }
+
+ /* Tuple failed time qual, but maybe caller wants to see it anyway. */
+ if (keep_buf)
+ *userbuf = buffer;
+ else
+ {
+ ReleaseBuffer(buffer);
+ *userbuf = InvalidBuffer;
+ }
+
+ return false;
+}
+
+/*
+ * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
+ *
+ * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
+ * of a HOT chain), and buffer is the buffer holding this tuple. We search
+ * for the first chain member satisfying the given snapshot. If one is
+ * found, we update *tid to reference that tuple's offset number, and
+ * return true. If no match, return false without modifying *tid.
+ *
+ * heapTuple is a caller-supplied buffer. When a match is found, we return
+ * the tuple here, in addition to updating *tid. If no match is found, the
+ * contents of this buffer on return are undefined.
+ *
+ * If all_dead is not NULL, we check non-visible tuples to see if they are
+ * globally dead; *all_dead is set true if all members of the HOT chain
+ * are vacuumable, false if not.
+ *
+ * Unlike heap_fetch, the caller must already have pin and (at least) share
+ * lock on the buffer; it is still pinned/locked at exit.
+ */
+bool
+heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
+ Snapshot snapshot, HeapTuple heapTuple,
+ bool *all_dead, bool first_call)
+{
+ Page dp = (Page) BufferGetPage(buffer);
+ TransactionId prev_xmax = InvalidTransactionId;
+ BlockNumber blkno;
+ OffsetNumber offnum;
+ bool at_chain_start;
+ bool valid;
+ bool skip;
+ GlobalVisState *vistest = NULL;
+
+ /* If this is not the first call, previous call returned a (live!) tuple */
+ if (all_dead)
+ *all_dead = first_call;
+
+ blkno = ItemPointerGetBlockNumber(tid);
+ offnum = ItemPointerGetOffsetNumber(tid);
+ at_chain_start = first_call;
+ skip = !first_call;
+
+ /* XXX: we should assert that a snapshot is pushed or registered */
+ Assert(TransactionIdIsValid(RecentXmin));
+ Assert(BufferGetBlockNumber(buffer) == blkno);
+
+ /* Scan through possible multiple members of HOT-chain */
+ for (;;)
+ {
+ ItemId lp;
+
+ /* check for bogus TID */
+ if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
+ break;
+
+ lp = PageGetItemId(dp, offnum);
+
+ /* check for unused, dead, or redirected items */
+ if (!ItemIdIsNormal(lp))
+ {
+ /* We should only see a redirect at start of chain */
+ if (ItemIdIsRedirected(lp) && at_chain_start)
+ {
+ /* Follow the redirect */
+ offnum = ItemIdGetRedirect(lp);
+ at_chain_start = false;
+ continue;
+ }
+ /* else must be end of chain */
+ break;
+ }
+
+ /*
+ * Update heapTuple to point to the element of the HOT chain we're
+ * currently investigating. Having t_self set correctly is important
+ * because the SSI checks and the *Satisfies routine for historical
+ * MVCC snapshots need the correct tid to decide about the visibility.
+ */
+ heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
+ heapTuple->t_len = ItemIdGetLength(lp);
+ heapTuple->t_tableOid = RelationGetRelid(relation);
+ ItemPointerSet(&heapTuple->t_self, blkno, offnum);
+
+ /*
+ * Shouldn't see a HEAP_ONLY tuple at chain start.
+ */
+ if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
+ break;
+
+ /*
+ * The xmin should match the previous xmax value, else chain is
+ * broken.
+ */
+ if (TransactionIdIsValid(prev_xmax) &&
+ !TransactionIdEquals(prev_xmax,
+ HeapTupleHeaderGetXmin(heapTuple->t_data)))
+ break;
+
+ /*
+ * When first_call is true (and thus, skip is initially false) we'll
+ * return the first tuple we find. But on later passes, heapTuple
+ * will initially be pointing to the tuple we returned last time.
+ * Returning it again would be incorrect (and would loop forever), so
+ * we skip it and return the next match we find.
+ */
+ if (!skip)
+ {
+ /* If it's visible per the snapshot, we must return it */
+ valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
+ HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
+ buffer, snapshot);
+
+ if (valid)
+ {
+ ItemPointerSetOffsetNumber(tid, offnum);
+ PredicateLockTID(relation, &heapTuple->t_self, snapshot,
+ HeapTupleHeaderGetXmin(heapTuple->t_data));
+ if (all_dead)
+ *all_dead = false;
+ return true;
+ }
+ }
+ skip = false;
+
+ /*
+ * If we can't see it, maybe no one else can either. At caller
+ * request, check whether all chain members are dead to all
+ * transactions.
+ *
+ * Note: if you change the criterion here for what is "dead", fix the
+ * planner's get_actual_variable_range() function to match.
+ */
+ if (all_dead && *all_dead)
+ {
+ if (!vistest)
+ vistest = GlobalVisTestFor(relation);
+
+ if (!HeapTupleIsSurelyDead(heapTuple, vistest))
+ *all_dead = false;
+ }
+
+ /*
+ * Check to see if HOT chain continues past this tuple; if so fetch
+ * the next offnum and loop around.
+ */
+ if (HeapTupleIsHotUpdated(heapTuple))
+ {
+ Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
+ blkno);
+ offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
+ at_chain_start = false;
+ prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
+ }
+ else
+ break; /* end of chain */
+ }
+
+ return false;
+}
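+
+/*
+ * Illustrative sketch (not part of the upstream source): resolving a HOT
+ * chain from its root TID, assuming "relation" is open, "tid" is an
+ * ItemPointerData naming the root, and "snapshot" is the snapshot to test
+ * against. The caller supplies the pin and share lock, as the comments
+ * above require.
+ *
+ *		Buffer		buffer;
+ *		HeapTupleData heapTuple;
+ *		bool		all_dead;
+ *		bool		found;
+ *
+ *		buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&tid));
+ *		LockBuffer(buffer, BUFFER_LOCK_SHARE);
+ *		found = heap_hot_search_buffer(&tid, relation, buffer, snapshot,
+ *									   &heapTuple, &all_dead, true);
+ *		UnlockReleaseBuffer(buffer);
+ */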
+
+/*
+ * heap_get_latest_tid - get the latest tid of a specified tuple
+ *
+ * Actually, this gets the latest version that is visible according to the
+ * scan's snapshot. Create a scan using SnapshotDirty to get the very latest,
+ * possibly uncommitted version.
+ *
+ * *tid is both an input and an output parameter: it is updated to
+ * show the latest version of the row. Note that it will not be changed
+ * if no version of the row passes the snapshot test.
+ */
+void
+heap_get_latest_tid(TableScanDesc sscan,
+ ItemPointer tid)
+{
+ Relation relation = sscan->rs_rd;
+ Snapshot snapshot = sscan->rs_snapshot;
+ ItemPointerData ctid;
+ TransactionId priorXmax;
+
+ /*
+	 * table_tuple_get_latest_tid() verified that the passed-in tid is
+	 * valid.  We assume that t_ctid links are valid, however - there
+	 * shouldn't be invalid ones in the table.
+ */
+ Assert(ItemPointerIsValid(tid));
+
+ /*
+ * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
+ * need to examine, and *tid is the TID we will return if ctid turns out
+ * to be bogus.
+ *
+ * Note that we will loop until we reach the end of the t_ctid chain.
+ * Depending on the snapshot passed, there might be at most one visible
+ * version of the row, but we don't try to optimize for that.
+ */
+ ctid = *tid;
+ priorXmax = InvalidTransactionId; /* cannot check first XMIN */
+ for (;;)
+ {
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp;
+ HeapTupleData tp;
+ bool valid;
+
+ /*
+ * Read, pin, and lock the page.
+ */
+ buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
+ LockBuffer(buffer, BUFFER_LOCK_SHARE);
+ page = BufferGetPage(buffer);
+ TestForOldSnapshot(snapshot, relation, page);
+
+ /*
+ * Check for bogus item number. This is not treated as an error
+ * condition because it can happen while following a t_ctid link. We
+ * just assume that the prior tid is OK and return it unchanged.
+ */
+ offnum = ItemPointerGetOffsetNumber(&ctid);
+ if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
+ {
+ UnlockReleaseBuffer(buffer);
+ break;
+ }
+ lp = PageGetItemId(page, offnum);
+ if (!ItemIdIsNormal(lp))
+ {
+ UnlockReleaseBuffer(buffer);
+ break;
+ }
+
+ /* OK to access the tuple */
+ tp.t_self = ctid;
+ tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
+ tp.t_len = ItemIdGetLength(lp);
+ tp.t_tableOid = RelationGetRelid(relation);
+
+ /*
+ * After following a t_ctid link, we might arrive at an unrelated
+ * tuple. Check for XMIN match.
+ */
+ if (TransactionIdIsValid(priorXmax) &&
+ !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
+ {
+ UnlockReleaseBuffer(buffer);
+ break;
+ }
+
+ /*
+ * Check tuple visibility; if visible, set it as the new result
+ * candidate.
+ */
+ valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
+ HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
+ if (valid)
+ *tid = ctid;
+
+ /*
+ * If there's a valid t_ctid link, follow it, else we're done.
+ */
+ if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
+ HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
+ HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
+ ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
+ {
+ UnlockReleaseBuffer(buffer);
+ break;
+ }
+
+ ctid = tp.t_data->t_ctid;
+ priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
+ UnlockReleaseBuffer(buffer);
+ } /* end of loop */
+}
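+
+/*
+ * Illustrative sketch (not part of the upstream source): chasing a row to
+ * its latest visible version, assuming "scan" was opened on the relation
+ * with the snapshot of interest and "tid" holds a known starting TID.
+ *
+ *		heap_get_latest_tid(scan, &tid);
+ *		... "tid" now names the newest version visible to the snapshot, or
+ *		is unchanged if no version passed the snapshot test ...
+ */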
+
+
+/*
+ * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
+ *
+ * This is called after we have waited for the XMAX transaction to terminate.
+ * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
+ * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
+ * hint bit if possible --- but beware that that may not yet be possible,
+ * if the transaction committed asynchronously.
+ *
+ * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
+ * even if it commits.
+ *
+ * Hence callers should look only at XMAX_INVALID.
+ *
+ * Note this is not allowed for tuples whose xmax is a multixact.
+ */
+static void
+UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
+{
+ Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
+ Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
+
+ if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
+ {
+ if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
+ TransactionIdDidCommit(xid))
+ HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
+ xid);
+ else
+ HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ }
+}
+
+
+/*
+ * GetBulkInsertState - prepare status object for a bulk insert
+ */
+BulkInsertState
+GetBulkInsertState(void)
+{
+ BulkInsertState bistate;
+
+ bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
+ bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
+ bistate->current_buf = InvalidBuffer;
+ return bistate;
+}
+
+/*
+ * FreeBulkInsertState - clean up after finishing a bulk insert
+ */
+void
+FreeBulkInsertState(BulkInsertState bistate)
+{
+ if (bistate->current_buf != InvalidBuffer)
+ ReleaseBuffer(bistate->current_buf);
+ FreeAccessStrategy(bistate->strategy);
+ pfree(bistate);
+}
+
+/*
+ * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
+ */
+void
+ReleaseBulkInsertStatePin(BulkInsertState bistate)
+{
+ if (bistate->current_buf != InvalidBuffer)
+ ReleaseBuffer(bistate->current_buf);
+ bistate->current_buf = InvalidBuffer;
+}
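+
+/*
+ * Illustrative sketch (not part of the upstream source): a bulk-load loop
+ * using a BulkInsertState, assuming "relation" is open and "tuples" is a
+ * caller-built array of "ntuples" heap tuples.
+ *
+ *		BulkInsertState bistate = GetBulkInsertState();
+ *
+ *		for (int i = 0; i < ntuples; i++)
+ *			heap_insert(relation, tuples[i], GetCurrentCommandId(true),
+ *						0, bistate);
+ *		FreeBulkInsertState(bistate);
+ */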
+
+
+/*
+ * heap_insert - insert tuple into a heap
+ *
+ * The new tuple is stamped with current transaction ID and the specified
+ * command ID.
+ *
+ * See table_tuple_insert for comments about most of the input flags, except
+ * that this routine directly takes a tuple rather than a slot.
+ *
+ * There are corresponding HEAP_INSERT_ options for all the TABLE_INSERT_
+ * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
+ * implement table_tuple_insert_speculative().
+ *
+ * On return the header fields of *tup are updated to match the stored tuple;
+ * in particular tup->t_self receives the actual TID where the tuple was
+ * stored. But note that any toasting of fields within the tuple data is NOT
+ * reflected into *tup.
+ */
+void
+heap_insert(Relation relation, HeapTuple tup, CommandId cid,
+ int options, BulkInsertState bistate)
+{
+ TransactionId xid = GetCurrentTransactionId();
+ HeapTuple heaptup;
+ Buffer buffer;
+ Buffer vmbuffer = InvalidBuffer;
+ bool all_visible_cleared = false;
+
+ /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
+ Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
+ RelationGetNumberOfAttributes(relation));
+
+ /*
+ * Fill in tuple header fields and toast the tuple if necessary.
+ *
+ * Note: below this point, heaptup is the data we actually intend to store
+ * into the relation; tup is the caller's original untoasted data.
+ */
+ heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
+
+ /*
+ * Find buffer to insert this tuple into. If the page is all visible,
+ * this will also pin the requisite visibility map page.
+ */
+ buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
+ InvalidBuffer, options, bistate,
+ &vmbuffer, NULL);
+
+ /*
+ * We're about to do the actual insert -- but check for conflict first, to
+ * avoid possibly having to roll back work we've just done.
+ *
+ * This is safe without a recheck as long as there is no possibility of
+ * another process scanning the page between this check and the insert
+ * being visible to the scan (i.e., an exclusive buffer content lock is
+ * continuously held from this point until the tuple insert is visible).
+ *
+ * For a heap insert, we only need to check for table-level SSI locks. Our
+ * new tuple can't possibly conflict with existing tuple locks, and heap
+ * page locks are only consolidated versions of tuple locks; they do not
+ * lock "gaps" as index page locks do. So we don't need to specify a
+ * buffer when making the call, which makes for a faster check.
+ */
+ CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
+
+ /* NO EREPORT(ERROR) from here till changes are logged */
+ START_CRIT_SECTION();
+
+ RelationPutHeapTuple(relation, buffer, heaptup,
+ (options & HEAP_INSERT_SPECULATIVE) != 0);
+
+ if (PageIsAllVisible(BufferGetPage(buffer)))
+ {
+ all_visible_cleared = true;
+ PageClearAllVisible(BufferGetPage(buffer));
+ visibilitymap_clear(relation,
+ ItemPointerGetBlockNumber(&(heaptup->t_self)),
+ vmbuffer, VISIBILITYMAP_VALID_BITS);
+ }
+
+ /*
+ * XXX Should we set PageSetPrunable on this page ?
+ *
+	 * The inserting transaction may eventually abort, thus making this
+	 * tuple DEAD and hence available for pruning. Though we don't want to
+	 * optimize for aborts, if no other tuple in this page is
+	 * UPDATEd/DELETEd, the aborted tuple will never be pruned until the
+	 * next vacuum is triggered.
+ *
+ * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
+ */
+
+ MarkBufferDirty(buffer);
+
+ /* XLOG stuff */
+ if (RelationNeedsWAL(relation))
+ {
+ xl_heap_insert xlrec;
+ xl_heap_header xlhdr;
+ XLogRecPtr recptr;
+ Page page = BufferGetPage(buffer);
+ uint8 info = XLOG_HEAP_INSERT;
+ int bufflags = 0;
+
+ /*
+		 * If this is a catalog, we need to transmit combo CIDs so that
+		 * logical decoding can decode it properly, so log that as well.
+ */
+ if (RelationIsAccessibleInLogicalDecoding(relation))
+ log_heap_new_cid(relation, heaptup);
+
+ /*
+		 * If this is the first and only tuple on the page, we can reinit the
+ * page instead of restoring the whole thing. Set flag, and hide
+ * buffer references from XLogInsert.
+ */
+ if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
+ PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
+ {
+ info |= XLOG_HEAP_INIT_PAGE;
+ bufflags |= REGBUF_WILL_INIT;
+ }
+
+ xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
+ xlrec.flags = 0;
+ if (all_visible_cleared)
+ xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
+ if (options & HEAP_INSERT_SPECULATIVE)
+ xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
+ Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
+
+ /*
+ * For logical decoding, we need the tuple even if we're doing a full
+ * page write, so make sure it's included even if we take a full-page
+ * image. (XXX We could alternatively store a pointer into the FPW).
+ */
+ if (RelationIsLogicallyLogged(relation) &&
+ !(options & HEAP_INSERT_NO_LOGICAL))
+ {
+ xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
+ bufflags |= REGBUF_KEEP_DATA;
+
+ if (IsToastRelation(relation))
+ xlrec.flags |= XLH_INSERT_ON_TOAST_RELATION;
+ }
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
+
+ xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
+ xlhdr.t_infomask = heaptup->t_data->t_infomask;
+ xlhdr.t_hoff = heaptup->t_data->t_hoff;
+
+ /*
+ * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
+ * write the whole page to the xlog, we don't need to store
+ * xl_heap_header in the xlog.
+ */
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
+ XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
+ /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
+ XLogRegisterBufData(0,
+ (char *) heaptup->t_data + SizeofHeapTupleHeader,
+ heaptup->t_len - SizeofHeapTupleHeader);
+
+ /* filtering by origin on a row level is much more efficient */
+ XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
+
+ recptr = XLogInsert(RM_HEAP_ID, info);
+
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ UnlockReleaseBuffer(buffer);
+ if (vmbuffer != InvalidBuffer)
+ ReleaseBuffer(vmbuffer);
+
+ /*
+ * If tuple is cachable, mark it for invalidation from the caches in case
+ * we abort. Note it is OK to do this after releasing the buffer, because
+ * the heaptup data structure is all in local memory, not in the shared
+ * buffer.
+ */
+ CacheInvalidateHeapTuple(relation, heaptup, NULL);
+
+ /* Note: speculative insertions are counted too, even if aborted later */
+ pgstat_count_heap_insert(relation, 1);
+
+ /*
+ * If heaptup is a private copy, release it. Don't forget to copy t_self
+ * back to the caller's image, too.
+ */
+ if (heaptup != tup)
+ {
+ tup->t_self = heaptup->t_self;
+ heap_freetuple(heaptup);
+ }
+}
+
+/*
+ * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
+ * tuple header fields and toasts the tuple if necessary. Returns a toasted
+ * version of the tuple if it was toasted, or the original tuple if not. Note
+ * that in any case, the header fields are also set in the original tuple.
+ */
+static HeapTuple
+heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
+ CommandId cid, int options)
+{
+ /*
+	 * To allow parallel inserts, we need to ensure that they are safe to
+	 * perform in workers. We have the infrastructure to allow parallel
+	 * inserts in general except for the cases where inserts generate a new
+	 * CommandId (e.g., inserts into a table having a foreign key column).
+ */
+ if (IsParallelWorker())
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
+ errmsg("cannot insert tuples in a parallel worker")));
+
+ tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
+ tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
+ tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
+ HeapTupleHeaderSetXmin(tup->t_data, xid);
+ if (options & HEAP_INSERT_FROZEN)
+ HeapTupleHeaderSetXminFrozen(tup->t_data);
+
+ HeapTupleHeaderSetCmin(tup->t_data, cid);
+ HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
+ tup->t_tableOid = RelationGetRelid(relation);
+
+ /*
+ * If the new tuple is too big for storage or contains already toasted
+ * out-of-line attributes from some other relation, invoke the toaster.
+ */
+ if (relation->rd_rel->relkind != RELKIND_RELATION &&
+ relation->rd_rel->relkind != RELKIND_MATVIEW)
+ {
+ /* toast table entries should never be recursively toasted */
+ Assert(!HeapTupleHasExternal(tup));
+ return tup;
+ }
+ else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
+ return heap_toast_insert_or_update(relation, tup, NULL, options);
+ else
+ return tup;
+}
+
+/*
+ * heap_multi_insert - insert multiple tuples into a heap
+ *
+ * This is like heap_insert(), but inserts multiple tuples in one operation.
+ * That's faster than calling heap_insert() in a loop, because when multiple
+ * tuples can be inserted on a single page, we can write just a single WAL
+ * record covering all of them, and only need to lock/unlock the page once.
+ *
+ * Note: this leaks memory into the current memory context. You can create a
+ * temporary context before calling this, if that's a problem.
+ */
+void
+heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
+ CommandId cid, int options, BulkInsertState bistate)
+{
+ TransactionId xid = GetCurrentTransactionId();
+ HeapTuple *heaptuples;
+ int i;
+ int ndone;
+ PGAlignedBlock scratch;
+ Page page;
+ Buffer vmbuffer = InvalidBuffer;
+ bool needwal;
+ Size saveFreeSpace;
+ bool need_tuple_data = RelationIsLogicallyLogged(relation);
+ bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
+
+ /* currently not needed (thus unsupported) for heap_multi_insert() */
+ AssertArg(!(options & HEAP_INSERT_NO_LOGICAL));
+
+ needwal = RelationNeedsWAL(relation);
+ saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
+ HEAP_DEFAULT_FILLFACTOR);
+
+ /* Toast and set header data in all the slots */
+ heaptuples = palloc(ntuples * sizeof(HeapTuple));
+ for (i = 0; i < ntuples; i++)
+ {
+ HeapTuple tuple;
+
+ tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
+ slots[i]->tts_tableOid = RelationGetRelid(relation);
+ tuple->t_tableOid = slots[i]->tts_tableOid;
+ heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
+ options);
+ }
+
+ /*
+ * We're about to do the actual inserts -- but check for conflict first,
+ * to minimize the possibility of having to roll back work we've just
+ * done.
+ *
+ * A check here does not definitively prevent a serialization anomaly;
+ * that check MUST be done at least past the point of acquiring an
+ * exclusive buffer content lock on every buffer that will be affected,
+ * and MAY be done after all inserts are reflected in the buffers and
+ * those locks are released; otherwise there is a race condition. Since
+ * multiple buffers can be locked and unlocked in the loop below, and it
+ * would not be feasible to identify and lock all of those buffers before
+ * the loop, we must do a final check at the end.
+ *
+ * The check here could be omitted with no loss of correctness; it is
+ * present strictly as an optimization.
+ *
+ * For heap inserts, we only need to check for table-level SSI locks. Our
+ * new tuples can't possibly conflict with existing tuple locks, and heap
+ * page locks are only consolidated versions of tuple locks; they do not
+ * lock "gaps" as index page locks do. So we don't need to specify a
+ * buffer when making the call, which makes for a faster check.
+ */
+ CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
+
+ ndone = 0;
+ while (ndone < ntuples)
+ {
+ Buffer buffer;
+ bool starting_with_empty_page;
+ bool all_visible_cleared = false;
+ bool all_frozen_set = false;
+ int nthispage;
+
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * Find buffer where at least the next tuple will fit. If the page is
+ * all-visible, this will also pin the requisite visibility map page.
+ *
+ * Also pin visibility map page if COPY FREEZE inserts tuples into an
+ * empty page. See all_frozen_set below.
+ */
+ buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
+ InvalidBuffer, options, bistate,
+ &vmbuffer, NULL);
+ page = BufferGetPage(buffer);
+
+ starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
+
+ if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
+ all_frozen_set = true;
+
+ /* NO EREPORT(ERROR) from here till changes are logged */
+ START_CRIT_SECTION();
+
+ /*
+ * RelationGetBufferForTuple has ensured that the first tuple fits.
+ * Put that on the page, and then as many other tuples as fit.
+ */
+ RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
+
+ /*
+ * For logical decoding we need combo CIDs to properly decode the
+ * catalog.
+ */
+ if (needwal && need_cids)
+ log_heap_new_cid(relation, heaptuples[ndone]);
+
+ for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
+ {
+ HeapTuple heaptup = heaptuples[ndone + nthispage];
+
+ if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
+ break;
+
+ RelationPutHeapTuple(relation, buffer, heaptup, false);
+
+ /*
+ * For logical decoding we need combo CIDs to properly decode the
+ * catalog.
+ */
+ if (needwal && need_cids)
+ log_heap_new_cid(relation, heaptup);
+ }
+
+ /*
+ * If the page is all visible, need to clear that, unless we're only
+ * going to add further frozen rows to it.
+ *
+ * If we're only adding already frozen rows to a previously empty
+ * page, mark it as all-visible.
+ */
+ if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
+ {
+ all_visible_cleared = true;
+ PageClearAllVisible(page);
+ visibilitymap_clear(relation,
+ BufferGetBlockNumber(buffer),
+ vmbuffer, VISIBILITYMAP_VALID_BITS);
+ }
+ else if (all_frozen_set)
+ PageSetAllVisible(page);
+
+ /*
+ * XXX Should we set PageSetPrunable on this page ? See heap_insert()
+ */
+
+ MarkBufferDirty(buffer);
+
+ /* XLOG stuff */
+ if (needwal)
+ {
+ XLogRecPtr recptr;
+ xl_heap_multi_insert *xlrec;
+ uint8 info = XLOG_HEAP2_MULTI_INSERT;
+ char *tupledata;
+ int totaldatalen;
+ char *scratchptr = scratch.data;
+ bool init;
+ int bufflags = 0;
+
+ /*
+ * If the page was previously empty, we can reinit the page
+ * instead of restoring the whole thing.
+ */
+ init = starting_with_empty_page;
+
+ /* allocate xl_heap_multi_insert struct from the scratch area */
+ xlrec = (xl_heap_multi_insert *) scratchptr;
+ scratchptr += SizeOfHeapMultiInsert;
+
+ /*
+			 * Allocate the offsets array, unless we're reinitializing the
+			 * page; in that case the tuples are stored in order starting at
+			 * FirstOffsetNumber, so we don't need to store the offsets
+			 * explicitly.
+ */
+ if (!init)
+ scratchptr += nthispage * sizeof(OffsetNumber);
+
+ /* the rest of the scratch space is used for tuple data */
+ tupledata = scratchptr;
+
+ /* check that the mutually exclusive flags are not both set */
+ Assert(!(all_visible_cleared && all_frozen_set));
+
+ xlrec->flags = 0;
+ if (all_visible_cleared)
+ xlrec->flags = XLH_INSERT_ALL_VISIBLE_CLEARED;
+ if (all_frozen_set)
+ xlrec->flags = XLH_INSERT_ALL_FROZEN_SET;
+
+ xlrec->ntuples = nthispage;
+
+ /*
+ * Write out an xl_multi_insert_tuple and the tuple data itself
+ * for each tuple.
+ */
+ for (i = 0; i < nthispage; i++)
+ {
+ HeapTuple heaptup = heaptuples[ndone + i];
+ xl_multi_insert_tuple *tuphdr;
+ int datalen;
+
+ if (!init)
+ xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
+ /* xl_multi_insert_tuple needs two-byte alignment. */
+ tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
+ scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
+
+ tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
+ tuphdr->t_infomask = heaptup->t_data->t_infomask;
+ tuphdr->t_hoff = heaptup->t_data->t_hoff;
+
+ /* write bitmap [+ padding] [+ oid] + data */
+ datalen = heaptup->t_len - SizeofHeapTupleHeader;
+ memcpy(scratchptr,
+ (char *) heaptup->t_data + SizeofHeapTupleHeader,
+ datalen);
+ tuphdr->datalen = datalen;
+ scratchptr += datalen;
+ }
+ totaldatalen = scratchptr - tupledata;
+ Assert((scratchptr - scratch.data) < BLCKSZ);
+
+ if (need_tuple_data)
+ xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
+
+ /*
+ * Signal that this is the last xl_heap_multi_insert record
+ * emitted by this call to heap_multi_insert(). Needed for logical
+ * decoding so it knows when to cleanup temporary data.
+ */
+ if (ndone + nthispage == ntuples)
+ xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
+
+ if (init)
+ {
+ info |= XLOG_HEAP_INIT_PAGE;
+ bufflags |= REGBUF_WILL_INIT;
+ }
+
+ /*
+ * If we're doing logical decoding, include the new tuple data
+ * even if we take a full-page image of the page.
+ */
+ if (need_tuple_data)
+ bufflags |= REGBUF_KEEP_DATA;
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) xlrec, tupledata - scratch.data);
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
+
+ XLogRegisterBufData(0, tupledata, totaldatalen);
+
+ /* filtering by origin on a row level is much more efficient */
+ XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
+
+ recptr = XLogInsert(RM_HEAP2_ID, info);
+
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ /*
+ * If we've frozen everything on the page, update the visibilitymap.
+ * We're already holding pin on the vmbuffer.
+ */
+ if (all_frozen_set)
+ {
+ Assert(PageIsAllVisible(page));
+ Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer));
+
+ /*
+ * It's fine to use InvalidTransactionId here - this is only used
+ * when HEAP_INSERT_FROZEN is specified, which intentionally
+ * violates visibility rules.
+ */
+ visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer,
+ InvalidXLogRecPtr, vmbuffer,
+ InvalidTransactionId,
+ VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
+ }
+
+ UnlockReleaseBuffer(buffer);
+ ndone += nthispage;
+
+ /*
+		 * NB: Only release vmbuffer after inserting all tuples - it's
+		 * fairly likely that subsequent heap pages we insert into will use
+		 * the same vm page.
+ */
+ }
+
+ /* We're done with inserting all tuples, so release the last vmbuffer. */
+ if (vmbuffer != InvalidBuffer)
+ ReleaseBuffer(vmbuffer);
+
+ /*
+ * We're done with the actual inserts. Check for conflicts again, to
+ * ensure that all rw-conflicts in to these inserts are detected. Without
+ * this final check, a sequential scan of the heap may have locked the
+ * table after the "before" check, missing one opportunity to detect the
+ * conflict, and then scanned the table before the new tuples were there,
+ * missing the other chance to detect the conflict.
+ *
+ * For heap inserts, we only need to check for table-level SSI locks. Our
+ * new tuples can't possibly conflict with existing tuple locks, and heap
+ * page locks are only consolidated versions of tuple locks; they do not
+ * lock "gaps" as index page locks do. So we don't need to specify a
+ * buffer when making the call.
+ */
+ CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
+
+ /*
+ * If tuples are cachable, mark them for invalidation from the caches in
+ * case we abort. Note it is OK to do this after releasing the buffer,
+ * because the heaptuples data structure is all in local memory, not in
+ * the shared buffer.
+ */
+ if (IsCatalogRelation(relation))
+ {
+ for (i = 0; i < ntuples; i++)
+ CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
+ }
+
+ /* copy t_self fields back to the caller's slots */
+ for (i = 0; i < ntuples; i++)
+ slots[i]->tts_tid = heaptuples[i]->t_self;
+
+ pgstat_count_heap_insert(relation, ntuples);
+}
+
+/*
+ * simple_heap_insert - insert a tuple
+ *
+ * Currently, this routine differs from heap_insert only in supplying
+ * a default command ID and not allowing access to the speedup options.
+ *
+ * This should be used rather than using heap_insert directly in most places
+ * where we are modifying system catalogs.
+ */
+void
+simple_heap_insert(Relation relation, HeapTuple tup)
+{
+ heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
+}
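+
+/*
+ * A minimal usage sketch (illustrative only), assuming "rel" is a catalog
+ * relation opened and locked by the caller and values/nulls describe one
+ * row:
+ *
+ *     HeapTuple tup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
+ *
+ *     simple_heap_insert(rel, tup);
+ *     heap_freetuple(tup);
+ *
+ * Note this touches only the heap; callers such as CatalogTupleInsert must
+ * make the matching index insertions separately.
+ */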
+
+/*
+ * Given infomask/infomask2, compute the bits that must be saved in the
+ * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
+ * xl_heap_lock_updated WAL records.
+ *
+ * See fix_infomask_from_infobits.
+ */
+static uint8
+compute_infobits(uint16 infomask, uint16 infomask2)
+{
+ return
+ ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
+ ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
+ ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
+ /* note we ignore HEAP_XMAX_SHR_LOCK here */
+ ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
+ ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
+ XLHL_KEYS_UPDATED : 0);
+}
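+
+/*
+ * A minimal sketch of the round-trip, assuming fix_infomask_from_infobits
+ * on the replay side: for a multixact xmax holding only a key-share lock,
+ *
+ *     compute_infobits(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
+ *                      HEAP_XMAX_KEYSHR_LOCK, 0)
+ *
+ * yields XLHL_XMAX_IS_MULTI | XLHL_XMAX_LOCK_ONLY | XLHL_XMAX_KEYSHR_LOCK.
+ * HEAP_XMAX_SHR_LOCK needs no bit of its own because it is defined as
+ * HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK and so is reconstructed
+ * from those two bits on replay.
+ */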
+
+/*
+ * Given two versions of the same t_infomask for a tuple, compare them and
+ * return whether the relevant status for a tuple Xmax has changed. This is
+ * used after a buffer lock has been released and reacquired: we want to ensure
+ * that the tuple state continues to be the same it was when we previously
+ * examined it.
+ *
+ * Note the Xmax field itself must be compared separately.
+ */
+static inline bool
+xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
+{
+ const uint16 interesting =
+ HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
+
+ if ((new_infomask & interesting) != (old_infomask & interesting))
+ return true;
+
+ return false;
+}
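+
+/*
+ * A minimal sketch of the recheck idiom this helper supports (the same
+ * pattern appears below in heap_delete, heap_update and heap_lock_tuple):
+ *
+ *     xwait = HeapTupleHeaderGetRawXmax(tup->t_data);
+ *     infomask = tup->t_data->t_infomask;
+ *     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ *     ... sleep until the concurrent transaction ends ...
+ *     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ *     if (xmax_infomask_changed(tup->t_data->t_infomask, infomask) ||
+ *         !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tup->t_data),
+ *                              xwait))
+ *         goto retry;
+ */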
+
+/*
+ * heap_delete - delete a tuple
+ *
+ * See table_tuple_delete() for an explanation of the parameters, except that
+ * this routine directly takes a tuple rather than a slot.
+ *
+ * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
+ * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
+ * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
+ * generated by another transaction).
+ */
+TM_Result
+heap_delete(Relation relation, ItemPointer tid,
+ CommandId cid, Snapshot crosscheck, bool wait,
+ TM_FailureData *tmfd, bool changingPart)
+{
+ TM_Result result;
+ TransactionId xid = GetCurrentTransactionId();
+ ItemId lp;
+ HeapTupleData tp;
+ Page page;
+ BlockNumber block;
+ Buffer buffer;
+ Buffer vmbuffer = InvalidBuffer;
+ TransactionId new_xmax;
+ uint16 new_infomask,
+ new_infomask2;
+ bool have_tuple_lock = false;
+ bool iscombo;
+ bool all_visible_cleared = false;
+ HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
+ bool old_key_copied = false;
+
+ Assert(ItemPointerIsValid(tid));
+
+ /*
+ * Forbid this during a parallel operation, lest it allocate a combo CID.
+ * Other workers might need that combo CID for visibility checks, and we
+ * have no provision for broadcasting it to them.
+ */
+ if (IsInParallelMode())
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
+ errmsg("cannot delete tuples during a parallel operation")));
+
+ block = ItemPointerGetBlockNumber(tid);
+ buffer = ReadBuffer(relation, block);
+ page = BufferGetPage(buffer);
+
+ /*
+ * Before locking the buffer, pin the visibility map page if it appears to
+ * be necessary. Since we haven't got the lock yet, someone else might be
+ * in the middle of changing this, so we'll need to recheck after we have
+ * the lock.
+ */
+ if (PageIsAllVisible(page))
+ visibilitymap_pin(relation, block, &vmbuffer);
+
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * If we didn't pin the visibility map page and the page has become all
+ * visible while we were busy locking the buffer, we'll have to unlock and
+ * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
+ * unfortunate, but hopefully shouldn't happen often.
+ */
+ if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
+ {
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ visibilitymap_pin(relation, block, &vmbuffer);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ }
+
+ lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
+ Assert(ItemIdIsNormal(lp));
+
+ tp.t_tableOid = RelationGetRelid(relation);
+ tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
+ tp.t_len = ItemIdGetLength(lp);
+ tp.t_self = *tid;
+
+l1:
+ result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
+
+ if (result == TM_Invisible)
+ {
+ UnlockReleaseBuffer(buffer);
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("attempted to delete invisible tuple")));
+ }
+ else if (result == TM_BeingModified && wait)
+ {
+ TransactionId xwait;
+ uint16 infomask;
+
+ /* must copy state data before unlocking buffer */
+ xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
+ infomask = tp.t_data->t_infomask;
+
+ /*
+ * Sleep until concurrent transaction ends -- except when there's a
+ * single locker and it's our own transaction. Note we don't care
+ * which lock mode the locker has, because we need the strongest one.
+ *
+ * Before sleeping, we need to acquire tuple lock to establish our
+ * priority for the tuple (see heap_lock_tuple). LockTuple will
+ * release us when we are next-in-line for the tuple.
+ *
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
+ * tuple state.
+ */
+ if (infomask & HEAP_XMAX_IS_MULTI)
+ {
+ bool current_is_member = false;
+
+ if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
+ LockTupleExclusive, &current_is_member))
+ {
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ /*
+ * Acquire the lock, if necessary (but skip it when we're
+ * requesting a lock and already have one; avoids deadlock).
+ */
+ if (!current_is_member)
+ heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
+ LockWaitBlock, &have_tuple_lock);
+
+ /* wait for multixact */
+ MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
+ relation, &(tp.t_self), XLTW_Delete,
+ NULL);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * If xwait had just locked the tuple then some other xact
+ * could update this tuple before we get to this point. Check
+ * for xmax change, and start over if so.
+ */
+ if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
+ xwait))
+ goto l1;
+ }
+
+ /*
+ * You might think the multixact is necessarily done here, but not
+ * so: it could have surviving members, namely our own xact or
+ * other subxacts of this backend. It is legal for us to delete
+ * the tuple in either case, however (the latter case is
+ * essentially a situation of upgrading our former shared lock to
+ * exclusive). We don't bother changing the on-disk hint bits
+ * since we are about to overwrite the xmax altogether.
+ */
+ }
+ else if (!TransactionIdIsCurrentTransactionId(xwait))
+ {
+ /*
+ * Wait for regular transaction to end; but first, acquire tuple
+ * lock.
+ */
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
+ LockWaitBlock, &have_tuple_lock);
+ XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * xwait is done, but if xwait had just locked the tuple then some
+ * other xact could update this tuple before we get to this point.
+ * Check for xmax change, and start over if so.
+ */
+ if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
+ xwait))
+ goto l1;
+
+ /* Otherwise check if it committed or aborted */
+ UpdateXmaxHintBits(tp.t_data, buffer, xwait);
+ }
+
+ /*
+ * We may overwrite if previous xmax aborted, or if it committed but
+ * only locked the tuple without updating it.
+ */
+ if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
+ HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
+ HeapTupleHeaderIsOnlyLocked(tp.t_data))
+ result = TM_Ok;
+ else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
+ result = TM_Updated;
+ else
+ result = TM_Deleted;
+ }
+
+ if (crosscheck != InvalidSnapshot && result == TM_Ok)
+ {
+ /* Perform additional check for transaction-snapshot mode RI updates */
+ if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
+ result = TM_Updated;
+ }
+
+ if (result != TM_Ok)
+ {
+ Assert(result == TM_SelfModified ||
+ result == TM_Updated ||
+ result == TM_Deleted ||
+ result == TM_BeingModified);
+ Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
+ Assert(result != TM_Updated ||
+ !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
+ tmfd->ctid = tp.t_data->t_ctid;
+ tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
+ if (result == TM_SelfModified)
+ tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
+ else
+ tmfd->cmax = InvalidCommandId;
+ UnlockReleaseBuffer(buffer);
+ if (have_tuple_lock)
+ UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
+ if (vmbuffer != InvalidBuffer)
+ ReleaseBuffer(vmbuffer);
+ return result;
+ }
+
+ /*
+ * We're about to do the actual delete -- check for conflict first, to
+ * avoid possibly having to roll back work we've just done.
+ *
+ * This is safe without a recheck as long as there is no possibility of
+ * another process scanning the page between this check and the delete
+ * being visible to the scan (i.e., an exclusive buffer content lock is
+ * continuously held from this point until the tuple delete is visible).
+ */
+ CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
+
+ /* replace cid with a combo CID if necessary */
+ HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
+
+ /*
+ * Compute replica identity tuple before entering the critical section so
+ * we don't PANIC upon a memory allocation failure.
+ */
+ old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
+
+ /*
+ * If this is the first possibly-multixact-able operation in the current
+ * transaction, set my per-backend OldestMemberMXactId setting. We can be
+ * certain that the transaction will never become a member of any older
+ * MultiXactIds than that. (We have to do this even if we end up just
+ * using our own TransactionId below, since some other backend could
+ * incorporate our XID into a MultiXact immediately afterwards.)
+ */
+ MultiXactIdSetOldestMember();
+
+ compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
+ tp.t_data->t_infomask, tp.t_data->t_infomask2,
+ xid, LockTupleExclusive, true,
+ &new_xmax, &new_infomask, &new_infomask2);
+
+ START_CRIT_SECTION();
+
+ /*
+ * If this transaction commits, the tuple will become DEAD sooner or
+ * later. Set flag that this page is a candidate for pruning once our xid
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * the subsequent page pruning will be a no-op and the hint will be
+ * cleared.
+ */
+ PageSetPrunable(page, xid);
+
+ if (PageIsAllVisible(page))
+ {
+ all_visible_cleared = true;
+ PageClearAllVisible(page);
+ visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
+ vmbuffer, VISIBILITYMAP_VALID_BITS);
+ }
+
+ /* store transaction information of xact deleting the tuple */
+ tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+ tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ tp.t_data->t_infomask |= new_infomask;
+ tp.t_data->t_infomask2 |= new_infomask2;
+ HeapTupleHeaderClearHotUpdated(tp.t_data);
+ HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
+ HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
+ /* Make sure there is no forward chain link in t_ctid */
+ tp.t_data->t_ctid = tp.t_self;
+
+ /* Signal that this is actually a move into another partition */
+ if (changingPart)
+ HeapTupleHeaderSetMovedPartitions(tp.t_data);
+
+ MarkBufferDirty(buffer);
+
+ /*
+ * XLOG stuff
+ *
+ * NB: heap_abort_speculative() uses the same xlog record and replay
+ * routines.
+ */
+ if (RelationNeedsWAL(relation))
+ {
+ xl_heap_delete xlrec;
+ xl_heap_header xlhdr;
+ XLogRecPtr recptr;
+
+ /*
+ * For logical decode we need combo CIDs to properly decode the
+ * catalog
+ */
+ if (RelationIsAccessibleInLogicalDecoding(relation))
+ log_heap_new_cid(relation, &tp);
+
+ xlrec.flags = 0;
+ if (all_visible_cleared)
+ xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
+ if (changingPart)
+ xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
+ xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
+ tp.t_data->t_infomask2);
+ xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
+ xlrec.xmax = new_xmax;
+
+ if (old_key_tuple != NULL)
+ {
+ if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
+ xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
+ else
+ xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
+ }
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
+
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+
+ /*
+ * Log replica identity of the deleted tuple if there is one
+ */
+ if (old_key_tuple != NULL)
+ {
+ xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
+ xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
+ xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
+
+ XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
+ XLogRegisterData((char *) old_key_tuple->t_data
+ + SizeofHeapTupleHeader,
+ old_key_tuple->t_len
+ - SizeofHeapTupleHeader);
+ }
+
+ /* filtering by origin on a row level is much more efficient */
+ XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
+
+ recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
+
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ if (vmbuffer != InvalidBuffer)
+ ReleaseBuffer(vmbuffer);
+
+ /*
+ * If the tuple has toasted out-of-line attributes, we need to delete
+ * those items too. We have to do this before releasing the buffer
+ * because we need to look at the contents of the tuple, but it's OK to
+ * release the content lock on the buffer first.
+ */
+ if (relation->rd_rel->relkind != RELKIND_RELATION &&
+ relation->rd_rel->relkind != RELKIND_MATVIEW)
+ {
+ /* toast table entries should never be recursively toasted */
+ Assert(!HeapTupleHasExternal(&tp));
+ }
+ else if (HeapTupleHasExternal(&tp))
+ heap_toast_delete(relation, &tp, false);
+
+ /*
+ * Mark tuple for invalidation from system caches at next command
+ * boundary. We have to do this before releasing the buffer because we
+ * need to look at the contents of the tuple.
+ */
+ CacheInvalidateHeapTuple(relation, &tp, NULL);
+
+ /* Now we can release the buffer */
+ ReleaseBuffer(buffer);
+
+ /*
+ * Release the lmgr tuple lock, if we had it.
+ */
+ if (have_tuple_lock)
+ UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
+
+ pgstat_count_heap_delete(relation);
+
+ if (old_key_tuple != NULL && old_key_copied)
+ heap_freetuple(old_key_tuple);
+
+ return TM_Ok;
+}
+
+/*
+ * simple_heap_delete - delete a tuple
+ *
+ * This routine may be used to delete a tuple when concurrent updates of
+ * the target tuple are not expected (for example, because we have a lock
+ * on the relation associated with the tuple). Any failure is reported
+ * via ereport().
+ */
+void
+simple_heap_delete(Relation relation, ItemPointer tid)
+{
+ TM_Result result;
+ TM_FailureData tmfd;
+
+ result = heap_delete(relation, tid,
+ GetCurrentCommandId(true), InvalidSnapshot,
+ true /* wait for commit */ ,
+ &tmfd, false /* changingPart */ );
+ switch (result)
+ {
+ case TM_SelfModified:
+ /* Tuple was already updated in current command? */
+ elog(ERROR, "tuple already updated by self");
+ break;
+
+ case TM_Ok:
+ /* done successfully */
+ break;
+
+ case TM_Updated:
+ elog(ERROR, "tuple concurrently updated");
+ break;
+
+ case TM_Deleted:
+ elog(ERROR, "tuple concurrently deleted");
+ break;
+
+ default:
+ elog(ERROR, "unrecognized heap_delete status: %u", result);
+ break;
+ }
+}
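+
+/*
+ * A minimal usage sketch (illustrative only): catalog code that located
+ * "tup" under an adequate relation-level lock can simply do
+ *
+ *     simple_heap_delete(rel, &tup->t_self);
+ *
+ * since any concurrency failure is promoted to elog(ERROR) rather than
+ * returned as a TM_Result.
+ */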
+
+/*
+ * heap_update - replace a tuple
+ *
+ * See table_tuple_update() for an explanation of the parameters, except that
+ * this routine directly takes a tuple rather than a slot.
+ *
+ * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
+ * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
+ * only for TM_SelfModified, since we cannot obtain cmax from a combo CID
+ * generated by another transaction).
+ */
+TM_Result
+heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
+ CommandId cid, Snapshot crosscheck, bool wait,
+ TM_FailureData *tmfd, LockTupleMode *lockmode)
+{
+ TM_Result result;
+ TransactionId xid = GetCurrentTransactionId();
+ Bitmapset *hot_attrs;
+ Bitmapset *key_attrs;
+ Bitmapset *id_attrs;
+ Bitmapset *interesting_attrs;
+ Bitmapset *modified_attrs;
+ ItemId lp;
+ HeapTupleData oldtup;
+ HeapTuple heaptup;
+ HeapTuple old_key_tuple = NULL;
+ bool old_key_copied = false;
+ Page page;
+ BlockNumber block;
+ MultiXactStatus mxact_status;
+ Buffer buffer,
+ newbuf,
+ vmbuffer = InvalidBuffer,
+ vmbuffer_new = InvalidBuffer;
+ bool need_toast;
+ Size newtupsize,
+ pagefree;
+ bool have_tuple_lock = false;
+ bool iscombo;
+ bool use_hot_update = false;
+ bool hot_attrs_checked = false;
+ bool key_intact;
+ bool all_visible_cleared = false;
+ bool all_visible_cleared_new = false;
+ bool checked_lockers;
+ bool locker_remains;
+ bool id_has_external = false;
+ TransactionId xmax_new_tuple,
+ xmax_old_tuple;
+ uint16 infomask_old_tuple,
+ infomask2_old_tuple,
+ infomask_new_tuple,
+ infomask2_new_tuple;
+
+ Assert(ItemPointerIsValid(otid));
+
+ /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
+ Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
+ RelationGetNumberOfAttributes(relation));
+
+ /*
+ * Forbid this during a parallel operation, lest it allocate a combo CID.
+ * Other workers might need that combo CID for visibility checks, and we
+ * have no provision for broadcasting it to them.
+ */
+ if (IsInParallelMode())
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
+ errmsg("cannot update tuples during a parallel operation")));
+
+ /*
+ * Fetch the list of attributes to be checked for various operations.
+ *
+ * For HOT considerations, this is wasted effort if we fail to update or
+ * have to put the new tuple on a different page. But we must compute the
+ * list before obtaining buffer lock --- in the worst case, if we are
+ * doing an update on one of the relevant system catalogs, we could
+ * deadlock if we try to fetch the list later. In any case, the relcache
+ * caches the data so this is usually pretty cheap.
+ *
+ * We also need columns used by the replica identity and columns that are
+ * considered the "key" of rows in the table.
+ *
+ * Note that we get copies of each bitmap, so we need not worry about
+ * relcache flush happening midway through.
+ */
+ hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_ALL);
+ key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
+ id_attrs = RelationGetIndexAttrBitmap(relation,
+ INDEX_ATTR_BITMAP_IDENTITY_KEY);
+
+
+ block = ItemPointerGetBlockNumber(otid);
+ buffer = ReadBuffer(relation, block);
+ page = BufferGetPage(buffer);
+
+ interesting_attrs = NULL;
+
+ /*
+ * If the page is already full, there is hardly any chance of doing a HOT
+ * update on this page. It might be wasteful effort to look for index
+ * column updates only to later reject HOT updates for lack of space in
+ * the same page. So we are conservative and only fetch hot_attrs if the
+ * page is not already full. Since we are already holding a pin on the
+ * buffer, there is no chance that the buffer can get cleaned up
+ * concurrently; and even if that were possible, in the worst case we
+ * would merely lose a chance to do a HOT update.
+ */
+ if (!PageIsFull(page))
+ {
+ interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
+ hot_attrs_checked = true;
+ }
+ interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
+ interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
+
+ /*
+ * Before locking the buffer, pin the visibility map page if it appears to
+ * be necessary. Since we haven't got the lock yet, someone else might be
+ * in the middle of changing this, so we'll need to recheck after we have
+ * the lock.
+ */
+ if (PageIsAllVisible(page))
+ visibilitymap_pin(relation, block, &vmbuffer);
+
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
+ Assert(ItemIdIsNormal(lp));
+
+ /*
+ * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
+ * properly.
+ */
+ oldtup.t_tableOid = RelationGetRelid(relation);
+ oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
+ oldtup.t_len = ItemIdGetLength(lp);
+ oldtup.t_self = *otid;
+
+ /* the new tuple is ready, except for this: */
+ newtup->t_tableOid = RelationGetRelid(relation);
+
+ /*
+ * Determine columns modified by the update. Additionally, identify
+ * whether any unmodified replica identity key attribute of the old
+ * tuple is stored externally. This is required because for such
+ * attributes the flattened value won't be WAL-logged as part of the
+ * new tuple, so we must include it in the old_key_tuple. See
+ * ExtractReplicaIdentity.
+ */
+ modified_attrs = HeapDetermineColumnsInfo(relation, interesting_attrs,
+ id_attrs, &oldtup,
+ newtup, &id_has_external);
+
+ /*
+ * If we're not updating any "key" column, we can grab a weaker lock type.
+ * This allows for more concurrency when we are running simultaneously
+ * with foreign key checks.
+ *
+ * Note that if a column gets detoasted while executing the update, but
+ * the value ends up being the same, this test will fail and we will use
+ * the stronger lock. This is acceptable; the important case to optimize
+ * is updates that don't manipulate key columns, not those that
+ * serendipitously arrive at the same key values.
+ */
+ if (!bms_overlap(modified_attrs, key_attrs))
+ {
+ *lockmode = LockTupleNoKeyExclusive;
+ mxact_status = MultiXactStatusNoKeyUpdate;
+ key_intact = true;
+
+ /*
+ * If this is the first possibly-multixact-able operation in the
+ * current transaction, set my per-backend OldestMemberMXactId
+ * setting. We can be certain that the transaction will never become a
+ * member of any older MultiXactIds than that. (We have to do this
+ * even if we end up just using our own TransactionId below, since
+ * some other backend could incorporate our XID into a MultiXact
+ * immediately afterwards.)
+ */
+ MultiXactIdSetOldestMember();
+ }
+ else
+ {
+ *lockmode = LockTupleExclusive;
+ mxact_status = MultiXactStatusUpdate;
+ key_intact = false;
+ }
+
+ /*
+ * Note: beyond this point, use oldtup not otid to refer to old tuple.
+ * otid may very well point at newtup->t_self, which we will overwrite
+ * with the new tuple's location, so there's great risk of confusion if we
+ * use otid anymore.
+ */
+
+l2:
+ checked_lockers = false;
+ locker_remains = false;
+ result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
+
+ /* see below about the "no wait" case */
+ Assert(result != TM_BeingModified || wait);
+
+ if (result == TM_Invisible)
+ {
+ UnlockReleaseBuffer(buffer);
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("attempted to update invisible tuple")));
+ }
+ else if (result == TM_BeingModified && wait)
+ {
+ TransactionId xwait;
+ uint16 infomask;
+ bool can_continue = false;
+
+ /*
+ * XXX note that we don't consider the "no wait" case here. This
+ * isn't a problem currently because no caller uses that case, but it
+ * should be fixed if such a caller is introduced. It wasn't a
+ * problem previously because this code would always wait, but now
+ * that some tuple locks do not conflict with one of the lock modes we
+ * use, it is possible that this case is interesting to handle
+ * specially.
+ *
+ * This may cause failures with third-party code that calls
+ * heap_update directly.
+ */
+
+ /* must copy state data before unlocking buffer */
+ xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
+ infomask = oldtup.t_data->t_infomask;
+
+ /*
+ * Now we have to do something about the existing locker. If it's a
+ * multi, sleep on it; we might be awakened before it is completely
+ * gone (or even not sleep at all in some cases); we need to preserve
+ * it as locker, unless it is gone completely.
+ *
+ * If it's not a multi, we need to check for sleeping conditions
+ * before actually going to sleep. If the update doesn't conflict
+ * with the locks, we just continue without sleeping (but making sure
+ * it is preserved).
+ *
+ * Before sleeping, we need to acquire tuple lock to establish our
+ * priority for the tuple (see heap_lock_tuple). LockTuple will
+ * release us when we are next-in-line for the tuple. Note we must
+ * not acquire the tuple lock until we're sure we're going to sleep;
+ * otherwise we're open for race conditions with other transactions
+ * holding the tuple lock which sleep on us.
+ *
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
+ * tuple state.
+ */
+ if (infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId update_xact;
+ int remain;
+ bool current_is_member = false;
+
+ if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
+ *lockmode, &current_is_member))
+ {
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ /*
+ * Acquire the lock, if necessary (but skip it when we're
+ * requesting a lock and already have one; avoids deadlock).
+ */
+ if (!current_is_member)
+ heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
+ LockWaitBlock, &have_tuple_lock);
+
+ /* wait for multixact */
+ MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
+ relation, &oldtup.t_self, XLTW_Update,
+ &remain);
+ checked_lockers = true;
+ locker_remains = remain != 0;
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * If xwait had just locked the tuple then some other xact
+ * could update this tuple before we get to this point. Check
+ * for xmax change, and start over if so.
+ */
+ if (xmax_infomask_changed(oldtup.t_data->t_infomask,
+ infomask) ||
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
+ goto l2;
+ }
+
+ /*
+ * Note that the multixact may not be done by now. It could have
+ * surviving members; our own xact or other subxacts of this
+ * backend, and also any other concurrent transaction that locked
+ * the tuple with LockTupleKeyShare if we only got
+ * LockTupleNoKeyExclusive. If this is the case, we have to be
+ * careful to mark the updated tuple with the surviving members in
+ * Xmax.
+ *
+ * Note that there could have been another update in the
+ * MultiXact. In that case, we need to check whether it committed
+ * or aborted. If it aborted we are safe to update it again;
+ * otherwise there is an update conflict, and we have to return
+ * TM_{Deleted,Updated} below.
+ *
+ * In the LockTupleExclusive case, we still need to preserve the
+ * surviving members: those would include the tuple locks we had
+ * before this one, which are important to keep in case this
+ * subxact aborts.
+ */
+ if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
+ update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
+ else
+ update_xact = InvalidTransactionId;
+
+ /*
+ * There was no UPDATE in the MultiXact; or it aborted. No
+ * TransactionIdIsInProgress() call needed here, since we called
+ * MultiXactIdWait() above.
+ */
+ if (!TransactionIdIsValid(update_xact) ||
+ TransactionIdDidAbort(update_xact))
+ can_continue = true;
+ }
+ else if (TransactionIdIsCurrentTransactionId(xwait))
+ {
+ /*
+ * The only locker is ourselves; we can avoid grabbing the tuple
+ * lock here, but must preserve our locking information.
+ */
+ checked_lockers = true;
+ locker_remains = true;
+ can_continue = true;
+ }
+ else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
+ {
+ /*
+ * If it's just a key-share locker, and we're not changing the key
+ * columns, we don't need to wait for it to end; but we need to
+ * preserve it as locker.
+ */
+ checked_lockers = true;
+ locker_remains = true;
+ can_continue = true;
+ }
+ else
+ {
+ /*
+ * Wait for regular transaction to end; but first, acquire tuple
+ * lock.
+ */
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
+ LockWaitBlock, &have_tuple_lock);
+ XactLockTableWait(xwait, relation, &oldtup.t_self,
+ XLTW_Update);
+ checked_lockers = true;
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * xwait is done, but if xwait had just locked the tuple then some
+ * other xact could update this tuple before we get to this point.
+ * Check for xmax change, and start over if so.
+ */
+ if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
+ !TransactionIdEquals(xwait,
+ HeapTupleHeaderGetRawXmax(oldtup.t_data)))
+ goto l2;
+
+ /* Otherwise check if it committed or aborted */
+ UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
+ if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
+ can_continue = true;
+ }
+
+ if (can_continue)
+ result = TM_Ok;
+ else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
+ result = TM_Updated;
+ else
+ result = TM_Deleted;
+ }
+
+ if (crosscheck != InvalidSnapshot && result == TM_Ok)
+ {
+ /* Perform additional check for transaction-snapshot mode RI updates */
+ if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
+ {
+ result = TM_Updated;
+ Assert(!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
+ }
+ }
+
+ if (result != TM_Ok)
+ {
+ Assert(result == TM_SelfModified ||
+ result == TM_Updated ||
+ result == TM_Deleted ||
+ result == TM_BeingModified);
+ Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
+ Assert(result != TM_Updated ||
+ !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
+ tmfd->ctid = oldtup.t_data->t_ctid;
+ tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
+ if (result == TM_SelfModified)
+ tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
+ else
+ tmfd->cmax = InvalidCommandId;
+ UnlockReleaseBuffer(buffer);
+ if (have_tuple_lock)
+ UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
+ if (vmbuffer != InvalidBuffer)
+ ReleaseBuffer(vmbuffer);
+ bms_free(hot_attrs);
+ bms_free(key_attrs);
+ bms_free(id_attrs);
+ bms_free(modified_attrs);
+ bms_free(interesting_attrs);
+ return result;
+ }
+
+ /*
+ * If we didn't pin the visibility map page and the page has become all
+ * visible while we were busy locking the buffer, or during some
+ * subsequent window during which we had it unlocked, we'll have to unlock
+ * and re-lock, to avoid holding the buffer lock across an I/O. That's a
+ * bit unfortunate, especially since we'll now have to recheck whether the
+ * tuple has been locked or updated under us, but hopefully it won't
+ * happen very often.
+ */
+ if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
+ {
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ visibilitymap_pin(relation, block, &vmbuffer);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ goto l2;
+ }
+
+ /* Fill in transaction status data */
+
+ /*
+ * If the tuple we're updating is locked, we need to preserve the locking
+ * info in the old tuple's Xmax. Prepare a new Xmax value for this.
+ */
+ compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ oldtup.t_data->t_infomask,
+ oldtup.t_data->t_infomask2,
+ xid, *lockmode, true,
+ &xmax_old_tuple, &infomask_old_tuple,
+ &infomask2_old_tuple);
+
+ /*
+ * And also prepare an Xmax value for the new copy of the tuple. If there
+ * was no xmax previously, or there was one but all lockers are now gone,
+ * then use InvalidXid; otherwise, get the xmax from the old tuple. (In
+ * rare cases that might also be InvalidXid and yet not have the
+ * HEAP_XMAX_INVALID bit set; that's fine.)
+ */
+ if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
+ HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
+ (checked_lockers && !locker_remains))
+ xmax_new_tuple = InvalidTransactionId;
+ else
+ xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
+
+ if (!TransactionIdIsValid(xmax_new_tuple))
+ {
+ infomask_new_tuple = HEAP_XMAX_INVALID;
+ infomask2_new_tuple = 0;
+ }
+ else
+ {
+ /*
+ * If we found a valid Xmax for the new tuple, then the infomask bits
+ * to use on the new tuple depend on what was there on the old one.
+ * Note that since we're doing an update, the only possibility is that
+ * the lockers had FOR KEY SHARE lock.
+ */
+ if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
+ &infomask2_new_tuple);
+ }
+ else
+ {
+ infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
+ infomask2_new_tuple = 0;
+ }
+ }
+
+ /*
+ * Prepare the new tuple with the appropriate initial values of Xmin and
+ * Xmax, as well as initial infomask bits as computed above.
+ */
+ newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
+ newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
+ HeapTupleHeaderSetXmin(newtup->t_data, xid);
+ HeapTupleHeaderSetCmin(newtup->t_data, cid);
+ newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
+ newtup->t_data->t_infomask2 |= infomask2_new_tuple;
+ HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
+
+ /*
+ * Replace cid with a combo CID if necessary. Note that we already put
+ * the plain cid into the new tuple.
+ */
+ HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
+
+ /*
+ * If the toaster needs to be activated, OR if the new tuple will not fit
+ * on the same page as the old, then we need to release the content lock
+ * (but not the pin!) on the old tuple's buffer while we are off doing
+ * TOAST and/or table-file-extension work. We must mark the old tuple to
+ * show that it's locked, else other processes may try to update it
+ * themselves.
+ *
+ * We need to invoke the toaster if there are already any out-of-line
+ * toasted values present, or if the new tuple is over-threshold.
+ */
+ if (relation->rd_rel->relkind != RELKIND_RELATION &&
+ relation->rd_rel->relkind != RELKIND_MATVIEW)
+ {
+ /* toast table entries should never be recursively toasted */
+ Assert(!HeapTupleHasExternal(&oldtup));
+ Assert(!HeapTupleHasExternal(newtup));
+ need_toast = false;
+ }
+ else
+ need_toast = (HeapTupleHasExternal(&oldtup) ||
+ HeapTupleHasExternal(newtup) ||
+ newtup->t_len > TOAST_TUPLE_THRESHOLD);
+
+ pagefree = PageGetHeapFreeSpace(page);
+
+ newtupsize = MAXALIGN(newtup->t_len);
+
+ if (need_toast || newtupsize > pagefree)
+ {
+ TransactionId xmax_lock_old_tuple;
+ uint16 infomask_lock_old_tuple,
+ infomask2_lock_old_tuple;
+ bool cleared_all_frozen = false;
+
+ /*
+ * To prevent concurrent sessions from updating the tuple, we have to
+ * temporarily mark it locked, while we release the page-level lock.
+ *
+ * To satisfy the rule that any xid potentially appearing in a buffer
+ * written out to disk must first be recorded in WAL, we unfortunately
+ * have to WAL log this temporary modification. We can reuse
+ * xl_heap_lock for this purpose. If we crash/error before following
+ * through with the actual update, xmax will be that of an aborted
+ * transaction, allowing
+ * other sessions to proceed.
+ */
+
+ /*
+ * Compute xmax / infomask appropriate for locking the tuple. This has
+ * to be done separately from the combo that's going to be used for
+ * updating, because the potentially created multixact would otherwise
+ * be wrong.
+ */
+ compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ oldtup.t_data->t_infomask,
+ oldtup.t_data->t_infomask2,
+ xid, *lockmode, false,
+ &xmax_lock_old_tuple, &infomask_lock_old_tuple,
+ &infomask2_lock_old_tuple);
+
+ Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
+
+ START_CRIT_SECTION();
+
+ /* Clear obsolete visibility flags ... */
+ oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+ oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ HeapTupleClearHotUpdated(&oldtup);
+ /* ... and store info about transaction updating this tuple */
+ Assert(TransactionIdIsValid(xmax_lock_old_tuple));
+ HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
+ oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
+ oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
+ HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
+
+ /* temporarily make it look not-updated, but locked */
+ oldtup.t_data->t_ctid = oldtup.t_self;
+
+ /*
+ * Clear all-frozen bit on visibility map if needed. We could
+ * immediately reset ALL_VISIBLE as well, but given that the WAL
+ * logging overhead would be unchanged, that doesn't seem worth
+ * doing.
+ */
+ if (PageIsAllVisible(page) &&
+ visibilitymap_clear(relation, block, vmbuffer,
+ VISIBILITYMAP_ALL_FROZEN))
+ cleared_all_frozen = true;
+
+ MarkBufferDirty(buffer);
+
+ if (RelationNeedsWAL(relation))
+ {
+ xl_heap_lock xlrec;
+ XLogRecPtr recptr;
+
+ XLogBeginInsert();
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+
+ xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
+ xlrec.locking_xid = xmax_lock_old_tuple;
+ xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
+ oldtup.t_data->t_infomask2);
+ xlrec.flags =
+ cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
+ XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
+ recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ /*
+ * Let the toaster do its thing, if needed.
+ *
+ * Note: below this point, heaptup is the data we actually intend to
+ * store into the relation; newtup is the caller's original untoasted
+ * data.
+ */
+ if (need_toast)
+ {
+ /* Note we always use WAL and FSM during updates */
+ heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0);
+ newtupsize = MAXALIGN(heaptup->t_len);
+ }
+ else
+ heaptup = newtup;
+
+ /*
+ * Now, do we need a new page for the tuple, or not? This is a bit
+ * tricky since someone else could have added tuples to the page while
+ * we weren't looking. We have to recheck the available space after
+ * reacquiring the buffer lock. But don't bother to do that if the
+ * former amount of free space is still not enough; it's unlikely
+ * there's more free now than before.
+ *
+ * What's more, if we need to get a new page, we will need to acquire
+ * buffer locks on both old and new pages. To avoid deadlock against
+ * some other backend trying to get the same two locks in the other
+ * order, we must be consistent about the order we get the locks in.
+ * We use the rule "lock the lower-numbered page of the relation
+ * first". To implement this, we must do RelationGetBufferForTuple
+ * while not holding the lock on the old page, and we must rely on it
+ * to get the locks on both pages in the correct order.
+ *
+ * Another consideration is that we need visibility map page pin(s) if
+ * we will have to clear the all-visible flag on either page. If we
+ * call RelationGetBufferForTuple, we rely on it to acquire any such
+ * pins; but if we don't, we have to handle that here. Hence we need
+ * a loop.
+ */
+ for (;;)
+ {
+ if (newtupsize > pagefree)
+ {
+ /* It doesn't fit, must use RelationGetBufferForTuple. */
+ newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
+ buffer, 0, NULL,
+ &vmbuffer_new, &vmbuffer);
+ /* We're all done. */
+ break;
+ }
+ /* Acquire VM page pin if needed and we don't have it. */
+ if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
+ visibilitymap_pin(relation, block, &vmbuffer);
+ /* Re-acquire the lock on the old tuple's page. */
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ /* Re-check using the up-to-date free space */
+ pagefree = PageGetHeapFreeSpace(page);
+ if (newtupsize > pagefree ||
+ (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
+ {
+ /*
+ * Rats, it doesn't fit anymore, or somebody just now set the
+ * all-visible flag. We must now unlock and loop to avoid
+ * deadlock. Fortunately, this path should seldom be taken.
+ */
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ }
+ else
+ {
+ /* We're all done. */
+ newbuf = buffer;
+ break;
+ }
+ }
+ }
+ else
+ {
+ /* No TOAST work needed, and it'll fit on same page */
+ newbuf = buffer;
+ heaptup = newtup;
+ }
+
+ /*
+ * We're about to do the actual update -- check for conflict first, to
+ * avoid possibly having to roll back work we've just done.
+ *
+ * This is safe without a recheck as long as there is no possibility of
+ * another process scanning the pages between this check and the update
+ * being visible to the scan (i.e., exclusive buffer content lock(s) are
+ * continuously held from this point until the tuple update is visible).
+ *
+ * For the new tuple the only check needed is at the relation level, but
+ * since both tuples are in the same relation and the check for oldtup
+ * will include checking the relation level, there is no benefit to a
+ * separate check for the new tuple.
+ */
+ CheckForSerializableConflictIn(relation, &oldtup.t_self,
+ BufferGetBlockNumber(buffer));
+
+ /*
+ * At this point newbuf and buffer are both pinned and locked, and newbuf
+ * has enough space for the new tuple. If they are the same buffer, only
+ * one pin is held.
+ */
+
+ if (newbuf == buffer)
+ {
+ /*
+ * Since the new tuple is going into the same page, we might be able
+ * to do a HOT update. Check if any of the index columns have been
+ * changed. If the page was already full, we may have skipped checking
+ * for index columns, and also can't do a HOT update.
+ */
+ if (hot_attrs_checked && !bms_overlap(modified_attrs, hot_attrs))
+ use_hot_update = true;
+ }
+ else
+ {
+ /* Set a hint that the old page could use prune/defrag */
+ PageSetFull(page);
+ }
+
+ /*
+ * Compute replica identity tuple before entering the critical section so
+ * we don't PANIC upon a memory allocation failure.
+ * ExtractReplicaIdentity() will return NULL if nothing needs to be
+ * logged. Pass old key required as true only if the replica identity key
+ * columns are modified or it has external data.
+ */
+ old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
+ bms_overlap(modified_attrs, id_attrs) ||
+ id_has_external,
+ &old_key_copied);
+
+ /* NO EREPORT(ERROR) from here till changes are logged */
+ START_CRIT_SECTION();
+
+ /*
+ * If this transaction commits, the old tuple will become DEAD sooner or
+ * later. Set flag that this page is a candidate for pruning once our xid
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * the subsequent page pruning will be a no-op and the hint will be
+ * cleared.
+ *
+ * XXX Should we set hint on newbuf as well? If the transaction aborts,
+ * there would be a prunable tuple in the newbuf; but for now we choose
+ * not to optimize for aborts. Note that heap_xlog_update must be kept in
+ * sync if this decision changes.
+ */
+ PageSetPrunable(page, xid);
+
+ if (use_hot_update)
+ {
+ /* Mark the old tuple as HOT-updated */
+ HeapTupleSetHotUpdated(&oldtup);
+ /* And mark the new tuple as heap-only */
+ HeapTupleSetHeapOnly(heaptup);
+ /* Mark the caller's copy too, in case different from heaptup */
+ HeapTupleSetHeapOnly(newtup);
+ }
+ else
+ {
+ /* Make sure tuples are correctly marked as not-HOT */
+ HeapTupleClearHotUpdated(&oldtup);
+ HeapTupleClearHeapOnly(heaptup);
+ HeapTupleClearHeapOnly(newtup);
+ }
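+
+ /*
+ * A sketch of the resulting on-page state in the HOT case: the old
+ * tuple keeps its line pointer and index entries, now carries
+ * HEAP_HOT_UPDATED, and (below) its t_ctid is pointed at the new
+ * tuple, which carries HEAP_ONLY_TUPLE and gets no index entries of
+ * its own; index scans reach it by walking the chain from the old
+ * line pointer. See README.HOT.
+ */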
+
+ RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
+
+
+ /* Clear obsolete visibility flags, possibly set by ourselves above... */
+ oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+ oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ /* ... and store info about transaction updating this tuple */
+ Assert(TransactionIdIsValid(xmax_old_tuple));
+ HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
+ oldtup.t_data->t_infomask |= infomask_old_tuple;
+ oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
+ HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
+
+ /* record address of new tuple in t_ctid of old one */
+ oldtup.t_data->t_ctid = heaptup->t_self;
+
+ /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
+ if (PageIsAllVisible(BufferGetPage(buffer)))
+ {
+ all_visible_cleared = true;
+ PageClearAllVisible(BufferGetPage(buffer));
+ visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
+ vmbuffer, VISIBILITYMAP_VALID_BITS);
+ }
+ if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
+ {
+ all_visible_cleared_new = true;
+ PageClearAllVisible(BufferGetPage(newbuf));
+ visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
+ vmbuffer_new, VISIBILITYMAP_VALID_BITS);
+ }
+
+ if (newbuf != buffer)
+ MarkBufferDirty(newbuf);
+ MarkBufferDirty(buffer);
+
+ /* XLOG stuff */
+ if (RelationNeedsWAL(relation))
+ {
+ XLogRecPtr recptr;
+
+ /*
+ * For logical decoding we need combo CIDs to properly decode the
+ * catalog.
+ */
+ if (RelationIsAccessibleInLogicalDecoding(relation))
+ {
+ log_heap_new_cid(relation, &oldtup);
+ log_heap_new_cid(relation, heaptup);
+ }
+
+ recptr = log_heap_update(relation, buffer,
+ newbuf, &oldtup, heaptup,
+ old_key_tuple,
+ all_visible_cleared,
+ all_visible_cleared_new);
+ if (newbuf != buffer)
+ {
+ PageSetLSN(BufferGetPage(newbuf), recptr);
+ }
+ PageSetLSN(BufferGetPage(buffer), recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ if (newbuf != buffer)
+ LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ /*
+ * Mark old tuple for invalidation from system caches at next command
+ * boundary, and mark the new tuple for invalidation in case we abort. We
+ * have to do this before releasing the buffer because oldtup is in the
+ * buffer. (heaptup is all in local memory, but it's necessary to process
+ * both tuple versions in one call to inval.c so we can avoid redundant
+ * sinval messages.)
+ */
+ CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
+
+ /* Now we can release the buffer(s) */
+ if (newbuf != buffer)
+ ReleaseBuffer(newbuf);
+ ReleaseBuffer(buffer);
+ if (BufferIsValid(vmbuffer_new))
+ ReleaseBuffer(vmbuffer_new);
+ if (BufferIsValid(vmbuffer))
+ ReleaseBuffer(vmbuffer);
+
+ /*
+ * Release the lmgr tuple lock, if we had it.
+ */
+ if (have_tuple_lock)
+ UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
+
+ pgstat_count_heap_update(relation, use_hot_update);
+
+ /*
+ * If heaptup is a private copy, release it. Don't forget to copy t_self
+ * back to the caller's image, too.
+ */
+ if (heaptup != newtup)
+ {
+ newtup->t_self = heaptup->t_self;
+ heap_freetuple(heaptup);
+ }
+
+ if (old_key_tuple != NULL && old_key_copied)
+ heap_freetuple(old_key_tuple);
+
+ bms_free(hot_attrs);
+ bms_free(key_attrs);
+ bms_free(id_attrs);
+ bms_free(modified_attrs);
+ bms_free(interesting_attrs);
+
+ return TM_Ok;
+}
+
+/*
+ * Check if the specified attribute's values are the same. Subroutine for
+ * HeapDetermineColumnsInfo.
+ */
+static bool
+heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
+ bool isnull1, bool isnull2)
+{
+ Form_pg_attribute att;
+
+ /*
+ * If one value is NULL and the other is not, then they are certainly
+ * not equal.
+ */
+ if (isnull1 != isnull2)
+ return false;
+
+ /*
+ * If both are NULL, they can be considered equal.
+ */
+ if (isnull1)
+ return true;
+
+ /*
+ * We do simple binary comparison of the two datums. This may be overly
+ * strict because there can be multiple binary representations for the
+ * same logical value. But we should be OK as long as there are no false
+ * positives. Using a type-specific equality operator is messy because
+ * there could be multiple notions of equality in different operator
+ * classes; furthermore, we cannot safely invoke user-defined functions
+ * while holding exclusive buffer lock.
+ */
+ if (attrnum <= 0)
+ {
+ /* The only system column that can reach here is tableoid, an OID */
+ return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
+ }
+ else
+ {
+ Assert(attrnum <= tupdesc->natts);
+ att = TupleDescAttr(tupdesc, attrnum - 1);
+ return datumIsEqual(value1, value2, att->attbyval, att->attlen);
+ }
+}
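+
+/*
+ * A worked example of the "overly strict" caveat above: numeric 0 and 0.0
+ * are equal under the type's equality operator but differ in display
+ * scale, hence in binary representation, so datumIsEqual reports them as
+ * different and the column is treated as modified. Erring in that
+ * direction is safe; only a false claim of equality would be a problem.
+ */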
+
+/*
+ * Check which columns are being updated.
+ *
+ * Given an updated tuple, determine (and return into the output bitmapset),
+ * from those listed as interesting, the set of columns that changed.
+ *
+ * has_external indicates if any of the unmodified attributes (from those
+ * listed as interesting) of the old tuple is a member of external_cols and is
+ * stored externally.
+ *
+ * The input interesting_cols bitmapset is destructively modified; that is OK
+ * since this is invoked at most once in heap_update.
+ */
+static Bitmapset *
+HeapDetermineColumnsInfo(Relation relation,
+ Bitmapset *interesting_cols,
+ Bitmapset *external_cols,
+ HeapTuple oldtup, HeapTuple newtup,
+ bool *has_external)
+{
+ int attrnum;
+ Bitmapset *modified = NULL;
+ TupleDesc tupdesc = RelationGetDescr(relation);
+
+ while ((attrnum = bms_first_member(interesting_cols)) >= 0)
+ {
+ Datum value1,
+ value2;
+ bool isnull1,
+ isnull2;
+
+ attrnum += FirstLowInvalidHeapAttributeNumber;
+
+ /*
+ * If it's a whole-tuple reference, say "not equal". It's not really
+ * worth supporting this case, since it could only succeed after a
+ * no-op update, which is hardly a case worth optimizing for.
+ */
+ if (attrnum == 0)
+ {
+ modified = bms_add_member(modified,
+ attrnum -
+ FirstLowInvalidHeapAttributeNumber);
+ continue;
+ }
+
+ /*
+ * Likewise, automatically say "not equal" for any system attribute
+ * other than tableOID; we cannot expect these to be consistent in a
+ * HOT chain, or even to be set correctly yet in the new tuple.
+ */
+ if (attrnum < 0)
+ {
+ if (attrnum != TableOidAttributeNumber)
+ {
+ modified = bms_add_member(modified,
+ attrnum -
+ FirstLowInvalidHeapAttributeNumber);
+ continue;
+ }
+ }
+
+ /*
+ * Extract the corresponding values. XXX this is pretty inefficient
+ * if there are many indexed columns. Should we do a single
+ * heap_deform_tuple call on each tuple, instead? But that doesn't
+ * work for system columns ...
+ */
+ value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
+ value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
+
+ if (!heap_attr_equals(tupdesc, attrnum, value1,
+ value2, isnull1, isnull2))
+ {
+ modified = bms_add_member(modified,
+ attrnum -
+ FirstLowInvalidHeapAttributeNumber);
+ continue;
+ }
+
+ /*
+ * No need to check attributes that can't be stored externally. Note
+ * that system attributes can't be stored externally.
+ */
+ if (attrnum < 0 || isnull1 ||
+ TupleDescAttr(tupdesc, attrnum - 1)->attlen != -1)
+ continue;
+
+ /*
+ * Check if the old tuple's attribute is stored externally and is a
+ * member of external_cols.
+ */
+ if (VARATT_IS_EXTERNAL((struct varlena *) DatumGetPointer(value1)) &&
+ bms_is_member(attrnum - FirstLowInvalidHeapAttributeNumber,
+ external_cols))
+ *has_external = true;
+ }
+
+ return modified;
+}
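+
+/*
+ * A minimal sketch of the attribute-number encoding used above: bitmapset
+ * members cannot be negative, so attnums are stored offset by
+ * FirstLowInvalidHeapAttributeNumber to make room for system columns.
+ * With FirstLowInvalidHeapAttributeNumber == -7, for instance, user
+ * column 1 is member 8, and
+ *
+ *     bms_is_member(1 - FirstLowInvalidHeapAttributeNumber, modified)
+ *
+ * tests whether that column was detected as changed.
+ */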
+
+/*
+ * simple_heap_update - replace a tuple
+ *
+ * This routine may be used to update a tuple when concurrent updates of
+ * the target tuple are not expected (for example, because we have a lock
+ * on the relation associated with the tuple). Any failure is reported
+ * via ereport().
+ */
+void
+simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
+{
+ TM_Result result;
+ TM_FailureData tmfd;
+ LockTupleMode lockmode;
+
+ result = heap_update(relation, otid, tup,
+ GetCurrentCommandId(true), InvalidSnapshot,
+ true /* wait for commit */ ,
+ &tmfd, &lockmode);
+ switch (result)
+ {
+ case TM_SelfModified:
+ /* Tuple was already updated in current command? */
+ elog(ERROR, "tuple already updated by self");
+ break;
+
+ case TM_Ok:
+ /* done successfully */
+ break;
+
+ case TM_Updated:
+ elog(ERROR, "tuple concurrently updated");
+ break;
+
+ case TM_Deleted:
+ elog(ERROR, "tuple concurrently deleted");
+ break;
+
+ default:
+ elog(ERROR, "unrecognized heap_update status: %u", result);
+ break;
+ }
+}
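+
+/*
+ * A minimal usage sketch (illustrative only), assuming "newtup" was built
+ * with heap_modify_tuple from the existing version at *otid:
+ *
+ *     simple_heap_update(rel, otid, newtup);
+ *
+ * On return, newtup->t_self addresses the new tuple version. As with
+ * simple_heap_insert, index entries are not maintained here; see
+ * CatalogTupleUpdate for a caller that handles them.
+ */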
+
+
+/*
+ * Return the MultiXactStatus corresponding to the given tuple lock mode.
+ */
+static MultiXactStatus
+get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
+{
+ int retval;
+
+ if (is_update)
+ retval = tupleLockExtraInfo[mode].updstatus;
+ else
+ retval = tupleLockExtraInfo[mode].lockstatus;
+
+ if (retval == -1)
+ elog(ERROR, "invalid lock tuple mode %d/%s", mode,
+ is_update ? "true" : "false");
+
+ return (MultiXactStatus) retval;
+}
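+
+/*
+ * For example, given the tupleLockExtraInfo table earlier in this file:
+ *
+ *     get_mxact_status_for_lock(LockTupleKeyShare, false)
+ *         == MultiXactStatusForKeyShare
+ *     get_mxact_status_for_lock(LockTupleExclusive, true)
+ *         == MultiXactStatusUpdate
+ *
+ * while an is_update request for a pure lock mode such as LockTupleShare
+ * has no update status (retval == -1) and raises an error.
+ */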
+
+/*
+ * heap_lock_tuple - lock a tuple in shared or exclusive mode
+ *
+ * Note that this acquires a buffer pin, which the caller must release.
+ *
+ * Input parameters:
+ * relation: relation containing tuple (caller must hold suitable lock)
+ * tid: TID of tuple to lock
+ * cid: current command ID (used for visibility test, and stored into
+ * tuple's cmax if lock is successful)
+ * mode: indicates if shared or exclusive tuple lock is desired
+ * wait_policy: what to do if tuple lock is not available
+ * follow_updates: if true, follow the update chain to also lock descendant
+ * tuples.
+ *
+ * Output parameters:
+ * *tuple: all fields filled in
+ * *buffer: set to buffer holding tuple (pinned but not locked at exit)
+ * *tmfd: filled in failure cases (see below)
+ *
+ * Function results are the same as the ones for table_tuple_lock().
+ *
+ * In the failure cases other than TM_Invisible, the routine fills
+ * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
+ * if necessary), and t_cmax (the last only for TM_SelfModified,
+ * since we cannot obtain cmax from a combo CID generated by another
+ * transaction).
+ * See comments for struct TM_FailureData for additional info.
+ *
+ * See README.tuplock for a thorough explanation of this mechanism.
+ */
+TM_Result
+heap_lock_tuple(Relation relation, HeapTuple tuple,
+ CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
+ bool follow_updates,
+ Buffer *buffer, TM_FailureData *tmfd)
+{
+ TM_Result result;
+ ItemPointer tid = &(tuple->t_self);
+ ItemId lp;
+ Page page;
+ Buffer vmbuffer = InvalidBuffer;
+ BlockNumber block;
+ TransactionId xid,
+ xmax;
+ uint16 old_infomask,
+ new_infomask,
+ new_infomask2;
+ bool first_time = true;
+ bool skip_tuple_lock = false;
+ bool have_tuple_lock = false;
+ bool cleared_all_frozen = false;
+
+ *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
+ block = ItemPointerGetBlockNumber(tid);
+
+ /*
+ * Before locking the buffer, pin the visibility map page if it appears to
+ * be necessary. Since we haven't got the lock yet, someone else might be
+ * in the middle of changing this, so we'll need to recheck after we have
+ * the lock.
+ */
+ if (PageIsAllVisible(BufferGetPage(*buffer)))
+ visibilitymap_pin(relation, block, &vmbuffer);
+
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ page = BufferGetPage(*buffer);
+ lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
+ Assert(ItemIdIsNormal(lp));
+
+ tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
+ tuple->t_len = ItemIdGetLength(lp);
+ tuple->t_tableOid = RelationGetRelid(relation);
+
+l3:
+ result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
+
+ if (result == TM_Invisible)
+ {
+ /*
+ * This is possible, but only when locking a tuple for ON CONFLICT
+ * UPDATE. We return this value here rather than throwing an error in
+ * order to give that case the opportunity to throw a more specific
+ * error.
+ */
+ result = TM_Invisible;
+ goto out_locked;
+ }
+ else if (result == TM_BeingModified ||
+ result == TM_Updated ||
+ result == TM_Deleted)
+ {
+ TransactionId xwait;
+ uint16 infomask;
+ uint16 infomask2;
+ bool require_sleep;
+ ItemPointerData t_ctid;
+
+ /* must copy state data before unlocking buffer */
+ xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
+ infomask = tuple->t_data->t_infomask;
+ infomask2 = tuple->t_data->t_infomask2;
+ ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
+
+ LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+
+ /*
+ * If any subtransaction of the current top transaction already holds
+ * a lock as strong as or stronger than what we're requesting, we
+ * effectively hold the desired lock already. We *must* succeed
+ * without trying to take the tuple lock, else we will deadlock
+ * against anyone wanting to acquire a stronger lock.
+ *
+		 * Note we only do this the first time we loop on the result of
+		 * HeapTupleSatisfiesUpdate; there is no point in testing in
+		 * subsequent passes, because
+ * evidently our own transaction cannot have acquired a new lock after
+ * the first time we checked.
+ */
+ if (first_time)
+ {
+ first_time = false;
+
+ if (infomask & HEAP_XMAX_IS_MULTI)
+ {
+ int i;
+ int nmembers;
+ MultiXactMember *members;
+
+ /*
+ * We don't need to allow old multixacts here; if that had
+ * been the case, HeapTupleSatisfiesUpdate would have returned
+				 * TM_Ok and we wouldn't be here.
+ */
+ nmembers =
+ GetMultiXactIdMembers(xwait, &members, false,
+ HEAP_XMAX_IS_LOCKED_ONLY(infomask));
+
+ for (i = 0; i < nmembers; i++)
+ {
+ /* only consider members of our own transaction */
+ if (!TransactionIdIsCurrentTransactionId(members[i].xid))
+ continue;
+
+ if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
+ {
+ pfree(members);
+ result = TM_Ok;
+ goto out_unlocked;
+ }
+ else
+ {
+ /*
+ * Disable acquisition of the heavyweight tuple lock.
+ * Otherwise, when promoting a weaker lock, we might
+ * deadlock with another locker that has acquired the
+ * heavyweight tuple lock and is waiting for our
+ * transaction to finish.
+ *
+ * Note that in this case we still need to wait for
+ * the multixact if required, to avoid acquiring
+ * conflicting locks.
+ */
+ skip_tuple_lock = true;
+ }
+ }
+
+ if (members)
+ pfree(members);
+ }
+ else if (TransactionIdIsCurrentTransactionId(xwait))
+ {
+ switch (mode)
+ {
+ case LockTupleKeyShare:
+ Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
+ HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
+ HEAP_XMAX_IS_EXCL_LOCKED(infomask));
+ result = TM_Ok;
+ goto out_unlocked;
+ case LockTupleShare:
+ if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
+ HEAP_XMAX_IS_EXCL_LOCKED(infomask))
+ {
+ result = TM_Ok;
+ goto out_unlocked;
+ }
+ break;
+ case LockTupleNoKeyExclusive:
+ if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
+ {
+ result = TM_Ok;
+ goto out_unlocked;
+ }
+ break;
+ case LockTupleExclusive:
+ if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
+ infomask2 & HEAP_KEYS_UPDATED)
+ {
+ result = TM_Ok;
+ goto out_unlocked;
+ }
+ break;
+ }
+ }
+ }
+
+ /*
+ * Initially assume that we will have to wait for the locking
+ * transaction(s) to finish. We check various cases below in which
+ * this can be turned off.
+ */
+ require_sleep = true;
+ if (mode == LockTupleKeyShare)
+ {
+ /*
+ * If we're requesting KeyShare, and there's no update present, we
+ * don't need to wait. Even if there is an update, we can still
+ * continue if the key hasn't been modified.
+ *
+ * However, if there are updates, we need to walk the update chain
+ * to mark future versions of the row as locked, too. That way,
+ * if somebody deletes that future version, we're protected
+ * against the key going away. This locking of future versions
+ * could block momentarily, if a concurrent transaction is
+ * deleting a key; or it could return a value to the effect that
+ * the transaction deleting the key has already committed. So we
+ * do this before re-locking the buffer; otherwise this would be
+ * prone to deadlocks.
+ *
+ * Note that the TID we're locking was grabbed before we unlocked
+ * the buffer. For it to change while we're not looking, the
+ * other properties we're testing for below after re-locking the
+ * buffer would also change, in which case we would restart this
+ * loop above.
+ */
+ if (!(infomask2 & HEAP_KEYS_UPDATED))
+ {
+ bool updated;
+
+ updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
+
+ /*
+ * If there are updates, follow the update chain; bail out if
+ * that cannot be done.
+ */
+ if (follow_updates && updated)
+ {
+ TM_Result res;
+
+ res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
+ GetCurrentTransactionId(),
+ mode);
+ if (res != TM_Ok)
+ {
+ result = res;
+ /* recovery code expects to have buffer lock held */
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ goto failed;
+ }
+ }
+
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * Make sure it's still an appropriate lock, else start over.
+ * Also, if it wasn't updated before we released the lock, but
+ * is updated now, we start over too; the reason is that we
+ * now need to follow the update chain to lock the new
+ * versions.
+ */
+ if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
+ ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
+ !updated))
+ goto l3;
+
+ /* Things look okay, so we can skip sleeping */
+ require_sleep = false;
+
+ /*
+ * Note we allow Xmax to change here; other updaters/lockers
+ * could have modified it before we grabbed the buffer lock.
+ * However, this is not a problem, because with the recheck we
+ * just did we ensure that they still don't conflict with the
+ * lock we want.
+ */
+ }
+ }
+ else if (mode == LockTupleShare)
+ {
+ /*
+ * If we're requesting Share, we can similarly avoid sleeping if
+ * there's no update and no exclusive lock present.
+ */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
+ !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
+ {
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * Make sure it's still an appropriate lock, else start over.
+ * See above about allowing xmax to change.
+ */
+ if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
+ HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
+ goto l3;
+ require_sleep = false;
+ }
+ }
+ else if (mode == LockTupleNoKeyExclusive)
+ {
+ /*
+ * If we're requesting NoKeyExclusive, we might also be able to
+			 * avoid sleeping; just ensure that no conflicting lock has
+			 * already been acquired.
+ */
+ if (infomask & HEAP_XMAX_IS_MULTI)
+ {
+ if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
+ mode, NULL))
+ {
+ /*
+ * No conflict, but if the xmax changed under us in the
+ * meantime, start over.
+ */
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
+ goto l3;
+
+ /* otherwise, we're good */
+ require_sleep = false;
+ }
+ }
+ else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
+ {
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /* if the xmax changed in the meantime, start over */
+ if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
+ goto l3;
+ /* otherwise, we're good */
+ require_sleep = false;
+ }
+ }
+
+ /*
+ * As a check independent from those above, we can also avoid sleeping
+ * if the current transaction is the sole locker of the tuple. Note
+ * that the strength of the lock already held is irrelevant; this is
+ * not about recording the lock in Xmax (which will be done regardless
+ * of this optimization, below). Also, note that the cases where we
+ * hold a lock stronger than we are requesting are already handled
+ * above by not doing anything.
+ *
+ * Note we only deal with the non-multixact case here; MultiXactIdWait
+ * is well equipped to deal with this situation on its own.
+ */
+ if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
+ TransactionIdIsCurrentTransactionId(xwait))
+ {
+ /* ... but if the xmax changed in the meantime, start over */
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
+ goto l3;
+ Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
+ require_sleep = false;
+ }
+
+ /*
+ * Time to sleep on the other transaction/multixact, if necessary.
+ *
+ * If the other transaction is an update/delete that's already
+ * committed, then sleeping cannot possibly do any good: if we're
+ * required to sleep, get out to raise an error instead.
+ *
+ * By here, we either have already acquired the buffer exclusive lock,
+ * or we must wait for the locking transaction or multixact; so below
+ * we ensure that we grab buffer lock after the sleep.
+ */
+ if (require_sleep && (result == TM_Updated || result == TM_Deleted))
+ {
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ goto failed;
+ }
+ else if (require_sleep)
+ {
+ /*
+ * Acquire tuple lock to establish our priority for the tuple, or
+ * die trying. LockTuple will release us when we are next-in-line
+ * for the tuple. We must do this even if we are share-locking,
+ * but not if we already have a weaker lock on the tuple.
+ *
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while
+ * rechecking tuple state.
+ */
+ if (!skip_tuple_lock &&
+ !heap_acquire_tuplock(relation, tid, mode, wait_policy,
+ &have_tuple_lock))
+ {
+ /*
+ * This can only happen if wait_policy is Skip and the lock
+ * couldn't be obtained.
+ */
+ result = TM_WouldBlock;
+ /* recovery code expects to have buffer lock held */
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ goto failed;
+ }
+
+ if (infomask & HEAP_XMAX_IS_MULTI)
+ {
+ MultiXactStatus status = get_mxact_status_for_lock(mode, false);
+
+ /* We only ever lock tuples, never update them */
+ if (status >= MultiXactStatusNoKeyUpdate)
+ elog(ERROR, "invalid lock mode in heap_lock_tuple");
+
+ /* wait for multixact to end, or die trying */
+ switch (wait_policy)
+ {
+ case LockWaitBlock:
+ MultiXactIdWait((MultiXactId) xwait, status, infomask,
+ relation, &tuple->t_self, XLTW_Lock, NULL);
+ break;
+ case LockWaitSkip:
+ if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
+ status, infomask, relation,
+ NULL))
+ {
+ result = TM_WouldBlock;
+ /* recovery code expects to have buffer lock held */
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ goto failed;
+ }
+ break;
+ case LockWaitError:
+ if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
+ status, infomask, relation,
+ NULL))
+ ereport(ERROR,
+ (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
+
+ break;
+ }
+
+ /*
+ * Of course, the multixact might not be done here: if we're
+ * requesting a light lock mode, other transactions with light
+ * locks could still be alive, as well as locks owned by our
+ * own xact or other subxacts of this backend. We need to
+ * preserve the surviving MultiXact members. Note that it
+ * isn't absolutely necessary in the latter case, but doing so
+ * is simpler.
+ */
+ }
+ else
+ {
+ /* wait for regular transaction to end, or die trying */
+ switch (wait_policy)
+ {
+ case LockWaitBlock:
+ XactLockTableWait(xwait, relation, &tuple->t_self,
+ XLTW_Lock);
+ break;
+ case LockWaitSkip:
+ if (!ConditionalXactLockTableWait(xwait))
+ {
+ result = TM_WouldBlock;
+ /* recovery code expects to have buffer lock held */
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ goto failed;
+ }
+ break;
+ case LockWaitError:
+ if (!ConditionalXactLockTableWait(xwait))
+ ereport(ERROR,
+ (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
+ break;
+ }
+ }
+
+ /* if there are updates, follow the update chain */
+ if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
+ {
+ TM_Result res;
+
+ res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
+ GetCurrentTransactionId(),
+ mode);
+ if (res != TM_Ok)
+ {
+ result = res;
+ /* recovery code expects to have buffer lock held */
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ goto failed;
+ }
+ }
+
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * xwait is done, but if xwait had just locked the tuple then some
+ * other xact could update this tuple before we get to this point.
+ * Check for xmax change, and start over if so.
+ */
+ if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
+ goto l3;
+
+ if (!(infomask & HEAP_XMAX_IS_MULTI))
+ {
+ /*
+ * Otherwise check if it committed or aborted. Note we cannot
+ * be here if the tuple was only locked by somebody who didn't
+ * conflict with us; that would have been handled above. So
+ * that transaction must necessarily be gone by now. But
+ * don't check for this in the multixact case, because some
+ * locker transactions might still be running.
+ */
+ UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
+ }
+ }
+
+ /* By here, we're certain that we hold buffer exclusive lock again */
+
+ /*
+ * We may lock if previous xmax aborted, or if it committed but only
+ * locked the tuple without updating it; or if we didn't have to wait
+ * at all for whatever reason.
+ */
+ if (!require_sleep ||
+ (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
+ HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
+ HeapTupleHeaderIsOnlyLocked(tuple->t_data))
+ result = TM_Ok;
+ else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
+ result = TM_Updated;
+ else
+ result = TM_Deleted;
+ }
+
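+/*
+ * Every "goto failed" above re-takes the buffer's exclusive lock first, so
+ * at this label we can safely inspect the tuple and fill *tmfd before
+ * unlocking.
+ */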
+failed:
+ if (result != TM_Ok)
+ {
+ Assert(result == TM_SelfModified || result == TM_Updated ||
+ result == TM_Deleted || result == TM_WouldBlock);
+
+ /*
+ * When locking a tuple under LockWaitSkip semantics and we fail with
+ * TM_WouldBlock above, it's possible for concurrent transactions to
+ * release the lock and set HEAP_XMAX_INVALID in the meantime. So
+ * this assert is slightly different from the equivalent one in
+ * heap_delete and heap_update.
+ */
+ Assert((result == TM_WouldBlock) ||
+ !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
+ Assert(result != TM_Updated ||
+ !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
+ tmfd->ctid = tuple->t_data->t_ctid;
+ tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
+ if (result == TM_SelfModified)
+ tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
+ else
+ tmfd->cmax = InvalidCommandId;
+ goto out_locked;
+ }
+
+ /*
+ * If we didn't pin the visibility map page and the page has become all
+ * visible while we were busy locking the buffer, or during some
+ * subsequent window during which we had it unlocked, we'll have to unlock
+ * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
+ * unfortunate, especially since we'll now have to recheck whether the
+ * tuple has been locked or updated under us, but hopefully it won't
+ * happen very often.
+ */
+ if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
+ {
+ LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+ visibilitymap_pin(relation, block, &vmbuffer);
+ LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+ goto l3;
+ }
+
+ xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
+ old_infomask = tuple->t_data->t_infomask;
+
+ /*
+ * If this is the first possibly-multixact-able operation in the current
+ * transaction, set my per-backend OldestMemberMXactId setting. We can be
+ * certain that the transaction will never become a member of any older
+ * MultiXactIds than that. (We have to do this even if we end up just
+ * using our own TransactionId below, since some other backend could
+ * incorporate our XID into a MultiXact immediately afterwards.)
+ */
+ MultiXactIdSetOldestMember();
+
+ /*
+ * Compute the new xmax and infomask to store into the tuple. Note we do
+ * not modify the tuple just yet, because that would leave it in the wrong
+ * state if multixact.c elogs.
+ */
+ compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
+ GetCurrentTransactionId(), mode, false,
+ &xid, &new_infomask, &new_infomask2);
+
+ START_CRIT_SECTION();
+
+ /*
+ * Store transaction information of xact locking the tuple.
+ *
+ * Note: Cmax is meaningless in this context, so don't set it; this avoids
+ * possibly generating a useless combo CID. Moreover, if we're locking a
+ * previously updated tuple, it's important to preserve the Cmax.
+ *
+ * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
+ * we would break the HOT chain.
+ */
+ tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
+ tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ tuple->t_data->t_infomask |= new_infomask;
+ tuple->t_data->t_infomask2 |= new_infomask2;
+ if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
+ HeapTupleHeaderClearHotUpdated(tuple->t_data);
+ HeapTupleHeaderSetXmax(tuple->t_data, xid);
+
+ /*
+ * Make sure there is no forward chain link in t_ctid. Note that in the
+ * cases where the tuple has been updated, we must not overwrite t_ctid,
+ * because it was set by the updater. Moreover, if the tuple has been
+ * updated, we need to follow the update chain to lock the new versions of
+ * the tuple as well.
+ */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
+ tuple->t_data->t_ctid = *tid;
+
+ /* Clear only the all-frozen bit on visibility map if needed */
+ if (PageIsAllVisible(page) &&
+ visibilitymap_clear(relation, block, vmbuffer,
+ VISIBILITYMAP_ALL_FROZEN))
+ cleared_all_frozen = true;
+
+ MarkBufferDirty(*buffer);
+
+ /*
+ * XLOG stuff. You might think that we don't need an XLOG record because
+ * there is no state change worth restoring after a crash. You would be
+ * wrong however: we have just written either a TransactionId or a
+ * MultiXactId that may never have been seen on disk before, and we need
+ * to make sure that there are XLOG entries covering those ID numbers.
+ * Else the same IDs might be re-used after a crash, which would be
+ * disastrous if this page made it to disk before the crash. Essentially
+ * we have to enforce the WAL log-before-data rule even in this case.
+ * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
+ * entries for everything anyway.)
+ */
+ if (RelationNeedsWAL(relation))
+ {
+ xl_heap_lock xlrec;
+ XLogRecPtr recptr;
+
+ XLogBeginInsert();
+ XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
+
+ xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
+ xlrec.locking_xid = xid;
+ xlrec.infobits_set = compute_infobits(new_infomask,
+ tuple->t_data->t_infomask2);
+ xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
+ XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
+
+ /* we don't decode row locks atm, so no need to log the origin */
+
+ recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
+
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ result = TM_Ok;
+
+out_locked:
+ LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+
+out_unlocked:
+ if (BufferIsValid(vmbuffer))
+ ReleaseBuffer(vmbuffer);
+
+ /*
+ * Don't update the visibility map here. Locking a tuple doesn't change
+ * visibility info.
+ */
+
+ /*
+ * Now that we have successfully marked the tuple as locked, we can
+ * release the lmgr tuple lock, if we had it.
+ */
+ if (have_tuple_lock)
+ UnlockTupleTuplock(relation, tid, mode);
+
+ return result;
+}
+
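+/*
+ * Illustrative sketch (not compiled; variable names are hypothetical): a
+ * minimal caller uses heap_lock_tuple roughly as follows.
+ *
+ *		HeapTupleData t;
+ *		Buffer		buf;
+ *		TM_FailureData tmfd;
+ *		TM_Result	res;
+ *
+ *		t.t_self = *tid;
+ *		res = heap_lock_tuple(rel, &t, cid, LockTupleExclusive,
+ *							  LockWaitBlock, false, &buf, &tmfd);
+ *		if (res != TM_Ok)
+ *			... inspect tmfd.ctid and tmfd.xmax, e.g. to chase the chain ...
+ *		ReleaseBuffer(buf);			(the pin is always returned to the caller)
+ */
+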
+/*
+ * Acquire heavyweight lock on the given tuple, in preparation for acquiring
+ * its normal, Xmax-based tuple lock.
+ *
+ * have_tuple_lock is an input and output parameter: on input, it indicates
+ * whether the lock has previously been acquired (and this function does
+ * nothing in that case). If this function returns success, have_tuple_lock
+ * has been flipped to true.
+ *
+ * Returns false if it was unable to obtain the lock; this can only happen if
+ * wait_policy is Skip.
+ */
+static bool
+heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
+ LockWaitPolicy wait_policy, bool *have_tuple_lock)
+{
+ if (*have_tuple_lock)
+ return true;
+
+ switch (wait_policy)
+ {
+ case LockWaitBlock:
+ LockTupleTuplock(relation, tid, mode);
+ break;
+
+ case LockWaitSkip:
+ if (!ConditionalLockTupleTuplock(relation, tid, mode))
+ return false;
+ break;
+
+ case LockWaitError:
+ if (!ConditionalLockTupleTuplock(relation, tid, mode))
+ ereport(ERROR,
+ (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
+ break;
+ }
+ *have_tuple_lock = true;
+
+ return true;
+}
+
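+/*
+ * Summary of the wait-policy behavior above (descriptive only):
+ *		LockWaitBlock - sleep until the tuplock is granted; never fails
+ *		LockWaitSkip  - return false at once if the tuplock is busy
+ *		LockWaitError - ereport(ERROR) at once if the tuplock is busy
+ */
+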
+/*
+ * Given an original set of Xmax and infomask, and a transaction (identified by
+ * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
+ * corresponding infomasks to use on the tuple.
+ *
+ * Note that this might have side effects such as creating a new MultiXactId.
+ *
+ * Most callers will have called HeapTupleSatisfiesUpdate before this function;
+ * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
+ * but it was not running anymore. There is a race condition, which is that the
+ * MultiXactId may have finished since then, but that uncommon case is handled
+ * either here, or within MultiXactIdExpand.
+ *
+ * There is a similar race condition possible when the old xmax was a regular
+ * TransactionId. We test TransactionIdIsInProgress again just to narrow the
+ * window, but it's still possible to end up creating an unnecessary
+ * MultiXactId. Fortunately this is harmless.
+ */
+static void
+compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
+ uint16 old_infomask2, TransactionId add_to_xmax,
+ LockTupleMode mode, bool is_update,
+ TransactionId *result_xmax, uint16 *result_infomask,
+ uint16 *result_infomask2)
+{
+ TransactionId new_xmax;
+ uint16 new_infomask,
+ new_infomask2;
+
+ Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
+
+l5:
+ new_infomask = 0;
+ new_infomask2 = 0;
+ if (old_infomask & HEAP_XMAX_INVALID)
+ {
+ /*
+ * No previous locker; we just insert our own TransactionId.
+ *
+ * Note that it's critical that this case be the first one checked,
+ * because there are several blocks below that come back to this one
+ * to implement certain optimizations; old_infomask might contain
+ * other dirty bits in those cases, but we don't really care.
+ */
+ if (is_update)
+ {
+ new_xmax = add_to_xmax;
+ if (mode == LockTupleExclusive)
+ new_infomask2 |= HEAP_KEYS_UPDATED;
+ }
+ else
+ {
+ new_infomask |= HEAP_XMAX_LOCK_ONLY;
+ switch (mode)
+ {
+ case LockTupleKeyShare:
+ new_xmax = add_to_xmax;
+ new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
+ break;
+ case LockTupleShare:
+ new_xmax = add_to_xmax;
+ new_infomask |= HEAP_XMAX_SHR_LOCK;
+ break;
+ case LockTupleNoKeyExclusive:
+ new_xmax = add_to_xmax;
+ new_infomask |= HEAP_XMAX_EXCL_LOCK;
+ break;
+ case LockTupleExclusive:
+ new_xmax = add_to_xmax;
+ new_infomask |= HEAP_XMAX_EXCL_LOCK;
+ new_infomask2 |= HEAP_KEYS_UPDATED;
+ break;
+ default:
+ new_xmax = InvalidTransactionId; /* silence compiler */
+ elog(ERROR, "invalid lock mode");
+ }
+ }
+ }
+ else if (old_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ MultiXactStatus new_status;
+
+ /*
+ * Currently we don't allow XMAX_COMMITTED to be set for multis, so
+ * cross-check.
+ */
+ Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
+
+ /*
+ * A multixact together with LOCK_ONLY set but neither lock bit set
+ * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
+ * anymore. This check is critical for databases upgraded by
+ * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
+ * that such multis are never passed.
+ */
+ if (HEAP_LOCKED_UPGRADED(old_infomask))
+ {
+ old_infomask &= ~HEAP_XMAX_IS_MULTI;
+ old_infomask |= HEAP_XMAX_INVALID;
+ goto l5;
+ }
+
+ /*
+ * If the XMAX is already a MultiXactId, then we need to expand it to
+ * include add_to_xmax; but if all the members were lockers and are
+ * all gone, we can do away with the IS_MULTI bit and just set
+ * add_to_xmax as the only locker/updater. If all lockers are gone
+ * and we have an updater that aborted, we can also do without a
+ * multi.
+ *
+ * The cost of doing GetMultiXactIdMembers would be paid by
+ * MultiXactIdExpand if we weren't to do this, so this check is not
+ * incurring extra work anyhow.
+ */
+ if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
+ {
+ if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
+ !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
+ old_infomask)))
+ {
+ /*
+ * Reset these bits and restart; otherwise fall through to
+ * create a new multi below.
+ */
+ old_infomask &= ~HEAP_XMAX_IS_MULTI;
+ old_infomask |= HEAP_XMAX_INVALID;
+ goto l5;
+ }
+ }
+
+ new_status = get_mxact_status_for_lock(mode, is_update);
+
+ new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
+ new_status);
+ GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
+ }
+ else if (old_infomask & HEAP_XMAX_COMMITTED)
+ {
+ /*
+	 * It's a committed update, so we must preserve it as the updater of
+	 * the tuple.
+ */
+ MultiXactStatus status;
+ MultiXactStatus new_status;
+
+ if (old_infomask2 & HEAP_KEYS_UPDATED)
+ status = MultiXactStatusUpdate;
+ else
+ status = MultiXactStatusNoKeyUpdate;
+
+ new_status = get_mxact_status_for_lock(mode, is_update);
+
+ /*
+	 * Since it's not running, it's obviously impossible for the old
+ * updater to be identical to the current one, so we need not check
+ * for that case as we do in the block above.
+ */
+ new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
+ GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
+ }
+ else if (TransactionIdIsInProgress(xmax))
+ {
+ /*
+ * If the XMAX is a valid, in-progress TransactionId, then we need to
+ * create a new MultiXactId that includes both the old locker or
+ * updater and our own TransactionId.
+ */
+ MultiXactStatus new_status;
+ MultiXactStatus old_status;
+ LockTupleMode old_mode;
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
+ {
+ if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
+ old_status = MultiXactStatusForKeyShare;
+ else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
+ old_status = MultiXactStatusForShare;
+ else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
+ {
+ if (old_infomask2 & HEAP_KEYS_UPDATED)
+ old_status = MultiXactStatusForUpdate;
+ else
+ old_status = MultiXactStatusForNoKeyUpdate;
+ }
+ else
+ {
+ /*
+ * LOCK_ONLY can be present alone only when a page has been
+ * upgraded by pg_upgrade. But in that case,
+ * TransactionIdIsInProgress() should have returned false. We
+ * assume it's no longer locked in this case.
+ */
+ elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
+ old_infomask |= HEAP_XMAX_INVALID;
+ old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
+ goto l5;
+ }
+ }
+ else
+ {
+ /* it's an update, but which kind? */
+ if (old_infomask2 & HEAP_KEYS_UPDATED)
+ old_status = MultiXactStatusUpdate;
+ else
+ old_status = MultiXactStatusNoKeyUpdate;
+ }
+
+ old_mode = TUPLOCK_from_mxstatus(old_status);
+
+ /*
+ * If the lock to be acquired is for the same TransactionId as the
+ * existing lock, there's an optimization possible: consider only the
+ * strongest of both locks as the only one present, and restart.
+ */
+ if (xmax == add_to_xmax)
+ {
+ /*
+ * Note that it's not possible for the original tuple to be
+ * updated: we wouldn't be here because the tuple would have been
+ * invisible and we wouldn't try to update it. As a subtlety,
+ * this code can also run when traversing an update chain to lock
+ * future versions of a tuple. But we wouldn't be here either,
+ * because the add_to_xmax would be different from the original
+ * updater.
+ */
+ Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
+
+ /* acquire the strongest of both */
+ if (mode < old_mode)
+ mode = old_mode;
+ /* mustn't touch is_update */
+
+ old_infomask |= HEAP_XMAX_INVALID;
+ goto l5;
+ }
+
+ /* otherwise, just fall back to creating a new multixact */
+ new_status = get_mxact_status_for_lock(mode, is_update);
+ new_xmax = MultiXactIdCreate(xmax, old_status,
+ add_to_xmax, new_status);
+ GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
+ }
+ else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
+ TransactionIdDidCommit(xmax))
+ {
+ /*
+		 * It's a committed update, so we must preserve it as the updater of
+		 * the tuple.
+ */
+ MultiXactStatus status;
+ MultiXactStatus new_status;
+
+ if (old_infomask2 & HEAP_KEYS_UPDATED)
+ status = MultiXactStatusUpdate;
+ else
+ status = MultiXactStatusNoKeyUpdate;
+
+ new_status = get_mxact_status_for_lock(mode, is_update);
+
+ /*
+		 * Since it's not running, it's obviously impossible for the old
+ * updater to be identical to the current one, so we need not check
+ * for that case as we do in the block above.
+ */
+ new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
+ GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
+ }
+ else
+ {
+ /*
+ * Can get here iff the locking/updating transaction was running when
+ * the infomask was extracted from the tuple, but finished before
+ * TransactionIdIsInProgress got to run. Deal with it as if there was
+ * no locker at all in the first place.
+ */
+ old_infomask |= HEAP_XMAX_INVALID;
+ goto l5;
+ }
+
+ *result_infomask = new_infomask;
+ *result_infomask2 = new_infomask2;
+ *result_xmax = new_xmax;
+}
+
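+/*
+ * Worked example (illustrative only): locking a tuple with no live locker
+ * (old_infomask has HEAP_XMAX_INVALID set) FOR SHARE with add_to_xmax =
+ * 1234 takes the first branch above and yields
+ *		*result_xmax = 1234
+ *		*result_infomask = HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_SHR_LOCK
+ * and no MultiXactId is created. If a second, different transaction then
+ * requests a compatible lock, the TransactionIdIsInProgress branch turns
+ * xmax into a MultiXactId containing both members.
+ */
+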
+/*
+ * Subroutine for heap_lock_updated_tuple_rec.
+ *
+ * Given a hypothetical multixact status held by the transaction identified
+ * with the given xid, does the current transaction need to wait, fail, or can
+ * it continue if it wanted to acquire a lock of the given mode? "needwait"
+ * is set to true if waiting is necessary; if it can continue, then TM_Ok is
+ * returned. If the lock is already held by the current transaction, return
+ * TM_SelfModified. In case of a conflict with another transaction, a
+ * different HeapTupleSatisfiesUpdate return code is returned.
+ *
+ * The held status is said to be hypothetical because it might correspond to a
+ * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
+ * way for simplicity of API.
+ */
+static TM_Result
+test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
+ LockTupleMode mode, HeapTuple tup,
+ bool *needwait)
+{
+ MultiXactStatus wantedstatus;
+
+ *needwait = false;
+ wantedstatus = get_mxact_status_for_lock(mode, false);
+
+ /*
+ * Note: we *must* check TransactionIdIsInProgress before
+ * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
+ * for an explanation.
+ */
+ if (TransactionIdIsCurrentTransactionId(xid))
+ {
+ /*
+ * The tuple has already been locked by our own transaction. This is
+ * very rare but can happen if multiple transactions are trying to
+ * lock an ancient version of the same tuple.
+ */
+ return TM_SelfModified;
+ }
+ else if (TransactionIdIsInProgress(xid))
+ {
+ /*
+ * If the locking transaction is running, what we do depends on
+ * whether the lock modes conflict: if they do, then we must wait for
+ * it to finish; otherwise we can fall through to lock this tuple
+ * version without waiting.
+ */
+ if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
+ LOCKMODE_from_mxstatus(wantedstatus)))
+ {
+ *needwait = true;
+ }
+
+ /*
+ * If we set needwait above, then this value doesn't matter;
+ * otherwise, this value signals to caller that it's okay to proceed.
+ */
+ return TM_Ok;
+ }
+ else if (TransactionIdDidAbort(xid))
+ return TM_Ok;
+ else if (TransactionIdDidCommit(xid))
+ {
+ /*
+ * The other transaction committed. If it was only a locker, then the
+ * lock is completely gone now and we can return success; but if it
+ * was an update, then what we do depends on whether the two lock
+ * modes conflict. If they conflict, then we must report error to
+ * caller. But if they don't, we can fall through to allow the current
+ * transaction to lock the tuple.
+ *
+ * Note: the reason we worry about ISUPDATE here is because as soon as
+ * a transaction ends, all its locks are gone and meaningless, and
+ * thus we can ignore them; whereas its updates persist. In the
+ * TransactionIdIsInProgress case, above, we don't need to check
+		 * because we know the lock is still "alive" and thus a conflict must
+		 * always be checked.
+ */
+ if (!ISUPDATE_from_mxstatus(status))
+ return TM_Ok;
+
+ if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
+ LOCKMODE_from_mxstatus(wantedstatus)))
+ {
+ /* bummer */
+ if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid))
+ return TM_Updated;
+ else
+ return TM_Deleted;
+ }
+
+ return TM_Ok;
+ }
+
+ /* Not in progress, not aborted, not committed -- must have crashed */
+ return TM_Ok;
+}
+
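+/*
+ * Outcome summary for test_lockmode_for_conflict (descriptive only):
+ *		xid is our own transaction        -> TM_SelfModified
+ *		xid in progress, modes conflict   -> TM_Ok, *needwait = true
+ *		xid in progress, no conflict      -> TM_Ok
+ *		xid aborted or crashed            -> TM_Ok
+ *		xid committed, locker only        -> TM_Ok
+ *		xid committed update, no conflict -> TM_Ok
+ *		xid committed update, conflict    -> TM_Updated or TM_Deleted
+ */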
+
+/*
+ * Recursive part of heap_lock_updated_tuple
+ *
+ * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
+ * xid with the given mode; if this tuple is updated, recurse to lock the new
+ * version as well.
+ */
+static TM_Result
+heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
+ LockTupleMode mode)
+{
+ TM_Result result;
+ ItemPointerData tupid;
+ HeapTupleData mytup;
+ Buffer buf;
+ uint16 new_infomask,
+ new_infomask2,
+ old_infomask,
+ old_infomask2;
+ TransactionId xmax,
+ new_xmax;
+ TransactionId priorXmax = InvalidTransactionId;
+ bool cleared_all_frozen = false;
+ bool pinned_desired_page;
+ Buffer vmbuffer = InvalidBuffer;
+ BlockNumber block;
+
+ ItemPointerCopy(tid, &tupid);
+
+ for (;;)
+ {
+ new_infomask = 0;
+ new_xmax = InvalidTransactionId;
+ block = ItemPointerGetBlockNumber(&tupid);
+ ItemPointerCopy(&tupid, &(mytup.t_self));
+
+ if (!heap_fetch(rel, SnapshotAny, &mytup, &buf))
+ {
+ /*
+ * if we fail to find the updated version of the tuple, it's
+ * because it was vacuumed/pruned away after its creator
+ * transaction aborted. So behave as if we got to the end of the
+ * chain, and there's no further tuple to lock: return success to
+ * caller.
+ */
+ result = TM_Ok;
+ goto out_unlocked;
+ }
+
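+		/*
+		 * l4 is the restart point for the current chain member: after
+		 * sleeping on a conflicting locker below, we jump back here and
+		 * recheck the tuple's state from scratch.
+		 */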
+l4:
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * Before locking the buffer, pin the visibility map page if it
+ * appears to be necessary. Since we haven't got the lock yet,
+ * someone else might be in the middle of changing this, so we'll need
+ * to recheck after we have the lock.
+ */
+ if (PageIsAllVisible(BufferGetPage(buf)))
+ {
+ visibilitymap_pin(rel, block, &vmbuffer);
+ pinned_desired_page = true;
+ }
+ else
+ pinned_desired_page = false;
+
+ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * If we didn't pin the visibility map page and the page has become
+ * all visible while we were busy locking the buffer, we'll have to
+ * unlock and re-lock, to avoid holding the buffer lock across I/O.
+ * That's a bit unfortunate, but hopefully shouldn't happen often.
+ *
+ * Note: in some paths through this function, we will reach here
+ * holding a pin on a vm page that may or may not be the one matching
+ * this page. If this page isn't all-visible, we won't use the vm
+ * page, but we hold onto such a pin till the end of the function.
+ */
+ if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
+ {
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+ visibilitymap_pin(rel, block, &vmbuffer);
+ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+ }
+
+ /*
+ * Check the tuple XMIN against prior XMAX, if any. If we reached the
+ * end of the chain, we're done, so return success.
+ */
+ if (TransactionIdIsValid(priorXmax) &&
+ !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
+ priorXmax))
+ {
+ result = TM_Ok;
+ goto out_locked;
+ }
+
+ /*
+ * Also check Xmin: if this tuple was created by an aborted
+ * (sub)transaction, then we already locked the last live one in the
+ * chain, thus we're done, so return success.
+ */
+ if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
+ {
+ result = TM_Ok;
+ goto out_locked;
+ }
+
+ old_infomask = mytup.t_data->t_infomask;
+ old_infomask2 = mytup.t_data->t_infomask2;
+ xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
+
+ /*
+ * If this tuple version has been updated or locked by some concurrent
+ * transaction(s), what we do depends on whether our lock mode
+ * conflicts with what those other transactions hold, and also on the
+ * status of them.
+ */
+ if (!(old_infomask & HEAP_XMAX_INVALID))
+ {
+ TransactionId rawxmax;
+ bool needwait;
+
+ rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
+ if (old_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ int nmembers;
+ int i;
+ MultiXactMember *members;
+
+ /*
+ * We don't need a test for pg_upgrade'd tuples: this is only
+ * applied to tuples after the first in an update chain. Said
+ * first tuple in the chain may well be locked-in-9.2-and-
+ * pg_upgraded, but that one was already locked by our caller,
+ * not us; and any subsequent ones cannot be because our
+ * caller must necessarily have obtained a snapshot later than
+ * the pg_upgrade itself.
+ */
+ Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
+
+ nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
+ HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
+ for (i = 0; i < nmembers; i++)
+ {
+ result = test_lockmode_for_conflict(members[i].status,
+ members[i].xid,
+ mode,
+ &mytup,
+ &needwait);
+
+ /*
+ * If the tuple was already locked by ourselves in a
+ * previous iteration of this (say heap_lock_tuple was
+ * forced to restart the locking loop because of a change
+ * in xmax), then we hold the lock already on this tuple
+ * version and we don't need to do anything; and this is
+ * not an error condition either. We just need to skip
+ * this tuple and continue locking the next version in the
+ * update chain.
+ */
+ if (result == TM_SelfModified)
+ {
+ pfree(members);
+ goto next;
+ }
+
+ if (needwait)
+ {
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+ XactLockTableWait(members[i].xid, rel,
+ &mytup.t_self,
+ XLTW_LockUpdated);
+ pfree(members);
+ goto l4;
+ }
+ if (result != TM_Ok)
+ {
+ pfree(members);
+ goto out_locked;
+ }
+ }
+ if (members)
+ pfree(members);
+ }
+ else
+ {
+ MultiXactStatus status;
+
+ /*
+ * For a non-multi Xmax, we first need to compute the
+ * corresponding MultiXactStatus by using the infomask bits.
+ */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
+ {
+ if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
+ status = MultiXactStatusForKeyShare;
+ else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
+ status = MultiXactStatusForShare;
+ else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
+ {
+ if (old_infomask2 & HEAP_KEYS_UPDATED)
+ status = MultiXactStatusForUpdate;
+ else
+ status = MultiXactStatusForNoKeyUpdate;
+ }
+ else
+ {
+ /*
+ * LOCK_ONLY present alone (a pg_upgraded tuple marked
+ * as share-locked in the old cluster) shouldn't be
+ * seen in the middle of an update chain.
+ */
+ elog(ERROR, "invalid lock status in tuple");
+ }
+ }
+ else
+ {
+ /* it's an update, but which kind? */
+ if (old_infomask2 & HEAP_KEYS_UPDATED)
+ status = MultiXactStatusUpdate;
+ else
+ status = MultiXactStatusNoKeyUpdate;
+ }
+
+ result = test_lockmode_for_conflict(status, rawxmax, mode,
+ &mytup, &needwait);
+
+ /*
+ * If the tuple was already locked by ourselves in a previous
+ * iteration of this (say heap_lock_tuple was forced to
+ * restart the locking loop because of a change in xmax), then
+ * we hold the lock already on this tuple version and we don't
+ * need to do anything; and this is not an error condition
+ * either. We just need to skip this tuple and continue
+ * locking the next version in the update chain.
+ */
+ if (result == TM_SelfModified)
+ goto next;
+
+ if (needwait)
+ {
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+ XactLockTableWait(rawxmax, rel, &mytup.t_self,
+ XLTW_LockUpdated);
+ goto l4;
+ }
+ if (result != TM_Ok)
+ {
+ goto out_locked;
+ }
+ }
+ }
+
+ /* compute the new Xmax and infomask values for the tuple ... */
+ compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
+ xid, mode, false,
+ &new_xmax, &new_infomask, &new_infomask2);
+
+ if (PageIsAllVisible(BufferGetPage(buf)) &&
+ visibilitymap_clear(rel, block, vmbuffer,
+ VISIBILITYMAP_ALL_FROZEN))
+ cleared_all_frozen = true;
+
+ START_CRIT_SECTION();
+
+ /* ... and set them */
+ HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
+ mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
+ mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ mytup.t_data->t_infomask |= new_infomask;
+ mytup.t_data->t_infomask2 |= new_infomask2;
+
+ MarkBufferDirty(buf);
+
+ /* XLOG stuff */
+ if (RelationNeedsWAL(rel))
+ {
+ xl_heap_lock_updated xlrec;
+ XLogRecPtr recptr;
+ Page page = BufferGetPage(buf);
+
+ XLogBeginInsert();
+ XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
+
+ xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
+ xlrec.xmax = new_xmax;
+ xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
+ xlrec.flags =
+ cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
+
+ XLogRegisterData((char *) &xlrec, SizeOfHeapLockUpdated);
+
+ recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
+
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+next:
+ /* if we find the end of update chain, we're done. */
+ if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
+ HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) ||
+ ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
+ HeapTupleHeaderIsOnlyLocked(mytup.t_data))
+ {
+ result = TM_Ok;
+ goto out_locked;
+ }
+
+ /* tail recursion */
+ priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
+ ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
+ UnlockReleaseBuffer(buf);
+ }
+
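+	/* Not reached: the loop above is only ever exited via goto. */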
+ result = TM_Ok;
+
+out_locked:
+ UnlockReleaseBuffer(buf);
+
+out_unlocked:
+ if (vmbuffer != InvalidBuffer)
+ ReleaseBuffer(vmbuffer);
+
+ return result;
+}
+
+/*
+ * heap_lock_updated_tuple
+ * Follow update chain when locking an updated tuple, acquiring locks (row
+ * marks) on the updated versions.
+ *
+ * The initial tuple is assumed to be already locked.
+ *
+ * This function doesn't check visibility; it just unconditionally marks the
+ * tuple(s) as locked. If any tuple in the updated chain is being deleted
+ * concurrently (or updated with the key being modified), sleep until the
+ * transaction doing it is finished.
+ *
+ * Note that we don't acquire heavyweight tuple locks on the tuples we walk
+ * when we have to wait for other transactions to release them, as opposed to
+ * what heap_lock_tuple does. The reason is that having more than one
+ * transaction walking the chain is probably uncommon enough that risk of
+ * starvation is not likely: one of the preconditions for being here is that
+ * the snapshot in use predates the update that created this tuple (because we
+ * started at an earlier version of the tuple), but at the same time such a
+ * transaction cannot be using repeatable read or serializable isolation
+ * levels, because that would lead to a serializability failure.
+ */
+static TM_Result
+heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
+ TransactionId xid, LockTupleMode mode)
+{
+ /*
+ * If the tuple has not been updated, or has moved into another partition
+ * (effectively a delete) stop here.
+ */
+ if (!HeapTupleHeaderIndicatesMovedPartitions(tuple->t_data) &&
+ !ItemPointerEquals(&tuple->t_self, ctid))
+ {
+ /*
+ * If this is the first possibly-multixact-able operation in the
+ * current transaction, set my per-backend OldestMemberMXactId
+ * setting. We can be certain that the transaction will never become a
+ * member of any older MultiXactIds than that. (We have to do this
+ * even if we end up just using our own TransactionId below, since
+ * some other backend could incorporate our XID into a MultiXact
+ * immediately afterwards.)
+ */
+ MultiXactIdSetOldestMember();
+
+ return heap_lock_updated_tuple_rec(rel, ctid, xid, mode);
+ }
+
+ /* nothing to lock */
+ return TM_Ok;
+}
+
+/*
+ * heap_finish_speculative - mark speculative insertion as successful
+ *
+ * To successfully finish a speculative insertion we have to clear the
+ * speculative token from the tuple. To do so, the t_ctid field, which will
+ * contain a speculative token value, is modified in place to point to the
+ * tuple itself, which is characteristic of a newly inserted ordinary tuple.
+ *
+ * NB: It is not ok to commit without either finishing or aborting a
+ * speculative insertion. We could treat speculative tuples of committed
+ * transactions implicitly as completed, but then we would have to be prepared
+ * to deal with speculative tokens on committed tuples. That wouldn't be
+ * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
+ * but clearing the token at completion isn't very expensive either.
+ * An explicit confirmation WAL record also makes logical decoding simpler.
+ */
+void
+heap_finish_speculative(Relation relation, ItemPointer tid)
+{
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
+
+ buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ page = (Page) BufferGetPage(buffer);
+
+ offnum = ItemPointerGetOffsetNumber(tid);
+ if (PageGetMaxOffsetNumber(page) >= offnum)
+ lp = PageGetItemId(page, offnum);
+
+ if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+ elog(ERROR, "invalid lp");
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ /* SpecTokenOffsetNumber should be distinguishable from any real offset */
+ StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
+ "invalid speculative token constant");
+
+ /* NO EREPORT(ERROR) from here till changes are logged */
+ START_CRIT_SECTION();
+
+ Assert(HeapTupleHeaderIsSpeculative(htup));
+
+ MarkBufferDirty(buffer);
+
+ /*
+	 * Replace the speculative insertion token with a real t_ctid, pointing
+	 * the tuple at itself, as on regular tuples.
+ */
+ htup->t_ctid = *tid;
+
+ /* XLOG stuff */
+ if (RelationNeedsWAL(relation))
+ {
+ xl_heap_confirm xlrec;
+ XLogRecPtr recptr;
+
+ xlrec.offnum = ItemPointerGetOffsetNumber(tid);
+
+ XLogBeginInsert();
+
+ /* We want the same filtering on this as on a plain insert */
+ XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
+
+ XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+
+ recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
+
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ UnlockReleaseBuffer(buffer);
+}
+
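+/*
+ * Illustrative lifecycle (a sketch, not upstream code): speculative
+ * insertion for INSERT ... ON CONFLICT proceeds roughly as
+ *
+ *		heap_insert(rel, tup, cid, HEAP_INSERT_SPECULATIVE, NULL);
+ *		if (a conflicting tuple appeared concurrently)
+ *			heap_abort_speculative(rel, &tup->t_self);
+ *		else
+ *			heap_finish_speculative(rel, &tup->t_self);
+ *
+ * with the token having been stored in t_ctid beforehand via
+ * HeapTupleHeaderSetSpeculativeToken.
+ */
+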
+/*
+ * heap_abort_speculative - kill a speculatively inserted tuple
+ *
+ * Marks a tuple that was speculatively inserted in the same command as dead,
+ * by setting its xmin as invalid. That makes it immediately appear as dead
+ * to all transactions, including our own. In particular, it makes
+ * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
+ * inserting a duplicate key value won't unnecessarily wait for our whole
+ * transaction to finish (it'll just wait for our speculative insertion to
+ * finish).
+ *
+ * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
+ * that arise due to a mutual dependency that is not user visible. By
+ * definition, unprincipled deadlocks cannot be prevented by the user
+ * reordering lock acquisition in client code, because the implementation level
+ * lock acquisitions are not under the user's direct control. If speculative
+ * inserters did not take this precaution, then under high concurrency they
+ * could deadlock with each other, which would not be acceptable.
+ *
+ * This is somewhat redundant with heap_delete, but we prefer to have a
+ * dedicated routine with stripped down requirements. Note that this is also
+ * used to delete the TOAST tuples created during speculative insertion.
+ *
+ * This routine does not affect logical decoding as it only looks at
+ * confirmation records.
+ */
+void
+heap_abort_speculative(Relation relation, ItemPointer tid)
+{
+ TransactionId xid = GetCurrentTransactionId();
+ ItemId lp;
+ HeapTupleData tp;
+ Page page;
+ BlockNumber block;
+ Buffer buffer;
+ TransactionId prune_xid;
+
+ Assert(ItemPointerIsValid(tid));
+
+ block = ItemPointerGetBlockNumber(tid);
+ buffer = ReadBuffer(relation, block);
+ page = BufferGetPage(buffer);
+
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+	 * Page can't be all visible; we just inserted into it, and are still
+ * running.
+ */
+ Assert(!PageIsAllVisible(page));
+
+ lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
+ Assert(ItemIdIsNormal(lp));
+
+ tp.t_tableOid = RelationGetRelid(relation);
+ tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
+ tp.t_len = ItemIdGetLength(lp);
+ tp.t_self = *tid;
+
+ /*
+ * Sanity check that the tuple really is a speculatively inserted tuple,
+ * inserted by us.
+ */
+ if (tp.t_data->t_choice.t_heap.t_xmin != xid)
+ elog(ERROR, "attempted to kill a tuple inserted by another transaction");
+ if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
+ elog(ERROR, "attempted to kill a non-speculative tuple");
+ Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
+
+ /*
+ * No need to check for serializable conflicts here. There is never a
+ * need for a combo CID, either. No need to extract replica identity, or
+ * do anything special with infomask bits.
+ */
+
+ START_CRIT_SECTION();
+
+ /*
+ * The tuple will become DEAD immediately. Flag that this page is a
+ * candidate for pruning by setting xmin to TransactionXmin. While not
+ * immediately prunable, it is the oldest xid we can cheaply determine
+ * that's safe against wraparound / being older than the table's
+ * relfrozenxid. To defend against the unlikely case of a new relation
+ * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
+ * if so (vacuum can't subsequently move relfrozenxid to beyond
+ * TransactionXmin, so there's no race here).
+ */
+ Assert(TransactionIdIsValid(TransactionXmin));
+ if (TransactionIdPrecedes(TransactionXmin, relation->rd_rel->relfrozenxid))
+ prune_xid = relation->rd_rel->relfrozenxid;
+ else
+ prune_xid = TransactionXmin;
+ PageSetPrunable(page, prune_xid);
+
+ /* store transaction information of xact deleting the tuple */
+ tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+ tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+
+ /*
+ * Set the tuple header xmin to InvalidTransactionId. This makes the
+	 * tuple immediately invisible to everyone. (In particular, to any
+ * transactions waiting on the speculative token, woken up later.)
+ */
+ HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
+
+ /* Clear the speculative insertion token too */
+ tp.t_data->t_ctid = tp.t_self;
+
+ MarkBufferDirty(buffer);
+
+ /*
+ * XLOG stuff
+ *
+ * The WAL records generated here match heap_delete(). The same recovery
+ * routines are used.
+ */
+ if (RelationNeedsWAL(relation))
+ {
+ xl_heap_delete xlrec;
+ XLogRecPtr recptr;
+
+ xlrec.flags = XLH_DELETE_IS_SUPER;
+ xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
+ tp.t_data->t_infomask2);
+ xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
+ xlrec.xmax = xid;
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+
+ /* No replica identity & replication origin logged */
+
+ recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
+
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ if (HeapTupleHasExternal(&tp))
+ {
+ Assert(!IsToastRelation(relation));
+ heap_toast_delete(relation, &tp, true);
+ }
+
+ /*
+ * Never need to mark tuple for invalidation, since catalogs don't support
+ * speculative insertion
+ */
+
+ /* Now we can release the buffer */
+ ReleaseBuffer(buffer);
+
+ /* count deletion, as we counted the insertion too */
+ pgstat_count_heap_delete(relation);
+}
+
+/*
+ * heap_inplace_update - update a tuple "in place" (ie, overwrite it)
+ *
+ * Overwriting violates both MVCC and transactional safety, so the uses
+ * of this function in Postgres are extremely limited. Nonetheless we
+ * find some places to use it.
+ *
+ * The tuple cannot change size, and therefore it's reasonable to assume
+ * that its null bitmap (if any) doesn't change either. So we just
+ * overwrite the data portion of the tuple without touching the null
+ * bitmap or any of the header fields.
+ *
+ * tuple is an in-memory tuple structure containing the data to be written
+ * over the target tuple. Also, tuple->t_self identifies the target tuple.
+ *
+ * Note that the tuple updated here had better not come directly from the
+ * syscache if the relation has a toast relation, as this tuple could
+ * include toast values that have been expanded, causing a failure here.
+ */
+void
+heap_inplace_update(Relation relation, HeapTuple tuple)
+{
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
+ uint32 oldlen;
+ uint32 newlen;
+
+ /*
+ * For now, we don't allow parallel updates. Unlike a regular update,
+ * this should never create a combo CID, so it might be possible to relax
+ * this restriction, but not without more thought and testing. It's not
+ * clear that it would be useful, anyway.
+ */
+ if (IsInParallelMode())
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
+ errmsg("cannot update tuples during a parallel operation")));
+
+ buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ page = (Page) BufferGetPage(buffer);
+
+ offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
+ if (PageGetMaxOffsetNumber(page) >= offnum)
+ lp = PageGetItemId(page, offnum);
+
+ if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+ elog(ERROR, "invalid lp");
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ oldlen = ItemIdGetLength(lp) - htup->t_hoff;
+ newlen = tuple->t_len - tuple->t_data->t_hoff;
+ if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
+ elog(ERROR, "wrong tuple length");
+
+ /* NO EREPORT(ERROR) from here till changes are logged */
+ START_CRIT_SECTION();
+
+ memcpy((char *) htup + htup->t_hoff,
+ (char *) tuple->t_data + tuple->t_data->t_hoff,
+ newlen);
+
+ MarkBufferDirty(buffer);
+
+ /* XLOG stuff */
+ if (RelationNeedsWAL(relation))
+ {
+ xl_heap_inplace xlrec;
+ XLogRecPtr recptr;
+
+ xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
+
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+ XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
+
+ /* inplace updates aren't decoded atm, don't log the origin */
+
+ recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
+
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ UnlockReleaseBuffer(buffer);
+
+ /*
+ * Send out shared cache inval if necessary. Note that because we only
+ * pass the new version of the tuple, this mustn't be used for any
+ * operations that could change catcache lookup keys. But we aren't
+ * bothering with index updates either, so that's true a fortiori.
+ */
+ if (!IsBootstrapProcessingMode())
+ CacheInvalidateHeapTuple(relation, tuple, NULL);
+}
+
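+/*
+ * Illustrative sketch (not compiled; variable names are hypothetical): the
+ * classic in-core use of this routine is VACUUM overwriting pg_class
+ * statistics, roughly
+ *
+ *		ctup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
+ *		pgcform = (Form_pg_class) GETSTRUCT(ctup);
+ *		pgcform->relpages = (int32) num_pages;
+ *		pgcform->reltuples = (float4) num_tuples;
+ *		heap_inplace_update(pg_class_rel, ctup);
+ *
+ * which works because the overwritten fields are fixed-width, keeping the
+ * tuple length unchanged as required above.
+ */
+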
+#define FRM_NOOP 0x0001
+#define FRM_INVALIDATE_XMAX 0x0002
+#define FRM_RETURN_IS_XID 0x0004
+#define FRM_RETURN_IS_MULTI 0x0008
+#define FRM_MARK_COMMITTED 0x0010
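+
+/*
+ * These FRM_* values are bit flags and may be combined; in particular,
+ * FRM_MARK_COMMITTED is only meaningful together with FRM_RETURN_IS_XID.
+ */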
+
+/*
+ * FreezeMultiXactId
+ * Determine what to do during freezing when a tuple is marked by a
+ * MultiXactId.
+ *
+ * NB -- this might have the side-effect of creating a new MultiXactId!
+ *
+ * "flags" is an output value; it's used to tell caller what to do on return.
+ * Possible flags are:
+ * FRM_NOOP
+ * don't do anything -- keep existing Xmax
+ * FRM_INVALIDATE_XMAX
+ * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
+ * FRM_RETURN_IS_XID
+ * The Xid return value is a single update Xid to set as xmax.
+ * FRM_MARK_COMMITTED
+ * Xmax can be marked as HEAP_XMAX_COMMITTED
+ * FRM_RETURN_IS_MULTI
+ * The return value is a new MultiXactId to set as new Xmax.
+ * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
+ */
+static TransactionId
+FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
+ TransactionId relfrozenxid, TransactionId relminmxid,
+ TransactionId cutoff_xid, MultiXactId cutoff_multi,
+ uint16 *flags)
+{
+ TransactionId xid = InvalidTransactionId;
+ int i;
+ MultiXactMember *members;
+ int nmembers;
+ bool need_replace;
+ int nnewmembers;
+ MultiXactMember *newmembers;
+ bool has_lockers;
+ TransactionId update_xid;
+ bool update_committed;
+
+ *flags = 0;
+
+	/* We should only be called when xmax is a MultiXactId */
+ Assert(t_infomask & HEAP_XMAX_IS_MULTI);
+
+ if (!MultiXactIdIsValid(multi) ||
+ HEAP_LOCKED_UPGRADED(t_infomask))
+ {
+ /* Ensure infomask bits are appropriately set/reset */
+ *flags |= FRM_INVALIDATE_XMAX;
+ return InvalidTransactionId;
+ }
+ else if (MultiXactIdPrecedes(multi, relminmxid))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("found multixact %u from before relminmxid %u",
+ multi, relminmxid)));
+ else if (MultiXactIdPrecedes(multi, cutoff_multi))
+ {
+ /*
+ * This old multi cannot possibly have members still running, but
+ * verify just in case. If it was a locker only, it can be removed
+ * without any further consideration; but if it contained an update,
+ * we might need to preserve it.
+ */
+ if (MultiXactIdIsRunning(multi,
+ HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("multixact %u from before cutoff %u found to be still running",
+ multi, cutoff_multi)));
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
+ {
+ *flags |= FRM_INVALIDATE_XMAX;
+ xid = InvalidTransactionId; /* not strictly necessary */
+ }
+ else
+ {
+ /* replace multi by update xid */
+ xid = MultiXactIdGetUpdateXid(multi, t_infomask);
+
+ /* wasn't only a lock, xid needs to be valid */
+ Assert(TransactionIdIsValid(xid));
+
+ if (TransactionIdPrecedes(xid, relfrozenxid))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("found update xid %u from before relfrozenxid %u",
+ xid, relfrozenxid)));
+
+ /*
+ * If the xid is older than the cutoff, it has to have aborted,
+ * otherwise the tuple would have gotten pruned away.
+ */
+ if (TransactionIdPrecedes(xid, cutoff_xid))
+ {
+ if (TransactionIdDidCommit(xid))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("cannot freeze committed update xid %u", xid)));
+ *flags |= FRM_INVALIDATE_XMAX;
+ xid = InvalidTransactionId; /* not strictly necessary */
+ }
+ else
+ {
+ *flags |= FRM_RETURN_IS_XID;
+ }
+ }
+
+ return xid;
+ }
+
+ /*
+ * This multixact might or might not have members still running, but
+ * we know it's valid and is newer than the cutoff point for multis.
+ * However, some member(s) of it may be below the cutoff for Xids, so we
+ * need to walk the whole members array to figure out what to do, if
+ * anything.
+ */
+
+ nmembers =
+ GetMultiXactIdMembers(multi, &members, false,
+ HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
+ if (nmembers <= 0)
+ {
+ /* Nothing worth keeping */
+ *flags |= FRM_INVALIDATE_XMAX;
+ return InvalidTransactionId;
+ }
+
+ /* is there anything older than the cutoff? */
+ need_replace = false;
+ for (i = 0; i < nmembers; i++)
+ {
+ if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
+ {
+ need_replace = true;
+ break;
+ }
+ }
+
+ /*
+ * In the simplest case, there is no member older than the cutoff; we can
+ * keep the existing MultiXactId as is.
+ */
+ if (!need_replace)
+ {
+ *flags |= FRM_NOOP;
+ pfree(members);
+ return InvalidTransactionId;
+ }
+
+ /*
+ * If the multi needs to be updated, figure out which members we need to
+ * keep.
+ */
+ nnewmembers = 0;
+ newmembers = palloc(sizeof(MultiXactMember) * nmembers);
+ has_lockers = false;
+ update_xid = InvalidTransactionId;
+ update_committed = false;
+
+ for (i = 0; i < nmembers; i++)
+ {
+ /*
+ * Determine whether to keep this member or ignore it.
+ */
+ if (ISUPDATE_from_mxstatus(members[i].status))
+ {
+ TransactionId xid = members[i].xid;
+
+ Assert(TransactionIdIsValid(xid));
+ if (TransactionIdPrecedes(xid, relfrozenxid))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("found update xid %u from before relfrozenxid %u",
+ xid, relfrozenxid)));
+
+ /*
+ * It's an update; should we keep it? If the transaction is known
+ * aborted or crashed then it's okay to ignore it, otherwise not.
+ * Note that an updater older than cutoff_xid cannot possibly be
+ * committed, because HeapTupleSatisfiesVacuum would have returned
+ * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
+ *
+ * As with all tuple visibility routines, it's critical to test
+ * TransactionIdIsInProgress before TransactionIdDidCommit,
+ * because of race conditions explained in detail in
+ * heapam_visibility.c.
+ */
+ if (TransactionIdIsCurrentTransactionId(xid) ||
+ TransactionIdIsInProgress(xid))
+ {
+ Assert(!TransactionIdIsValid(update_xid));
+ update_xid = xid;
+ }
+ else if (TransactionIdDidCommit(xid))
+ {
+ /*
+ * The transaction committed, so we can tell caller to set
+ * HEAP_XMAX_COMMITTED. (We can only do this because we know
+ * the transaction is not running.)
+ */
+ Assert(!TransactionIdIsValid(update_xid));
+ update_committed = true;
+ update_xid = xid;
+ }
+ else
+ {
+ /*
+ * Not in progress, not committed -- must be aborted or
+ * crashed; we can ignore it.
+ */
+ }
+
+ /*
+ * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
+ * update Xid cannot possibly be older than the xid cutoff. The
+ * presence of such a tuple would cause corruption, so be paranoid
+ * and check.
+ */
+ if (TransactionIdIsValid(update_xid) &&
+ TransactionIdPrecedes(update_xid, cutoff_xid))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("found update xid %u from before xid cutoff %u",
+ update_xid, cutoff_xid)));
+
+ /*
+ * If we determined that it's an Xid corresponding to an update
+ * that must be retained, additionally add it to the list of
+ * members of the new Multi, in case we end up using that. (We
+ * might still decide to use only an update Xid and not a multi,
+ * but it's easier to maintain the list as we walk the old members
+ * list.)
+ */
+ if (TransactionIdIsValid(update_xid))
+ newmembers[nnewmembers++] = members[i];
+ }
+ else
+ {
+ /* We only keep lockers if they are still running */
+ if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
+ TransactionIdIsInProgress(members[i].xid))
+ {
+ /* running locker cannot possibly be older than the cutoff */
+ Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
+ newmembers[nnewmembers++] = members[i];
+ has_lockers = true;
+ }
+ }
+ }
+
+ pfree(members);
+
+ if (nnewmembers == 0)
+ {
+ /* nothing worth keeping!? Tell caller to remove the whole thing */
+ *flags |= FRM_INVALIDATE_XMAX;
+ xid = InvalidTransactionId;
+ }
+ else if (TransactionIdIsValid(update_xid) && !has_lockers)
+ {
+ /*
+ * If there's a single member and it's an update, pass it back alone
+ * without creating a new Multi. (XXX we could do this when there's a
+ * single remaining locker, too, but that would complicate the API too
+ * much; moreover, the case with the single updater is more
+ * interesting, because those are longer-lived.)
+ */
+ Assert(nnewmembers == 1);
+ *flags |= FRM_RETURN_IS_XID;
+ if (update_committed)
+ *flags |= FRM_MARK_COMMITTED;
+ xid = update_xid;
+ }
+ else
+ {
+ /*
+ * Create a new multixact with the surviving members of the previous
+ * one, to set as new Xmax in the tuple.
+ */
+ xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
+ *flags |= FRM_RETURN_IS_MULTI;
+ }
+
+ pfree(newmembers);
+
+ return xid;
+}
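+
+/*
+ * Illustrative only -- the real dispatch lives in heap_prepare_freeze_tuple
+ * below. A caller of FreezeMultiXactId is expected to act on "flags"
+ * roughly like this:
+ *
+ *		newxmax = FreezeMultiXactId(xid, t_infomask,
+ *									relfrozenxid, relminmxid,
+ *									cutoff_xid, cutoff_multi, &flags);
+ *		if (flags & FRM_INVALIDATE_XMAX)
+ *			... clear xmax and set the XMAX_INVALID infomask bit ...
+ *		else if (flags & FRM_RETURN_IS_XID)
+ *			... install newxmax as a plain update Xid, marking it
+ *			committed if FRM_MARK_COMMITTED is also set ...
+ *		else if (flags & FRM_RETURN_IS_MULTI)
+ *			... install newxmax plus the infomask bits obtained from
+ *			GetMultiXactIdHintBits() ...
+ */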
+
+/*
+ * heap_prepare_freeze_tuple
+ *
+ * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
+ * are older than the specified cutoff XID and cutoff MultiXactId. If so,
+ * setup enough state (in the *frz output argument) to later execute and
+ * WAL-log what we would need to do, and return true. Return false if nothing
+ * is to be changed. In addition, set *totally_frozen_p to true if the tuple
+ * will be totally frozen after these operations are performed and false if
+ * more freezing will eventually be required.
+ *
+ * Caller is responsible for setting the offset field, if appropriate.
+ *
+ * It is assumed that the caller has checked the tuple with
+ * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
+ * (else we should be removing the tuple, not freezing it).
+ *
+ * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
+ * XID older than it could neither be running nor seen as running by any
+ * open transaction. This ensures that the replacement will not change
+ * anyone's idea of the tuple state.
+ * Similarly, cutoff_multi must be less than or equal to the smallest
+ * MultiXactId used by any transaction currently open.
+ *
+ * If the tuple is in a shared buffer, caller must hold an exclusive lock on
+ * that buffer.
+ *
+ * NB: It is not enough to set hint bits to indicate something is
+ * committed/invalid -- they might not be set on a standby, or after crash
+ * recovery. We really need to remove old xids.
+ */
+bool
+heap_prepare_freeze_tuple(HeapTupleHeader tuple,
+ TransactionId relfrozenxid, TransactionId relminmxid,
+ TransactionId cutoff_xid, TransactionId cutoff_multi,
+ xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
+{
+ bool changed = false;
+ bool xmax_already_frozen = false;
+ bool xmin_frozen;
+ bool freeze_xmax;
+ TransactionId xid;
+
+ frz->frzflags = 0;
+ frz->t_infomask2 = tuple->t_infomask2;
+ frz->t_infomask = tuple->t_infomask;
+ frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
+
+ /*
+ * Process xmin. xmin_frozen has two slightly different meanings: in the
+ * !XidIsNormal case, it means "the xmin doesn't need any freezing" (it's
+ * already a permanent value), while in the block below it is set true to
+ * mean "xmin won't need freezing after what we do to it here" (false
+ * otherwise). In both cases we're allowed to set totally_frozen, as far
+ * as xmin is concerned.
+ */
+ xid = HeapTupleHeaderGetXmin(tuple);
+ if (!TransactionIdIsNormal(xid))
+ xmin_frozen = true;
+ else
+ {
+ if (TransactionIdPrecedes(xid, relfrozenxid))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("found xmin %u from before relfrozenxid %u",
+ xid, relfrozenxid)));
+
+ xmin_frozen = TransactionIdPrecedes(xid, cutoff_xid);
+ if (xmin_frozen)
+ {
+ if (!TransactionIdDidCommit(xid))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("uncommitted xmin %u from before xid cutoff %u needs to be frozen",
+ xid, cutoff_xid)));
+
+ frz->t_infomask |= HEAP_XMIN_FROZEN;
+ changed = true;
+ }
+ }
+
+ /*
+ * Process xmax. To thoroughly examine the current Xmax value we need to
+ * resolve a MultiXactId to its member Xids, in case some of them are
+ * below the given cutoff for Xids. In that case, those values might need
+ * freezing, too. Also, if a multi needs freezing, we cannot simply take
+ * it out --- if there's a live updater Xid, it needs to be kept.
+ *
+ * Make sure to keep heap_tuple_needs_freeze in sync with this.
+ */
+ xid = HeapTupleHeaderGetRawXmax(tuple);
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId newxmax;
+ uint16 flags;
+
+ newxmax = FreezeMultiXactId(xid, tuple->t_infomask,
+ relfrozenxid, relminmxid,
+ cutoff_xid, cutoff_multi, &flags);
+
+ freeze_xmax = (flags & FRM_INVALIDATE_XMAX);
+
+ if (flags & FRM_RETURN_IS_XID)
+ {
+ /*
+ * NB -- some of these transformations are only valid because we
+ * know the return Xid is a tuple updater (i.e. not merely a
+ * locker). Also note that the only reason we don't explicitly
+ * worry about HEAP_KEYS_UPDATED is because it lives in
+ * t_infomask2 rather than t_infomask.
+ */
+ frz->t_infomask &= ~HEAP_XMAX_BITS;
+ frz->xmax = newxmax;
+ if (flags & FRM_MARK_COMMITTED)
+ frz->t_infomask |= HEAP_XMAX_COMMITTED;
+ changed = true;
+ }
+ else if (flags & FRM_RETURN_IS_MULTI)
+ {
+ uint16 newbits;
+ uint16 newbits2;
+
+ /*
+ * We can't use GetMultiXactIdHintBits directly on the new multi
+ * here; that routine initializes the masks to all zeroes, which
+ * would lose other bits we need. Doing it this way ensures all
+ * unrelated bits remain untouched.
+ */
+ frz->t_infomask &= ~HEAP_XMAX_BITS;
+ frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
+ frz->t_infomask |= newbits;
+ frz->t_infomask2 |= newbits2;
+
+ frz->xmax = newxmax;
+
+ changed = true;
+ }
+ }
+ else if (TransactionIdIsNormal(xid))
+ {
+ if (TransactionIdPrecedes(xid, relfrozenxid))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("found xmax %u from before relfrozenxid %u",
+ xid, relfrozenxid)));
+
+ if (TransactionIdPrecedes(xid, cutoff_xid))
+ {
+ /*
+ * If we freeze xmax, make absolutely sure that it's not an XID
+ * that is important. (Note, a lock-only xmax can be removed
+ * independent of committedness, since a committed lock holder has
+ * released the lock).
+ */
+ if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
+ TransactionIdDidCommit(xid))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("cannot freeze committed xmax %u",
+ xid)));
+ freeze_xmax = true;
+ }
+ else
+ freeze_xmax = false;
+ }
+ else if ((tuple->t_infomask & HEAP_XMAX_INVALID) ||
+ !TransactionIdIsValid(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ freeze_xmax = false;
+ xmax_already_frozen = true;
+ }
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("found xmax %u (infomask 0x%04x) not frozen, not multi, not normal",
+ xid, tuple->t_infomask)));
+
+ if (freeze_xmax)
+ {
+ Assert(!xmax_already_frozen);
+
+ frz->xmax = InvalidTransactionId;
+
+ /*
+ * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
+ * LOCKED. Normalize to INVALID just to be sure no one gets confused.
+ * Also get rid of the HEAP_KEYS_UPDATED bit.
+ */
+ frz->t_infomask &= ~HEAP_XMAX_BITS;
+ frz->t_infomask |= HEAP_XMAX_INVALID;
+ frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
+ frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ changed = true;
+ }
+
+ /*
+ * Old-style VACUUM FULL is gone, but we have to keep this code as long as
+ * we support having MOVED_OFF/MOVED_IN tuples in the database.
+ */
+ if (tuple->t_infomask & HEAP_MOVED)
+ {
+ xid = HeapTupleHeaderGetXvac(tuple);
+
+ /*
+ * For Xvac, we ignore the cutoff_xid and just always perform the
+ * freeze operation. The newest release in which such a value can
+ * actually be set is PostgreSQL 8.4, because old-style VACUUM FULL
+ * was removed in PostgreSQL 9.0. Note that if we were to respect
+ * cutoff_xid here, we'd need to make sure to clear totally_frozen
+ * when we skipped freezing on that basis.
+ */
+ if (TransactionIdIsNormal(xid))
+ {
+ /*
+ * If a MOVED_OFF tuple is not dead, the xvac transaction must
+ * have failed; whereas a non-dead MOVED_IN tuple must mean the
+ * xvac transaction succeeded.
+ */
+ if (tuple->t_infomask & HEAP_MOVED_OFF)
+ frz->frzflags |= XLH_INVALID_XVAC;
+ else
+ frz->frzflags |= XLH_FREEZE_XVAC;
+
+ /*
+ * Might as well fix the hint bits too; usually XMIN_COMMITTED
+ * will already be set here, but there's a small chance not.
+ */
+ Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
+ frz->t_infomask |= HEAP_XMIN_COMMITTED;
+ changed = true;
+ }
+ }
+
+ *totally_frozen_p = (xmin_frozen &&
+ (freeze_xmax || xmax_already_frozen));
+ return changed;
+}
+
+/*
+ * heap_execute_freeze_tuple
+ * Execute the prepared freezing of a tuple.
+ *
+ * Caller is responsible for ensuring that no other backend can access the
+ * storage underlying this tuple, either by holding an exclusive lock on the
+ * buffer containing it (which is what lazy VACUUM does), or by having it be
+ * in private storage (which is what CLUSTER and friends do).
+ *
+ * Note: it might seem we could make the changes without exclusive lock, since
+ * TransactionId read/write is assumed atomic anyway. However there is a race
+ * condition: someone who just fetched an old XID that we overwrite here could
+ * conceivably not finish checking the XID against pg_xact before we finish
+ * the VACUUM and perhaps truncate off the part of pg_xact they need. Getting
+ * exclusive lock ensures no other backend is in the process of checking the
+ * tuple status. Also, getting exclusive lock makes it safe to adjust the
+ * infomask bits.
+ *
+ * NB: All code in here must be safe to execute during crash recovery!
+ */
+void
+heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
+{
+ HeapTupleHeaderSetXmax(tuple, frz->xmax);
+
+ if (frz->frzflags & XLH_FREEZE_XVAC)
+ HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
+
+ if (frz->frzflags & XLH_INVALID_XVAC)
+ HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
+
+ tuple->t_infomask = frz->t_infomask;
+ tuple->t_infomask2 = frz->t_infomask2;
+}
+
+/*
+ * heap_freeze_tuple
+ * Freeze tuple in place, without WAL logging.
+ *
+ * Useful for callers like CLUSTER that perform their own WAL logging.
+ */
+bool
+heap_freeze_tuple(HeapTupleHeader tuple,
+ TransactionId relfrozenxid, TransactionId relminmxid,
+ TransactionId cutoff_xid, TransactionId cutoff_multi)
+{
+ xl_heap_freeze_tuple frz;
+ bool do_freeze;
+ bool tuple_totally_frozen;
+
+ do_freeze = heap_prepare_freeze_tuple(tuple,
+ relfrozenxid, relminmxid,
+ cutoff_xid, cutoff_multi,
+ &frz, &tuple_totally_frozen);
+
+ /*
+ * Note that because this is not a WAL-logged operation, we don't need to
+ * fill in the offset in the freeze record.
+ */
+
+ if (do_freeze)
+ heap_execute_freeze_tuple(tuple, &frz);
+ return do_freeze;
+}
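+
+/*
+ * A sketch, with illustrative variable names, of how a WAL-logging caller
+ * (lazy VACUUM, in vacuumlazy.c) is expected to combine the prepare/execute
+ * split with log_heap_freeze() further below; this is not a verbatim
+ * excerpt:
+ *
+ *		if (heap_prepare_freeze_tuple(htup, relfrozenxid, relminmxid,
+ *									  cutoff_xid, cutoff_multi,
+ *									  &frozen[nfrozen], &totally_frozen))
+ *			frozen[nfrozen++].offset = offnum;	(caller sets the offset)
+ *		...
+ *		START_CRIT_SECTION();
+ *		... heap_execute_freeze_tuple() for each prepared tuple ...
+ *		MarkBufferDirty(buf);
+ *		if (RelationNeedsWAL(rel))
+ *			PageSetLSN(page, log_heap_freeze(rel, buf, cutoff_xid,
+ *											 frozen, nfrozen));
+ *		END_CRIT_SECTION();
+ */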
+
+/*
+ * For a given MultiXactId, return the hint bits that should be set in the
+ * tuple's infomask.
+ *
+ * Normally this should be called for a multixact that was just created, and
+ * so is on our local cache, so the GetMembers call is fast.
+ */
+static void
+GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
+ uint16 *new_infomask2)
+{
+ int nmembers;
+ MultiXactMember *members;
+ int i;
+ uint16 bits = HEAP_XMAX_IS_MULTI;
+ uint16 bits2 = 0;
+ bool has_update = false;
+ LockTupleMode strongest = LockTupleKeyShare;
+
+ /*
+ * We only use this on multis we just created, so they cannot be
+ * pre-pg_upgrade values.
+ */
+ nmembers = GetMultiXactIdMembers(multi, &members, false, false);
+
+ for (i = 0; i < nmembers; i++)
+ {
+ LockTupleMode mode;
+
+ /*
+ * Remember the strongest lock mode held by any member of the
+ * multixact.
+ */
+ mode = TUPLOCK_from_mxstatus(members[i].status);
+ if (mode > strongest)
+ strongest = mode;
+
+ /* See what other bits we need */
+ switch (members[i].status)
+ {
+ case MultiXactStatusForKeyShare:
+ case MultiXactStatusForShare:
+ case MultiXactStatusForNoKeyUpdate:
+ break;
+
+ case MultiXactStatusForUpdate:
+ bits2 |= HEAP_KEYS_UPDATED;
+ break;
+
+ case MultiXactStatusNoKeyUpdate:
+ has_update = true;
+ break;
+
+ case MultiXactStatusUpdate:
+ bits2 |= HEAP_KEYS_UPDATED;
+ has_update = true;
+ break;
+ }
+ }
+
+ if (strongest == LockTupleExclusive ||
+ strongest == LockTupleNoKeyExclusive)
+ bits |= HEAP_XMAX_EXCL_LOCK;
+ else if (strongest == LockTupleShare)
+ bits |= HEAP_XMAX_SHR_LOCK;
+ else if (strongest == LockTupleKeyShare)
+ bits |= HEAP_XMAX_KEYSHR_LOCK;
+
+ if (!has_update)
+ bits |= HEAP_XMAX_LOCK_ONLY;
+
+ if (nmembers > 0)
+ pfree(members);
+
+ *new_infomask = bits;
+ *new_infomask2 = bits2;
+}
+
+/*
+ * MultiXactIdGetUpdateXid
+ *
+ * Given a multixact Xmax and corresponding infomask, which does not have the
+ * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
+ * transaction.
+ *
+ * Caller is expected to check the status of the updating transaction, if
+ * necessary.
+ */
+static TransactionId
+MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
+{
+ TransactionId update_xact = InvalidTransactionId;
+ MultiXactMember *members;
+ int nmembers;
+
+ Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
+ Assert(t_infomask & HEAP_XMAX_IS_MULTI);
+
+ /*
+ * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
+ * pre-pg_upgrade.
+ */
+ nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
+
+ if (nmembers > 0)
+ {
+ int i;
+
+ for (i = 0; i < nmembers; i++)
+ {
+ /* Ignore lockers */
+ if (!ISUPDATE_from_mxstatus(members[i].status))
+ continue;
+
+ /* there can be at most one updater */
+ Assert(update_xact == InvalidTransactionId);
+ update_xact = members[i].xid;
+#ifndef USE_ASSERT_CHECKING
+
+ /*
+ * In an assert-enabled build, walk the whole array to ensure
+ * there's no other updater; in a non-assert build, stop at the
+ * first one.
+ */
+ break;
+#endif
+ }
+
+ pfree(members);
+ }
+
+ return update_xact;
+}
+
+/*
+ * HeapTupleGetUpdateXid
+ * As above, but use a HeapTupleHeader
+ *
+ * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
+ * checking the hint bits.
+ */
+TransactionId
+HeapTupleGetUpdateXid(HeapTupleHeader tuple)
+{
+ return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tuple),
+ tuple->t_infomask);
+}
+
+/*
+ * Does the given multixact conflict with the current transaction grabbing a
+ * tuple lock of the given strength?
+ *
+ * The passed infomask pairs up with the given multixact in the tuple header.
+ *
+ * If current_is_member is not NULL, it is set to 'true' if the current
+ * transaction is a member of the given multixact.
+ */
+static bool
+DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
+ LockTupleMode lockmode, bool *current_is_member)
+{
+ int nmembers;
+ MultiXactMember *members;
+ bool result = false;
+ LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
+
+ if (HEAP_LOCKED_UPGRADED(infomask))
+ return false;
+
+ nmembers = GetMultiXactIdMembers(multi, &members, false,
+ HEAP_XMAX_IS_LOCKED_ONLY(infomask));
+ if (nmembers >= 0)
+ {
+ int i;
+
+ for (i = 0; i < nmembers; i++)
+ {
+ TransactionId memxid;
+ LOCKMODE memlockmode;
+
+ if (result && (current_is_member == NULL || *current_is_member))
+ break;
+
+ memlockmode = LOCKMODE_from_mxstatus(members[i].status);
+
+ /* ignore members from current xact (but track their presence) */
+ memxid = members[i].xid;
+ if (TransactionIdIsCurrentTransactionId(memxid))
+ {
+ if (current_is_member != NULL)
+ *current_is_member = true;
+ continue;
+ }
+ else if (result)
+ continue;
+
+ /* ignore members that don't conflict with the lock we want */
+ if (!DoLockModesConflict(memlockmode, wanted))
+ continue;
+
+ if (ISUPDATE_from_mxstatus(members[i].status))
+ {
+ /* ignore aborted updaters */
+ if (TransactionIdDidAbort(memxid))
+ continue;
+ }
+ else
+ {
+ /* ignore lockers-only that are no longer in progress */
+ if (!TransactionIdIsInProgress(memxid))
+ continue;
+ }
+
+ /*
+ * Whatever remains are either live lockers that conflict with our
+ * wanted lock, or updaters that are not aborted. Either way, they
+ * conflict with what we want. Set up to return true, but keep going to
+ * look for the current transaction among the multixact members,
+ * if needed.
+ */
+ result = true;
+ }
+ pfree(members);
+ }
+
+ return result;
+}
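+
+/*
+ * Illustrative only, with names as used by the real callers (heap_delete,
+ * heap_update and heap_lock_tuple, earlier in this file): the typical use
+ * is to avoid sleeping on a multixact none of whose members can conflict
+ * with us:
+ *
+ *		if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
+ *									lockmode, &current_is_member))
+ *			... conflict: wait, e.g. via MultiXactIdWait() below ...
+ *		else
+ *			... no conflicting member: proceed without waiting ...
+ */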
+
+/*
+ * Do_MultiXactIdWait
+ * Actual implementation for the two functions below.
+ *
+ * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
+ * needed to ensure we only sleep on conflicting members, and the infomask is
+ * used to optimize multixact access in case it's a lock-only multi); 'nowait'
+ * indicates whether to use conditional lock acquisition, to allow callers to
+ * fail if the lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
+ * context information for error messages. 'remaining', if not NULL, receives
+ * the number of members that are still running, including any (non-aborted)
+ * subtransactions of our own transaction.
+ *
+ * We do this by sleeping on each member using XactLockTableWait. Any
+ * members that belong to the current backend are *not* waited for, however;
+ * this would not merely be useless but would lead to Assert failure inside
+ * XactLockTableWait. By the time this returns, it is certain that all
+ * transactions *of other backends* that were members of the MultiXactId
+ * that conflict with the requested status are dead (and no new ones can have
+ * been added, since it is not legal to add members to an existing
+ * MultiXactId).
+ *
+ * But by the time we finish sleeping, someone else may have changed the Xmax
+ * of the containing tuple, so the caller needs to iterate on us somehow.
+ *
+ * Note that in case we return false, the number of remaining members is
+ * not to be trusted.
+ */
+static bool
+Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
+ uint16 infomask, bool nowait,
+ Relation rel, ItemPointer ctid, XLTW_Oper oper,
+ int *remaining)
+{
+ bool result = true;
+ MultiXactMember *members;
+ int nmembers;
+ int remain = 0;
+
+ /* for pre-pg_upgrade tuples, no need to sleep at all */
+ nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
+ GetMultiXactIdMembers(multi, &members, false,
+ HEAP_XMAX_IS_LOCKED_ONLY(infomask));
+
+ if (nmembers >= 0)
+ {
+ int i;
+
+ for (i = 0; i < nmembers; i++)
+ {
+ TransactionId memxid = members[i].xid;
+ MultiXactStatus memstatus = members[i].status;
+
+ if (TransactionIdIsCurrentTransactionId(memxid))
+ {
+ remain++;
+ continue;
+ }
+
+ if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
+ LOCKMODE_from_mxstatus(status)))
+ {
+ if (remaining && TransactionIdIsInProgress(memxid))
+ remain++;
+ continue;
+ }
+
+ /*
+ * This member conflicts with our multi, so we have to sleep (or
+ * return failure, if asked to avoid waiting).
+ *
+ * Note that we don't set up an error context callback ourselves,
+ * but instead we pass the info down to XactLockTableWait. This
+ * might seem a bit wasteful because the context is set up and
+ * torn down for each member of the multixact, but in reality it
+ * should be barely noticeable, and it avoids duplicate code.
+ */
+ if (nowait)
+ {
+ result = ConditionalXactLockTableWait(memxid);
+ if (!result)
+ break;
+ }
+ else
+ XactLockTableWait(memxid, rel, ctid, oper);
+ }
+
+ pfree(members);
+ }
+
+ if (remaining)
+ *remaining = remain;
+
+ return result;
+}
+
+/*
+ * MultiXactIdWait
+ * Sleep on a MultiXactId.
+ *
+ * By the time we finish sleeping, someone else may have changed the Xmax
+ * of the containing tuple, so the caller needs to iterate on us somehow.
+ *
+ * We return (in *remaining, if not NULL) the number of members that are still
+ * running, including any (non-aborted) subtransactions of our own transaction.
+ */
+static void
+MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
+ Relation rel, ItemPointer ctid, XLTW_Oper oper,
+ int *remaining)
+{
+ (void) Do_MultiXactIdWait(multi, status, infomask, false,
+ rel, ctid, oper, remaining);
+}
+
+/*
+ * ConditionalMultiXactIdWait
+ * As above, but only lock if we can get the lock without blocking.
+ *
+ * By the time we finish sleeping, someone else may have changed the Xmax
+ * of the containing tuple, so the caller needs to iterate on us somehow.
+ *
+ * If the multixact is now all gone, return true. Returns false if some
+ * transactions might still be running.
+ *
+ * We return (in *remaining, if not NULL) the number of members that are still
+ * running, including any (non-aborted) subtransactions of our own transaction.
+ */
+static bool
+ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
+ uint16 infomask, Relation rel, int *remaining)
+{
+ return Do_MultiXactIdWait(multi, status, infomask, true,
+ rel, NULL, XLTW_None, remaining);
+}
+
+/*
+ * heap_tuple_needs_eventual_freeze
+ *
+ * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
+ * will eventually require freezing. Similar to heap_tuple_needs_freeze,
+ * but there's no cutoff, since we're trying to figure out whether freezing
+ * will ever be needed, not whether it's needed now.
+ */
+bool
+heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
+{
+ TransactionId xid;
+
+ /*
+ * If xmin is a normal transaction ID, this tuple is definitely not
+ * frozen.
+ */
+ xid = HeapTupleHeaderGetXmin(tuple);
+ if (TransactionIdIsNormal(xid))
+ return true;
+
+ /*
+ * If xmax is a valid xact or multixact, this tuple is also not frozen.
+ */
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ MultiXactId multi;
+
+ multi = HeapTupleHeaderGetRawXmax(tuple);
+ if (MultiXactIdIsValid(multi))
+ return true;
+ }
+ else
+ {
+ xid = HeapTupleHeaderGetRawXmax(tuple);
+ if (TransactionIdIsNormal(xid))
+ return true;
+ }
+
+ if (tuple->t_infomask & HEAP_MOVED)
+ {
+ xid = HeapTupleHeaderGetXvac(tuple);
+ if (TransactionIdIsNormal(xid))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * heap_tuple_needs_freeze
+ *
+ * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
+ * are older than the specified cutoff XID or MultiXactId. If so, return true.
+ *
+ * It doesn't matter whether the tuple is alive or dead; we are checking
+ * to see if a tuple needs to be removed or frozen to avoid wraparound.
+ *
+ * NB: Cannot rely on hint bits here, they might not be set after a crash or
+ * on a standby.
+ */
+bool
+heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
+ MultiXactId cutoff_multi, Buffer buf)
+{
+ TransactionId xid;
+
+ xid = HeapTupleHeaderGetXmin(tuple);
+ if (TransactionIdIsNormal(xid) &&
+ TransactionIdPrecedes(xid, cutoff_xid))
+ return true;
+
+ /*
+ * The considerations for multixacts are complicated; look at
+ * heap_prepare_freeze_tuple for justifications. This routine had better
+ * be in sync with that one!
+ */
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ MultiXactId multi;
+
+ multi = HeapTupleHeaderGetRawXmax(tuple);
+ if (!MultiXactIdIsValid(multi))
+ {
+ /* no xmax set, ignore */
+ ;
+ }
+ else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
+ return true;
+ else if (MultiXactIdPrecedes(multi, cutoff_multi))
+ return true;
+ else
+ {
+ MultiXactMember *members;
+ int nmembers;
+ int i;
+
+ /* need to check whether any member of the mxact is too old */
+
+ nmembers = GetMultiXactIdMembers(multi, &members, false,
+ HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
+
+ for (i = 0; i < nmembers; i++)
+ {
+ if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
+ {
+ pfree(members);
+ return true;
+ }
+ }
+ if (nmembers > 0)
+ pfree(members);
+ }
+ }
+ else
+ {
+ xid = HeapTupleHeaderGetRawXmax(tuple);
+ if (TransactionIdIsNormal(xid) &&
+ TransactionIdPrecedes(xid, cutoff_xid))
+ return true;
+ }
+
+ if (tuple->t_infomask & HEAP_MOVED)
+ {
+ xid = HeapTupleHeaderGetXvac(tuple);
+ if (TransactionIdIsNormal(xid) &&
+ TransactionIdPrecedes(xid, cutoff_xid))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * If 'tuple' contains any visible XID greater than latestRemovedXid,
+ * ratchet forwards latestRemovedXid to the greatest one found.
+ * This is used as the basis for generating Hot Standby conflicts, so
+ * if a tuple was never visible then removing it should not conflict
+ * with queries.
+ */
+void
+HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
+ TransactionId *latestRemovedXid)
+{
+ TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
+ TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (tuple->t_infomask & HEAP_MOVED)
+ {
+ if (TransactionIdPrecedes(*latestRemovedXid, xvac))
+ *latestRemovedXid = xvac;
+ }
+
+ /*
+ * Ignore tuples inserted by an aborted transaction or if the tuple was
+ * updated/deleted by the inserting transaction.
+ *
+ * Look for a committed hint bit, or if no xmin bit is set, check clog.
+ */
+ if (HeapTupleHeaderXminCommitted(tuple) ||
+ (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
+ {
+ if (xmax != xmin &&
+ TransactionIdFollows(xmax, *latestRemovedXid))
+ *latestRemovedXid = xmax;
+ }
+
+ /* *latestRemovedXid may still be invalid at end */
+}
+
+#ifdef USE_PREFETCH
+/*
+ * Helper function for heap_index_delete_tuples. Issues prefetch requests for
+ * prefetch_count buffers. The prefetch_state keeps track of all the buffers
+ * we can prefetch, and which have already been prefetched; each call to this
+ * function picks up where the previous call left off.
+ *
+ * Note: we expect the deltids array to be sorted in an order that groups TIDs
+ * by heap block, with all TIDs for each block appearing together in exactly
+ * one group.
+ */
+static void
+index_delete_prefetch_buffer(Relation rel,
+ IndexDeletePrefetchState *prefetch_state,
+ int prefetch_count)
+{
+ BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
+ int count = 0;
+ int i;
+ int ndeltids = prefetch_state->ndeltids;
+ TM_IndexDelete *deltids = prefetch_state->deltids;
+
+ for (i = prefetch_state->next_item;
+ i < ndeltids && count < prefetch_count;
+ i++)
+ {
+ ItemPointer htid = &deltids[i].tid;
+
+ if (cur_hblkno == InvalidBlockNumber ||
+ ItemPointerGetBlockNumber(htid) != cur_hblkno)
+ {
+ cur_hblkno = ItemPointerGetBlockNumber(htid);
+ PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
+ count++;
+ }
+ }
+
+ /*
+ * Save the prefetch position so that next time we can continue from that
+ * position.
+ */
+ prefetch_state->next_item = i;
+ prefetch_state->cur_hblkno = cur_hblkno;
+}
+#endif
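+
+/*
+ * Illustrative only: heap_index_delete_tuples() below drives the prefetch
+ * helper in two steps, first priming a full prefetch distance worth of
+ * blocks and then topping the pipeline up by one block for each block it
+ * actually reads:
+ *
+ *		index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
+ *		...
+ *		buf = ReadBuffer(rel, blkno);
+ *		index_delete_prefetch_buffer(rel, &prefetch_state, 1);
+ */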
+
+/*
+ * heapam implementation of tableam's index_delete_tuples interface.
+ *
+ * This helper function is called by index AMs during index tuple deletion.
+ * See tableam header comments for an explanation of the interface implemented
+ * here and a general theory of operation. Note that each call here is either
+ * a simple index deletion call, or a bottom-up index deletion call.
+ *
+ * It's possible for this to generate a fair amount of I/O, since we may be
+ * deleting hundreds of tuples from a single index block. To amortize that
+ * cost to some degree, this uses prefetching and combines repeat accesses to
+ * the same heap block.
+ */
+TransactionId
+heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
+{
+ /* Initial assumption is that earlier pruning took care of conflict */
+ TransactionId latestRemovedXid = InvalidTransactionId;
+ BlockNumber blkno = InvalidBlockNumber;
+ Buffer buf = InvalidBuffer;
+ Page page = NULL;
+ OffsetNumber maxoff = InvalidOffsetNumber;
+ TransactionId priorXmax;
+#ifdef USE_PREFETCH
+ IndexDeletePrefetchState prefetch_state;
+ int prefetch_distance;
+#endif
+ SnapshotData SnapshotNonVacuumable;
+ int finalndeltids = 0,
+ nblocksaccessed = 0;
+
+ /* State that's only used in bottom-up index deletion case */
+ int nblocksfavorable = 0;
+ int curtargetfreespace = delstate->bottomupfreespace,
+ lastfreespace = 0,
+ actualfreespace = 0;
+ bool bottomup_final_block = false;
+
+ InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
+
+ /* Sort caller's deltids array by TID for further processing */
+ index_delete_sort(delstate);
+
+ /*
+ * Bottom-up case: resort deltids array in an order attuned to where the
+ * greatest number of promising TIDs are to be found, and determine how
+ * many blocks from the start of sorted array should be considered
+ * favorable. This will also shrink the deltids array in order to
+ * eliminate completely unfavorable blocks up front.
+ */
+ if (delstate->bottomup)
+ nblocksfavorable = bottomup_sort_and_shrink(delstate);
+
+#ifdef USE_PREFETCH
+ /* Initialize prefetch state. */
+ prefetch_state.cur_hblkno = InvalidBlockNumber;
+ prefetch_state.next_item = 0;
+ prefetch_state.ndeltids = delstate->ndeltids;
+ prefetch_state.deltids = delstate->deltids;
+
+ /*
+ * Determine the prefetch distance that we will attempt to maintain.
+ *
+ * Since the caller holds a buffer lock somewhere in rel, we'd better make
+ * sure that isn't a catalog relation before we call code that does
+ * syscache lookups, to avoid risk of deadlock.
+ */
+ if (IsCatalogRelation(rel))
+ prefetch_distance = maintenance_io_concurrency;
+ else
+ prefetch_distance =
+ get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
+
+ /* Cap initial prefetch distance for bottom-up deletion caller */
+ if (delstate->bottomup)
+ {
+ Assert(nblocksfavorable >= 1);
+ Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
+ prefetch_distance = Min(prefetch_distance, nblocksfavorable);
+ }
+
+ /* Start prefetching. */
+ index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
+#endif
+
+ /* Iterate over deltids, determine which to delete, check their horizon */
+ Assert(delstate->ndeltids > 0);
+ for (int i = 0; i < delstate->ndeltids; i++)
+ {
+ TM_IndexDelete *ideltid = &delstate->deltids[i];
+ TM_IndexStatus *istatus = delstate->status + ideltid->id;
+ ItemPointer htid = &ideltid->tid;
+ OffsetNumber offnum;
+
+ /*
+ * Read buffer, and perform required extra steps each time a new block
+ * is encountered. Avoid refetching if it's the same block as the one
+ * from the last htid.
+ */
+ if (blkno == InvalidBlockNumber ||
+ ItemPointerGetBlockNumber(htid) != blkno)
+ {
+ /*
+ * Consider giving up early for bottom-up index deletion caller
+ * first. (Only prefetch next-next block afterwards, when it
+ * becomes clear that we're at least going to access the next
+ * block in line.)
+ *
+ * Sometimes the first block frees so much space for bottom-up
+ * caller that the deletion process can end without accessing any
+ * more blocks. It is usually necessary to access 2 or 3 blocks
+ * per bottom-up deletion operation, though.
+ */
+ if (delstate->bottomup)
+ {
+ /*
+ * We often allow caller to delete a few additional items
+ * whose entries we reached after the point that space target
+ * from caller was satisfied. The cost of accessing the page
+ * was already paid at that point, so it made sense to finish
+ * it off. When that happens, we finalize everything here
+ * (by finishing off the whole bottom-up deletion operation
+ * without needlessly paying the cost of accessing any more
+ * blocks).
+ */
+ if (bottomup_final_block)
+ break;
+
+ /*
+ * Give up when we didn't enable our caller to free any
+ * additional space as a result of processing the page that we
+ * just finished up with. This rule is the main way in which
+ * we keep the cost of bottom-up deletion under control.
+ */
+ if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
+ break;
+ lastfreespace = actualfreespace; /* for next time */
+
+ /*
+ * Deletion operation (which is bottom-up) will definitely
+ * access the next block in line. Prepare for that now.
+ *
+ * Decay target free space so that we don't hang on for too
+ * long with a marginal case. (Space target is only truly
+ * helpful when it allows us to recognize that we don't need
+ * to access more than 1 or 2 blocks to satisfy caller due to
+ * agreeable workload characteristics.)
+ *
+ * We are a bit more patient when we encounter contiguous
+ * blocks, though: these are treated as favorable blocks. The
+ * decay process is only applied when the next block in line
+ * is not a favorable/contiguous block. This is not an
+ * exception to the general rule; we still insist on finding
+ * at least one deletable item per block accessed. See
+ * bottomup_nblocksfavorable() for full details of the theory
+ * behind favorable blocks and heap block locality in general.
+ *
+ * Note: The first block in line is always treated as a
+ * favorable block, so the earliest possible point that the
+ * decay can be applied is just before we access the second
+ * block in line. The Assert() verifies this for us.
+ */
+ Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
+ if (nblocksfavorable > 0)
+ nblocksfavorable--;
+ else
+ curtargetfreespace /= 2;
+ }
+
+ /* release old buffer */
+ if (BufferIsValid(buf))
+ UnlockReleaseBuffer(buf);
+
+ blkno = ItemPointerGetBlockNumber(htid);
+ buf = ReadBuffer(rel, blkno);
+ nblocksaccessed++;
+ Assert(!delstate->bottomup ||
+ nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
+
+#ifdef USE_PREFETCH
+
+ /*
+ * To maintain the prefetch distance, prefetch one more page for
+ * each page we read.
+ */
+ index_delete_prefetch_buffer(rel, &prefetch_state, 1);
+#endif
+
+ LockBuffer(buf, BUFFER_LOCK_SHARE);
+
+ page = BufferGetPage(buf);
+ maxoff = PageGetMaxOffsetNumber(page);
+ }
+
+ if (istatus->knowndeletable)
+ Assert(!delstate->bottomup && !istatus->promising);
+ else
+ {
+ ItemPointerData tmp = *htid;
+ HeapTupleData heapTuple;
+
+ /* Are any tuples from this HOT chain non-vacuumable? */
+ if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
+ &heapTuple, NULL, true))
+ continue; /* can't delete entry */
+
+ /* Caller will delete, since whole HOT chain is vacuumable */
+ istatus->knowndeletable = true;
+
+ /* Maintain index free space info for bottom-up deletion case */
+ if (delstate->bottomup)
+ {
+ Assert(istatus->freespace > 0);
+ actualfreespace += istatus->freespace;
+ if (actualfreespace >= curtargetfreespace)
+ bottomup_final_block = true;
+ }
+ }
+
+ /*
+ * Maintain latestRemovedXid value for deletion operation as a whole
+ * by advancing current value using heap tuple headers. This is
+ * loosely based on the logic for pruning a HOT chain.
+ */
+ offnum = ItemPointerGetOffsetNumber(htid);
+ priorXmax = InvalidTransactionId; /* cannot check first XMIN */
+ for (;;)
+ {
+ ItemId lp;
+ HeapTupleHeader htup;
+
+ /* Some sanity checks */
+ if (offnum < FirstOffsetNumber || offnum > maxoff)
+ break;
+
+ lp = PageGetItemId(page, offnum);
+ if (ItemIdIsRedirected(lp))
+ {
+ offnum = ItemIdGetRedirect(lp);
+ continue;
+ }
+
+ /*
+ * We'll often encounter LP_DEAD line pointers (especially with an
+ * entry marked knowndeletable by our caller up front). No heap
+ * tuple headers get examined for an htid that leads us to an
+ * LP_DEAD item. This is okay because the earlier pruning
+ * operation that made the line pointer LP_DEAD in the first place
+ * must have considered the original tuple header as part of
+ * generating its own latestRemovedXid value.
+ *
+ * Relying on XLOG_HEAP2_PRUNE records like this is the same
+ * strategy that index vacuuming uses in all cases. Index VACUUM
+ * WAL records don't even have a latestRemovedXid field of their
+ * own for this reason.
+ */
+ if (!ItemIdIsNormal(lp))
+ break;
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ /*
+ * Check the tuple XMIN against prior XMAX, if any
+ */
+ if (TransactionIdIsValid(priorXmax) &&
+ !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
+ break;
+
+ HeapTupleHeaderAdvanceLatestRemovedXid(htup, &latestRemovedXid);
+
+ /*
+ * If the tuple is not HOT-updated, then we are at the end of this
+ * HOT-chain. No need to visit later tuples from the same update
+ * chain (they get their own index entries) -- just move on to
+ * next htid from index AM caller.
+ */
+ if (!HeapTupleHeaderIsHotUpdated(htup))
+ break;
+
+ /* Advance to next HOT chain member */
+ Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
+ offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
+ priorXmax = HeapTupleHeaderGetUpdateXid(htup);
+ }
+
+ /* Enable further/final shrinking of deltids for caller */
+ finalndeltids = i + 1;
+ }
+
+ UnlockReleaseBuffer(buf);
+
+ /*
+ * Shrink deltids array to exclude non-deletable entries at the end. This
+ * is not just a minor optimization. Final deltids array size might be
+ * zero for a bottom-up caller. Index AM is explicitly allowed to rely on
+ * ndeltids being zero in all cases with zero total deletable entries.
+ */
+ Assert(finalndeltids > 0 || delstate->bottomup);
+ delstate->ndeltids = finalndeltids;
+
+ return latestRemovedXid;
+}
+
+/*
+ * Specialized inlineable comparison function for index_delete_sort()
+ */
+static inline int
+index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
+{
+ ItemPointer tid1 = &deltid1->tid;
+ ItemPointer tid2 = &deltid2->tid;
+
+ {
+ BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
+ BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
+
+ if (blk1 != blk2)
+ return (blk1 < blk2) ? -1 : 1;
+ }
+ {
+ OffsetNumber pos1 = ItemPointerGetOffsetNumber(tid1);
+ OffsetNumber pos2 = ItemPointerGetOffsetNumber(tid2);
+
+ if (pos1 != pos2)
+ return (pos1 < pos2) ? -1 : 1;
+ }
+
+ Assert(false);
+
+ return 0;
+}
+
+/*
+ * Sort deltids array from delstate by TID. This prepares it for further
+ * processing by heap_index_delete_tuples().
+ *
+ * This operation becomes a noticeable consumer of CPU cycles with some
+ * workloads, so we go to the trouble of specialization/micro optimization.
+ * We use shellsort for this because it's easy to specialize, compiles to
+ * relatively few instructions, and is adaptive to presorted inputs/subsets
+ * (which are typical here).
+ */
+static void
+index_delete_sort(TM_IndexDeleteOp *delstate)
+{
+ TM_IndexDelete *deltids = delstate->deltids;
+ int ndeltids = delstate->ndeltids;
+ int low = 0;
+
+ /*
+ * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
+ *
+ * This implementation is fast with array sizes up to ~4500. This covers
+ * all supported BLCKSZ values.
+ */
+ const int gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
+
+ /* Think carefully before changing anything here -- keep swaps cheap */
+ StaticAssertStmt(sizeof(TM_IndexDelete) <= 8,
+ "element size exceeds 8 bytes");
+
+ for (int g = 0; g < lengthof(gaps); g++)
+ {
+ for (int hi = gaps[g], i = low + hi; i < ndeltids; i++)
+ {
+ TM_IndexDelete d = deltids[i];
+ int j = i;
+
+ while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
+ {
+ deltids[j] = deltids[j - hi];
+ j -= hi;
+ }
+ deltids[j] = d;
+ }
+ }
+}
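+
+/*
+ * For example (illustrative, not a code path): with ndeltids == 100, the
+ * passes for gaps 1968, 861, 336 and 112 begin past the end of the array
+ * and do nothing, so the sort effectively starts at gap 48 and finishes
+ * with the ordinary insertion-sort pass at gap 1.
+ */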
+
+/*
+ * Returns how many blocks should be considered favorable/contiguous for a
+ * bottom-up index deletion pass. This is a number of heap blocks that starts
+ * from and includes the first block in line.
+ *
+ * There is always at least one favorable block during bottom-up index
+ * deletion. In the worst case (i.e. with totally random heap blocks) the
+ * first block in line (the only favorable block) can be thought of as a
+ * degenerate array of contiguous blocks that consists of a single block.
+ * heap_index_delete_tuples() will expect this.
+ *
+ * Caller passes blockgroups, a description of the final order that deltids
+ * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
+ * processing. Note that deltids need not actually be sorted just yet (caller
+ * only passes deltids to us so that we can interpret blockgroups).
+ *
+ * You might guess that the existence of contiguous blocks cannot matter much,
+ * since in general the main factor that determines which blocks we visit is
+ * the number of promising TIDs, which is a fixed hint from the index AM.
+ * We're not really targeting the general case, though -- the actual goal is
+ * to adapt our behavior to a wide variety of naturally occurring conditions.
+ * The effects of most of the heuristics we apply are only noticeable in the
+ * aggregate, over time and across many _related_ bottom-up index deletion
+ * passes.
+ *
+ * Deeming certain blocks favorable allows heapam to recognize and adapt to
+ * workloads where heap blocks visited during bottom-up index deletion can be
+ * accessed contiguously, in the sense that each newly visited block is the
+ * neighbor of the block that bottom-up deletion just finished processing (or
+ * close enough to it). It will likely be cheaper to access more favorable
+ * blocks sooner rather than later (e.g. in this pass, not across a series of
+ * related bottom-up passes). Either way it is probably only a matter of time
+ * (or a matter of further correlated version churn) before all blocks that
+ * appear together as a single large batch of favorable blocks get accessed by
+ * _some_ bottom-up pass. Large batches of favorable blocks tend to either
+ * appear almost constantly or not even once (it all depends on per-index
+ * workload characteristics).
+ *
+ * Note that the blockgroups sort order applies a power-of-two bucketing
+ * scheme that creates opportunities for contiguous groups of blocks to get
+ * batched together, at least with workloads that are naturally amenable to
+ * being driven by heap block locality. This doesn't just enhance the spatial
+ * locality of bottom-up heap block processing in the obvious way. It also
+ * enables temporal locality of access, since sorting by heap block number
+ * naturally tends to make the bottom-up processing order deterministic.
+ *
+ * Consider the following example to get a sense of how temporal locality
+ * might matter: There is a heap relation with several indexes, each of which
+ * is low to medium cardinality. It is subject to constant non-HOT updates.
+ * The updates are skewed (in one part of the primary key, perhaps). None of
+ * the indexes are logically modified by the UPDATE statements (if they were
+ * then bottom-up index deletion would not be triggered in the first place).
+ * Naturally, each new round of index tuples (for each heap tuple that gets a
+ * heap_update() call) will have the same heap TID in each and every index.
+ * Since these indexes are low cardinality and never get logically modified,
+ * heapam processing during bottom-up deletion passes will access heap blocks
+ * in approximately sequential order. Temporal locality of access occurs due
+ * to bottom-up deletion passes behaving very similarly across each of the
+ * indexes at any given moment. This keeps the number of buffer misses needed
+ * to visit heap blocks to a minimum.
+ */
+static int
+bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups,
+ TM_IndexDelete *deltids)
+{
+ int64 lastblock = -1;
+ int nblocksfavorable = 0;
+
+ Assert(nblockgroups >= 1);
+ Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
+
+ /*
+ * We tolerate heap blocks that will be accessed only slightly out of
+ * physical order. Small blips occur when a pair of almost-contiguous
+ * blocks happen to fall into different buckets (perhaps due only to a
+ * small difference in npromisingtids that the bucketing scheme didn't
+ * quite manage to ignore). We effectively ignore these blips by applying
+ * a small tolerance. The precise tolerance we use is a little arbitrary,
+ * but it works well enough in practice.
+ */
+ for (int b = 0; b < nblockgroups; b++)
+ {
+ IndexDeleteCounts *group = blockgroups + b;
+ TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
+ BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
+
+ if (lastblock != -1 &&
+ ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
+ (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
+ break;
+
+ nblocksfavorable++;
+ lastblock = block;
+ }
+
+ /* Always indicate that there is at least 1 favorable block */
+ Assert(nblocksfavorable >= 1);
+
+ return nblocksfavorable;
+}
+
+/*
+ * qsort comparison function for bottomup_sort_and_shrink()
+ */
+static int
+bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
+{
+ const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
+ const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
+
+ /*
+ * The most significant field is npromisingtids (whose comparison we
+ * invert so as to sort in descending order).
+ *
+ * Caller should have already normalized npromisingtids fields into
+ * power-of-two values (buckets).
+ */
+ if (group1->npromisingtids > group2->npromisingtids)
+ return -1;
+ if (group1->npromisingtids < group2->npromisingtids)
+ return 1;
+
+ /*
+ * Tiebreak: desc ntids sort order.
+ *
+ * We cannot expect power-of-two values for ntids fields. We should
+ * behave as if they were already rounded up for us instead.
+ */
+ if (group1->ntids != group2->ntids)
+ {
+ uint32 ntids1 = pg_nextpower2_32((uint32) group1->ntids);
+ uint32 ntids2 = pg_nextpower2_32((uint32) group2->ntids);
+
+ if (ntids1 > ntids2)
+ return -1;
+ if (ntids1 < ntids2)
+ return 1;
+ }
+
+ /*
+ * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
+ * block in deltids array) order.
+ *
+ * This is equivalent to sorting in ascending heap block number order
+ * (among otherwise equal subsets of the array). This approach allows us
+ * to avoid accessing the out-of-line TID. (We rely on the assumption
+ * that the deltids array was sorted in ascending heap TID order when
+ * these offsets to the first TID from each heap block group were formed.)
+ */
+ if (group1->ifirsttid > group2->ifirsttid)
+ return 1;
+ if (group1->ifirsttid < group2->ifirsttid)
+ return -1;
+
+ pg_unreachable();
+
+ return 0;
+}
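+
+/*
+ * A worked example of the comparator above (illustrative values): a group
+ * bucketed to npromisingtids 16 sorts before one bucketed to 8, regardless
+ * of ntids. Between two groups both in bucket 8, the one whose ntids
+ * rounds up to the larger power of two sorts first. If that ties as well,
+ * the group appearing earlier in the TID-sorted deltids array (lower
+ * ifirsttid, hence lower heap block number) sorts first.
+ */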
+
+/*
+ * heap_index_delete_tuples() helper function for bottom-up deletion callers.
+ *
+ * Sorts deltids array in the order needed for useful processing by bottom-up
+ * deletion. The array should already be sorted in TID order when we're
+ * called. The sort process groups heap TIDs from deltids into heap block
+ * groupings. Earlier/more-promising groups/blocks are usually those that are
+ * known to have the most "promising" TIDs.
+ *
+ * Sets new size of deltids array (ndeltids) in state. deltids will only have
+ * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
+ * return. This often means that deltids will be shrunk to a small fraction
+ * of its original size (we eliminate many heap blocks from consideration for
+ * caller up front).
+ *
+ * Returns the number of "favorable" blocks. See bottomup_nblocksfavorable()
+ * for a definition and full details.
+ */
+static int
+bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
+{
+ IndexDeleteCounts *blockgroups;
+ TM_IndexDelete *reordereddeltids;
+ BlockNumber curblock = InvalidBlockNumber;
+ int nblockgroups = 0;
+ int ncopied = 0;
+ int nblocksfavorable = 0;
+
+ Assert(delstate->bottomup);
+ Assert(delstate->ndeltids > 0);
+
+ /* Calculate per-heap-block count of TIDs */
+ blockgroups = palloc(sizeof(IndexDeleteCounts) * delstate->ndeltids);
+ for (int i = 0; i < delstate->ndeltids; i++)
+ {
+ TM_IndexDelete *ideltid = &delstate->deltids[i];
+ TM_IndexStatus *istatus = delstate->status + ideltid->id;
+ ItemPointer htid = &ideltid->tid;
+ bool promising = istatus->promising;
+
+ if (curblock != ItemPointerGetBlockNumber(htid))
+ {
+ /* New block group */
+ nblockgroups++;
+
+ Assert(curblock < ItemPointerGetBlockNumber(htid) ||
+ !BlockNumberIsValid(curblock));
+
+ curblock = ItemPointerGetBlockNumber(htid);
+ blockgroups[nblockgroups - 1].ifirsttid = i;
+ blockgroups[nblockgroups - 1].ntids = 1;
+ blockgroups[nblockgroups - 1].npromisingtids = 0;
+ }
+ else
+ {
+ blockgroups[nblockgroups - 1].ntids++;
+ }
+
+ if (promising)
+ blockgroups[nblockgroups - 1].npromisingtids++;
+ }
+
+ /*
+ * We're about ready to sort block groups to determine the optimal order
+ * for visiting heap blocks. But before we do, round the number of
+ * promising tuples for each block group up to the next power-of-two,
+ * unless it is very low (less than 4), in which case we round up to 4.
+ * npromisingtids is far too noisy to trust when choosing between a pair
+ * of block groups that both have very low values.
+ *
+ * This scheme divides heap blocks/block groups into buckets. Each bucket
+ * contains blocks that have _approximately_ the same number of promising
+ * TIDs as each other. The goal is to ignore relatively small differences
+ * in the total number of promising entries, so that the whole process can
+ * give a little weight to heapam factors (like heap block locality)
+ * instead. This isn't a trade-off, really -- we have nothing to lose. It
+ * would be foolish to interpret small differences in npromisingtids
+ * values as anything more than noise.
+ *
+ * We tiebreak on ntids when sorting block group subsets that have the
+ * same npromisingtids, but this has the same issues as npromisingtids,
+ * and so ntids is subject to the same power-of-two bucketing scheme. The
+ * only reason that we don't fix ntids in the same way here too is that
+ * we'll need accurate ntids values after the sort. We handle ntids
+ * bucketization dynamically instead (in the sort comparator).
+ *
+ * See bottomup_nblocksfavorable() for a full explanation of when and how
+ * heap locality/favorable blocks can significantly influence when and how
+ * heap blocks are accessed.
+ */
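+ /*
+ * For example (a sketch of the bucketing, not a code path): raw
+ * npromisingtids values of 1, 3 and 4 all land in bucket 4; values 5
+ * through 8 land in bucket 8; 9 through 16 land in bucket 16. Block
+ * groups whose counts differ only slightly therefore compare as equal,
+ * leaving the ntids/ifirsttid tiebreaks to decide their order.
+ */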
+ for (int b = 0; b < nblockgroups; b++)
+ {
+ IndexDeleteCounts *group = blockgroups + b;
+
+ /* Better off falling back on ntids with low npromisingtids */
+ if (group->npromisingtids <= 4)
+ group->npromisingtids = 4;
+ else
+ group->npromisingtids =
+ pg_nextpower2_32((uint32) group->npromisingtids);
+ }
+
+ /* Sort groups and rearrange caller's deltids array */
+ qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
+ bottomup_sort_and_shrink_cmp);
+ reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
+
+ nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
+ /* Determine number of favorable blocks at the start of final deltids */
+ nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
+ delstate->deltids);
+
+ for (int b = 0; b < nblockgroups; b++)
+ {
+ IndexDeleteCounts *group = blockgroups + b;
+ TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
+
+ memcpy(reordereddeltids + ncopied, firstdtid,
+ sizeof(TM_IndexDelete) * group->ntids);
+ ncopied += group->ntids;
+ }
+
+ /* Copy final grouped and sorted TIDs back into start of caller's array */
+ memcpy(delstate->deltids, reordereddeltids,
+ sizeof(TM_IndexDelete) * ncopied);
+ delstate->ndeltids = ncopied;
+
+ pfree(reordereddeltids);
+ pfree(blockgroups);
+
+ return nblocksfavorable;
+}
+
+/*
+ * Perform XLogInsert for a heap-freeze operation. Caller must have already
+ * modified the buffer and marked it dirty.
+ */
+XLogRecPtr
+log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
+ xl_heap_freeze_tuple *tuples, int ntuples)
+{
+ xl_heap_freeze_page xlrec;
+ XLogRecPtr recptr;
+
+ /* Caller should not call me on a non-WAL-logged relation */
+ Assert(RelationNeedsWAL(reln));
+ /* nor when there are no tuples to freeze */
+ Assert(ntuples > 0);
+
+ xlrec.cutoff_xid = cutoff_xid;
+ xlrec.ntuples = ntuples;
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfHeapFreezePage);
+
+ /*
+ * The freeze plan array is not actually in the buffer, but pretend that
+ * it is. When XLogInsert stores the whole buffer, the freeze plan need
+ * not be stored too.
+ */
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+ XLogRegisterBufData(0, (char *) tuples,
+ ntuples * sizeof(xl_heap_freeze_tuple));
+
+ recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE_PAGE);
+
+ return recptr;
+}
+
+/*
+ * Perform XLogInsert for a heap-visible operation. heap_buffer is the buffer
+ * containing the heap page being marked all-visible, and vm_buffer is the
+ * buffer containing the corresponding visibility map block. Both should have
+ * already been modified and dirtied.
+ *
+ * If checksums or wal_log_hints are enabled, we also generate a full-page
+ * image of heap_buffer, if necessary.
+ */
+XLogRecPtr
+log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
+ TransactionId cutoff_xid, uint8 vmflags)
+{
+ xl_heap_visible xlrec;
+ XLogRecPtr recptr;
+ uint8 flags;
+
+ Assert(BufferIsValid(heap_buffer));
+ Assert(BufferIsValid(vm_buffer));
+
+ xlrec.cutoff_xid = cutoff_xid;
+ xlrec.flags = vmflags;
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
+
+ XLogRegisterBuffer(0, vm_buffer, 0);
+
+ flags = REGBUF_STANDARD;
+ if (!XLogHintBitIsNeeded())
+ flags |= REGBUF_NO_IMAGE;
+ XLogRegisterBuffer(1, heap_buffer, flags);
+
+ recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
+
+ return recptr;
+}
+
+/*
+ * Perform XLogInsert for a heap-update operation. Caller must already
+ * have modified the buffer(s) and marked them dirty.
+ */
+static XLogRecPtr
+log_heap_update(Relation reln, Buffer oldbuf,
+ Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
+ HeapTuple old_key_tuple,
+ bool all_visible_cleared, bool new_all_visible_cleared)
+{
+ xl_heap_update xlrec;
+ xl_heap_header xlhdr;
+ xl_heap_header xlhdr_idx;
+ uint8 info;
+ uint16 prefix_suffix[2];
+ uint16 prefixlen = 0,
+ suffixlen = 0;
+ XLogRecPtr recptr;
+ Page page = BufferGetPage(newbuf);
+ bool need_tuple_data = RelationIsLogicallyLogged(reln);
+ bool init;
+ int bufflags;
+
+ /* Caller should not call me on a non-WAL-logged relation */
+ Assert(RelationNeedsWAL(reln));
+
+ XLogBeginInsert();
+
+ if (HeapTupleIsHeapOnly(newtup))
+ info = XLOG_HEAP_HOT_UPDATE;
+ else
+ info = XLOG_HEAP_UPDATE;
+
+ /*
+ * If the old and new tuple are on the same page, we only need to log the
+ * parts of the new tuple that were changed. That saves on the amount of
+ * WAL we need to write. Currently, we just count any unchanged bytes in
+ * the beginning and end of the tuple. That's quick to check, and
+ * perfectly covers the common case that only one field is updated.
+ *
+ * We could do this even if the old and new tuple are on different pages,
+ * but only if we don't make a full-page image of the old page, which is
+ * difficult to know in advance. Also, if the old tuple is corrupt for
+ * some reason, it would allow the corruption to propagate the new page,
+ * so it seems best to avoid. Under the general assumption that most
+ * updates tend to create the new tuple version on the same page, there
+ * isn't much to be gained by doing this across pages anyway.
+ *
+ * Skip this if we're taking a full-page image of the new page, as we
+ * don't include the new tuple in the WAL record in that case. Also
+ * disable if wal_level='logical', as logical decoding needs to be able to
+ * read the new tuple in whole from the WAL record alone.
+ */
+ if (oldbuf == newbuf && !need_tuple_data &&
+ !XLogCheckBufferNeedsBackup(newbuf))
+ {
+ char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
+ char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
+ int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
+ int newlen = newtup->t_len - newtup->t_data->t_hoff;
+
+ /* Check for common prefix between old and new tuple */
+ for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
+ {
+ if (newp[prefixlen] != oldp[prefixlen])
+ break;
+ }
+
+ /*
+ * Storing the length of the prefix takes 2 bytes, so we need to save
+ * at least 3 bytes or there's no point.
+ */
+ if (prefixlen < 3)
+ prefixlen = 0;
+
+ /* Same for suffix */
+ for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
+ {
+ if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
+ break;
+ }
+ if (suffixlen < 3)
+ suffixlen = 0;
+ }
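+ /*
+ * Worked example (a sketch, not tied to any real tuple): with old tuple
+ * data "AAAABBBBCCCC" and new tuple data "AAAADDCCCC", the loops above
+ * find prefixlen = 4 ("AAAA") and suffixlen = 4 ("CCCC"), so only the
+ * two middle bytes "DD", plus the uint16 length(s), are logged instead
+ * of the full new tuple data.
+ */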
+
+ /* Prepare main WAL data chain */
+ xlrec.flags = 0;
+ if (all_visible_cleared)
+ xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
+ if (new_all_visible_cleared)
+ xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
+ if (prefixlen > 0)
+ xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
+ if (suffixlen > 0)
+ xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
+ if (need_tuple_data)
+ {
+ xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
+ if (old_key_tuple)
+ {
+ if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
+ xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
+ else
+ xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
+ }
+ }
+
+ /* If new tuple is the single and first tuple on page... */
+ if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
+ PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
+ {
+ info |= XLOG_HEAP_INIT_PAGE;
+ init = true;
+ }
+ else
+ init = false;
+
+ /* Prepare WAL data for the old page */
+ xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
+ xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
+ xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
+ oldtup->t_data->t_infomask2);
+
+ /* Prepare WAL data for the new page */
+ xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
+ xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
+
+ bufflags = REGBUF_STANDARD;
+ if (init)
+ bufflags |= REGBUF_WILL_INIT;
+ if (need_tuple_data)
+ bufflags |= REGBUF_KEEP_DATA;
+
+ XLogRegisterBuffer(0, newbuf, bufflags);
+ if (oldbuf != newbuf)
+ XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
+
+ XLogRegisterData((char *) &xlrec, SizeOfHeapUpdate);
+
+ /*
+ * Prepare WAL data for the new tuple.
+ */
+ if (prefixlen > 0 || suffixlen > 0)
+ {
+ if (prefixlen > 0 && suffixlen > 0)
+ {
+ prefix_suffix[0] = prefixlen;
+ prefix_suffix[1] = suffixlen;
+ XLogRegisterBufData(0, (char *) &prefix_suffix, sizeof(uint16) * 2);
+ }
+ else if (prefixlen > 0)
+ {
+ XLogRegisterBufData(0, (char *) &prefixlen, sizeof(uint16));
+ }
+ else
+ {
+ XLogRegisterBufData(0, (char *) &suffixlen, sizeof(uint16));
+ }
+ }
+
+ xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
+ xlhdr.t_infomask = newtup->t_data->t_infomask;
+ xlhdr.t_hoff = newtup->t_data->t_hoff;
+ Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
+
+ /*
+ * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
+ *
+ * The 'data' doesn't include the common prefix or suffix.
+ */
+ XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
+ if (prefixlen == 0)
+ {
+ XLogRegisterBufData(0,
+ ((char *) newtup->t_data) + SizeofHeapTupleHeader,
+ newtup->t_len - SizeofHeapTupleHeader - suffixlen);
+ }
+ else
+ {
+ /*
+ * Have to write the null bitmap and data after the common prefix as
+ * two separate rdata entries.
+ */
+ /* bitmap [+ padding] [+ oid] */
+ if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
+ {
+ XLogRegisterBufData(0,
+ ((char *) newtup->t_data) + SizeofHeapTupleHeader,
+ newtup->t_data->t_hoff - SizeofHeapTupleHeader);
+ }
+
+ /* data after common prefix */
+ XLogRegisterBufData(0,
+ ((char *) newtup->t_data) + newtup->t_data->t_hoff + prefixlen,
+ newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
+ }
+
+ /* We need to log a tuple identity */
+ if (need_tuple_data && old_key_tuple)
+ {
+ /* don't really need this, but it's more comfy to decode */
+ xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
+ xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
+ xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
+
+ XLogRegisterData((char *) &xlhdr_idx, SizeOfHeapHeader);
+
+ /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
+ XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
+ old_key_tuple->t_len - SizeofHeapTupleHeader);
+ }
+
+ /* filtering by origin on a row level is much more efficient */
+ XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
+
+ recptr = XLogInsert(RM_HEAP_ID, info);
+
+ return recptr;
+}
+
+/*
+ * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
+ *
+ * This is only used in wal_level >= WAL_LEVEL_LOGICAL, and only for catalog
+ * tuples.
+ */
+static XLogRecPtr
+log_heap_new_cid(Relation relation, HeapTuple tup)
+{
+ xl_heap_new_cid xlrec;
+
+ XLogRecPtr recptr;
+ HeapTupleHeader hdr = tup->t_data;
+
+ Assert(ItemPointerIsValid(&tup->t_self));
+ Assert(tup->t_tableOid != InvalidOid);
+
+ xlrec.top_xid = GetTopTransactionId();
+ xlrec.target_node = relation->rd_node;
+ xlrec.target_tid = tup->t_self;
+
+ /*
+ * If the tuple got inserted & deleted in the same TX we definitely have a
+ * combo CID, set cmin and cmax.
+ */
+ if (hdr->t_infomask & HEAP_COMBOCID)
+ {
+ Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
+ Assert(!HeapTupleHeaderXminInvalid(hdr));
+ xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
+ xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
+ xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
+ }
+ /* No combo CID, so only cmin or cmax can be set by this TX */
+ else
+ {
+ /*
+ * Tuple inserted.
+ *
+ * We need to check for LOCK ONLY because multixacts might be
+ * transferred to the new tuple in case of FOR KEY SHARE updates in
+ * which case there will be an xmax, although the tuple just got
+ * inserted.
+ */
+ if (hdr->t_infomask & HEAP_XMAX_INVALID ||
+ HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
+ {
+ xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
+ xlrec.cmax = InvalidCommandId;
+ }
+ /* Tuple from a different tx updated or deleted. */
+ else
+ {
+ xlrec.cmin = InvalidCommandId;
+ xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
+ }
+ xlrec.combocid = InvalidCommandId;
+ }
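+ /*
+ * For example (with hypothetical values): a tuple inserted at cmin 5
+ * and deleted at cmax 9 by the same transaction carries a single combo
+ * CID in its header; logging the resolved cmin and cmax here lets
+ * logical decoding determine visibility without access to the original
+ * backend's private combo CID mapping.
+ */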
+
+ /*
+ * Note that we don't need to register the buffer here, because this
+ * operation does not modify the page. The insert/update/delete that
+ * called us certainly did, but that's WAL-logged separately.
+ */
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfHeapNewCid);
+
+ /* will be looked at irrespective of origin */
+
+ recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
+
+ return recptr;
+}
+
+/*
+ * Build a heap tuple representing the configured REPLICA IDENTITY to represent
+ * the old tuple in an UPDATE or DELETE.
+ *
+ * Returns NULL if there's no need to log an identity or if there's no suitable
+ * key defined.
+ *
+ * Pass key_required true if any replica identity columns changed value, or if
+ * any of them have any external data. Delete must always pass true.
+ *
+ * *copy is set to true if the returned tuple is a modified copy rather than
+ * the same tuple that was passed in.
+ */
+static HeapTuple
+ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
+ bool *copy)
+{
+ TupleDesc desc = RelationGetDescr(relation);
+ char replident = relation->rd_rel->relreplident;
+ Bitmapset *idattrs;
+ HeapTuple key_tuple;
+ bool nulls[MaxHeapAttributeNumber];
+ Datum values[MaxHeapAttributeNumber];
+
+ *copy = false;
+
+ if (!RelationIsLogicallyLogged(relation))
+ return NULL;
+
+ if (replident == REPLICA_IDENTITY_NOTHING)
+ return NULL;
+
+ if (replident == REPLICA_IDENTITY_FULL)
+ {
+ /*
+ * When logging the entire old tuple, it very well could contain
+ * toasted columns. If so, force them to be inlined.
+ */
+ if (HeapTupleHasExternal(tp))
+ {
+ *copy = true;
+ tp = toast_flatten_tuple(tp, desc);
+ }
+ return tp;
+ }
+
+ /* if the key isn't required and we're only logging the key, we're done */
+ if (!key_required)
+ return NULL;
+
+ /* find out the replica identity columns */
+ idattrs = RelationGetIndexAttrBitmap(relation,
+ INDEX_ATTR_BITMAP_IDENTITY_KEY);
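+ /*
+ * For example, after something like "ALTER TABLE t REPLICA IDENTITY
+ * USING INDEX t_keycol_idx" (hypothetical names), idattrs contains just
+ * that index's columns, and the tuple built below carries only those
+ * columns, with every other attribute nulled out.
+ */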
+
+ /*
+ * If there are no defined replica identity columns, treat this case as
+ * !key_required.
+ * (This case should not be reachable from heap_update, since that should
+ * calculate key_required accurately. But heap_delete just passes
+ * constant true for key_required, so we can hit this case in deletes.)
+ */
+ if (bms_is_empty(idattrs))
+ return NULL;
+
+ /*
+ * Construct a new tuple containing only the replica identity columns,
+ * with nulls elsewhere. While we're at it, assert that the replica
+ * identity columns aren't null.
+ */
+ heap_deform_tuple(tp, desc, values, nulls);
+
+ for (int i = 0; i < desc->natts; i++)
+ {
+ if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
+ idattrs))
+ Assert(!nulls[i]);
+ else
+ nulls[i] = true;
+ }
+
+ key_tuple = heap_form_tuple(desc, values, nulls);
+ *copy = true;
+
+ bms_free(idattrs);
+
+ /*
+ * If the tuple, which by here only contains indexed columns, still has
+ * toasted columns, force them to be inlined. This is somewhat unlikely
+ * since there are limits on the size of indexed columns, so we don't
+ * duplicate toast_flatten_tuple()'s functionality in the above loop over
+ * the indexed columns, even if it would be more efficient.
+ */
+ if (HeapTupleHasExternal(key_tuple))
+ {
+ HeapTuple oldtup = key_tuple;
+
+ key_tuple = toast_flatten_tuple(oldtup, desc);
+ heap_freetuple(oldtup);
+ }
+
+ return key_tuple;
+}
+
+/*
+ * Handles XLOG_HEAP2_PRUNE record type.
+ *
+ * Acquires a super-exclusive lock.
+ */
+static void
+heap_xlog_prune(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_prune *xlrec = (xl_heap_prune *) XLogRecGetData(record);
+ Buffer buffer;
+ RelFileNode rnode;
+ BlockNumber blkno;
+ XLogRedoAction action;
+
+ XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
+
+ /*
+ * We're about to remove tuples. In Hot Standby mode, ensure that there are
+ * no queries running for which the removed tuples are still visible.
+ */
+ if (InHotStandby)
+ ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
+
+ /*
+ * If we have a full-page image, restore it (using a cleanup lock) and
+ * we're done.
+ */
+ action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true,
+ &buffer);
+ if (action == BLK_NEEDS_REDO)
+ {
+ Page page = (Page) BufferGetPage(buffer);
+ OffsetNumber *end;
+ OffsetNumber *redirected;
+ OffsetNumber *nowdead;
+ OffsetNumber *nowunused;
+ int nredirected;
+ int ndead;
+ int nunused;
+ Size datalen;
+
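+ /*
+ * Layout sketch of the block 0 data unpacked below: nredirected pairs
+ * of "from"/"to" OffsetNumbers come first, then ndead now-dead
+ * OffsetNumbers, and whatever remains (as implied by datalen) is the
+ * array of now-unused OffsetNumbers.
+ */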
+ redirected = (OffsetNumber *) XLogRecGetBlockData(record, 0, &datalen);
+
+ nredirected = xlrec->nredirected;
+ ndead = xlrec->ndead;
+ end = (OffsetNumber *) ((char *) redirected + datalen);
+ nowdead = redirected + (nredirected * 2);
+ nowunused = nowdead + ndead;
+ nunused = (end - nowunused);
+ Assert(nunused >= 0);
+
+ /* Update all line pointers per the record, and repair fragmentation */
+ heap_page_prune_execute(buffer,
+ redirected, nredirected,
+ nowdead, ndead,
+ nowunused, nunused);
+
+ /*
+ * Note: we don't worry about updating the page's prunability hints.
+ * At worst this will cause an extra prune cycle to occur soon.
+ */
+
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(buffer);
+ }
+
+ if (BufferIsValid(buffer))
+ {
+ Size freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
+
+ UnlockReleaseBuffer(buffer);
+
+ /*
+ * After pruning records from a page, it's useful to update the FSM
+ * about it, as that may make the page a target for insertions later
+ * even if vacuum decides not to visit it (which is possible if it
+ * gets marked all-visible).
+ *
+ * Do this regardless of a full-page image being applied, since the
+ * FSM data is not in the page anyway.
+ */
+ XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
+ }
+}
+
+/*
+ * Handles XLOG_HEAP2_VACUUM record type.
+ *
+ * Acquires an exclusive lock only.
+ */
+static void
+heap_xlog_vacuum(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_vacuum *xlrec = (xl_heap_vacuum *) XLogRecGetData(record);
+ Buffer buffer;
+ BlockNumber blkno;
+ XLogRedoAction action;
+
+ /*
+ * If we have a full-page image, restore it (without using a cleanup lock)
+ * and we're done.
+ */
+ action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, false,
+ &buffer);
+ if (action == BLK_NEEDS_REDO)
+ {
+ Page page = (Page) BufferGetPage(buffer);
+ OffsetNumber *nowunused;
+ Size datalen;
+ OffsetNumber *offnum;
+
+ nowunused = (OffsetNumber *) XLogRecGetBlockData(record, 0, &datalen);
+
+ /* Shouldn't be a record unless there's something to do */
+ Assert(xlrec->nunused > 0);
+
+ /* Update all now-unused line pointers */
+ offnum = nowunused;
+ for (int i = 0; i < xlrec->nunused; i++)
+ {
+ OffsetNumber off = *offnum++;
+ ItemId lp = PageGetItemId(page, off);
+
+ Assert(ItemIdIsDead(lp) && !ItemIdHasStorage(lp));
+ ItemIdSetUnused(lp);
+ }
+
+ /* Attempt to truncate line pointer array now */
+ PageTruncateLinePointerArray(page);
+
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(buffer);
+ }
+
+ if (BufferIsValid(buffer))
+ {
+ Size freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
+ RelFileNode rnode;
+
+ XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
+
+ UnlockReleaseBuffer(buffer);
+
+ /*
+ * After vacuuming LP_DEAD items from a page, it's useful to update
+ * the FSM about it, as that may make the page a target for insertions
+ * later even if vacuum decides not to visit it (which is possible if
+ * it gets marked all-visible).
+ *
+ * Do this regardless of a full-page image being applied, since the
+ * FSM data is not in the page anyway.
+ */
+ XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
+ }
+}
+
+/*
+ * Replay XLOG_HEAP2_VISIBLE record.
+ *
+ * The critical integrity requirement here is that we must never end up with
+ * a situation where the visibility map bit is set, and the page-level
+ * PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
+ * page modification would fail to clear the visibility map bit.
+ */
+static void
+heap_xlog_visible(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
+ Buffer vmbuffer = InvalidBuffer;
+ Buffer buffer;
+ Page page;
+ RelFileNode rnode;
+ BlockNumber blkno;
+ XLogRedoAction action;
+
+ XLogRecGetBlockTag(record, 1, &rnode, NULL, &blkno);
+
+ /*
+ * If there are any Hot Standby transactions running that have an xmin
+ * horizon old enough that this page isn't all-visible for them, they
+ * might incorrectly decide that an index-only scan can skip a heap fetch.
+ *
+ * NB: It might be better to throw some kind of "soft" conflict here that
+ * forces any index-only scan that is in flight to perform heap fetches,
+ * rather than killing the transaction outright.
+ */
+ if (InHotStandby)
+ ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
+
+ /*
+ * Read the heap page, if it still exists. If the heap file has been dropped
+ * or truncated later in recovery, we don't need to update the page, but we'd
+ * better still update the visibility map.
+ */
+ action = XLogReadBufferForRedo(record, 1, &buffer);
+ if (action == BLK_NEEDS_REDO)
+ {
+ /*
+ * We don't bump the LSN of the heap page when setting the visibility
+ * map bit (unless checksums or wal_log_hints is enabled, in which
+ * case we must), because that would generate an unworkable volume of
+ * full-page writes. This exposes us to torn page hazards, but since
+ * we're not inspecting the existing page contents in any way, we
+ * don't care.
+ *
+ * However, all operations that clear the visibility map bit *do* bump
+ * the LSN, and those operations will only be replayed if the XLOG LSN
+ * follows the page LSN. Thus, if the page LSN has advanced past our
+ * XLOG record's LSN, we mustn't mark the page all-visible, because
+ * the subsequent update won't be replayed to clear the flag.
+ */
+ page = BufferGetPage(buffer);
+
+ PageSetAllVisible(page);
+
+ MarkBufferDirty(buffer);
+ }
+ else if (action == BLK_RESTORED)
+ {
+ /*
+ * If the heap block was backed up, we already restored it and there's
+ * nothing more to do. (This can only happen with checksums or
+ * wal_log_hints enabled.)
+ */
+ }
+
+ if (BufferIsValid(buffer))
+ {
+ Size space = PageGetFreeSpace(BufferGetPage(buffer));
+
+ UnlockReleaseBuffer(buffer);
+
+ /*
+ * Since FSM is not WAL-logged and only updated heuristically, it
+ * easily becomes stale in standbys. If the standby is later promoted
+ * and runs VACUUM, it will skip updating individual free space
+ * figures for pages that became all-visible (or all-frozen, depending
+ * on the vacuum mode), which is troublesome when FreeSpaceMapVacuum
+ * propagates overly optimistic free space values to upper FSM layers;
+ * later inserters try to use such pages only to find out that they
+ * are unusable. This can cause long stalls when there are many such
+ * pages.
+ *
+ * Forestall those problems by updating FSM's idea about a page that
+ * is becoming all-visible or all-frozen.
+ *
+ * Do this regardless of a full-page image being applied, since the
+ * FSM data is not in the page anyway.
+ */
+ if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
+ XLogRecordPageWithFreeSpace(rnode, blkno, space);
+ }
+
+ /*
+ * Even if we skipped the heap page update due to the LSN interlock, it's
+ * still safe to update the visibility map. Any WAL record that clears
+ * the visibility map bit does so before checking the page LSN, so any
+ * bits that need to be cleared will still be cleared.
+ */
+ if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
+ &vmbuffer) == BLK_NEEDS_REDO)
+ {
+ Page vmpage = BufferGetPage(vmbuffer);
+ Relation reln;
+
+ /* initialize the page if it was read as zeros */
+ if (PageIsNew(vmpage))
+ PageInit(vmpage, BLCKSZ, 0);
+
+ /*
+ * XLogReadBufferForRedoExtended locked the buffer. But
+ * visibilitymap_set will handle locking itself.
+ */
+ LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
+
+ reln = CreateFakeRelcacheEntry(rnode);
+ visibilitymap_pin(reln, blkno, &vmbuffer);
+
+ /*
+ * Don't set the bit if replay has already passed this point.
+ *
+ * It might be safe to do this unconditionally; if replay has passed
+ * this point, we'll replay at least as far this time as we did
+ * before, and if this bit needs to be cleared, the record responsible
+ * for doing so will be replayed again, clearing it. For right now,
+ * out of an abundance of caution, we use the same test here that we
+ * did for the heap page. If this results in a dropped bit, no
+ * real harm is done; and the next VACUUM will fix it.
+ */
+ if (lsn > PageGetLSN(vmpage))
+ visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
+ xlrec->cutoff_xid, xlrec->flags);
+
+ ReleaseBuffer(vmbuffer);
+ FreeFakeRelcacheEntry(reln);
+ }
+ else if (BufferIsValid(vmbuffer))
+ UnlockReleaseBuffer(vmbuffer);
+}
+
+/*
+ * Replay XLOG_HEAP2_FREEZE_PAGE records
+ */
+static void
+heap_xlog_freeze_page(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_freeze_page *xlrec = (xl_heap_freeze_page *) XLogRecGetData(record);
+ TransactionId cutoff_xid = xlrec->cutoff_xid;
+ Buffer buffer;
+ int ntup;
+
+ /*
+ * In Hot Standby mode, ensure that there are no queries running that still
+ * consider the frozen xids as running.
+ */
+ if (InHotStandby)
+ {
+ RelFileNode rnode;
+ TransactionId latestRemovedXid = cutoff_xid;
+
+ TransactionIdRetreat(latestRemovedXid);
+
+ XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
+ ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rnode);
+ }
+
+ if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
+ {
+ Page page = BufferGetPage(buffer);
+ xl_heap_freeze_tuple *tuples;
+
+ tuples = (xl_heap_freeze_tuple *) XLogRecGetBlockData(record, 0, NULL);
+
+ /* now execute freeze plan for each frozen tuple */
+ for (ntup = 0; ntup < xlrec->ntuples; ntup++)
+ {
+ xl_heap_freeze_tuple *xlrec_tp;
+ ItemId lp;
+ HeapTupleHeader tuple;
+
+ xlrec_tp = &tuples[ntup];
+ lp = PageGetItemId(page, xlrec_tp->offset); /* offsets are one-based */
+ tuple = (HeapTupleHeader) PageGetItem(page, lp);
+
+ heap_execute_freeze_tuple(tuple, xlrec_tp);
+ }
+
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(buffer);
+ }
+ if (BufferIsValid(buffer))
+ UnlockReleaseBuffer(buffer);
+}
+
+/*
+ * Given an "infobits" field from an XLog record, set the correct bits in the
+ * given infomask and infomask2 for the tuple touched by the record.
+ *
+ * (This is the reverse of compute_infobits).
+ */
+static void
+fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
+{
+ *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
+ HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
+ *infomask2 &= ~HEAP_KEYS_UPDATED;
+
+ if (infobits & XLHL_XMAX_IS_MULTI)
+ *infomask |= HEAP_XMAX_IS_MULTI;
+ if (infobits & XLHL_XMAX_LOCK_ONLY)
+ *infomask |= HEAP_XMAX_LOCK_ONLY;
+ if (infobits & XLHL_XMAX_EXCL_LOCK)
+ *infomask |= HEAP_XMAX_EXCL_LOCK;
+ /*
+ * Note HEAP_XMAX_SHR_LOCK isn't considered here: it is defined as the
+ * combination of HEAP_XMAX_EXCL_LOCK and HEAP_XMAX_KEYSHR_LOCK, so
+ * setting both of those bits reconstructs a share lock.
+ */
+ if (infobits & XLHL_XMAX_KEYSHR_LOCK)
+ *infomask |= HEAP_XMAX_KEYSHR_LOCK;
+
+ if (infobits & XLHL_KEYS_UPDATED)
+ *infomask2 |= HEAP_KEYS_UPDATED;
+}
+
+static void
+heap_xlog_delete(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
+ Buffer buffer;
+ Page page;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
+ BlockNumber blkno;
+ RelFileNode target_node;
+ ItemPointerData target_tid;
+
+ XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
+ ItemPointerSetBlockNumber(&target_tid, blkno);
+ ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
+
+ /*
+ * The visibility map may need to be fixed even if the heap page is
+ * already up-to-date.
+ */
+ if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
+ {
+ Relation reln = CreateFakeRelcacheEntry(target_node);
+ Buffer vmbuffer = InvalidBuffer;
+
+ visibilitymap_pin(reln, blkno, &vmbuffer);
+ visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
+ ReleaseBuffer(vmbuffer);
+ FreeFakeRelcacheEntry(reln);
+ }
+
+ if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
+ {
+ page = BufferGetPage(buffer);
+
+ if (PageGetMaxOffsetNumber(page) >= xlrec->offnum)
+ lp = PageGetItemId(page, xlrec->offnum);
+
+ if (PageGetMaxOffsetNumber(page) < xlrec->offnum || !ItemIdIsNormal(lp))
+ elog(PANIC, "invalid lp");
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+ htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ HeapTupleHeaderClearHotUpdated(htup);
+ fix_infomask_from_infobits(xlrec->infobits_set,
+ &htup->t_infomask, &htup->t_infomask2);
+ if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
+ HeapTupleHeaderSetXmax(htup, xlrec->xmax);
+ else
+ HeapTupleHeaderSetXmin(htup, InvalidTransactionId);
+ HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
+
+ /* Mark the page as a candidate for pruning */
+ PageSetPrunable(page, XLogRecGetXid(record));
+
+ if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
+ PageClearAllVisible(page);
+
+ /* Make sure t_ctid is set correctly */
+ if (xlrec->flags & XLH_DELETE_IS_PARTITION_MOVE)
+ HeapTupleHeaderSetMovedPartitions(htup);
+ else
+ htup->t_ctid = target_tid;
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(buffer);
+ }
+ if (BufferIsValid(buffer))
+ UnlockReleaseBuffer(buffer);
+}
+
+static void
+heap_xlog_insert(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
+ Buffer buffer;
+ Page page;
+ union
+ {
+ HeapTupleHeaderData hdr;
+ char data[MaxHeapTupleSize];
+ } tbuf;
+ HeapTupleHeader htup;
+ xl_heap_header xlhdr;
+ uint32 newlen;
+ Size freespace = 0;
+ RelFileNode target_node;
+ BlockNumber blkno;
+ ItemPointerData target_tid;
+ XLogRedoAction action;
+
+ XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
+ ItemPointerSetBlockNumber(&target_tid, blkno);
+ ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
+
+ /*
+ * The visibility map may need to be fixed even if the heap page is
+ * already up-to-date.
+ */
+ if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
+ {
+ Relation reln = CreateFakeRelcacheEntry(target_node);
+ Buffer vmbuffer = InvalidBuffer;
+
+ visibilitymap_pin(reln, blkno, &vmbuffer);
+ visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
+ ReleaseBuffer(vmbuffer);
+ FreeFakeRelcacheEntry(reln);
+ }
+
+ /*
+ * If we inserted the first and only tuple on the page, re-initialize the
+ * page from scratch.
+ */
+ if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
+ {
+ buffer = XLogInitBufferForRedo(record, 0);
+ page = BufferGetPage(buffer);
+ PageInit(page, BufferGetPageSize(buffer), 0);
+ action = BLK_NEEDS_REDO;
+ }
+ else
+ action = XLogReadBufferForRedo(record, 0, &buffer);
+ if (action == BLK_NEEDS_REDO)
+ {
+ Size datalen;
+ char *data;
+
+ page = BufferGetPage(buffer);
+
+ if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
+ elog(PANIC, "invalid max offset number");
+
+ data = XLogRecGetBlockData(record, 0, &datalen);
+
+ newlen = datalen - SizeOfHeapHeader;
+ Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize);
+ memcpy((char *) &xlhdr, data, SizeOfHeapHeader);
+ data += SizeOfHeapHeader;
+
+ htup = &tbuf.hdr;
+ MemSet((char *) htup, 0, SizeofHeapTupleHeader);
+ /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
+ memcpy((char *) htup + SizeofHeapTupleHeader,
+ data,
+ newlen);
+ newlen += SizeofHeapTupleHeader;
+ htup->t_infomask2 = xlhdr.t_infomask2;
+ htup->t_infomask = xlhdr.t_infomask;
+ htup->t_hoff = xlhdr.t_hoff;
+ HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
+ HeapTupleHeaderSetCmin(htup, FirstCommandId);
+ htup->t_ctid = target_tid;
+
+ if (PageAddItem(page, (Item) htup, newlen, xlrec->offnum,
+ true, true) == InvalidOffsetNumber)
+ elog(PANIC, "failed to add tuple");
+
+ freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
+
+ PageSetLSN(page, lsn);
+
+ if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
+ PageClearAllVisible(page);
+
+ /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
+ if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
+ PageSetAllVisible(page);
+
+ MarkBufferDirty(buffer);
+ }
+ if (BufferIsValid(buffer))
+ UnlockReleaseBuffer(buffer);
+
+ /*
+ * If the page is running low on free space, update the FSM as well.
+ * Arbitrarily, our definition of "low" is less than 20%. We can't do much
+ * better than that without knowing the fill-factor for the table.
+ *
+ * XXX: Don't do this if the page was restored from a full-page image. We
+ * don't bother to update the FSM in that case; it doesn't need to be
+ * totally accurate anyway.
+ */
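+ /*
+ * With the default 8 kB block size, BLCKSZ / 5 works out to 1638 bytes,
+ * so the FSM is only updated once less than roughly 20% of the page
+ * remains free.
+ */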
+ if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
+ XLogRecordPageWithFreeSpace(target_node, blkno, freespace);
+}
+
+/*
+ * Handles MULTI_INSERT record type.
+ */
+static void
+heap_xlog_multi_insert(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_multi_insert *xlrec;
+ RelFileNode rnode;
+ BlockNumber blkno;
+ Buffer buffer;
+ Page page;
+ union
+ {
+ HeapTupleHeaderData hdr;
+ char data[MaxHeapTupleSize];
+ } tbuf;
+ HeapTupleHeader htup;
+ uint32 newlen;
+ Size freespace = 0;
+ int i;
+ bool isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
+ XLogRedoAction action;
+
+ /*
+ * Insertion doesn't overwrite MVCC data, so no conflict processing is
+ * required.
+ */
+ xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);
+
+ XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
+
+ /* check that the mutually exclusive flags are not both set */
+ Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
+ (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
+
+ /*
+ * The visibility map may need to be fixed even if the heap page is
+ * already up-to-date.
+ */
+ if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
+ {
+ Relation reln = CreateFakeRelcacheEntry(rnode);
+ Buffer vmbuffer = InvalidBuffer;
+
+ visibilitymap_pin(reln, blkno, &vmbuffer);
+ visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
+ ReleaseBuffer(vmbuffer);
+ FreeFakeRelcacheEntry(reln);
+ }
+
+ if (isinit)
+ {
+ buffer = XLogInitBufferForRedo(record, 0);
+ page = BufferGetPage(buffer);
+ PageInit(page, BufferGetPageSize(buffer), 0);
+ action = BLK_NEEDS_REDO;
+ }
+ else
+ action = XLogReadBufferForRedo(record, 0, &buffer);
+ if (action == BLK_NEEDS_REDO)
+ {
+ char *tupdata;
+ char *endptr;
+ Size len;
+
+ /* Tuples are stored as block data */
+ tupdata = XLogRecGetBlockData(record, 0, &len);
+ endptr = tupdata + len;
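+ /*
+ * Sketch of the block data consumed below: for each of the ntuples
+ * entries, a SHORTALIGN'd xl_multi_insert_tuple header is followed
+ * immediately by that tuple's data (xlhdr->datalen bytes), and the
+ * whole sequence must end exactly at endptr.
+ */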
+
+ page = (Page) BufferGetPage(buffer);
+
+ for (i = 0; i < xlrec->ntuples; i++)
+ {
+ OffsetNumber offnum;
+ xl_multi_insert_tuple *xlhdr;
+
+ /*
+ * If we're reinitializing the page, the tuples are stored in
+ * order from FirstOffsetNumber. Otherwise there's an array of
+ * offsets in the WAL record, and the tuples come after that.
+ */
+ if (isinit)
+ offnum = FirstOffsetNumber + i;
+ else
+ offnum = xlrec->offsets[i];
+ if (PageGetMaxOffsetNumber(page) + 1 < offnum)
+ elog(PANIC, "invalid max offset number");
+
+ xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(tupdata);
+ tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
+
+ newlen = xlhdr->datalen;
+ Assert(newlen <= MaxHeapTupleSize);
+ htup = &tbuf.hdr;
+ MemSet((char *) htup, 0, SizeofHeapTupleHeader);
+ /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
+ memcpy((char *) htup + SizeofHeapTupleHeader,
+ (char *) tupdata,
+ newlen);
+ tupdata += newlen;
+
+ newlen += SizeofHeapTupleHeader;
+ htup->t_infomask2 = xlhdr->t_infomask2;
+ htup->t_infomask = xlhdr->t_infomask;
+ htup->t_hoff = xlhdr->t_hoff;
+ HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
+ HeapTupleHeaderSetCmin(htup, FirstCommandId);
+ ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
+ ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
+
+ offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
+ if (offnum == InvalidOffsetNumber)
+ elog(PANIC, "failed to add tuple");
+ }
+ if (tupdata != endptr)
+ elog(PANIC, "total tuple length mismatch");
+
+ freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
+
+ PageSetLSN(page, lsn);
+
+ if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
+ PageClearAllVisible(page);
+
+ /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
+ if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
+ PageSetAllVisible(page);
+
+ MarkBufferDirty(buffer);
+ }
+ if (BufferIsValid(buffer))
+ UnlockReleaseBuffer(buffer);
+
+ /*
+ * If the page is running low on free space, update the FSM as well.
+ * Arbitrarily, our definition of "low" is less than 20%. We can't do much
+ * better than that without knowing the fill-factor for the table.
+ *
+ * XXX: Don't do this if the page was restored from a full-page image. We
+ * don't bother to update the FSM in that case; it doesn't need to be
+ * totally accurate anyway.
+ */
+ if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
+ XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
+}
+
+/*
+ * Handles UPDATE and HOT_UPDATE
+ */
+static void
+heap_xlog_update(XLogReaderState *record, bool hot_update)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
+ RelFileNode rnode;
+ BlockNumber oldblk;
+ BlockNumber newblk;
+ ItemPointerData newtid;
+ Buffer obuffer,
+ nbuffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleData oldtup;
+ HeapTupleHeader htup;
+ uint16 prefixlen = 0,
+ suffixlen = 0;
+ char *newp;
+ union
+ {
+ HeapTupleHeaderData hdr;
+ char data[MaxHeapTupleSize];
+ } tbuf;
+ xl_heap_header xlhdr;
+ uint32 newlen;
+ Size freespace = 0;
+ XLogRedoAction oldaction;
+ XLogRedoAction newaction;
+
+ /* initialize to keep the compiler quiet */
+ oldtup.t_data = NULL;
+ oldtup.t_len = 0;
+
+ XLogRecGetBlockTag(record, 0, &rnode, NULL, &newblk);
+ if (XLogRecGetBlockTag(record, 1, NULL, NULL, &oldblk))
+ {
+ /* HOT updates are never done across pages */
+ Assert(!hot_update);
+ }
+ else
+ oldblk = newblk;
+
+ ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
+
+ /*
+ * The visibility map may need to be fixed even if the heap page is
+ * already up-to-date.
+ */
+ if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
+ {
+ Relation reln = CreateFakeRelcacheEntry(rnode);
+ Buffer vmbuffer = InvalidBuffer;
+
+ visibilitymap_pin(reln, oldblk, &vmbuffer);
+ visibilitymap_clear(reln, oldblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
+ ReleaseBuffer(vmbuffer);
+ FreeFakeRelcacheEntry(reln);
+ }
+
+ /*
+ * In normal operation, it is important to lock the two pages in
+ * page-number order, to avoid possible deadlocks against other update
+ * operations going the other way. However, during WAL replay there can
+ * be no other update happening, so we don't need to worry about that. But
+ * we *do* need to worry that we don't expose an inconsistent state to Hot
+ * Standby queries --- so the original page can't be unlocked before we've
+ * added the new tuple to the new page.
+ */
+
+ /* Deal with old tuple version */
+ oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
+ &obuffer);
+ if (oldaction == BLK_NEEDS_REDO)
+ {
+ page = BufferGetPage(obuffer);
+ offnum = xlrec->old_offnum;
+ if (PageGetMaxOffsetNumber(page) >= offnum)
+ lp = PageGetItemId(page, offnum);
+
+ if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+ elog(PANIC, "invalid lp");
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ oldtup.t_data = htup;
+ oldtup.t_len = ItemIdGetLength(lp);
+
+ htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+ htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ if (hot_update)
+ HeapTupleHeaderSetHotUpdated(htup);
+ else
+ HeapTupleHeaderClearHotUpdated(htup);
+ fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
+ &htup->t_infomask2);
+ HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
+ HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
+ /* Set forward chain link in t_ctid */
+ htup->t_ctid = newtid;
+
+ /* Mark the page as a candidate for pruning */
+ PageSetPrunable(page, XLogRecGetXid(record));
+
+ if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
+ PageClearAllVisible(page);
+
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(obuffer);
+ }
+
+ /*
+ * Read the page the new tuple goes into, if different from old.
+ */
+ if (oldblk == newblk)
+ {
+ nbuffer = obuffer;
+ newaction = oldaction;
+ }
+ else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
+ {
+ nbuffer = XLogInitBufferForRedo(record, 0);
+ page = (Page) BufferGetPage(nbuffer);
+ PageInit(page, BufferGetPageSize(nbuffer), 0);
+ newaction = BLK_NEEDS_REDO;
+ }
+ else
+ newaction = XLogReadBufferForRedo(record, 0, &nbuffer);
+
+ /*
+ * The visibility map may need to be fixed even if the heap page is
+ * already up-to-date.
+ */
+ if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
+ {
+ Relation reln = CreateFakeRelcacheEntry(rnode);
+ Buffer vmbuffer = InvalidBuffer;
+
+ visibilitymap_pin(reln, newblk, &vmbuffer);
+ visibilitymap_clear(reln, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
+ ReleaseBuffer(vmbuffer);
+ FreeFakeRelcacheEntry(reln);
+ }
+
+ /* Deal with new tuple */
+ if (newaction == BLK_NEEDS_REDO)
+ {
+ char *recdata;
+ char *recdata_end;
+ Size datalen;
+ Size tuplen;
+
+ recdata = XLogRecGetBlockData(record, 0, &datalen);
+ recdata_end = recdata + datalen;
+
+ page = BufferGetPage(nbuffer);
+
+ offnum = xlrec->new_offnum;
+ if (PageGetMaxOffsetNumber(page) + 1 < offnum)
+ elog(PANIC, "invalid max offset number");
+
+ if (xlrec->flags & XLH_UPDATE_PREFIX_FROM_OLD)
+ {
+ Assert(newblk == oldblk);
+ memcpy(&prefixlen, recdata, sizeof(uint16));
+ recdata += sizeof(uint16);
+ }
+ if (xlrec->flags & XLH_UPDATE_SUFFIX_FROM_OLD)
+ {
+ Assert(newblk == oldblk);
+ memcpy(&suffixlen, recdata, sizeof(uint16));
+ recdata += sizeof(uint16);
+ }
+
+ memcpy((char *) &xlhdr, recdata, SizeOfHeapHeader);
+ recdata += SizeOfHeapHeader;
+
+ tuplen = recdata_end - recdata;
+ Assert(tuplen <= MaxHeapTupleSize);
+
+ htup = &tbuf.hdr;
+ MemSet((char *) htup, 0, SizeofHeapTupleHeader);
+
+ /*
+ * Reconstruct the new tuple using the prefix and/or suffix from the
+ * old tuple, and the data stored in the WAL record.
+ */
+ newp = (char *) htup + SizeofHeapTupleHeader;
+ if (prefixlen > 0)
+ {
+ int len;
+
+ /* copy bitmap [+ padding] [+ oid] from WAL record */
+ len = xlhdr.t_hoff - SizeofHeapTupleHeader;
+ memcpy(newp, recdata, len);
+ recdata += len;
+ newp += len;
+
+ /* copy prefix from old tuple */
+ memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
+ newp += prefixlen;
+
+ /* copy new tuple data from WAL record */
+ len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
+ memcpy(newp, recdata, len);
+ recdata += len;
+ newp += len;
+ }
+ else
+ {
+ /*
+ * copy bitmap [+ padding] [+ oid] + data from record, all in one
+ * go
+ */
+ memcpy(newp, recdata, tuplen);
+ recdata += tuplen;
+ newp += tuplen;
+ }
+ Assert(recdata == recdata_end);
+
+ /* copy suffix from old tuple */
+ if (suffixlen > 0)
+ memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
+
+ newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
+ htup->t_infomask2 = xlhdr.t_infomask2;
+ htup->t_infomask = xlhdr.t_infomask;
+ htup->t_hoff = xlhdr.t_hoff;
+
+ HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
+ HeapTupleHeaderSetCmin(htup, FirstCommandId);
+ HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
+ /* Make sure there is no forward chain link in t_ctid */
+ htup->t_ctid = newtid;
+
+ offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
+ if (offnum == InvalidOffsetNumber)
+ elog(PANIC, "failed to add tuple");
+
+ if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
+ PageClearAllVisible(page);
+
+ freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
+
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(nbuffer);
+ }
+
+ if (BufferIsValid(nbuffer) && nbuffer != obuffer)
+ UnlockReleaseBuffer(nbuffer);
+ if (BufferIsValid(obuffer))
+ UnlockReleaseBuffer(obuffer);
+
+ /*
+ * If the new page is running low on free space, update the FSM as well.
+ * Arbitrarily, our definition of "low" is less than 20%. We can't do much
+ * better than that without knowing the fill-factor for the table.
+ *
+ * However, don't update the FSM on HOT updates, because after crash
+ * recovery, either the old or the new tuple will certainly be dead and
+ * prunable. After pruning, the page will have roughly as much free space
+ * as it did before the update, assuming the new tuple is about the same
+ * size as the old one.
+ *
+ * XXX: Don't do this if the page was restored from a full-page image. We
+ * don't bother to update the FSM in that case; it doesn't need to be
+ * totally accurate anyway.
+ */
+ if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
+ XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
+}
+
+static void
+heap_xlog_confirm(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_confirm *xlrec = (xl_heap_confirm *) XLogRecGetData(record);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
+
+ if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
+ {
+ page = BufferGetPage(buffer);
+
+ offnum = xlrec->offnum;
+ if (PageGetMaxOffsetNumber(page) >= offnum)
+ lp = PageGetItemId(page, offnum);
+
+ if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+ elog(PANIC, "invalid lp");
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ /*
+ * Confirm tuple as actually inserted
+ */
+ ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);
+
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(buffer);
+ }
+ if (BufferIsValid(buffer))
+ UnlockReleaseBuffer(buffer);
+}
+
+static void
+heap_xlog_lock(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
+
+ /*
+ * The visibility map may need to be fixed even if the heap page is
+ * already up-to-date.
+ */
+ if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
+ {
+ RelFileNode rnode;
+ Buffer vmbuffer = InvalidBuffer;
+ BlockNumber block;
+ Relation reln;
+
+ XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
+ reln = CreateFakeRelcacheEntry(rnode);
+
+ visibilitymap_pin(reln, block, &vmbuffer);
+ visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
+
+ ReleaseBuffer(vmbuffer);
+ FreeFakeRelcacheEntry(reln);
+ }
+
+ if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
+ {
+ page = (Page) BufferGetPage(buffer);
+
+ offnum = xlrec->offnum;
+ if (PageGetMaxOffsetNumber(page) >= offnum)
+ lp = PageGetItemId(page, offnum);
+
+ if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+ elog(PANIC, "invalid lp");
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+ htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
+ &htup->t_infomask2);
+
+ /*
+ * Clear relevant update flags, but only if the modified infomask says
+ * there's no update.
+ */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
+ {
+ HeapTupleHeaderClearHotUpdated(htup);
+ /* Make sure there is no forward chain link in t_ctid */
+ ItemPointerSet(&htup->t_ctid,
+ BufferGetBlockNumber(buffer),
+ offnum);
+ }
+ HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
+ HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(buffer);
+ }
+ if (BufferIsValid(buffer))
+ UnlockReleaseBuffer(buffer);
+}
+
+static void
+heap_xlog_lock_updated(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_lock_updated *xlrec;
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
+
+ xlrec = (xl_heap_lock_updated *) XLogRecGetData(record);
+
+ /*
+ * The visibility map may need to be fixed even if the heap page is
+ * already up-to-date.
+ */
+ if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
+ {
+ RelFileNode rnode;
+ Buffer vmbuffer = InvalidBuffer;
+ BlockNumber block;
+ Relation reln;
+
+ XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
+ reln = CreateFakeRelcacheEntry(rnode);
+
+ visibilitymap_pin(reln, block, &vmbuffer);
+ visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
+
+ ReleaseBuffer(vmbuffer);
+ FreeFakeRelcacheEntry(reln);
+ }
+
+ if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
+ {
+ page = BufferGetPage(buffer);
+
+ offnum = xlrec->offnum;
+ if (PageGetMaxOffsetNumber(page) >= offnum)
+ lp = PageGetItemId(page, offnum);
+
+ if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+ elog(PANIC, "invalid lp");
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+ htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+ fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
+ &htup->t_infomask2);
+ HeapTupleHeaderSetXmax(htup, xlrec->xmax);
+
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(buffer);
+ }
+ if (BufferIsValid(buffer))
+ UnlockReleaseBuffer(buffer);
+}
+
+static void
+heap_xlog_inplace(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
+ uint32 oldlen;
+ Size newlen;
+
+ if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
+ {
+ char *newtup = XLogRecGetBlockData(record, 0, &newlen);
+
+ page = BufferGetPage(buffer);
+
+ offnum = xlrec->offnum;
+ if (PageGetMaxOffsetNumber(page) >= offnum)
+ lp = PageGetItemId(page, offnum);
+
+ if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+ elog(PANIC, "invalid lp");
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ oldlen = ItemIdGetLength(lp) - htup->t_hoff;
+ if (oldlen != newlen)
+ elog(PANIC, "wrong tuple length");
+
+ memcpy((char *) htup + htup->t_hoff, newtup, newlen);
+
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(buffer);
+ }
+ if (BufferIsValid(buffer))
+ UnlockReleaseBuffer(buffer);
+}
+
+void
+heap_redo(XLogReaderState *record)
+{
+ uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
+
+ /*
+ * These operations don't overwrite MVCC data, so no conflict processing is
+ * required. The ones in the heap2 rmgr do.
+ */
+
+ switch (info & XLOG_HEAP_OPMASK)
+ {
+ case XLOG_HEAP_INSERT:
+ heap_xlog_insert(record);
+ break;
+ case XLOG_HEAP_DELETE:
+ heap_xlog_delete(record);
+ break;
+ case XLOG_HEAP_UPDATE:
+ heap_xlog_update(record, false);
+ break;
+ case XLOG_HEAP_TRUNCATE:
+
+ /*
+ * TRUNCATE is a no-op because the actions are already logged as
+ * SMGR WAL records. The TRUNCATE WAL record only exists for logical
+ * decoding.
+ */
+ break;
+ case XLOG_HEAP_HOT_UPDATE:
+ heap_xlog_update(record, true);
+ break;
+ case XLOG_HEAP_CONFIRM:
+ heap_xlog_confirm(record);
+ break;
+ case XLOG_HEAP_LOCK:
+ heap_xlog_lock(record);
+ break;
+ case XLOG_HEAP_INPLACE:
+ heap_xlog_inplace(record);
+ break;
+ default:
+ elog(PANIC, "heap_redo: unknown op code %u", info);
+ }
+}
+
+void
+heap2_redo(XLogReaderState *record)
+{
+ uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
+
+ switch (info & XLOG_HEAP_OPMASK)
+ {
+ case XLOG_HEAP2_PRUNE:
+ heap_xlog_prune(record);
+ break;
+ case XLOG_HEAP2_VACUUM:
+ heap_xlog_vacuum(record);
+ break;
+ case XLOG_HEAP2_FREEZE_PAGE:
+ heap_xlog_freeze_page(record);
+ break;
+ case XLOG_HEAP2_VISIBLE:
+ heap_xlog_visible(record);
+ break;
+ case XLOG_HEAP2_MULTI_INSERT:
+ heap_xlog_multi_insert(record);
+ break;
+ case XLOG_HEAP2_LOCK_UPDATED:
+ heap_xlog_lock_updated(record);
+ break;
+ case XLOG_HEAP2_NEW_CID:
+
+ /*
+ * Nothing to do on a real replay, only used during logical
+ * decoding.
+ */
+ break;
+ case XLOG_HEAP2_REWRITE:
+ heap_xlog_logical_rewrite(record);
+ break;
+ default:
+ elog(PANIC, "heap2_redo: unknown op code %u", info);
+ }
+}
+
+/*
+ * Mask a heap page before performing consistency checks on it.
+ */
+void
+heap_mask(char *pagedata, BlockNumber blkno)
+{
+ Page page = (Page) pagedata;
+ OffsetNumber off;
+
+ mask_page_lsn_and_checksum(page);
+
+ mask_page_hint_bits(page);
+ mask_unused_space(page);
+
+ for (off = 1; off <= PageGetMaxOffsetNumber(page); off++)
+ {
+ ItemId iid = PageGetItemId(page, off);
+ char *page_item;
+
+ page_item = (char *) (page + ItemIdGetOffset(iid));
+
+ if (ItemIdIsNormal(iid))
+ {
+ HeapTupleHeader page_htup = (HeapTupleHeader) page_item;
+
+ /*
+ * If xmin of a tuple is not yet frozen, we should ignore
+ * differences in hint bits, since they can be set without
+ * emitting WAL.
+ */
+ if (!HeapTupleHeaderXminFrozen(page_htup))
+ page_htup->t_infomask &= ~HEAP_XACT_MASK;
+ else
+ {
+ /* Still we need to mask xmax hint bits. */
+ page_htup->t_infomask &= ~HEAP_XMAX_INVALID;
+ page_htup->t_infomask &= ~HEAP_XMAX_COMMITTED;
+ }
+
+ /*
+ * During replay, we set Command Id to FirstCommandId. Hence, mask
+ * it. See heap_xlog_insert() for details.
+ */
+ page_htup->t_choice.t_heap.t_field3.t_cid = MASK_MARKER;
+
+ /*
+ * For a speculative tuple, heap_insert() does not set ctid in the
+ * caller-passed heap tuple itself, leaving the ctid field to
+ * contain a speculative token value - a per-backend monotonically
+ * increasing identifier. Besides, it does not WAL-log ctid under
+ * any circumstances.
+ *
+ * During redo, heap_xlog_insert() sets t_ctid to current block
+ * number and self offset number. It doesn't care about any
+ * speculative insertions on the primary. Hence, we set t_ctid to
+ * current block number and self offset number to ignore any
+ * inconsistency.
+ */
+ if (HeapTupleHeaderIsSpeculative(page_htup))
+ ItemPointerSet(&page_htup->t_ctid, blkno, off);
+
+ /*
+ * NB: Not ignoring ctid changes due to the tuple having moved
+ * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's
+ * important information that needs to be in-sync between primary
+ * and standby, and thus is WAL logged.
+ */
+ }
+
+ /*
+ * Ignore any padding bytes after the tuple, when the length of the
+ * item is not MAXALIGNed.
+ */
+ if (ItemIdHasStorage(iid))
+ {
+ int len = ItemIdGetLength(iid);
+ int padlen = MAXALIGN(len) - len;
+
+ if (padlen > 0)
+ memset(page_item + len, MASK_MARKER, padlen);
+ }
+ }
+}
+
+/*
+ * HeapCheckForSerializableConflictOut
+ * We are reading a tuple. If it's not visible, there may be a
+ * rw-conflict out with the inserter. Otherwise, if it is visible to us
+ * but has been deleted, there may be a rw-conflict out with the deleter.
+ *
+ * We will determine the top level xid of the writing transaction with which
+ * we may be in conflict, and ask CheckForSerializableConflictOut() to check
+ * for overlap with our own transaction.
+ *
+ * This function should be called just about anywhere in heapam.c where a
+ * tuple has been read. The caller must hold at least a shared lock on the
+ * buffer, because this function might set hint bits on the tuple. There is
+ * currently no known reason to call this function from an index AM.
+ */
+void
+HeapCheckForSerializableConflictOut(bool visible, Relation relation,
+ HeapTuple tuple, Buffer buffer,
+ Snapshot snapshot)
+{
+ TransactionId xid;
+ HTSV_Result htsvResult;
+
+ if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
+ return;
+
+ /*
+ * Check to see whether the tuple has been written to by a concurrent
+ * transaction, either to create it (so that it is not visible to us), or
+ * to delete it
+ * while it is visible to us. The "visible" bool indicates whether the
+ * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
+ * is going on with it.
+ *
+ * In the event of a concurrently inserted tuple that also happens to have
+ * been concurrently updated (by a separate transaction), the xmin of the
+ * tuple will be used -- not the updater's xid.
+ */
+ htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
+ switch (htsvResult)
+ {
+ case HEAPTUPLE_LIVE:
+ if (visible)
+ return;
+ xid = HeapTupleHeaderGetXmin(tuple->t_data);
+ break;
+ case HEAPTUPLE_RECENTLY_DEAD:
+ case HEAPTUPLE_DELETE_IN_PROGRESS:
+ if (visible)
+ xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
+ else
+ xid = HeapTupleHeaderGetXmin(tuple->t_data);
+
+ if (TransactionIdPrecedes(xid, TransactionXmin))
+ {
+ /* This is like the HEAPTUPLE_DEAD case */
+ Assert(!visible);
+ return;
+ }
+ break;
+ case HEAPTUPLE_INSERT_IN_PROGRESS:
+ xid = HeapTupleHeaderGetXmin(tuple->t_data);
+ break;
+ case HEAPTUPLE_DEAD:
+ Assert(!visible);
+ return;
+ default:
+
+ /*
+ * The only way to get to this default clause is if a new value is
+ * added to the enum type without adding it to this switch
+ * statement. That's a bug, so elog.
+ */
+ elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
+
+ /*
+ * In spite of having all enum values covered and calling elog on
+ * this default, some compilers think this is a code path which
+ * allows xid to be used below without initialization. Silence
+ * that warning.
+ */
+ xid = InvalidTransactionId;
+ }
+
+ Assert(TransactionIdIsValid(xid));
+ Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
+
+ /*
+ * Find top level xid. Bail out if xid is too early to be a conflict, or
+ * if it's our own xid.
+ */
+ if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
+ return;
+ xid = SubTransGetTopmostTransaction(xid);
+ if (TransactionIdPrecedes(xid, TransactionXmin))
+ return;
+
+ CheckForSerializableConflictOut(relation, xid, snapshot);
+}
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
new file mode 100644
index 0000000..6633939
--- /dev/null
+++ b/src/backend/access/heap/heapam_handler.c
@@ -0,0 +1,2608 @@
+/*-------------------------------------------------------------------------
+ *
+ * heapam_handler.c
+ * heap table access method code
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/access/heap/heapam_handler.c
+ *
+ *
+ * NOTES
+ * This file wires up the lower-level heapam.c et al. routines with the
+ * tableam abstraction.
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "access/heaptoast.h"
+#include "access/multixact.h"
+#include "access/rewriteheap.h"
+#include "access/syncscan.h"
+#include "access/tableam.h"
+#include "access/tsmapi.h"
+#include "access/xact.h"
+#include "catalog/catalog.h"
+#include "catalog/index.h"
+#include "catalog/storage.h"
+#include "catalog/storage_xlog.h"
+#include "commands/progress.h"
+#include "executor/executor.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "storage/bufmgr.h"
+#include "storage/bufpage.h"
+#include "storage/lmgr.h"
+#include "storage/predicate.h"
+#include "storage/procarray.h"
+#include "storage/smgr.h"
+#include "utils/builtins.h"
+#include "utils/rel.h"
+
+static void reform_and_rewrite_tuple(HeapTuple tuple,
+ Relation OldHeap, Relation NewHeap,
+ Datum *values, bool *isnull, RewriteState rwstate);
+
+static bool SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
+ HeapTuple tuple,
+ OffsetNumber tupoffset);
+
+static BlockNumber heapam_scan_get_blocks_done(HeapScanDesc hscan);
+
+static const TableAmRoutine heapam_methods;
+
+
+/* ------------------------------------------------------------------------
+ * Slot related callbacks for heap AM
+ * ------------------------------------------------------------------------
+ */
+
+static const TupleTableSlotOps *
+heapam_slot_callbacks(Relation relation)
+{
+ return &TTSOpsBufferHeapTuple;
+}
+
+
+/* ------------------------------------------------------------------------
+ * Index Scan Callbacks for heap AM
+ * ------------------------------------------------------------------------
+ */
+
+static IndexFetchTableData *
+heapam_index_fetch_begin(Relation rel)
+{
+ IndexFetchHeapData *hscan = palloc0(sizeof(IndexFetchHeapData));
+
+ hscan->xs_base.rel = rel;
+ hscan->xs_cbuf = InvalidBuffer;
+
+ return &hscan->xs_base;
+}
+
+static void
+heapam_index_fetch_reset(IndexFetchTableData *scan)
+{
+ IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
+
+ if (BufferIsValid(hscan->xs_cbuf))
+ {
+ ReleaseBuffer(hscan->xs_cbuf);
+ hscan->xs_cbuf = InvalidBuffer;
+ }
+}
+
+static void
+heapam_index_fetch_end(IndexFetchTableData *scan)
+{
+ IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
+
+ heapam_index_fetch_reset(scan);
+
+ pfree(hscan);
+}
+
+static bool
+heapam_index_fetch_tuple(struct IndexFetchTableData *scan,
+ ItemPointer tid,
+ Snapshot snapshot,
+ TupleTableSlot *slot,
+ bool *call_again, bool *all_dead)
+{
+ IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
+ BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
+ bool got_heap_tuple;
+
+ Assert(TTS_IS_BUFFERTUPLE(slot));
+
+ /* We can skip the buffer-switching logic if we're in mid-HOT chain. */
+ if (!*call_again)
+ {
+ /* Switch to correct buffer if we don't have it already */
+ Buffer prev_buf = hscan->xs_cbuf;
+
+ hscan->xs_cbuf = ReleaseAndReadBuffer(hscan->xs_cbuf,
+ hscan->xs_base.rel,
+ ItemPointerGetBlockNumber(tid));
+
+ /*
+ * Prune page, but only if we weren't already on this page
+ */
+ if (prev_buf != hscan->xs_cbuf)
+ heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf);
+ }
+
+ /* Obtain share-lock on the buffer so we can examine visibility */
+ LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_SHARE);
+ got_heap_tuple = heap_hot_search_buffer(tid,
+ hscan->xs_base.rel,
+ hscan->xs_cbuf,
+ snapshot,
+ &bslot->base.tupdata,
+ all_dead,
+ !*call_again);
+ bslot->base.tupdata.t_self = *tid;
+ LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_UNLOCK);
+
+ if (got_heap_tuple)
+ {
+ /*
+ * Only in a non-MVCC snapshot can more than one member of the HOT
+ * chain be visible.
+ */
+ *call_again = !IsMVCCSnapshot(snapshot);
+
+ slot->tts_tableOid = RelationGetRelid(scan->rel);
+ ExecStoreBufferHeapTuple(&bslot->base.tupdata, slot, hscan->xs_cbuf);
+ }
+ else
+ {
+ /* We've reached the end of the HOT chain. */
+ *call_again = false;
+ }
+
+ return got_heap_tuple;
+}
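+
+/*
+ * Illustrative caller loop for the call_again protocol above -- a sketch,
+ * not code from this file; process() is a hypothetical consumer and the
+ * call is shown through the tableam wrapper table_index_fetch_tuple().
+ * The caller re-invokes the fetch with the same TID while *call_again
+ * remains true, so under a non-MVCC snapshot every visible member of the
+ * HOT chain is returned before moving to the next index entry:
+ *
+ *     call_again = false;
+ *     do
+ *     {
+ *         if (table_index_fetch_tuple(scan, tid, snapshot, slot,
+ *                                     &call_again, &all_dead))
+ *             process(slot);
+ *     } while (call_again);
+ */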
+
+
+/* ------------------------------------------------------------------------
+ * Callbacks for non-modifying operations on individual tuples for heap AM
+ * ------------------------------------------------------------------------
+ */
+
+static bool
+heapam_fetch_row_version(Relation relation,
+ ItemPointer tid,
+ Snapshot snapshot,
+ TupleTableSlot *slot)
+{
+ BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
+ Buffer buffer;
+
+ Assert(TTS_IS_BUFFERTUPLE(slot));
+
+ bslot->base.tupdata.t_self = *tid;
+ if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer))
+ {
+ /* store in slot, transferring existing pin */
+ ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata, slot, buffer);
+ slot->tts_tableOid = RelationGetRelid(relation);
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+heapam_tuple_tid_valid(TableScanDesc scan, ItemPointer tid)
+{
+ HeapScanDesc hscan = (HeapScanDesc) scan;
+
+ return ItemPointerIsValid(tid) &&
+ ItemPointerGetBlockNumber(tid) < hscan->rs_nblocks;
+}
+
+static bool
+heapam_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
+ Snapshot snapshot)
+{
+ BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
+ bool res;
+
+ Assert(TTS_IS_BUFFERTUPLE(slot));
+ Assert(BufferIsValid(bslot->buffer));
+
+ /*
+ * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
+ * Caller should be holding pin, but not lock.
+ */
+ LockBuffer(bslot->buffer, BUFFER_LOCK_SHARE);
+ res = HeapTupleSatisfiesVisibility(bslot->base.tuple, snapshot,
+ bslot->buffer);
+ LockBuffer(bslot->buffer, BUFFER_LOCK_UNLOCK);
+
+ return res;
+}
+
+
+/* ----------------------------------------------------------------------------
+ * Functions for manipulations of physical tuples for heap AM.
+ * ----------------------------------------------------------------------------
+ */
+
+static void
+heapam_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid,
+ int options, BulkInsertState bistate)
+{
+ bool shouldFree = true;
+ HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
+
+ /* Update the tuple with table oid */
+ slot->tts_tableOid = RelationGetRelid(relation);
+ tuple->t_tableOid = slot->tts_tableOid;
+
+ /* Perform the insertion, and copy the resulting ItemPointer */
+ heap_insert(relation, tuple, cid, options, bistate);
+ ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
+
+ if (shouldFree)
+ pfree(tuple);
+}
+
+static void
+heapam_tuple_insert_speculative(Relation relation, TupleTableSlot *slot,
+ CommandId cid, int options,
+ BulkInsertState bistate, uint32 specToken)
+{
+ bool shouldFree = true;
+ HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
+
+ /* Update the tuple with table oid */
+ slot->tts_tableOid = RelationGetRelid(relation);
+ tuple->t_tableOid = slot->tts_tableOid;
+
+ HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
+ options |= HEAP_INSERT_SPECULATIVE;
+
+ /* Perform the insertion, and copy the resulting ItemPointer */
+ heap_insert(relation, tuple, cid, options, bistate);
+ ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
+
+ if (shouldFree)
+ pfree(tuple);
+}
+
+static void
+heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
+ uint32 specToken, bool succeeded)
+{
+ bool shouldFree = true;
+ HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
+
+ /* adjust the tuple's state accordingly */
+ if (succeeded)
+ heap_finish_speculative(relation, &slot->tts_tid);
+ else
+ heap_abort_speculative(relation, &slot->tts_tid);
+
+ if (shouldFree)
+ pfree(tuple);
+}
+
+static TM_Result
+heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
+ Snapshot snapshot, Snapshot crosscheck, bool wait,
+ TM_FailureData *tmfd, bool changingPart)
+{
+ /*
+ * Currently, deletion of index tuples is handled during VACUUM. If the
+ * storage were to clean up dead tuples by itself, that would also be
+ * the time to delete the corresponding index tuples.
+ */
+ return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
+}
+
+
+static TM_Result
+heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
+ CommandId cid, Snapshot snapshot, Snapshot crosscheck,
+ bool wait, TM_FailureData *tmfd,
+ LockTupleMode *lockmode, bool *update_indexes)
+{
+ bool shouldFree = true;
+ HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
+ TM_Result result;
+
+ /* Update the tuple with table oid */
+ slot->tts_tableOid = RelationGetRelid(relation);
+ tuple->t_tableOid = slot->tts_tableOid;
+
+ result = heap_update(relation, otid, tuple, cid, crosscheck, wait,
+ tmfd, lockmode);
+ ItemPointerCopy(&tuple->t_self, &slot->tts_tid);
+
+ /*
+ * Decide whether new index entries are needed for the tuple
+ *
+ * Note: heap_update returns the tid (location) of the new tuple in the
+ * t_self field.
+ *
+ * If it's a HOT update, we mustn't insert new index entries.
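+ *
+ * For example: an UPDATE that changes only non-indexed columns and finds
+ * room for the new version on the same page is executed as a HOT update;
+ * the new tuple is then marked heap-only, so *update_indexes comes out
+ * false and the executor creates no index entries for it.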
+ */
+ *update_indexes = result == TM_Ok && !HeapTupleIsHeapOnly(tuple);
+
+ if (shouldFree)
+ pfree(tuple);
+
+ return result;
+}
+
+static TM_Result
+heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
+ TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
+ LockWaitPolicy wait_policy, uint8 flags,
+ TM_FailureData *tmfd)
+{
+ BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
+ TM_Result result;
+ Buffer buffer;
+ HeapTuple tuple = &bslot->base.tupdata;
+ bool follow_updates;
+
+ follow_updates = (flags & TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS) != 0;
+ tmfd->traversed = false;
+
+ Assert(TTS_IS_BUFFERTUPLE(slot));
+
+tuple_lock_retry:
+ tuple->t_self = *tid;
+ result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
+ follow_updates, &buffer, tmfd);
+
+ if (result == TM_Updated &&
+ (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
+ {
+ /* Should not encounter speculative tuple on recheck */
+ Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
+
+ ReleaseBuffer(buffer);
+
+ if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
+ {
+ SnapshotData SnapshotDirty;
+ TransactionId priorXmax;
+
+ /* it was updated, so look at the updated version */
+ *tid = tmfd->ctid;
+ /* updated row should have xmin matching this xmax */
+ priorXmax = tmfd->xmax;
+
+ /* signal that a tuple later in the chain is getting locked */
+ tmfd->traversed = true;
+
+ /*
+ * fetch target tuple
+ *
+ * Loop here to deal with updated or busy tuples
+ */
+ InitDirtySnapshot(SnapshotDirty);
+ for (;;)
+ {
+ if (ItemPointerIndicatesMovedPartitions(tid))
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
+
+ tuple->t_self = *tid;
+ if (heap_fetch_extended(relation, &SnapshotDirty, tuple,
+ &buffer, true))
+ {
+ /*
+ * If xmin isn't what we're expecting, the slot must have
+ * been recycled and reused for an unrelated tuple. This
+ * implies that the latest version of the row was deleted,
+ * so we need do nothing. (Should be safe to examine xmin
+ * without getting buffer's content lock. We assume
+ * reading a TransactionId to be atomic, and Xmin never
+ * changes in an existing tuple, except to invalid or
+ * frozen, and neither of those can match priorXmax.)
+ */
+ if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
+ priorXmax))
+ {
+ ReleaseBuffer(buffer);
+ return TM_Deleted;
+ }
+
+ /* otherwise xmin should not be dirty... */
+ if (TransactionIdIsValid(SnapshotDirty.xmin))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("t_xmin %u is uncommitted in tuple (%u,%u) to be updated in table \"%s\"",
+ SnapshotDirty.xmin,
+ ItemPointerGetBlockNumber(&tuple->t_self),
+ ItemPointerGetOffsetNumber(&tuple->t_self),
+ RelationGetRelationName(relation))));
+
+ /*
+ * If the tuple is being updated by another transaction then we
+ * have to wait for its commit/abort, or die trying.
+ */
+ if (TransactionIdIsValid(SnapshotDirty.xmax))
+ {
+ ReleaseBuffer(buffer);
+ switch (wait_policy)
+ {
+ case LockWaitBlock:
+ XactLockTableWait(SnapshotDirty.xmax,
+ relation, &tuple->t_self,
+ XLTW_FetchUpdated);
+ break;
+ case LockWaitSkip:
+ if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
+ /* skip instead of waiting */
+ return TM_WouldBlock;
+ break;
+ case LockWaitError:
+ if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
+ ereport(ERROR,
+ (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
+ break;
+ }
+ continue; /* loop back to repeat heap_fetch */
+ }
+
+ /*
+ * If tuple was inserted by our own transaction, we have
+ * to check cmin against cid: cmin >= current CID means
+ * our command cannot see the tuple, so we should ignore
+ * it. Otherwise heap_lock_tuple() will throw an error,
+ * and so would any later attempt to update or delete the
+ * tuple. (We need not check cmax because
+ * HeapTupleSatisfiesDirty will consider a tuple deleted
+ * by our transaction dead, regardless of cmax.) We just
+ * checked that priorXmax == xmin, so we can test that
+ * variable instead of doing HeapTupleHeaderGetXmin again.
+ */
+ if (TransactionIdIsCurrentTransactionId(priorXmax) &&
+ HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
+ {
+ tmfd->xmax = priorXmax;
+
+ /*
+ * Cmin is the problematic value, so store that. See
+ * above.
+ */
+ tmfd->cmax = HeapTupleHeaderGetCmin(tuple->t_data);
+ ReleaseBuffer(buffer);
+ return TM_SelfModified;
+ }
+
+ /*
+ * This is a live tuple, so try to lock it again.
+ */
+ ReleaseBuffer(buffer);
+ goto tuple_lock_retry;
+ }
+
+ /*
+ * If the referenced slot was actually empty, the latest
+ * version of the row must have been deleted, so we need do
+ * nothing.
+ */
+ if (tuple->t_data == NULL)
+ {
+ Assert(!BufferIsValid(buffer));
+ return TM_Deleted;
+ }
+
+ /*
+ * As above, if xmin isn't what we're expecting, do nothing.
+ */
+ if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
+ priorXmax))
+ {
+ ReleaseBuffer(buffer);
+ return TM_Deleted;
+ }
+
+ /*
+ * If we get here, the tuple was found but failed
+ * SnapshotDirty. Assuming the xmin is either a committed xact
+ * or our own xact (as it certainly should be if we're trying
+ * to modify the tuple), this must mean that the row was
+ * updated or deleted by either a committed xact or our own
+ * xact. If it was deleted, we can ignore it; if it was
+ * updated then chain up to the next version and repeat the
+ * whole process.
+ *
+ * As above, it should be safe to examine xmax and t_ctid
+ * without the buffer content lock, because they can't be
+ * changing. We'd better hold a buffer pin though.
+ */
+ if (ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
+ {
+ /* deleted, so forget about it */
+ ReleaseBuffer(buffer);
+ return TM_Deleted;
+ }
+
+ /* updated, so look at the updated row */
+ *tid = tuple->t_data->t_ctid;
+ /* updated row should have xmin matching this xmax */
+ priorXmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
+ ReleaseBuffer(buffer);
+ /* loop back to fetch next in chain */
+ }
+ }
+ else
+ {
+ /* tuple was deleted, so give up */
+ return TM_Deleted;
+ }
+ }
+
+ slot->tts_tableOid = RelationGetRelid(relation);
+ tuple->t_tableOid = slot->tts_tableOid;
+
+ /* store in slot, transferring existing pin */
+ ExecStorePinnedBufferHeapTuple(tuple, slot, buffer);
+
+ return result;
+}
+
+
+/* ------------------------------------------------------------------------
+ * DDL related callbacks for heap AM.
+ * ------------------------------------------------------------------------
+ */
+
+static void
+heapam_relation_set_new_filenode(Relation rel,
+ const RelFileNode *newrnode,
+ char persistence,
+ TransactionId *freezeXid,
+ MultiXactId *minmulti)
+{
+ SMgrRelation srel;
+
+ /*
+ * Initialize to the minimum XID that could put tuples in the table. We
+ * know that no xacts older than RecentXmin are still running, so that
+ * will do.
+ */
+ *freezeXid = RecentXmin;
+
+ /*
+ * Similarly, initialize the minimum Multixact to the first value that
+ * could possibly be stored in tuples in the table. Running transactions
+ * could reuse values from their local cache, so we are careful to
+ * consider all currently running multis.
+ *
+ * XXX this could be refined further, but is it worth the hassle?
+ */
+ *minmulti = GetOldestMultiXactId();
+
+ srel = RelationCreateStorage(*newrnode, persistence);
+
+ /*
+ * If required, set up an init fork for an unlogged table so that it can
+ * be correctly reinitialized on restart. An immediate sync is required
+ * even if the page has been logged, because the write did not go through
+ * shared_buffers and therefore a concurrent checkpoint may have moved the
+ * redo pointer past our xlog record. Moreover, recovery might remove the
+ * file while replaying an XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE
+ * record. Therefore, logging is necessary even if wal_level=minimal.
+ */
+ if (persistence == RELPERSISTENCE_UNLOGGED)
+ {
+ Assert(rel->rd_rel->relkind == RELKIND_RELATION ||
+ rel->rd_rel->relkind == RELKIND_MATVIEW ||
+ rel->rd_rel->relkind == RELKIND_TOASTVALUE);
+ smgrcreate(srel, INIT_FORKNUM, false);
+ log_smgrcreate(newrnode, INIT_FORKNUM);
+ smgrimmedsync(srel, INIT_FORKNUM);
+ }
+
+ smgrclose(srel);
+}
+
+static void
+heapam_relation_nontransactional_truncate(Relation rel)
+{
+ RelationTruncate(rel, 0);
+}
+
+static void
+heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
+{
+ SMgrRelation dstrel;
+
+ dstrel = smgropen(*newrnode, rel->rd_backend);
+ RelationOpenSmgr(rel);
+
+ /*
+ * Since we copy the file directly without looking at the shared buffers,
+ * we'd better first flush out any pages of the source relation that are
+ * in shared buffers. We assume no new changes will be made while we are
+ * holding exclusive lock on the rel.
+ */
+ FlushRelationBuffers(rel);
+
+ /*
+ * Create and copy all forks of the relation, and schedule unlinking of
+ * old physical files.
+ *
+ * NOTE: any conflict in relfilenode value will be caught in
+ * RelationCreateStorage().
+ */
+ RelationCreateStorage(*newrnode, rel->rd_rel->relpersistence);
+
+ /* copy main fork */
+ RelationCopyStorage(rel->rd_smgr, dstrel, MAIN_FORKNUM,
+ rel->rd_rel->relpersistence);
+
+ /* copy those extra forks that exist */
+ for (ForkNumber forkNum = MAIN_FORKNUM + 1;
+ forkNum <= MAX_FORKNUM; forkNum++)
+ {
+ if (smgrexists(rel->rd_smgr, forkNum))
+ {
+ smgrcreate(dstrel, forkNum, false);
+
+ /*
+ * WAL log creation if the relation is persistent, or this is the
+ * init fork of an unlogged relation.
+ */
+ if (RelationIsPermanent(rel) ||
+ (rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
+ forkNum == INIT_FORKNUM))
+ log_smgrcreate(newrnode, forkNum);
+ RelationCopyStorage(rel->rd_smgr, dstrel, forkNum,
+ rel->rd_rel->relpersistence);
+ }
+ }
+
+ /* drop old relation, and close new one */
+ RelationDropStorage(rel);
+ smgrclose(dstrel);
+}
+
+static void
+heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
+ Relation OldIndex, bool use_sort,
+ TransactionId OldestXmin,
+ TransactionId *xid_cutoff,
+ MultiXactId *multi_cutoff,
+ double *num_tuples,
+ double *tups_vacuumed,
+ double *tups_recently_dead)
+{
+ RewriteState rwstate;
+ IndexScanDesc indexScan;
+ TableScanDesc tableScan;
+ HeapScanDesc heapScan;
+ bool is_system_catalog;
+ Tuplesortstate *tuplesort;
+ TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
+ TupleDesc newTupDesc = RelationGetDescr(NewHeap);
+ TupleTableSlot *slot;
+ int natts;
+ Datum *values;
+ bool *isnull;
+ BufferHeapTupleTableSlot *hslot;
+ BlockNumber prev_cblock = InvalidBlockNumber;
+
+ /* Remember if it's a system catalog */
+ is_system_catalog = IsSystemRelation(OldHeap);
+
+ /*
+ * A valid smgr_targblock implies something already wrote to the relation.
+ * That may be harmless, but this function hasn't planned for it.
+ */
+ Assert(RelationGetTargetBlock(NewHeap) == InvalidBlockNumber);
+
+ /* Preallocate values/isnull arrays */
+ natts = newTupDesc->natts;
+ values = (Datum *) palloc(natts * sizeof(Datum));
+ isnull = (bool *) palloc(natts * sizeof(bool));
+
+ /* Initialize the rewrite operation */
+ rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff,
+ *multi_cutoff);
+
+ /* Set up sorting if wanted */
+ if (use_sort)
+ tuplesort = tuplesort_begin_cluster(oldTupDesc, OldIndex,
+ maintenance_work_mem,
+ NULL, false);
+ else
+ tuplesort = NULL;
+
+ /*
+ * Prepare to scan the OldHeap. To ensure we see recently-dead tuples
+ * that still need to be copied, we scan with SnapshotAny and use
+ * HeapTupleSatisfiesVacuum for the visibility test.
+ */
+ if (OldIndex != NULL && !use_sort)
+ {
+ const int ci_index[] = {
+ PROGRESS_CLUSTER_PHASE,
+ PROGRESS_CLUSTER_INDEX_RELID
+ };
+ int64 ci_val[2];
+
+ /* Report the scan phase and the OID of OldIndex in the progress columns */
+ ci_val[0] = PROGRESS_CLUSTER_PHASE_INDEX_SCAN_HEAP;
+ ci_val[1] = RelationGetRelid(OldIndex);
+ pgstat_progress_update_multi_param(2, ci_index, ci_val);
+
+ tableScan = NULL;
+ heapScan = NULL;
+ indexScan = index_beginscan(OldHeap, OldIndex, SnapshotAny, 0, 0);
+ index_rescan(indexScan, NULL, 0, NULL, 0);
+ }
+ else
+ {
+ /* In scan-and-sort mode and also VACUUM FULL, set phase */
+ pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
+ PROGRESS_CLUSTER_PHASE_SEQ_SCAN_HEAP);
+
+ tableScan = table_beginscan(OldHeap, SnapshotAny, 0, (ScanKey) NULL);
+ heapScan = (HeapScanDesc) tableScan;
+ indexScan = NULL;
+
+ /* Set total heap blocks */
+ pgstat_progress_update_param(PROGRESS_CLUSTER_TOTAL_HEAP_BLKS,
+ heapScan->rs_nblocks);
+ }
+
+ slot = table_slot_create(OldHeap, NULL);
+ hslot = (BufferHeapTupleTableSlot *) slot;
+
+ /*
+ * Scan through the OldHeap, either in OldIndex order or sequentially;
+ * copy each tuple into the NewHeap, or transiently to the tuplesort
+ * module. Note that we don't bother sorting dead tuples (they won't get
+ * to the new table anyway).
+ */
+ for (;;)
+ {
+ HeapTuple tuple;
+ Buffer buf;
+ bool isdead;
+
+ CHECK_FOR_INTERRUPTS();
+
+ if (indexScan != NULL)
+ {
+ if (!index_getnext_slot(indexScan, ForwardScanDirection, slot))
+ break;
+
+ /* Since we used no scan keys, should never need to recheck */
+ if (indexScan->xs_recheck)
+ elog(ERROR, "CLUSTER does not support lossy index conditions");
+ }
+ else
+ {
+ if (!table_scan_getnextslot(tableScan, ForwardScanDirection, slot))
+ {
+ /*
+ * If the last pages of the scan were empty, we would go to
+ * the next phase while heap_blks_scanned != heap_blks_total.
+ * Instead, to ensure that heap_blks_scanned is equivalent to
+ * total_heap_blks after the table scan phase, this parameter
+ * is manually updated to the correct value when the table
+ * scan finishes.
+ */
+ pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
+ heapScan->rs_nblocks);
+ break;
+ }
+
+ /*
+ * In scan-and-sort mode and also VACUUM FULL, set heap blocks
+ * scanned
+ *
+ * Note that heapScan may start at an offset and wrap around, i.e.
+ * rs_startblock may be >0, and rs_cblock may end with a number
+ * below rs_startblock. To prevent showing this wraparound to the
+ * user, we offset rs_cblock by rs_startblock (modulo rs_nblocks).
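+ *
+ * For example, with rs_startblock = 90, rs_nblocks = 100 and
+ * rs_cblock = 4, we report (4 + 100 - 90) % 100 + 1 = 15, covering
+ * the fifteen blocks 90..99 and 0..4 scanned so far.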
+ */
+ if (prev_cblock != heapScan->rs_cblock)
+ {
+ pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
+ (heapScan->rs_cblock +
+ heapScan->rs_nblocks -
+ heapScan->rs_startblock
+ ) % heapScan->rs_nblocks + 1);
+ prev_cblock = heapScan->rs_cblock;
+ }
+ }
+
+ tuple = ExecFetchSlotHeapTuple(slot, false, NULL);
+ buf = hslot->buffer;
+
+ LockBuffer(buf, BUFFER_LOCK_SHARE);
+
+ switch (HeapTupleSatisfiesVacuum(tuple, OldestXmin, buf))
+ {
+ case HEAPTUPLE_DEAD:
+ /* Definitely dead */
+ isdead = true;
+ break;
+ case HEAPTUPLE_RECENTLY_DEAD:
+ *tups_recently_dead += 1;
+ /* fall through */
+ case HEAPTUPLE_LIVE:
+ /* Live or recently dead, must copy it */
+ isdead = false;
+ break;
+ case HEAPTUPLE_INSERT_IN_PROGRESS:
+
+ /*
+ * Since we hold exclusive lock on the relation, normally the
+ * only way to see this is if it was inserted earlier in our
+ * own transaction. However, it can happen in system
+ * catalogs, since we tend to release write lock before commit
+ * there. Give a warning if neither case applies; but in any
+ * case we had better copy it.
+ */
+ if (!is_system_catalog &&
+ !TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
+ elog(WARNING, "concurrent insert in progress within table \"%s\"",
+ RelationGetRelationName(OldHeap));
+ /* treat as live */
+ isdead = false;
+ break;
+ case HEAPTUPLE_DELETE_IN_PROGRESS:
+
+ /*
+ * Similar situation to INSERT_IN_PROGRESS case.
+ */
+ if (!is_system_catalog &&
+ !TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tuple->t_data)))
+ elog(WARNING, "concurrent delete in progress within table \"%s\"",
+ RelationGetRelationName(OldHeap));
+ /* treat as recently dead */
+ *tups_recently_dead += 1;
+ isdead = false;
+ break;
+ default:
+ elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
+ isdead = false; /* keep compiler quiet */
+ break;
+ }
+
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+
+ if (isdead)
+ {
+ *tups_vacuumed += 1;
+ /* heap rewrite module still needs to see it... */
+ if (rewrite_heap_dead_tuple(rwstate, tuple))
+ {
+ /* A previous recently-dead tuple is now known dead */
+ *tups_vacuumed += 1;
+ *tups_recently_dead -= 1;
+ }
+ continue;
+ }
+
+ *num_tuples += 1;
+ if (tuplesort != NULL)
+ {
+ tuplesort_putheaptuple(tuplesort, tuple);
+
+ /*
+ * In scan-and-sort mode, report increase in number of tuples
+ * scanned
+ */
+ pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED,
+ *num_tuples);
+ }
+ else
+ {
+ const int ct_index[] = {
+ PROGRESS_CLUSTER_HEAP_TUPLES_SCANNED,
+ PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN
+ };
+ int64 ct_val[2];
+
+ reform_and_rewrite_tuple(tuple, OldHeap, NewHeap,
+ values, isnull, rwstate);
+
+ /*
+ * In indexscan mode and also VACUUM FULL, report increase in
+ * number of tuples scanned and written
+ */
+ ct_val[0] = *num_tuples;
+ ct_val[1] = *num_tuples;
+ pgstat_progress_update_multi_param(2, ct_index, ct_val);
+ }
+ }
+
+ if (indexScan != NULL)
+ index_endscan(indexScan);
+ if (tableScan != NULL)
+ table_endscan(tableScan);
+ if (slot)
+ ExecDropSingleTupleTableSlot(slot);
+
+ /*
+ * In scan-and-sort mode, complete the sort, then read out all live tuples
+ * from the tuplesort and write them to the new relation.
+ */
+ if (tuplesort != NULL)
+ {
+ double n_tuples = 0;
+
+ /* Report that we are now sorting tuples */
+ pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
+ PROGRESS_CLUSTER_PHASE_SORT_TUPLES);
+
+ tuplesort_performsort(tuplesort);
+
+ /* Report that we are now writing new heap */
+ pgstat_progress_update_param(PROGRESS_CLUSTER_PHASE,
+ PROGRESS_CLUSTER_PHASE_WRITE_NEW_HEAP);
+
+ for (;;)
+ {
+ HeapTuple tuple;
+
+ CHECK_FOR_INTERRUPTS();
+
+ tuple = tuplesort_getheaptuple(tuplesort, true);
+ if (tuple == NULL)
+ break;
+
+ n_tuples += 1;
+ reform_and_rewrite_tuple(tuple,
+ OldHeap, NewHeap,
+ values, isnull,
+ rwstate);
+ /* Report n_tuples */
+ pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_TUPLES_WRITTEN,
+ n_tuples);
+ }
+
+ tuplesort_end(tuplesort);
+ }
+
+ /* Write out any remaining tuples, and fsync if needed */
+ end_heap_rewrite(rwstate);
+
+ /* Clean up */
+ pfree(values);
+ pfree(isnull);
+}
+
+static bool
+heapam_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno,
+ BufferAccessStrategy bstrategy)
+{
+ HeapScanDesc hscan = (HeapScanDesc) scan;
+
+ /*
+ * We must maintain a pin on the target page's buffer to ensure that
+ * concurrent activity - e.g. HOT pruning - doesn't delete tuples out from
+ * under us. Hence, pin the page until we are done looking at it. We
+ * also choose to hold sharelock on the buffer throughout --- we could
+ * release and re-acquire sharelock for each tuple, but since we aren't
+ * doing much work per tuple, the extra lock traffic is probably better
+ * avoided.
+ */
+ hscan->rs_cblock = blockno;
+ hscan->rs_cindex = FirstOffsetNumber;
+ hscan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM,
+ blockno, RBM_NORMAL, bstrategy);
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
+
+ /* in heap all blocks can contain tuples, so always return true */
+ return true;
+}
+
+static bool
+heapam_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
+ double *liverows, double *deadrows,
+ TupleTableSlot *slot)
+{
+ HeapScanDesc hscan = (HeapScanDesc) scan;
+ Page targpage;
+ OffsetNumber maxoffset;
+ BufferHeapTupleTableSlot *hslot;
+
+ Assert(TTS_IS_BUFFERTUPLE(slot));
+
+ hslot = (BufferHeapTupleTableSlot *) slot;
+ targpage = BufferGetPage(hscan->rs_cbuf);
+ maxoffset = PageGetMaxOffsetNumber(targpage);
+
+ /* Inner loop over all tuples on the selected page */
+ for (; hscan->rs_cindex <= maxoffset; hscan->rs_cindex++)
+ {
+ ItemId itemid;
+ HeapTuple targtuple = &hslot->base.tupdata;
+ bool sample_it = false;
+
+ itemid = PageGetItemId(targpage, hscan->rs_cindex);
+
+ /*
+ * We ignore unused and redirect line pointers. DEAD line pointers
+ * should be counted as dead, because we need vacuum to run to get rid
+ * of them. Note that this rule agrees with the way that
+ * heap_page_prune() counts things.
+ */
+ if (!ItemIdIsNormal(itemid))
+ {
+ if (ItemIdIsDead(itemid))
+ *deadrows += 1;
+ continue;
+ }
+
+ ItemPointerSet(&targtuple->t_self, hscan->rs_cblock, hscan->rs_cindex);
+
+ targtuple->t_tableOid = RelationGetRelid(scan->rs_rd);
+ targtuple->t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
+ targtuple->t_len = ItemIdGetLength(itemid);
+
+ switch (HeapTupleSatisfiesVacuum(targtuple, OldestXmin,
+ hscan->rs_cbuf))
+ {
+ case HEAPTUPLE_LIVE:
+ sample_it = true;
+ *liverows += 1;
+ break;
+
+ case HEAPTUPLE_DEAD:
+ case HEAPTUPLE_RECENTLY_DEAD:
+ /* Count dead and recently-dead rows */
+ *deadrows += 1;
+ break;
+
+ case HEAPTUPLE_INSERT_IN_PROGRESS:
+
+ /*
+ * Insert-in-progress rows are not counted. We assume that
+ * when the inserting transaction commits or aborts, it will
+ * send a stats message to increment the proper count. This
+ * works right only if that transaction ends after we finish
+ * analyzing the table; if things happen in the other order,
+ * its stats update will be overwritten by ours. However, the
+ * error will be large only if the other transaction runs long
+ * enough to insert many tuples, so assuming it will finish
+ * after us is the safer option.
+ *
+ * A special case is that the inserting transaction might be
+ * our own. In this case we should count and sample the row,
+ * to accommodate users who load a table and analyze it in one
+ * transaction. (pgstat_report_analyze has to adjust the
+ * numbers we send to the stats collector to make this come
+ * out right.)
+ */
+ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(targtuple->t_data)))
+ {
+ sample_it = true;
+ *liverows += 1;
+ }
+ break;
+
+ case HEAPTUPLE_DELETE_IN_PROGRESS:
+
+ /*
+ * We count and sample delete-in-progress rows the same as
+ * live ones, so that the stats counters come out right if the
+ * deleting transaction commits after us, per the same
+ * reasoning given above.
+ *
+ * If the delete was done by our own transaction, however, we
+ * must count the row as dead to make pgstat_report_analyze's
+ * stats adjustments come out right. (Note: this works out
+ * properly when the row was both inserted and deleted in our
+ * xact.)
+ *
+ * The net effect of these choices is that we act as though an
+ * IN_PROGRESS transaction hasn't happened yet, except if it
+ * is our own transaction, which we assume has happened.
+ *
+ * This approach ensures that we behave sanely if we see both
+ * the pre-image and post-image rows for a row being updated
+ * by a concurrent transaction: we will sample the pre-image
+ * but not the post-image. We also get sane results if the
+ * concurrent transaction never commits.
+ */
+ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(targtuple->t_data)))
+ *deadrows += 1;
+ else
+ {
+ sample_it = true;
+ *liverows += 1;
+ }
+ break;
+
+ default:
+ elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
+ break;
+ }
+
+ if (sample_it)
+ {
+ ExecStoreBufferHeapTuple(targtuple, slot, hscan->rs_cbuf);
+ hscan->rs_cindex++;
+
+ /* note that we leave the buffer locked here! */
+ return true;
+ }
+ }
+
+ /* Now release the lock and pin on the page */
+ UnlockReleaseBuffer(hscan->rs_cbuf);
+ hscan->rs_cbuf = InvalidBuffer;
+
+ /* also prevent old slot contents from having pin on page */
+ ExecClearTuple(slot);
+
+ return false;
+}
+
+static double
+heapam_index_build_range_scan(Relation heapRelation,
+ Relation indexRelation,
+ IndexInfo *indexInfo,
+ bool allow_sync,
+ bool anyvisible,
+ bool progress,
+ BlockNumber start_blockno,
+ BlockNumber numblocks,
+ IndexBuildCallback callback,
+ void *callback_state,
+ TableScanDesc scan)
+{
+ HeapScanDesc hscan;
+ bool is_system_catalog;
+ bool checking_uniqueness;
+ HeapTuple heapTuple;
+ Datum values[INDEX_MAX_KEYS];
+ bool isnull[INDEX_MAX_KEYS];
+ double reltuples;
+ ExprState *predicate;
+ TupleTableSlot *slot;
+ EState *estate;
+ ExprContext *econtext;
+ Snapshot snapshot;
+ bool need_unregister_snapshot = false;
+ TransactionId OldestXmin;
+ BlockNumber previous_blkno = InvalidBlockNumber;
+ BlockNumber root_blkno = InvalidBlockNumber;
+ OffsetNumber root_offsets[MaxHeapTuplesPerPage];
+
+ /*
+ * sanity checks
+ */
+ Assert(OidIsValid(indexRelation->rd_rel->relam));
+
+ /* Remember if it's a system catalog */
+ is_system_catalog = IsSystemRelation(heapRelation);
+
+ /* See whether we're verifying uniqueness/exclusion properties */
+ checking_uniqueness = (indexInfo->ii_Unique ||
+ indexInfo->ii_ExclusionOps != NULL);
+
+ /*
+ * "Any visible" mode is not compatible with uniqueness checks; make sure
+ * only one of those is requested.
+ */
+ Assert(!(anyvisible && checking_uniqueness));
+
+ /*
+ * Need an EState for evaluation of index expressions and partial-index
+ * predicates. Also a slot to hold the current tuple.
+ */
+ estate = CreateExecutorState();
+ econtext = GetPerTupleExprContext(estate);
+ slot = table_slot_create(heapRelation, NULL);
+
+ /* Arrange for econtext's scan tuple to be the tuple under test */
+ econtext->ecxt_scantuple = slot;
+
+ /* Set up execution state for predicate, if any. */
+ predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
+
+ /*
+ * Prepare for scan of the base relation. In a normal index build, we use
+ * SnapshotAny because we must retrieve all tuples and do our own time
+ * qual checks (because we have to index RECENTLY_DEAD tuples). In a
+ * concurrent build, or during bootstrap, we take a regular MVCC snapshot
+ * and index whatever's live according to that.
+ */
+ OldestXmin = InvalidTransactionId;
+
+ /* okay to ignore lazy VACUUMs here */
+ if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent)
+ OldestXmin = GetOldestNonRemovableTransactionId(heapRelation);
+
+ if (!scan)
+ {
+ /*
+ * Serial index build.
+ *
+ * Must begin our own heap scan in this case. We may also need to
+ * register a snapshot whose lifetime is under our direct control.
+ */
+ if (!TransactionIdIsValid(OldestXmin))
+ {
+ snapshot = RegisterSnapshot(GetTransactionSnapshot());
+ need_unregister_snapshot = true;
+ }
+ else
+ snapshot = SnapshotAny;
+
+ scan = table_beginscan_strat(heapRelation, /* relation */
+ snapshot, /* snapshot */
+ 0, /* number of keys */
+ NULL, /* scan key */
+ true, /* buffer access strategy OK */
+ allow_sync); /* syncscan OK? */
+ }
+ else
+ {
+ /*
+ * Parallel index build.
+ *
+ * Parallel case never registers/unregisters own snapshot. Snapshot
+ * is taken from parallel heap scan, and is SnapshotAny or an MVCC
+ * snapshot, based on same criteria as serial case.
+ */
+ Assert(!IsBootstrapProcessingMode());
+ Assert(allow_sync);
+ snapshot = scan->rs_snapshot;
+ }
+
+ hscan = (HeapScanDesc) scan;
+
+ /*
+ * Must have called GetOldestNonRemovableTransactionId() if using
+ * SnapshotAny. Shouldn't have for an MVCC snapshot. (It's especially
+ * worth checking this for parallel builds, since ambuild routines that
+ * support parallel builds must work these details out for themselves.)
+ */
+ Assert(snapshot == SnapshotAny || IsMVCCSnapshot(snapshot));
+ Assert(snapshot == SnapshotAny ? TransactionIdIsValid(OldestXmin) :
+ !TransactionIdIsValid(OldestXmin));
+ Assert(snapshot == SnapshotAny || !anyvisible);
+
+ /* Publish number of blocks to scan */
+ if (progress)
+ {
+ BlockNumber nblocks;
+
+ if (hscan->rs_base.rs_parallel != NULL)
+ {
+ ParallelBlockTableScanDesc pbscan;
+
+ pbscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
+ nblocks = pbscan->phs_nblocks;
+ }
+ else
+ nblocks = hscan->rs_nblocks;
+
+ pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_TOTAL,
+ nblocks);
+ }
+
+ /* set our scan endpoints */
+ if (!allow_sync)
+ heap_setscanlimits(scan, start_blockno, numblocks);
+ else
+ {
+ /* syncscan can only be requested on whole relation */
+ Assert(start_blockno == 0);
+ Assert(numblocks == InvalidBlockNumber);
+ }
+
+ reltuples = 0;
+
+ /*
+ * Scan all tuples in the base relation.
+ */
+ while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
+ {
+ bool tupleIsAlive;
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Report scan progress, if asked to. */
+ if (progress)
+ {
+ BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
+
+ if (blocks_done != previous_blkno)
+ {
+ pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
+ blocks_done);
+ previous_blkno = blocks_done;
+ }
+ }
+
+ /*
+ * When dealing with a HOT-chain of updated tuples, we want to index
+ * the values of the live tuple (if any), but index it under the TID
+ * of the chain's root tuple. This approach is necessary to preserve
+ * the HOT-chain structure in the heap. So we need to be able to find
+ * the root item offset for every tuple that's in a HOT-chain. When
+ * first reaching a new page of the relation, call
+ * heap_get_root_tuples() to build a map of root item offsets on the
+ * page.
+ *
+ * It might look unsafe to use this information across buffer
+ * lock/unlock. However, we hold ShareLock on the table so no
+ * ordinary insert/update/delete should occur; and we hold pin on the
+ * buffer continuously while visiting the page, so no pruning
+ * operation can occur either.
+ *
+ * In cases with only ShareUpdateExclusiveLock on the table, it's
+ * possible for some HOT tuples to appear that we didn't know about
+ * when we first read the page. To handle that case, we re-obtain the
+ * list of root offsets when a HOT tuple points to a root item that we
+ * don't know about.
+ *
+ * Also, although our opinions about tuple liveness could change while
+ * we scan the page (due to concurrent transaction commits/aborts),
+ * the chain root locations won't, so this info doesn't need to be
+ * rebuilt after waiting for another transaction.
+ *
+ * Note the implied assumption that there is no more than one live
+ * tuple per HOT-chain --- else we could create more than one index
+ * entry pointing to the same root tuple.
+ */
+ if (hscan->rs_cblock != root_blkno)
+ {
+ Page page = BufferGetPage(hscan->rs_cbuf);
+
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
+ heap_get_root_tuples(page, root_offsets);
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+
+ root_blkno = hscan->rs_cblock;
+ }
+
+ if (snapshot == SnapshotAny)
+ {
+ /* do our own time qual check */
+ bool indexIt;
+ TransactionId xwait;
+
+ recheck:
+
+ /*
+ * We could possibly get away with not locking the buffer here,
+ * since caller should hold ShareLock on the relation, but let's
+ * be conservative about it. (This remark is still correct even
+ * with HOT-pruning: our pin on the buffer prevents pruning.)
+ */
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
+
+ /*
+ * The criteria for counting a tuple as live in this block need to
+ * match what heapam_scan_analyze_next_tuple() above (invoked from
+ * analyze.c) does, otherwise CREATE INDEX and ANALYZE may produce
+ * wildly different reltuples values, e.g. when there are many
+ * recently-dead tuples.
+ */
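+ /*
+ * Summary of the dispositions below (a reading aid only; it glosses
+ * over anyvisible mode and the uniqueness-wait rechecks): DEAD is
+ * skipped and not counted; LIVE is indexed and counted;
+ * RECENTLY_DEAD is indexed unless HOT-updated and never counted;
+ * INSERT_IN_PROGRESS is indexed, counted only for our own xact;
+ * DELETE_IN_PROGRESS is indexed unless it is our own HOT update,
+ * counted only when deleted by some other xact.
+ */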
+ switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin,
+ hscan->rs_cbuf))
+ {
+ case HEAPTUPLE_DEAD:
+ /* Definitely dead, we can ignore it */
+ indexIt = false;
+ tupleIsAlive = false;
+ break;
+ case HEAPTUPLE_LIVE:
+ /* Normal case, index and unique-check it */
+ indexIt = true;
+ tupleIsAlive = true;
+ /* Count it as live, too */
+ reltuples += 1;
+ break;
+ case HEAPTUPLE_RECENTLY_DEAD:
+
+ /*
+ * If tuple is recently deleted then we must index it
+ * anyway to preserve MVCC semantics. (Pre-existing
+ * transactions could try to use the index after we finish
+ * building it, and may need to see such tuples.)
+ *
+ * However, if it was HOT-updated then we must only index
+ * the live tuple at the end of the HOT-chain. Since this
+ * breaks semantics for pre-existing snapshots, mark the
+ * index as unusable for them.
+ *
+ * We don't count recently-dead tuples in reltuples, even
+ * if we index them; see heapam_scan_analyze_next_tuple().
+ */
+ if (HeapTupleIsHotUpdated(heapTuple))
+ {
+ indexIt = false;
+ /* mark the index as unsafe for old snapshots */
+ indexInfo->ii_BrokenHotChain = true;
+ }
+ else
+ indexIt = true;
+ /* In any case, exclude the tuple from unique-checking */
+ tupleIsAlive = false;
+ break;
+ case HEAPTUPLE_INSERT_IN_PROGRESS:
+
+ /*
+ * In "anyvisible" mode, this tuple is visible and we
+ * don't need any further checks.
+ */
+ if (anyvisible)
+ {
+ indexIt = true;
+ tupleIsAlive = true;
+ reltuples += 1;
+ break;
+ }
+
+ /*
+ * Since caller should hold ShareLock or better, normally
+ * the only way to see this is if it was inserted earlier
+ * in our own transaction. However, it can happen in
+ * system catalogs, since we tend to release write lock
+ * before commit there. Give a warning if neither case
+ * applies.
+ */
+ xwait = HeapTupleHeaderGetXmin(heapTuple->t_data);
+ if (!TransactionIdIsCurrentTransactionId(xwait))
+ {
+ if (!is_system_catalog)
+ elog(WARNING, "concurrent insert in progress within table \"%s\"",
+ RelationGetRelationName(heapRelation));
+
+ /*
+ * If we are performing uniqueness checks, indexing
+ * such a tuple could lead to a bogus uniqueness
+ * failure. In that case we wait for the inserting
+ * transaction to finish and check again.
+ */
+ if (checking_uniqueness)
+ {
+ /*
+ * Must drop the lock on the buffer before we wait
+ */
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+ XactLockTableWait(xwait, heapRelation,
+ &heapTuple->t_self,
+ XLTW_InsertIndexUnique);
+ CHECK_FOR_INTERRUPTS();
+ goto recheck;
+ }
+ }
+ else
+ {
+ /*
+ * For consistency with
+ * heapam_scan_analyze_next_tuple(), count
+ * HEAPTUPLE_INSERT_IN_PROGRESS tuples as live only
+ * when inserted by our own transaction.
+ */
+ reltuples += 1;
+ }
+
+ /*
+ * We must index such tuples, since if the index build
+ * commits then they're good.
+ */
+ indexIt = true;
+ tupleIsAlive = true;
+ break;
+ case HEAPTUPLE_DELETE_IN_PROGRESS:
+
+ /*
+ * As with INSERT_IN_PROGRESS case, this is unexpected
+ * unless it's our own deletion or a system catalog; but
+ * in anyvisible mode, this tuple is visible.
+ */
+ if (anyvisible)
+ {
+ indexIt = true;
+ tupleIsAlive = false;
+ reltuples += 1;
+ break;
+ }
+
+ xwait = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
+ if (!TransactionIdIsCurrentTransactionId(xwait))
+ {
+ if (!is_system_catalog)
+ elog(WARNING, "concurrent delete in progress within table \"%s\"",
+ RelationGetRelationName(heapRelation));
+
+ /*
+ * If we are performing uniqueness checks, assuming
+ * the tuple is dead could lead to missing a
+ * uniqueness violation. In that case we wait for the
+ * deleting transaction to finish and check again.
+ *
+ * Also, if it's a HOT-updated tuple, we should not
+ * index it but rather the live tuple at the end of
+ * the HOT-chain. However, the deleting transaction
+ * could abort, possibly leaving this tuple as live
+ * after all, in which case it has to be indexed. The
+ * only way to know what to do is to wait for the
+ * deleting transaction to finish and check again.
+ */
+ if (checking_uniqueness ||
+ HeapTupleIsHotUpdated(heapTuple))
+ {
+ /*
+ * Must drop the lock on the buffer before we wait
+ */
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+ XactLockTableWait(xwait, heapRelation,
+ &heapTuple->t_self,
+ XLTW_InsertIndexUnique);
+ CHECK_FOR_INTERRUPTS();
+ goto recheck;
+ }
+
+ /*
+ * Otherwise index it but don't check for uniqueness,
+ * the same as a RECENTLY_DEAD tuple.
+ */
+ indexIt = true;
+
+ /*
+ * Count HEAPTUPLE_DELETE_IN_PROGRESS tuples as live,
+ * if they were not deleted by the current
+ * transaction. That's what
+ * heapam_scan_analyze_next_tuple() does, and we want
+ * the behavior to be consistent.
+ */
+ reltuples += 1;
+ }
+ else if (HeapTupleIsHotUpdated(heapTuple))
+ {
+ /*
+ * It's a HOT-updated tuple deleted by our own xact.
+ * We can assume the deletion will commit (else the
+ * index contents don't matter), so treat the same as
+ * RECENTLY_DEAD HOT-updated tuples.
+ */
+ indexIt = false;
+ /* mark the index as unsafe for old snapshots */
+ indexInfo->ii_BrokenHotChain = true;
+ }
+ else
+ {
+ /*
+ * It's a regular tuple deleted by our own xact. Index
+ * it, but don't check for uniqueness nor count in
+ * reltuples, the same as a RECENTLY_DEAD tuple.
+ */
+ indexIt = true;
+ }
+ /* In any case, exclude the tuple from unique-checking */
+ tupleIsAlive = false;
+ break;
+ default:
+ elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
+ indexIt = tupleIsAlive = false; /* keep compiler quiet */
+ break;
+ }
+
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+
+ if (!indexIt)
+ continue;
+ }
+ else
+ {
+ /* heap_getnext did the time qual check */
+ tupleIsAlive = true;
+ reltuples += 1;
+ }
+
+ MemoryContextReset(econtext->ecxt_per_tuple_memory);
+
+ /* Set up for predicate or expression evaluation */
+ ExecStoreBufferHeapTuple(heapTuple, slot, hscan->rs_cbuf);
+
+ /*
+ * In a partial index, discard tuples that don't satisfy the
+ * predicate.
+ */
+ if (predicate != NULL)
+ {
+ if (!ExecQual(predicate, econtext))
+ continue;
+ }
+
+ /*
+ * For the current heap tuple, extract all the attributes we use in
+ * this index, and note which are null. This also performs evaluation
+ * of any expressions needed.
+ */
+ FormIndexDatum(indexInfo,
+ slot,
+ estate,
+ values,
+ isnull);
+
+ /*
+ * You'd think we should go ahead and build the index tuple here, but
+ * some index AMs want to do further processing on the data first. So
+ * pass the values[] and isnull[] arrays, instead.
+ */
+
+ if (HeapTupleIsHeapOnly(heapTuple))
+ {
+ /*
+ * For a heap-only tuple, pretend its TID is that of the root. See
+ * src/backend/access/heap/README.HOT for discussion.
+ */
+ ItemPointerData tid;
+ OffsetNumber offnum;
+
+ offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
+
+ /*
+ * If a HOT tuple points to a root that we don't know about,
+ * obtain root items afresh. If that still fails, report it as
+ * corruption.
+ */
+ if (root_offsets[offnum - 1] == InvalidOffsetNumber)
+ {
+ Page page = BufferGetPage(hscan->rs_cbuf);
+
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
+ heap_get_root_tuples(page, root_offsets);
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+ }
+
+ if (!OffsetNumberIsValid(root_offsets[offnum - 1]))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
+ ItemPointerGetBlockNumber(&heapTuple->t_self),
+ offnum,
+ RelationGetRelationName(heapRelation))));
+
+ ItemPointerSet(&tid, ItemPointerGetBlockNumber(&heapTuple->t_self),
+ root_offsets[offnum - 1]);
+
+ /* Call the AM's callback routine to process the tuple */
+ callback(indexRelation, &tid, values, isnull, tupleIsAlive,
+ callback_state);
+ }
+ else
+ {
+ /* Call the AM's callback routine to process the tuple */
+ callback(indexRelation, &heapTuple->t_self, values, isnull,
+ tupleIsAlive, callback_state);
+ }
+ }
+
+ /* Report scan progress one last time. */
+ if (progress)
+ {
+ BlockNumber blks_done;
+
+ if (hscan->rs_base.rs_parallel != NULL)
+ {
+ ParallelBlockTableScanDesc pbscan;
+
+ pbscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
+ blks_done = pbscan->phs_nblocks;
+ }
+ else
+ blks_done = hscan->rs_nblocks;
+
+ pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
+ blks_done);
+ }
+
+ table_endscan(scan);
+
+ /* we can now forget our snapshot, if set and registered by us */
+ if (need_unregister_snapshot)
+ UnregisterSnapshot(snapshot);
+
+ ExecDropSingleTupleTableSlot(slot);
+
+ FreeExecutorState(estate);
+
+ /* These may have been pointing to the now-gone estate */
+ indexInfo->ii_ExpressionsState = NIL;
+ indexInfo->ii_PredicateState = NULL;
+
+ return reltuples;
+}
+
+static void
+heapam_index_validate_scan(Relation heapRelation,
+ Relation indexRelation,
+ IndexInfo *indexInfo,
+ Snapshot snapshot,
+ ValidateIndexState *state)
+{
+ TableScanDesc scan;
+ HeapScanDesc hscan;
+ HeapTuple heapTuple;
+ Datum values[INDEX_MAX_KEYS];
+ bool isnull[INDEX_MAX_KEYS];
+ ExprState *predicate;
+ TupleTableSlot *slot;
+ EState *estate;
+ ExprContext *econtext;
+ BlockNumber root_blkno = InvalidBlockNumber;
+ OffsetNumber root_offsets[MaxHeapTuplesPerPage];
+ bool in_index[MaxHeapTuplesPerPage];
+ BlockNumber previous_blkno = InvalidBlockNumber;
+
+ /* state variables for the merge */
+ ItemPointer indexcursor = NULL;
+ ItemPointerData decoded;
+ bool tuplesort_empty = false;
+
+ /*
+ * sanity checks
+ */
+ Assert(OidIsValid(indexRelation->rd_rel->relam));
+
+ /*
+ * Need an EState for evaluation of index expressions and partial-index
+ * predicates. Also a slot to hold the current tuple.
+ */
+ estate = CreateExecutorState();
+ econtext = GetPerTupleExprContext(estate);
+ slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation),
+ &TTSOpsHeapTuple);
+
+ /* Arrange for econtext's scan tuple to be the tuple under test */
+ econtext->ecxt_scantuple = slot;
+
+ /* Set up execution state for predicate, if any. */
+ predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
+
+ /*
+ * Prepare for scan of the base relation. We need just those tuples
+ * satisfying the passed-in reference snapshot. We must disable syncscan
+ * here, because it's critical that we read from block zero forward to
+ * match the sorted TIDs.
+ */
+ scan = table_beginscan_strat(heapRelation, /* relation */
+ snapshot, /* snapshot */
+ 0, /* number of keys */
+ NULL, /* scan key */
+ true, /* buffer access strategy OK */
+ false); /* syncscan not OK */
+ hscan = (HeapScanDesc) scan;
+
+ pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_TOTAL,
+ hscan->rs_nblocks);
+
+ /*
+ * Scan all tuples matching the snapshot.
+ */
+ while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
+ {
+ ItemPointer heapcursor = &heapTuple->t_self;
+ ItemPointerData rootTuple;
+ OffsetNumber root_offnum;
+
+ CHECK_FOR_INTERRUPTS();
+
+ state->htups += 1;
+
+ if ((previous_blkno == InvalidBlockNumber) ||
+ (hscan->rs_cblock != previous_blkno))
+ {
+ pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE,
+ hscan->rs_cblock);
+ previous_blkno = hscan->rs_cblock;
+ }
+
+ /*
+ * As commented in table_index_build_scan, we should index heap-only
+ * tuples under the TIDs of their root tuples; so when we advance onto
+ * a new heap page, build a map of root item offsets on the page.
+ *
+ * This complicates merging against the tuplesort output: we will
+ * visit the live tuples in order by their offsets, but the root
+ * offsets that we need to compare against the index contents might be
+ * ordered differently. So we might have to "look back" within the
+ * tuplesort output, but only within the current page. We handle that
+ * by keeping a bool array in_index[] showing all the
+ * already-passed-over tuplesort output TIDs of the current page. We
+ * clear that array here, when advancing onto a new heap page.
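+ *
+ * For example (illustrative): if the tuple at offset 3 has root 7
+ * and the tuple at offset 5 has root 2, visiting offset 3 advances
+ * the sorted index cursor past (blk,2) on its way to (blk,7); when
+ * offset 5 is visited the cursor has already overshot its root, and
+ * in_index[] is what records whether (blk,2) was present.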
+ */
+ if (hscan->rs_cblock != root_blkno)
+ {
+ Page page = BufferGetPage(hscan->rs_cbuf);
+
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
+ heap_get_root_tuples(page, root_offsets);
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+
+ memset(in_index, 0, sizeof(in_index));
+
+ root_blkno = hscan->rs_cblock;
+ }
+
+ /* Convert actual tuple TID to root TID */
+ rootTuple = *heapcursor;
+ root_offnum = ItemPointerGetOffsetNumber(heapcursor);
+
+ if (HeapTupleIsHeapOnly(heapTuple))
+ {
+ root_offnum = root_offsets[root_offnum - 1];
+ if (!OffsetNumberIsValid(root_offnum))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"",
+ ItemPointerGetBlockNumber(heapcursor),
+ ItemPointerGetOffsetNumber(heapcursor),
+ RelationGetRelationName(heapRelation))));
+ ItemPointerSetOffsetNumber(&rootTuple, root_offnum);
+ }
+
+ /*
+ * "merge" by skipping through the index tuples until we find or pass
+ * the current root tuple.
+ */
+ while (!tuplesort_empty &&
+ (!indexcursor ||
+ ItemPointerCompare(indexcursor, &rootTuple) < 0))
+ {
+ Datum ts_val;
+ bool ts_isnull;
+
+ if (indexcursor)
+ {
+ /*
+ * Remember index items seen earlier on the current heap page
+ */
+ if (ItemPointerGetBlockNumber(indexcursor) == root_blkno)
+ in_index[ItemPointerGetOffsetNumber(indexcursor) - 1] = true;
+ }
+
+ tuplesort_empty = !tuplesort_getdatum(state->tuplesort, true,
+ &ts_val, &ts_isnull, NULL);
+ Assert(tuplesort_empty || !ts_isnull);
+ if (!tuplesort_empty)
+ {
+ itemptr_decode(&decoded, DatumGetInt64(ts_val));
+ indexcursor = &decoded;
+
+ /* If int8 is pass-by-ref, free (encoded) TID Datum memory */
+#ifndef USE_FLOAT8_BYVAL
+ pfree(DatumGetPointer(ts_val));
+#endif
+ }
+ else
+ {
+ /* Be tidy */
+ indexcursor = NULL;
+ }
+ }
+
+ /*
+ * If the tuplesort has overshot *and* we didn't see a match earlier,
+ * then this tuple is missing from the index, so insert it.
+ */
+ if ((tuplesort_empty ||
+ ItemPointerCompare(indexcursor, &rootTuple) > 0) &&
+ !in_index[root_offnum - 1])
+ {
+ MemoryContextReset(econtext->ecxt_per_tuple_memory);
+
+ /* Set up for predicate or expression evaluation */
+ ExecStoreHeapTuple(heapTuple, slot, false);
+
+ /*
+ * In a partial index, discard tuples that don't satisfy the
+ * predicate.
+ */
+ if (predicate != NULL)
+ {
+ if (!ExecQual(predicate, econtext))
+ continue;
+ }
+
+ /*
+ * For the current heap tuple, extract all the attributes we use
+ * in this index, and note which are null. This also performs
+ * evaluation of any expressions needed.
+ */
+ FormIndexDatum(indexInfo,
+ slot,
+ estate,
+ values,
+ isnull);
+
+ /*
+ * You'd think we should go ahead and build the index tuple here,
+ * but some index AMs want to do further processing on the data
+ * first. So pass the values[] and isnull[] arrays, instead.
+ */
+
+ /*
+ * If the tuple is already committed dead, you might think we
+ * could suppress uniqueness checking, but this is no longer true
+ * in the presence of HOT, because the insert is actually a proxy
+ * for a uniqueness check on the whole HOT-chain. That is, the
+ * tuple we have here could be dead because it was already
+ * HOT-updated, and if so the updating transaction will not have
+ * thought it should insert index entries. The index AM will
+ * check the whole HOT-chain and correctly detect a conflict if
+ * there is one.
+ */
+
+ index_insert(indexRelation,
+ values,
+ isnull,
+ &rootTuple,
+ heapRelation,
+ indexInfo->ii_Unique ?
+ UNIQUE_CHECK_YES : UNIQUE_CHECK_NO,
+ false,
+ indexInfo);
+
+ state->tups_inserted += 1;
+ }
+ }
+
+ table_endscan(scan);
+
+ ExecDropSingleTupleTableSlot(slot);
+
+ FreeExecutorState(estate);
+
+ /* These may have been pointing to the now-gone estate */
+ indexInfo->ii_ExpressionsState = NIL;
+ indexInfo->ii_PredicateState = NULL;
+}
+
+/*
+ * Return the number of blocks that have been read by this scan since
+ * starting. This is meant for progress reporting rather than being fully
+ * accurate: in a parallel scan, workers can be concurrently reading blocks
+ * further ahead than what we report.
+ */
+static BlockNumber
+heapam_scan_get_blocks_done(HeapScanDesc hscan)
+{
+ ParallelBlockTableScanDesc bpscan = NULL;
+ BlockNumber startblock;
+ BlockNumber blocks_done;
+
+ if (hscan->rs_base.rs_parallel != NULL)
+ {
+ bpscan = (ParallelBlockTableScanDesc) hscan->rs_base.rs_parallel;
+ startblock = bpscan->phs_startblock;
+ }
+ else
+ startblock = hscan->rs_startblock;
+
+ /*
+ * Might have wrapped around the end of the relation, if startblock was
+ * not zero.
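+ *
+ * For example, with startblock = 90, nblocks = 100 and rs_cblock = 5,
+ * blocks_done = 100 - 90 + 5 = 15, i.e. blocks 90..99 plus 0..4.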
+ */
+ if (hscan->rs_cblock > startblock)
+ blocks_done = hscan->rs_cblock - startblock;
+ else
+ {
+ BlockNumber nblocks;
+
+ nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
+ blocks_done = nblocks - startblock +
+ hscan->rs_cblock;
+ }
+
+ return blocks_done;
+}
+
+
+/* ------------------------------------------------------------------------
+ * Miscellaneous callbacks for the heap AM
+ * ------------------------------------------------------------------------
+ */
+
+/*
+ * Check to see whether the table needs a TOAST table. It does only if
+ * (1) there are any toastable attributes, and (2) the maximum length
+ * of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
+ * create a toast table for something like "f1 varchar(20)".)
+ */
+static bool
+heapam_relation_needs_toast_table(Relation rel)
+{
+ int32 data_length = 0;
+ bool maxlength_unknown = false;
+ bool has_toastable_attrs = false;
+ TupleDesc tupdesc = rel->rd_att;
+ int32 tuple_length;
+ int i;
+
+ for (i = 0; i < tupdesc->natts; i++)
+ {
+ Form_pg_attribute att = TupleDescAttr(tupdesc, i);
+
+ if (att->attisdropped)
+ continue;
+ data_length = att_align_nominal(data_length, att->attalign);
+ if (att->attlen > 0)
+ {
+ /* Fixed-length types are never toastable */
+ data_length += att->attlen;
+ }
+ else
+ {
+ int32 maxlen = type_maximum_size(att->atttypid,
+ att->atttypmod);
+
+ if (maxlen < 0)
+ maxlength_unknown = true;
+ else
+ data_length += maxlen;
+ if (att->attstorage != TYPSTORAGE_PLAIN)
+ has_toastable_attrs = true;
+ }
+ }
+ if (!has_toastable_attrs)
+ return false; /* nothing to toast? */
+ if (maxlength_unknown)
+ return true; /* any unlimited-length attrs? */
+ tuple_length = MAXALIGN(SizeofHeapTupleHeader +
+ BITMAPLEN(tupdesc->natts)) +
+ MAXALIGN(data_length);
+ return (tuple_length > TOAST_TUPLE_THRESHOLD);
+}
+
+/*
+ * TOAST tables for heap relations are just heap relations.
+ */
+static Oid
+heapam_relation_toast_am(Relation rel)
+{
+ return rel->rd_rel->relam;
+}
+
+
+/* ------------------------------------------------------------------------
+ * Planner related callbacks for the heap AM
+ * ------------------------------------------------------------------------
+ */
+
+#define HEAP_OVERHEAD_BYTES_PER_TUPLE \
+ (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))
+#define HEAP_USABLE_BYTES_PER_PAGE \
+ (BLCKSZ - SizeOfPageHeaderData)
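+
+/*
+ * For illustration (assuming 64-bit MAXALIGN and the default 8 kB BLCKSZ):
+ * the overhead works out to MAXALIGN(23) + 4 = 28 bytes per tuple, and
+ * 8192 - 24 = 8168 usable bytes per page.
+ */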
+
+static void
+heapam_estimate_rel_size(Relation rel, int32 *attr_widths,
+ BlockNumber *pages, double *tuples,
+ double *allvisfrac)
+{
+ table_block_relation_estimate_size(rel, attr_widths, pages,
+ tuples, allvisfrac,
+ HEAP_OVERHEAD_BYTES_PER_TUPLE,
+ HEAP_USABLE_BYTES_PER_PAGE);
+}
+
+
+/* ------------------------------------------------------------------------
+ * Executor related callbacks for the heap AM
+ * ------------------------------------------------------------------------
+ */
+
+static bool
+heapam_scan_bitmap_next_block(TableScanDesc scan,
+ TBMIterateResult *tbmres)
+{
+ HeapScanDesc hscan = (HeapScanDesc) scan;
+ BlockNumber page = tbmres->blockno;
+ Buffer buffer;
+ Snapshot snapshot;
+ int ntup;
+
+ hscan->rs_cindex = 0;
+ hscan->rs_ntuples = 0;
+
+ /*
+ * Ignore any claimed entries past what we think is the end of the
+ * relation. It may have been extended after the start of our scan (we
+ * only hold an AccessShareLock, and it could be inserts from this
+ * backend).
+ */
+ if (page >= hscan->rs_nblocks)
+ return false;
+
+ /*
+ * Acquire pin on the target heap page, trading in any pin we held before.
+ */
+ hscan->rs_cbuf = ReleaseAndReadBuffer(hscan->rs_cbuf,
+ scan->rs_rd,
+ page);
+ hscan->rs_cblock = page;
+ buffer = hscan->rs_cbuf;
+ snapshot = scan->rs_snapshot;
+
+ ntup = 0;
+
+ /*
+ * Prune and repair fragmentation for the whole page, if possible.
+ */
+ heap_page_prune_opt(scan->rs_rd, buffer);
+
+ /*
+ * We must hold share lock on the buffer content while examining tuple
+ * visibility. Afterwards, however, the tuples we have found to be
+ * visible are guaranteed good as long as we hold the buffer pin.
+ */
+ LockBuffer(buffer, BUFFER_LOCK_SHARE);
+
+ /*
+ * We need two separate strategies for lossy and non-lossy cases.
+ */
+ if (tbmres->ntuples >= 0)
+ {
+ /*
+ * Bitmap is non-lossy, so we just look through the offsets listed in
+ * tbmres; but we have to follow any HOT chain starting at each such
+ * offset.
+ */
+ int curslot;
+
+ for (curslot = 0; curslot < tbmres->ntuples; curslot++)
+ {
+ OffsetNumber offnum = tbmres->offsets[curslot];
+ ItemPointerData tid;
+ HeapTupleData heapTuple;
+
+ ItemPointerSet(&tid, page, offnum);
+ if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
+ &heapTuple, NULL, true))
+ hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
+ }
+ }
+ else
+ {
+ /*
+ * Bitmap is lossy, so we must examine each line pointer on the page.
+ * But we can ignore HOT chains, since we'll check each tuple anyway.
+ */
+ Page dp = (Page) BufferGetPage(buffer);
+ OffsetNumber maxoff = PageGetMaxOffsetNumber(dp);
+ OffsetNumber offnum;
+
+ for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
+ {
+ ItemId lp;
+ HeapTupleData loctup;
+ bool valid;
+
+ lp = PageGetItemId(dp, offnum);
+ if (!ItemIdIsNormal(lp))
+ continue;
+ loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
+ loctup.t_len = ItemIdGetLength(lp);
+ loctup.t_tableOid = scan->rs_rd->rd_id;
+ ItemPointerSet(&loctup.t_self, page, offnum);
+ valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
+ if (valid)
+ {
+ hscan->rs_vistuples[ntup++] = offnum;
+ PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
+ HeapTupleHeaderGetXmin(loctup.t_data));
+ }
+ HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
+ buffer, snapshot);
+ }
+ }
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ Assert(ntup <= MaxHeapTuplesPerPage);
+ hscan->rs_ntuples = ntup;
+
+ return ntup > 0;
+}
+
+static bool
+heapam_scan_bitmap_next_tuple(TableScanDesc scan,
+ TBMIterateResult *tbmres,
+ TupleTableSlot *slot)
+{
+ HeapScanDesc hscan = (HeapScanDesc) scan;
+ OffsetNumber targoffset;
+ Page dp;
+ ItemId lp;
+
+ /*
+ * Out of range? If so, nothing more to look at on this page
+ */
+ if (hscan->rs_cindex < 0 || hscan->rs_cindex >= hscan->rs_ntuples)
+ return false;
+
+ targoffset = hscan->rs_vistuples[hscan->rs_cindex];
+ dp = (Page) BufferGetPage(hscan->rs_cbuf);
+ lp = PageGetItemId(dp, targoffset);
+ Assert(ItemIdIsNormal(lp));
+
+ hscan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
+ hscan->rs_ctup.t_len = ItemIdGetLength(lp);
+ hscan->rs_ctup.t_tableOid = scan->rs_rd->rd_id;
+ ItemPointerSet(&hscan->rs_ctup.t_self, hscan->rs_cblock, targoffset);
+
+ pgstat_count_heap_fetch(scan->rs_rd);
+
+ /*
+ * Set up the result slot to point to this tuple. Note that the slot
+ * acquires a pin on the buffer.
+ */
+ ExecStoreBufferHeapTuple(&hscan->rs_ctup,
+ slot,
+ hscan->rs_cbuf);
+
+ hscan->rs_cindex++;
+
+ return true;
+}
+
+static bool
+heapam_scan_sample_next_block(TableScanDesc scan, SampleScanState *scanstate)
+{
+ HeapScanDesc hscan = (HeapScanDesc) scan;
+ TsmRoutine *tsm = scanstate->tsmroutine;
+ BlockNumber blockno;
+
+ /* return false immediately if relation is empty */
+ if (hscan->rs_nblocks == 0)
+ return false;
+
+ if (tsm->NextSampleBlock)
+ {
+ blockno = tsm->NextSampleBlock(scanstate, hscan->rs_nblocks);
+ hscan->rs_cblock = blockno;
+ }
+ else
+ {
+ /* scanning table sequentially */
+
+ if (hscan->rs_cblock == InvalidBlockNumber)
+ {
+ Assert(!hscan->rs_inited);
+ blockno = hscan->rs_startblock;
+ }
+ else
+ {
+ Assert(hscan->rs_inited);
+
+ blockno = hscan->rs_cblock + 1;
+
+ if (blockno >= hscan->rs_nblocks)
+ {
+ /* wrap to beginning of rel, might not have started at 0 */
+ blockno = 0;
+ }
+
+ /*
+ * Report our new scan position for synchronization purposes.
+ *
+ * Note: we do this before checking for end of scan so that the
+ * final state of the position hint is back at the start of the
+ * rel. That's not strictly necessary, but otherwise when you run
+ * the same query multiple times the starting position would shift
+ * a little bit backwards on every invocation, which is confusing.
+ * We don't guarantee any specific ordering in general, though.
+ */
+ if (scan->rs_flags & SO_ALLOW_SYNC)
+ ss_report_location(scan->rs_rd, blockno);
+
+ if (blockno == hscan->rs_startblock)
+ {
+ blockno = InvalidBlockNumber;
+ }
+ }
+ }
+
+ if (!BlockNumberIsValid(blockno))
+ {
+ if (BufferIsValid(hscan->rs_cbuf))
+ ReleaseBuffer(hscan->rs_cbuf);
+ hscan->rs_cbuf = InvalidBuffer;
+ hscan->rs_cblock = InvalidBlockNumber;
+ hscan->rs_inited = false;
+
+ return false;
+ }
+
+ heapgetpage(scan, blockno);
+ hscan->rs_inited = true;
+
+ return true;
+}
+
+static bool
+heapam_scan_sample_next_tuple(TableScanDesc scan, SampleScanState *scanstate,
+ TupleTableSlot *slot)
+{
+ HeapScanDesc hscan = (HeapScanDesc) scan;
+ TsmRoutine *tsm = scanstate->tsmroutine;
+ BlockNumber blockno = hscan->rs_cblock;
+ bool pagemode = (scan->rs_flags & SO_ALLOW_PAGEMODE) != 0;
+
+ Page page;
+ bool all_visible;
+ OffsetNumber maxoffset;
+
+ /*
+ * When not using pagemode, we must lock the buffer during tuple
+ * visibility checks.
+ */
+ if (!pagemode)
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
+
+ page = (Page) BufferGetPage(hscan->rs_cbuf);
+ all_visible = PageIsAllVisible(page) &&
+ !scan->rs_snapshot->takenDuringRecovery;
+ maxoffset = PageGetMaxOffsetNumber(page);
+
+ for (;;)
+ {
+ OffsetNumber tupoffset;
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Ask the tablesample method which tuples to check on this page. */
+ tupoffset = tsm->NextSampleTuple(scanstate,
+ blockno,
+ maxoffset);
+
+ if (OffsetNumberIsValid(tupoffset))
+ {
+ ItemId itemid;
+ bool visible;
+ HeapTuple tuple = &(hscan->rs_ctup);
+
+ /* Skip invalid tuple pointers. */
+ itemid = PageGetItemId(page, tupoffset);
+ if (!ItemIdIsNormal(itemid))
+ continue;
+
+ tuple->t_data = (HeapTupleHeader) PageGetItem(page, itemid);
+ tuple->t_len = ItemIdGetLength(itemid);
+ ItemPointerSet(&(tuple->t_self), blockno, tupoffset);
+
+ if (all_visible)
+ visible = true;
+ else
+ visible = SampleHeapTupleVisible(scan, hscan->rs_cbuf,
+ tuple, tupoffset);
+
+ /* in pagemode, heapgetpage did this for us */
+ if (!pagemode)
+ HeapCheckForSerializableConflictOut(visible, scan->rs_rd, tuple,
+ hscan->rs_cbuf, scan->rs_snapshot);
+
+ /* Try next tuple from same page. */
+ if (!visible)
+ continue;
+
+ /* Found visible tuple, return it. */
+ if (!pagemode)
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+
+ ExecStoreBufferHeapTuple(tuple, slot, hscan->rs_cbuf);
+
+ /* Count successfully-fetched tuples as heap fetches */
+ pgstat_count_heap_getnext(scan->rs_rd);
+
+ return true;
+ }
+ else
+ {
+ /*
+ * If we get here, it means we've exhausted the items on this page
+ * and it's time to move to the next.
+ */
+ if (!pagemode)
+ LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+
+ ExecClearTuple(slot);
+ return false;
+ }
+ }
+
+ Assert(0);					/* not reached: the loop above always returns */
+}
+
+
+/* ----------------------------------------------------------------------------
+ * Helper functions for the above.
+ * ----------------------------------------------------------------------------
+ */
+
+/*
+ * Reconstruct and rewrite the given tuple
+ *
+ * We cannot simply copy the tuple as-is, for several reasons:
+ *
+ * 1. We'd like to squeeze out the values of any dropped columns, both
+ * to save space and to ensure we have no corner-case failures. (It's
+ * possible for example that the new table hasn't got a TOAST table
+ * and so is unable to store any large values of dropped cols.)
+ *
+ * 2. The tuple might not even be legal for the new table; this is
+ * currently only known to happen as an after-effect of ALTER TABLE
+ * SET WITHOUT OIDS.
+ *
+ * So, we must reconstruct the tuple from component Datums.
+ */
+static void
+reform_and_rewrite_tuple(HeapTuple tuple,
+ Relation OldHeap, Relation NewHeap,
+ Datum *values, bool *isnull, RewriteState rwstate)
+{
+ TupleDesc oldTupDesc = RelationGetDescr(OldHeap);
+ TupleDesc newTupDesc = RelationGetDescr(NewHeap);
+ HeapTuple copiedTuple;
+ int i;
+
+ heap_deform_tuple(tuple, oldTupDesc, values, isnull);
+
+ /* Be sure to null out any dropped columns */
+ for (i = 0; i < newTupDesc->natts; i++)
+ {
+ if (TupleDescAttr(newTupDesc, i)->attisdropped)
+ isnull[i] = true;
+ }
+
+ copiedTuple = heap_form_tuple(newTupDesc, values, isnull);
+
+ /* The heap rewrite module does the rest */
+ rewrite_heap_tuple(rwstate, tuple, copiedTuple);
+
+ heap_freetuple(copiedTuple);
+}
+
+/*
+ * Check visibility of the tuple.
+ */
+static bool
+SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
+ HeapTuple tuple,
+ OffsetNumber tupoffset)
+{
+ HeapScanDesc hscan = (HeapScanDesc) scan;
+
+ if (scan->rs_flags & SO_ALLOW_PAGEMODE)
+ {
+ /*
+ * In page-at-a-time mode, heapgetpage() already did visibility checks,
+ * so just look at the info it left in rs_vistuples[].
+ *
+ * We use a binary search over the known-sorted array. Note: we could
+ * save some effort if we insisted that NextSampleTuple select tuples
+ * in increasing order, but it's not clear that there would be enough
+ * gain to justify the restriction.
+ */
+ int start = 0,
+ end = hscan->rs_ntuples - 1;
+
+ while (start <= end)
+ {
+ int mid = (start + end) / 2;
+ OffsetNumber curoffset = hscan->rs_vistuples[mid];
+
+ if (tupoffset == curoffset)
+ return true;
+ else if (tupoffset < curoffset)
+ end = mid - 1;
+ else
+ start = mid + 1;
+ }
+
+ return false;
+ }
+ else
+ {
+ /* Otherwise, we have to check the tuple individually. */
+ return HeapTupleSatisfiesVisibility(tuple, scan->rs_snapshot,
+ buffer);
+ }
+}
+
+
+/* ------------------------------------------------------------------------
+ * Definition of the heap table access method.
+ * ------------------------------------------------------------------------
+ */
+
+static const TableAmRoutine heapam_methods = {
+ .type = T_TableAmRoutine,
+
+ .slot_callbacks = heapam_slot_callbacks,
+
+ .scan_begin = heap_beginscan,
+ .scan_end = heap_endscan,
+ .scan_rescan = heap_rescan,
+ .scan_getnextslot = heap_getnextslot,
+
+ .scan_set_tidrange = heap_set_tidrange,
+ .scan_getnextslot_tidrange = heap_getnextslot_tidrange,
+
+ .parallelscan_estimate = table_block_parallelscan_estimate,
+ .parallelscan_initialize = table_block_parallelscan_initialize,
+ .parallelscan_reinitialize = table_block_parallelscan_reinitialize,
+
+ .index_fetch_begin = heapam_index_fetch_begin,
+ .index_fetch_reset = heapam_index_fetch_reset,
+ .index_fetch_end = heapam_index_fetch_end,
+ .index_fetch_tuple = heapam_index_fetch_tuple,
+
+ .tuple_insert = heapam_tuple_insert,
+ .tuple_insert_speculative = heapam_tuple_insert_speculative,
+ .tuple_complete_speculative = heapam_tuple_complete_speculative,
+ .multi_insert = heap_multi_insert,
+ .tuple_delete = heapam_tuple_delete,
+ .tuple_update = heapam_tuple_update,
+ .tuple_lock = heapam_tuple_lock,
+
+ .tuple_fetch_row_version = heapam_fetch_row_version,
+ .tuple_get_latest_tid = heap_get_latest_tid,
+ .tuple_tid_valid = heapam_tuple_tid_valid,
+ .tuple_satisfies_snapshot = heapam_tuple_satisfies_snapshot,
+ .index_delete_tuples = heap_index_delete_tuples,
+
+ .relation_set_new_filenode = heapam_relation_set_new_filenode,
+ .relation_nontransactional_truncate = heapam_relation_nontransactional_truncate,
+ .relation_copy_data = heapam_relation_copy_data,
+ .relation_copy_for_cluster = heapam_relation_copy_for_cluster,
+ .relation_vacuum = heap_vacuum_rel,
+ .scan_analyze_next_block = heapam_scan_analyze_next_block,
+ .scan_analyze_next_tuple = heapam_scan_analyze_next_tuple,
+ .index_build_range_scan = heapam_index_build_range_scan,
+ .index_validate_scan = heapam_index_validate_scan,
+
+ .relation_size = table_block_relation_size,
+ .relation_needs_toast_table = heapam_relation_needs_toast_table,
+ .relation_toast_am = heapam_relation_toast_am,
+ .relation_fetch_toast_slice = heap_fetch_toast_slice,
+
+ .relation_estimate_size = heapam_estimate_rel_size,
+
+ .scan_bitmap_next_block = heapam_scan_bitmap_next_block,
+ .scan_bitmap_next_tuple = heapam_scan_bitmap_next_tuple,
+ .scan_sample_next_block = heapam_scan_sample_next_block,
+ .scan_sample_next_tuple = heapam_scan_sample_next_tuple
+};
+
+
+const TableAmRoutine *
+GetHeapamTableAmRoutine(void)
+{
+ return &heapam_methods;
+}
+
+Datum
+heap_tableam_handler(PG_FUNCTION_ARGS)
+{
+ PG_RETURN_POINTER(&heapam_methods);
+}
diff --git a/src/backend/access/heap/heapam_visibility.c b/src/backend/access/heap/heapam_visibility.c
new file mode 100644
index 0000000..20d82ca
--- /dev/null
+++ b/src/backend/access/heap/heapam_visibility.c
@@ -0,0 +1,1794 @@
+/*-------------------------------------------------------------------------
+ *
+ * heapam_visibility.c
+ * Tuple visibility rules for tuples stored in heap.
+ *
+ * NOTE: all the HeapTupleSatisfies routines will update the tuple's
+ * "hint" status bits if we see that the inserting or deleting transaction
+ * has now committed or aborted (and it is safe to set the hint bits).
+ * If the hint bits are changed, MarkBufferDirtyHint is called on
+ * the passed-in buffer. The caller must hold not only a pin, but at least
+ * shared buffer content lock on the buffer containing the tuple.
+ *
+ * NOTE: When using a non-MVCC snapshot, we must check
+ * TransactionIdIsInProgress (which looks in the PGPROC array)
+ * before TransactionIdDidCommit/TransactionIdDidAbort (which look in
+ * pg_xact). Otherwise we have a race condition: we might decide that a
+ * just-committed transaction crashed, because none of the tests succeed.
+ * xact.c is careful to record commit/abort in pg_xact before it unsets
+ * MyProc->xid in the PGPROC array. That fixes that problem, but it
+ * also means there is a window where TransactionIdIsInProgress and
+ * TransactionIdDidCommit will both return true. If we check only
+ * TransactionIdDidCommit, we could consider a tuple committed when a
+ * later GetSnapshotData call will still think the originating transaction
+ * is in progress, which leads to application-level inconsistency. The
+ * upshot is that we must check TransactionIdIsInProgress first in all
+ * code paths, except for a few cases where we are looking at
+ * subtransactions of our own main transaction and so there can't be any
+ * race condition.
+ *
+ * When using an MVCC snapshot, we rely on XidInMVCCSnapshot rather than
+ * TransactionIdIsInProgress, but the logic is otherwise the same: do not
+ * check pg_xact until after deciding that the xact is no longer in progress.
+ *
+ *
+ * Summary of visibility functions:
+ *
+ * HeapTupleSatisfiesMVCC()
+ * visible to supplied snapshot, excludes current command
+ * HeapTupleSatisfiesUpdate()
+ * visible to instant snapshot, with user-supplied command
+ * counter and more complex result
+ * HeapTupleSatisfiesSelf()
+ * visible to instant snapshot and current command
+ * HeapTupleSatisfiesDirty()
+ * like HeapTupleSatisfiesSelf(), but includes open transactions
+ * HeapTupleSatisfiesVacuum()
+ * visible to any running transaction, used by VACUUM
+ * HeapTupleSatisfiesNonVacuumable()
+ * Snapshot-style API for HeapTupleSatisfiesVacuum
+ * HeapTupleSatisfiesToast()
+ * visible unless part of interrupted vacuum, used for TOAST
+ * HeapTupleSatisfiesAny()
+ * all tuples are visible
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/access/heap/heapam_visibility.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/heapam.h"
+#include "access/htup_details.h"
+#include "access/multixact.h"
+#include "access/subtrans.h"
+#include "access/tableam.h"
+#include "access/transam.h"
+#include "access/xact.h"
+#include "access/xlog.h"
+#include "storage/bufmgr.h"
+#include "storage/procarray.h"
+#include "utils/builtins.h"
+#include "utils/combocid.h"
+#include "utils/snapmgr.h"
+
+
+/*
+ * SetHintBits()
+ *
+ * Set commit/abort hint bits on a tuple, if appropriate at this time.
+ *
+ * It is only safe to set a transaction-committed hint bit if we know the
+ * transaction's commit record is guaranteed to be flushed to disk before the
+ * buffer, or if the table is temporary or unlogged and will be obliterated by
+ * a crash anyway. We cannot change the LSN of the page here, because we may
+ * hold only a share lock on the buffer, so we can only use the LSN to
+ * interlock this if the buffer's LSN already is newer than the commit LSN;
+ * otherwise we have to just refrain from setting the hint bit until some
+ * future re-examination of the tuple.
+ *
+ * We can always set hint bits when marking a transaction aborted. (Some
+ * code in heapam.c relies on that!)
+ *
+ * Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
+ * we can always set the hint bits, since pre-9.0 VACUUM FULL always used
+ * synchronous commits and didn't move tuples that weren't previously
+ * hinted. (This is not known by this subroutine, but is applied by its
+ * callers.) Note: old-style VACUUM FULL is gone, but we have to keep this
+ * module's support for MOVED_OFF/MOVED_IN flag bits for as long as we
+ * support in-place update from pre-9.0 databases.
+ *
+ * Normal commits may be asynchronous, so for those we need to get the LSN
+ * of the transaction and then check whether this is flushed.
+ *
+ * The caller should pass xid as the XID of the transaction to check, or
+ * InvalidTransactionId if no check is needed.
+ */
+static inline void
+SetHintBits(HeapTupleHeader tuple, Buffer buffer,
+ uint16 infomask, TransactionId xid)
+{
+ if (TransactionIdIsValid(xid))
+ {
+ /* NB: xid must be known committed here! */
+ XLogRecPtr commitLSN = TransactionIdGetCommitLSN(xid);
+
+ if (BufferIsPermanent(buffer) && XLogNeedsFlush(commitLSN) &&
+ BufferGetLSNAtomic(buffer) < commitLSN)
+ {
+ /* not flushed and no LSN interlock, so don't set hint */
+ return;
+ }
+ }
+
+ tuple->t_infomask |= infomask;
+ MarkBufferDirtyHint(buffer, true);
+}
+
+/*
+ * HeapTupleSetHintBits --- exported version of SetHintBits()
+ *
+ * This must be separate because of C99's brain-dead notions about how to
+ * implement inline functions.
+ */
+void
+HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer,
+ uint16 infomask, TransactionId xid)
+{
+ SetHintBits(tuple, buffer, infomask, xid);
+}
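+
+#ifdef NOT_USED
+/*
+ * A minimal sketch (hypothetical, not upstream code) of the canonical
+ * pattern in which the visibility routines below invoke SetHintBits().
+ * Note the ordering mandated by the NOTE at the top of this file:
+ * TransactionIdIsInProgress must be checked before TransactionIdDidCommit,
+ * else a just-committed transaction could be mistaken for a crashed one.
+ */
+static void
+hint_xmin_status(HeapTupleHeader tuple, Buffer buffer)
+{
+	TransactionId xmin = HeapTupleHeaderGetRawXmin(tuple);
+
+	if (TransactionIdIsInProgress(xmin))
+		return;					/* no hint can be set yet */
+	if (TransactionIdDidCommit(xmin))
+		SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED, xmin);
+	else
+		SetHintBits(tuple, buffer, HEAP_XMIN_INVALID, InvalidTransactionId);
+}
+#endif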
+
+
+/*
+ * HeapTupleSatisfiesSelf
+ * True iff heap tuple is valid "for itself".
+ *
+ * See SNAPSHOT_MVCC's definition for the intended behaviour.
+ *
+ * Note:
+ * Assumes heap tuple is valid.
+ *
+ * The satisfaction of "itself" requires the following:
+ *
+ * ((Xmin == my-transaction &&            the row was updated by the current transaction, and
+ *     (Xmax is null                      it was not deleted
+ *      [|| Xmax != my-transaction)])     [or it was deleted by another transaction]
+ * ||
+ *
+ *  (Xmin is committed &&                 the row was modified by a committed transaction, and
+ *     (Xmax is null ||                   the row has not been deleted, or
+ *      (Xmax != my-transaction &&        the row was deleted by another transaction
+ *       Xmax is not committed)))         that has not been committed
+ */
+static bool
+HeapTupleSatisfiesSelf(HeapTuple htup, Snapshot snapshot, Buffer buffer)
+{
+ HeapTupleHeader tuple = htup->t_data;
+
+ Assert(ItemPointerIsValid(&htup->t_self));
+ Assert(htup->t_tableOid != InvalidOid);
+
+ if (!HeapTupleHeaderXminCommitted(tuple))
+ {
+ if (HeapTupleHeaderXminInvalid(tuple))
+ return false;
+
+ /* Used by pre-9.0 binary upgrades */
+ if (tuple->t_infomask & HEAP_MOVED_OFF)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (TransactionIdIsCurrentTransactionId(xvac))
+ return false;
+ if (!TransactionIdIsInProgress(xvac))
+ {
+ if (TransactionIdDidCommit(xvac))
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ }
+ }
+ /* Used by pre-9.0 binary upgrades */
+ else if (tuple->t_infomask & HEAP_MOVED_IN)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (!TransactionIdIsCurrentTransactionId(xvac))
+ {
+ if (TransactionIdIsInProgress(xvac))
+ return false;
+ if (TransactionIdDidCommit(xvac))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ else
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ }
+ }
+ else if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmin(tuple)))
+ {
+ if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
+ return true;
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ return true;
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId xmax;
+
+ xmax = HeapTupleGetUpdateXid(tuple);
+
+ /* not LOCKED_ONLY, so it has to have an xmax */
+ Assert(TransactionIdIsValid(xmax));
+
+ /* updating subtransaction must have aborted */
+ if (!TransactionIdIsCurrentTransactionId(xmax))
+ return true;
+ else
+ return false;
+ }
+
+ if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ /* deleting subtransaction must have aborted */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return true;
+ }
+
+ return false;
+ }
+ else if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmin(tuple)))
+ return false;
+ else if (TransactionIdDidCommit(HeapTupleHeaderGetRawXmin(tuple)))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ HeapTupleHeaderGetRawXmin(tuple));
+ else
+ {
+ /* it must have aborted or crashed */
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ }
+
+ /* by here, the inserting transaction has committed */
+
+ if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid or aborted */
+ return true;
+
+ if (tuple->t_infomask & HEAP_XMAX_COMMITTED)
+ {
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return true;
+ return false; /* updated by other */
+ }
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId xmax;
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return true;
+
+ xmax = HeapTupleGetUpdateXid(tuple);
+
+ /* not LOCKED_ONLY, so it has to have an xmax */
+ Assert(TransactionIdIsValid(xmax));
+
+ if (TransactionIdIsCurrentTransactionId(xmax))
+ return false;
+ if (TransactionIdIsInProgress(xmax))
+ return true;
+ if (TransactionIdDidCommit(xmax))
+ return false;
+ /* it must have aborted or crashed */
+ return true;
+ }
+
+ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return true;
+ return false;
+ }
+
+ if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmax(tuple)))
+ return true;
+
+ if (!TransactionIdDidCommit(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ /* it must have aborted or crashed */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return true;
+ }
+
+ /* xmax transaction committed */
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ {
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return true;
+ }
+
+ SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
+ HeapTupleHeaderGetRawXmax(tuple));
+ return false;
+}
+
+/*
+ * HeapTupleSatisfiesAny
+ * Dummy "satisfies" routine: any tuple satisfies SnapshotAny.
+ */
+static bool
+HeapTupleSatisfiesAny(HeapTuple htup, Snapshot snapshot, Buffer buffer)
+{
+ return true;
+}
+
+/*
+ * HeapTupleSatisfiesToast
+ * True iff heap tuple is valid as a TOAST row.
+ *
+ * See SNAPSHOT_TOAST's definition for the intended behaviour.
+ *
+ * This is a simplified version that only checks for VACUUM moving conditions.
+ * It's appropriate for TOAST usage because TOAST really doesn't want to do
+ * its own time qual checks; if you can see the main table row that contains
+ * a TOAST reference, you should be able to see the TOASTed value. However,
+ * vacuuming a TOAST table is independent of the main table, and in case such
+ * a vacuum fails partway through, we'd better do this much checking.
+ *
+ * Among other things, this means you can't do UPDATEs of rows in a TOAST
+ * table.
+ */
+static bool
+HeapTupleSatisfiesToast(HeapTuple htup, Snapshot snapshot,
+ Buffer buffer)
+{
+ HeapTupleHeader tuple = htup->t_data;
+
+ Assert(ItemPointerIsValid(&htup->t_self));
+ Assert(htup->t_tableOid != InvalidOid);
+
+ if (!HeapTupleHeaderXminCommitted(tuple))
+ {
+ if (HeapTupleHeaderXminInvalid(tuple))
+ return false;
+
+ /* Used by pre-9.0 binary upgrades */
+ if (tuple->t_infomask & HEAP_MOVED_OFF)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (TransactionIdIsCurrentTransactionId(xvac))
+ return false;
+ if (!TransactionIdIsInProgress(xvac))
+ {
+ if (TransactionIdDidCommit(xvac))
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ }
+ }
+ /* Used by pre-9.0 binary upgrades */
+ else if (tuple->t_infomask & HEAP_MOVED_IN)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (!TransactionIdIsCurrentTransactionId(xvac))
+ {
+ if (TransactionIdIsInProgress(xvac))
+ return false;
+ if (TransactionIdDidCommit(xvac))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ else
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ }
+ }
+
+ /*
+ * An invalid Xmin can be left behind by a speculative insertion that
+ * is canceled by super-deleting the tuple. This also applies to
+ * TOAST tuples created during speculative insertion.
+ */
+ else if (!TransactionIdIsValid(HeapTupleHeaderGetXmin(tuple)))
+ return false;
+ }
+
+ /* otherwise assume the tuple is valid for TOAST. */
+ return true;
+}
+
+/*
+ * HeapTupleSatisfiesUpdate
+ *
+ * This function returns a more detailed result code than most of the
+ * functions in this file, since UPDATE needs to know more than "is it
+ * visible?". It also allows for user-supplied CommandId rather than
+ * relying on CurrentCommandId.
+ *
+ * The possible return codes are:
+ *
+ * TM_Invisible: the tuple didn't exist at all when the scan started, e.g. it
+ * was created by a later CommandId.
+ *
+ * TM_Ok: The tuple is valid and visible, so it may be updated.
+ *
+ * TM_SelfModified: The tuple was updated by the current transaction, after
+ * the current scan started.
+ *
+ * TM_Updated: The tuple was updated by a committed transaction (including
+ * the case where the tuple was moved into a different partition).
+ *
+ * TM_Deleted: The tuple was deleted by a committed transaction.
+ *
+ * TM_BeingModified: The tuple is being updated by an in-progress transaction
+ * other than the current transaction. (Note: this includes the case where
+ * the tuple is share-locked by a MultiXact, even if the MultiXact includes
+ * the current transaction. Callers that want to distinguish that case must
+ * test for it themselves.)
+ */
+TM_Result
+HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
+ Buffer buffer)
+{
+ HeapTupleHeader tuple = htup->t_data;
+
+ Assert(ItemPointerIsValid(&htup->t_self));
+ Assert(htup->t_tableOid != InvalidOid);
+
+ if (!HeapTupleHeaderXminCommitted(tuple))
+ {
+ if (HeapTupleHeaderXminInvalid(tuple))
+ return TM_Invisible;
+
+ /* Used by pre-9.0 binary upgrades */
+ if (tuple->t_infomask & HEAP_MOVED_OFF)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (TransactionIdIsCurrentTransactionId(xvac))
+ return TM_Invisible;
+ if (!TransactionIdIsInProgress(xvac))
+ {
+ if (TransactionIdDidCommit(xvac))
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return TM_Invisible;
+ }
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ }
+ }
+ /* Used by pre-9.0 binary upgrades */
+ else if (tuple->t_infomask & HEAP_MOVED_IN)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (!TransactionIdIsCurrentTransactionId(xvac))
+ {
+ if (TransactionIdIsInProgress(xvac))
+ return TM_Invisible;
+ if (TransactionIdDidCommit(xvac))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ else
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return TM_Invisible;
+ }
+ }
+ }
+ else if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmin(tuple)))
+ {
+ if (HeapTupleHeaderGetCmin(tuple) >= curcid)
+ return TM_Invisible; /* inserted after scan started */
+
+ if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
+ return TM_Ok;
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ {
+ TransactionId xmax;
+
+ xmax = HeapTupleHeaderGetRawXmax(tuple);
+
+ /*
+ * Careful here: even though this tuple was created by our own
+ * transaction, it might be locked by other transactions, if
+ * the original version was key-share locked when we updated
+ * it.
+ */
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ if (MultiXactIdIsRunning(xmax, true))
+ return TM_BeingModified;
+ else
+ return TM_Ok;
+ }
+
+ /*
+ * If the locker is gone, then there is nothing of interest
+ * left in this Xmax; otherwise, report the tuple as
+ * locked/updated.
+ */
+ if (!TransactionIdIsInProgress(xmax))
+ return TM_Ok;
+ return TM_BeingModified;
+ }
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId xmax;
+
+ xmax = HeapTupleGetUpdateXid(tuple);
+
+ /* not LOCKED_ONLY, so it has to have an xmax */
+ Assert(TransactionIdIsValid(xmax));
+
+ /* deleting subtransaction must have aborted */
+ if (!TransactionIdIsCurrentTransactionId(xmax))
+ {
+ if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple),
+ false))
+ return TM_BeingModified;
+ return TM_Ok;
+ }
+ else
+ {
+ if (HeapTupleHeaderGetCmax(tuple) >= curcid)
+ return TM_SelfModified; /* updated after scan started */
+ else
+ return TM_Invisible; /* updated before scan started */
+ }
+ }
+
+ if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ /* deleting subtransaction must have aborted */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return TM_Ok;
+ }
+
+ if (HeapTupleHeaderGetCmax(tuple) >= curcid)
+ return TM_SelfModified; /* updated after scan started */
+ else
+ return TM_Invisible; /* updated before scan started */
+ }
+ else if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmin(tuple)))
+ return TM_Invisible;
+ else if (TransactionIdDidCommit(HeapTupleHeaderGetRawXmin(tuple)))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ HeapTupleHeaderGetRawXmin(tuple));
+ else
+ {
+ /* it must have aborted or crashed */
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return TM_Invisible;
+ }
+ }
+
+ /* by here, the inserting transaction has committed */
+
+ if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid or aborted */
+ return TM_Ok;
+
+ if (tuple->t_infomask & HEAP_XMAX_COMMITTED)
+ {
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return TM_Ok;
+ if (!ItemPointerEquals(&htup->t_self, &tuple->t_ctid))
+ return TM_Updated; /* updated by other */
+ else
+ return TM_Deleted; /* deleted by other */
+ }
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId xmax;
+
+ if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
+ return TM_Ok;
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ {
+ if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple), true))
+ return TM_BeingModified;
+
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID, InvalidTransactionId);
+ return TM_Ok;
+ }
+
+ xmax = HeapTupleGetUpdateXid(tuple);
+ if (!TransactionIdIsValid(xmax))
+ {
+ if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple), false))
+ return TM_BeingModified;
+ }
+
+ /* not LOCKED_ONLY, so it has to have an xmax */
+ Assert(TransactionIdIsValid(xmax));
+
+ if (TransactionIdIsCurrentTransactionId(xmax))
+ {
+ if (HeapTupleHeaderGetCmax(tuple) >= curcid)
+ return TM_SelfModified; /* updated after scan started */
+ else
+ return TM_Invisible; /* updated before scan started */
+ }
+
+ if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple), false))
+ return TM_BeingModified;
+
+ if (TransactionIdDidCommit(xmax))
+ {
+ if (!ItemPointerEquals(&htup->t_self, &tuple->t_ctid))
+ return TM_Updated;
+ else
+ return TM_Deleted;
+ }
+
+ /*
+ * By here, the update in the Xmax is either aborted or crashed, but
+ * what about the other members?
+ */
+
+ if (!MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple), false))
+ {
+ /*
+ * There's no member, even just a locker, alive anymore, so we can
+ * mark the Xmax as invalid.
+ */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return TM_Ok;
+ }
+ else
+ {
+ /* There are lockers running */
+ return TM_BeingModified;
+ }
+ }
+
+ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return TM_BeingModified;
+ if (HeapTupleHeaderGetCmax(tuple) >= curcid)
+ return TM_SelfModified; /* updated after scan started */
+ else
+ return TM_Invisible; /* updated before scan started */
+ }
+
+ if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmax(tuple)))
+ return TM_BeingModified;
+
+ if (!TransactionIdDidCommit(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ /* it must have aborted or crashed */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return TM_Ok;
+ }
+
+ /* xmax transaction committed */
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ {
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return TM_Ok;
+ }
+
+ SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
+ HeapTupleHeaderGetRawXmax(tuple));
+ if (!ItemPointerEquals(&htup->t_self, &tuple->t_ctid))
+ return TM_Updated; /* updated by other */
+ else
+ return TM_Deleted; /* deleted by other */
+}
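+
+#ifdef NOT_USED
+/*
+ * Hypothetical caller sketch (not upstream code) of how an updater might
+ * react to the result codes documented above; real callers such as
+ * heap_update() do considerably more work, e.g. sleeping on lockers
+ * before retrying.
+ */
+static void
+react_to_satisfies_update(TM_Result res)
+{
+	switch (res)
+	{
+		case TM_Ok:
+			/* visible and unmodified: safe to proceed with the update */
+			break;
+		case TM_Invisible:
+			/* created after our scan started: an error for UPDATE */
+			break;
+		case TM_SelfModified:
+			/* touched by a later command in our own xact: skip it */
+			break;
+		case TM_Updated:
+		case TM_Deleted:
+			/*
+			 * Committed concurrent change: follow the update chain or
+			 * raise a serialization failure, per isolation level.
+			 */
+			break;
+		case TM_BeingModified:
+			/* concurrent xact still running: wait for it to finish */
+			break;
+		default:
+			break;
+	}
+}
+#endif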
+
+/*
+ * HeapTupleSatisfiesDirty
+ * True iff heap tuple is valid including effects of open transactions.
+ *
+ * See SNAPSHOT_DIRTY's definition for the intended behaviour.
+ *
+ * This is essentially like HeapTupleSatisfiesSelf as far as effects of
+ * the current transaction and committed/aborted xacts are concerned.
+ * However, we also include the effects of other xacts still in progress.
+ *
+ * A special hack is that the passed-in snapshot struct is used as an
+ * output argument to return the xids of concurrent xacts that affected the
+ * tuple. snapshot->xmin is set to the tuple's xmin if that is another
+ * transaction that's still in progress; or to InvalidTransactionId if the
+ * tuple's xmin is committed good, committed dead, or my own xact.
+ * Similarly for snapshot->xmax and the tuple's xmax. If the tuple was
+ * inserted speculatively, meaning that the inserter might still back down
+ * on the insertion without aborting the whole transaction, the associated
+ * token is also returned in snapshot->speculativeToken.
+ */
+static bool
+HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot,
+ Buffer buffer)
+{
+ HeapTupleHeader tuple = htup->t_data;
+
+ Assert(ItemPointerIsValid(&htup->t_self));
+ Assert(htup->t_tableOid != InvalidOid);
+
+ snapshot->xmin = snapshot->xmax = InvalidTransactionId;
+ snapshot->speculativeToken = 0;
+
+ if (!HeapTupleHeaderXminCommitted(tuple))
+ {
+ if (HeapTupleHeaderXminInvalid(tuple))
+ return false;
+
+ /* Used by pre-9.0 binary upgrades */
+ if (tuple->t_infomask & HEAP_MOVED_OFF)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (TransactionIdIsCurrentTransactionId(xvac))
+ return false;
+ if (!TransactionIdIsInProgress(xvac))
+ {
+ if (TransactionIdDidCommit(xvac))
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ }
+ }
+ /* Used by pre-9.0 binary upgrades */
+ else if (tuple->t_infomask & HEAP_MOVED_IN)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (!TransactionIdIsCurrentTransactionId(xvac))
+ {
+ if (TransactionIdIsInProgress(xvac))
+ return false;
+ if (TransactionIdDidCommit(xvac))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ else
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ }
+ }
+ else if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmin(tuple)))
+ {
+ if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
+ return true;
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ return true;
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId xmax;
+
+ xmax = HeapTupleGetUpdateXid(tuple);
+
+ /* not LOCKED_ONLY, so it has to have an xmax */
+ Assert(TransactionIdIsValid(xmax));
+
+ /* updating subtransaction must have aborted */
+ if (!TransactionIdIsCurrentTransactionId(xmax))
+ return true;
+ else
+ return false;
+ }
+
+ if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ /* deleting subtransaction must have aborted */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return true;
+ }
+
+ return false;
+ }
+ else if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmin(tuple)))
+ {
+ /*
+ * Return the speculative token to the caller.  The caller can deal
+ * with xmax itself: it requires a conclusively locked row version,
+ * so a concurrent update of this tuple would conflict with its
+ * purposes in any case.
+ */
+ if (HeapTupleHeaderIsSpeculative(tuple))
+ {
+ snapshot->speculativeToken =
+ HeapTupleHeaderGetSpeculativeToken(tuple);
+
+ Assert(snapshot->speculativeToken != 0);
+ }
+
+ snapshot->xmin = HeapTupleHeaderGetRawXmin(tuple);
+ /* XXX shouldn't we fall through to look at xmax? */
+ return true; /* in insertion by other */
+ }
+ else if (TransactionIdDidCommit(HeapTupleHeaderGetRawXmin(tuple)))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ HeapTupleHeaderGetRawXmin(tuple));
+ else
+ {
+ /* it must have aborted or crashed */
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ }
+
+ /* by here, the inserting transaction has committed */
+
+ if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid or aborted */
+ return true;
+
+ if (tuple->t_infomask & HEAP_XMAX_COMMITTED)
+ {
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return true;
+ return false; /* updated by other */
+ }
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId xmax;
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return true;
+
+ xmax = HeapTupleGetUpdateXid(tuple);
+
+ /* not LOCKED_ONLY, so it has to have an xmax */
+ Assert(TransactionIdIsValid(xmax));
+
+ if (TransactionIdIsCurrentTransactionId(xmax))
+ return false;
+ if (TransactionIdIsInProgress(xmax))
+ {
+ snapshot->xmax = xmax;
+ return true;
+ }
+ if (TransactionIdDidCommit(xmax))
+ return false;
+ /* it must have aborted or crashed */
+ return true;
+ }
+
+ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return true;
+ return false;
+ }
+
+ if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ snapshot->xmax = HeapTupleHeaderGetRawXmax(tuple);
+ return true;
+ }
+
+ if (!TransactionIdDidCommit(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ /* it must have aborted or crashed */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return true;
+ }
+
+ /* xmax transaction committed */
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ {
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return true;
+ }
+
+ SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
+ HeapTupleHeaderGetRawXmax(tuple));
+ return false; /* updated by other */
+}
+
+/*
+ * HeapTupleSatisfiesMVCC
+ * True iff heap tuple is valid for the given MVCC snapshot.
+ *
+ * See SNAPSHOT_MVCC's definition for the intended behaviour.
+ *
+ * Notice that here, we will not update the tuple status hint bits if the
+ * inserting/deleting transaction is still running according to our snapshot,
+ * even if in reality it's committed or aborted by now. This is intentional.
+ * Checking the true transaction state would require access to high-traffic
+ * shared data structures, creating contention we'd rather do without, and it
+ * would not change the result of our visibility check anyway. The hint bits
+ * will be updated by the first visitor that has a snapshot new enough to see
+ * the inserting/deleting transaction as done. In the meantime, the cost of
+ * leaving the hint bits unset is basically that each HeapTupleSatisfiesMVCC
+ * call will need to run TransactionIdIsCurrentTransactionId in addition to
+ * XidInMVCCSnapshot (but it would have to do the latter anyway). In the old
+ * coding where we tried to set the hint bits as soon as possible, we instead
+ * did TransactionIdIsInProgress in each call --- to no avail, as long as the
+ * inserting/deleting transaction was still running --- which was more cycles
+ * and more contention on ProcArrayLock.
+ */
+static bool
+HeapTupleSatisfiesMVCC(HeapTuple htup, Snapshot snapshot,
+ Buffer buffer)
+{
+ HeapTupleHeader tuple = htup->t_data;
+
+ Assert(ItemPointerIsValid(&htup->t_self));
+ Assert(htup->t_tableOid != InvalidOid);
+
+ if (!HeapTupleHeaderXminCommitted(tuple))
+ {
+ if (HeapTupleHeaderXminInvalid(tuple))
+ return false;
+
+ /* Used by pre-9.0 binary upgrades */
+ if (tuple->t_infomask & HEAP_MOVED_OFF)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (TransactionIdIsCurrentTransactionId(xvac))
+ return false;
+ if (!XidInMVCCSnapshot(xvac, snapshot))
+ {
+ if (TransactionIdDidCommit(xvac))
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ }
+ }
+ /* Used by pre-9.0 binary upgrades */
+ else if (tuple->t_infomask & HEAP_MOVED_IN)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (!TransactionIdIsCurrentTransactionId(xvac))
+ {
+ if (XidInMVCCSnapshot(xvac, snapshot))
+ return false;
+ if (TransactionIdDidCommit(xvac))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ else
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ }
+ }
+ else if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmin(tuple)))
+ {
+ if (HeapTupleHeaderGetCmin(tuple) >= snapshot->curcid)
+ return false; /* inserted after scan started */
+
+ if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
+ return true;
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ return true;
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId xmax;
+
+ xmax = HeapTupleGetUpdateXid(tuple);
+
+ /* not LOCKED_ONLY, so it has to have an xmax */
+ Assert(TransactionIdIsValid(xmax));
+
+ /* updating subtransaction must have aborted */
+ if (!TransactionIdIsCurrentTransactionId(xmax))
+ return true;
+ else if (HeapTupleHeaderGetCmax(tuple) >= snapshot->curcid)
+ return true; /* updated after scan started */
+ else
+ return false; /* updated before scan started */
+ }
+
+ if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ /* deleting subtransaction must have aborted */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return true;
+ }
+
+ if (HeapTupleHeaderGetCmax(tuple) >= snapshot->curcid)
+ return true; /* deleted after scan started */
+ else
+ return false; /* deleted before scan started */
+ }
+ else if (XidInMVCCSnapshot(HeapTupleHeaderGetRawXmin(tuple), snapshot))
+ return false;
+ else if (TransactionIdDidCommit(HeapTupleHeaderGetRawXmin(tuple)))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ HeapTupleHeaderGetRawXmin(tuple));
+ else
+ {
+ /* it must have aborted or crashed */
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return false;
+ }
+ }
+ else
+ {
+ /* xmin is committed, but maybe not according to our snapshot */
+ if (!HeapTupleHeaderXminFrozen(tuple) &&
+ XidInMVCCSnapshot(HeapTupleHeaderGetRawXmin(tuple), snapshot))
+ return false; /* treat as still in progress */
+ }
+
+ /* by here, the inserting transaction has committed */
+
+ if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid or aborted */
+ return true;
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return true;
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId xmax;
+
+ /* already checked above */
+ Assert(!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
+
+ xmax = HeapTupleGetUpdateXid(tuple);
+
+ /* not LOCKED_ONLY, so it has to have an xmax */
+ Assert(TransactionIdIsValid(xmax));
+
+ if (TransactionIdIsCurrentTransactionId(xmax))
+ {
+ if (HeapTupleHeaderGetCmax(tuple) >= snapshot->curcid)
+ return true; /* deleted after scan started */
+ else
+ return false; /* deleted before scan started */
+ }
+ if (XidInMVCCSnapshot(xmax, snapshot))
+ return true;
+ if (TransactionIdDidCommit(xmax))
+ return false; /* updating transaction committed */
+ /* it must have aborted or crashed */
+ return true;
+ }
+
+ if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED))
+ {
+ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ if (HeapTupleHeaderGetCmax(tuple) >= snapshot->curcid)
+ return true; /* deleted after scan started */
+ else
+ return false; /* deleted before scan started */
+ }
+
+ if (XidInMVCCSnapshot(HeapTupleHeaderGetRawXmax(tuple), snapshot))
+ return true;
+
+ if (!TransactionIdDidCommit(HeapTupleHeaderGetRawXmax(tuple)))
+ {
+ /* it must have aborted or crashed */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return true;
+ }
+
+ /* xmax transaction committed */
+ SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
+ HeapTupleHeaderGetRawXmax(tuple));
+ }
+ else
+ {
+ /* xmax is committed, but maybe not according to our snapshot */
+ if (XidInMVCCSnapshot(HeapTupleHeaderGetRawXmax(tuple), snapshot))
+ return true; /* treat as still in progress */
+ }
+
+ /* xmax transaction committed */
+
+ return false;
+}
+
+
+/*
+ * HeapTupleSatisfiesVacuum
+ *
+ * Determine the status of tuples for VACUUM purposes. Here, what
+ * we mainly want to know is if a tuple is potentially visible to *any*
+ * running transaction. If so, it can't be removed yet by VACUUM.
+ *
+ * OldestXmin is a cutoff XID (obtained from
+ * GetOldestNonRemovableTransactionId()). Tuples deleted by XIDs >=
+ * OldestXmin are deemed "recently dead"; they might still be visible to some
+ * open transaction, so we can't remove them, even if we see that the deleting
+ * transaction has committed.
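+ *
+ * For example (ignoring xid wraparound): with OldestXmin = 1000, a tuple
+ * whose deleter committed as xid 990 is reported HEAPTUPLE_DEAD, while one
+ * deleted by xid 1005 might still be visible to some open snapshot and is
+ * reported HEAPTUPLE_RECENTLY_DEAD.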
+ */
+HTSV_Result
+HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
+ Buffer buffer)
+{
+ TransactionId dead_after = InvalidTransactionId;
+ HTSV_Result res;
+
+ res = HeapTupleSatisfiesVacuumHorizon(htup, buffer, &dead_after);
+
+ if (res == HEAPTUPLE_RECENTLY_DEAD)
+ {
+ Assert(TransactionIdIsValid(dead_after));
+
+ if (TransactionIdPrecedes(dead_after, OldestXmin))
+ res = HEAPTUPLE_DEAD;
+ }
+ else
+ Assert(!TransactionIdIsValid(dead_after));
+
+ return res;
+}
+
+/*
+ * Workhorse for HeapTupleSatisfiesVacuum and similar routines.
+ *
+ * In contrast to HeapTupleSatisfiesVacuum this routine, when encountering a
+ * tuple that could still be visible to some backend, stores the xid that
+ * needs to be compared with the horizon in *dead_after, and returns
+ * HEAPTUPLE_RECENTLY_DEAD. The caller can then perform the comparison with
+ * the horizon.  This is useful, e.g., when the same tuple has to be
+ * compared against several different horizons.
+ *
+ * Note: HEAPTUPLE_DEAD can still be returned here, e.g. if the inserting
+ * transaction aborted.
+ */
+HTSV_Result
+HeapTupleSatisfiesVacuumHorizon(HeapTuple htup, Buffer buffer, TransactionId *dead_after)
+{
+ HeapTupleHeader tuple = htup->t_data;
+
+ Assert(ItemPointerIsValid(&htup->t_self));
+ Assert(htup->t_tableOid != InvalidOid);
+ Assert(dead_after != NULL);
+
+ *dead_after = InvalidTransactionId;
+
+ /*
+ * Has inserting transaction committed?
+ *
+ * If the inserting transaction aborted, then the tuple was never visible
+ * to any other transaction, so we can delete it immediately.
+ */
+ if (!HeapTupleHeaderXminCommitted(tuple))
+ {
+ if (HeapTupleHeaderXminInvalid(tuple))
+ return HEAPTUPLE_DEAD;
+ /* Used by pre-9.0 binary upgrades */
+ else if (tuple->t_infomask & HEAP_MOVED_OFF)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (TransactionIdIsCurrentTransactionId(xvac))
+ return HEAPTUPLE_DELETE_IN_PROGRESS;
+ if (TransactionIdIsInProgress(xvac))
+ return HEAPTUPLE_DELETE_IN_PROGRESS;
+ if (TransactionIdDidCommit(xvac))
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return HEAPTUPLE_DEAD;
+ }
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ }
+ /* Used by pre-9.0 binary upgrades */
+ else if (tuple->t_infomask & HEAP_MOVED_IN)
+ {
+ TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
+
+ if (TransactionIdIsCurrentTransactionId(xvac))
+ return HEAPTUPLE_INSERT_IN_PROGRESS;
+ if (TransactionIdIsInProgress(xvac))
+ return HEAPTUPLE_INSERT_IN_PROGRESS;
+ if (TransactionIdDidCommit(xvac))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ InvalidTransactionId);
+ else
+ {
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return HEAPTUPLE_DEAD;
+ }
+ }
+ else if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmin(tuple)))
+ {
+ if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
+ return HEAPTUPLE_INSERT_IN_PROGRESS;
+ /* only locked? run infomask-only check first, for performance */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) ||
+ HeapTupleHeaderIsOnlyLocked(tuple))
+ return HEAPTUPLE_INSERT_IN_PROGRESS;
+ /* inserted and then deleted by same xact */
+ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tuple)))
+ return HEAPTUPLE_DELETE_IN_PROGRESS;
+ /* deleting subtransaction must have aborted */
+ return HEAPTUPLE_INSERT_IN_PROGRESS;
+ }
+ else if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmin(tuple)))
+ {
+ /*
+ * It'd be possible to distinguish between INSERT/DELETE in progress
+ * here by looking at xmax - but that doesn't seem beneficial for
+ * the majority of callers and even detrimental for some. We'd
+ * rather have callers look at/wait for xmin than xmax. It's
+ * always correct to return INSERT_IN_PROGRESS because that's
+ * what's happening from the view of other backends.
+ */
+ return HEAPTUPLE_INSERT_IN_PROGRESS;
+ }
+ else if (TransactionIdDidCommit(HeapTupleHeaderGetRawXmin(tuple)))
+ SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED,
+ HeapTupleHeaderGetRawXmin(tuple));
+ else
+ {
+ /*
+ * Not in Progress, Not Committed, so either Aborted or crashed
+ */
+ SetHintBits(tuple, buffer, HEAP_XMIN_INVALID,
+ InvalidTransactionId);
+ return HEAPTUPLE_DEAD;
+ }
+
+ /*
+ * At this point the xmin is known committed, but we might not have
+ * been able to set the hint bit yet; so we can no longer Assert that
+ * it's set.
+ */
+ }
+
+ /*
+ * Okay, the inserter committed, so it was good at some point. Now what
+ * about the deleting transaction?
+ */
+ if (tuple->t_infomask & HEAP_XMAX_INVALID)
+ return HEAPTUPLE_LIVE;
+
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ {
+ /*
+ * "Deleting" xact really only locked it, so the tuple is live in any
+ * case. However, we should make sure that either XMAX_COMMITTED or
+ * XMAX_INVALID gets set once the xact is gone, to reduce the costs of
+ * examining the tuple for future xacts.
+ */
+ if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED))
+ {
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ /*
+ * If it's a pre-pg_upgrade tuple, the multixact cannot
+ * possibly be running; otherwise have to check.
+ */
+ if (!HEAP_LOCKED_UPGRADED(tuple->t_infomask) &&
+ MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple),
+ true))
+ return HEAPTUPLE_LIVE;
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID, InvalidTransactionId);
+ }
+ else
+ {
+ if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmax(tuple)))
+ return HEAPTUPLE_LIVE;
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ }
+ }
+
+ /*
+ * We don't really care whether xmax did commit, abort or crash. We
+ * know that xmax did lock the tuple, but it did not and will never
+ * actually update it.
+ */
+
+ return HEAPTUPLE_LIVE;
+ }
+
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ TransactionId xmax = HeapTupleGetUpdateXid(tuple);
+
+ /* already checked above */
+ Assert(!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
+
+ /* not LOCKED_ONLY, so it has to have an xmax */
+ Assert(TransactionIdIsValid(xmax));
+
+ if (TransactionIdIsInProgress(xmax))
+ return HEAPTUPLE_DELETE_IN_PROGRESS;
+ else if (TransactionIdDidCommit(xmax))
+ {
+ /*
+ * The multixact might still be running due to lockers. Need to
+ * allow for pruning if below the xid horizon regardless --
+ * otherwise we could end up with a tuple where the updater has to
+ * be removed due to the horizon, but is not pruned away. It's
+ * not a problem to prune that tuple, because any remaining
+ * lockers will also be present in newer tuple versions.
+ */
+ *dead_after = xmax;
+ return HEAPTUPLE_RECENTLY_DEAD;
+ }
+ else if (!MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple), false))
+ {
+ /*
+ * Not in Progress, Not Committed, so either Aborted or crashed.
+ * Mark the Xmax as invalid.
+ */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID, InvalidTransactionId);
+ }
+
+ return HEAPTUPLE_LIVE;
+ }
+
+ if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED))
+ {
+ if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmax(tuple)))
+ return HEAPTUPLE_DELETE_IN_PROGRESS;
+ else if (TransactionIdDidCommit(HeapTupleHeaderGetRawXmax(tuple)))
+ SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
+ HeapTupleHeaderGetRawXmax(tuple));
+ else
+ {
+ /*
+ * Not in Progress, Not Committed, so either Aborted or crashed
+ */
+ SetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
+ InvalidTransactionId);
+ return HEAPTUPLE_LIVE;
+ }
+
+ /*
+ * At this point the xmax is known committed, but we might not have
+ * been able to set the hint bit yet; so we can no longer Assert that
+ * it's set.
+ */
+ }
+
+ /*
+ * Deleter committed, allow caller to check if it was recent enough that
+ * some open transactions could still see the tuple.
+ */
+ *dead_after = HeapTupleHeaderGetRawXmax(tuple);
+ return HEAPTUPLE_RECENTLY_DEAD;
+}
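+
+/*
+ * Editorial note, not upstream text: the function above returns
+ * HEAPTUPLE_DEAD directly only in horizon-independent cases (e.g. an
+ * aborted inserter).  When the deleting transaction committed, it instead
+ * returns HEAPTUPLE_RECENTLY_DEAD and reports the deleting XID in
+ * *dead_after, leaving each caller to compare that XID against its own
+ * horizon; see HeapTupleSatisfiesNonVacuumable below for one such caller.
+ */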
+
+
+/*
+ * HeapTupleSatisfiesNonVacuumable
+ *
+ * True if tuple might be visible to some transaction; false if it's
+ * surely dead to everyone, ie, vacuumable.
+ *
+ * See SNAPSHOT_NON_VACUUMABLE's definition for the intended behaviour.
+ *
+ * This is an interface to HeapTupleSatisfiesVacuum that's callable via
+ * HeapTupleSatisfiesSnapshot, so it can be used through a Snapshot.
+ * snapshot->vistest must have been set up with the horizon to use.
+ */
+static bool
+HeapTupleSatisfiesNonVacuumable(HeapTuple htup, Snapshot snapshot,
+ Buffer buffer)
+{
+ TransactionId dead_after = InvalidTransactionId;
+ HTSV_Result res;
+
+ res = HeapTupleSatisfiesVacuumHorizon(htup, buffer, &dead_after);
+
+ if (res == HEAPTUPLE_RECENTLY_DEAD)
+ {
+ Assert(TransactionIdIsValid(dead_after));
+
+ if (GlobalVisTestIsRemovableXid(snapshot->vistest, dead_after))
+ res = HEAPTUPLE_DEAD;
+ }
+ else
+ Assert(!TransactionIdIsValid(dead_after));
+
+ return res != HEAPTUPLE_DEAD;
+}
+
+
+/*
+ * HeapTupleIsSurelyDead
+ *
+ * Cheaply determine whether a tuple is surely dead to all onlookers.
+ * We sometimes use this in lieu of HeapTupleSatisfiesVacuum when the
+ * tuple has just been tested by another visibility routine (usually
+ * HeapTupleSatisfiesMVCC) and, therefore, any hint bits that can be set
+ * should already be set. We assume that if no hint bits are set, the xmin
+ * or xmax transaction is still running. This is therefore faster than
+ * HeapTupleSatisfiesVacuum, because we consult neither procarray nor CLOG.
+ * It's okay to return false when in doubt, but we must return true only
+ * if the tuple is removable.
+ */
+bool
+HeapTupleIsSurelyDead(HeapTuple htup, GlobalVisState *vistest)
+{
+ HeapTupleHeader tuple = htup->t_data;
+
+ Assert(ItemPointerIsValid(&htup->t_self));
+ Assert(htup->t_tableOid != InvalidOid);
+
+ /*
+ * If the inserting transaction is marked invalid, then it aborted, and
+ * the tuple is definitely dead. If it's marked neither committed nor
+ * invalid, then we assume it's still alive (since the presumption is that
+ * all relevant hint bits were just set moments ago).
+ */
+ if (!HeapTupleHeaderXminCommitted(tuple))
+ return HeapTupleHeaderXminInvalid(tuple) ? true : false;
+
+ /*
+ * If the inserting transaction committed, but any deleting transaction
+ * aborted, the tuple is still alive.
+ */
+ if (tuple->t_infomask & HEAP_XMAX_INVALID)
+ return false;
+
+ /*
+ * If the XMAX is just a lock, the tuple is still alive.
+ */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return false;
+
+ /*
+ * If the Xmax is a MultiXact, it might be dead or alive, but we cannot
+ * know without checking pg_multixact.
+ */
+ if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ return false;
+
+ /* If deleter isn't known to have committed, assume it's still running. */
+ if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED))
+ return false;
+
+ /* Deleter committed, so tuple is dead if the XID is old enough. */
+ return GlobalVisTestIsRemovableXid(vistest,
+ HeapTupleHeaderGetRawXmax(tuple));
+}
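+
+/*
+ * Illustrative summary of the above (an editorial sketch, not upstream
+ * text):
+ *
+ *   xmin hint bits     xmax state                          result
+ *   ----------------   ---------------------------------   ------------------
+ *   XMIN_INVALID       (any)                               true (surely dead)
+ *   neither bit set    (any)                               false
+ *   XMIN_COMMITTED     invalid / lock-only / multixact /   false
+ *                      not hinted committed
+ *   XMIN_COMMITTED     XMAX_COMMITTED                      true iff xmax is
+ *                                                          below the horizon
+ */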
+
+/*
+ * Is the tuple really only locked? That is, is it not updated?
+ *
+ * It's easy to check just infomask bits if the locker is not a multi; but
+ * otherwise we need to verify that the updating transaction has not aborted.
+ *
+ * This function is here because it follows the same visibility rules laid out
+ * at the top of this file.
+ */
+bool
+HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
+{
+ TransactionId xmax;
+
+ /* if there's no valid Xmax, then there's obviously no update either */
+ if (tuple->t_infomask & HEAP_XMAX_INVALID)
+ return true;
+
+ if (tuple->t_infomask & HEAP_XMAX_LOCK_ONLY)
+ return true;
+
+ /* invalid xmax means no update */
+ if (!TransactionIdIsValid(HeapTupleHeaderGetRawXmax(tuple)))
+ return true;
+
+ /*
+ * if HEAP_XMAX_LOCK_ONLY is not set and not a multi, then this must
+ * necessarily have been updated
+ */
+ if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
+ return false;
+
+ /* ... but if it's a multi, then perhaps the updating Xid aborted. */
+ xmax = HeapTupleGetUpdateXid(tuple);
+
+ /* not LOCKED_ONLY, so it has to have an xmax */
+ Assert(TransactionIdIsValid(xmax));
+
+ if (TransactionIdIsCurrentTransactionId(xmax))
+ return false;
+ if (TransactionIdIsInProgress(xmax))
+ return false;
+ if (TransactionIdDidCommit(xmax))
+ return false;
+
+ /*
+ * not current, not in progress, not committed -- must have aborted or
+ * crashed
+ */
+ return true;
+}
+
+/*
+ * check whether the transaction id 'xid' is in the pre-sorted array 'xip'.
+ */
+static bool
+TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
+{
+ return num > 0 &&
+ bsearch(&xid, xip, num, sizeof(TransactionId), xidComparator) != NULL;
+}
+
+/*
+ * See the comments for HeapTupleSatisfiesMVCC for the semantics this function
+ * obeys.
+ *
+ * Only usable on tuples from catalog tables!
+ *
+ * We don't need to support HEAP_MOVED_(IN|OFF) for now because we only support
+ * reading catalog pages which couldn't have been created in an older version.
+ *
+ * We don't set any hint bits in here, as it seems unlikely to be beneficial:
+ * those should already be set by normal access, and it seems too dangerous
+ * to set them here anyway, since the semantics of doing so during timetravel
+ * are more complicated than when dealing "only" with the present.
+ */
+static bool
+HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
+ Buffer buffer)
+{
+ HeapTupleHeader tuple = htup->t_data;
+ TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
+ TransactionId xmax = HeapTupleHeaderGetRawXmax(tuple);
+
+ Assert(ItemPointerIsValid(&htup->t_self));
+ Assert(htup->t_tableOid != InvalidOid);
+
+ /* inserting transaction aborted */
+ if (HeapTupleHeaderXminInvalid(tuple))
+ {
+ Assert(!TransactionIdDidCommit(xmin));
+ return false;
+ }
+ /* check if it's one of our txids; the toplevel xid is also in there */
+ else if (TransactionIdInArray(xmin, snapshot->subxip, snapshot->subxcnt))
+ {
+ bool resolved;
+ CommandId cmin = HeapTupleHeaderGetRawCommandId(tuple);
+ CommandId cmax = InvalidCommandId;
+
+ /*
+ * Another transaction might have (tried to) delete this tuple, or
+ * cmin/cmax was stored in a combo CID. So we need to look up the
+ * actual values externally.
+ */
+ resolved = ResolveCminCmaxDuringDecoding(HistoricSnapshotGetTupleCids(), snapshot,
+ htup, buffer,
+ &cmin, &cmax);
+
+ /*
+ * If we haven't resolved the combo CID to cmin/cmax, that means we
+ * have not decoded the combo CID yet. That means the cmin is
+ * definitely in the future, and we're not supposed to see the tuple
+ * yet.
+ *
+ * XXX This only applies to decoding of in-progress transactions. In
+ * regular logical decoding we only execute this code at commit time,
+ * at which point we should have seen all relevant combo CIDs. So
+ * ideally, we should error out in this case but in practice, this
+ * won't happen. If we are too worried about this then we can add an
+ * elog inside ResolveCminCmaxDuringDecoding.
+ *
+ * XXX For the streaming case, we can track the largest combo CID
+ * assigned, and error out based on this (when unable to resolve combo
+ * CID below that observed maximum value).
+ */
+ if (!resolved)
+ return false;
+
+ Assert(cmin != InvalidCommandId);
+
+ if (cmin >= snapshot->curcid)
+ return false; /* inserted after scan started */
+ /* fall through */
+ }
+ /* committed before our xmin horizon. Do a normal visibility check. */
+ else if (TransactionIdPrecedes(xmin, snapshot->xmin))
+ {
+ Assert(!(HeapTupleHeaderXminCommitted(tuple) &&
+ !TransactionIdDidCommit(xmin)));
+
+ /* check for hint bit first, consult clog afterwards */
+ if (!HeapTupleHeaderXminCommitted(tuple) &&
+ !TransactionIdDidCommit(xmin))
+ return false;
+ /* fall through */
+ }
+ /* beyond our xmax horizon, i.e. invisible */
+ else if (TransactionIdFollowsOrEquals(xmin, snapshot->xmax))
+ {
+ return false;
+ }
+ /* check if it's a committed transaction in [xmin, xmax) */
+ else if (TransactionIdInArray(xmin, snapshot->xip, snapshot->xcnt))
+ {
+ /* fall through */
+ }
+
+ /*
+ * None of the above: xmin lies in [xmin, xmax) but hasn't committed,
+ * i.e. the tuple is invisible.
+ */
+ else
+ {
+ return false;
+ }
+
+ /* at this point we know xmin is visible, go on to check xmax */
+
+ /* xid invalid or aborted */
+ if (tuple->t_infomask & HEAP_XMAX_INVALID)
+ return true;
+ /* locked tuples are always visible */
+ else if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
+ return true;
+
+ /*
+ * We can see multis here if we're looking at user tables or if somebody
+ * ran SELECT ... FOR SHARE/UPDATE on a system table.
+ */
+ else if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+ {
+ xmax = HeapTupleGetUpdateXid(tuple);
+ }
+
+ /* check if it's one of our txids; the toplevel xid is also in there */
+ if (TransactionIdInArray(xmax, snapshot->subxip, snapshot->subxcnt))
+ {
+ bool resolved;
+ CommandId cmin;
+ CommandId cmax = HeapTupleHeaderGetRawCommandId(tuple);
+
+ /* Lookup actual cmin/cmax values */
+ resolved = ResolveCminCmaxDuringDecoding(HistoricSnapshotGetTupleCids(), snapshot,
+ htup, buffer,
+ &cmin, &cmax);
+
+ /*
+ * If we haven't resolved the combo CID to cmin/cmax, that means we
+ * have not decoded the combo CID yet. That means the cmax is
+ * definitely in the future, and we're still supposed to see the
+ * tuple.
+ *
+ * XXX This only applies to decoding of in-progress transactions. In
+ * regular logical decoding we only execute this code at commit time,
+ * at which point we should have seen all relevant combo CIDs. So
+ * ideally, we should error out in this case but in practice, this
+ * won't happen. If we are too worried about this then we can add an
+ * elog inside ResolveCminCmaxDuringDecoding.
+ *
+ * XXX For the streaming case, we can track the largest combo CID
+ * assigned, and error out based on this (when unable to resolve combo
+ * CID below that observed maximum value).
+ */
+ if (!resolved || cmax == InvalidCommandId)
+ return true;
+
+ if (cmax >= snapshot->curcid)
+ return true; /* deleted after scan started */
+ else
+ return false; /* deleted before scan started */
+ }
+ /* below xmin horizon, normal transaction state is valid */
+ else if (TransactionIdPrecedes(xmax, snapshot->xmin))
+ {
+ Assert(!(tuple->t_infomask & HEAP_XMAX_COMMITTED &&
+ !TransactionIdDidCommit(xmax)));
+
+ /* check hint bit first */
+ if (tuple->t_infomask & HEAP_XMAX_COMMITTED)
+ return false;
+
+ /* check clog */
+ return !TransactionIdDidCommit(xmax);
+ }
+ /* above xmax horizon, we cannot possibly see the deleting transaction */
+ else if (TransactionIdFollowsOrEquals(xmax, snapshot->xmax))
+ return true;
+ /* xmax is between [xmin, xmax), check known committed array */
+ else if (TransactionIdInArray(xmax, snapshot->xip, snapshot->xcnt))
+ return false;
+ /* xmax is between [xmin, xmax), but known not to have committed yet */
+ else
+ return true;
+}
+
+/*
+ * HeapTupleSatisfiesVisibility
+ * True iff heap tuple satisfies a time qual.
+ *
+ * Notes:
+ * Assumes heap tuple is valid, and buffer at least share locked.
+ *
+ * Hint bits in the HeapTuple's t_infomask may be updated as a side effect;
+ * if so, the indicated buffer is marked dirty.
+ */
+bool
+HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)
+{
+ switch (snapshot->snapshot_type)
+ {
+ case SNAPSHOT_MVCC:
+ return HeapTupleSatisfiesMVCC(tup, snapshot, buffer);
+ break;
+ case SNAPSHOT_SELF:
+ return HeapTupleSatisfiesSelf(tup, snapshot, buffer);
+ break;
+ case SNAPSHOT_ANY:
+ return HeapTupleSatisfiesAny(tup, snapshot, buffer);
+ break;
+ case SNAPSHOT_TOAST:
+ return HeapTupleSatisfiesToast(tup, snapshot, buffer);
+ break;
+ case SNAPSHOT_DIRTY:
+ return HeapTupleSatisfiesDirty(tup, snapshot, buffer);
+ break;
+ case SNAPSHOT_HISTORIC_MVCC:
+ return HeapTupleSatisfiesHistoricMVCC(tup, snapshot, buffer);
+ break;
+ case SNAPSHOT_NON_VACUUMABLE:
+ return HeapTupleSatisfiesNonVacuumable(tup, snapshot, buffer);
+ break;
+ }
+
+ return false; /* keep compiler quiet */
+}
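+
+/*
+ * Illustrative caller pattern (an editorial sketch, not upstream code).  Per
+ * the Notes above, the buffer must be at least share-locked while the test
+ * runs, because hint bits may be set on the tuple as a side effect:
+ *
+ *     LockBuffer(buffer, BUFFER_LOCK_SHARE);
+ *     valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
+ *     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ *     if (valid)
+ *         ... process loctup ...
+ */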
diff --git a/src/backend/access/heap/heaptoast.c b/src/backend/access/heap/heaptoast.c
new file mode 100644
index 0000000..55bbe1d
--- /dev/null
+++ b/src/backend/access/heap/heaptoast.c
@@ -0,0 +1,793 @@
+/*-------------------------------------------------------------------------
+ *
+ * heaptoast.c
+ * Heap-specific definitions for external and compressed storage
+ * of variable size attributes.
+ *
+ * Copyright (c) 2000-2021, PostgreSQL Global Development Group
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/access/heap/heaptoast.c
+ *
+ *
+ * INTERFACE ROUTINES
+ * heap_toast_insert_or_update -
+ * Try to make a given tuple fit into one page by compressing
+ * or moving off attributes
+ *
+ * heap_toast_delete -
+ * Reclaim toast storage when a tuple is deleted
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/detoast.h"
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "access/heaptoast.h"
+#include "access/toast_helper.h"
+#include "access/toast_internals.h"
+#include "utils/fmgroids.h"
+
+
+/* ----------
+ * heap_toast_delete -
+ *
+ * Cascaded delete of toast entries on DELETE
+ * ----------
+ */
+void
+heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
+{
+ TupleDesc tupleDesc;
+ Datum toast_values[MaxHeapAttributeNumber];
+ bool toast_isnull[MaxHeapAttributeNumber];
+
+ /*
+ * We should only ever be called for tuples of plain relations or
+ * materialized views --- recursing on a toast rel is bad news.
+ */
+ Assert(rel->rd_rel->relkind == RELKIND_RELATION ||
+ rel->rd_rel->relkind == RELKIND_MATVIEW);
+
+ /*
+ * Get the tuple descriptor and break down the tuple into fields.
+ *
+ * NOTE: it's debatable whether to use heap_deform_tuple() here or to use
+ * heap_getattr() on just the varlena columns. The latter could win if there
+ * are few varlena columns and many non-varlena ones. However,
+ * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
+ * O(N^2) if there are many varlena columns, so it seems better to err on
+ * the side of linear cost. (We won't even be here unless there's at
+ * least one varlena column, by the way.)
+ */
+ tupleDesc = rel->rd_att;
+
+ Assert(tupleDesc->natts <= MaxHeapAttributeNumber);
+ heap_deform_tuple(oldtup, tupleDesc, toast_values, toast_isnull);
+
+ /* Do the real work. */
+ toast_delete_external(rel, toast_values, toast_isnull, is_speculative);
+}
+
+
+/* ----------
+ * heap_toast_insert_or_update -
+ *
+ * Delete no-longer-used toast-entries and create new ones to
+ * make the new tuple fit on INSERT or UPDATE
+ *
+ * Inputs:
+ * newtup: the candidate new tuple to be inserted
+ * oldtup: the old row version for UPDATE, or NULL for INSERT
+ * options: options to be passed to heap_insert() for toast rows
+ * Result:
+ * either newtup if no toasting is needed, or a palloc'd modified tuple
+ * that is what should actually get stored
+ *
+ * NOTE: neither newtup nor oldtup will be modified. This is a change
+ * from the pre-8.1 API of this routine.
+ * ----------
+ */
+HeapTuple
+heap_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
+ int options)
+{
+ HeapTuple result_tuple;
+ TupleDesc tupleDesc;
+ int numAttrs;
+
+ Size maxDataLen;
+ Size hoff;
+
+ bool toast_isnull[MaxHeapAttributeNumber];
+ bool toast_oldisnull[MaxHeapAttributeNumber];
+ Datum toast_values[MaxHeapAttributeNumber];
+ Datum toast_oldvalues[MaxHeapAttributeNumber];
+ ToastAttrInfo toast_attr[MaxHeapAttributeNumber];
+ ToastTupleContext ttc;
+
+ /*
+ * Ignore the INSERT_SPECULATIVE option. Speculative insertions/super
+ * deletions just normally insert/delete the toast values. It seems
+ * easiest to deal with that here, instead of in, potentially, multiple
+ * callers.
+ */
+ options &= ~HEAP_INSERT_SPECULATIVE;
+
+ /*
+ * We should only ever be called for tuples of plain relations or
+ * materialized views --- recursing on a toast rel is bad news.
+ */
+ Assert(rel->rd_rel->relkind == RELKIND_RELATION ||
+ rel->rd_rel->relkind == RELKIND_MATVIEW);
+
+ /*
+ * Get the tuple descriptor and break down the tuple(s) into fields.
+ */
+ tupleDesc = rel->rd_att;
+ numAttrs = tupleDesc->natts;
+
+ Assert(numAttrs <= MaxHeapAttributeNumber);
+ heap_deform_tuple(newtup, tupleDesc, toast_values, toast_isnull);
+ if (oldtup != NULL)
+ heap_deform_tuple(oldtup, tupleDesc, toast_oldvalues, toast_oldisnull);
+
+ /* ----------
+ * Prepare for toasting
+ * ----------
+ */
+ ttc.ttc_rel = rel;
+ ttc.ttc_values = toast_values;
+ ttc.ttc_isnull = toast_isnull;
+ if (oldtup == NULL)
+ {
+ ttc.ttc_oldvalues = NULL;
+ ttc.ttc_oldisnull = NULL;
+ }
+ else
+ {
+ ttc.ttc_oldvalues = toast_oldvalues;
+ ttc.ttc_oldisnull = toast_oldisnull;
+ }
+ ttc.ttc_attr = toast_attr;
+ toast_tuple_init(&ttc);
+
+ /* ----------
+ * Compress and/or save external until data fits into target length
+ *
+ * 1: Inline compress attributes with attstorage EXTENDED, and store very
+ * large attributes with attstorage EXTENDED or EXTERNAL external
+ * immediately
+ * 2: Store attributes with attstorage EXTENDED or EXTERNAL external
+ * 3: Inline compress attributes with attstorage MAIN
+ * 4: Store attributes with attstorage MAIN external
+ * ----------
+ */
+
+ /* compute header overhead --- this should match heap_form_tuple() */
+ hoff = SizeofHeapTupleHeader;
+ if ((ttc.ttc_flags & TOAST_HAS_NULLS) != 0)
+ hoff += BITMAPLEN(numAttrs);
+ hoff = MAXALIGN(hoff);
+ /* now convert to a limit on the tuple data size */
+ maxDataLen = RelationGetToastTupleTarget(rel, TOAST_TUPLE_TARGET) - hoff;
+
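+ /*
+ * Worked example (editorial; figures assume a default 8 kB-page build,
+ * where TOAST_TUPLE_TARGET is 2032 and SizeofHeapTupleHeader is 23
+ * bytes): with no nulls, hoff = MAXALIGN(23) = 24, giving maxDataLen =
+ * 2032 - 24 = 2008 bytes of inline tuple data before toasting kicks in.
+ */
+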
+ /*
+ * Look for attributes with attstorage EXTENDED to compress. Also find
+ * large attributes with attstorage EXTENDED or EXTERNAL, and store them
+ * external.
+ */
+ while (heap_compute_data_size(tupleDesc,
+ toast_values, toast_isnull) > maxDataLen)
+ {
+ int biggest_attno;
+
+ biggest_attno = toast_tuple_find_biggest_attribute(&ttc, true, false);
+ if (biggest_attno < 0)
+ break;
+
+ /*
+ * Attempt to compress it inline, if it has attstorage EXTENDED
+ */
+ if (TupleDescAttr(tupleDesc, biggest_attno)->attstorage == TYPSTORAGE_EXTENDED)
+ toast_tuple_try_compression(&ttc, biggest_attno);
+ else
+ {
+ /*
+ * has attstorage EXTERNAL, ignore on subsequent compression
+ * passes
+ */
+ toast_attr[biggest_attno].tai_colflags |= TOASTCOL_INCOMPRESSIBLE;
+ }
+
+ /*
+ * If this value is by itself more than maxDataLen (after compression
+ * if any), push it out to the toast table immediately, if possible.
+ * This avoids uselessly compressing other fields in the common case
+ * where we have one long field and several short ones.
+ *
+ * XXX maybe the threshold should be less than maxDataLen?
+ */
+ if (toast_attr[biggest_attno].tai_size > maxDataLen &&
+ rel->rd_rel->reltoastrelid != InvalidOid)
+ toast_tuple_externalize(&ttc, biggest_attno, options);
+ }
+
+ /*
+ * Second we look for attributes of attstorage EXTENDED or EXTERNAL that
+ * are still inline, and make them external. But skip this if there's no
+ * toast table to push them to.
+ */
+ while (heap_compute_data_size(tupleDesc,
+ toast_values, toast_isnull) > maxDataLen &&
+ rel->rd_rel->reltoastrelid != InvalidOid)
+ {
+ int biggest_attno;
+
+ biggest_attno = toast_tuple_find_biggest_attribute(&ttc, false, false);
+ if (biggest_attno < 0)
+ break;
+ toast_tuple_externalize(&ttc, biggest_attno, options);
+ }
+
+ /*
+ * Round 3 - this time we take attributes with storage MAIN into
+ * compression
+ */
+ while (heap_compute_data_size(tupleDesc,
+ toast_values, toast_isnull) > maxDataLen)
+ {
+ int biggest_attno;
+
+ biggest_attno = toast_tuple_find_biggest_attribute(&ttc, true, true);
+ if (biggest_attno < 0)
+ break;
+
+ toast_tuple_try_compression(&ttc, biggest_attno);
+ }
+
+ /*
+ * Finally we store attributes of type MAIN externally. At this point we
+ * increase the target tuple size, so that MAIN attributes aren't stored
+ * externally unless really necessary.
+ */
+ maxDataLen = TOAST_TUPLE_TARGET_MAIN - hoff;
+
+ while (heap_compute_data_size(tupleDesc,
+ toast_values, toast_isnull) > maxDataLen &&
+ rel->rd_rel->reltoastrelid != InvalidOid)
+ {
+ int biggest_attno;
+
+ biggest_attno = toast_tuple_find_biggest_attribute(&ttc, false, true);
+ if (biggest_attno < 0)
+ break;
+
+ toast_tuple_externalize(&ttc, biggest_attno, options);
+ }
+
+ /*
+ * If we toasted any values, we need to build a new heap tuple with the
+ * changed values.
+ */
+ if ((ttc.ttc_flags & TOAST_NEEDS_CHANGE) != 0)
+ {
+ HeapTupleHeader olddata = newtup->t_data;
+ HeapTupleHeader new_data;
+ int32 new_header_len;
+ int32 new_data_len;
+ int32 new_tuple_len;
+
+ /*
+ * Calculate the new size of the tuple.
+ *
+ * Note: we used to assume here that the old tuple's t_hoff must equal
+ * the new_header_len value, but that was incorrect. The old tuple
+ * might have a smaller-than-current natts, if there's been an ALTER
+ * TABLE ADD COLUMN since it was stored; and that would lead to a
+ * different conclusion about the size of the null bitmap, or even
+ * whether there needs to be one at all.
+ */
+ new_header_len = SizeofHeapTupleHeader;
+ if ((ttc.ttc_flags & TOAST_HAS_NULLS) != 0)
+ new_header_len += BITMAPLEN(numAttrs);
+ new_header_len = MAXALIGN(new_header_len);
+ new_data_len = heap_compute_data_size(tupleDesc,
+ toast_values, toast_isnull);
+ new_tuple_len = new_header_len + new_data_len;
+
+ /*
+ * Allocate and zero the space needed, and fill HeapTupleData fields.
+ */
+ result_tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + new_tuple_len);
+ result_tuple->t_len = new_tuple_len;
+ result_tuple->t_self = newtup->t_self;
+ result_tuple->t_tableOid = newtup->t_tableOid;
+ new_data = (HeapTupleHeader) ((char *) result_tuple + HEAPTUPLESIZE);
+ result_tuple->t_data = new_data;
+
+ /*
+ * Copy the existing tuple header, but adjust natts and t_hoff.
+ */
+ memcpy(new_data, olddata, SizeofHeapTupleHeader);
+ HeapTupleHeaderSetNatts(new_data, numAttrs);
+ new_data->t_hoff = new_header_len;
+
+ /* Copy over the data, and fill the null bitmap if needed */
+ heap_fill_tuple(tupleDesc,
+ toast_values,
+ toast_isnull,
+ (char *) new_data + new_header_len,
+ new_data_len,
+ &(new_data->t_infomask),
+ ((ttc.ttc_flags & TOAST_HAS_NULLS) != 0) ?
+ new_data->t_bits : NULL);
+ }
+ else
+ result_tuple = newtup;
+
+ toast_tuple_cleanup(&ttc);
+
+ return result_tuple;
+}
+
+
+/* ----------
+ * toast_flatten_tuple -
+ *
+ * "Flatten" a tuple to contain no out-of-line toasted fields.
+ * (This does not eliminate compressed or short-header datums.)
+ *
+ * Note: we expect the caller already checked HeapTupleHasExternal(tup),
+ * so there is no need for a short-circuit path.
+ * ----------
+ */
+HeapTuple
+toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
+{
+ HeapTuple new_tuple;
+ int numAttrs = tupleDesc->natts;
+ int i;
+ Datum toast_values[MaxTupleAttributeNumber];
+ bool toast_isnull[MaxTupleAttributeNumber];
+ bool toast_free[MaxTupleAttributeNumber];
+
+ /*
+ * Break down the tuple into fields.
+ */
+ Assert(numAttrs <= MaxTupleAttributeNumber);
+ heap_deform_tuple(tup, tupleDesc, toast_values, toast_isnull);
+
+ memset(toast_free, 0, numAttrs * sizeof(bool));
+
+ for (i = 0; i < numAttrs; i++)
+ {
+ /*
+ * Look at non-null varlena attributes
+ */
+ if (!toast_isnull[i] && TupleDescAttr(tupleDesc, i)->attlen == -1)
+ {
+ struct varlena *new_value;
+
+ new_value = (struct varlena *) DatumGetPointer(toast_values[i]);
+ if (VARATT_IS_EXTERNAL(new_value))
+ {
+ new_value = detoast_external_attr(new_value);
+ toast_values[i] = PointerGetDatum(new_value);
+ toast_free[i] = true;
+ }
+ }
+ }
+
+ /*
+ * Form the reconfigured tuple.
+ */
+ new_tuple = heap_form_tuple(tupleDesc, toast_values, toast_isnull);
+
+ /*
+ * Be sure to copy the tuple's identity fields. We also make a point of
+ * copying visibility info, just in case anybody looks at those fields in
+ * a syscache entry.
+ */
+ new_tuple->t_self = tup->t_self;
+ new_tuple->t_tableOid = tup->t_tableOid;
+
+ new_tuple->t_data->t_choice = tup->t_data->t_choice;
+ new_tuple->t_data->t_ctid = tup->t_data->t_ctid;
+ new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
+ new_tuple->t_data->t_infomask |=
+ tup->t_data->t_infomask & HEAP_XACT_MASK;
+ new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
+ new_tuple->t_data->t_infomask2 |=
+ tup->t_data->t_infomask2 & HEAP2_XACT_MASK;
+
+ /*
+ * Free allocated temp values
+ */
+ for (i = 0; i < numAttrs; i++)
+ if (toast_free[i])
+ pfree(DatumGetPointer(toast_values[i]));
+
+ return new_tuple;
+}
+
+
+/* ----------
+ * toast_flatten_tuple_to_datum -
+ *
+ * "Flatten" a tuple containing out-of-line toasted fields into a Datum.
+ * The result is always palloc'd in the current memory context.
+ *
+ * We have a general rule that Datums of container types (rows, arrays,
+ * ranges, etc) must not contain any external TOAST pointers. Without
+ * this rule, we'd have to look inside each Datum when preparing a tuple
+ * for storage, which would be expensive and would fail to extend cleanly
+ * to new sorts of container types.
+ *
+ * However, we don't want to say that tuples represented as HeapTuples
+ * can't contain toasted fields, so instead this routine should be called
+ * when such a HeapTuple is being converted into a Datum.
+ *
+ * While we're at it, we decompress any compressed fields too. This is not
+ * necessary for correctness, but reflects an expectation that compression
+ * will be more effective if applied to the whole tuple not individual
+ * fields. We are not so concerned about that that we want to deconstruct
+ * and reconstruct tuples just to get rid of compressed fields, however.
+ * So callers typically won't call this unless they see that the tuple has
+ * at least one external field.
+ *
+ * On the other hand, in-line short-header varlena fields are left alone.
+ * If we "untoasted" them here, they'd just get changed back to short-header
+ * format anyway within heap_fill_tuple.
+ * ----------
+ */
+Datum
+toast_flatten_tuple_to_datum(HeapTupleHeader tup,
+ uint32 tup_len,
+ TupleDesc tupleDesc)
+{
+ HeapTupleHeader new_data;
+ int32 new_header_len;
+ int32 new_data_len;
+ int32 new_tuple_len;
+ HeapTupleData tmptup;
+ int numAttrs = tupleDesc->natts;
+ int i;
+ bool has_nulls = false;
+ Datum toast_values[MaxTupleAttributeNumber];
+ bool toast_isnull[MaxTupleAttributeNumber];
+ bool toast_free[MaxTupleAttributeNumber];
+
+ /* Build a temporary HeapTuple control structure */
+ tmptup.t_len = tup_len;
+ ItemPointerSetInvalid(&(tmptup.t_self));
+ tmptup.t_tableOid = InvalidOid;
+ tmptup.t_data = tup;
+
+ /*
+ * Break down the tuple into fields.
+ */
+ Assert(numAttrs <= MaxTupleAttributeNumber);
+ heap_deform_tuple(&tmptup, tupleDesc, toast_values, toast_isnull);
+
+ memset(toast_free, 0, numAttrs * sizeof(bool));
+
+ for (i = 0; i < numAttrs; i++)
+ {
+ /*
+ * Look at non-null varlena attributes
+ */
+ if (toast_isnull[i])
+ has_nulls = true;
+ else if (TupleDescAttr(tupleDesc, i)->attlen == -1)
+ {
+ struct varlena *new_value;
+
+ new_value = (struct varlena *) DatumGetPointer(toast_values[i]);
+ if (VARATT_IS_EXTERNAL(new_value) ||
+ VARATT_IS_COMPRESSED(new_value))
+ {
+ new_value = detoast_attr(new_value);
+ toast_values[i] = PointerGetDatum(new_value);
+ toast_free[i] = true;
+ }
+ }
+ }
+
+ /*
+ * Calculate the new size of the tuple.
+ *
+ * This should match the reconstruction code in
+ * heap_toast_insert_or_update.
+ */
+ new_header_len = SizeofHeapTupleHeader;
+ if (has_nulls)
+ new_header_len += BITMAPLEN(numAttrs);
+ new_header_len = MAXALIGN(new_header_len);
+ new_data_len = heap_compute_data_size(tupleDesc,
+ toast_values, toast_isnull);
+ new_tuple_len = new_header_len + new_data_len;
+
+ new_data = (HeapTupleHeader) palloc0(new_tuple_len);
+
+ /*
+ * Copy the existing tuple header, but adjust natts and t_hoff.
+ */
+ memcpy(new_data, tup, SizeofHeapTupleHeader);
+ HeapTupleHeaderSetNatts(new_data, numAttrs);
+ new_data->t_hoff = new_header_len;
+
+ /* Set the composite-Datum header fields correctly */
+ HeapTupleHeaderSetDatumLength(new_data, new_tuple_len);
+ HeapTupleHeaderSetTypeId(new_data, tupleDesc->tdtypeid);
+ HeapTupleHeaderSetTypMod(new_data, tupleDesc->tdtypmod);
+
+ /* Copy over the data, and fill the null bitmap if needed */
+ heap_fill_tuple(tupleDesc,
+ toast_values,
+ toast_isnull,
+ (char *) new_data + new_header_len,
+ new_data_len,
+ &(new_data->t_infomask),
+ has_nulls ? new_data->t_bits : NULL);
+
+ /*
+ * Free allocated temp values
+ */
+ for (i = 0; i < numAttrs; i++)
+ if (toast_free[i])
+ pfree(DatumGetPointer(toast_values[i]));
+
+ return PointerGetDatum(new_data);
+}
+
+
+/* ----------
+ * toast_build_flattened_tuple -
+ *
+ * Build a tuple containing no out-of-line toasted fields.
+ * (This does not eliminate compressed or short-header datums.)
+ *
+ * This is essentially just like heap_form_tuple, except that it will
+ * expand any external-data pointers beforehand.
+ *
+ * It's not very clear whether it would be preferable to decompress
+ * in-line compressed datums while at it. For now, we don't.
+ * ----------
+ */
+HeapTuple
+toast_build_flattened_tuple(TupleDesc tupleDesc,
+ Datum *values,
+ bool *isnull)
+{
+ HeapTuple new_tuple;
+ int numAttrs = tupleDesc->natts;
+ int num_to_free;
+ int i;
+ Datum new_values[MaxTupleAttributeNumber];
+ Pointer freeable_values[MaxTupleAttributeNumber];
+
+ /*
+ * We can pass the caller's isnull array directly to heap_form_tuple, but
+ * we potentially need to modify the values array.
+ */
+ Assert(numAttrs <= MaxTupleAttributeNumber);
+ memcpy(new_values, values, numAttrs * sizeof(Datum));
+
+ num_to_free = 0;
+ for (i = 0; i < numAttrs; i++)
+ {
+ /*
+ * Look at non-null varlena attributes
+ */
+ if (!isnull[i] && TupleDescAttr(tupleDesc, i)->attlen == -1)
+ {
+ struct varlena *new_value;
+
+ new_value = (struct varlena *) DatumGetPointer(new_values[i]);
+ if (VARATT_IS_EXTERNAL(new_value))
+ {
+ new_value = detoast_external_attr(new_value);
+ new_values[i] = PointerGetDatum(new_value);
+ freeable_values[num_to_free++] = (Pointer) new_value;
+ }
+ }
+ }
+
+ /*
+ * Form the reconfigured tuple.
+ */
+ new_tuple = heap_form_tuple(tupleDesc, new_values, isnull);
+
+ /*
+ * Free allocated temp values
+ */
+ for (i = 0; i < num_to_free; i++)
+ pfree(freeable_values[i]);
+
+ return new_tuple;
+}
+
+/*
+ * Fetch a TOAST slice from a heap table.
+ *
+ * toastrel is the relation from which chunks are to be fetched.
+ * valueid identifies the TOAST value from which chunks are being fetched.
+ * attrsize is the total size of the TOAST value.
+ * sliceoffset is the byte offset within the TOAST value from which to fetch.
+ * slicelength is the number of bytes to be fetched from the TOAST value.
+ * result is the varlena into which the results should be written.
+ */
+void
+heap_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize,
+ int32 sliceoffset, int32 slicelength,
+ struct varlena *result)
+{
+ Relation *toastidxs;
+ ScanKeyData toastkey[3];
+ TupleDesc toasttupDesc = toastrel->rd_att;
+ int nscankeys;
+ SysScanDesc toastscan;
+ HeapTuple ttup;
+ int32 expectedchunk;
+ int32 totalchunks = ((attrsize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;
+ int startchunk;
+ int endchunk;
+ int num_indexes;
+ int validIndex;
+ SnapshotData SnapshotToast;
+
+ /* Look for the valid index of the toast relation */
+ validIndex = toast_open_indexes(toastrel,
+ AccessShareLock,
+ &toastidxs,
+ &num_indexes);
+
+ startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE;
+ endchunk = (sliceoffset + slicelength - 1) / TOAST_MAX_CHUNK_SIZE;
+ Assert(endchunk <= totalchunks);
+
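+ /*
+ * Worked example (editorial; assumes the default 8 kB-page build, where
+ * TOAST_MAX_CHUNK_SIZE is 1996 bytes): for attrsize = 5000,
+ * sliceoffset = 2500 and slicelength = 1000 we get totalchunks = 3 and
+ * startchunk = endchunk = 1, so a single equality key on the chunk
+ * sequence number suffices below.
+ */
+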
+ /* Set up a scan key to fetch from the index. */
+ ScanKeyInit(&toastkey[0],
+ (AttrNumber) 1,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(valueid));
+
+ /*
+ * No additional condition if fetching all chunks. Otherwise, use an
+ * equality condition for one chunk, and a range condition otherwise.
+ */
+ if (startchunk == 0 && endchunk == totalchunks - 1)
+ nscankeys = 1;
+ else if (startchunk == endchunk)
+ {
+ ScanKeyInit(&toastkey[1],
+ (AttrNumber) 2,
+ BTEqualStrategyNumber, F_INT4EQ,
+ Int32GetDatum(startchunk));
+ nscankeys = 2;
+ }
+ else
+ {
+ ScanKeyInit(&toastkey[1],
+ (AttrNumber) 2,
+ BTGreaterEqualStrategyNumber, F_INT4GE,
+ Int32GetDatum(startchunk));
+ ScanKeyInit(&toastkey[2],
+ (AttrNumber) 2,
+ BTLessEqualStrategyNumber, F_INT4LE,
+ Int32GetDatum(endchunk));
+ nscankeys = 3;
+ }
+
+ /* Prepare for scan */
+ init_toast_snapshot(&SnapshotToast);
+ toastscan = systable_beginscan_ordered(toastrel, toastidxs[validIndex],
+ &SnapshotToast, nscankeys, toastkey);
+
+ /*
+ * Read the chunks by index
+ *
+ * The index is on (valueid, chunkidx) so they will come in order
+ */
+ expectedchunk = startchunk;
+ while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL)
+ {
+ int32 curchunk;
+ Pointer chunk;
+ bool isnull;
+ char *chunkdata;
+ int32 chunksize;
+ int32 expected_size;
+ int32 chcpystrt;
+ int32 chcpyend;
+
+ /*
+ * Have a chunk, extract the sequence number and the data
+ */
+ curchunk = DatumGetInt32(fastgetattr(ttup, 2, toasttupDesc, &isnull));
+ Assert(!isnull);
+ chunk = DatumGetPointer(fastgetattr(ttup, 3, toasttupDesc, &isnull));
+ Assert(!isnull);
+ if (!VARATT_IS_EXTENDED(chunk))
+ {
+ chunksize = VARSIZE(chunk) - VARHDRSZ;
+ chunkdata = VARDATA(chunk);
+ }
+ else if (VARATT_IS_SHORT(chunk))
+ {
+ /* could happen due to heap_form_tuple doing its thing */
+ chunksize = VARSIZE_SHORT(chunk) - VARHDRSZ_SHORT;
+ chunkdata = VARDATA_SHORT(chunk);
+ }
+ else
+ {
+ /* should never happen */
+ elog(ERROR, "found toasted toast chunk for toast value %u in %s",
+ valueid, RelationGetRelationName(toastrel));
+ chunksize = 0; /* keep compiler quiet */
+ chunkdata = NULL;
+ }
+
+ /*
+ * Some checks on the data we've found
+ */
+ if (curchunk != expectedchunk)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("unexpected chunk number %d (expected %d) for toast value %u in %s",
+ curchunk, expectedchunk, valueid,
+ RelationGetRelationName(toastrel))));
+ if (curchunk > endchunk)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("unexpected chunk number %d (out of range %d..%d) for toast value %u in %s",
+ curchunk,
+ startchunk, endchunk, valueid,
+ RelationGetRelationName(toastrel))));
+ expected_size = curchunk < totalchunks - 1 ? TOAST_MAX_CHUNK_SIZE
+ : attrsize - ((totalchunks - 1) * TOAST_MAX_CHUNK_SIZE);
+ if (chunksize != expected_size)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("unexpected chunk size %d (expected %d) in chunk %d of %d for toast value %u in %s",
+ chunksize, expected_size,
+ curchunk, totalchunks, valueid,
+ RelationGetRelationName(toastrel))));
+
+ /*
+ * Copy the data into proper place in our result
+ */
+ chcpystrt = 0;
+ chcpyend = chunksize - 1;
+ if (curchunk == startchunk)
+ chcpystrt = sliceoffset % TOAST_MAX_CHUNK_SIZE;
+ if (curchunk == endchunk)
+ chcpyend = (sliceoffset + slicelength - 1) % TOAST_MAX_CHUNK_SIZE;
+
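+ /*
+ * Continuing the editorial example above (sliceoffset = 2500,
+ * slicelength = 1000, TOAST_MAX_CHUNK_SIZE = 1996, curchunk = 1):
+ * chcpystrt = 2500 % 1996 = 504 and chcpyend = 3499 % 1996 = 1503, so
+ * the destination offset below is 1 * 1996 - 2500 + 504 = 0 and exactly
+ * 1503 - 504 + 1 = 1000 bytes are copied.
+ */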
+ memcpy(VARDATA(result) +
+ (curchunk * TOAST_MAX_CHUNK_SIZE - sliceoffset) + chcpystrt,
+ chunkdata + chcpystrt,
+ (chcpyend - chcpystrt) + 1);
+
+ expectedchunk++;
+ }
+
+ /*
+ * Final checks that we successfully fetched the datum
+ */
+ if (expectedchunk != (endchunk + 1))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg_internal("missing chunk number %d for toast value %u in %s",
+ expectedchunk, valueid,
+ RelationGetRelationName(toastrel))));
+
+ /* End scan and close indexes. */
+ systable_endscan_ordered(toastscan);
+ toast_close_indexes(toastidxs, num_indexes, AccessShareLock);
+}
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
new file mode 100644
index 0000000..d34edb4
--- /dev/null
+++ b/src/backend/access/heap/hio.c
@@ -0,0 +1,721 @@
+/*-------------------------------------------------------------------------
+ *
+ * hio.c
+ * POSTGRES heap access method input/output code.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/access/heap/hio.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/heapam.h"
+#include "access/hio.h"
+#include "access/htup_details.h"
+#include "access/visibilitymap.h"
+#include "storage/bufmgr.h"
+#include "storage/freespace.h"
+#include "storage/lmgr.h"
+#include "storage/smgr.h"
+
+
+/*
+ * RelationPutHeapTuple - place tuple at specified page
+ *
+ * !!! EREPORT(ERROR) IS DISALLOWED HERE !!! Must PANIC on failure!!!
+ *
+ * Note - caller must hold BUFFER_LOCK_EXCLUSIVE on the buffer.
+ */
+void
+RelationPutHeapTuple(Relation relation,
+ Buffer buffer,
+ HeapTuple tuple,
+ bool token)
+{
+ Page pageHeader;
+ OffsetNumber offnum;
+
+ /*
+ * A tuple that's being inserted speculatively should already have its
+ * token set.
+ */
+ Assert(!token || HeapTupleHeaderIsSpeculative(tuple->t_data));
+
+ /*
+ * Do not allow tuples with invalid combinations of hint bits to be placed
+ * on a page. This combination is detected as corruption by the
+ * contrib/amcheck logic, so if you disable this assertion, make
+ * corresponding changes there.
+ */
+ Assert(!((tuple->t_data->t_infomask & HEAP_XMAX_COMMITTED) &&
+ (tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI)));
+
+ /* Add the tuple to the page */
+ pageHeader = BufferGetPage(buffer);
+
+ offnum = PageAddItem(pageHeader, (Item) tuple->t_data,
+ tuple->t_len, InvalidOffsetNumber, false, true);
+
+ if (offnum == InvalidOffsetNumber)
+ elog(PANIC, "failed to add tuple to page");
+
+ /* Update tuple->t_self to the actual position where it was stored */
+ ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
+
+ /*
+ * Insert the correct position into CTID of the stored tuple, too (unless
+ * this is a speculative insertion, in which case the token is held in
+ * CTID field instead)
+ */
+ if (!token)
+ {
+ ItemId itemId = PageGetItemId(pageHeader, offnum);
+ HeapTupleHeader item = (HeapTupleHeader) PageGetItem(pageHeader, itemId);
+
+ item->t_ctid = tuple->t_self;
+ }
+}
+
+/*
+ * Read in a buffer in the given mode, using the bulk-insert strategy if
+ * bistate isn't NULL.
+ */
+static Buffer
+ReadBufferBI(Relation relation, BlockNumber targetBlock,
+ ReadBufferMode mode, BulkInsertState bistate)
+{
+ Buffer buffer;
+
+ /* If not bulk-insert, exactly like ReadBuffer */
+ if (!bistate)
+ return ReadBufferExtended(relation, MAIN_FORKNUM, targetBlock,
+ mode, NULL);
+
+ /* If we have the desired block already pinned, re-pin and return it */
+ if (bistate->current_buf != InvalidBuffer)
+ {
+ if (BufferGetBlockNumber(bistate->current_buf) == targetBlock)
+ {
+ /*
+ * Currently the LOCK variants are only used for extending
+ * the relation, which should never reach this branch.
+ */
+ Assert(mode != RBM_ZERO_AND_LOCK &&
+ mode != RBM_ZERO_AND_CLEANUP_LOCK);
+
+ IncrBufferRefCount(bistate->current_buf);
+ return bistate->current_buf;
+ }
+ /* ... else drop the old buffer */
+ ReleaseBuffer(bistate->current_buf);
+ bistate->current_buf = InvalidBuffer;
+ }
+
+ /* Perform a read using the buffer strategy */
+ buffer = ReadBufferExtended(relation, MAIN_FORKNUM, targetBlock,
+ mode, bistate->strategy);
+
+ /* Save the selected block as target for future inserts */
+ IncrBufferRefCount(buffer);
+ bistate->current_buf = buffer;
+
+ return buffer;
+}
+
+/*
+ * For each heap page which is all-visible, acquire a pin on the appropriate
+ * visibility map page, if we haven't already got one.
+ *
+ * buffer2 may be InvalidBuffer, if only one buffer is involved. buffer1
+ * must not be InvalidBuffer. If both buffers are specified, block1 must
+ * be less than block2.
+ */
+static void
+GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
+ BlockNumber block1, BlockNumber block2,
+ Buffer *vmbuffer1, Buffer *vmbuffer2)
+{
+ bool need_to_pin_buffer1;
+ bool need_to_pin_buffer2;
+
+ Assert(BufferIsValid(buffer1));
+ Assert(buffer2 == InvalidBuffer || block1 <= block2);
+
+ while (1)
+ {
+ /* Figure out which pins we need but don't have. */
+ need_to_pin_buffer1 = PageIsAllVisible(BufferGetPage(buffer1))
+ && !visibilitymap_pin_ok(block1, *vmbuffer1);
+ need_to_pin_buffer2 = buffer2 != InvalidBuffer
+ && PageIsAllVisible(BufferGetPage(buffer2))
+ && !visibilitymap_pin_ok(block2, *vmbuffer2);
+ if (!need_to_pin_buffer1 && !need_to_pin_buffer2)
+ return;
+
+ /* We must unlock both buffers before doing any I/O. */
+ LockBuffer(buffer1, BUFFER_LOCK_UNLOCK);
+ if (buffer2 != InvalidBuffer && buffer2 != buffer1)
+ LockBuffer(buffer2, BUFFER_LOCK_UNLOCK);
+
+ /* Get pins. */
+ if (need_to_pin_buffer1)
+ visibilitymap_pin(relation, block1, vmbuffer1);
+ if (need_to_pin_buffer2)
+ visibilitymap_pin(relation, block2, vmbuffer2);
+
+ /* Relock buffers. */
+ LockBuffer(buffer1, BUFFER_LOCK_EXCLUSIVE);
+ if (buffer2 != InvalidBuffer && buffer2 != buffer1)
+ LockBuffer(buffer2, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * If there are two buffers involved and we pinned just one of them,
+ * it's possible that the second one became all-visible while we were
+ * busy pinning the first one. If it looks like that's a possible
+ * scenario, we'll need to make a second pass through this loop.
+ */
+ if (buffer2 == InvalidBuffer || buffer1 == buffer2
+ || (need_to_pin_buffer1 && need_to_pin_buffer2))
+ break;
+ }
+}
+
+/*
+ * Extend a relation by multiple blocks to avoid future contention on the
+ * relation extension lock. Our goal is to pre-extend the relation by an
+ * amount which ramps up as the degree of contention ramps up, but limiting
+ * the result to some sane overall value.
+ */
+static void
+RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
+{
+ BlockNumber blockNum,
+ firstBlock = InvalidBlockNumber;
+ int extraBlocks;
+ int lockWaiters;
+
+ /* Use the length of the lock wait queue to judge how much to extend. */
+ lockWaiters = RelationExtensionLockWaiterCount(relation);
+ if (lockWaiters <= 0)
+ return;
+
+ /*
+ * It might seem like multiplying the number of lock waiters by as much as
+ * 20 is too aggressive, but benchmarking revealed that smaller numbers
+ * were insufficient. 512 is just an arbitrary cap to prevent
+ * pathological results.
+ */
+ extraBlocks = Min(512, lockWaiters * 20);
+
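+ /*
+ * Editorial example: 10 waiters pre-extend by 200 pages (about 1.6 MB
+ * at the default 8 kB page size); the 512-page cap takes effect from 26
+ * waiters upward.
+ */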
+ do
+ {
+ Buffer buffer;
+ Page page;
+ Size freespace;
+
+ /*
+ * Extend by one page. This should generally match the main-line
+ * extension code in RelationGetBufferForTuple, except that we hold
+ * the relation extension lock throughout, and we don't immediately
+ * initialize the page (see below).
+ */
+ buffer = ReadBufferBI(relation, P_NEW, RBM_ZERO_AND_LOCK, bistate);
+ page = BufferGetPage(buffer);
+
+ if (!PageIsNew(page))
+ elog(ERROR, "page %u of relation \"%s\" should be empty but is not",
+ BufferGetBlockNumber(buffer),
+ RelationGetRelationName(relation));
+
+ /*
+ * Add the page to the FSM without initializing. If we were to
+ * initialize here, the page would potentially get flushed out to disk
+ * before we add any useful content. There's no guarantee that that'd
+ * happen before a potential crash, so we need to deal with
+ * uninitialized pages anyway, and can thus avoid the potential for
+ * unnecessary writes.
+ */
+
+ /* we'll need this info below */
+ blockNum = BufferGetBlockNumber(buffer);
+ freespace = BufferGetPageSize(buffer) - SizeOfPageHeaderData;
+
+ UnlockReleaseBuffer(buffer);
+
+ /* Remember first block number thus added. */
+ if (firstBlock == InvalidBlockNumber)
+ firstBlock = blockNum;
+
+ /*
+ * Immediately update the bottom level of the FSM. This has a good
+ * chance of making this page visible to other concurrently inserting
+ * backends, and we want that to happen without delay.
+ */
+ RecordPageWithFreeSpace(relation, blockNum, freespace);
+ }
+ while (--extraBlocks > 0);
+
+ /*
+ * Updating the upper levels of the free space map is too expensive to do
+ * for every block, but it's worth doing once at the end to make sure that
+ * subsequent insertion activity sees all of those nifty free pages we
+ * just inserted.
+ */
+ FreeSpaceMapVacuumRange(relation, firstBlock, blockNum + 1);
+}
+
+/*
+ * RelationGetBufferForTuple
+ *
+ * Returns pinned and exclusive-locked buffer of a page in given relation
+ * with free space >= given len.
+ *
+ * If otherBuffer is not InvalidBuffer, then it references a previously
+ * pinned buffer of another page in the same relation; on return, this
+ * buffer will also be exclusive-locked. (This case is used by heap_update;
+ * the otherBuffer contains the tuple being updated.)
+ *
+ * The reason for passing otherBuffer is that if two backends are doing
+ * concurrent heap_update operations, a deadlock could occur if they try
+ * to lock the same two buffers in opposite orders. To ensure that this
+ * can't happen, we impose the rule that buffers of a relation must be
+ * locked in increasing page number order. This is most conveniently done
+ * by having RelationGetBufferForTuple lock them both, with suitable care
+ * for ordering.
+ *
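+ * For illustration (an editorial example): if backend A locked page 3
+ * and then waited for page 7 while backend B locked page 7 and then
+ * waited for page 3, neither could ever proceed. Under the
+ * increasing-page-number rule both backends lock page 3 before page 7,
+ * so one simply waits for the other to finish.
+ *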
+ * NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
+ * same buffer we select for insertion of the new tuple (this could only
+ * happen if space is freed in that page after heap_update finds there's not
+ * enough there). In that case, the page will be pinned and locked only once.
+ *
+ * We also handle the possibility that the all-visible flag will need to be
+ * cleared on one or both pages. If so, pin on the associated visibility map
+ * page must be acquired before acquiring buffer lock(s), to avoid possibly
+ * doing I/O while holding buffer locks. The pins are passed back to the
+ * caller using the input-output arguments vmbuffer and vmbuffer_other.
+ * Note that in some cases the caller might have already acquired such pins,
+ * which is indicated by these arguments not being InvalidBuffer on entry.
+ *
+ * We normally use FSM to help us find free space. However,
+ * if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
+ * the end of the relation if the tuple won't fit on the current target page.
+ * This can save some cycles when we know the relation is new and doesn't
+ * contain useful amounts of free space.
+ *
+ * HEAP_INSERT_SKIP_FSM is also useful for non-WAL-logged additions to a
+ * relation, if the caller holds exclusive lock and is careful to invalidate
+ * relation's smgr_targblock before the first insertion --- that ensures that
+ * all insertions will occur into newly added pages and not be intermixed
+ * with tuples from other transactions. That way, a crash can't risk losing
+ * any committed data of other transactions. (See heap_insert's comments
+ * for additional constraints needed for safe usage of this behavior.)
+ *
+ * The caller can also provide a BulkInsertState object to optimize many
+ * insertions into the same relation. This keeps a pin on the current
+ * insertion target page (to save pin/unpin cycles) and also passes a
+ * BULKWRITE buffer selection strategy object to the buffer manager.
+ * Passing NULL for bistate selects the default behavior.
+ *
+ * We don't fill existing pages further than the fillfactor, except for large
+ * tuples in nearly-empty pages. This is OK since this routine is not
+ * consulted when updating a tuple and keeping it on the same page, which is
+ * the scenario fillfactor is meant to reserve space for.
+ *
+ * ereport(ERROR) is allowed here, so this routine *must* be called
+ * before any (unlogged) changes are made in buffer pool.
+ */
+Buffer
+RelationGetBufferForTuple(Relation relation, Size len,
+ Buffer otherBuffer, int options,
+ BulkInsertState bistate,
+ Buffer *vmbuffer, Buffer *vmbuffer_other)
+{
+ bool use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
+ Buffer buffer = InvalidBuffer;
+ Page page;
+ Size nearlyEmptyFreeSpace,
+ pageFreeSpace = 0,
+ saveFreeSpace = 0,
+ targetFreeSpace = 0;
+ BlockNumber targetBlock,
+ otherBlock;
+ bool needLock;
+
+ len = MAXALIGN(len); /* be conservative */
+
+ /* Bulk insert is not supported for updates, only inserts. */
+ Assert(otherBuffer == InvalidBuffer || !bistate);
+
+ /*
+ * If we're going to fail for an oversize tuple, do it right away
+ */
+ if (len > MaxHeapTupleSize)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("row is too big: size %zu, maximum size %zu",
+ len, MaxHeapTupleSize)));
+
+ /* Compute desired extra freespace due to fillfactor option */
+ saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
+ HEAP_DEFAULT_FILLFACTOR);
+
+ /*
+ * Since pages without tuples can still have line pointers, we consider
+ * pages "empty" when the unavailable space is slight. This threshold is
+ * somewhat arbitrary, but it should prevent most unnecessary relation
+ * extensions while inserting large tuples into low-fillfactor tables.
+ */
+ nearlyEmptyFreeSpace = MaxHeapTupleSize -
+ (MaxHeapTuplesPerPage / 8 * sizeof(ItemIdData));
+ if (len + saveFreeSpace > nearlyEmptyFreeSpace)
+ targetFreeSpace = Max(len, nearlyEmptyFreeSpace);
+ else
+ targetFreeSpace = len + saveFreeSpace;
+
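+ /*
+ * Editorial example (figures assume a default 8 kB-page build, where
+ * MaxHeapTupleSize is 8160 and nearlyEmptyFreeSpace works out to 8016
+ * bytes): inserting a 7 kB tuple into a fillfactor-50 table makes
+ * len + saveFreeSpace exceed nearlyEmptyFreeSpace, so we demand only
+ * Max(len, nearlyEmptyFreeSpace) and can reuse a nearly-empty existing
+ * page instead of extending the relation.
+ */
+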
+ if (otherBuffer != InvalidBuffer)
+ otherBlock = BufferGetBlockNumber(otherBuffer);
+ else
+ otherBlock = InvalidBlockNumber; /* just to keep compiler quiet */
+
+ /*
+ * We first try to put the tuple on the same page we last inserted a tuple
+ * on, as cached in the BulkInsertState or relcache entry. If that
+ * doesn't work, we ask the Free Space Map to locate a suitable page.
+ * Since the FSM's info might be out of date, we have to be prepared to
+ * loop around and retry multiple times. (To ensure this isn't an infinite
+ * loop, we must update the FSM with the correct amount of free space on
+ * each page that proves not to be suitable.) If the FSM has no record of
+ * a page with enough free space, we give up and extend the relation.
+ *
+ * When use_fsm is false, we either put the tuple onto the existing target
+ * page or extend the relation.
+ */
+ if (bistate && bistate->current_buf != InvalidBuffer)
+ targetBlock = BufferGetBlockNumber(bistate->current_buf);
+ else
+ targetBlock = RelationGetTargetBlock(relation);
+
+ if (targetBlock == InvalidBlockNumber && use_fsm)
+ {
+ /*
+ * We have no cached target page, so ask the FSM for an initial
+ * target.
+ */
+ targetBlock = GetPageWithFreeSpace(relation, targetFreeSpace);
+ }
+
+ /*
+ * If the FSM knows nothing of the rel, try the last page before we give
+ * up and extend. This avoids one-tuple-per-page syndrome during
+ * bootstrapping or in a recently-started system.
+ */
+ if (targetBlock == InvalidBlockNumber)
+ {
+ BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
+
+ if (nblocks > 0)
+ targetBlock = nblocks - 1;
+ }
+
+loop:
+ while (targetBlock != InvalidBlockNumber)
+ {
+ /*
+ * Read and exclusive-lock the target block, as well as the other
+ * block if one was given, taking suitable care with lock ordering and
+ * the possibility they are the same block.
+ *
+ * If the page-level all-visible flag is set, caller will need to
+ * clear both that and the corresponding visibility map bit. However,
+ * by the time we return, we'll have x-locked the buffer, and we don't
+ * want to do any I/O while in that state. So we check the bit here
+ * before taking the lock, and pin the page if it appears necessary.
+ * Checking without the lock creates a risk of getting the wrong
+ * answer, so we'll have to recheck after acquiring the lock.
+ */
+ if (otherBuffer == InvalidBuffer)
+ {
+ /* easy case */
+ buffer = ReadBufferBI(relation, targetBlock, RBM_NORMAL, bistate);
+ if (PageIsAllVisible(BufferGetPage(buffer)))
+ visibilitymap_pin(relation, targetBlock, vmbuffer);
+
+ /*
+ * If the page is empty, pin vmbuffer to set the all_frozen bit later.
+ */
+ if ((options & HEAP_INSERT_FROZEN) &&
+ (PageGetMaxOffsetNumber(BufferGetPage(buffer)) == 0))
+ visibilitymap_pin(relation, targetBlock, vmbuffer);
+
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ }
+ else if (otherBlock == targetBlock)
+ {
+ /* also easy case */
+ buffer = otherBuffer;
+ if (PageIsAllVisible(BufferGetPage(buffer)))
+ visibilitymap_pin(relation, targetBlock, vmbuffer);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ }
+ else if (otherBlock < targetBlock)
+ {
+ /* lock other buffer first */
+ buffer = ReadBuffer(relation, targetBlock);
+ if (PageIsAllVisible(BufferGetPage(buffer)))
+ visibilitymap_pin(relation, targetBlock, vmbuffer);
+ LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ }
+ else
+ {
+ /* lock target buffer first */
+ buffer = ReadBuffer(relation, targetBlock);
+ if (PageIsAllVisible(BufferGetPage(buffer)))
+ visibilitymap_pin(relation, targetBlock, vmbuffer);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
+ }
+
+ /*
+ * We now have the target page (and the other buffer, if any) pinned
+ * and locked. However, since our initial PageIsAllVisible checks
+ * were performed before acquiring the lock, the results might now be
+ * out of date, either for the selected victim buffer, or for the
+ * other buffer passed by the caller. In that case, we'll need to
+ * give up our locks, go get the pin(s) we failed to get earlier, and
+ * re-lock. That's pretty painful, but hopefully shouldn't happen
+ * often.
+ *
+ * Note that there's a small possibility that we didn't pin the page
+ * above but still have the correct page pinned anyway, either because
+ * we've already made a previous pass through this loop, or because
+ * caller passed us the right page anyway.
+ *
+ * Note also that it's possible that by the time we get the pin and
+ * retake the buffer locks, the visibility map bit will have been
+ * cleared by some other backend anyway. In that case, we'll have
+ * done a bit of extra work for no gain, but there's no real harm
+ * done.
+ */
+ if (otherBuffer == InvalidBuffer || targetBlock <= otherBlock)
+ GetVisibilityMapPins(relation, buffer, otherBuffer,
+ targetBlock, otherBlock, vmbuffer,
+ vmbuffer_other);
+ else
+ GetVisibilityMapPins(relation, otherBuffer, buffer,
+ otherBlock, targetBlock, vmbuffer_other,
+ vmbuffer);
+
+ /*
+ * Now we can check to see if there's enough free space here. If so,
+ * we're done.
+ */
+ page = BufferGetPage(buffer);
+
+ /*
+ * If necessary, initialize the page; it'll be used soon. We could avoid
+ * dirtying the buffer here, and rely on the caller to do so whenever
+ * it puts a tuple onto the page, but there seems not much benefit in
+ * doing so.
+ */
+ if (PageIsNew(page))
+ {
+ PageInit(page, BufferGetPageSize(buffer), 0);
+ MarkBufferDirty(buffer);
+ }
+
+ pageFreeSpace = PageGetHeapFreeSpace(page);
+ if (targetFreeSpace <= pageFreeSpace)
+ {
+ /* use this page as future insert target, too */
+ RelationSetTargetBlock(relation, targetBlock);
+ return buffer;
+ }
+
+ /*
+ * Not enough space, so we must give up our page locks and pin (if
+ * any) and prepare to look elsewhere. We don't care which order we
+ * unlock the two buffers in, so this can be slightly simpler than the
+ * code above.
+ */
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ if (otherBuffer == InvalidBuffer)
+ ReleaseBuffer(buffer);
+ else if (otherBlock != targetBlock)
+ {
+ LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
+ ReleaseBuffer(buffer);
+ }
+
+ /* Without FSM, always fall out of the loop and extend */
+ if (!use_fsm)
+ break;
+
+ /*
+ * Update FSM as to condition of this page, and ask for another page
+ * to try.
+ */
+ targetBlock = RecordAndGetPageWithFreeSpace(relation,
+ targetBlock,
+ pageFreeSpace,
+ targetFreeSpace);
+ }
+
+ /*
+ * Have to extend the relation.
+ *
+ * We have to use a lock to ensure no one else is extending the rel at the
+ * same time, else we will both try to initialize the same new page. We
+ * can skip locking for new or temp relations, however, since no one else
+ * could be accessing them.
+ */
+ needLock = !RELATION_IS_LOCAL(relation);
+
+ /*
+ * If we need the lock but are not able to acquire it immediately, we'll
+ * consider extending the relation by multiple blocks at a time to manage
+ * contention on the relation extension lock. However, this only makes
+ * sense if we're using the FSM; otherwise, there's no point.
+ */
+ if (needLock)
+ {
+ if (!use_fsm)
+ LockRelationForExtension(relation, ExclusiveLock);
+ else if (!ConditionalLockRelationForExtension(relation, ExclusiveLock))
+ {
+ /* Couldn't get the lock immediately; wait for it. */
+ LockRelationForExtension(relation, ExclusiveLock);
+
+ /*
+ * Check if some other backend has extended a block for us while
+ * we were waiting on the lock.
+ */
+ targetBlock = GetPageWithFreeSpace(relation, targetFreeSpace);
+
+ /*
+ * If some other waiter has already extended the relation, we
+ * don't need to do so; just use the existing freespace.
+ */
+ if (targetBlock != InvalidBlockNumber)
+ {
+ UnlockRelationForExtension(relation, ExclusiveLock);
+ goto loop;
+ }
+
+ /* Time to bulk-extend. */
+ RelationAddExtraBlocks(relation, bistate);
+ }
+ }
+
+ /*
+ * In addition to whatever extension we performed above, we always add at
+ * least one block to satisfy our own request.
+ *
+ * XXX This does an lseek - rather expensive - but at the moment it is the
+ * only way to accurately determine how many blocks are in a relation. Is
+ * it worth keeping an accurate file length in shared memory someplace,
+ * rather than relying on the kernel to do it for us?
+ */
+ buffer = ReadBufferBI(relation, P_NEW, RBM_ZERO_AND_LOCK, bistate);
+
+ /*
+ * We need to initialize the empty new page. Double-check that it really
+ * is empty (this should never happen, but if it does we don't want to
+ * risk wiping out valid data).
+ */
+ page = BufferGetPage(buffer);
+
+ if (!PageIsNew(page))
+ elog(ERROR, "page %u of relation \"%s\" should be empty but is not",
+ BufferGetBlockNumber(buffer),
+ RelationGetRelationName(relation));
+
+ PageInit(page, BufferGetPageSize(buffer), 0);
+ MarkBufferDirty(buffer);
+
+ /*
+	 * The page is empty; pin vmbuffer so the all_frozen bit can be set.
+ */
+ if (options & HEAP_INSERT_FROZEN)
+ {
+ Assert(PageGetMaxOffsetNumber(BufferGetPage(buffer)) == 0);
+ visibilitymap_pin(relation, BufferGetBlockNumber(buffer), vmbuffer);
+ }
+
+ /*
+ * Release the file-extension lock; it's now OK for someone else to extend
+ * the relation some more.
+ */
+ if (needLock)
+ UnlockRelationForExtension(relation, ExclusiveLock);
+
+ /*
+ * Lock the other buffer. It's guaranteed to be of a lower page number
+	 * than the new page. To conform with the deadlock prevention rules, we
+	 * ought to lock otherBuffer first, but that would give other backends a
+	 * chance to put tuples on our page. To reduce the likelihood of that,
+	 * attempt to lock the other buffer conditionally; that's very likely to
+	 * work. Otherwise we need to lock the buffers in the correct order, and
+	 * retry if the space has been used in the meantime.
+ *
+ * Alternatively, we could acquire the lock on otherBuffer before
+ * extending the relation, but that'd require holding the lock while
+ * performing IO, which seems worse than an unlikely retry.
+ */
+ if (otherBuffer != InvalidBuffer)
+ {
+ Assert(otherBuffer != buffer);
+ targetBlock = BufferGetBlockNumber(buffer);
+ Assert(targetBlock > otherBlock);
+
+ if (unlikely(!ConditionalLockBuffer(otherBuffer)))
+ {
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * Because the buffers were unlocked for a while, it's possible,
+ * although unlikely, that an all-visible flag became set or that
+ * somebody used up the available space in the new page. We can
+ * use GetVisibilityMapPins to deal with the first case. In the
+ * second case, just retry from start.
+ */
+ GetVisibilityMapPins(relation, otherBuffer, buffer,
+ otherBlock, targetBlock, vmbuffer_other,
+ vmbuffer);
+
+ if (len > PageGetHeapFreeSpace(page))
+ {
+ LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
+ UnlockReleaseBuffer(buffer);
+
+ goto loop;
+ }
+ }
+ }
+
+ if (len > PageGetHeapFreeSpace(page))
+ {
+ /* We should not get here given the test at the top */
+ elog(PANIC, "tuple is too big: size %zu", len);
+ }
+
+ /*
+ * Remember the new page as our target for future insertions.
+ *
+ * XXX should we enter the new page into the free space map immediately,
+ * or just keep it for this backend's exclusive use in the short run
+ * (until VACUUM sees it)? Seems to depend on whether you expect the
+ * current backend to make more insertions or not, which is probably a
+ * good bet most of the time. So for now, don't add it to FSM yet.
+ */
+ RelationSetTargetBlock(relation, BufferGetBlockNumber(buffer));
+
+ return buffer;
+}
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
new file mode 100644
index 0000000..f7f8056
--- /dev/null
+++ b/src/backend/access/heap/pruneheap.c
@@ -0,0 +1,1052 @@
+/*-------------------------------------------------------------------------
+ *
+ * pruneheap.c
+ * heap page pruning and HOT-chain management code
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/access/heap/pruneheap.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/heapam.h"
+#include "access/heapam_xlog.h"
+#include "access/htup_details.h"
+#include "access/transam.h"
+#include "access/xlog.h"
+#include "catalog/catalog.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "storage/bufmgr.h"
+#include "utils/rel.h"
+#include "utils/snapmgr.h"
+
+/* Working data for heap_page_prune and subroutines */
+typedef struct
+{
+ Relation rel;
+
+ /* tuple visibility test, initialized for the relation */
+ GlobalVisState *vistest;
+
+ /*
+ * Thresholds set by TransactionIdLimitedForOldSnapshots() if they have
+ * been computed (done on demand, and only if
+ * OldSnapshotThresholdActive()). The first time a tuple is about to be
+ * removed based on the limited horizon, old_snap_used is set to true, and
+ * SetOldSnapshotThresholdTimestamp() is called. See
+ * heap_prune_satisfies_vacuum().
+ */
+ TimestampTz old_snap_ts;
+ TransactionId old_snap_xmin;
+ bool old_snap_used;
+
+ TransactionId new_prune_xid; /* new prune hint value for page */
+ TransactionId latestRemovedXid; /* latest xid to be removed by this prune */
+ int nredirected; /* numbers of entries in arrays below */
+ int ndead;
+ int nunused;
+ /* arrays that accumulate indexes of items to be changed */
+ OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
+ OffsetNumber nowdead[MaxHeapTuplesPerPage];
+ OffsetNumber nowunused[MaxHeapTuplesPerPage];
+
+ /*
+ * marked[i] is true if item i is entered in one of the above arrays.
+ *
+	 * This needs to be MaxHeapTuplesPerPage + 1 long, since FirstOffsetNumber
+	 * is 1. Otherwise every access would need to subtract 1.
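+	 *
+	 * For example, marked[FirstOffsetNumber] (i.e. marked[1]) tracks the
+	 * first item on the page, and marked[0] is simply never used.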
+ */
+ bool marked[MaxHeapTuplesPerPage + 1];
+
+ /*
+ * Tuple visibility is only computed once for each tuple, for correctness
+ * and efficiency reasons; see comment in heap_page_prune() for
+	 * details. This is of type int8[] instead of HTSV_Result[], so we can use
+ * -1 to indicate no visibility has been computed, e.g. for LP_DEAD items.
+ *
+ * Same indexing as ->marked.
+ */
+ int8 htsv[MaxHeapTuplesPerPage + 1];
+} PruneState;
+
+/* Local functions */
+static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate,
+ HeapTuple tup,
+ Buffer buffer);
+static int heap_prune_chain(Buffer buffer,
+ OffsetNumber rootoffnum,
+ PruneState *prstate);
+static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
+static void heap_prune_record_redirect(PruneState *prstate,
+ OffsetNumber offnum, OffsetNumber rdoffnum);
+static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum);
+static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum);
+
+
+/*
+ * Optionally prune and repair fragmentation in the specified page.
+ *
+ * This is an opportunistic function. It will perform housekeeping
+ * only if the page heuristically looks like a candidate for pruning and we
+ * can acquire buffer cleanup lock without blocking.
+ *
+ * Note: this is called quite often. It's important that it fall out quickly
+ * if there's not any use in pruning.
+ *
+ * Caller must have pin on the buffer, and must *not* have a lock on it.
+ */
+void
+heap_page_prune_opt(Relation relation, Buffer buffer)
+{
+ Page page = BufferGetPage(buffer);
+ TransactionId prune_xid;
+ GlobalVisState *vistest;
+ TransactionId limited_xmin = InvalidTransactionId;
+ TimestampTz limited_ts = 0;
+ Size minfree;
+
+ /*
+ * We can't write WAL in recovery mode, so there's no point trying to
+ * clean the page. The primary will likely issue a cleaning WAL record
+ * soon anyway, so this is no particular loss.
+ */
+ if (RecoveryInProgress())
+ return;
+
+ /*
+	 * XXX: Magic to keep the old_snapshot_threshold tests appearing to
+	 * "work". They are currently broken, and discussion of what to do about
+	 * them is ongoing. See
+ * https://www.postgresql.org/message-id/20200403001235.e6jfdll3gh2ygbuc%40alap3.anarazel.de
+ */
+ if (old_snapshot_threshold == 0)
+ SnapshotTooOldMagicForTest();
+
+ /*
+	 * First check whether there's any chance there's something to prune;
+	 * determining the appropriate horizon is a waste of effort if there's no
+	 * prune_xid (i.e. no updates/deletes have left potentially dead tuples
+	 * around).
+ */
+ prune_xid = ((PageHeader) page)->pd_prune_xid;
+ if (!TransactionIdIsValid(prune_xid))
+ return;
+
+ /*
+ * Check whether prune_xid indicates that there may be dead rows that can
+ * be cleaned up.
+ *
+ * It is OK to check the old snapshot limit before acquiring the cleanup
+ * lock because the worst that can happen is that we are not quite as
+ * aggressive about the cleanup (by however many transaction IDs are
+ * consumed between this point and acquiring the lock). This allows us to
+ * save significant overhead in the case where the page is found not to be
+ * prunable.
+ *
+	 * Even if old_snapshot_threshold is set, we first check whether the page
+	 * can be pruned without it, both because
+	 * TransactionIdLimitedForOldSnapshots() is not cheap and because not
+	 * relying unnecessarily on old_snapshot_threshold avoids causing
+	 * conflicts.
+ */
+ vistest = GlobalVisTestFor(relation);
+
+ if (!GlobalVisTestIsRemovableXid(vistest, prune_xid))
+ {
+ if (!OldSnapshotThresholdActive())
+ return;
+
+ if (!TransactionIdLimitedForOldSnapshots(GlobalVisTestNonRemovableHorizon(vistest),
+ relation,
+ &limited_xmin, &limited_ts))
+ return;
+
+ if (!TransactionIdPrecedes(prune_xid, limited_xmin))
+ return;
+ }
+
+ /*
+ * We prune when a previous UPDATE failed to find enough space on the page
+ * for a new tuple version, or when free space falls below the relation's
+ * fill-factor target (but not less than 10%).
+ *
+ * Checking free space here is questionable since we aren't holding any
+ * lock on the buffer; in the worst case we could get a bogus answer. It's
+ * unlikely to be *seriously* wrong, though, since reading either pd_lower
+ * or pd_upper is probably atomic. Avoiding taking a lock seems more
+ * important than sometimes getting a wrong answer in what is after all
+ * just a heuristic estimate.
+ */
+ minfree = RelationGetTargetPageFreeSpace(relation,
+ HEAP_DEFAULT_FILLFACTOR);
+ minfree = Max(minfree, BLCKSZ / 10);
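+
+	/*
+	 * For example (illustrative arithmetic only): with 8 kB blocks and
+	 * fillfactor = 70 this gives minfree = Max(2457, 819) = 2457 bytes,
+	 * while with the default fillfactor of 100 the BLCKSZ / 10 floor of
+	 * 819 bytes applies instead.
+	 */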
+
+ if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
+ {
+ /* OK, try to get exclusive buffer lock */
+ if (!ConditionalLockBufferForCleanup(buffer))
+ return;
+
+ /*
+ * Now that we have buffer lock, get accurate information about the
+ * page's free space, and recheck the heuristic about whether to
+ * prune. (We needn't recheck PageIsPrunable, since no one else could
+ * have pruned while we hold pin.)
+ */
+ if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
+ {
+ /* OK to prune */
+ (void) heap_page_prune(relation, buffer, vistest,
+ limited_xmin, limited_ts,
+ true, NULL);
+ }
+
+ /* And release buffer lock */
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ }
+}
+
+
+/*
+ * Prune and repair fragmentation in the specified page.
+ *
+ * Caller must have pin and buffer cleanup lock on the page.
+ *
+ * vistest is used to distinguish whether tuples are DEAD or RECENTLY_DEAD
+ * (see heap_prune_satisfies_vacuum and
+ * HeapTupleSatisfiesVacuum). old_snap_xmin / old_snap_ts must either have
+ * been set by TransactionIdLimitedForOldSnapshots, or be
+ * InvalidTransactionId/0 respectively.
+ *
+ * If report_stats is true then we send the number of reclaimed heap-only
+ * tuples to pgstats. (This must be false during vacuum, since vacuum will
+ * send its own new total to pgstats, and we don't want this delta applied
+ * on top of that.)
+ *
+ * If off_loc is not NULL, it is set to the offset of the item currently
+ * being processed, for use in the caller's error context callback.
+ *
+ * Returns the number of tuples deleted from the page during this call.
+ */
+int
+heap_page_prune(Relation relation, Buffer buffer,
+ GlobalVisState *vistest,
+ TransactionId old_snap_xmin,
+ TimestampTz old_snap_ts,
+ bool report_stats,
+ OffsetNumber *off_loc)
+{
+ int ndeleted = 0;
+ Page page = BufferGetPage(buffer);
+ OffsetNumber offnum,
+ maxoff;
+ PruneState prstate;
+ HeapTupleData tup;
+
+ /*
+ * Our strategy is to scan the page and make lists of items to change,
+ * then apply the changes within a critical section. This keeps as much
+ * logic as possible out of the critical section, and also ensures that
+ * WAL replay will work the same as the normal case.
+ *
+ * First, initialize the new pd_prune_xid value to zero (indicating no
+ * prunable tuples). If we find any tuples which may soon become
+ * prunable, we will save the lowest relevant XID in new_prune_xid. Also
+ * initialize the rest of our working state.
+ */
+ prstate.new_prune_xid = InvalidTransactionId;
+ prstate.rel = relation;
+ prstate.vistest = vistest;
+ prstate.old_snap_xmin = old_snap_xmin;
+ prstate.old_snap_ts = old_snap_ts;
+ prstate.old_snap_used = false;
+ prstate.latestRemovedXid = InvalidTransactionId;
+ prstate.nredirected = prstate.ndead = prstate.nunused = 0;
+ memset(prstate.marked, 0, sizeof(prstate.marked));
+
+ maxoff = PageGetMaxOffsetNumber(page);
+ tup.t_tableOid = RelationGetRelid(prstate.rel);
+
+ /*
+ * Determine HTSV for all tuples.
+ *
+ * This is required for correctness to deal with cases where running HTSV
+ * twice could result in different results (e.g. RECENTLY_DEAD can turn to
+ * DEAD if another checked item causes GlobalVisTestIsRemovableFullXid()
+ * to update the horizon, INSERT_IN_PROGRESS can change to DEAD if the
+ * inserting transaction aborts, ...). That in turn could cause
+ * heap_prune_chain() to behave incorrectly if a tuple is reached twice,
+	 * once directly via heap_prune_chain() and once while following a HOT
+	 * chain.
+ *
+ * It's also good for performance. Most commonly tuples within a page are
+ * stored at decreasing offsets (while the items are stored at increasing
+ * offsets). When processing all tuples on a page this leads to reading
+ * memory at decreasing offsets within a page, with a variable stride.
+ * That's hard for CPU prefetchers to deal with. Processing the items in
+ * reverse order (and thus the tuples in increasing order) increases
+ * prefetching efficiency significantly / decreases the number of cache
+ * misses.
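+	 *
+	 * (On a typical heap page, item 1's tuple lies nearest the end of the
+	 * page and the highest-numbered item's tuple lies just above pd_upper,
+	 * so walking the items in reverse visits the tuple data at ascending
+	 * addresses.)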
+ */
+ for (offnum = maxoff;
+ offnum >= FirstOffsetNumber;
+ offnum = OffsetNumberPrev(offnum))
+ {
+ ItemId itemid = PageGetItemId(page, offnum);
+ HeapTupleHeader htup;
+
+ /* Nothing to do if slot doesn't contain a tuple */
+ if (!ItemIdIsNormal(itemid))
+ {
+ prstate.htsv[offnum] = -1;
+ continue;
+ }
+
+ htup = (HeapTupleHeader) PageGetItem(page, itemid);
+ tup.t_data = htup;
+ tup.t_len = ItemIdGetLength(itemid);
+ ItemPointerSet(&(tup.t_self), BufferGetBlockNumber(buffer), offnum);
+
+ /*
+ * Set the offset number so that we can display it along with any
+ * error that occurred while processing this tuple.
+ */
+ if (off_loc)
+ *off_loc = offnum;
+
+ prstate.htsv[offnum] = heap_prune_satisfies_vacuum(&prstate, &tup,
+ buffer);
+ }
+
+ /* Scan the page */
+ for (offnum = FirstOffsetNumber;
+ offnum <= maxoff;
+ offnum = OffsetNumberNext(offnum))
+ {
+ ItemId itemid;
+
+ /* Ignore items already processed as part of an earlier chain */
+ if (prstate.marked[offnum])
+ continue;
+
+ /* see preceding loop */
+ if (off_loc)
+ *off_loc = offnum;
+
+ /* Nothing to do if slot is empty or already dead */
+ itemid = PageGetItemId(page, offnum);
+ if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid))
+ continue;
+
+ /* Process this item or chain of items */
+ ndeleted += heap_prune_chain(buffer, offnum, &prstate);
+ }
+
+ /* Clear the offset information once we have processed the given page. */
+ if (off_loc)
+ *off_loc = InvalidOffsetNumber;
+
+ /* Any error while applying the changes is critical */
+ START_CRIT_SECTION();
+
+ /* Have we found any prunable items? */
+ if (prstate.nredirected > 0 || prstate.ndead > 0 || prstate.nunused > 0)
+ {
+ /*
+ * Apply the planned item changes, then repair page fragmentation, and
+ * update the page's hint bit about whether it has free line pointers.
+ */
+ heap_page_prune_execute(buffer,
+ prstate.redirected, prstate.nredirected,
+ prstate.nowdead, prstate.ndead,
+ prstate.nowunused, prstate.nunused);
+
+ /*
+ * Update the page's pd_prune_xid field to either zero, or the lowest
+ * XID of any soon-prunable tuple.
+ */
+ ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
+
+ /*
+ * Also clear the "page is full" flag, since there's no point in
+ * repeating the prune/defrag process until something else happens to
+ * the page.
+ */
+ PageClearFull(page);
+
+ MarkBufferDirty(buffer);
+
+ /*
+ * Emit a WAL XLOG_HEAP2_PRUNE record showing what we did
+ */
+ if (RelationNeedsWAL(relation))
+ {
+ xl_heap_prune xlrec;
+ XLogRecPtr recptr;
+
+ xlrec.latestRemovedXid = prstate.latestRemovedXid;
+ xlrec.nredirected = prstate.nredirected;
+ xlrec.ndead = prstate.ndead;
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfHeapPrune);
+
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+
+ /*
+ * The OffsetNumber arrays are not actually in the buffer, but we
+ * pretend that they are. When XLogInsert stores the whole
+ * buffer, the offset arrays need not be stored too.
+ */
+ if (prstate.nredirected > 0)
+ XLogRegisterBufData(0, (char *) prstate.redirected,
+ prstate.nredirected *
+ sizeof(OffsetNumber) * 2);
+
+ if (prstate.ndead > 0)
+ XLogRegisterBufData(0, (char *) prstate.nowdead,
+ prstate.ndead * sizeof(OffsetNumber));
+
+ if (prstate.nunused > 0)
+ XLogRegisterBufData(0, (char *) prstate.nowunused,
+ prstate.nunused * sizeof(OffsetNumber));
+
+ recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_PRUNE);
+
+ PageSetLSN(BufferGetPage(buffer), recptr);
+ }
+ }
+ else
+ {
+ /*
+ * If we didn't prune anything, but have found a new value for the
+ * pd_prune_xid field, update it and mark the buffer dirty. This is
+ * treated as a non-WAL-logged hint.
+ *
+ * Also clear the "page is full" flag if it is set, since there's no
+ * point in repeating the prune/defrag process until something else
+ * happens to the page.
+ */
+ if (((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
+ PageIsFull(page))
+ {
+ ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
+ PageClearFull(page);
+ MarkBufferDirtyHint(buffer, true);
+ }
+ }
+
+ END_CRIT_SECTION();
+
+ /*
+ * If requested, report the number of tuples reclaimed to pgstats. This is
+ * ndeleted minus ndead, because we don't want to count a now-DEAD root
+ * item as a deletion for this purpose.
+ */
+ if (report_stats && ndeleted > prstate.ndead)
+ pgstat_update_heap_dead_tuples(relation, ndeleted - prstate.ndead);
+
+ /*
+ * XXX Should we update the FSM information of this page ?
+ *
+ * There are two schools of thought here. We may not want to update FSM
+ * information so that the page is not used for unrelated UPDATEs/INSERTs
+ * and any free space in this page will remain available for further
+ * UPDATEs in *this* page, thus improving chances for doing HOT updates.
+ *
+ * But for a large table and where a page does not receive further UPDATEs
+ * for a long time, we might waste this space by not updating the FSM
+ * information. The relation may get extended and fragmented further.
+ *
+ * One possibility is to leave "fillfactor" worth of space in this page
+ * and update FSM with the remaining space.
+ */
+
+ return ndeleted;
+}
+
+
+/*
+ * Perform visibility checks for heap pruning.
+ *
+ * This is more complicated than just using GlobalVisTestIsRemovableXid()
+ * because of old_snapshot_threshold. We only want to increase the threshold
+ * that triggers errors for old snapshots when we actually decide to remove a
+ * row based on the limited horizon.
+ *
+ * Due to its cost we also only want to call
+ * TransactionIdLimitedForOldSnapshots() if necessary, i.e. we might not have
+ * done so in heap_page_prune_opt() if pd_prune_xid was old enough. But we
+ * still want to be able to remove rows that are too new to be removed
+ * according to prstate->vistest but that can be removed based on
+ * old_snapshot_threshold. So we call TransactionIdLimitedForOldSnapshots()
+ * on demand in here, if appropriate.
+ */
+static HTSV_Result
+heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
+{
+ HTSV_Result res;
+ TransactionId dead_after;
+
+ res = HeapTupleSatisfiesVacuumHorizon(tup, buffer, &dead_after);
+
+ if (res != HEAPTUPLE_RECENTLY_DEAD)
+ return res;
+
+ /*
+ * If we are already relying on the limited xmin, there is no need to
+ * delay doing so anymore.
+ */
+ if (prstate->old_snap_used)
+ {
+ Assert(TransactionIdIsValid(prstate->old_snap_xmin));
+
+ if (TransactionIdPrecedes(dead_after, prstate->old_snap_xmin))
+ res = HEAPTUPLE_DEAD;
+ return res;
+ }
+
+ /*
+ * First check if GlobalVisTestIsRemovableXid() is sufficient to find the
+ * row dead. If not, and old_snapshot_threshold is enabled, try to use the
+ * lowered horizon.
+ */
+ if (GlobalVisTestIsRemovableXid(prstate->vistest, dead_after))
+ res = HEAPTUPLE_DEAD;
+ else if (OldSnapshotThresholdActive())
+ {
+		/* haven't determined the limited horizon yet; do so now */
+ if (!TransactionIdIsValid(prstate->old_snap_xmin))
+ {
+ TransactionId horizon =
+ GlobalVisTestNonRemovableHorizon(prstate->vistest);
+
+ TransactionIdLimitedForOldSnapshots(horizon, prstate->rel,
+ &prstate->old_snap_xmin,
+ &prstate->old_snap_ts);
+ }
+
+ if (TransactionIdIsValid(prstate->old_snap_xmin) &&
+ TransactionIdPrecedes(dead_after, prstate->old_snap_xmin))
+ {
+ /*
+			 * About to remove a row based on snapshot_too_old. We need to
+			 * raise the threshold so that problematic accesses will error.
+ */
+ Assert(!prstate->old_snap_used);
+ SetOldSnapshotThresholdTimestamp(prstate->old_snap_ts,
+ prstate->old_snap_xmin);
+ prstate->old_snap_used = true;
+ res = HEAPTUPLE_DEAD;
+ }
+ }
+
+ return res;
+}
+
+
+/*
+ * Prune specified line pointer or a HOT chain originating at line pointer.
+ *
+ * If the item is an index-referenced tuple (i.e. not a heap-only tuple),
+ * the HOT chain is pruned by removing all DEAD tuples at the start of the HOT
+ * chain. We also prune any RECENTLY_DEAD tuples preceding a DEAD tuple.
+ * This is OK because a RECENTLY_DEAD tuple preceding a DEAD tuple is really
+ * DEAD, our visibility test is just too coarse to detect it.
+ *
+ * The root line pointer is redirected to the tuple immediately after the
+ * latest DEAD tuple. If all tuples in the chain are DEAD, the root line
+ * pointer is marked LP_DEAD. (This includes the case of a DEAD simple
+ * tuple, which we treat as a chain of length 1.)
+ *
+ * We don't actually change the page here. We just add entries to the arrays in
+ * prstate showing the changes to be made. Items to be redirected are added
+ * to the redirected[] array (two entries per redirection); items to be set to
+ * LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
+ * state are added to nowunused[].
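+ *
+ * For example, if a chain runs through items 1 -> 2 -> 3 -> 4 and the
+ * tuples at items 1, 2 and 3 are DEAD while item 4 is LIVE, we record a
+ * redirect 1 -> 4 and mark items 2 and 3 unused. If item 4 were DEAD as
+ * well, item 1 would instead be marked LP_DEAD and items 2-4 unused.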
+ *
+ * Returns the number of tuples (to be) deleted from the page.
+ */
+static int
+heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum, PruneState *prstate)
+{
+ int ndeleted = 0;
+ Page dp = (Page) BufferGetPage(buffer);
+ TransactionId priorXmax = InvalidTransactionId;
+ ItemId rootlp;
+ HeapTupleHeader htup;
+ OffsetNumber latestdead = InvalidOffsetNumber,
+ maxoff = PageGetMaxOffsetNumber(dp),
+ offnum;
+ OffsetNumber chainitems[MaxHeapTuplesPerPage];
+ int nchain = 0,
+ i;
+
+ rootlp = PageGetItemId(dp, rootoffnum);
+
+ /*
+ * If it's a heap-only tuple, then it is not the start of a HOT chain.
+ */
+ if (ItemIdIsNormal(rootlp))
+ {
+ Assert(prstate->htsv[rootoffnum] != -1);
+ htup = (HeapTupleHeader) PageGetItem(dp, rootlp);
+
+ if (HeapTupleHeaderIsHeapOnly(htup))
+ {
+ /*
+ * If the tuple is DEAD and doesn't chain to anything else, mark
+ * it unused immediately. (If it does chain, we can only remove
+ * it as part of pruning its chain.)
+ *
+ * We need this primarily to handle aborted HOT updates, that is,
+ * XMIN_INVALID heap-only tuples. Those might not be linked to by
+ * any chain, since the parent tuple might be re-updated before
+ * any pruning occurs. So we have to be able to reap them
+ * separately from chain-pruning. (Note that
+ * HeapTupleHeaderIsHotUpdated will never return true for an
+ * XMIN_INVALID tuple, so this code will work even when there were
+ * sequential updates within the aborted transaction.)
+ *
+ * Note that we might first arrive at a dead heap-only tuple
+ * either here or while following a chain below. Whichever path
+ * gets there first will mark the tuple unused.
+ */
+ if (prstate->htsv[rootoffnum] == HEAPTUPLE_DEAD &&
+ !HeapTupleHeaderIsHotUpdated(htup))
+ {
+ heap_prune_record_unused(prstate, rootoffnum);
+ HeapTupleHeaderAdvanceLatestRemovedXid(htup,
+ &prstate->latestRemovedXid);
+ ndeleted++;
+ }
+
+ /* Nothing more to do */
+ return ndeleted;
+ }
+ }
+
+ /* Start from the root tuple */
+ offnum = rootoffnum;
+
+ /* while not end of the chain */
+ for (;;)
+ {
+ ItemId lp;
+ bool tupdead,
+ recent_dead;
+
+ /* Some sanity checks */
+ if (offnum < FirstOffsetNumber || offnum > maxoff)
+ break;
+
+		/* If item is already processed, stop --- it must not be the same chain */
+ if (prstate->marked[offnum])
+ break;
+
+ lp = PageGetItemId(dp, offnum);
+
+ /* Unused item obviously isn't part of the chain */
+ if (!ItemIdIsUsed(lp))
+ break;
+
+ /*
+ * If we are looking at the redirected root line pointer, jump to the
+ * first normal tuple in the chain. If we find a redirect somewhere
+		 * else, stop --- it must not be the same chain.
+ */
+ if (ItemIdIsRedirected(lp))
+ {
+ if (nchain > 0)
+ break; /* not at start of chain */
+ chainitems[nchain++] = offnum;
+ offnum = ItemIdGetRedirect(rootlp);
+ continue;
+ }
+
+ /*
+ * Likewise, a dead line pointer can't be part of the chain. (We
+ * already eliminated the case of dead root tuple outside this
+		 * already eliminated the case of a dead root tuple outside this
+ */
+ if (ItemIdIsDead(lp))
+ break;
+
+ Assert(ItemIdIsNormal(lp));
+ Assert(prstate->htsv[offnum] != -1);
+ htup = (HeapTupleHeader) PageGetItem(dp, lp);
+
+ /*
+ * Check the tuple XMIN against prior XMAX, if any
+ */
+ if (TransactionIdIsValid(priorXmax) &&
+ !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
+ break;
+
+ /*
+ * OK, this tuple is indeed a member of the chain.
+ */
+ chainitems[nchain++] = offnum;
+
+ /*
+ * Check tuple's visibility status.
+ */
+ tupdead = recent_dead = false;
+
+ switch ((HTSV_Result) prstate->htsv[offnum])
+ {
+ case HEAPTUPLE_DEAD:
+ tupdead = true;
+ break;
+
+ case HEAPTUPLE_RECENTLY_DEAD:
+ recent_dead = true;
+
+ /*
+ * This tuple may soon become DEAD. Update the hint field so
+ * that the page is reconsidered for pruning in future.
+ */
+ heap_prune_record_prunable(prstate,
+ HeapTupleHeaderGetUpdateXid(htup));
+ break;
+
+ case HEAPTUPLE_DELETE_IN_PROGRESS:
+
+ /*
+ * This tuple may soon become DEAD. Update the hint field so
+ * that the page is reconsidered for pruning in future.
+ */
+ heap_prune_record_prunable(prstate,
+ HeapTupleHeaderGetUpdateXid(htup));
+ break;
+
+ case HEAPTUPLE_LIVE:
+ case HEAPTUPLE_INSERT_IN_PROGRESS:
+
+ /*
+ * If we wanted to optimize for aborts, we might consider
+ * marking the page prunable when we see INSERT_IN_PROGRESS.
+ * But we don't. See related decisions about when to mark the
+ * page prunable in heapam.c.
+ */
+ break;
+
+ default:
+ elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
+ break;
+ }
+
+ /*
+ * Remember the last DEAD tuple seen. We will advance past
+ * RECENTLY_DEAD tuples just in case there's a DEAD one after them;
+ * but we can't advance past anything else. (XXX is it really worth
+ * continuing to scan beyond RECENTLY_DEAD? The case where we will
+ * find another DEAD tuple is a fairly unusual corner case.)
+ */
+ if (tupdead)
+ {
+ latestdead = offnum;
+ HeapTupleHeaderAdvanceLatestRemovedXid(htup,
+ &prstate->latestRemovedXid);
+ }
+ else if (!recent_dead)
+ break;
+
+ /*
+ * If the tuple is not HOT-updated, then we are at the end of this
+ * HOT-update chain.
+ */
+ if (!HeapTupleHeaderIsHotUpdated(htup))
+ break;
+
+ /* HOT implies it can't have moved to different partition */
+ Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
+
+ /*
+ * Advance to next chain member.
+ */
+ Assert(ItemPointerGetBlockNumber(&htup->t_ctid) ==
+ BufferGetBlockNumber(buffer));
+ offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
+ priorXmax = HeapTupleHeaderGetUpdateXid(htup);
+ }
+
+ /*
+ * If we found a DEAD tuple in the chain, adjust the HOT chain so that all
+ * the DEAD tuples at the start of the chain are removed and the root line
+ * pointer is appropriately redirected.
+ */
+ if (OffsetNumberIsValid(latestdead))
+ {
+ /*
+ * Mark as unused each intermediate item that we are able to remove
+ * from the chain.
+ *
+ * When the previous item is the last dead tuple seen, we are at the
+ * right candidate for redirection.
+ */
+ for (i = 1; (i < nchain) && (chainitems[i - 1] != latestdead); i++)
+ {
+ heap_prune_record_unused(prstate, chainitems[i]);
+ ndeleted++;
+ }
+
+ /*
+ * If the root entry had been a normal tuple, we are deleting it, so
+ * count it in the result. But changing a redirect (even to DEAD
+ * state) doesn't count.
+ */
+ if (ItemIdIsNormal(rootlp))
+ ndeleted++;
+
+ /*
+ * If the DEAD tuple is at the end of the chain, the entire chain is
+ * dead and the root line pointer can be marked dead. Otherwise just
+ * redirect the root to the correct chain member.
+ */
+ if (i >= nchain)
+ heap_prune_record_dead(prstate, rootoffnum);
+ else
+ heap_prune_record_redirect(prstate, rootoffnum, chainitems[i]);
+ }
+ else if (nchain < 2 && ItemIdIsRedirected(rootlp))
+ {
+ /*
+ * We found a redirect item that doesn't point to a valid follow-on
+ * item. This can happen if the loop in heap_page_prune caused us to
+ * visit the dead successor of a redirect item before visiting the
+ * redirect item. We can clean up by setting the redirect item to
+ * DEAD state.
+ */
+ heap_prune_record_dead(prstate, rootoffnum);
+ }
+
+ return ndeleted;
+}
+
+/* Record lowest soon-prunable XID */
+static void
+heap_prune_record_prunable(PruneState *prstate, TransactionId xid)
+{
+ /*
+ * This should exactly match the PageSetPrunable macro. We can't store
+ * directly into the page header yet, so we update working state.
+ */
+ Assert(TransactionIdIsNormal(xid));
+ if (!TransactionIdIsValid(prstate->new_prune_xid) ||
+ TransactionIdPrecedes(xid, prstate->new_prune_xid))
+ prstate->new_prune_xid = xid;
+}
+
+/*
+ * Record line pointer to be redirected.
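+ *
+ * (The array is laid out as from/to pairs: recording, say, the redirections
+ * 21 -> 24 and then 7 -> 9 leaves redirected[] holding {21, 24, 7, 9} with
+ * nredirected == 2.)
+ */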
+static void
+heap_prune_record_redirect(PruneState *prstate,
+ OffsetNumber offnum, OffsetNumber rdoffnum)
+{
+ Assert(prstate->nredirected < MaxHeapTuplesPerPage);
+ prstate->redirected[prstate->nredirected * 2] = offnum;
+ prstate->redirected[prstate->nredirected * 2 + 1] = rdoffnum;
+ prstate->nredirected++;
+ Assert(!prstate->marked[offnum]);
+ prstate->marked[offnum] = true;
+ Assert(!prstate->marked[rdoffnum]);
+ prstate->marked[rdoffnum] = true;
+}
+
+/* Record line pointer to be marked dead */
+static void
+heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum)
+{
+ Assert(prstate->ndead < MaxHeapTuplesPerPage);
+ prstate->nowdead[prstate->ndead] = offnum;
+ prstate->ndead++;
+ Assert(!prstate->marked[offnum]);
+ prstate->marked[offnum] = true;
+}
+
+/* Record line pointer to be marked unused */
+static void
+heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
+{
+ Assert(prstate->nunused < MaxHeapTuplesPerPage);
+ prstate->nowunused[prstate->nunused] = offnum;
+ prstate->nunused++;
+ Assert(!prstate->marked[offnum]);
+ prstate->marked[offnum] = true;
+}
+
+
+/*
+ * Perform the actual page changes needed by heap_page_prune.
+ * It is expected that the caller has a super-exclusive lock on the
+ * buffer.
+ */
+void
+heap_page_prune_execute(Buffer buffer,
+ OffsetNumber *redirected, int nredirected,
+ OffsetNumber *nowdead, int ndead,
+ OffsetNumber *nowunused, int nunused)
+{
+ Page page = (Page) BufferGetPage(buffer);
+ OffsetNumber *offnum;
+ int i;
+
+ /* Shouldn't be called unless there's something to do */
+ Assert(nredirected > 0 || ndead > 0 || nunused > 0);
+
+ /* Update all redirected line pointers */
+ offnum = redirected;
+ for (i = 0; i < nredirected; i++)
+ {
+ OffsetNumber fromoff = *offnum++;
+ OffsetNumber tooff = *offnum++;
+ ItemId fromlp = PageGetItemId(page, fromoff);
+
+ ItemIdSetRedirect(fromlp, tooff);
+ }
+
+ /* Update all now-dead line pointers */
+ offnum = nowdead;
+ for (i = 0; i < ndead; i++)
+ {
+ OffsetNumber off = *offnum++;
+ ItemId lp = PageGetItemId(page, off);
+
+ ItemIdSetDead(lp);
+ }
+
+ /* Update all now-unused line pointers */
+ offnum = nowunused;
+ for (i = 0; i < nunused; i++)
+ {
+ OffsetNumber off = *offnum++;
+ ItemId lp = PageGetItemId(page, off);
+
+ ItemIdSetUnused(lp);
+ }
+
+ /*
+ * Finally, repair any fragmentation, and update the page's hint bit about
+	 * whether it has free line pointers.
+ */
+ PageRepairFragmentation(page);
+}
+
+
+/*
+ * For all items in this page, find their respective root line pointers.
+ * If item k is part of a HOT-chain with root at item j, then we set
+ * root_offsets[k - 1] = j.
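+ * For example, a HOT chain rooted at item 3 that continues through items 6
+ * and 7 yields root_offsets[2] = root_offsets[5] = root_offsets[6] = 3.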
+ *
+ * The passed-in root_offsets array must have MaxHeapTuplesPerPage entries.
+ * Unused entries are filled with InvalidOffsetNumber (zero).
+ *
+ * The function must be called with at least share lock on the buffer, to
+ * prevent concurrent prune operations.
+ *
+ * Note: The information collected here is valid only as long as the caller
+ * holds a pin on the buffer. Once the pin is released, a tuple might be
+ * pruned and its line pointer reused for a completely unrelated tuple.
+ */
+void
+heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
+{
+ OffsetNumber offnum,
+ maxoff;
+
+ MemSet(root_offsets, InvalidOffsetNumber,
+ MaxHeapTuplesPerPage * sizeof(OffsetNumber));
+
+ maxoff = PageGetMaxOffsetNumber(page);
+ for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
+ {
+ ItemId lp = PageGetItemId(page, offnum);
+ HeapTupleHeader htup;
+ OffsetNumber nextoffnum;
+ TransactionId priorXmax;
+
+ /* skip unused and dead items */
+ if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
+ continue;
+
+ if (ItemIdIsNormal(lp))
+ {
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ /*
+ * Check if this tuple is part of a HOT-chain rooted at some other
+ * tuple. If so, skip it for now; we'll process it when we find
+ * its root.
+ */
+ if (HeapTupleHeaderIsHeapOnly(htup))
+ continue;
+
+ /*
+ * This is either a plain tuple or the root of a HOT-chain.
+ * Remember it in the mapping.
+ */
+ root_offsets[offnum - 1] = offnum;
+
+ /* If it's not the start of a HOT-chain, we're done with it */
+ if (!HeapTupleHeaderIsHotUpdated(htup))
+ continue;
+
+ /* Set up to scan the HOT-chain */
+ nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
+ priorXmax = HeapTupleHeaderGetUpdateXid(htup);
+ }
+ else
+ {
+ /* Must be a redirect item. We do not set its root_offsets entry */
+ Assert(ItemIdIsRedirected(lp));
+ /* Set up to scan the HOT-chain */
+ nextoffnum = ItemIdGetRedirect(lp);
+ priorXmax = InvalidTransactionId;
+ }
+
+ /*
+ * Now follow the HOT-chain and collect other tuples in the chain.
+ *
+ * Note: Even though this is a nested loop, the complexity of the
+		 * function is O(N) because each tuple on the page is visited at most
+		 * twice: once in the outer loop and once while chasing a HOT chain.
+ */
+ for (;;)
+ {
+ /* Sanity check */
+ if (nextoffnum < FirstOffsetNumber || nextoffnum > maxoff)
+ break;
+
+ lp = PageGetItemId(page, nextoffnum);
+
+ /* Check for broken chains */
+ if (!ItemIdIsNormal(lp))
+ break;
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ if (TransactionIdIsValid(priorXmax) &&
+ !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(htup)))
+ break;
+
+ /* Remember the root line pointer for this item */
+ root_offsets[nextoffnum - 1] = offnum;
+
+ /* Advance to next chain member, if any */
+ if (!HeapTupleHeaderIsHotUpdated(htup))
+ break;
+
+ /* HOT implies it can't have moved to different partition */
+ Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
+
+ nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
+ priorXmax = HeapTupleHeaderGetUpdateXid(htup);
+ }
+ }
+}
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
new file mode 100644
index 0000000..15bef9f
--- /dev/null
+++ b/src/backend/access/heap/rewriteheap.c
@@ -0,0 +1,1295 @@
+/*-------------------------------------------------------------------------
+ *
+ * rewriteheap.c
+ * Support functions to rewrite tables.
+ *
+ * These functions provide a facility to completely rewrite a heap, while
+ * preserving visibility information and update chains.
+ *
+ * INTERFACE
+ *
+ * The caller is responsible for creating the new heap, all catalog
+ * changes, supplying the tuples to be written to the new heap, and
+ * rebuilding indexes. The caller must hold AccessExclusiveLock on the
+ * target table, because we assume no one else is writing into it.
+ *
+ * To use the facility:
+ *
+ * begin_heap_rewrite
+ * while (fetch next tuple)
+ * {
+ * if (tuple is dead)
+ * rewrite_heap_dead_tuple
+ * else
+ * {
+ * // do any transformations here if required
+ * rewrite_heap_tuple
+ * }
+ * }
+ * end_heap_rewrite
+ *
+ * The contents of the new relation shouldn't be relied on until after
+ * end_heap_rewrite is called.
+ *
+ *
+ * IMPLEMENTATION
+ *
+ * This would be a fairly trivial affair, except that we need to maintain
+ * the ctid chains that link versions of an updated tuple together.
+ * Since the newly stored tuples will have tids different from the original
+ * ones, if we just copied t_ctid fields to the new table the links would
+ * be wrong. When we are required to copy a (presumably recently-dead or
+ * delete-in-progress) tuple whose ctid doesn't point to itself, we have
+ * to substitute the correct ctid instead.
+ *
+ * For each ctid reference from A -> B, we might encounter either A first
+ * or B first. (Note that a tuple in the middle of a chain is both A and B
+ * of different pairs.)
+ *
+ * If we encounter A first, we'll store the tuple in the unresolved_tups
+ * hash table. When we later encounter B, we remove A from the hash table,
+ * fix the ctid to point to the new location of B, and insert both A and B
+ * into the new heap.
+ *
+ * If we encounter B first, we can insert B into the new heap right away.
+ * We then add an entry to the old_new_tid_map hash table showing B's
+ * original tid (in the old heap) and new tid (in the new heap).
+ * When we later encounter A, we get the new location of B from the table,
+ * and can write A immediately with the correct ctid.
+ *
+ * Entries in the hash tables can be removed as soon as the later tuple
+ * is encountered. That helps to keep the memory usage down. At the end,
+ * both tables are usually empty; we should have encountered both A and B
+ * of each pair. However, it's possible for A to be RECENTLY_DEAD and B
+ * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
+ * for deadness using OldestXmin is not exact. In such a case we might
+ * encounter B first, and skip it, and find A later. Then A would be added
+ * to unresolved_tups, and stay there until end of the rewrite. Since
+ * this case is very unusual, we don't worry about the memory usage.
+ *
+ * Using in-memory hash tables means that we use some memory for each live
+ * update chain in the table, from the time we find one end of the
+ * reference until we find the other end. That shouldn't be a problem in
+ * practice, but if you do something like an UPDATE without a where-clause
+ * on a large table, and then run CLUSTER in the same transaction, you
+ * could run out of memory. It doesn't seem worthwhile to add support for
+ * spill-to-disk, as there shouldn't be that many RECENTLY_DEAD tuples in a
+ * table under normal circumstances. Furthermore, in the typical scenario
+ * of CLUSTERing on an unchanging key column, we'll see all the versions
+ * of a given tuple together anyway, and so the peak memory usage is only
+ * proportional to the number of RECENTLY_DEAD versions of a single row, not
+ * proportional to the number of RECENTLY_DEAD versions of a single row,
+ * not to the number in the whole table. Note that if we do fail halfway
+ * through a CLUSTER,
+ *
+ * We can't use the normal heap_insert function to insert into the new
+ * heap, because heap_insert overwrites the visibility information.
+ * We use a special-purpose raw_heap_insert function instead, which
+ * is optimized for bulk inserting a lot of tuples, knowing that we have
+ * exclusive access to the heap. raw_heap_insert builds new pages in
+ * local storage. When a page is full, or at the end of the process,
+ * we insert it to WAL as a single record and then write it to disk
+ * directly through smgr. Note, however, that any data sent to the new
+ * heap's TOAST table will go through the normal bufmgr.
+ *
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994-5, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * src/backend/access/heap/rewriteheap.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "access/heapam.h"
+#include "access/heapam_xlog.h"
+#include "access/heaptoast.h"
+#include "access/rewriteheap.h"
+#include "access/transam.h"
+#include "access/xact.h"
+#include "access/xloginsert.h"
+#include "catalog/catalog.h"
+#include "lib/ilist.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "replication/logical.h"
+#include "replication/slot.h"
+#include "storage/bufmgr.h"
+#include "storage/fd.h"
+#include "storage/procarray.h"
+#include "storage/smgr.h"
+#include "utils/memutils.h"
+#include "utils/rel.h"
+
+/*
+ * State associated with a rewrite operation. This is opaque to the user
+ * of the rewrite facility.
+ */
+typedef struct RewriteStateData
+{
+ Relation rs_old_rel; /* source heap */
+ Relation rs_new_rel; /* destination heap */
+ Page rs_buffer; /* page currently being built */
+ BlockNumber rs_blockno; /* block where page will go */
+ bool rs_buffer_valid; /* T if any tuples in buffer */
+ bool rs_logical_rewrite; /* do we need to do logical rewriting */
+ TransactionId rs_oldest_xmin; /* oldest xmin used by caller to determine
+ * tuple visibility */
+ TransactionId rs_freeze_xid; /* Xid that will be used as freeze cutoff
+ * point */
+ TransactionId rs_logical_xmin; /* Xid that will be used as cutoff point
+ * for logical rewrites */
+ MultiXactId rs_cutoff_multi; /* MultiXactId that will be used as cutoff
+ * point for multixacts */
+ MemoryContext rs_cxt; /* for hash tables and entries and tuples in
+ * them */
+ XLogRecPtr rs_begin_lsn; /* XLogInsertLsn when starting the rewrite */
+ HTAB *rs_unresolved_tups; /* unmatched A tuples */
+ HTAB *rs_old_new_tid_map; /* unmatched B tuples */
+ HTAB *rs_logical_mappings; /* logical remapping files */
+	uint32		rs_num_rewrite_mappings;	/* # of in-memory mappings */
+} RewriteStateData;
+
+/*
+ * The lookup keys for the hash tables are tuple TID and xmin (we must check
+ * both to avoid false matches from dead tuples). Beware that there is
+ * probably some padding space in this struct; it must be zeroed out for
+ * correct hashtable operation.
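+ * (Hence the memset(&hashkey, 0, sizeof(hashkey)) calls before each use in
+ * this file.)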
+ */
+typedef struct
+{
+ TransactionId xmin; /* tuple xmin */
+ ItemPointerData tid; /* tuple location in old heap */
+} TidHashKey;
+
+/*
+ * Entry structures for the hash tables
+ */
+typedef struct
+{
+ TidHashKey key; /* expected xmin/old location of B tuple */
+ ItemPointerData old_tid; /* A's location in the old heap */
+ HeapTuple tuple; /* A's tuple contents */
+} UnresolvedTupData;
+
+typedef UnresolvedTupData *UnresolvedTup;
+
+typedef struct
+{
+ TidHashKey key; /* actual xmin/old location of B tuple */
+ ItemPointerData new_tid; /* where we put it in the new heap */
+} OldToNewMappingData;
+
+typedef OldToNewMappingData *OldToNewMapping;
+
+/*
+ * In-Memory data for an xid that might need logical remapping entries
+ * to be logged.
+ */
+typedef struct RewriteMappingFile
+{
+ TransactionId xid; /* xid that might need to see the row */
+ int vfd; /* fd of mappings file */
+	off_t		off;			/* how far we have written so far */
+ uint32 num_mappings; /* number of in-memory mappings */
+ dlist_head mappings; /* list of in-memory mappings */
+ char path[MAXPGPATH]; /* path, for error messages */
+} RewriteMappingFile;
+
+/*
+ * A single In-Memory logical rewrite mapping, hanging off
+ * RewriteMappingFile->mappings.
+ */
+typedef struct RewriteMappingDataEntry
+{
+ LogicalRewriteMappingData map; /* map between old and new location of the
+ * tuple */
+ dlist_node node;
+} RewriteMappingDataEntry;
+
+
+/* prototypes for internal functions */
+static void raw_heap_insert(RewriteState state, HeapTuple tup);
+
+/* internal logical remapping prototypes */
+static void logical_begin_heap_rewrite(RewriteState state);
+static void logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple);
+static void logical_end_heap_rewrite(RewriteState state);
+
+
+/*
+ * Begin a rewrite of a table
+ *
+ * old_heap old, locked heap relation tuples will be read from
+ * new_heap new, locked heap relation to insert tuples to
+ * oldest_xmin xid used by the caller to determine which tuples are dead
+ * freeze_xid xid before which tuples will be frozen
+ * cutoff_multi multixact before which multis will be removed
+ *
+ * Returns an opaque RewriteState, allocated in current memory context,
+ * to be used in subsequent calls to the other functions.
+ */
+RewriteState
+begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin,
+ TransactionId freeze_xid, MultiXactId cutoff_multi)
+{
+ RewriteState state;
+ MemoryContext rw_cxt;
+ MemoryContext old_cxt;
+ HASHCTL hash_ctl;
+
+ /*
+ * To ease cleanup, make a separate context that will contain the
+ * RewriteState struct itself plus all subsidiary data.
+ */
+ rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
+ "Table rewrite",
+ ALLOCSET_DEFAULT_SIZES);
+ old_cxt = MemoryContextSwitchTo(rw_cxt);
+
+ /* Create and fill in the state struct */
+ state = palloc0(sizeof(RewriteStateData));
+
+ state->rs_old_rel = old_heap;
+ state->rs_new_rel = new_heap;
+ state->rs_buffer = (Page) palloc(BLCKSZ);
+ /* new_heap needn't be empty, just locked */
+ state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
+ state->rs_buffer_valid = false;
+ state->rs_oldest_xmin = oldest_xmin;
+ state->rs_freeze_xid = freeze_xid;
+ state->rs_cutoff_multi = cutoff_multi;
+ state->rs_cxt = rw_cxt;
+
+ /* Initialize hash tables used to track update chains */
+ hash_ctl.keysize = sizeof(TidHashKey);
+ hash_ctl.entrysize = sizeof(UnresolvedTupData);
+ hash_ctl.hcxt = state->rs_cxt;
+
+ state->rs_unresolved_tups =
+ hash_create("Rewrite / Unresolved ctids",
+ 128, /* arbitrary initial size */
+ &hash_ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+
+ hash_ctl.entrysize = sizeof(OldToNewMappingData);
+
+ state->rs_old_new_tid_map =
+ hash_create("Rewrite / Old to new tid map",
+ 128, /* arbitrary initial size */
+ &hash_ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+
+ MemoryContextSwitchTo(old_cxt);
+
+ logical_begin_heap_rewrite(state);
+
+ return state;
+}
+
+/*
+ * End a rewrite.
+ *
+ * state and any other resources are freed.
+ */
+void
+end_heap_rewrite(RewriteState state)
+{
+ HASH_SEQ_STATUS seq_status;
+ UnresolvedTup unresolved;
+
+ /*
+ * Write any remaining tuples in the UnresolvedTups table. If we have any
+ * left, they should in fact be dead, but let's err on the safe side.
+ */
+ hash_seq_init(&seq_status, state->rs_unresolved_tups);
+
+ while ((unresolved = hash_seq_search(&seq_status)) != NULL)
+ {
+ ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
+ raw_heap_insert(state, unresolved->tuple);
+ }
+
+ /* Write the last page, if any */
+ if (state->rs_buffer_valid)
+ {
+ if (RelationNeedsWAL(state->rs_new_rel))
+ log_newpage(&state->rs_new_rel->rd_node,
+ MAIN_FORKNUM,
+ state->rs_blockno,
+ state->rs_buffer,
+ true);
+
+ PageSetChecksumInplace(state->rs_buffer, state->rs_blockno);
+
+ RelationOpenSmgr(state->rs_new_rel);
+ smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM, state->rs_blockno,
+ (char *) state->rs_buffer, true);
+ }
+
+ /*
+	 * Even when we WAL-logged the rel's pages, we must still fsync them. The
+ * reason is the same as in storage.c's RelationCopyStorage(): we're
+ * writing data that's not in shared buffers, and so a CHECKPOINT
+ * occurring during the rewriteheap operation won't have fsync'd data we
+ * wrote before the checkpoint.
+ */
+ if (RelationNeedsWAL(state->rs_new_rel))
+ {
+ /* for an empty table, this could be first smgr access */
+ RelationOpenSmgr(state->rs_new_rel);
+ smgrimmedsync(state->rs_new_rel->rd_smgr, MAIN_FORKNUM);
+ }
+
+ logical_end_heap_rewrite(state);
+
+ /* Deleting the context frees everything */
+ MemoryContextDelete(state->rs_cxt);
+}
+
+/*
+ * Add a tuple to the new heap.
+ *
+ * Visibility information is copied from the original tuple, except that
+ * we "freeze" very-old tuples. Note that since we scribble on new_tuple,
+ * it had better be temp storage, not a pointer to the original tuple.
+ *
+ * state opaque state as returned by begin_heap_rewrite
+ * old_tuple original tuple in the old heap
+ * new_tuple	new, rewritten tuple to be inserted into the new heap
+ */
+void
+rewrite_heap_tuple(RewriteState state,
+ HeapTuple old_tuple, HeapTuple new_tuple)
+{
+ MemoryContext old_cxt;
+ ItemPointerData old_tid;
+ TidHashKey hashkey;
+ bool found;
+ bool free_new;
+
+ old_cxt = MemoryContextSwitchTo(state->rs_cxt);
+
+ /*
+ * Copy the original tuple's visibility information into new_tuple.
+ *
+ * XXX we might later need to copy some t_infomask2 bits, too? Right now,
+ * we intentionally clear the HOT status bits.
+ */
+ memcpy(&new_tuple->t_data->t_choice.t_heap,
+ &old_tuple->t_data->t_choice.t_heap,
+ sizeof(HeapTupleFields));
+
+ new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
+ new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
+ new_tuple->t_data->t_infomask |=
+ old_tuple->t_data->t_infomask & HEAP_XACT_MASK;
+
+ /*
+ * While we have our hands on the tuple, we may as well freeze any
+ * eligible xmin or xmax, so that future VACUUM effort can be saved.
+ */
+ heap_freeze_tuple(new_tuple->t_data,
+ state->rs_old_rel->rd_rel->relfrozenxid,
+ state->rs_old_rel->rd_rel->relminmxid,
+ state->rs_freeze_xid,
+ state->rs_cutoff_multi);
+
+ /*
+ * Invalid ctid means that ctid should point to the tuple itself. We'll
+ * override it later if the tuple is part of an update chain.
+ */
+ ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);
+
+ /*
+ * If the tuple has been updated, check the old-to-new mapping hash table.
+ */
+ if (!((old_tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
+ HeapTupleHeaderIsOnlyLocked(old_tuple->t_data)) &&
+ !HeapTupleHeaderIndicatesMovedPartitions(old_tuple->t_data) &&
+ !(ItemPointerEquals(&(old_tuple->t_self),
+ &(old_tuple->t_data->t_ctid))))
+ {
+ OldToNewMapping mapping;
+
+ memset(&hashkey, 0, sizeof(hashkey));
+ hashkey.xmin = HeapTupleHeaderGetUpdateXid(old_tuple->t_data);
+ hashkey.tid = old_tuple->t_data->t_ctid;
+
+ mapping = (OldToNewMapping)
+ hash_search(state->rs_old_new_tid_map, &hashkey,
+ HASH_FIND, NULL);
+
+ if (mapping != NULL)
+ {
+ /*
+ * We've already copied the tuple that t_ctid points to, so we can
+ * set the ctid of this tuple to point to the new location, and
+ * insert it right away.
+ */
+ new_tuple->t_data->t_ctid = mapping->new_tid;
+
+ /* We don't need the mapping entry anymore */
+ hash_search(state->rs_old_new_tid_map, &hashkey,
+ HASH_REMOVE, &found);
+ Assert(found);
+ }
+ else
+ {
+ /*
+ * We haven't seen the tuple t_ctid points to yet. Stash this
+ * tuple into unresolved_tups to be written later.
+ */
+ UnresolvedTup unresolved;
+
+ unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
+ HASH_ENTER, &found);
+ Assert(!found);
+
+ unresolved->old_tid = old_tuple->t_self;
+ unresolved->tuple = heap_copytuple(new_tuple);
+
+ /*
+ * We can't do anything more now, since we don't know where the
+ * tuple will be written.
+ */
+ MemoryContextSwitchTo(old_cxt);
+ return;
+ }
+ }
+
+ /*
+ * Now we will write the tuple, and then check to see if it is the B tuple
+ * in any new or known pair. When we resolve a known pair, we will be
+ * able to write that pair's A tuple, and then we have to check if it
+ * resolves some other pair. Hence, we need a loop here.
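+	 *
+	 * For example (an illustrative chain of still-recent versions), given
+	 * V1 -> V2 -> V3 encountered in the order V2, V1, V3: V2 and then V1
+	 * each wait in unresolved_tups, and writing V3 lets this loop write V2
+	 * with its ctid fixed, which in turn resolves and writes V1.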
+ */
+ old_tid = old_tuple->t_self;
+ free_new = false;
+
+ for (;;)
+ {
+ ItemPointerData new_tid;
+
+ /* Insert the tuple and find out where it's put in new_heap */
+ raw_heap_insert(state, new_tuple);
+ new_tid = new_tuple->t_self;
+
+ logical_rewrite_heap_tuple(state, old_tid, new_tuple);
+
+ /*
+ * If the tuple is the updated version of a row, and the prior version
+ * wouldn't be DEAD yet, then we need to either resolve the prior
+ * version (if it's waiting in rs_unresolved_tups), or make an entry
+ * in rs_old_new_tid_map (so we can resolve it when we do see it). The
+ * previous tuple's xmax would equal this one's xmin, so it's
+ * RECENTLY_DEAD if and only if the xmin is not before OldestXmin.
+ */
+ if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
+ !TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
+ state->rs_oldest_xmin))
+ {
+ /*
+ * Okay, this is B in an update pair. See if we've seen A.
+ */
+ UnresolvedTup unresolved;
+
+ memset(&hashkey, 0, sizeof(hashkey));
+ hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
+ hashkey.tid = old_tid;
+
+ unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
+ HASH_FIND, NULL);
+
+ if (unresolved != NULL)
+ {
+ /*
+ * We have seen and memorized the previous tuple already. Now
+ * that we know where we inserted the tuple its t_ctid points
+				 * to, fix its t_ctid and insert it into the new heap.
+ */
+ if (free_new)
+ heap_freetuple(new_tuple);
+ new_tuple = unresolved->tuple;
+ free_new = true;
+ old_tid = unresolved->old_tid;
+ new_tuple->t_data->t_ctid = new_tid;
+
+ /*
+ * We don't need the hash entry anymore, but don't free its
+ * tuple just yet.
+ */
+ hash_search(state->rs_unresolved_tups, &hashkey,
+ HASH_REMOVE, &found);
+ Assert(found);
+
+ /* loop back to insert the previous tuple in the chain */
+ continue;
+ }
+ else
+ {
+ /*
+ * Remember the new tid of this tuple. We'll use it to set the
+ * ctid when we find the previous tuple in the chain.
+ */
+ OldToNewMapping mapping;
+
+ mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
+ HASH_ENTER, &found);
+ Assert(!found);
+
+ mapping->new_tid = new_tid;
+ }
+ }
+
+ /* Done with this (chain of) tuples, for now */
+ if (free_new)
+ heap_freetuple(new_tuple);
+ break;
+ }
+
+ MemoryContextSwitchTo(old_cxt);
+}
+
+/*
+ * Register a dead tuple with an ongoing rewrite. Dead tuples are not
+ * copied to the new table, but we still make note of them so that we
+ * can release some resources earlier.
+ *
+ * Returns true if a tuple was removed from the unresolved_tups table.
+ * This indicates that that tuple, previously thought to be "recently dead",
+ * is now known to be really dead and won't be written to the output.
+ */
+bool
+rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
+{
+ /*
+ * If we have already seen an earlier tuple in the update chain that
+ * points to this tuple, let's forget about that earlier tuple. It's in
+	 * fact dead as well; our simple xmax < OldestXmin test in
+ * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It happens
+ * when xmin of a tuple is greater than xmax, which sounds
+ * counter-intuitive but is perfectly valid.
+ *
+ * We don't bother to try to detect the situation the other way round,
+ * when we encounter the dead tuple first and then the recently dead one
+ * that points to it. If that happens, we'll have some unmatched entries
+ * in the UnresolvedTups hash table at the end. That can happen anyway,
+ * because a vacuum might have removed the dead tuple in the chain before
+ * us.
+ */
+ UnresolvedTup unresolved;
+ TidHashKey hashkey;
+ bool found;
+
+ memset(&hashkey, 0, sizeof(hashkey));
+ hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
+ hashkey.tid = old_tuple->t_self;
+
+ unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
+ HASH_FIND, NULL);
+
+ if (unresolved != NULL)
+ {
+ /* Need to free the contained tuple as well as the hashtable entry */
+ heap_freetuple(unresolved->tuple);
+ hash_search(state->rs_unresolved_tups, &hashkey,
+ HASH_REMOVE, &found);
+ Assert(found);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Insert a tuple into the new relation. This has to track heap_insert
+ * and its subsidiary functions!
+ *
+ * t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
+ * tuple is invalid on entry, it's replaced with the new TID as well (in
+ * the inserted data only, not in the caller's copy).
+ */
+static void
+raw_heap_insert(RewriteState state, HeapTuple tup)
+{
+ Page page = state->rs_buffer;
+ Size pageFreeSpace,
+ saveFreeSpace;
+ Size len;
+ OffsetNumber newoff;
+ HeapTuple heaptup;
+
+ /*
+ * If the new tuple is too big for storage or contains already toasted
+ * out-of-line attributes from some other relation, invoke the toaster.
+ *
+ * Note: below this point, heaptup is the data we actually intend to store
+ * into the relation; tup is the caller's original untoasted data.
+ */
+ if (state->rs_new_rel->rd_rel->relkind == RELKIND_TOASTVALUE)
+ {
+ /* toast table entries should never be recursively toasted */
+ Assert(!HeapTupleHasExternal(tup));
+ heaptup = tup;
+ }
+ else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
+ {
+ int options = HEAP_INSERT_SKIP_FSM;
+
+ /*
+ * While rewriting the heap for VACUUM FULL / CLUSTER, make sure data
+ * for the TOAST table are not logically decoded. The main heap is
+ * WAL-logged as XLOG FPI records, which are not logically decoded.
+ */
+ options |= HEAP_INSERT_NO_LOGICAL;
+
+ heaptup = heap_toast_insert_or_update(state->rs_new_rel, tup, NULL,
+ options);
+ }
+ else
+ heaptup = tup;
+
+ len = MAXALIGN(heaptup->t_len); /* be conservative */
+
+ /*
+ * If we're going to fail for an oversize tuple, do it right away
+ */
+ if (len > MaxHeapTupleSize)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("row is too big: size %zu, maximum size %zu",
+ len, MaxHeapTupleSize)));
+
+ /* Compute desired extra freespace due to fillfactor option */
+ saveFreeSpace = RelationGetTargetPageFreeSpace(state->rs_new_rel,
+ HEAP_DEFAULT_FILLFACTOR);
+
+ /* Now we can check to see if there's enough free space already. */
+ if (state->rs_buffer_valid)
+ {
+ pageFreeSpace = PageGetHeapFreeSpace(page);
+
+ if (len + saveFreeSpace > pageFreeSpace)
+ {
+ /*
+ * Doesn't fit, so write out the existing page. It always
+ * contains a tuple. Hence, unlike RelationGetBufferForTuple(),
+ * enforce saveFreeSpace unconditionally.
+ */
+
+ /* XLOG stuff */
+ if (RelationNeedsWAL(state->rs_new_rel))
+ log_newpage(&state->rs_new_rel->rd_node,
+ MAIN_FORKNUM,
+ state->rs_blockno,
+ page,
+ true);
+
+ /*
+ * Now write the page. We say skipFsync = true because there's no
+ * need for smgr to schedule an fsync for this write; we'll do it
+ * ourselves in end_heap_rewrite.
+ */
+ RelationOpenSmgr(state->rs_new_rel);
+
+ PageSetChecksumInplace(page, state->rs_blockno);
+
+ smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM,
+ state->rs_blockno, (char *) page, true);
+
+ state->rs_blockno++;
+ state->rs_buffer_valid = false;
+ }
+ }
+
+ if (!state->rs_buffer_valid)
+ {
+ /* Initialize a new empty page */
+ PageInit(page, BLCKSZ, 0);
+ state->rs_buffer_valid = true;
+ }
+
+ /* And now we can insert the tuple into the page */
+ newoff = PageAddItem(page, (Item) heaptup->t_data, heaptup->t_len,
+ InvalidOffsetNumber, false, true);
+ if (newoff == InvalidOffsetNumber)
+ elog(ERROR, "failed to add tuple");
+
+ /* Update caller's t_self to the actual position where it was stored */
+ ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);
+
+ /*
+ * Insert the correct position into the CTID of the stored tuple, too, if the
+ * caller didn't supply a valid CTID.
+ */
+ if (!ItemPointerIsValid(&tup->t_data->t_ctid))
+ {
+ ItemId newitemid;
+ HeapTupleHeader onpage_tup;
+
+ newitemid = PageGetItemId(page, newoff);
+ onpage_tup = (HeapTupleHeader) PageGetItem(page, newitemid);
+
+ onpage_tup->t_ctid = tup->t_self;
+ }
+
+ /* If heaptup is a private copy, release it. */
+ if (heaptup != tup)
+ heap_freetuple(heaptup);
+}
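+
+/*
+ * A worked example of the free-space check above, assuming the default
+ * 8192-byte BLCKSZ and a fillfactor of 90: saveFreeSpace is
+ * BLCKSZ * (100 - 90) / 100 = 819 bytes, so a 200-byte (MAXALIGNed) tuple
+ * goes onto the current page only while PageGetHeapFreeSpace() reports at
+ * least 200 + 819 = 1019 bytes; otherwise the page is written out via
+ * smgrextend() and a fresh page is started.
+ */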
+
+/* ------------------------------------------------------------------------
+ * Logical rewrite support
+ *
+ * When doing logical decoding - which relies on using cmin/cmax of catalog
+ * tuples, via xl_heap_new_cid records - heap rewrites have to log enough
+ * information to allow the decoding backend to update its internal mapping
+ * of (relfilenode,ctid) => (cmin, cmax) to be correct for the rewritten heap.
+ *
+ * For that, every time we find a tuple that's been modified in a catalog
+ * relation within the xmin horizon of any decoding slot, we log a mapping
+ * from the old to the new location.
+ *
+ * To deal with rewrites that abort, the filename of a mapping file contains
+ * the xid of the transaction performing the rewrite, which can then be
+ * checked before the file is read in.
+ *
+ * For efficiency we don't immediately spill every single mapping for a
+ * row to disk but only do so in batches when we've collected several of them
+ * in memory or when end_heap_rewrite() has been called.
+ *
+ * Crash-Safety: This module deviates from the usual patterns of doing WAL
+ * since it cannot rely on checkpoints flushing out all buffers and thus
+ * waiting for exclusive locks on buffers. Usually the XLogInsert() covering
+ * buffer modifications is performed while the buffer(s) that are being
+ * modified are exclusively locked, guaranteeing that both the WAL record and
+ * the modified heap end up on the same side of the checkpoint. But since the
+ * mapping files we log aren't in shared_buffers, that interlock doesn't work.
+ *
+ * Instead we simply write the mapping files out to disk, *before* the
+ * XLogInsert() is performed. That guarantees that either the XLogInsert() is
+ * inserted after the checkpoint's redo pointer or that the checkpoint (via
+ * CheckPointLogicalRewriteHeap()) has flushed the (partial) mapping file to
+ * disk. That leaves the tail end that has not yet been flushed open to
+ * corruption, which is solved by including the current offset in the
+ * xl_heap_rewrite_mapping records and truncating the mapping file to it
+ * during replay. Every time a rewrite is finished all generated mapping files
+ * are synced to disk.
+ *
+ * Note that if we were only concerned about crash safety we wouldn't have to
+ * deal with WAL logging at all - an fsync() at the end of a rewrite would be
+ * sufficient for crash safety. Any mapping that hasn't been safely flushed to
+ * disk has to belong to a transaction that aborted (explicitly or via a
+ * crash) and is ignored by virtue of the xid in its name being subject to a
+ * TransactionIdDidCommit() check. But we want to support having standbys via
+ * physical replication, both for availability and to do logical decoding
+ * there.
+ * ------------------------------------------------------------------------
+ */
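+
+/*
+ * Illustrative ordering sketch (stand-in calls; the real code paths are
+ * logical_heap_rewrite_flush_mappings() and heap_xlog_logical_rewrite()):
+ *
+ *   write(mapping_fd, batch, len);                 -- 1. data goes out first
+ *   XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_REWRITE);   -- 2. then the WAL record
+ *
+ * If we crash between 1 and 2, no record exists and the unlogged tail is
+ * either truncated away by replay of a later record for the file or ignored
+ * because the writing transaction never committed; if we crash after 2,
+ * replay rewrites the same bytes at the same offset, so the operation is
+ * idempotent.
+ */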
+
+/*
+ * Do preparations for logging logical mappings during a rewrite if
+ * necessary. If we detect that we don't need to log anything we'll prevent
+ * any further action by the various logical rewrite functions.
+ */
+static void
+logical_begin_heap_rewrite(RewriteState state)
+{
+ HASHCTL hash_ctl;
+ TransactionId logical_xmin;
+
+ /*
+ * We only need to persist these mappings if the rewritten table can be
+ * accessed during logical decoding; if not, we can skip doing any
+ * additional work.
+ */
+ state->rs_logical_rewrite =
+ RelationIsAccessibleInLogicalDecoding(state->rs_old_rel);
+
+ if (!state->rs_logical_rewrite)
+ return;
+
+ ProcArrayGetReplicationSlotXmin(NULL, &logical_xmin);
+
+ /*
+ * If there are no logical slots in progress we don't need to do anything,
+ * there cannot be any remappings for relevant rows yet. The relation's
+ * lock protects us against races.
+ */
+ if (logical_xmin == InvalidTransactionId)
+ {
+ state->rs_logical_rewrite = false;
+ return;
+ }
+
+ state->rs_logical_xmin = logical_xmin;
+ state->rs_begin_lsn = GetXLogInsertRecPtr();
+ state->rs_num_rewrite_mappings = 0;
+
+ hash_ctl.keysize = sizeof(TransactionId);
+ hash_ctl.entrysize = sizeof(RewriteMappingFile);
+ hash_ctl.hcxt = state->rs_cxt;
+
+ state->rs_logical_mappings =
+ hash_create("Logical rewrite mapping",
+ 128, /* arbitrary initial size */
+ &hash_ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+}
+
+/*
+ * Flush all logical in-memory mappings to disk, but don't fsync them yet.
+ */
+static void
+logical_heap_rewrite_flush_mappings(RewriteState state)
+{
+ HASH_SEQ_STATUS seq_status;
+ RewriteMappingFile *src;
+ dlist_mutable_iter iter;
+
+ Assert(state->rs_logical_rewrite);
+
+ /* no logical rewrite in progress, no need to iterate over mappings */
+ if (state->rs_num_rewrite_mappings == 0)
+ return;
+
+ elog(DEBUG1, "flushing %u logical rewrite mapping entries",
+ state->rs_num_rewrite_mappings);
+
+ hash_seq_init(&seq_status, state->rs_logical_mappings);
+ while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
+ {
+ char *waldata;
+ char *waldata_start;
+ xl_heap_rewrite_mapping xlrec;
+ Oid dboid;
+ uint32 len;
+ int written;
+
+ /* this file hasn't got any new mappings */
+ if (src->num_mappings == 0)
+ continue;
+
+ if (state->rs_old_rel->rd_rel->relisshared)
+ dboid = InvalidOid;
+ else
+ dboid = MyDatabaseId;
+
+ xlrec.num_mappings = src->num_mappings;
+ xlrec.mapped_rel = RelationGetRelid(state->rs_old_rel);
+ xlrec.mapped_xid = src->xid;
+ xlrec.mapped_db = dboid;
+ xlrec.offset = src->off;
+ xlrec.start_lsn = state->rs_begin_lsn;
+
+ /* write all mappings consecutively */
+ len = src->num_mappings * sizeof(LogicalRewriteMappingData);
+ waldata_start = waldata = palloc(len);
+
+ /*
+ * collect data we need to write out, but don't modify on-disk data yet
+ */
+ dlist_foreach_modify(iter, &src->mappings)
+ {
+ RewriteMappingDataEntry *pmap;
+
+ pmap = dlist_container(RewriteMappingDataEntry, node, iter.cur);
+
+ memcpy(waldata, &pmap->map, sizeof(pmap->map));
+ waldata += sizeof(pmap->map);
+
+ /* remove from the list and free */
+ dlist_delete(&pmap->node);
+ pfree(pmap);
+
+ /* update bookkeeping */
+ state->rs_num_rewrite_mappings--;
+ src->num_mappings--;
+ }
+
+ Assert(src->num_mappings == 0);
+ Assert(waldata == waldata_start + len);
+
+ /*
+ * Note that we deviate from the usual WAL coding practices here;
+ * see the "Logical rewrite support" comment above for the reasoning.
+ */
+ written = FileWrite(src->vfd, waldata_start, len, src->off,
+ WAIT_EVENT_LOGICAL_REWRITE_WRITE);
+ if (written != len)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not write to file \"%s\", wrote %d of %d: %m", src->path,
+ written, len)));
+ src->off += len;
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) (&xlrec), sizeof(xlrec));
+ XLogRegisterData(waldata_start, len);
+
+ /* write xlog record */
+ XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_REWRITE);
+
+ pfree(waldata_start);
+ }
+ Assert(state->rs_num_rewrite_mappings == 0);
+}
+
+/*
+ * Logical remapping part of end_heap_rewrite().
+ */
+static void
+logical_end_heap_rewrite(RewriteState state)
+{
+ HASH_SEQ_STATUS seq_status;
+ RewriteMappingFile *src;
+
+ /* done, no logical rewrite in progress */
+ if (!state->rs_logical_rewrite)
+ return;
+
+ /* writeout remaining in-memory entries */
+ if (state->rs_num_rewrite_mappings > 0)
+ logical_heap_rewrite_flush_mappings(state);
+
+ /* Iterate over all mappings we have written and fsync the files. */
+ hash_seq_init(&seq_status, state->rs_logical_mappings);
+ while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
+ {
+ if (FileSync(src->vfd, WAIT_EVENT_LOGICAL_REWRITE_SYNC) != 0)
+ ereport(data_sync_elevel(ERROR),
+ (errcode_for_file_access(),
+ errmsg("could not fsync file \"%s\": %m", src->path)));
+ FileClose(src->vfd);
+ }
+ /* memory context cleanup will deal with the rest */
+}
+
+/*
+ * Log a single (old->new) mapping for 'xid'.
+ */
+static void
+logical_rewrite_log_mapping(RewriteState state, TransactionId xid,
+ LogicalRewriteMappingData *map)
+{
+ RewriteMappingFile *src;
+ RewriteMappingDataEntry *pmap;
+ Oid relid;
+ bool found;
+
+ relid = RelationGetRelid(state->rs_old_rel);
+
+ /* look for existing mappings for this 'mapped' xid */
+ src = hash_search(state->rs_logical_mappings, &xid,
+ HASH_ENTER, &found);
+
+ /*
+ * We haven't yet had the need to map anything for this xid; create
+ * the per-xid data structures.
+ */
+ if (!found)
+ {
+ char path[MAXPGPATH];
+ Oid dboid;
+
+ if (state->rs_old_rel->rd_rel->relisshared)
+ dboid = InvalidOid;
+ else
+ dboid = MyDatabaseId;
+
+ snprintf(path, MAXPGPATH,
+ "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT,
+ dboid, relid,
+ LSN_FORMAT_ARGS(state->rs_begin_lsn),
+ xid, GetCurrentTransactionId());
+
+ dlist_init(&src->mappings);
+ src->num_mappings = 0;
+ src->off = 0;
+ memcpy(src->path, path, sizeof(path));
+ src->vfd = PathNameOpenFile(path,
+ O_CREAT | O_EXCL | O_WRONLY | PG_BINARY);
+ if (src->vfd < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not create file \"%s\": %m", path)));
+ }
+
+ pmap = MemoryContextAlloc(state->rs_cxt,
+ sizeof(RewriteMappingDataEntry));
+ memcpy(&pmap->map, map, sizeof(LogicalRewriteMappingData));
+ dlist_push_tail(&src->mappings, &pmap->node);
+ src->num_mappings++;
+ state->rs_num_rewrite_mappings++;
+
+ /*
+ * Write out the buffers every time we have too many in-memory entries
+ * across all mapping files.
+ */
+ if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */ )
+ logical_heap_rewrite_flush_mappings(state);
+}
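+
+/*
+ * For illustration: assuming LOGICAL_REWRITE_FORMAT expands to something
+ * like "map-%x-%x-%X_%X-%x-%x" (see rewriteheap.h), the six components
+ * above -- database OID, relation OID, the rewrite's start LSN split into
+ * its high and low halves, the mapped xid, and the creating xid -- produce
+ * a name such as "map-4000-4e96-0_2A8D9600-5a5-5a6" under
+ * pg_logical/mappings/. CheckPointLogicalRewriteHeap() parses exactly
+ * these six fields back out with sscanf().
+ */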
+
+/*
+ * Perform logical remapping, if necessary, for a tuple that
+ * rewrite_heap_tuple() mapped from old_tid to new_tuple->t_self.
+ */
+static void
+logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid,
+ HeapTuple new_tuple)
+{
+ ItemPointerData new_tid = new_tuple->t_self;
+ TransactionId cutoff = state->rs_logical_xmin;
+ TransactionId xmin;
+ TransactionId xmax;
+ bool do_log_xmin = false;
+ bool do_log_xmax = false;
+ LogicalRewriteMappingData map;
+
+ /* no logical rewrite in progress, we don't need to log anything */
+ if (!state->rs_logical_rewrite)
+ return;
+
+ xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
+ /* use *GetUpdateXid to correctly deal with multixacts */
+ xmax = HeapTupleHeaderGetUpdateXid(new_tuple->t_data);
+
+ /*
+ * Log the mapping iff the tuple has been created recently.
+ */
+ if (TransactionIdIsNormal(xmin) && !TransactionIdPrecedes(xmin, cutoff))
+ do_log_xmin = true;
+
+ if (!TransactionIdIsNormal(xmax))
+ {
+ /*
+ * no xmax is set, so there can't be a permanent one either; this
+ * check is sufficient
+ */
+ }
+ else if (HEAP_XMAX_IS_LOCKED_ONLY(new_tuple->t_data->t_infomask))
+ {
+ /* only locked, we don't care */
+ }
+ else if (!TransactionIdPrecedes(xmax, cutoff))
+ {
+ /* tuple has been deleted recently, log */
+ do_log_xmax = true;
+ }
+
+ /* if neither needs to be logged, we're done */
+ if (!do_log_xmin && !do_log_xmax)
+ return;
+
+ /* fill out mapping information */
+ map.old_node = state->rs_old_rel->rd_node;
+ map.old_tid = old_tid;
+ map.new_node = state->rs_new_rel->rd_node;
+ map.new_tid = new_tid;
+
+ /* ---
+ * Now persist the mapping for the individual xids that are affected. We
+ * need to log for both xmin and xmax if they aren't the same transaction
+ * since the mapping files are per "affected" xid.
+ * We don't muster all that much effort detecting whether xmin and xmax
+ * are actually the same transaction, we just check whether the xid is the
+ * same disregarding subtransactions. Logging too much is relatively
+ * harmless and we could never do the check fully since subtransaction
+ * data is thrown away during restarts.
+ * ---
+ */
+ if (do_log_xmin)
+ logical_rewrite_log_mapping(state, xmin, &map);
+ /* separately log mapping for xmax unless it'd be redundant */
+ if (do_log_xmax && !TransactionIdEquals(xmin, xmax))
+ logical_rewrite_log_mapping(state, xmax, &map);
+}
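+
+/*
+ * A worked example of the cutoff tests above: with rs_logical_xmin = 100,
+ * a tuple whose xmin is 120 gets its xmin mapping logged (120 is not
+ * before the cutoff) while one whose xmin is 80 does not; an xmax of 120
+ * is logged only when it is a genuine delete/update rather than a mere
+ * locker; and when xmin and xmax are both 120, a single mapping entry
+ * suffices.
+ */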
+
+/*
+ * Replay XLOG_HEAP2_REWRITE records
+ */
+void
+heap_xlog_logical_rewrite(XLogReaderState *r)
+{
+ char path[MAXPGPATH];
+ int fd;
+ xl_heap_rewrite_mapping *xlrec;
+ uint32 len;
+ char *data;
+
+ xlrec = (xl_heap_rewrite_mapping *) XLogRecGetData(r);
+
+ snprintf(path, MAXPGPATH,
+ "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT,
+ xlrec->mapped_db, xlrec->mapped_rel,
+ LSN_FORMAT_ARGS(xlrec->start_lsn),
+ xlrec->mapped_xid, XLogRecGetXid(r));
+
+ fd = OpenTransientFile(path,
+ O_CREAT | O_WRONLY | PG_BINARY);
+ if (fd < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not create file \"%s\": %m", path)));
+
+ /*
+ * Truncate all data that's not guaranteed to have been safely fsynced (by
+ * previous record or by the last checkpoint).
+ */
+ pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_TRUNCATE);
+ if (ftruncate(fd, xlrec->offset) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not truncate file \"%s\" to %u: %m",
+ path, (uint32) xlrec->offset)));
+ pgstat_report_wait_end();
+
+ data = XLogRecGetData(r) + sizeof(*xlrec);
+
+ len = xlrec->num_mappings * sizeof(LogicalRewriteMappingData);
+
+ /* write out tail end of mapping file (again) */
+ errno = 0;
+ pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_MAPPING_WRITE);
+ if (pg_pwrite(fd, data, len, xlrec->offset) != len)
+ {
+ /* if write didn't set errno, assume problem is no disk space */
+ if (errno == 0)
+ errno = ENOSPC;
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not write to file \"%s\": %m", path)));
+ }
+ pgstat_report_wait_end();
+
+ /*
+ * Now fsync all previously written data. We could improve things and only
+ * do this for the last write to a file, but the required bookkeeping
+ * doesn't seem worth the trouble.
+ */
+ pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_MAPPING_SYNC);
+ if (pg_fsync(fd) != 0)
+ ereport(data_sync_elevel(ERROR),
+ (errcode_for_file_access(),
+ errmsg("could not fsync file \"%s\": %m", path)));
+ pgstat_report_wait_end();
+
+ if (CloseTransientFile(fd) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m", path)));
+}
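+
+/*
+ * The replay sequence above is deliberately self-contained:
+ *
+ *   ftruncate(fd, xlrec->offset);             -- discard any unsynced tail
+ *   pg_pwrite(fd, data, len, xlrec->offset);  -- lay the payload down again
+ *
+ * Whatever partial tail a crash may have left beyond 'offset' is thrown
+ * away and the record's own copy of the data takes its place, so replaying
+ * the same record twice yields identical file contents.
+ */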
+
+/* ---
+ * Perform a checkpoint for logical rewrite mappings
+ *
+ * This serves two tasks:
+ * 1) Remove all mappings not needed anymore based on the logical restart LSN
+ * 2) Flush all remaining mappings to disk, so that replay after a checkpoint
+ * only has to deal with the parts of a mapping that have been written out
+ * after the checkpoint started.
+ * ---
+ */
+void
+CheckPointLogicalRewriteHeap(void)
+{
+ XLogRecPtr cutoff;
+ XLogRecPtr redo;
+ DIR *mappings_dir;
+ struct dirent *mapping_de;
+ char path[MAXPGPATH + 20];
+
+ /*
+ * We start off with the last redo pointer as a minimum. No new decoding
+ * slot will start before that, so that's a safe upper bound for removal.
+ */
+ redo = GetRedoRecPtr();
+
+ /* now check for the restart ptrs from existing slots */
+ cutoff = ReplicationSlotsComputeLogicalRestartLSN();
+
+ /* use the earlier of the two as the removal cutoff */
+ if (cutoff != InvalidXLogRecPtr && redo < cutoff)
+ cutoff = redo;
+
+ mappings_dir = AllocateDir("pg_logical/mappings");
+ while ((mapping_de = ReadDir(mappings_dir, "pg_logical/mappings")) != NULL)
+ {
+ struct stat statbuf;
+ Oid dboid;
+ Oid relid;
+ XLogRecPtr lsn;
+ TransactionId rewrite_xid;
+ TransactionId create_xid;
+ uint32 hi,
+ lo;
+
+ if (strcmp(mapping_de->d_name, ".") == 0 ||
+ strcmp(mapping_de->d_name, "..") == 0)
+ continue;
+
+ snprintf(path, sizeof(path), "pg_logical/mappings/%s", mapping_de->d_name);
+ if (lstat(path, &statbuf) == 0 && !S_ISREG(statbuf.st_mode))
+ continue;
+
+ /* Skip over files that cannot be ours. */
+ if (strncmp(mapping_de->d_name, "map-", 4) != 0)
+ continue;
+
+ if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT,
+ &dboid, &relid, &hi, &lo, &rewrite_xid, &create_xid) != 6)
+ elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name);
+
+ lsn = ((uint64) hi) << 32 | lo;
+
+ if (lsn < cutoff || cutoff == InvalidXLogRecPtr)
+ {
+ elog(DEBUG1, "removing logical rewrite file \"%s\"", path);
+ if (unlink(path) < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not remove file \"%s\": %m", path)));
+ }
+ else
+ {
+ /* on some operating systems fsyncing a file requires O_RDWR */
+ int fd = OpenTransientFile(path, O_RDWR | PG_BINARY);
+
+ /*
+ * The file cannot vanish due to concurrency since this function
+ * is the only one removing logical mappings and only one
+ * checkpoint can be in progress at a time.
+ */
+ if (fd < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\": %m", path)));
+
+ /*
+ * We could try to avoid fsyncing files that either haven't
+ * changed or have only been created since the checkpoint's start,
+ * but it's currently not deemed worth the effort.
+ */
+ pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_CHECKPOINT_SYNC);
+ if (pg_fsync(fd) != 0)
+ ereport(data_sync_elevel(ERROR),
+ (errcode_for_file_access(),
+ errmsg("could not fsync file \"%s\": %m", path)));
+ pgstat_report_wait_end();
+
+ if (CloseTransientFile(fd) != 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m", path)));
+ }
+ }
+ FreeDir(mappings_dir);
+
+ /* persist directory entries to disk */
+ fsync_fname("pg_logical/mappings", true);
+}
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
new file mode 100644
index 0000000..8aab6e3
--- /dev/null
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -0,0 +1,4353 @@
+/*-------------------------------------------------------------------------
+ *
+ * vacuumlazy.c
+ * Concurrent ("lazy") vacuuming.
+ *
+ *
+ * The major space usage for LAZY VACUUM is storage for the array of dead tuple
+ * TIDs. We want to ensure we can vacuum even the very largest relations with
+ * finite memory space usage. To do that, we set upper bounds on the number of
+ * tuples we will keep track of at once.
+ *
+ * We are willing to use at most maintenance_work_mem (or perhaps
+ * autovacuum_work_mem) memory space to keep track of dead tuples. We
+ * initially allocate an array of TIDs of that size, with an upper limit that
+ * depends on table size (this limit ensures we don't allocate a huge area
+ * uselessly for vacuuming small tables). If the array threatens to overflow,
+ * we suspend the heap scan phase and perform a pass of index cleanup and page
+ * compaction, then resume the heap scan with an empty TID array.
+ *
+ * If we're processing a table with no indexes, we can just vacuum each page
+ * as we go; there's no need to save up multiple tuples to minimize the number
+ * of index scans performed. So we don't use maintenance_work_mem memory for
+ * the TID array, just enough to hold as many heap tuples as fit on one page.
+ *
+ * Lazy vacuum supports parallel execution with parallel worker processes. In
+ * a parallel vacuum, we perform both index vacuum and index cleanup with
+ * parallel worker processes. Individual indexes are processed by one vacuum
+ * process. At the beginning of a lazy vacuum (at lazy_scan_heap) we prepare
+ * the parallel context and initialize the DSM segment that contains shared
+ * information as well as the memory space for storing dead tuples. When
+ * starting either index vacuum or index cleanup, we launch parallel worker
+ * processes. Once all indexes are processed the parallel worker processes
+ * exit. After that, the leader process re-initializes the parallel context
+ * so that it can use the same DSM for multiple passes of index vacuum and
+ * for performing index cleanup. Updating the index statistics requires
+ * updating the system table, and since such updates are not allowed during
+ * parallel mode, we update the index statistics after exiting from
+ * parallel mode.
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/access/heap/vacuumlazy.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <math.h>
+
+#include "access/amapi.h"
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "access/heapam_xlog.h"
+#include "access/htup_details.h"
+#include "access/multixact.h"
+#include "access/parallel.h"
+#include "access/transam.h"
+#include "access/visibilitymap.h"
+#include "access/xact.h"
+#include "access/xlog.h"
+#include "catalog/index.h"
+#include "catalog/storage.h"
+#include "commands/dbcommands.h"
+#include "commands/progress.h"
+#include "commands/vacuum.h"
+#include "executor/instrument.h"
+#include "miscadmin.h"
+#include "optimizer/paths.h"
+#include "pgstat.h"
+#include "portability/instr_time.h"
+#include "postmaster/autovacuum.h"
+#include "storage/bufmgr.h"
+#include "storage/freespace.h"
+#include "storage/lmgr.h"
+#include "tcop/tcopprot.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/pg_rusage.h"
+#include "utils/timestamp.h"
+
+
+/*
+ * Space/time tradeoff parameters: do these need to be user-tunable?
+ *
+ * To consider truncating the relation, we want there to be at least
+ * REL_TRUNCATE_MINIMUM or (relsize / REL_TRUNCATE_FRACTION) (whichever
+ * is less) potentially-freeable pages.
+ */
+#define REL_TRUNCATE_MINIMUM 1000
+#define REL_TRUNCATE_FRACTION 16
+
+/*
+ * Timing parameters for truncate locking heuristics.
+ *
+ * These were not exposed as user tunable GUC values because it didn't seem
+ * that the potential for improvement was great enough to merit the cost of
+ * supporting them.
+ */
+#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20 /* ms */
+#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */
+#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */
+
+/*
+ * Threshold that controls whether we bypass index vacuuming and heap
+ * vacuuming as an optimization
+ */
+#define BYPASS_THRESHOLD_PAGES 0.02 /* i.e. 2% of rel_pages */
+
+/*
+ * Perform a failsafe check every 4GB during the heap scan, approximately
+ */
+#define FAILSAFE_EVERY_PAGES \
+ ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ))
+
+/*
+ * When a table has no indexes, vacuum the FSM after every 8GB, approximately
+ * (it won't be exact because we only vacuum FSM after processing a heap page
+ * that has some removable tuples). When there are indexes, this is ignored,
+ * and we vacuum FSM after each index/heap cleaning pass.
+ */
+#define VACUUM_FSM_EVERY_PAGES \
+ ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ))
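+
+/*
+ * With the default 8192-byte BLCKSZ, these work out to 524,288 pages
+ * between failsafe checks (4GB / 8KB) and 1,048,576 pages between FSM
+ * vacuums (8GB / 8KB).
+ */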
+
+/*
+ * Guesstimation of number of dead tuples per page. This is used to
+ * provide an upper limit to memory allocated when vacuuming small
+ * tables.
+ */
+#define LAZY_ALLOC_TUPLES MaxHeapTuplesPerPage
+
+/*
+ * Before we consider skipping a page that's marked as clean in
+ * visibility map, we must've seen at least this many clean pages.
+ */
+#define SKIP_PAGES_THRESHOLD ((BlockNumber) 32)
+
+/*
+ * Size of the prefetch window for lazy vacuum backwards truncation scan.
+ * Needs to be a power of 2.
+ */
+#define PREFETCH_SIZE ((BlockNumber) 32)
+
+/*
+ * DSM keys for parallel vacuum. Unlike other parallel execution code, since
+ * we don't need to worry about DSM keys conflicting with plan_node_id we can
+ * use small integers.
+ */
+#define PARALLEL_VACUUM_KEY_SHARED 1
+#define PARALLEL_VACUUM_KEY_DEAD_TUPLES 2
+#define PARALLEL_VACUUM_KEY_QUERY_TEXT 3
+#define PARALLEL_VACUUM_KEY_BUFFER_USAGE 4
+#define PARALLEL_VACUUM_KEY_WAL_USAGE 5
+
+/*
+ * Macro to check if we are in a parallel vacuum. If true, we are in
+ * parallel mode and the DSM segment is initialized.
+ */
+#define ParallelVacuumIsActive(vacrel) ((vacrel)->lps != NULL)
+
+/* Phases of vacuum during which we report error context. */
+typedef enum
+{
+ VACUUM_ERRCB_PHASE_UNKNOWN,
+ VACUUM_ERRCB_PHASE_SCAN_HEAP,
+ VACUUM_ERRCB_PHASE_VACUUM_INDEX,
+ VACUUM_ERRCB_PHASE_VACUUM_HEAP,
+ VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
+ VACUUM_ERRCB_PHASE_TRUNCATE
+} VacErrPhase;
+
+/*
+ * LVDeadTuples stores the dead tuple TIDs collected during the heap scan.
+ * This is allocated in the DSM segment in parallel mode and in local memory
+ * in non-parallel mode.
+ */
+typedef struct LVDeadTuples
+{
+ int max_tuples; /* # slots allocated in array */
+ int num_tuples; /* current # of entries */
+ /* List of TIDs of tuples we intend to delete */
+ /* NB: this list is ordered by TID address */
+ ItemPointerData itemptrs[FLEXIBLE_ARRAY_MEMBER]; /* array of
+ * ItemPointerData */
+} LVDeadTuples;
+
+/* The dead tuple space consists of LVDeadTuples and dead tuple TIDs */
+#define SizeOfDeadTuples(cnt) \
+ add_size(offsetof(LVDeadTuples, itemptrs), \
+ mul_size(sizeof(ItemPointerData), cnt))
+#define MAXDEADTUPLES(max_size) \
+ (((max_size) - offsetof(LVDeadTuples, itemptrs)) / sizeof(ItemPointerData))
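+
+/*
+ * A worked example, assuming the 6-byte ItemPointerData and a 64MB
+ * maintenance_work_mem: MAXDEADTUPLES(64 * 1024 * 1024) is roughly
+ * (67108864 - offsetof(LVDeadTuples, itemptrs)) / 6, i.e. about 11.1
+ * million TIDs per index-cleanup pass. compute_max_dead_tuples()
+ * additionally clamps the allocation to roughly LAZY_ALLOC_TUPLES per
+ * heap page, so a tiny table never reserves the full budget.
+ */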
+
+/*
+ * Shared information among parallel workers. So this is allocated in the DSM
+ * segment.
+ */
+typedef struct LVShared
+{
+ /*
+ * Target table relid and log level. These fields are not modified during
+ * the lazy vacuum.
+ */
+ Oid relid;
+ int elevel;
+
+ /*
+ * An indication for vacuum workers to perform either index vacuum or
+ * index cleanup. first_time is true only if for_cleanup is true and
+ * bulk-deletion has not been performed yet.
+ */
+ bool for_cleanup;
+ bool first_time;
+
+ /*
+ * Fields for both index vacuum and cleanup.
+ *
+ * reltuples is the total number of input heap tuples. We set it to the
+ * old live tuples in the index vacuum case, or to the new live tuples in
+ * the index cleanup case.
+ *
+ * estimated_count is true if reltuples is an estimated value. (Note that
+ * reltuples could be -1 in this case, indicating we have no idea.)
+ */
+ double reltuples;
+ bool estimated_count;
+
+ /*
+ * In a single-process lazy vacuum, we could consume more memory during
+ * index vacuuming or cleanup beyond the memory used for heap scanning. In
+ * parallel vacuum, since individual vacuum workers can consume memory
+ * equal to maintenance_work_mem, the new maintenance_work_mem for each
+ * worker is set such that the parallel operation doesn't consume more
+ * memory than single process lazy vacuum.
+ */
+ int maintenance_work_mem_worker;
+
+ /*
+ * Shared vacuum cost balance. During parallel vacuum,
+ * VacuumSharedCostBalance points to this value and it accumulates the
+ * balance of each parallel vacuum worker.
+ */
+ pg_atomic_uint32 cost_balance;
+
+ /*
+ * Number of active parallel workers. This is used for computing the
+ * minimum threshold of the vacuum cost balance before a worker sleeps for
+ * cost-based delay.
+ */
+ pg_atomic_uint32 active_nworkers;
+
+ /*
+ * Variables to control parallel vacuum. We have a bitmap to indicate
+ * which indexes have stats in shared memory. A set bit in the map
+ * indicates that the particular index supports a parallel vacuum.
+ */
+ pg_atomic_uint32 idx; /* counter for vacuuming and clean up */
+ uint32 offset; /* sizeof header incl. bitmap */
+ bits8 bitmap[FLEXIBLE_ARRAY_MEMBER]; /* bit map of NULLs */
+
+ /* Shared index statistics data follows at end of struct */
+} LVShared;
+
+#define SizeOfLVShared (offsetof(LVShared, bitmap) + sizeof(bits8))
+#define GetSharedIndStats(s) \
+ ((LVSharedIndStats *)((char *)(s) + ((LVShared *)(s))->offset))
+#define IndStatsIsNull(s, i) \
+ (!(((LVShared *)(s))->bitmap[(i) >> 3] & (1 << ((i) & 0x07))))
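+
+/*
+ * Example of the bitmap addressing above: for index number 10,
+ * IndStatsIsNull() tests bit (10 & 0x07) = 2 of bitmap[10 >> 3] =
+ * bitmap[1]. A set bit means the index's stats live in the DSM segment;
+ * a clear bit makes IndStatsIsNull() return true, and the index is
+ * handled without shared stats (typically because it cannot participate
+ * in parallel vacuum).
+ */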
+
+/*
+ * Struct for an index bulk-deletion statistic used for parallel vacuum. This
+ * is allocated in the DSM segment.
+ */
+typedef struct LVSharedIndStats
+{
+ bool updated; /* are the stats updated? */
+ IndexBulkDeleteResult istat;
+} LVSharedIndStats;
+
+/* Struct for maintaining a parallel vacuum state. */
+typedef struct LVParallelState
+{
+ ParallelContext *pcxt;
+
+ /* Shared information among parallel vacuum workers */
+ LVShared *lvshared;
+
+ /* Points to buffer usage area in DSM */
+ BufferUsage *buffer_usage;
+
+ /* Points to WAL usage area in DSM */
+ WalUsage *wal_usage;
+
+ /*
+ * The number of indexes that support parallel index bulk-deletion and
+ * parallel index cleanup respectively.
+ */
+ int nindexes_parallel_bulkdel;
+ int nindexes_parallel_cleanup;
+ int nindexes_parallel_condcleanup;
+} LVParallelState;
+
+typedef struct LVRelState
+{
+ /* Target heap relation and its indexes */
+ Relation rel;
+ Relation *indrels;
+ int nindexes;
+
+ /* Wraparound failsafe has been triggered? */
+ bool failsafe_active;
+ /* Consider index vacuuming bypass optimization? */
+ bool consider_bypass_optimization;
+
+ /* Doing index vacuuming, index cleanup, rel truncation? */
+ bool do_index_vacuuming;
+ bool do_index_cleanup;
+ bool do_rel_truncate;
+
+ /* Buffer access strategy and parallel state */
+ BufferAccessStrategy bstrategy;
+ LVParallelState *lps;
+
+ /* Statistics from pg_class when we start out */
+ BlockNumber old_rel_pages; /* previous value of pg_class.relpages */
+ double old_live_tuples; /* previous value of pg_class.reltuples */
+ /* rel's initial relfrozenxid and relminmxid */
+ TransactionId relfrozenxid;
+ MultiXactId relminmxid;
+
+ /* VACUUM operation's cutoff for pruning */
+ TransactionId OldestXmin;
+ /* VACUUM operation's cutoff for freezing XIDs and MultiXactIds */
+ TransactionId FreezeLimit;
+ MultiXactId MultiXactCutoff;
+
+ /* Error reporting state */
+ char *relnamespace;
+ char *relname;
+ char *indname;
+ BlockNumber blkno; /* used only for heap operations */
+ OffsetNumber offnum; /* used only for heap operations */
+ VacErrPhase phase;
+
+ /*
+ * State managed by lazy_scan_heap() follows
+ */
+ LVDeadTuples *dead_tuples; /* items to vacuum from indexes */
+ BlockNumber rel_pages; /* total number of pages */
+ BlockNumber scanned_pages; /* number of pages we examined */
+ BlockNumber pinskipped_pages; /* # of pages skipped due to a pin */
+ BlockNumber frozenskipped_pages; /* # of frozen pages we skipped */
+ BlockNumber tupcount_pages; /* pages whose tuples we counted */
+ BlockNumber pages_removed; /* pages removed by truncation */
+ BlockNumber lpdead_item_pages; /* # pages with LP_DEAD items */
+ BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
+
+ /* Statistics output by us, for table */
+ double new_rel_tuples; /* new estimated total # of tuples */
+ double new_live_tuples; /* new estimated total # of live tuples */
+ /* Statistics output by index AMs */
+ IndexBulkDeleteResult **indstats;
+
+ /* Instrumentation counters */
+ int num_index_scans;
+ int64 tuples_deleted; /* # deleted from table */
+ int64 lpdead_items; /* # deleted from indexes */
+ int64 new_dead_tuples; /* new estimated total # of dead items in
+ * table */
+ int64 num_tuples; /* total number of nonremovable tuples */
+ int64 live_tuples; /* live tuples (reltuples estimate) */
+} LVRelState;
+
+/*
+ * State returned by lazy_scan_prune()
+ */
+typedef struct LVPagePruneState
+{
+ bool hastup; /* Page has tuples, preventing truncation? */
+ bool has_lpdead_items; /* includes existing LP_DEAD items */
+
+ /*
+ * State describes the proper VM bit states to set for the page following
+ * pruning and freezing. all_visible implies !has_lpdead_items, but don't
+ * trust all_frozen result unless all_visible is also set to true.
+ */
+ bool all_visible; /* Every item visible to all? */
+ bool all_frozen; /* provided all_visible is also true */
+ TransactionId visibility_cutoff_xid; /* For recovery conflicts */
+} LVPagePruneState;
+
+/* Struct for saving and restoring vacuum error information. */
+typedef struct LVSavedErrInfo
+{
+ BlockNumber blkno;
+ OffsetNumber offnum;
+ VacErrPhase phase;
+} LVSavedErrInfo;
+
+/* elevel controls whole VACUUM's verbosity */
+static int elevel = -1;
+
+
+/* non-export function prototypes */
+static void lazy_scan_heap(LVRelState *vacrel, VacuumParams *params,
+ bool aggressive);
+static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
+ BlockNumber blkno, Page page,
+ GlobalVisState *vistest,
+ LVPagePruneState *prunestate);
+static void lazy_vacuum(LVRelState *vacrel);
+static bool lazy_vacuum_all_indexes(LVRelState *vacrel);
+static void lazy_vacuum_heap_rel(LVRelState *vacrel);
+static int lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno,
+ Buffer buffer, int tupindex, Buffer *vmbuffer);
+static bool lazy_check_needs_freeze(Buffer buf, bool *hastup,
+ LVRelState *vacrel);
+static bool lazy_check_wraparound_failsafe(LVRelState *vacrel);
+static void do_parallel_lazy_vacuum_all_indexes(LVRelState *vacrel);
+static void do_parallel_lazy_cleanup_all_indexes(LVRelState *vacrel);
+static void do_parallel_vacuum_or_cleanup(LVRelState *vacrel, int nworkers);
+static void do_parallel_processing(LVRelState *vacrel,
+ LVShared *lvshared);
+static void do_serial_processing_for_unsafe_indexes(LVRelState *vacrel,
+ LVShared *lvshared);
+static IndexBulkDeleteResult *parallel_process_one_index(Relation indrel,
+ IndexBulkDeleteResult *istat,
+ LVShared *lvshared,
+ LVSharedIndStats *shared_indstats,
+ LVRelState *vacrel);
+static void lazy_cleanup_all_indexes(LVRelState *vacrel);
+static IndexBulkDeleteResult *lazy_vacuum_one_index(Relation indrel,
+ IndexBulkDeleteResult *istat,
+ double reltuples,
+ LVRelState *vacrel);
+static IndexBulkDeleteResult *lazy_cleanup_one_index(Relation indrel,
+ IndexBulkDeleteResult *istat,
+ double reltuples,
+ bool estimated_count,
+ LVRelState *vacrel);
+static bool should_attempt_truncation(LVRelState *vacrel);
+static void lazy_truncate_heap(LVRelState *vacrel);
+static BlockNumber count_nondeletable_pages(LVRelState *vacrel,
+ bool *lock_waiter_detected);
+static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex);
+static void lazy_space_alloc(LVRelState *vacrel, int nworkers,
+ BlockNumber relblocks);
+static void lazy_space_free(LVRelState *vacrel);
+static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
+static int vac_cmp_itemptr(const void *left, const void *right);
+static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
+ TransactionId *visibility_cutoff_xid, bool *all_frozen);
+static int compute_parallel_vacuum_workers(LVRelState *vacrel,
+ int nrequested,
+ bool *will_parallel_vacuum);
+static void update_index_statistics(LVRelState *vacrel);
+static LVParallelState *begin_parallel_vacuum(LVRelState *vacrel,
+ BlockNumber nblocks,
+ int nrequested);
+static void end_parallel_vacuum(LVRelState *vacrel);
+static LVSharedIndStats *parallel_stats_for_idx(LVShared *lvshared, int getidx);
+static bool parallel_processing_is_safe(Relation indrel, LVShared *lvshared);
+static void vacuum_error_callback(void *arg);
+static void update_vacuum_error_info(LVRelState *vacrel,
+ LVSavedErrInfo *saved_vacrel,
+ int phase, BlockNumber blkno,
+ OffsetNumber offnum);
+static void restore_vacuum_error_info(LVRelState *vacrel,
+ const LVSavedErrInfo *saved_vacrel);
+
+
+/*
+ * heap_vacuum_rel() -- perform VACUUM for one heap relation
+ *
+ * This routine vacuums a single heap, cleans out its indexes, and
+ * updates its relpages and reltuples statistics.
+ *
+ * At entry, we have already established a transaction and opened
+ * and locked the relation.
+ */
+void
+heap_vacuum_rel(Relation rel, VacuumParams *params,
+ BufferAccessStrategy bstrategy)
+{
+ LVRelState *vacrel;
+ PGRUsage ru0;
+ TimestampTz starttime = 0;
+ WalUsage walusage_start = pgWalUsage;
+ WalUsage walusage = {0, 0, 0};
+ long secs;
+ int usecs;
+ double read_rate,
+ write_rate;
+ bool aggressive; /* should we scan all unfrozen pages? */
+ bool scanned_all_unfrozen; /* actually scanned all such pages? */
+ char **indnames = NULL;
+ TransactionId xidFullScanLimit;
+ MultiXactId mxactFullScanLimit;
+ BlockNumber new_rel_pages;
+ BlockNumber new_rel_allvisible;
+ double new_live_tuples;
+ TransactionId new_frozen_xid;
+ MultiXactId new_min_multi;
+ ErrorContextCallback errcallback;
+ PgStat_Counter startreadtime = 0;
+ PgStat_Counter startwritetime = 0;
+ TransactionId OldestXmin;
+ TransactionId FreezeLimit;
+ MultiXactId MultiXactCutoff;
+
+ /* measure elapsed time iff autovacuum logging requires it */
+ if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
+ {
+ pg_rusage_init(&ru0);
+ starttime = GetCurrentTimestamp();
+ if (track_io_timing)
+ {
+ startreadtime = pgStatBlockReadTime;
+ startwritetime = pgStatBlockWriteTime;
+ }
+ }
+
+ if (params->options & VACOPT_VERBOSE)
+ elevel = INFO;
+ else
+ elevel = DEBUG2;
+
+ pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
+ RelationGetRelid(rel));
+
+ vacuum_set_xid_limits(rel,
+ params->freeze_min_age,
+ params->freeze_table_age,
+ params->multixact_freeze_min_age,
+ params->multixact_freeze_table_age,
+ &OldestXmin, &FreezeLimit, &xidFullScanLimit,
+ &MultiXactCutoff, &mxactFullScanLimit);
+
+ /*
+ * We request an aggressive scan if the table's frozen Xid is now older
+ * than or equal to the requested Xid full-table scan limit; or if the
+ * table's minimum MultiXactId is older than or equal to the requested
+ * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
+ */
+ aggressive = TransactionIdPrecedesOrEquals(rel->rd_rel->relfrozenxid,
+ xidFullScanLimit);
+ aggressive |= MultiXactIdPrecedesOrEquals(rel->rd_rel->relminmxid,
+ mxactFullScanLimit);
+ if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
+ aggressive = true;
+
+ vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
+
+ /* Set up high level stuff about rel */
+ vacrel->rel = rel;
+ vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
+ &vacrel->indrels);
+ vacrel->failsafe_active = false;
+ vacrel->consider_bypass_optimization = true;
+
+ /*
+ * The index_cleanup param either disables index vacuuming and cleanup or
+ * forces it to go ahead when we would otherwise apply the index bypass
+ * optimization. The default is 'auto', which leaves the final decision
+ * up to lazy_vacuum().
+ *
+ * The truncate param allows the user to avoid attempting relation truncation,
+ * though it can't force truncation to happen.
+ */
+ Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
+ Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
+ params->truncate != VACOPTVALUE_AUTO);
+ vacrel->do_index_vacuuming = true;
+ vacrel->do_index_cleanup = true;
+ vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
+ if (params->index_cleanup == VACOPTVALUE_DISABLED)
+ {
+ /* Force disable index vacuuming up-front */
+ vacrel->do_index_vacuuming = false;
+ vacrel->do_index_cleanup = false;
+ }
+ else if (params->index_cleanup == VACOPTVALUE_ENABLED)
+ {
+ /* Force index vacuuming. Note that the failsafe can still bypass it. */
+ vacrel->consider_bypass_optimization = false;
+ }
+ else
+ {
+ /* Default/auto, make all decisions dynamically */
+ Assert(params->index_cleanup == VACOPTVALUE_AUTO);
+ }
+
+ vacrel->bstrategy = bstrategy;
+ vacrel->old_rel_pages = rel->rd_rel->relpages;
+ vacrel->old_live_tuples = rel->rd_rel->reltuples;
+ vacrel->relfrozenxid = rel->rd_rel->relfrozenxid;
+ vacrel->relminmxid = rel->rd_rel->relminmxid;
+
+ /* Set cutoffs for entire VACUUM */
+ vacrel->OldestXmin = OldestXmin;
+ vacrel->FreezeLimit = FreezeLimit;
+ vacrel->MultiXactCutoff = MultiXactCutoff;
+
+ vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
+ vacrel->relname = pstrdup(RelationGetRelationName(rel));
+ vacrel->indname = NULL;
+ vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
+
+ /* Save index names iff autovacuum logging requires it */
+ if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0 &&
+ vacrel->nindexes > 0)
+ {
+ indnames = palloc(sizeof(char *) * vacrel->nindexes);
+ for (int i = 0; i < vacrel->nindexes; i++)
+ indnames[i] =
+ pstrdup(RelationGetRelationName(vacrel->indrels[i]));
+ }
+
+ /*
+ * Setup error traceback support for ereport(). The idea is to set up an
+ * error context callback to display additional information on any error
+ * during a vacuum. During different phases of vacuum (heap scan, heap
+ * vacuum, index vacuum, index clean up, heap truncate), we update the
+ * error context callback to display appropriate information.
+ *
+ * Note that the index vacuum and heap vacuum phases may be called
+ * multiple times in the middle of the heap scan phase. So the old phase
+ * information is restored at the end of those phases.
+ */
+ errcallback.callback = vacuum_error_callback;
+ errcallback.arg = vacrel;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* Do the vacuuming */
+ lazy_scan_heap(vacrel, params, aggressive);
+
+ /* Done with indexes */
+ vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
+
+ /*
+ * Compute whether we actually scanned all the unfrozen pages. If we did,
+ * we can adjust relfrozenxid and relminmxid.
+ *
+ * NB: We need to check this before truncating the relation, because that
+ * will change ->rel_pages.
+ */
+ if ((vacrel->scanned_pages + vacrel->frozenskipped_pages)
+ < vacrel->rel_pages)
+ {
+ Assert(!aggressive);
+ scanned_all_unfrozen = false;
+ }
+ else
+ scanned_all_unfrozen = true;
+
+ /*
+ * Optionally truncate the relation.
+ */
+ if (should_attempt_truncation(vacrel))
+ {
+ /*
+ * Update error traceback information. This is the last phase during
+ * which we add context information to errors, so we don't need to
+ * revert to the previous phase.
+ */
+ update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
+ vacrel->nonempty_pages,
+ InvalidOffsetNumber);
+ lazy_truncate_heap(vacrel);
+ }
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+
+ /* Report that we are now doing final cleanup */
+ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
+ PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
+
+ /*
+ * Update statistics in pg_class.
+ *
+ * In principle new_live_tuples could be -1 indicating that we (still)
+ * don't know the tuple count. In practice that probably can't happen,
+ * since we'd surely have scanned some pages if the table is new and
+ * nonempty.
+ *
+ * For safety, clamp relallvisible to be not more than what we're setting
+ * relpages to.
+ *
+ * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
+ * since then we don't know for certain that all tuples have a newer xmin.
+ */
+ new_rel_pages = vacrel->rel_pages;
+ new_live_tuples = vacrel->new_live_tuples;
+
+ visibilitymap_count(rel, &new_rel_allvisible, NULL);
+ if (new_rel_allvisible > new_rel_pages)
+ new_rel_allvisible = new_rel_pages;
+
+ new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
+ new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
+
+ vac_update_relstats(rel,
+ new_rel_pages,
+ new_live_tuples,
+ new_rel_allvisible,
+ vacrel->nindexes > 0,
+ new_frozen_xid,
+ new_min_multi,
+ false);
+
+ /*
+ * Report results to the stats collector, too.
+ *
+ * Deliberately avoid telling the stats collector about LP_DEAD items that
+ * remain in the table due to VACUUM bypassing index and heap vacuuming.
+ * ANALYZE will consider the remaining LP_DEAD items to be dead tuples. It
+ * seems like a good idea to err on the side of not vacuuming again too
+ * soon in cases where the failsafe prevented significant amounts of heap
+ * vacuuming.
+ */
+ pgstat_report_vacuum(RelationGetRelid(rel),
+ rel->rd_rel->relisshared,
+ Max(new_live_tuples, 0),
+ vacrel->new_dead_tuples);
+ pgstat_progress_end_command();
+
+ /* and log the action if appropriate */
+ if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
+ {
+ TimestampTz endtime = GetCurrentTimestamp();
+
+ if (params->log_min_duration == 0 ||
+ TimestampDifferenceExceeds(starttime, endtime,
+ params->log_min_duration))
+ {
+ StringInfoData buf;
+ char *msgfmt;
+ BlockNumber orig_rel_pages;
+
+ TimestampDifference(starttime, endtime, &secs, &usecs);
+
+ memset(&walusage, 0, sizeof(WalUsage));
+ WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);
+
+ read_rate = 0;
+ write_rate = 0;
+ if ((secs > 0) || (usecs > 0))
+ {
+ read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
+ (secs + usecs / 1000000.0);
+ write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
+ (secs + usecs / 1000000.0);
+ }
+
+ /*
+ * This is pretty messy, but we split it up so that we can skip
+ * emitting individual parts of the message when not applicable.
+ */
+ initStringInfo(&buf);
+ if (params->is_wraparound)
+ {
+ /*
+ * While it's possible for a VACUUM to be both is_wraparound
+ * and !aggressive, that's just a corner-case -- is_wraparound
+ * implies aggressive. Produce distinct output for the corner
+ * case all the same, just in case.
+ */
+ if (aggressive)
+ msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
+ else
+ msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
+ }
+ else
+ {
+ if (aggressive)
+ msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n");
+ else
+ msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n");
+ }
+ appendStringInfo(&buf, msgfmt,
+ get_database_name(MyDatabaseId),
+ vacrel->relnamespace,
+ vacrel->relname,
+ vacrel->num_index_scans);
+ appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
+ vacrel->pages_removed,
+ vacrel->rel_pages,
+ vacrel->pinskipped_pages,
+ vacrel->frozenskipped_pages);
+ appendStringInfo(&buf,
+ _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable, oldest xmin: %u\n"),
+ (long long) vacrel->tuples_deleted,
+ (long long) vacrel->new_rel_tuples,
+ (long long) vacrel->new_dead_tuples,
+ OldestXmin);
+ orig_rel_pages = vacrel->rel_pages + vacrel->pages_removed;
+ if (orig_rel_pages > 0)
+ {
+ if (vacrel->do_index_vacuuming)
+ {
+ if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
+ appendStringInfoString(&buf, _("index scan not needed: "));
+ else
+ appendStringInfoString(&buf, _("index scan needed: "));
+
+ msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
+ }
+ else
+ {
+ if (!vacrel->failsafe_active)
+ appendStringInfoString(&buf, _("index scan bypassed: "));
+ else
+ appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
+
+ msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
+ }
+ appendStringInfo(&buf, msgfmt,
+ vacrel->lpdead_item_pages,
+ 100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
+ (long long) vacrel->lpdead_items);
+ }
+ for (int i = 0; i < vacrel->nindexes; i++)
+ {
+ IndexBulkDeleteResult *istat = vacrel->indstats[i];
+
+ if (!istat)
+ continue;
+
+ appendStringInfo(&buf,
+ _("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
+ indnames[i],
+ istat->num_pages,
+ istat->pages_newly_deleted,
+ istat->pages_deleted,
+ istat->pages_free);
+ }
+ if (track_io_timing)
+ {
+ double read_ms = (double) (pgStatBlockReadTime - startreadtime) / 1000;
+ double write_ms = (double) (pgStatBlockWriteTime - startwritetime) / 1000;
+
+ appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
+ read_ms, write_ms);
+ }
+ appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
+ read_rate, write_rate);
+ appendStringInfo(&buf,
+ _("buffer usage: %lld hits, %lld misses, %lld dirtied\n"),
+ (long long) VacuumPageHit,
+ (long long) VacuumPageMiss,
+ (long long) VacuumPageDirty);
+ appendStringInfo(&buf,
+ _("WAL usage: %lld records, %lld full page images, %llu bytes\n"),
+ (long long) walusage.wal_records,
+ (long long) walusage.wal_fpi,
+ (unsigned long long) walusage.wal_bytes);
+ appendStringInfo(&buf, _("system usage: %s"), pg_rusage_show(&ru0));
+
+ ereport(LOG,
+ (errmsg_internal("%s", buf.data)));
+ pfree(buf.data);
+ }
+ }
+
+ /* Cleanup index statistics and index names */
+ for (int i = 0; i < vacrel->nindexes; i++)
+ {
+ if (vacrel->indstats[i])
+ pfree(vacrel->indstats[i]);
+
+ if (indnames && indnames[i])
+ pfree(indnames[i]);
+ }
+}
+
+/*
+ * lazy_scan_heap() -- scan an open heap relation
+ *
+ * This routine prunes each page in the heap, which will among other
+ * things truncate dead tuples to dead line pointers, defragment the
+ * page, and set commit status bits (see heap_page_prune). It also builds
+ * lists of dead tuples and pages with free space, calculates statistics
+ * on the number of live tuples in the heap, and marks pages as
+ * all-visible if appropriate. When done, or when we run low on space
+ * for dead-tuple TIDs, invoke lazy_vacuum to vacuum indexes and to
+ * vacuum the heap relation during its own second pass over the heap.
+ *
+ * If the table has at least two indexes, we execute both index vacuum
+ * and index cleanup with parallel workers unless parallel vacuum is
+ * disabled. In a parallel vacuum, we enter parallel mode and then
+ * create both the parallel context and the DSM segment before starting
+ * heap scan so that we can record dead tuples to the DSM segment. All
+ * parallel workers are launched at the beginning of index vacuuming and
+ * index cleanup and they exit once done with all indexes. At the end of
+ * this function we exit from parallel mode. Index bulk-deletion results
+ * are stored in the DSM segment and we update index statistics for all
+ * the indexes after exiting from parallel mode since writes are not
+ * allowed during parallel mode.
+ *
+ * If there are no indexes then we can reclaim line pointers on the fly;
+ * dead line pointers need only be retained until all index pointers that
+ * reference them have been killed.
+ */
+static void
+lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
+{
+ LVDeadTuples *dead_tuples;
+ BlockNumber nblocks,
+ blkno,
+ next_unskippable_block,
+ next_failsafe_block,
+ next_fsm_block_to_vacuum;
+ PGRUsage ru0;
+ Buffer vmbuffer = InvalidBuffer;
+ bool skipping_blocks;
+ StringInfoData buf;
+ const int initprog_index[] = {
+ PROGRESS_VACUUM_PHASE,
+ PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
+ PROGRESS_VACUUM_MAX_DEAD_TUPLES
+ };
+ int64 initprog_val[3];
+ GlobalVisState *vistest;
+
+ pg_rusage_init(&ru0);
+
+ if (aggressive)
+ ereport(elevel,
+ (errmsg("aggressively vacuuming \"%s.%s\"",
+ vacrel->relnamespace,
+ vacrel->relname)));
+ else
+ ereport(elevel,
+ (errmsg("vacuuming \"%s.%s\"",
+ vacrel->relnamespace,
+ vacrel->relname)));
+
+ nblocks = RelationGetNumberOfBlocks(vacrel->rel);
+ next_unskippable_block = 0;
+ next_failsafe_block = 0;
+ next_fsm_block_to_vacuum = 0;
+ vacrel->rel_pages = nblocks;
+ vacrel->scanned_pages = 0;
+ vacrel->pinskipped_pages = 0;
+ vacrel->frozenskipped_pages = 0;
+ vacrel->tupcount_pages = 0;
+ vacrel->pages_removed = 0;
+ vacrel->lpdead_item_pages = 0;
+ vacrel->nonempty_pages = 0;
+
+ /* Initialize instrumentation counters */
+ vacrel->num_index_scans = 0;
+ vacrel->tuples_deleted = 0;
+ vacrel->lpdead_items = 0;
+ vacrel->new_dead_tuples = 0;
+ vacrel->num_tuples = 0;
+ vacrel->live_tuples = 0;
+
+ vistest = GlobalVisTestFor(vacrel->rel);
+
+ vacrel->indstats = (IndexBulkDeleteResult **)
+ palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
+
+ /*
+ * Before beginning scan, check if it's already necessary to apply
+ * failsafe
+ */
+ lazy_check_wraparound_failsafe(vacrel);
+
+ /*
+ * Allocate the space for dead tuples. Note that this handles parallel
+ * VACUUM initialization as part of allocating shared memory space used
+ * for dead_tuples.
+ */
+ lazy_space_alloc(vacrel, params->nworkers, nblocks);
+ dead_tuples = vacrel->dead_tuples;
+
+ /* Report that we're scanning the heap, advertising total # of blocks */
+ initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
+ initprog_val[1] = nblocks;
+ initprog_val[2] = dead_tuples->max_tuples;
+ pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
+
+ /*
+ * Except when aggressive is set, we want to skip pages that are
+ * all-visible according to the visibility map, but only when we can skip
+ * at least SKIP_PAGES_THRESHOLD consecutive pages. Since we're reading
+ * sequentially, the OS should be doing readahead for us, so there's no
+ * gain in skipping a page now and then; that's likely to disable
+ * readahead and so be counterproductive. Also, skipping even a single
+ * page means that we can't update relfrozenxid, so we only want to do it
+ * if we can skip a goodly number of pages.
+ *
+ * When aggressive is set, we can't skip pages just because they are
+ * all-visible, but we can still skip pages that are all-frozen, since
+ * such pages do not need freezing and do not affect the value that we can
+ * safely set for relfrozenxid or relminmxid.
+ *
+ * Before entering the main loop, establish the invariant that
+ * next_unskippable_block is the next block number >= blkno that we can't
+ * skip based on the visibility map, either all-visible for a regular scan
+ * or all-frozen for an aggressive scan. We set it to nblocks if there's
+ * no such block. We also set up the skipping_blocks flag correctly at
+ * this stage.
+ *
+ * Note: The value returned by visibilitymap_get_status could be slightly
+ * out-of-date, since we make this test before reading the corresponding
+ * heap page or locking the buffer. This is OK. If we mistakenly think
+ * that the page is all-visible or all-frozen when in fact the flag's just
+ * been cleared, we might fail to vacuum the page. It's easy to see that
+ * skipping a page when aggressive is not set is not a very big deal; we
+ * might leave some dead tuples lying around, but the next vacuum will
+ * find them. But even when aggressive *is* set, it's still OK if we miss
+ * a page whose all-frozen marking has just been cleared. Any new XIDs
+ * just added to that page are necessarily newer than the GlobalXmin we
+ * computed, so they'll have no effect on the value to which we can safely
+ * set relfrozenxid. A similar argument applies for MXIDs and relminmxid.
+ *
+ * We will scan the table's last page, at least to the extent of
+ * determining whether it has tuples or not, even if it should be skipped
+ * according to the above rules; except when we've already determined that
+ * it's not worth trying to truncate the table. This avoids having
+ * lazy_truncate_heap() take access-exclusive lock on the table to attempt
+ * a truncation that just fails immediately because there are tuples in
+ * the last page. This is worth avoiding mainly because such a lock must
+ * be replayed on any hot standby, where it can be disruptive.
+ */
+ if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
+ {
+ while (next_unskippable_block < nblocks)
+ {
+ uint8 vmstatus;
+
+ vmstatus = visibilitymap_get_status(vacrel->rel,
+ next_unskippable_block,
+ &vmbuffer);
+ if (aggressive)
+ {
+ if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
+ break;
+ }
+ else
+ {
+ if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
+ break;
+ }
+ vacuum_delay_point();
+ next_unskippable_block++;
+ }
+ }
+
+ if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
+ skipping_blocks = true;
+ else
+ skipping_blocks = false;
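+
+	/*
+	 * Illustration, assuming SKIP_PAGES_THRESHOLD is 32 (its stock value):
+	 * if the first 100 pages are all-visible (all-frozen, for an aggressive
+	 * scan) but page 100 is not, next_unskippable_block starts out as 100
+	 * and skipping_blocks as true, so the loop below skips pages 0-99.  If
+	 * instead only the first 10 pages were skippable, skipping_blocks would
+	 * start out false and we'd scan them anyway, relying on OS readahead.
+	 */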
+
+ for (blkno = 0; blkno < nblocks; blkno++)
+ {
+ Buffer buf;
+ Page page;
+ bool all_visible_according_to_vm = false;
+ LVPagePruneState prunestate;
+
+ /*
+ * Consider need to skip blocks. See note above about forcing
+ * scanning of last page.
+ */
+#define FORCE_CHECK_PAGE() \
+ (blkno == nblocks - 1 && should_attempt_truncation(vacrel))
+
+ pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
+
+ update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
+ blkno, InvalidOffsetNumber);
+
+ if (blkno == next_unskippable_block)
+ {
+ /* Time to advance next_unskippable_block */
+ next_unskippable_block++;
+ if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
+ {
+ while (next_unskippable_block < nblocks)
+ {
+ uint8 vmskipflags;
+
+ vmskipflags = visibilitymap_get_status(vacrel->rel,
+ next_unskippable_block,
+ &vmbuffer);
+ if (aggressive)
+ {
+ if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
+ break;
+ }
+ else
+ {
+ if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
+ break;
+ }
+ vacuum_delay_point();
+ next_unskippable_block++;
+ }
+ }
+
+ /*
+ * We know we can't skip the current block. But set up
+ * skipping_blocks to do the right thing for the following blocks.
+ */
+ if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
+ skipping_blocks = true;
+ else
+ skipping_blocks = false;
+
+ /*
+ * Normally, the fact that we can't skip this block must mean that
+ * it's not all-visible. But in an aggressive vacuum we know only
+ * that it's not all-frozen, so it might still be all-visible.
+ */
+ if (aggressive && VM_ALL_VISIBLE(vacrel->rel, blkno, &vmbuffer))
+ all_visible_according_to_vm = true;
+ }
+ else
+ {
+ /*
+ * The current block is potentially skippable; if we've seen a
+ * long enough run of skippable blocks to justify skipping it, and
+ * we're not forced to check it, then go ahead and skip.
+ * Otherwise, the page must be at least all-visible if not
+ * all-frozen, so we can set all_visible_according_to_vm = true.
+ */
+ if (skipping_blocks && !FORCE_CHECK_PAGE())
+ {
+ /*
+ * Tricky, tricky. If this is in aggressive vacuum, the page
+ * must have been all-frozen at the time we checked whether it
+ * was skippable, but it might not be any more. We must be
+ * careful to count it as a skipped all-frozen page in that
+ * case, or else we'll think we can't update relfrozenxid and
+ * relminmxid. If it's not an aggressive vacuum, we don't
+ * know whether it was all-frozen, so we have to recheck; but
+ * in this case an approximate answer is OK.
+ */
+ if (aggressive || VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
+ vacrel->frozenskipped_pages++;
+ continue;
+ }
+ all_visible_according_to_vm = true;
+ }
+
+ vacuum_delay_point();
+
+ /*
+ * Regularly check if wraparound failsafe should trigger.
+ *
+ * There is a similar check inside lazy_vacuum_all_indexes(), but
+ * relfrozenxid might start to look dangerously old before we reach
+ * that point. This check also provides failsafe coverage for the
+ * one-pass strategy, and the two-pass strategy with the index_cleanup
+ * param set to 'off'.
+ */
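+
+		/*
+		 * For a sense of scale: with the stock FAILSAFE_EVERY_PAGES (4GB
+		 * worth of blocks) and the default 8kB block size, the re-check
+		 * below runs once per 524,288 heap blocks processed.
+		 */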
+ if (blkno - next_failsafe_block >= FAILSAFE_EVERY_PAGES)
+ {
+ lazy_check_wraparound_failsafe(vacrel);
+ next_failsafe_block = blkno;
+ }
+
+ /*
+ * Consider if we definitely have enough space to process TIDs on page
+ * already. If we are close to overrunning the available space for
+ * dead-tuple TIDs, pause and do a cycle of vacuuming before we tackle
+ * this page.
+ */
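+
+		/*
+		 * Worked example of the check below, assuming 8kB pages (where
+		 * MaxHeapTuplesPerPage is 291): if dead_tuples->max_tuples is
+		 * 1,000,000, we pause for a round of index/heap vacuuming as soon
+		 * as fewer than 291 free slots remain, guaranteeing that the next
+		 * page's LP_DEAD TIDs always fit in the array.
+		 */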
+ if ((dead_tuples->max_tuples - dead_tuples->num_tuples) < MaxHeapTuplesPerPage &&
+ dead_tuples->num_tuples > 0)
+ {
+ /*
+ * Before beginning index vacuuming, we release any pin we may
+ * hold on the visibility map page. This isn't necessary for
+ * correctness, but we do it anyway to avoid holding the pin
+ * across a lengthy, unrelated operation.
+ */
+ if (BufferIsValid(vmbuffer))
+ {
+ ReleaseBuffer(vmbuffer);
+ vmbuffer = InvalidBuffer;
+ }
+
+ /* Remove the collected garbage tuples from table and indexes */
+ vacrel->consider_bypass_optimization = false;
+ lazy_vacuum(vacrel);
+
+ /*
+ * Vacuum the Free Space Map to make newly-freed space visible on
+ * upper-level FSM pages. Note we have not yet processed blkno.
+ */
+ FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
+ blkno);
+ next_fsm_block_to_vacuum = blkno;
+
+ /* Report that we are once again scanning the heap */
+ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
+ PROGRESS_VACUUM_PHASE_SCAN_HEAP);
+ }
+
+ /*
+ * Set up visibility map page as needed.
+ *
+ * Pin the visibility map page in case we need to mark the page
+ * all-visible. In most cases this will be very cheap, because we'll
+ * already have the correct page pinned anyway. However, it's
+ * possible that (a) next_unskippable_block is covered by a different
+ * VM page than the current block or (b) we released our pin and did a
+ * cycle of index vacuuming.
+ */
+ visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
+
+ buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, vacrel->bstrategy);
+
+ /*
+ * We need buffer cleanup lock so that we can prune HOT chains and
+ * defragment the page.
+ */
+ if (!ConditionalLockBufferForCleanup(buf))
+ {
+ bool hastup;
+
+ /*
+ * If we're not performing an aggressive scan to guard against XID
+ * wraparound, and we don't want to forcibly check the page, then
+ * it's OK to skip vacuuming pages we get a lock conflict on. They
+ * will be dealt with in some future vacuum.
+ */
+ if (!aggressive && !FORCE_CHECK_PAGE())
+ {
+ ReleaseBuffer(buf);
+ vacrel->pinskipped_pages++;
+ continue;
+ }
+
+ /*
+ * Read the page with share lock to see if any xids on it need to
+ * be frozen. If not we just skip the page, after updating our
+ * scan statistics. If there are some, we wait for cleanup lock.
+ *
+ * We could defer the lock request further by remembering the page
+ * and coming back to it later, or we could even register
+ * ourselves for multiple buffers and then service whichever one
+ * is received first. For now, this seems good enough.
+ *
+ * If we get here with aggressive false, then we're just forcibly
+ * checking the page, and so we don't want to insist on getting
+ * the lock; we only need to know if the page contains tuples, so
+ * that we can update nonempty_pages correctly. It's convenient
+ * to use lazy_check_needs_freeze() for both situations, though.
+ */
+ LockBuffer(buf, BUFFER_LOCK_SHARE);
+ if (!lazy_check_needs_freeze(buf, &hastup, vacrel))
+ {
+ UnlockReleaseBuffer(buf);
+ vacrel->scanned_pages++;
+ vacrel->pinskipped_pages++;
+ if (hastup)
+ vacrel->nonempty_pages = blkno + 1;
+ continue;
+ }
+ if (!aggressive)
+ {
+ /*
+ * Here, we must not advance scanned_pages; that would amount
+ * to claiming that the page contains no freezable tuples.
+ */
+ UnlockReleaseBuffer(buf);
+ vacrel->pinskipped_pages++;
+ if (hastup)
+ vacrel->nonempty_pages = blkno + 1;
+ continue;
+ }
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+ LockBufferForCleanup(buf);
+ /* drop through to normal processing */
+ }
+
+ /*
+ * By here we definitely have enough dead_tuples space for whatever
+ * LP_DEAD tids are on this page, we have the visibility map page set
+ * up in case we need to set this page's all_visible/all_frozen bit,
+ * and we have a super-exclusive lock. Any tuples on this page are
+ * now sure to be "counted" by this VACUUM.
+ *
+ * One last piece of preamble needs to take place before we can prune:
+ * we need to consider new and empty pages.
+ */
+ vacrel->scanned_pages++;
+ vacrel->tupcount_pages++;
+
+ page = BufferGetPage(buf);
+
+ if (PageIsNew(page))
+ {
+ /*
+ * All-zeroes pages can be left over if either a backend extends
+ * the relation by a single page, but crashes before the newly
+ * initialized page has been written out, or when bulk-extending
+ * the relation (which creates a number of empty pages at the tail
+ * end of the relation, but enters them into the FSM).
+ *
+ * Note we do not enter the page into the visibilitymap. That has
+ * the downside that we repeatedly visit this page in subsequent
+ * vacuums, but otherwise we'll never discover the space on a
+ * promoted standby. The harm of repeated checking ought to
+ * normally not be too bad - the space usually should be used at
+ * some point, otherwise there wouldn't be any regular vacuums.
+ *
+ * Make sure these pages are in the FSM, to ensure they can be
+ * reused. Do that by testing if there's any space recorded for
+ * the page. If not, enter it. We do so after releasing the lock
+ * on the heap page; the FSM is approximate, after all.
+ */
+ UnlockReleaseBuffer(buf);
+
+ if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
+ {
+ Size freespace = BLCKSZ - SizeOfPageHeaderData;
+
+ RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
+ }
+ continue;
+ }
+
+ if (PageIsEmpty(page))
+ {
+ Size freespace = PageGetHeapFreeSpace(page);
+
+ /*
+ * Empty pages are always all-visible and all-frozen (note that
+ * the same is currently not true for new pages, see above).
+ */
+ if (!PageIsAllVisible(page))
+ {
+ START_CRIT_SECTION();
+
+ /* mark buffer dirty before writing a WAL record */
+ MarkBufferDirty(buf);
+
+ /*
+ * It's possible that another backend has extended the heap,
+ * initialized the page, and then failed to WAL-log the page
+ * due to an ERROR. Since heap extension is not WAL-logged,
+ * recovery might try to replay our record setting the page
+ * all-visible and find that the page isn't initialized, which
+ * will cause a PANIC. To prevent that, check whether the
+ * page has been previously WAL-logged, and if not, do that
+ * now.
+ */
+ if (RelationNeedsWAL(vacrel->rel) &&
+ PageGetLSN(page) == InvalidXLogRecPtr)
+ log_newpage_buffer(buf, true);
+
+ PageSetAllVisible(page);
+ visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
+ vmbuffer, InvalidTransactionId,
+ VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
+ END_CRIT_SECTION();
+ }
+
+ UnlockReleaseBuffer(buf);
+ RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
+ continue;
+ }
+
+ /*
+ * Prune and freeze tuples.
+ *
+ * Accumulates details of remaining LP_DEAD line pointers on page in
+ * dead tuple list. This includes LP_DEAD line pointers that we
+ * pruned ourselves, as well as existing LP_DEAD line pointers that
+ * were pruned some time earlier. Also considers freezing XIDs in the
+ * tuple headers of remaining items with storage.
+ */
+ lazy_scan_prune(vacrel, buf, blkno, page, vistest, &prunestate);
+
+ Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
+
+ /* Remember the location of the last page with nonremovable tuples */
+ if (prunestate.hastup)
+ vacrel->nonempty_pages = blkno + 1;
+
+ if (vacrel->nindexes == 0)
+ {
+ /*
+ * Now consider the need to do page-at-a-time heap vacuuming,
+ * since we are using the one-pass strategy.
+ *
+ * The one-pass strategy will never call lazy_vacuum(). The steps
+ * performed here can be thought of as the one-pass equivalent of
+ * a call to lazy_vacuum().
+ */
+ if (prunestate.has_lpdead_items)
+ {
+ Size freespace;
+
+ lazy_vacuum_heap_page(vacrel, blkno, buf, 0, &vmbuffer);
+
+ /* Forget the now-vacuumed tuples */
+ dead_tuples->num_tuples = 0;
+
+ /*
+ * Periodically perform FSM vacuuming to make newly-freed
+ * space visible on upper FSM pages. Note we have not yet
+ * performed FSM processing for blkno.
+ */
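+
+			/*
+			 * With the stock VACUUM_FSM_EVERY_PAGES (8GB worth of blocks)
+			 * and 8kB pages, this works out to one FSM vacuum per
+			 * 1,048,576 heap blocks processed in the no-index case.
+			 */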
+ if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
+ {
+ FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
+ blkno);
+ next_fsm_block_to_vacuum = blkno;
+ }
+
+ /*
+ * Now perform FSM processing for blkno, and move on to next
+ * page.
+ *
+ * Our call to lazy_vacuum_heap_page() will have considered if
+ * it's possible to set all_visible/all_frozen independently
+ * of lazy_scan_prune().  Note that prunestate was invalidated
+ * by the lazy_vacuum_heap_page() call.
+ */
+ freespace = PageGetHeapFreeSpace(page);
+
+ UnlockReleaseBuffer(buf);
+ RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
+ continue;
+ }
+
+ /*
+ * There was no call to lazy_vacuum_heap_page() because pruning
+ * didn't encounter/create any LP_DEAD items that needed to be
+ * vacuumed. Prune state has not been invalidated, so proceed
+ * with prunestate-driven visibility map and FSM steps (just like
+ * the two-pass strategy).
+ */
+ Assert(dead_tuples->num_tuples == 0);
+ }
+
+ /*
+ * Handle setting visibility map bit based on what the VM said about
+ * the page before pruning started, and using prunestate
+ */
+ if (!all_visible_according_to_vm && prunestate.all_visible)
+ {
+ uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
+
+ if (prunestate.all_frozen)
+ flags |= VISIBILITYMAP_ALL_FROZEN;
+
+ /*
+ * It should never be the case that the visibility map page is set
+ * while the page-level bit is clear, but the reverse is allowed
+ * (if checksums are not enabled). Regardless, set both bits so
+ * that we get back in sync.
+ *
+ * NB: If the heap page is all-visible but the VM bit is not set,
+ * we don't need to dirty the heap page. However, if checksums
+ * are enabled, we do need to make sure that the heap page is
+ * dirtied before passing it to visibilitymap_set(), because it
+ * may be logged. Given that this situation should only happen in
+ * rare cases after a crash, it is not worth optimizing.
+ */
+ PageSetAllVisible(page);
+ MarkBufferDirty(buf);
+ visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
+ vmbuffer, prunestate.visibility_cutoff_xid,
+ flags);
+ }
+
+ /*
+ * As of PostgreSQL 9.2, the visibility map bit should never be set if
+ * the page-level bit is clear. However, it's possible that the bit
+ * got cleared after we checked it and before we took the buffer
+ * content lock, so we must recheck before jumping to the conclusion
+ * that something bad has happened.
+ */
+ else if (all_visible_according_to_vm && !PageIsAllVisible(page)
+ && VM_ALL_VISIBLE(vacrel->rel, blkno, &vmbuffer))
+ {
+ elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
+ vacrel->relname, blkno);
+ visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
+ VISIBILITYMAP_VALID_BITS);
+ }
+
+ /*
+ * It's possible for the value returned by
+ * GetOldestNonRemovableTransactionId() to move backwards, so it's not
+ * wrong for us to see tuples that appear to not be visible to
+ * everyone yet, while PD_ALL_VISIBLE is already set. The real safe
+ * xmin value never moves backwards, but
+ * GetOldestNonRemovableTransactionId() is conservative and sometimes
+ * returns a value that's unnecessarily small, so if we see that
+ * contradiction it just means that the tuples that we think are not
+ * visible to everyone yet actually are, and the PD_ALL_VISIBLE flag
+ * is correct.
+ *
+ * There should never be dead tuples on a page with PD_ALL_VISIBLE
+ * set, however.
+ */
+ else if (prunestate.has_lpdead_items && PageIsAllVisible(page))
+ {
+ elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
+ vacrel->relname, blkno);
+ PageClearAllVisible(page);
+ MarkBufferDirty(buf);
+ visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
+ VISIBILITYMAP_VALID_BITS);
+ }
+
+ /*
+ * If the all-visible page is all-frozen but not marked as such yet,
+ * mark it as all-frozen. Note that all_frozen is only valid if
+ * all_visible is true, so we must check both.
+ */
+ else if (all_visible_according_to_vm && prunestate.all_visible &&
+ prunestate.all_frozen &&
+ !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
+ {
+ /*
+ * We can pass InvalidTransactionId as the cutoff XID here,
+ * because setting the all-frozen bit doesn't cause recovery
+ * conflicts.
+ */
+ visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
+ vmbuffer, InvalidTransactionId,
+ VISIBILITYMAP_ALL_FROZEN);
+ }
+
+ /*
+ * Final steps for block: drop super-exclusive lock, record free space
+ * in the FSM
+ */
+ if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming)
+ {
+ /*
+ * Wait until lazy_vacuum_heap_rel() to save free space. This
+ * doesn't just save us some cycles; it also allows us to record
+ * any additional free space that lazy_vacuum_heap_page() will
+ * make available in cases where it's possible to truncate the
+ * page's line pointer array.
+ *
+ * Note: It's not in fact 100% certain that we really will call
+ * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip
+ * index vacuuming (and so must skip heap vacuuming). This is
+ * deemed okay because it only happens in emergencies, or when
+ * there is very little free space anyway. (Besides, we start
+ * recording free space in the FSM once index vacuuming has been
+ * abandoned.)
+ *
+ * Note: The one-pass (no indexes) case is only supposed to make
+ * it this far when there were no LP_DEAD items during pruning.
+ */
+ Assert(vacrel->nindexes > 0);
+ UnlockReleaseBuffer(buf);
+ }
+ else
+ {
+ Size freespace = PageGetHeapFreeSpace(page);
+
+ UnlockReleaseBuffer(buf);
+ RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
+ }
+ }
+
+ /* report that everything is now scanned */
+ pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
+
+ /* Clear the block number information */
+ vacrel->blkno = InvalidBlockNumber;
+
+ /* now we can compute the new value for pg_class.reltuples */
+ vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, nblocks,
+ vacrel->tupcount_pages,
+ vacrel->live_tuples);
+
+ /*
+ * Also compute the total number of surviving heap entries. In the
+ * (unlikely) scenario that new_live_tuples is -1, take it as zero.
+ */
+ vacrel->new_rel_tuples =
+ Max(vacrel->new_live_tuples, 0) + vacrel->new_dead_tuples;
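+
+	/*
+	 * For example: if vac_estimate_reltuples() scaled our counts up to
+	 * 950,000 live tuples and we saw 50,000 recently dead tuples, then
+	 * new_rel_tuples is 1,000,000.  Index cleanup later uses this total,
+	 * since indexes also contain entries for recently dead tuples.
+	 */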
+
+ /*
+ * Release any remaining pin on visibility map page.
+ */
+ if (BufferIsValid(vmbuffer))
+ {
+ ReleaseBuffer(vmbuffer);
+ vmbuffer = InvalidBuffer;
+ }
+
+ /* If any tuples need to be deleted, perform final vacuum cycle */
+ if (dead_tuples->num_tuples > 0)
+ lazy_vacuum(vacrel);
+
+ /*
+ * Vacuum the remainder of the Free Space Map. We must do this whether or
+ * not there were indexes, and whether or not we bypassed index vacuuming.
+ */
+ if (blkno > next_fsm_block_to_vacuum)
+ FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno);
+
+ /* report all blocks vacuumed */
+ pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
+
+ /* Do post-vacuum cleanup */
+ if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
+ lazy_cleanup_all_indexes(vacrel);
+
+ /*
+ * Free resources managed by lazy_space_alloc(). (We must end parallel
+ * mode/free shared memory before updating index statistics. We cannot
+ * write while in parallel mode.)
+ */
+ lazy_space_free(vacrel);
+
+ /* Update index statistics */
+ if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
+ update_index_statistics(vacrel);
+
+ /*
+ * When the table has no indexes (i.e. in the one-pass strategy case),
+ * make the log report that lazy_vacuum_heap_rel would've made had there been
+ * indexes. (As in the two-pass strategy case, only make this report when
+ * there were LP_DEAD line pointers vacuumed in lazy_vacuum_heap_page.)
+ */
+ if (vacrel->nindexes == 0 && vacrel->lpdead_item_pages > 0)
+ ereport(elevel,
+ (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
+ vacrel->relname, (long long) vacrel->lpdead_items,
+ vacrel->lpdead_item_pages)));
+
+ /*
+ * Make a log report summarizing pruning and freezing.
+ *
+ * The autovacuum specific logging in heap_vacuum_rel summarizes an entire
+ * VACUUM operation, whereas each VACUUM VERBOSE log report generally
+ * summarizes a single round of index/heap vacuuming (or rel truncation).
+ * It wouldn't make sense to report on pruning or freezing while following
+ * that convention, though. You can think of this log report as a summary
+ * of our first pass over the heap.
+ */
+ initStringInfo(&buf);
+ appendStringInfo(&buf,
+ _("%lld dead row versions cannot be removed yet, oldest xmin: %u\n"),
+ (long long) vacrel->new_dead_tuples, vacrel->OldestXmin);
+ appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
+ "Skipped %u pages due to buffer pins, ",
+ vacrel->pinskipped_pages),
+ vacrel->pinskipped_pages);
+ appendStringInfo(&buf, ngettext("%u frozen page.\n",
+ "%u frozen pages.\n",
+ vacrel->frozenskipped_pages),
+ vacrel->frozenskipped_pages);
+ appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0));
+
+ ereport(elevel,
+ (errmsg("table \"%s\": found %lld removable, %lld nonremovable row versions in %u out of %u pages",
+ vacrel->relname,
+ (long long) vacrel->tuples_deleted,
+ (long long) vacrel->num_tuples, vacrel->scanned_pages,
+ nblocks),
+ errdetail_internal("%s", buf.data)));
+ pfree(buf.data);
+}
+
+/*
+ * lazy_scan_prune() -- lazy_scan_heap() pruning and freezing.
+ *
+ * Caller must hold pin and buffer cleanup lock on the buffer.
+ *
+ * Prior to PostgreSQL 14 there were very rare cases where heap_page_prune()
+ * was allowed to disagree with our HeapTupleSatisfiesVacuum() call about
+ * whether or not a tuple should be considered DEAD. This happened when an
+ * inserting transaction concurrently aborted (after our heap_page_prune()
+ * call, before our HeapTupleSatisfiesVacuum() call). There was rather a lot
+ * of complexity just so we could deal with tuples that were DEAD to VACUUM,
+ * but nevertheless were left with storage after pruning.
+ *
+ * The approach we take now is to restart pruning when the race condition is
+ * detected. This allows heap_page_prune() to prune the tuples inserted by
+ * the now-aborted transaction. This is a little crude, but it guarantees
+ * that any items that make it into the dead_tuples array are simple LP_DEAD
+ * line pointers, and that every remaining item with tuple storage is
+ * considered as a candidate for freezing.
+ */
+static void
+lazy_scan_prune(LVRelState *vacrel,
+ Buffer buf,
+ BlockNumber blkno,
+ Page page,
+ GlobalVisState *vistest,
+ LVPagePruneState *prunestate)
+{
+ Relation rel = vacrel->rel;
+ OffsetNumber offnum,
+ maxoff;
+ ItemId itemid;
+ HeapTupleData tuple;
+ HTSV_Result res;
+ int tuples_deleted,
+ lpdead_items,
+ new_dead_tuples,
+ num_tuples,
+ live_tuples;
+ int nfrozen;
+ OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
+ xl_heap_freeze_tuple frozen[MaxHeapTuplesPerPage];
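+
+	/*
+	 * Note on sizing: deadoffsets[] and frozen[] cover the per-page worst
+	 * case of MaxHeapTuplesPerPage entries (291 with the default 8kB block
+	 * size), so no overflow checks are needed inside the loop below.
+	 */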
+
+ maxoff = PageGetMaxOffsetNumber(page);
+
+retry:
+
+ /* Initialize (or reset) page-level counters */
+ tuples_deleted = 0;
+ lpdead_items = 0;
+ new_dead_tuples = 0;
+ num_tuples = 0;
+ live_tuples = 0;
+
+ /*
+ * Prune all HOT-update chains in this page.
+ *
+ * We count tuples removed by the pruning step as tuples_deleted. Its
+ * final value can be thought of as the number of tuples that have been
+ * deleted from the table. It should not be confused with lpdead_items;
+ * lpdead_items's final value can be thought of as the number of tuples
+ * that were deleted from indexes.
+ */
+ tuples_deleted = heap_page_prune(rel, buf, vistest,
+ InvalidTransactionId, 0, false,
+ &vacrel->offnum);
+
+ /*
+ * Now scan the page to collect LP_DEAD items and check for tuples
+ * requiring freezing among remaining tuples with storage
+ */
+ prunestate->hastup = false;
+ prunestate->has_lpdead_items = false;
+ prunestate->all_visible = true;
+ prunestate->all_frozen = true;
+ prunestate->visibility_cutoff_xid = InvalidTransactionId;
+ nfrozen = 0;
+
+ for (offnum = FirstOffsetNumber;
+ offnum <= maxoff;
+ offnum = OffsetNumberNext(offnum))
+ {
+ bool tuple_totally_frozen;
+
+ /*
+ * Set the offset number so that we can display it along with any
+ * error that occurred while processing this tuple.
+ */
+ vacrel->offnum = offnum;
+ itemid = PageGetItemId(page, offnum);
+
+ if (!ItemIdIsUsed(itemid))
+ continue;
+
+ /* Redirect items mustn't be touched */
+ if (ItemIdIsRedirected(itemid))
+ {
+ prunestate->hastup = true; /* page won't be truncatable */
+ continue;
+ }
+
+ /*
+ * LP_DEAD items are processed outside of the loop.
+ *
+ * Note that we deliberately don't set hastup=true in the case of an
+ * LP_DEAD item here, which is not how lazy_check_needs_freeze() or
+ * count_nondeletable_pages() do it -- they only consider pages empty
+ * when they only have LP_UNUSED items, which is important for
+ * correctness.
+ *
+ * Our assumption is that any LP_DEAD items we encounter here will
+ * become LP_UNUSED inside lazy_vacuum_heap_page() before we actually
+ * call count_nondeletable_pages(). In any case our opinion of
+ * whether or not a page 'hastup' (which is how our caller sets its
+ * vacrel->nonempty_pages value) is inherently race-prone. It must be
+ * treated as advisory/unreliable, so we might as well be slightly
+ * optimistic.
+ */
+ if (ItemIdIsDead(itemid))
+ {
+ deadoffsets[lpdead_items++] = offnum;
+ prunestate->all_visible = false;
+ prunestate->has_lpdead_items = true;
+ continue;
+ }
+
+ Assert(ItemIdIsNormal(itemid));
+
+ ItemPointerSet(&(tuple.t_self), blkno, offnum);
+ tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
+ tuple.t_len = ItemIdGetLength(itemid);
+ tuple.t_tableOid = RelationGetRelid(rel);
+
+ /*
+ * DEAD tuples are almost always pruned into LP_DEAD line pointers by
+ * heap_page_prune(), but it's possible that the tuple state changed
+ * since heap_page_prune() looked. Handle that here by restarting.
+ * (See comments at the top of function for a full explanation.)
+ */
+ res = HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf);
+
+ if (unlikely(res == HEAPTUPLE_DEAD))
+ goto retry;
+
+ /*
+ * The criteria for counting a tuple as live in this block need to
+ * match what analyze.c's acquire_sample_rows() does, otherwise VACUUM
+ * and ANALYZE may produce wildly different reltuples values, e.g.
+ * when there are many recently-dead tuples.
+ *
+ * The logic here is a bit simpler than acquire_sample_rows(), as
+ * VACUUM can't run inside a transaction block, which makes some cases
+ * impossible (e.g. in-progress insert from the same transaction).
+ *
+ * We treat LP_DEAD items a little differently, too -- we don't count
+ * them as dead_tuples at all (we only consider new_dead_tuples). The
+ * outcome is no different because we assume that any LP_DEAD items we
+ * encounter here will become LP_UNUSED inside lazy_vacuum_heap_page()
+ * before we report anything to the stats collector. (Cases where we
+ * bypass index vacuuming will violate our assumption, but the overall
+ * impact of that should be negligible.)
+ */
+ switch (res)
+ {
+ case HEAPTUPLE_LIVE:
+
+ /*
+ * Count it as live. Not only is this natural, but it's also
+ * what acquire_sample_rows() does.
+ */
+ live_tuples++;
+
+ /*
+ * Is the tuple definitely visible to all transactions?
+ *
+ * NB: Like with per-tuple hint bits, we can't set the
+ * PD_ALL_VISIBLE flag if the inserter committed
+ * asynchronously. See SetHintBits for more info. Check that
+ * the tuple is hinted xmin-committed because of that.
+ */
+ if (prunestate->all_visible)
+ {
+ TransactionId xmin;
+
+ if (!HeapTupleHeaderXminCommitted(tuple.t_data))
+ {
+ prunestate->all_visible = false;
+ break;
+ }
+
+ /*
+ * The inserter definitely committed. But is it old enough
+ * that everyone sees it as committed?
+ */
+ xmin = HeapTupleHeaderGetXmin(tuple.t_data);
+ if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin))
+ {
+ prunestate->all_visible = false;
+ break;
+ }
+
+ /* Track newest xmin on page. */
+ if (TransactionIdFollows(xmin, prunestate->visibility_cutoff_xid))
+ prunestate->visibility_cutoff_xid = xmin;
+ }
+ break;
+ case HEAPTUPLE_RECENTLY_DEAD:
+
+ /*
+ * If tuple is recently deleted then we must not remove it
+ * from relation. (We only remove items that are LP_DEAD from
+ * pruning.)
+ */
+ new_dead_tuples++;
+ prunestate->all_visible = false;
+ break;
+ case HEAPTUPLE_INSERT_IN_PROGRESS:
+
+ /*
+ * We do not count these rows as live, because we expect the
+ * inserting transaction to update the counters at commit, and
+ * we assume that will happen only after we report our
+ * results. This assumption is a bit shaky, but it is what
+ * acquire_sample_rows() does, so be consistent.
+ */
+ prunestate->all_visible = false;
+ break;
+ case HEAPTUPLE_DELETE_IN_PROGRESS:
+ /* This is an expected case during concurrent vacuum */
+ prunestate->all_visible = false;
+
+ /*
+ * Count such rows as live. As above, we assume the deleting
+ * transaction will commit and update the counters after we
+ * report.
+ */
+ live_tuples++;
+ break;
+ default:
+ elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
+ break;
+ }
+
+ /*
+ * Non-removable tuple (i.e. tuple with storage).
+ *
+ * Check tuple left behind after pruning to see if it needs to be
+ * frozen now.
+ */
+ num_tuples++;
+ prunestate->hastup = true;
+ if (heap_prepare_freeze_tuple(tuple.t_data,
+ vacrel->relfrozenxid,
+ vacrel->relminmxid,
+ vacrel->FreezeLimit,
+ vacrel->MultiXactCutoff,
+ &frozen[nfrozen],
+ &tuple_totally_frozen))
+ {
+ /* Will execute freeze below */
+ frozen[nfrozen++].offset = offnum;
+ }
+
+ /*
+ * If tuple is not frozen (and not about to become frozen) then caller
+ * had better not go on to set this page's VM bit
+ */
+ if (!tuple_totally_frozen)
+ prunestate->all_frozen = false;
+ }
+
+ /*
+ * We have now divided every item on the page into either an LP_DEAD item
+ * that will need to be vacuumed in indexes later, or a LP_NORMAL tuple
+ * that remains and needs to be considered for freezing now (LP_UNUSED and
+ * LP_REDIRECT items also remain, but are of no further interest to us).
+ */
+ vacrel->offnum = InvalidOffsetNumber;
+
+ /*
+ * Consider the need to freeze any items with tuple storage from the page
+ * first (the order relative to saving LP_DEAD items below is arbitrary)
+ */
+ if (nfrozen > 0)
+ {
+ Assert(prunestate->hastup);
+
+ /*
+ * At least one tuple with storage needs to be frozen -- execute that
+ * now.
+ *
+ * If we need to freeze any tuples we'll mark the buffer dirty, and
+ * write a WAL record recording the changes. We must log the changes
+ * to be crash-safe against future truncation of CLOG.
+ */
+ START_CRIT_SECTION();
+
+ MarkBufferDirty(buf);
+
+ /* execute collected freezes */
+ for (int i = 0; i < nfrozen; i++)
+ {
+ HeapTupleHeader htup;
+
+ itemid = PageGetItemId(page, frozen[i].offset);
+ htup = (HeapTupleHeader) PageGetItem(page, itemid);
+
+ heap_execute_freeze_tuple(htup, &frozen[i]);
+ }
+
+ /* Now WAL-log freezing if necessary */
+ if (RelationNeedsWAL(vacrel->rel))
+ {
+ XLogRecPtr recptr;
+
+ recptr = log_heap_freeze(vacrel->rel, buf, vacrel->FreezeLimit,
+ frozen, nfrozen);
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+ }
+
+ /*
+ * The second pass over the heap can also set visibility map bits, using
+ * the same approach. This is important when the table frequently has a
+ * few old LP_DEAD items on each page by the time we get to it (typically
+ * because past opportunistic pruning operations freed some non-HOT
+ * tuples).
+ *
+ * VACUUM will call heap_page_is_all_visible() during the second pass over
+ * the heap to determine all_visible and all_frozen for the page -- this
+ * is a specialized version of the logic from this function. Now that
+ * we've finished pruning and freezing, make sure that we're in total
+ * agreement with heap_page_is_all_visible() using an assertion.
+ */
+#ifdef USE_ASSERT_CHECKING
+ /* Note that all_frozen value does not matter when !all_visible */
+ if (prunestate->all_visible)
+ {
+ TransactionId cutoff;
+ bool all_frozen;
+
+ if (!heap_page_is_all_visible(vacrel, buf, &cutoff, &all_frozen))
+ Assert(false);
+
+ Assert(lpdead_items == 0);
+ Assert(prunestate->all_frozen == all_frozen);
+
+ /*
+ * It's possible that we froze tuples and made the page's XID cutoff
+ * (for recovery conflict purposes) FrozenTransactionId. This is okay
+ * because visibility_cutoff_xid will be logged by our caller in a
+ * moment.
+ */
+ Assert(cutoff == FrozenTransactionId ||
+ cutoff == prunestate->visibility_cutoff_xid);
+ }
+#endif
+
+ /*
+ * Now save details of the LP_DEAD items from the page in the dead_tuples
+ * array. Also record that page has dead items in per-page prunestate.
+ */
+ if (lpdead_items > 0)
+ {
+ LVDeadTuples *dead_tuples = vacrel->dead_tuples;
+ ItemPointerData tmp;
+
+ Assert(!prunestate->all_visible);
+ Assert(prunestate->has_lpdead_items);
+
+ vacrel->lpdead_item_pages++;
+
+ ItemPointerSetBlockNumber(&tmp, blkno);
+
+ for (int i = 0; i < lpdead_items; i++)
+ {
+ ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);
+ dead_tuples->itemptrs[dead_tuples->num_tuples++] = tmp;
+ }
+
+ Assert(dead_tuples->num_tuples <= dead_tuples->max_tuples);
+ pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
+ dead_tuples->num_tuples);
+ }
+
+ /* Finally, add page-local counts to whole-VACUUM counts */
+ vacrel->tuples_deleted += tuples_deleted;
+ vacrel->lpdead_items += lpdead_items;
+ vacrel->new_dead_tuples += new_dead_tuples;
+ vacrel->num_tuples += num_tuples;
+ vacrel->live_tuples += live_tuples;
+}
+
+/*
+ * Remove the collected garbage tuples from the table and its indexes.
+ *
+ * We may choose to bypass index vacuuming at this point, though only when the
+ * ongoing VACUUM operation will definitely only have one index scan/round of
+ * index vacuuming.  Caller indicates whether or not this is such a VACUUM
+ * operation by setting vacrel->consider_bypass_optimization.
+ *
+ * In rare emergencies, the ongoing VACUUM operation can be made to skip both
+ * index vacuuming and index cleanup at the point we're called. This avoids
+ * having the whole system refuse to allocate further XIDs/MultiXactIds due to
+ * wraparound.
+ */
+static void
+lazy_vacuum(LVRelState *vacrel)
+{
+ bool bypass;
+
+ /* Should not end up here with no indexes */
+ Assert(vacrel->nindexes > 0);
+ Assert(!IsParallelWorker());
+ Assert(vacrel->lpdead_item_pages > 0);
+
+ if (!vacrel->do_index_vacuuming)
+ {
+ Assert(!vacrel->do_index_cleanup);
+ vacrel->dead_tuples->num_tuples = 0;
+ return;
+ }
+
+ /*
+ * Consider bypassing index vacuuming (and heap vacuuming) entirely.
+ *
+ * We currently only do this in cases where the number of LP_DEAD items
+ * for the entire VACUUM operation is close to zero. This avoids sharp
+ * discontinuities in the duration and overhead of successive VACUUM
+ * operations that run against the same table with a fixed workload.
+ * Ideally, successive VACUUM operations will behave as if there are
+ * exactly zero LP_DEAD items in cases where there are close to zero.
+ *
+ * This is likely to be helpful with a table that is continually affected
+ * by UPDATEs that can mostly apply the HOT optimization, but occasionally
+ * have small aberrations that lead to just a few heap pages retaining
+ * only one or two LP_DEAD items. This is pretty common; even when the
+ * DBA goes out of their way to make UPDATEs use HOT, it is practically
+ * impossible to predict whether HOT will be applied in 100% of cases.
+ * It's far easier to ensure that 99%+ of all UPDATEs against a table use
+ * HOT through careful tuning.
+ */
+ bypass = false;
+ if (vacrel->consider_bypass_optimization && vacrel->rel_pages > 0)
+ {
+ BlockNumber threshold;
+
+ Assert(vacrel->num_index_scans == 0);
+ Assert(vacrel->lpdead_items == vacrel->dead_tuples->num_tuples);
+ Assert(vacrel->do_index_vacuuming);
+ Assert(vacrel->do_index_cleanup);
+
+ /*
+ * This crossover point at which we'll start to do index vacuuming is
+ * expressed as a percentage of the total number of heap pages in the
+ * table that are known to have at least one LP_DEAD item. This is
+ * much more important than the total number of LP_DEAD items, since
+ * it's a proxy for the number of heap pages whose visibility map bits
+ * cannot be set on account of bypassing index and heap vacuuming.
+ *
+ * We apply one further precautionary test: the space currently used
+ * to store the TIDs (TIDs that now all point to LP_DEAD items) must
+ * not exceed 32MB. This limits the risk that we will bypass index
+ * vacuuming again and again until eventually there is a VACUUM whose
+ * dead_tuples space is not CPU cache resident.
+ *
+ * We don't take any special steps to remember the LP_DEAD items (such
+ * as counting them in new_dead_tuples report to the stats collector)
+ * when the optimization is applied. Though the accounting used in
+ * analyze.c's acquire_sample_rows() will recognize the same LP_DEAD
+ * items as dead rows in its own stats collector report, that's okay.
+ * The discrepancy should be negligible. If this optimization is ever
+ * expanded to cover more cases then this may need to be reconsidered.
+ */
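+
+		/*
+		 * Worked example, using the stock BYPASS_THRESHOLD_PAGES of 0.02:
+		 * for a table with 100,000 pages, the bypass applies only when
+		 * fewer than 2,000 pages have LP_DEAD items and all of the TIDs
+		 * fit in 32MB (roughly 5.5 million 6-byte TIDs).
+		 */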
+ threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
+ bypass = (vacrel->lpdead_item_pages < threshold &&
+ vacrel->lpdead_items < MAXDEADTUPLES(32L * 1024L * 1024L));
+ }
+
+ if (bypass)
+ {
+ /*
+ * There are almost zero TIDs. Behave as if there were precisely
+ * zero: bypass index vacuuming, but do index cleanup.
+ *
+ * We expect that the ongoing VACUUM operation will finish very
+ * quickly, so there is no point in considering speeding up as a
+ * failsafe against wraparound failure. (Index cleanup is expected to
+ * finish very quickly in cases where there were no ambulkdelete()
+ * calls.)
+ */
+ vacrel->do_index_vacuuming = false;
+ ereport(elevel,
+ (errmsg("table \"%s\": index scan bypassed: %u pages from table (%.2f%% of total) have %lld dead item identifiers",
+ vacrel->relname, vacrel->lpdead_item_pages,
+ 100.0 * vacrel->lpdead_item_pages / vacrel->rel_pages,
+ (long long) vacrel->lpdead_items)));
+ }
+ else if (lazy_vacuum_all_indexes(vacrel))
+ {
+ /*
+ * We successfully completed a round of index vacuuming. Do related
+ * heap vacuuming now.
+ */
+ lazy_vacuum_heap_rel(vacrel);
+ }
+ else
+ {
+ /*
+ * Failsafe case.
+ *
+ * We attempted index vacuuming, but didn't finish a full round/full
+ * index scan. This happens when relfrozenxid or relminmxid is too
+ * far in the past.
+ *
+ * From this point on the VACUUM operation will do no further index
+ * vacuuming or heap vacuuming. This VACUUM operation won't end up
+ * back here again.
+ */
+ Assert(vacrel->failsafe_active);
+ }
+
+ /*
+ * Forget the LP_DEAD items that we just vacuumed (or just decided to not
+ * vacuum)
+ */
+ vacrel->dead_tuples->num_tuples = 0;
+}
+
+/*
+ * lazy_vacuum_all_indexes() -- Main entry for index vacuuming
+ *
+ * Returns true in the common case when all indexes were successfully
+ * vacuumed. Returns false in rare cases where we determined that the ongoing
+ * VACUUM operation is at risk of taking too long to finish, leading to
+ * wraparound failure.
+ */
+static bool
+lazy_vacuum_all_indexes(LVRelState *vacrel)
+{
+ bool allindexes = true;
+
+ Assert(!IsParallelWorker());
+ Assert(vacrel->nindexes > 0);
+ Assert(vacrel->do_index_vacuuming);
+ Assert(vacrel->do_index_cleanup);
+ Assert(TransactionIdIsNormal(vacrel->relfrozenxid));
+ Assert(MultiXactIdIsValid(vacrel->relminmxid));
+
+ /* Precheck for XID wraparound emergencies */
+ if (lazy_check_wraparound_failsafe(vacrel))
+ {
+ /* Wraparound emergency -- don't even start an index scan */
+ return false;
+ }
+
+ /* Report that we are now vacuuming indexes */
+ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
+ PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
+
+ if (!ParallelVacuumIsActive(vacrel))
+ {
+ for (int idx = 0; idx < vacrel->nindexes; idx++)
+ {
+ Relation indrel = vacrel->indrels[idx];
+ IndexBulkDeleteResult *istat = vacrel->indstats[idx];
+
+ vacrel->indstats[idx] =
+ lazy_vacuum_one_index(indrel, istat, vacrel->old_live_tuples,
+ vacrel);
+
+ if (lazy_check_wraparound_failsafe(vacrel))
+ {
+ /* Wraparound emergency -- end current index scan */
+ allindexes = false;
+ break;
+ }
+ }
+ }
+ else
+ {
+ /* Outsource everything to parallel variant */
+ do_parallel_lazy_vacuum_all_indexes(vacrel);
+
+ /*
+ * Do a postcheck to consider applying wraparound failsafe now. Note
+ * that parallel VACUUM only gets the precheck and this postcheck.
+ */
+ if (lazy_check_wraparound_failsafe(vacrel))
+ allindexes = false;
+ }
+
+ /*
+ * We delete all LP_DEAD items from the first heap pass in all indexes on
+ * each call here (except calls where we choose to do the failsafe). This
+ * makes the next call to lazy_vacuum_heap_rel() safe (except in the event
+ * of the failsafe triggering, which prevents the next call from taking
+ * place).
+ */
+ Assert(vacrel->num_index_scans > 0 ||
+ vacrel->dead_tuples->num_tuples == vacrel->lpdead_items);
+ Assert(allindexes || vacrel->failsafe_active);
+
+ /*
+ * Increase and report the number of index scans.
+ *
+ * We deliberately include the case where we started a round of bulk
+ * deletes that we weren't able to finish due to the failsafe triggering.
+ */
+ vacrel->num_index_scans++;
+ pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS,
+ vacrel->num_index_scans);
+
+ return allindexes;
+}
+
+/*
+ * lazy_vacuum_heap_rel() -- second pass over the heap for two pass strategy
+ *
+ * This routine marks LP_DEAD items in vacrel->dead_tuples array as LP_UNUSED.
+ * Pages that never had lazy_scan_prune record LP_DEAD items are not visited
+ * at all.
+ *
+ * We may also be able to truncate the line pointer array of the heap pages we
+ * visit. If there is a contiguous group of LP_UNUSED items at the end of the
+ * array, it can be reclaimed as free space. These LP_UNUSED items usually
+ * start out as LP_DEAD items recorded by lazy_scan_prune (we set items from
+ * each page to LP_UNUSED, and then consider if it's possible to truncate the
+ * page's line pointer array).
+ *
+ * Note: the reason for doing this as a second pass is we cannot remove the
+ * tuples until we've removed their index entries, and we want to process
+ * index entry removal in batches as large as possible.
+ */
+static void
+lazy_vacuum_heap_rel(LVRelState *vacrel)
+{
+ int tupindex;
+ BlockNumber vacuumed_pages;
+ PGRUsage ru0;
+ Buffer vmbuffer = InvalidBuffer;
+ LVSavedErrInfo saved_err_info;
+
+ Assert(vacrel->do_index_vacuuming);
+ Assert(vacrel->do_index_cleanup);
+ Assert(vacrel->num_index_scans > 0);
+
+ /* Report that we are now vacuuming the heap */
+ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
+ PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
+
+ /* Update error traceback information */
+ update_vacuum_error_info(vacrel, &saved_err_info,
+ VACUUM_ERRCB_PHASE_VACUUM_HEAP,
+ InvalidBlockNumber, InvalidOffsetNumber);
+
+ pg_rusage_init(&ru0);
+ vacuumed_pages = 0;
+
+ tupindex = 0;
+ while (tupindex < vacrel->dead_tuples->num_tuples)
+ {
+ BlockNumber tblk;
+ Buffer buf;
+ Page page;
+ Size freespace;
+
+ vacuum_delay_point();
+
+ tblk = ItemPointerGetBlockNumber(&vacrel->dead_tuples->itemptrs[tupindex]);
+ vacrel->blkno = tblk;
+ buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, tblk, RBM_NORMAL,
+ vacrel->bstrategy);
+ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+ tupindex = lazy_vacuum_heap_page(vacrel, tblk, buf, tupindex,
+ &vmbuffer);
+
+ /* Now that we've vacuumed the page, record its available space */
+ page = BufferGetPage(buf);
+ freespace = PageGetHeapFreeSpace(page);
+
+ UnlockReleaseBuffer(buf);
+ RecordPageWithFreeSpace(vacrel->rel, tblk, freespace);
+ vacuumed_pages++;
+ }
+
+ /* Clear the block number information */
+ vacrel->blkno = InvalidBlockNumber;
+
+ if (BufferIsValid(vmbuffer))
+ {
+ ReleaseBuffer(vmbuffer);
+ vmbuffer = InvalidBuffer;
+ }
+
+ /*
+ * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
+ * the second heap pass. No more, no less.
+ */
+ Assert(tupindex > 0);
+ Assert(vacrel->num_index_scans > 1 ||
+ (tupindex == vacrel->lpdead_items &&
+ vacuumed_pages == vacrel->lpdead_item_pages));
+
+ ereport(elevel,
+ (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
+ vacrel->relname, (long long) tupindex, vacuumed_pages),
+ errdetail_internal("%s", pg_rusage_show(&ru0))));
+
+ /* Revert to the previous phase information for error traceback */
+ restore_vacuum_error_info(vacrel, &saved_err_info);
+}
+
+/*
+ * lazy_vacuum_heap_page() -- free page's LP_DEAD items listed in the
+ * vacrel->dead_tuples array.
+ *
+ * Caller must have an exclusive buffer lock on the buffer (though a
+ * super-exclusive lock is also acceptable).
+ *
+ * tupindex is the index in vacrel->dead_tuples of the first dead tuple for
+ * this page. We assume the rest follow sequentially. The return value is
+ * the first tupindex after the tuples of this page.
+ *
+ * Prior to PostgreSQL 14 there were rare cases where this routine had to set
+ * tuples with storage to unused. These days it is strictly responsible for
+ * marking LP_DEAD stub line pointers as unused. This only happens for those
+ * LP_DEAD items on the page that were already LP_DEAD back when the same
+ * page was visited by lazy_scan_prune() (i.e. those whose TID was
+ * recorded in the dead_tuples array).
+ */
+static int
+lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
+ int tupindex, Buffer *vmbuffer)
+{
+ LVDeadTuples *dead_tuples = vacrel->dead_tuples;
+ Page page = BufferGetPage(buffer);
+ OffsetNumber unused[MaxHeapTuplesPerPage];
+ int uncnt = 0;
+ TransactionId visibility_cutoff_xid;
+ bool all_frozen;
+ LVSavedErrInfo saved_err_info;
+
+ Assert(vacrel->nindexes == 0 || vacrel->do_index_vacuuming);
+
+ pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
+
+ /* Update error traceback information */
+ update_vacuum_error_info(vacrel, &saved_err_info,
+ VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
+ InvalidOffsetNumber);
+
+ START_CRIT_SECTION();
+
+ for (; tupindex < dead_tuples->num_tuples; tupindex++)
+ {
+ BlockNumber tblk;
+ OffsetNumber toff;
+ ItemId itemid;
+
+ tblk = ItemPointerGetBlockNumber(&dead_tuples->itemptrs[tupindex]);
+ if (tblk != blkno)
+ break; /* past end of tuples for this block */
+ toff = ItemPointerGetOffsetNumber(&dead_tuples->itemptrs[tupindex]);
+ itemid = PageGetItemId(page, toff);
+
+ Assert(ItemIdIsDead(itemid) && !ItemIdHasStorage(itemid));
+ ItemIdSetUnused(itemid);
+ unused[uncnt++] = toff;
+ }
+
+ Assert(uncnt > 0);
+
+ /* Attempt to truncate line pointer array now */
+ PageTruncateLinePointerArray(page);
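+
+	/*
+	 * Illustration: if the items we just set LP_UNUSED happen to be the
+	 * last entries in the line pointer array, truncating the array gives
+	 * back their ItemIdData slots (4 bytes apiece) as free space, which
+	 * our caller then reports to the FSM.
+	 */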
+
+ /*
+ * Mark buffer dirty before we write WAL.
+ */
+ MarkBufferDirty(buffer);
+
+ /* XLOG stuff */
+ if (RelationNeedsWAL(vacrel->rel))
+ {
+ xl_heap_vacuum xlrec;
+ XLogRecPtr recptr;
+
+ xlrec.nunused = uncnt;
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfHeapVacuum);
+
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+ XLogRegisterBufData(0, (char *) unused, uncnt * sizeof(OffsetNumber));
+
+ recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VACUUM);
+
+ PageSetLSN(page, recptr);
+ }
+
+ /*
+ * End critical section, so we safely can do visibility tests (which
+ * possibly need to perform IO and allocate memory!). If we crash now the
+ * page (including the corresponding vm bit) might not be marked all
+ * visible, but that's fine. A later vacuum will fix that.
+ */
+ END_CRIT_SECTION();
+
+ /*
+ * Now that we have removed the LP_DEAD items from the page, once again
+ * check if the page has become all-visible. The page is already marked
+ * dirty, exclusively locked, and, if needed, a full page image has been
+ * emitted.
+ */
+ if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
+ &all_frozen))
+ PageSetAllVisible(page);
+
+ /*
+ * All the changes to the heap page have been done. If the all-visible
+ * flag is now set, also set the VM all-visible bit (and, if possible, the
+ * all-frozen bit) unless this has already been done previously.
+ */
+ if (PageIsAllVisible(page))
+ {
+ uint8 flags = 0;
+ uint8 vm_status = visibilitymap_get_status(vacrel->rel,
+ blkno, vmbuffer);
+
+ /* Work out which VM bits still need to be set, if any */
+ if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
+ flags |= VISIBILITYMAP_ALL_VISIBLE;
+ if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
+ flags |= VISIBILITYMAP_ALL_FROZEN;
+
+ Assert(BufferIsValid(*vmbuffer));
+ if (flags != 0)
+ visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
+ *vmbuffer, visibility_cutoff_xid, flags);
+ }
+
+ /* Revert to the previous phase information for error traceback */
+ restore_vacuum_error_info(vacrel, &saved_err_info);
+ return tupindex;
+}
+
+/*
+ * lazy_check_needs_freeze() -- scan page to see if any tuples
+ * need to be cleaned to avoid wraparound
+ *
+ * Returns true if the page needs to be vacuumed using cleanup lock.
+ * Also returns a flag indicating whether page contains any tuples at all.
+ */
+static bool
+lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel)
+{
+ Page page = BufferGetPage(buf);
+ OffsetNumber offnum,
+ maxoff;
+ HeapTupleHeader tupleheader;
+
+ *hastup = false;
+
+ /*
+ * New and empty pages, obviously, don't contain tuples. We could make
+ * sure that the page is registered in the FSM, but it doesn't seem worth
+ * waiting for a cleanup lock just for that, especially because it's
+ * likely that the pin holder will do so.
+ */
+ if (PageIsNew(page) || PageIsEmpty(page))
+ return false;
+
+ maxoff = PageGetMaxOffsetNumber(page);
+ for (offnum = FirstOffsetNumber;
+ offnum <= maxoff;
+ offnum = OffsetNumberNext(offnum))
+ {
+ ItemId itemid;
+
+ /*
+ * Set the offset number so that we can display it along with any
+ * error that occurred while processing this tuple.
+ */
+ vacrel->offnum = offnum;
+ itemid = PageGetItemId(page, offnum);
+
+ /* this should match hastup test in count_nondeletable_pages() */
+ if (ItemIdIsUsed(itemid))
+ *hastup = true;
+
+ /* dead and redirect items never need freezing */
+ if (!ItemIdIsNormal(itemid))
+ continue;
+
+ tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
+
+ if (heap_tuple_needs_freeze(tupleheader, vacrel->FreezeLimit,
+ vacrel->MultiXactCutoff, buf))
+ break;
+ } /* scan along page */
+
+ /* Clear the offset information once we have processed the given page. */
+ vacrel->offnum = InvalidOffsetNumber;
+
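+	/*
+	 * If the loop above broke out early, some tuple needs freezing and
+	 * offnum is still <= maxoff, so we return true; if it ran to
+	 * completion, offnum is now maxoff + 1 and we return false.
+	 */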
+ return (offnum <= maxoff);
+}
+
+/*
+ * Trigger the failsafe to avoid wraparound failure when vacrel's table has a
+ * relfrozenxid and/or relminmxid that is dangerously far in the past.
+ * Triggering the failsafe makes the ongoing VACUUM bypass any further index
+ * vacuuming and heap vacuuming. Truncating the heap is also bypassed.
+ *
+ * Any remaining work (work that VACUUM cannot just bypass) is typically sped
+ * up when the failsafe triggers. VACUUM stops applying any cost-based delay
+ * that it started out with.
+ *
+ * Returns true when failsafe has been triggered.
+ */
+static bool
+lazy_check_wraparound_failsafe(LVRelState *vacrel)
+{
+ /* Don't warn more than once per VACUUM */
+ if (vacrel->failsafe_active)
+ return true;
+
+ if (unlikely(vacuum_xid_failsafe_check(vacrel->relfrozenxid,
+ vacrel->relminmxid)))
+ {
+ vacrel->failsafe_active = true;
+
+ /* Disable index vacuuming, index cleanup, and heap rel truncation */
+ vacrel->do_index_vacuuming = false;
+ vacrel->do_index_cleanup = false;
+ vacrel->do_rel_truncate = false;
+
+ ereport(WARNING,
+ (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans",
+ get_database_name(MyDatabaseId),
+ vacrel->relnamespace,
+ vacrel->relname,
+ vacrel->num_index_scans),
+ errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
+ errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
+ "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
+
+ /* Stop applying cost limits from this point on */
+ VacuumCostActive = false;
+ VacuumCostBalance = 0;
+
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Perform lazy_vacuum_all_indexes() steps in parallel
+ */
+static void
+do_parallel_lazy_vacuum_all_indexes(LVRelState *vacrel)
+{
+ /* Tell parallel workers to do index vacuuming */
+ vacrel->lps->lvshared->for_cleanup = false;
+ vacrel->lps->lvshared->first_time = false;
+
+ /*
+ * We can only provide an approximate value of num_heap_tuples, at least
+ * for now. Matches serial VACUUM case.
+ */
+ vacrel->lps->lvshared->reltuples = vacrel->old_live_tuples;
+ vacrel->lps->lvshared->estimated_count = true;
+
+ do_parallel_vacuum_or_cleanup(vacrel,
+ vacrel->lps->nindexes_parallel_bulkdel);
+}
+
+/*
+ * Perform lazy_cleanup_all_indexes() steps in parallel
+ */
+static void
+do_parallel_lazy_cleanup_all_indexes(LVRelState *vacrel)
+{
+ int nworkers;
+
+ /*
+ * If parallel vacuum is active we perform index cleanup with parallel
+ * workers.
+ *
+ * Tell parallel workers to do index cleanup.
+ */
+ vacrel->lps->lvshared->for_cleanup = true;
+ vacrel->lps->lvshared->first_time = (vacrel->num_index_scans == 0);
+
+ /*
+ * Now we can provide a better estimate of total number of surviving
+ * tuples (we assume indexes are more interested in that than in the
+ * number of nominally live tuples).
+ */
+ vacrel->lps->lvshared->reltuples = vacrel->new_rel_tuples;
+ vacrel->lps->lvshared->estimated_count =
+ (vacrel->tupcount_pages < vacrel->rel_pages);
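+
+	/*
+	 * In other words, the count is only exact when every heap page was
+	 * scanned and counted (no pages skipped via the visibility map); index
+	 * AMs may then treat reltuples as authoritative during cleanup.
+	 */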
+
+ /* Determine the number of parallel workers to launch */
+ if (vacrel->lps->lvshared->first_time)
+ nworkers = vacrel->lps->nindexes_parallel_cleanup +
+ vacrel->lps->nindexes_parallel_condcleanup;
+ else
+ nworkers = vacrel->lps->nindexes_parallel_cleanup;
+
+ do_parallel_vacuum_or_cleanup(vacrel, nworkers);
+}
+
+/*
+ * Perform index vacuum or index cleanup with parallel workers. This function
+ * must be used by the parallel vacuum leader process. The caller must set
+ * lps->lvshared->for_cleanup to indicate whether to perform vacuum or
+ * cleanup.
+ */
+static void
+do_parallel_vacuum_or_cleanup(LVRelState *vacrel, int nworkers)
+{
+ LVParallelState *lps = vacrel->lps;
+
+ Assert(!IsParallelWorker());
+ Assert(ParallelVacuumIsActive(vacrel));
+ Assert(vacrel->nindexes > 0);
+
+ /* The leader process will participate */
+ nworkers--;
+
+ /*
+ * It is possible that the parallel context was initialized with fewer
+ * workers than the number of indexes that need a separate worker in the
+ * current phase, so we must cap nworkers accordingly.  See
+ * compute_parallel_vacuum_workers.
+ */
+ nworkers = Min(nworkers, lps->pcxt->nworkers);
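+
+	/*
+	 * Example: with three bulkdelete-capable indexes the caller passes
+	 * nworkers = 3; the decrement above leaves 2 for workers, and if the
+	 * parallel context was created with just 1 worker we clamp to 1.  The
+	 * leader still participates via do_parallel_processing() below.
+	 */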
+
+ /* Setup the shared cost-based vacuum delay and launch workers */
+ if (nworkers > 0)
+ {
+ if (vacrel->num_index_scans > 0)
+ {
+ /* Reset the parallel index processing counter */
+ pg_atomic_write_u32(&(lps->lvshared->idx), 0);
+
+ /* Reinitialize the parallel context to relaunch parallel workers */
+ ReinitializeParallelDSM(lps->pcxt);
+ }
+
+ /*
+ * Set up shared cost balance and the number of active workers for
+ * vacuum delay. We need to do this before launching workers as
+ * otherwise, they might not see the updated values for these
+ * parameters.
+ */
+ pg_atomic_write_u32(&(lps->lvshared->cost_balance), VacuumCostBalance);
+ pg_atomic_write_u32(&(lps->lvshared->active_nworkers), 0);
+
+ /*
+ * The number of workers can vary between the bulkdelete and cleanup
+ * phases.
+ */
+ ReinitializeParallelWorkers(lps->pcxt, nworkers);
+
+ LaunchParallelWorkers(lps->pcxt);
+
+ if (lps->pcxt->nworkers_launched > 0)
+ {
+ /*
+ * Reset the local cost values for the leader backend, as the
+ * remaining balance from the heap scan has already been moved
+ * into the shared balance.
+ */
+ VacuumCostBalance = 0;
+ VacuumCostBalanceLocal = 0;
+
+ /* Enable shared cost balance for leader backend */
+ VacuumSharedCostBalance = &(lps->lvshared->cost_balance);
+ VacuumActiveNWorkers = &(lps->lvshared->active_nworkers);
+ }
+
+ if (lps->lvshared->for_cleanup)
+ ereport(elevel,
+ (errmsg(ngettext("launched %d parallel vacuum worker for index cleanup (planned: %d)",
+ "launched %d parallel vacuum workers for index cleanup (planned: %d)",
+ lps->pcxt->nworkers_launched),
+ lps->pcxt->nworkers_launched, nworkers)));
+ else
+ ereport(elevel,
+ (errmsg(ngettext("launched %d parallel vacuum worker for index vacuuming (planned: %d)",
+ "launched %d parallel vacuum workers for index vacuuming (planned: %d)",
+ lps->pcxt->nworkers_launched),
+ lps->pcxt->nworkers_launched, nworkers)));
+ }
+
+ /* Process the indexes that can be processed by only leader process */
+ do_serial_processing_for_unsafe_indexes(vacrel, lps->lvshared);
+
+ /*
+ * Join as a parallel worker. The leader process alone processes all the
+ * indexes in the case where no workers are launched.
+ */
+ do_parallel_processing(vacrel, lps->lvshared);
+
+ /*
+ * Next, accumulate buffer and WAL usage. (This must wait for the workers
+ * to finish, or we might get incomplete data.)
+ */
+ if (nworkers > 0)
+ {
+ /* Wait for all vacuum workers to finish */
+ WaitForParallelWorkersToFinish(lps->pcxt);
+
+ for (int i = 0; i < lps->pcxt->nworkers_launched; i++)
+ InstrAccumParallelQuery(&lps->buffer_usage[i], &lps->wal_usage[i]);
+ }
+
+ /*
+ * Carry the shared balance value back to the heap scan and disable shared costing
+ */
+ if (VacuumSharedCostBalance)
+ {
+ VacuumCostBalance = pg_atomic_read_u32(VacuumSharedCostBalance);
+ VacuumSharedCostBalance = NULL;
+ VacuumActiveNWorkers = NULL;
+ }
+}
+
+/*
+ * Index vacuum/cleanup routine used by the leader process and parallel
+ * vacuum worker processes to process the indexes in parallel.
+ */
+static void
+do_parallel_processing(LVRelState *vacrel, LVShared *lvshared)
+{
+ /*
+ * Increment the active worker count if we are able to launch any worker.
+ */
+ if (VacuumActiveNWorkers)
+ pg_atomic_add_fetch_u32(VacuumActiveNWorkers, 1);
+
+ /* Loop until all indexes are vacuumed */
+ for (;;)
+ {
+ int idx;
+ LVSharedIndStats *shared_istat;
+ Relation indrel;
+ IndexBulkDeleteResult *istat;
+
+ /* Get an index number to process */
+ idx = pg_atomic_fetch_add_u32(&(lvshared->idx), 1);
+
+ /* Done for all indexes? */
+ if (idx >= vacrel->nindexes)
+ break;
+
+ /* Get the index statistics space from DSM, if any */
+ shared_istat = parallel_stats_for_idx(lvshared, idx);
+
+ /* Skip indexes not participating in parallelism */
+ if (shared_istat == NULL)
+ continue;
+
+ indrel = vacrel->indrels[idx];
+
+ /*
+ * Skip processing indexes that are unsafe for workers (these are
+ * processed in do_serial_processing_for_unsafe_indexes() by leader)
+ */
+ if (!parallel_processing_is_safe(indrel, lvshared))
+ continue;
+
+ /* Do vacuum or cleanup of the index */
+ istat = (vacrel->indstats[idx]);
+ vacrel->indstats[idx] = parallel_process_one_index(indrel, istat,
+ lvshared,
+ shared_istat,
+ vacrel);
+ }
+
+ /*
+ * We have completed the index vacuum, so decrement the active worker
+ * count.
+ */
+ if (VacuumActiveNWorkers)
+ pg_atomic_sub_fetch_u32(VacuumActiveNWorkers, 1);
+}
+
+/*
+ * Serially process indexes in the leader process.
+ *
+ * Handles index vacuuming (or index cleanup) for indexes that are not
+ * parallel safe. It's possible that this will vary for a given index, based
+ * on details like whether we're performing for_cleanup processing right now.
+ *
+ * Also performs processing of smaller indexes that fell under the size cutoff
+ * enforced by compute_parallel_vacuum_workers(). These indexes never get a
+ * slot for statistics in DSM.
+ */
+static void
+do_serial_processing_for_unsafe_indexes(LVRelState *vacrel, LVShared *lvshared)
+{
+ Assert(!IsParallelWorker());
+
+ /*
+ * Increment the active worker count if we are able to launch any worker.
+ */
+ if (VacuumActiveNWorkers)
+ pg_atomic_add_fetch_u32(VacuumActiveNWorkers, 1);
+
+ for (int idx = 0; idx < vacrel->nindexes; idx++)
+ {
+ LVSharedIndStats *shared_istat;
+ Relation indrel;
+ IndexBulkDeleteResult *istat;
+
+ shared_istat = parallel_stats_for_idx(lvshared, idx);
+ indrel = vacrel->indrels[idx];
+
+ /*
+ * We're only here for the indexes that parallel workers won't
+ * process. Note that the shared_istat test ensures that we process
+ * indexes that fell under the initial size cutoff.
+ */
+ if (shared_istat != NULL &&
+ parallel_processing_is_safe(indrel, lvshared))
+ continue;
+
+ /* Do vacuum or cleanup of the index */
+ istat = (vacrel->indstats[idx]);
+ vacrel->indstats[idx] = parallel_process_one_index(indrel, istat,
+ lvshared,
+ shared_istat,
+ vacrel);
+ }
+
+ /*
+ * We have completed the index vacuum, so decrement the active worker
+ * count.
+ */
+ if (VacuumActiveNWorkers)
+ pg_atomic_sub_fetch_u32(VacuumActiveNWorkers, 1);
+}
+
+/*
+ * Vacuum or clean up an index, either in the leader process or in one of
+ * the worker processes. After processing the index, this function copies
+ * the index statistics returned by ambulkdelete or amvacuumcleanup into
+ * the DSM segment.
+ */
+static IndexBulkDeleteResult *
+parallel_process_one_index(Relation indrel,
+ IndexBulkDeleteResult *istat,
+ LVShared *lvshared,
+ LVSharedIndStats *shared_istat,
+ LVRelState *vacrel)
+{
+ IndexBulkDeleteResult *istat_res;
+
+ /*
+ * Point to the bulk-deletion result stored in DSM if a previous pass
+ * has already placed one there.
+ */
+ if (shared_istat && shared_istat->updated && istat == NULL)
+ istat = &shared_istat->istat;
+
+ /* Do vacuum or cleanup of the index */
+ if (lvshared->for_cleanup)
+ istat_res = lazy_cleanup_one_index(indrel, istat, lvshared->reltuples,
+ lvshared->estimated_count, vacrel);
+ else
+ istat_res = lazy_vacuum_one_index(indrel, istat, lvshared->reltuples,
+ vacrel);
+
+ /*
+ * Copy the index bulk-deletion result returned by ambulkdelete or
+ * amvacuumcleanup into the DSM segment on the first cycle: the result is
+ * allocated locally, and the index may be vacuumed by a different process
+ * on the next cycle. Copying therefore normally happens only the first
+ * time an index is vacuumed. On any additional vacuum pass, we point
+ * directly at the result in the DSM segment and pass that to the index
+ * vacuum APIs, so that workers can update it in place.
+ *
+ * Since all vacuum workers write their bulk-deletion results to different
+ * slots, we can write them without locking.
+ */
+ if (shared_istat && !shared_istat->updated && istat_res != NULL)
+ {
+ memcpy(&shared_istat->istat, istat_res, sizeof(IndexBulkDeleteResult));
+ shared_istat->updated = true;
+
+ /* Free the locally-allocated bulk-deletion result */
+ pfree(istat_res);
+
+ /* return the pointer to the result from shared memory */
+ return &shared_istat->istat;
+ }
+
+ return istat_res;
+}
+
+/*
+ * lazy_cleanup_all_indexes() -- cleanup all indexes of relation.
+ */
+static void
+lazy_cleanup_all_indexes(LVRelState *vacrel)
+{
+ Assert(!IsParallelWorker());
+ Assert(vacrel->nindexes > 0);
+
+ /* Report that we are now cleaning up indexes */
+ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
+ PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
+
+ if (!ParallelVacuumIsActive(vacrel))
+ {
+ double reltuples = vacrel->new_rel_tuples;
+ bool estimated_count =
+ vacrel->tupcount_pages < vacrel->rel_pages;
+
+ for (int idx = 0; idx < vacrel->nindexes; idx++)
+ {
+ Relation indrel = vacrel->indrels[idx];
+ IndexBulkDeleteResult *istat = vacrel->indstats[idx];
+
+ vacrel->indstats[idx] =
+ lazy_cleanup_one_index(indrel, istat, reltuples,
+ estimated_count, vacrel);
+ }
+ }
+ else
+ {
+ /* Outsource everything to parallel variant */
+ do_parallel_lazy_cleanup_all_indexes(vacrel);
+ }
+}
+
+/*
+ * lazy_vacuum_one_index() -- vacuum index relation.
+ *
+ * Delete all the index entries pointing to tuples listed in
+ * dead_tuples, and update running statistics.
+ *
+ * reltuples is the number of heap tuples to be passed to the
+ * bulkdelete callback. It's always assumed to be estimated.
+ *
+ * Returns bulk delete stats derived from input stats
+ */
+static IndexBulkDeleteResult *
+lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat,
+ double reltuples, LVRelState *vacrel)
+{
+ IndexVacuumInfo ivinfo;
+ PGRUsage ru0;
+ LVSavedErrInfo saved_err_info;
+
+ pg_rusage_init(&ru0);
+
+ ivinfo.index = indrel;
+ ivinfo.analyze_only = false;
+ ivinfo.report_progress = false;
+ ivinfo.estimated_count = true;
+ ivinfo.message_level = elevel;
+ ivinfo.num_heap_tuples = reltuples;
+ ivinfo.strategy = vacrel->bstrategy;
+
+ /*
+ * Update error traceback information.
+ *
+ * The index name is saved during this phase and restored immediately
+ * after this phase. See vacuum_error_callback.
+ */
+ Assert(vacrel->indname == NULL);
+ vacrel->indname = pstrdup(RelationGetRelationName(indrel));
+ update_vacuum_error_info(vacrel, &saved_err_info,
+ VACUUM_ERRCB_PHASE_VACUUM_INDEX,
+ InvalidBlockNumber, InvalidOffsetNumber);
+
+ /* Do bulk deletion */
+ istat = index_bulk_delete(&ivinfo, istat, lazy_tid_reaped,
+ (void *) vacrel->dead_tuples);
+
+ ereport(elevel,
+ (errmsg("scanned index \"%s\" to remove %d row versions",
+ vacrel->indname, vacrel->dead_tuples->num_tuples),
+ errdetail_internal("%s", pg_rusage_show(&ru0))));
+
+ /* Revert to the previous phase information for error traceback */
+ restore_vacuum_error_info(vacrel, &saved_err_info);
+ pfree(vacrel->indname);
+ vacrel->indname = NULL;
+
+ return istat;
+}
+
+/*
+ * lazy_cleanup_one_index() -- do post-vacuum cleanup for index relation.
+ *
+ * reltuples is the number of heap tuples and estimated_count is true
+ * if reltuples is an estimated value.
+ *
+ * Returns bulk delete stats derived from input stats
+ */
+static IndexBulkDeleteResult *
+lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat,
+ double reltuples, bool estimated_count,
+ LVRelState *vacrel)
+{
+ IndexVacuumInfo ivinfo;
+ PGRUsage ru0;
+ LVSavedErrInfo saved_err_info;
+
+ pg_rusage_init(&ru0);
+
+ ivinfo.index = indrel;
+ ivinfo.analyze_only = false;
+ ivinfo.report_progress = false;
+ ivinfo.estimated_count = estimated_count;
+ ivinfo.message_level = elevel;
+
+ ivinfo.num_heap_tuples = reltuples;
+ ivinfo.strategy = vacrel->bstrategy;
+
+ /*
+ * Update error traceback information.
+ *
+ * The index name is saved during this phase and restored immediately
+ * after this phase. See vacuum_error_callback.
+ */
+ Assert(vacrel->indname == NULL);
+ vacrel->indname = pstrdup(RelationGetRelationName(indrel));
+ update_vacuum_error_info(vacrel, &saved_err_info,
+ VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
+ InvalidBlockNumber, InvalidOffsetNumber);
+
+ istat = index_vacuum_cleanup(&ivinfo, istat);
+
+ if (istat)
+ {
+ ereport(elevel,
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ (istat)->num_index_tuples,
+ (istat)->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
+ "%u index pages were newly deleted.\n"
+ "%u index pages are currently deleted, of which %u are currently reusable.\n"
+ "%s.",
+ (istat)->tuples_removed,
+ (istat)->pages_newly_deleted,
+ (istat)->pages_deleted, (istat)->pages_free,
+ pg_rusage_show(&ru0))));
+ }
+
+ /* Revert to the previous phase information for error traceback */
+ restore_vacuum_error_info(vacrel, &saved_err_info);
+ pfree(vacrel->indname);
+ vacrel->indname = NULL;
+
+ return istat;
+}
+
+/*
+ * should_attempt_truncation - should we attempt to truncate the heap?
+ *
+ * Don't even think about it unless we have a shot at releasing a goodly
+ * number of pages. Otherwise, the time taken isn't worth it.
+ *
+ * Also don't attempt it if wraparound failsafe is in effect. It's hard to
+ * predict how long lazy_truncate_heap will take. Don't take any chances.
+ * There is very little chance of truncation working out when the failsafe is
+ * in effect in any case: lazy_scan_prune makes the optimistic assumption
+ * that any LP_DEAD items it encounters will be LP_UNUSED by the time we're
+ * called, which won't hold once index and heap vacuuming have been bypassed.
+ *
+ * Also don't attempt it if we are doing early pruning/vacuuming, because a
+ * scan which cannot find a truncated heap page cannot determine that the
+ * snapshot is too old to read that page.
+ *
+ * This is split out so that we can test whether truncation is going to be
+ * called for before we actually do it. If you change the logic here, be
+ * careful to depend only on fields that lazy_scan_heap updates on-the-fly.
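+ *
+ * As a worked example (assuming the usual settings of REL_TRUNCATE_MINIMUM,
+ * 1000 pages, and REL_TRUNCATE_FRACTION, 16): a 4800-page table qualifies
+ * once 4800 / 16 = 300 tail pages are empty, whereas a very large table
+ * always needs at least 1000 empty tail pages.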
+ */
+static bool
+should_attempt_truncation(LVRelState *vacrel)
+{
+ BlockNumber possibly_freeable;
+
+ if (!vacrel->do_rel_truncate || vacrel->failsafe_active)
+ return false;
+
+ possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
+ if (possibly_freeable > 0 &&
+ (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
+ possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION) &&
+ old_snapshot_threshold < 0)
+ return true;
+ else
+ return false;
+}
+
+/*
+ * lazy_truncate_heap - try to truncate off any empty pages at the end
+ */
+static void
+lazy_truncate_heap(LVRelState *vacrel)
+{
+ BlockNumber old_rel_pages = vacrel->rel_pages;
+ BlockNumber new_rel_pages;
+ bool lock_waiter_detected;
+ int lock_retry;
+
+ /* Report that we are now truncating */
+ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
+ PROGRESS_VACUUM_PHASE_TRUNCATE);
+
+ /*
+ * Loop until no more truncating can be done.
+ */
+ do
+ {
+ PGRUsage ru0;
+
+ pg_rusage_init(&ru0);
+
+ /*
+ * We need full exclusive lock on the relation in order to do
+ * truncation. If we can't get it, give up rather than waiting --- we
+ * don't want to block other backends, and we don't want to deadlock
+ * (which is quite possible considering we already hold a lower-grade
+ * lock).
+ */
+ lock_waiter_detected = false;
+ lock_retry = 0;
+ while (true)
+ {
+ if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
+ break;
+
+ /*
+ * Check for interrupts while trying to (re-)acquire the exclusive
+ * lock.
+ */
+ CHECK_FOR_INTERRUPTS();
+
+ if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT /
+ VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL))
+ {
+ /*
+ * We failed to establish the lock in the specified number of
+ * retries. This means we give up truncating.
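+ * (With the usual settings, a 50 ms wait interval and a 5 s
+ * timeout, that amounts to 100 attempts.)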
+ */
+ ereport(elevel,
+ (errmsg("\"%s\": stopping truncate due to conflicting lock request",
+ vacrel->relname)));
+ return;
+ }
+
+ pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL * 1000L);
+ }
+
+ /*
+ * Now that we have exclusive lock, look to see if the rel has grown
+ * whilst we were vacuuming with non-exclusive lock. If so, give up;
+ * the newly added pages presumably contain non-deletable tuples.
+ */
+ new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
+ if (new_rel_pages != old_rel_pages)
+ {
+ /*
+ * Note: we intentionally don't update vacrel->rel_pages with the
+ * new rel size here. If we did, it would amount to assuming that
+ * the new pages are empty, which is unlikely. Leaving the numbers
+ * alone amounts to assuming that the new pages have the same
+ * tuple density as existing ones, which is less unlikely.
+ */
+ UnlockRelation(vacrel->rel, AccessExclusiveLock);
+ return;
+ }
+
+ /*
+ * Scan backwards from the end to verify that the end pages actually
+ * contain no tuples. This is *necessary*, not optional, because
+ * other backends could have added tuples to these pages whilst we
+ * were vacuuming.
+ */
+ new_rel_pages = count_nondeletable_pages(vacrel, &lock_waiter_detected);
+ vacrel->blkno = new_rel_pages;
+
+ if (new_rel_pages >= old_rel_pages)
+ {
+ /* can't do anything after all */
+ UnlockRelation(vacrel->rel, AccessExclusiveLock);
+ return;
+ }
+
+ /*
+ * Okay to truncate.
+ */
+ RelationTruncate(vacrel->rel, new_rel_pages);
+
+ /*
+ * We can release the exclusive lock as soon as we have truncated.
+ * Other backends can't safely access the relation until they have
+ * processed the smgr invalidation that smgrtruncate sent out ... but
+ * that should happen as part of standard invalidation processing once
+ * they acquire lock on the relation.
+ */
+ UnlockRelation(vacrel->rel, AccessExclusiveLock);
+
+ /*
+ * Update statistics. Here, it *is* correct to adjust rel_pages
+ * without also touching reltuples, since the tuple count wasn't
+ * changed by the truncation.
+ */
+ vacrel->pages_removed += old_rel_pages - new_rel_pages;
+ vacrel->rel_pages = new_rel_pages;
+
+ ereport(elevel,
+ (errmsg("table \"%s\": truncated %u to %u pages",
+ vacrel->relname,
+ old_rel_pages, new_rel_pages),
+ errdetail_internal("%s",
+ pg_rusage_show(&ru0))));
+ old_rel_pages = new_rel_pages;
+ } while (new_rel_pages > vacrel->nonempty_pages && lock_waiter_detected);
+}
+
+/*
+ * Rescan end pages to verify that they are (still) empty of tuples.
+ *
+ * Returns number of nondeletable pages (last nonempty page + 1).
+ */
+static BlockNumber
+count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
+{
+ BlockNumber blkno;
+ BlockNumber prefetchedUntil;
+ instr_time starttime;
+
+ /* Initialize starttime; it is used when checking for conflicting lock requests */
+ INSTR_TIME_SET_CURRENT(starttime);
+
+ /*
+ * Start checking blocks at what we believe the relation end to be and
+ * move backwards. (Strange coding of loop control is needed because
+ * blkno is unsigned.) To make the scan faster, we prefetch a few blocks
+ * at a time in the forward direction, so that OS-level readahead can kick in.
+ */
+ blkno = vacrel->rel_pages;
+ StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
+ "prefetch size must be power of 2");
+ prefetchedUntil = InvalidBlockNumber;
+ while (blkno > vacrel->nonempty_pages)
+ {
+ Buffer buf;
+ Page page;
+ OffsetNumber offnum,
+ maxoff;
+ bool hastup;
+
+ /*
+ * Check if another process requests a lock on our relation. We are
+ * holding an AccessExclusiveLock here, so they will be waiting. We
+ * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we
+ * only check if that interval has elapsed once every 32 blocks to
+ * keep the number of system calls and actual shared lock table
+ * lookups to a minimum.
+ */
+ if ((blkno % 32) == 0)
+ {
+ instr_time currenttime;
+ instr_time elapsed;
+
+ INSTR_TIME_SET_CURRENT(currenttime);
+ elapsed = currenttime;
+ INSTR_TIME_SUBTRACT(elapsed, starttime);
+ if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
+ >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
+ {
+ if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
+ {
+ ereport(elevel,
+ (errmsg("table \"%s\": suspending truncate due to conflicting lock request",
+ vacrel->relname)));
+
+ *lock_waiter_detected = true;
+ return blkno;
+ }
+ starttime = currenttime;
+ }
+ }
+
+ /*
+ * We don't insert a vacuum delay point here, because we have an
+ * exclusive lock on the table which we want to hold for as short a
+ * time as possible. We still need to check for interrupts, however.
+ */
+ CHECK_FOR_INTERRUPTS();
+
+ blkno--;
+
+ /* If we haven't prefetched this lot yet, do so now. */
+ if (prefetchedUntil > blkno)
+ {
+ BlockNumber prefetchStart;
+ BlockNumber pblkno;
+
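+ /*
+ * Round blkno down to a PREFETCH_SIZE boundary; for example, assuming
+ * PREFETCH_SIZE is the usual 32, blkno 70 gives prefetchStart 64, so
+ * blocks 64..70 are prefetched in forward order below.
+ */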
+ prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
+ for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
+ {
+ PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
+ CHECK_FOR_INTERRUPTS();
+ }
+ prefetchedUntil = prefetchStart;
+ }
+
+ buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
+ vacrel->bstrategy);
+
+ /* In this phase we only need shared access to the buffer */
+ LockBuffer(buf, BUFFER_LOCK_SHARE);
+
+ page = BufferGetPage(buf);
+
+ if (PageIsNew(page) || PageIsEmpty(page))
+ {
+ UnlockReleaseBuffer(buf);
+ continue;
+ }
+
+ hastup = false;
+ maxoff = PageGetMaxOffsetNumber(page);
+ for (offnum = FirstOffsetNumber;
+ offnum <= maxoff;
+ offnum = OffsetNumberNext(offnum))
+ {
+ ItemId itemid;
+
+ itemid = PageGetItemId(page, offnum);
+
+ /*
+ * Note: any non-unused item should be taken as a reason to keep
+ * this page. Even an LP_DEAD item makes truncation unsafe, since
+ * we must not have cleaned out its index entries.
+ */
+ if (ItemIdIsUsed(itemid))
+ {
+ hastup = true;
+ break; /* can stop scanning */
+ }
+ } /* scan along page */
+
+ UnlockReleaseBuffer(buf);
+
+ /* Done scanning if we found a tuple here */
+ if (hastup)
+ return blkno + 1;
+ }
+
+ /*
+ * If we fall out of the loop, all the previously-thought-to-be-empty
+ * pages still are; we need not bother to look at the last known-nonempty
+ * page.
+ */
+ return vacrel->nonempty_pages;
+}
+
+/*
+ * Return the maximum number of dead tuples we can record.
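+ *
+ * As a rough worked example (assuming 8 kB pages, so MaxHeapTuplesPerPage
+ * is 291, and the usual 6-byte ItemPointerData): with 64 MB of
+ * maintenance_work_mem, MAXDEADTUPLES allows roughly 11 million TIDs,
+ * clamped so that we never allocate more than one page's worth of TIDs
+ * per heap block.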
+ */
+static long
+compute_max_dead_tuples(BlockNumber relblocks, bool hasindex)
+{
+ long maxtuples;
+ int vac_work_mem = IsAutoVacuumWorkerProcess() &&
+ autovacuum_work_mem != -1 ?
+ autovacuum_work_mem : maintenance_work_mem;
+
+ if (hasindex)
+ {
+ maxtuples = MAXDEADTUPLES(vac_work_mem * 1024L);
+ maxtuples = Min(maxtuples, INT_MAX);
+ maxtuples = Min(maxtuples, MAXDEADTUPLES(MaxAllocSize));
+
+ /* curious coding here to ensure the multiplication can't overflow */
+ if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
+ maxtuples = relblocks * LAZY_ALLOC_TUPLES;
+
+ /* stay sane if small maintenance_work_mem */
+ maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
+ }
+ else
+ maxtuples = MaxHeapTuplesPerPage;
+
+ return maxtuples;
+}
+
+/*
+ * lazy_space_alloc - space allocation decisions for lazy vacuum
+ *
+ * See the comments at the head of this file for rationale.
+ */
+static void
+lazy_space_alloc(LVRelState *vacrel, int nworkers, BlockNumber nblocks)
+{
+ LVDeadTuples *dead_tuples;
+ long maxtuples;
+
+ /*
+ * Initialize state for a parallel vacuum. As of now, only one worker can
+ * be used for an index, so we invoke parallelism only if there are at
+ * least two indexes on a table.
+ */
+ if (nworkers >= 0 && vacrel->nindexes > 1 && vacrel->do_index_vacuuming)
+ {
+ /*
+ * Since parallel workers cannot access data in temporary tables, we
+ * can't perform parallel vacuum on them.
+ */
+ if (RelationUsesLocalBuffers(vacrel->rel))
+ {
+ /*
+ * Give warning only if the user explicitly tries to perform a
+ * parallel vacuum on the temporary table.
+ */
+ if (nworkers > 0)
+ ereport(WARNING,
+ (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
+ vacrel->relname)));
+ }
+ else
+ vacrel->lps = begin_parallel_vacuum(vacrel, nblocks, nworkers);
+
+ /* If parallel mode started, we're done */
+ if (ParallelVacuumIsActive(vacrel))
+ return;
+ }
+
+ maxtuples = compute_max_dead_tuples(nblocks, vacrel->nindexes > 0);
+
+ dead_tuples = (LVDeadTuples *) palloc(SizeOfDeadTuples(maxtuples));
+ dead_tuples->num_tuples = 0;
+ dead_tuples->max_tuples = (int) maxtuples;
+
+ vacrel->dead_tuples = dead_tuples;
+}
+
+/*
+ * lazy_space_free - free space allocated in lazy_space_alloc
+ */
+static void
+lazy_space_free(LVRelState *vacrel)
+{
+ if (!ParallelVacuumIsActive(vacrel))
+ return;
+
+ /*
+ * End parallel mode before updating index statistics as we cannot write
+ * during parallel mode.
+ */
+ end_parallel_vacuum(vacrel);
+}
+
+/*
+ * lazy_tid_reaped() -- is a particular tid deletable?
+ *
+ * This has the right signature to be an IndexBulkDeleteCallback.
+ *
+ * Assumes dead_tuples array is in sorted order.
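+ *
+ * The bound check below relies on itemptr_encode() packing a TID into an
+ * int64, essentially as (block << 16) | offset, so comparing encoded values
+ * reproduces the (block, offset) ordering used by vac_cmp_itemptr().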
+ */
+static bool
+lazy_tid_reaped(ItemPointer itemptr, void *state)
+{
+ LVDeadTuples *dead_tuples = (LVDeadTuples *) state;
+ int64 litem,
+ ritem,
+ item;
+ ItemPointer res;
+
+ litem = itemptr_encode(&dead_tuples->itemptrs[0]);
+ ritem = itemptr_encode(&dead_tuples->itemptrs[dead_tuples->num_tuples - 1]);
+ item = itemptr_encode(itemptr);
+
+ /*
+ * A simple bound check before bsearch() avoids the cost of a full binary
+ * search, especially when dead tuples on the heap are concentrated in a
+ * certain block range. Since this function is called for
+ * every index tuple, it pays to be really fast.
+ */
+ if (item < litem || item > ritem)
+ return false;
+
+ res = (ItemPointer) bsearch((void *) itemptr,
+ (void *) dead_tuples->itemptrs,
+ dead_tuples->num_tuples,
+ sizeof(ItemPointerData),
+ vac_cmp_itemptr);
+
+ return (res != NULL);
+}
+
+/*
+ * Comparator routines for use with qsort() and bsearch().
+ */
+static int
+vac_cmp_itemptr(const void *left, const void *right)
+{
+ BlockNumber lblk,
+ rblk;
+ OffsetNumber loff,
+ roff;
+
+ lblk = ItemPointerGetBlockNumber((ItemPointer) left);
+ rblk = ItemPointerGetBlockNumber((ItemPointer) right);
+
+ if (lblk < rblk)
+ return -1;
+ if (lblk > rblk)
+ return 1;
+
+ loff = ItemPointerGetOffsetNumber((ItemPointer) left);
+ roff = ItemPointerGetOffsetNumber((ItemPointer) right);
+
+ if (loff < roff)
+ return -1;
+ if (loff > roff)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Check if every tuple in the given page is visible to all current and future
+ * transactions. Also return the visibility_cutoff_xid, which is the highest
+ * xmin amongst the visible tuples. Set *all_frozen to true if every tuple
+ * on this page is frozen.
+ */
+static bool
+heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
+ TransactionId *visibility_cutoff_xid,
+ bool *all_frozen)
+{
+ Page page = BufferGetPage(buf);
+ BlockNumber blockno = BufferGetBlockNumber(buf);
+ OffsetNumber offnum,
+ maxoff;
+ bool all_visible = true;
+
+ *visibility_cutoff_xid = InvalidTransactionId;
+ *all_frozen = true;
+
+ /*
+ * This is a stripped down version of the line pointer scan in
+ * lazy_scan_heap(). So if you change anything here, also check that code.
+ */
+ maxoff = PageGetMaxOffsetNumber(page);
+ for (offnum = FirstOffsetNumber;
+ offnum <= maxoff && all_visible;
+ offnum = OffsetNumberNext(offnum))
+ {
+ ItemId itemid;
+ HeapTupleData tuple;
+
+ /*
+ * Set the offset number so that we can display it along with any
+ * error that occurred while processing this tuple.
+ */
+ vacrel->offnum = offnum;
+ itemid = PageGetItemId(page, offnum);
+
+ /* Unused or redirect line pointers are of no interest */
+ if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
+ continue;
+
+ ItemPointerSet(&(tuple.t_self), blockno, offnum);
+
+ /*
+ * Dead line pointers can still have index entries pointing at them,
+ * so they can't be treated as visible.
+ */
+ if (ItemIdIsDead(itemid))
+ {
+ all_visible = false;
+ *all_frozen = false;
+ break;
+ }
+
+ Assert(ItemIdIsNormal(itemid));
+
+ tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
+ tuple.t_len = ItemIdGetLength(itemid);
+ tuple.t_tableOid = RelationGetRelid(vacrel->rel);
+
+ switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf))
+ {
+ case HEAPTUPLE_LIVE:
+ {
+ TransactionId xmin;
+
+ /* Check comments in lazy_scan_heap. */
+ if (!HeapTupleHeaderXminCommitted(tuple.t_data))
+ {
+ all_visible = false;
+ *all_frozen = false;
+ break;
+ }
+
+ /*
+ * The inserter definitely committed. But is it old enough
+ * that everyone sees it as committed?
+ */
+ xmin = HeapTupleHeaderGetXmin(tuple.t_data);
+ if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin))
+ {
+ all_visible = false;
+ *all_frozen = false;
+ break;
+ }
+
+ /* Track newest xmin on page. */
+ if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
+ *visibility_cutoff_xid = xmin;
+
+ /* Check whether this tuple is already frozen or not */
+ if (all_visible && *all_frozen &&
+ heap_tuple_needs_eventual_freeze(tuple.t_data))
+ *all_frozen = false;
+ }
+ break;
+
+ case HEAPTUPLE_DEAD:
+ case HEAPTUPLE_RECENTLY_DEAD:
+ case HEAPTUPLE_INSERT_IN_PROGRESS:
+ case HEAPTUPLE_DELETE_IN_PROGRESS:
+ {
+ all_visible = false;
+ *all_frozen = false;
+ break;
+ }
+ default:
+ elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
+ break;
+ }
+ } /* scan along page */
+
+ /* Clear the offset information once we have processed the given page. */
+ vacrel->offnum = InvalidOffsetNumber;
+
+ return all_visible;
+}
+
+/*
+ * Compute the number of parallel worker processes to request. Both index
+ * vacuum and index cleanup can be executed with parallel workers. The index
+ * is eligible for parallel vacuum iff its size is greater than
+ * min_parallel_index_scan_size, since invoking workers for very small
+ * indexes can hurt performance.
+ *
+ * nrequested is the number of parallel workers that the user requested. If
+ * nrequested is 0, we compute the parallel degree based on nindexes, that is
+ * the number of indexes that support parallel vacuum. This function also
+ * sets will_parallel_vacuum to remember indexes that participate in parallel
+ * vacuum.
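+ *
+ * For example, with 3 indexes supporting parallel bulkdelete and 2
+ * supporting parallel cleanup, nindexes_parallel is Max(3, 2) = 3; one
+ * index is left to the leader, so at most 2 workers are requested
+ * (further capped by nrequested and max_parallel_maintenance_workers).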
+ */
+static int
+compute_parallel_vacuum_workers(LVRelState *vacrel, int nrequested,
+ bool *will_parallel_vacuum)
+{
+ int nindexes_parallel = 0;
+ int nindexes_parallel_bulkdel = 0;
+ int nindexes_parallel_cleanup = 0;
+ int parallel_workers;
+
+ /*
+ * We don't allow performing a parallel operation in a standalone backend or
+ * when parallelism is disabled.
+ */
+ if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
+ return 0;
+
+ /*
+ * Compute the number of indexes that can participate in parallel vacuum.
+ */
+ for (int idx = 0; idx < vacrel->nindexes; idx++)
+ {
+ Relation indrel = vacrel->indrels[idx];
+ uint8 vacoptions = indrel->rd_indam->amparallelvacuumoptions;
+
+ if (vacoptions == VACUUM_OPTION_NO_PARALLEL ||
+ RelationGetNumberOfBlocks(indrel) < min_parallel_index_scan_size)
+ continue;
+
+ will_parallel_vacuum[idx] = true;
+
+ if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) != 0)
+ nindexes_parallel_bulkdel++;
+ if (((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) != 0) ||
+ ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) != 0))
+ nindexes_parallel_cleanup++;
+ }
+
+ nindexes_parallel = Max(nindexes_parallel_bulkdel,
+ nindexes_parallel_cleanup);
+
+ /* The leader process takes one index */
+ nindexes_parallel--;
+
+ /* No index supports parallel vacuum */
+ if (nindexes_parallel <= 0)
+ return 0;
+
+ /* Compute the parallel degree */
+ parallel_workers = (nrequested > 0) ?
+ Min(nrequested, nindexes_parallel) : nindexes_parallel;
+
+ /* Cap by max_parallel_maintenance_workers */
+ parallel_workers = Min(parallel_workers, max_parallel_maintenance_workers);
+
+ return parallel_workers;
+}
+
+/*
+ * Update index statistics in pg_class if the statistics are accurate.
+ */
+static void
+update_index_statistics(LVRelState *vacrel)
+{
+ Relation *indrels = vacrel->indrels;
+ int nindexes = vacrel->nindexes;
+ IndexBulkDeleteResult **indstats = vacrel->indstats;
+
+ Assert(!IsInParallelMode());
+
+ for (int idx = 0; idx < nindexes; idx++)
+ {
+ Relation indrel = indrels[idx];
+ IndexBulkDeleteResult *istat = indstats[idx];
+
+ if (istat == NULL || istat->estimated_count)
+ continue;
+
+ /* Update index statistics */
+ vac_update_relstats(indrel,
+ istat->num_pages,
+ istat->num_index_tuples,
+ 0,
+ false,
+ InvalidTransactionId,
+ InvalidMultiXactId,
+ false);
+ }
+}
+
+/*
+ * This function prepares and returns parallel vacuum state if we can launch
+ * even one worker. It is responsible for entering parallel mode, creating
+ * a parallel context, and then initializing the DSM segment.
+ */
+static LVParallelState *
+begin_parallel_vacuum(LVRelState *vacrel, BlockNumber nblocks,
+ int nrequested)
+{
+ LVParallelState *lps = NULL;
+ Relation *indrels = vacrel->indrels;
+ int nindexes = vacrel->nindexes;
+ ParallelContext *pcxt;
+ LVShared *shared;
+ LVDeadTuples *dead_tuples;
+ BufferUsage *buffer_usage;
+ WalUsage *wal_usage;
+ bool *will_parallel_vacuum;
+ long maxtuples;
+ Size est_shared;
+ Size est_deadtuples;
+ int nindexes_mwm = 0;
+ int parallel_workers = 0;
+ int querylen;
+
+ /*
+ * A parallel vacuum must be requested and there must be indexes on the
+ * relation
+ */
+ Assert(nrequested >= 0);
+ Assert(nindexes > 0);
+
+ /*
+ * Compute the number of parallel vacuum workers to launch
+ */
+ will_parallel_vacuum = (bool *) palloc0(sizeof(bool) * nindexes);
+ parallel_workers = compute_parallel_vacuum_workers(vacrel,
+ nrequested,
+ will_parallel_vacuum);
+
+ /* Can't perform vacuum in parallel */
+ if (parallel_workers <= 0)
+ {
+ pfree(will_parallel_vacuum);
+ return lps;
+ }
+
+ lps = (LVParallelState *) palloc0(sizeof(LVParallelState));
+
+ EnterParallelMode();
+ pcxt = CreateParallelContext("postgres", "parallel_vacuum_main",
+ parallel_workers);
+ Assert(pcxt->nworkers > 0);
+ lps->pcxt = pcxt;
+
+ /* Estimate size for shared information -- PARALLEL_VACUUM_KEY_SHARED */
+ est_shared = MAXALIGN(add_size(SizeOfLVShared, BITMAPLEN(nindexes)));
+ for (int idx = 0; idx < nindexes; idx++)
+ {
+ Relation indrel = indrels[idx];
+ uint8 vacoptions = indrel->rd_indam->amparallelvacuumoptions;
+
+ /*
+ * The cleanup option must be either disabled, always performed in
+ * parallel, or conditionally performed in parallel.
+ */
+ Assert(((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) == 0) ||
+ ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) == 0));
+ Assert(vacoptions <= VACUUM_OPTION_MAX_VALID_VALUE);
+
+ /* Skip indexes that don't participate in parallel vacuum */
+ if (!will_parallel_vacuum[idx])
+ continue;
+
+ if (indrel->rd_indam->amusemaintenanceworkmem)
+ nindexes_mwm++;
+
+ est_shared = add_size(est_shared, sizeof(LVSharedIndStats));
+
+ /*
+ * Remember the number of indexes that support parallel operation for
+ * each phase.
+ */
+ if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) != 0)
+ lps->nindexes_parallel_bulkdel++;
+ if ((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) != 0)
+ lps->nindexes_parallel_cleanup++;
+ if ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) != 0)
+ lps->nindexes_parallel_condcleanup++;
+ }
+ shm_toc_estimate_chunk(&pcxt->estimator, est_shared);
+ shm_toc_estimate_keys(&pcxt->estimator, 1);
+
+ /* Estimate size for dead tuples -- PARALLEL_VACUUM_KEY_DEAD_TUPLES */
+ maxtuples = compute_max_dead_tuples(nblocks, true);
+ est_deadtuples = MAXALIGN(SizeOfDeadTuples(maxtuples));
+ shm_toc_estimate_chunk(&pcxt->estimator, est_deadtuples);
+ shm_toc_estimate_keys(&pcxt->estimator, 1);
+
+ /*
+ * Estimate space for BufferUsage and WalUsage --
+ * PARALLEL_VACUUM_KEY_BUFFER_USAGE and PARALLEL_VACUUM_KEY_WAL_USAGE.
+ *
+ * If there are no extensions loaded that care, we could skip this. We
+ * have no way of knowing whether anyone's looking at pgBufferUsage or
+ * pgWalUsage, so do it unconditionally.
+ */
+ shm_toc_estimate_chunk(&pcxt->estimator,
+ mul_size(sizeof(BufferUsage), pcxt->nworkers));
+ shm_toc_estimate_keys(&pcxt->estimator, 1);
+ shm_toc_estimate_chunk(&pcxt->estimator,
+ mul_size(sizeof(WalUsage), pcxt->nworkers));
+ shm_toc_estimate_keys(&pcxt->estimator, 1);
+
+ /* Finally, estimate PARALLEL_VACUUM_KEY_QUERY_TEXT space */
+ if (debug_query_string)
+ {
+ querylen = strlen(debug_query_string);
+ shm_toc_estimate_chunk(&pcxt->estimator, querylen + 1);
+ shm_toc_estimate_keys(&pcxt->estimator, 1);
+ }
+ else
+ querylen = 0; /* keep compiler quiet */
+
+ InitializeParallelDSM(pcxt);
+
+ /* Prepare shared information */
+ shared = (LVShared *) shm_toc_allocate(pcxt->toc, est_shared);
+ MemSet(shared, 0, est_shared);
+ shared->relid = RelationGetRelid(vacrel->rel);
+ shared->elevel = elevel;
+ shared->maintenance_work_mem_worker =
+ (nindexes_mwm > 0) ?
+ maintenance_work_mem / Min(parallel_workers, nindexes_mwm) :
+ maintenance_work_mem;
+
+ pg_atomic_init_u32(&(shared->cost_balance), 0);
+ pg_atomic_init_u32(&(shared->active_nworkers), 0);
+ pg_atomic_init_u32(&(shared->idx), 0);
+ shared->offset = MAXALIGN(add_size(SizeOfLVShared, BITMAPLEN(nindexes)));
+
+ /*
+ * Initialize the shared index statistics area: the bitmap records which
+ * indexes have a statistics slot in the DSM segment.
+ */
+ memset(shared->bitmap, 0x00, BITMAPLEN(nindexes));
+ for (int idx = 0; idx < nindexes; idx++)
+ {
+ if (!will_parallel_vacuum[idx])
+ continue;
+
+ /* Mark the slot not-NULL, as this index does support parallelism */
+ shared->bitmap[idx >> 3] |= 1 << (idx & 0x07);
+ }
+
+ shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_SHARED, shared);
+ lps->lvshared = shared;
+
+ /* Prepare the dead tuple space */
+ dead_tuples = (LVDeadTuples *) shm_toc_allocate(pcxt->toc, est_deadtuples);
+ dead_tuples->max_tuples = maxtuples;
+ dead_tuples->num_tuples = 0;
+ MemSet(dead_tuples->itemptrs, 0, sizeof(ItemPointerData) * maxtuples);
+ shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_DEAD_TUPLES, dead_tuples);
+ vacrel->dead_tuples = dead_tuples;
+
+ /*
+ * Allocate space for each worker's BufferUsage and WalUsage; no need to
+ * initialize
+ */
+ buffer_usage = shm_toc_allocate(pcxt->toc,
+ mul_size(sizeof(BufferUsage), pcxt->nworkers));
+ shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_BUFFER_USAGE, buffer_usage);
+ lps->buffer_usage = buffer_usage;
+ wal_usage = shm_toc_allocate(pcxt->toc,
+ mul_size(sizeof(WalUsage), pcxt->nworkers));
+ shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_WAL_USAGE, wal_usage);
+ lps->wal_usage = wal_usage;
+
+ /* Store query string for workers */
+ if (debug_query_string)
+ {
+ char *sharedquery;
+
+ sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1);
+ memcpy(sharedquery, debug_query_string, querylen + 1);
+ sharedquery[querylen] = '\0';
+ shm_toc_insert(pcxt->toc,
+ PARALLEL_VACUUM_KEY_QUERY_TEXT, sharedquery);
+ }
+
+ pfree(will_parallel_vacuum);
+ return lps;
+}
+
+/*
+ * Destroy the parallel context, and end parallel mode.
+ *
+ * Since writes are not allowed during parallel mode, copy the updated index
+ * statistics from DSM into local memory so they can be applied afterwards.
+ * One might think that we could exit parallel mode, update the index
+ * statistics, and then destroy the parallel context, but that wouldn't be
+ * safe (see ExitParallelMode).
+ */
+static void
+end_parallel_vacuum(LVRelState *vacrel)
+{
+ IndexBulkDeleteResult **indstats = vacrel->indstats;
+ LVParallelState *lps = vacrel->lps;
+ int nindexes = vacrel->nindexes;
+
+ Assert(!IsParallelWorker());
+
+ /* Copy the updated statistics */
+ for (int idx = 0; idx < nindexes; idx++)
+ {
+ LVSharedIndStats *shared_istat;
+
+ shared_istat = parallel_stats_for_idx(lps->lvshared, idx);
+
+ /*
+ * Skip index -- it must have been processed by the leader, from
+ * inside do_serial_processing_for_unsafe_indexes()
+ */
+ if (shared_istat == NULL)
+ continue;
+
+ if (shared_istat->updated)
+ {
+ indstats[idx] = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+ memcpy(indstats[idx], &(shared_istat->istat), sizeof(IndexBulkDeleteResult));
+ }
+ else
+ indstats[idx] = NULL;
+ }
+
+ DestroyParallelContext(lps->pcxt);
+ ExitParallelMode();
+
+ /* Deactivate parallel vacuum */
+ pfree(lps);
+ vacrel->lps = NULL;
+}
+
+/*
+ * Return shared memory statistics for index at offset 'getidx', if any
+ *
+ * Returning NULL indicates that compute_parallel_vacuum_workers() determined
+ * that the index is a totally unsuitable target for all parallel processing
+ * up front. For example, the index could be below the
+ * min_parallel_index_scan_size cutoff.
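+ *
+ * The LVSharedIndStats slots are laid out contiguously after the LVShared
+ * header, with a slot only for each participating index, so we step over
+ * each preceding non-NULL entry rather than indexing into a fixed-size
+ * array.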
+ */
+static LVSharedIndStats *
+parallel_stats_for_idx(LVShared *lvshared, int getidx)
+{
+ char *p;
+
+ if (IndStatsIsNull(lvshared, getidx))
+ return NULL;
+
+ p = (char *) GetSharedIndStats(lvshared);
+ for (int idx = 0; idx < getidx; idx++)
+ {
+ if (IndStatsIsNull(lvshared, idx))
+ continue;
+
+ p += sizeof(LVSharedIndStats);
+ }
+
+ return (LVSharedIndStats *) p;
+}
+
+/*
+ * Returns false if the given index can't participate in parallel index
+ * vacuum or parallel index cleanup.
+ */
+static bool
+parallel_processing_is_safe(Relation indrel, LVShared *lvshared)
+{
+ uint8 vacoptions = indrel->rd_indam->amparallelvacuumoptions;
+
+ /* first_time must be true only if for_cleanup is true */
+ Assert(lvshared->for_cleanup || !lvshared->first_time);
+
+ if (lvshared->for_cleanup)
+ {
+ /* Skip if the index does not support parallel cleanup */
+ if (((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) == 0) &&
+ ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) == 0))
+ return false;
+
+ /*
+ * Skip if the index supports parallel cleanup conditionally, but we
+ * have already processed the index (for bulkdelete). See the
+ * comments for option VACUUM_OPTION_PARALLEL_COND_CLEANUP to know
+ * when indexes support parallel cleanup conditionally.
+ */
+ if (!lvshared->first_time &&
+ ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) != 0))
+ return false;
+ }
+ else if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) == 0)
+ {
+ /* Skip if the index does not support parallel bulk deletion */
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Perform work within a launched parallel process.
+ *
+ * Since parallel vacuum workers perform only index vacuum or index cleanup,
+ * we don't need to report progress information.
+ */
+void
+parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
+{
+ Relation rel;
+ Relation *indrels;
+ LVShared *lvshared;
+ LVDeadTuples *dead_tuples;
+ BufferUsage *buffer_usage;
+ WalUsage *wal_usage;
+ int nindexes;
+ char *sharedquery;
+ LVRelState vacrel;
+ ErrorContextCallback errcallback;
+
+ /*
+ * A parallel vacuum worker must have only the PROC_IN_VACUUM flag set,
+ * since we don't support parallel vacuum for autovacuum as of now.
+ */
+ Assert(MyProc->statusFlags == PROC_IN_VACUUM);
+
+ lvshared = (LVShared *) shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_SHARED,
+ false);
+ elevel = lvshared->elevel;
+
+ if (lvshared->for_cleanup)
+ elog(DEBUG1, "starting parallel vacuum worker for cleanup");
+ else
+ elog(DEBUG1, "starting parallel vacuum worker for bulk delete");
+
+ /* Set debug_query_string for individual workers */
+ sharedquery = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_QUERY_TEXT, true);
+ debug_query_string = sharedquery;
+ pgstat_report_activity(STATE_RUNNING, debug_query_string);
+
+ /*
+ * Open the table. The lock mode is the same as the leader process's.
+ * That's okay because this lock mode does not conflict among the
+ * parallel workers.
+ */
+ rel = table_open(lvshared->relid, ShareUpdateExclusiveLock);
+
+ /*
+ * Open all indexes. indrels is sorted by OID, which should match the
+ * leader's ordering.
+ */
+ vac_open_indexes(rel, RowExclusiveLock, &nindexes, &indrels);
+ Assert(nindexes > 0);
+
+ /* Set dead tuple space */
+ dead_tuples = (LVDeadTuples *) shm_toc_lookup(toc,
+ PARALLEL_VACUUM_KEY_DEAD_TUPLES,
+ false);
+
+ /* Set cost-based vacuum delay */
+ VacuumCostActive = (VacuumCostDelay > 0);
+ VacuumCostBalance = 0;
+ VacuumPageHit = 0;
+ VacuumPageMiss = 0;
+ VacuumPageDirty = 0;
+ VacuumCostBalanceLocal = 0;
+ VacuumSharedCostBalance = &(lvshared->cost_balance);
+ VacuumActiveNWorkers = &(lvshared->active_nworkers);
+
+ vacrel.rel = rel;
+ vacrel.indrels = indrels;
+ vacrel.nindexes = nindexes;
+ /* Each parallel VACUUM worker gets its own access strategy */
+ vacrel.bstrategy = GetAccessStrategy(BAS_VACUUM);
+ vacrel.indstats = (IndexBulkDeleteResult **)
+ palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
+
+ if (lvshared->maintenance_work_mem_worker > 0)
+ maintenance_work_mem = lvshared->maintenance_work_mem_worker;
+
+ /*
+ * Initialize vacrel for use as error callback arg by parallel worker.
+ */
+ vacrel.relnamespace = get_namespace_name(RelationGetNamespace(rel));
+ vacrel.relname = pstrdup(RelationGetRelationName(rel));
+ vacrel.indname = NULL;
+ vacrel.phase = VACUUM_ERRCB_PHASE_UNKNOWN; /* Not yet processing */
+ vacrel.dead_tuples = dead_tuples;
+
+ /* Setup error traceback support for ereport() */
+ errcallback.callback = vacuum_error_callback;
+ errcallback.arg = &vacrel;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /* Prepare to track buffer usage during parallel execution */
+ InstrStartParallelQuery();
+
+ /* Process indexes to perform vacuum/cleanup */
+ do_parallel_processing(&vacrel, lvshared);
+
+ /* Report buffer/WAL usage during parallel execution */
+ buffer_usage = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_BUFFER_USAGE, false);
+ wal_usage = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_WAL_USAGE, false);
+ InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber],
+ &wal_usage[ParallelWorkerNumber]);
+
+ /* Pop the error context stack */
+ error_context_stack = errcallback.previous;
+
+ vac_close_indexes(nindexes, indrels, RowExclusiveLock);
+ table_close(rel, ShareUpdateExclusiveLock);
+ FreeAccessStrategy(vacrel.bstrategy);
+ pfree(vacrel.indstats);
+}
+
+/*
+ * Error context callback for errors occurring during vacuum.
+ */
+static void
+vacuum_error_callback(void *arg)
+{
+ LVRelState *errinfo = arg;
+
+ switch (errinfo->phase)
+ {
+ case VACUUM_ERRCB_PHASE_SCAN_HEAP:
+ if (BlockNumberIsValid(errinfo->blkno))
+ {
+ if (OffsetNumberIsValid(errinfo->offnum))
+ errcontext("while scanning block %u offset %u of relation \"%s.%s\"",
+ errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
+ else
+ errcontext("while scanning block %u of relation \"%s.%s\"",
+ errinfo->blkno, errinfo->relnamespace, errinfo->relname);
+ }
+ else
+ errcontext("while scanning relation \"%s.%s\"",
+ errinfo->relnamespace, errinfo->relname);
+ break;
+
+ case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
+ if (BlockNumberIsValid(errinfo->blkno))
+ {
+ if (OffsetNumberIsValid(errinfo->offnum))
+ errcontext("while vacuuming block %u offset %u of relation \"%s.%s\"",
+ errinfo->blkno, errinfo->offnum, errinfo->relnamespace, errinfo->relname);
+ else
+ errcontext("while vacuuming block %u of relation \"%s.%s\"",
+ errinfo->blkno, errinfo->relnamespace, errinfo->relname);
+ }
+ else
+ errcontext("while vacuuming relation \"%s.%s\"",
+ errinfo->relnamespace, errinfo->relname);
+ break;
+
+ case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
+ errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
+ errinfo->indname, errinfo->relnamespace, errinfo->relname);
+ break;
+
+ case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
+ errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
+ errinfo->indname, errinfo->relnamespace, errinfo->relname);
+ break;
+
+ case VACUUM_ERRCB_PHASE_TRUNCATE:
+ if (BlockNumberIsValid(errinfo->blkno))
+ errcontext("while truncating relation \"%s.%s\" to %u blocks",
+ errinfo->relnamespace, errinfo->relname, errinfo->blkno);
+ break;
+
+ case VACUUM_ERRCB_PHASE_UNKNOWN:
+ default:
+ return; /* do nothing; the errinfo may not be
+ * initialized */
+ }
+}
+
+/*
+ * Updates the information required for the vacuum error callback. This also
+ * saves the current information, which can later be restored via
+ * restore_vacuum_error_info.
+ */
+static void
+update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel,
+ int phase, BlockNumber blkno, OffsetNumber offnum)
+{
+ if (saved_vacrel)
+ {
+ saved_vacrel->offnum = vacrel->offnum;
+ saved_vacrel->blkno = vacrel->blkno;
+ saved_vacrel->phase = vacrel->phase;
+ }
+
+ vacrel->blkno = blkno;
+ vacrel->offnum = offnum;
+ vacrel->phase = phase;
+}
+
+/*
+ * Restores the vacuum information saved via a prior call to update_vacuum_error_info.
+ */
+static void
+restore_vacuum_error_info(LVRelState *vacrel,
+ const LVSavedErrInfo *saved_vacrel)
+{
+ vacrel->blkno = saved_vacrel->blkno;
+ vacrel->offnum = saved_vacrel->offnum;
+ vacrel->phase = saved_vacrel->phase;
+}
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
new file mode 100644
index 0000000..e198df6
--- /dev/null
+++ b/src/backend/access/heap/visibilitymap.c
@@ -0,0 +1,672 @@
+/*-------------------------------------------------------------------------
+ *
+ * visibilitymap.c
+ * bitmap for tracking visibility of heap tuples
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/access/heap/visibilitymap.c
+ *
+ * INTERFACE ROUTINES
+ * visibilitymap_clear - clear bits for one page in the visibility map
+ * visibilitymap_pin - pin a map page for setting a bit
+ * visibilitymap_pin_ok - check whether correct map page is already pinned
+ * visibilitymap_set - set a bit in a previously pinned page
+ * visibilitymap_get_status - get status of bits
+ * visibilitymap_count - count number of bits set in visibility map
+ * visibilitymap_prepare_truncate -
+ * prepare for truncation of the visibility map
+ *
+ * NOTES
+ *
+ * The visibility map is a bitmap with two bits (all-visible and all-frozen)
+ * per heap page. A set all-visible bit means that all tuples on the page are
+ * known visible to all transactions, and therefore the page doesn't need to
+ * be vacuumed. A set all-frozen bit means that all tuples on the page are
+ * completely frozen, and therefore the page doesn't need to be vacuumed even
+ * if a whole-table-scanning vacuum is required (e.g., an anti-wraparound vacuum).
+ * The all-frozen bit must be set only when the page is already all-visible.
+ *
+ * The map is conservative in the sense that we make sure that whenever a bit
+ * is set, we know the condition is true, but if a bit is not set, it might or
+ * might not be true.
+ *
+ * Clearing visibility map bits is not separately WAL-logged. The callers
+ * must make sure that whenever a bit is cleared, the bit is cleared on WAL
+ * replay of the updating operation as well.
+ *
+ * When we *set* a visibility map bit during VACUUM, we must write WAL. This may
+ * seem counterintuitive, since the bit is basically a hint: if it is clear,
+ * it may still be the case that every tuple on the page is visible to all
+ * transactions; we just don't know that for certain. The difficulty is that
+ * there are two bits which are typically set together: the PD_ALL_VISIBLE bit
+ * on the page itself, and the visibility map bit. If a crash occurs after the
+ * visibility map page makes it to disk and before the updated heap page makes
+ * it to disk, redo must set the bit on the heap page. Otherwise, the next
+ * insert, update, or delete on the heap page will fail to realize that the
+ * visibility map bit must be cleared, possibly causing index-only scans to
+ * return wrong answers.
+ *
+ * VACUUM will normally skip pages for which the visibility map bit is set;
+ * such pages can't contain any dead tuples and therefore don't need vacuuming.
+ *
+ * LOCKING
+ *
+ * In heapam.c, whenever a page is modified so that not all tuples on the
+ * page are visible to everyone anymore, the corresponding bit in the
+ * visibility map is cleared. In order to be crash-safe, we need to do this
+ * while still holding a lock on the heap page and in the same critical
+ * section that logs the page modification. However, we don't want to hold
+ * the buffer lock over any I/O that may be required to read in the visibility
+ * map page. To avoid this, we examine the heap page before locking it;
+ * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
+ * page. Then, we lock the buffer. But this creates a race condition: there
+ * is a possibility that in the time it takes to lock the buffer, the
+ * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
+ * buffer, pin the visibility map page, and relock the buffer. This shouldn't
+ * happen often, because only VACUUM currently sets visibility map bits,
+ * and the race will only occur if VACUUM processes a given page at almost
+ * exactly the same time that someone tries to further modify it.
+ *
+ * To set a bit, you need to hold a lock on the heap page. That prevents
+ * the race condition where VACUUM sees that all tuples on the page are
+ * visible to everyone, but another backend modifies the page before VACUUM
+ * sets the bit in the visibility map.
+ *
+ * When a bit is set, the LSN of the visibility map page is updated to make
+ * sure that the visibility map update doesn't get written to disk before the
+ * WAL record of the changes that made it possible to set the bit is flushed.
+ * But when a bit is cleared, we don't have to do that because it's always
+ * safe to clear a bit in the map from a correctness point of view.
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/heapam_xlog.h"
+#include "access/visibilitymap.h"
+#include "access/xlog.h"
+#include "miscadmin.h"
+#include "port/pg_bitutils.h"
+#include "storage/bufmgr.h"
+#include "storage/lmgr.h"
+#include "storage/smgr.h"
+#include "utils/inval.h"
+
+
+/*#define TRACE_VISIBILITYMAP */
+
+/*
+ * Size of the bitmap on each visibility map page, in bytes. There are no
+ * extra headers, so the whole page minus the standard page header is
+ * used for the bitmap.
+ */
+#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))
+
+/* Number of heap blocks we can represent in one byte */
+#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)
+
+/* Number of heap blocks we can represent in one visibility map page. */
+#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)
+
+/* Mapping from heap block number to the right bit in the visibility map */
+#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
+#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
+#define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)
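+
+/*
+ * Worked example (assuming the default 8 kB BLCKSZ and a 24-byte page
+ * header): MAPSIZE is 8168 bytes, each byte covers 4 heap blocks, and one
+ * map page covers 32672 heap blocks. Heap block 100000 therefore maps to
+ * map block 3, map byte 496, bit offset 0.
+ */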
+
+/* Masks for counting subsets of bits in the visibility map. */
+#define VISIBLE_MASK64 UINT64CONST(0x5555555555555555) /* The lower bit of each
+ * bit pair */
+#define FROZEN_MASK64 UINT64CONST(0xaaaaaaaaaaaaaaaa) /* The upper bit of each
+ * bit pair */
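+
+/*
+ * These masks let visibilitymap_count() (later in this file) count set bits
+ * by AND-ing 64-bit chunks of the map with a mask and taking a popcount;
+ * e.g. pg_popcount64(chunk & VISIBLE_MASK64) gives the number of all-visible
+ * heap blocks covered by that chunk.
+ */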
+
+/* prototypes for internal routines */
+static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
+static void vm_extend(Relation rel, BlockNumber vm_nblocks);
+
+
+/*
+ * visibilitymap_clear - clear specified bits for one page in visibility map
+ *
+ * You must pass a buffer containing the correct map page to this function.
+ * Call visibilitymap_pin first to pin the right one. This function doesn't do
+ * any I/O. Returns true if any bits have been cleared and false otherwise.
+ */
+bool
+visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
+{
+ BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
+ int mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
+ int mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
+ uint8 mask = flags << mapOffset;
+ char *map;
+ bool cleared = false;
+
+ Assert(flags & VISIBILITYMAP_VALID_BITS);
+
+#ifdef TRACE_VISIBILITYMAP
+ elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
+#endif
+
+ if (!BufferIsValid(buf) || BufferGetBlockNumber(buf) != mapBlock)
+ elog(ERROR, "wrong buffer passed to visibilitymap_clear");
+
+ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+ map = PageGetContents(BufferGetPage(buf));
+
+ if (map[mapByte] & mask)
+ {
+ map[mapByte] &= ~mask;
+
+ MarkBufferDirty(buf);
+ cleared = true;
+ }
+
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+
+ return cleared;
+}
+
+/*
+ * visibilitymap_pin - pin a map page for setting a bit
+ *
+ * Setting a bit in the visibility map is a two-phase operation. First, call
+ * visibilitymap_pin, to pin the visibility map page containing the bit for
+ * the heap page. Because that can require I/O to read the map page, you
+ * shouldn't hold a lock on the heap page while doing that. Then, call
+ * visibilitymap_set to actually set the bit.
+ *
+ * On entry, *buf should be InvalidBuffer or a valid buffer returned by
+ * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
+ * relation. On return, *buf is a valid buffer with the map page containing
+ * the bit for heapBlk.
+ *
+ * If the page doesn't exist in the map file yet, it is extended.
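+ *
+ * A typical caller sequence therefore looks roughly like this (a sketch;
+ * see the callers in vacuumlazy.c and heapam.c for the real thing):
+ *
+ * vmbuffer = InvalidBuffer;
+ * visibilitymap_pin(rel, heapBlk, &vmbuffer); (may do I/O)
+ * LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);
+ * ... verify all tuples on the page are all-visible ...
+ * visibilitymap_set(rel, heapBlk, heapBuf, InvalidXLogRecPtr,
+ * vmbuffer, cutoff_xid, flags);
+ * ReleaseBuffer(vmbuffer);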
+ */
+void
+visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
+{
+ BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
+
+ /* Reuse the old pinned buffer if possible */
+ if (BufferIsValid(*buf))
+ {
+ if (BufferGetBlockNumber(*buf) == mapBlock)
+ return;
+
+ ReleaseBuffer(*buf);
+ }
+ *buf = vm_readbuf(rel, mapBlock, true);
+}
+
+/*
+ * visibilitymap_pin_ok - do we already have the correct page pinned?
+ *
+ * On entry, buf should be InvalidBuffer or a valid buffer returned by
+ * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
+ * relation. The return value indicates whether the buffer covers the
+ * given heapBlk.
+ */
+bool
+visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
+{
+ BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
+
+ return BufferIsValid(buf) && BufferGetBlockNumber(buf) == mapBlock;
+}
+
+/*
+ * visibilitymap_set - set bit(s) on a previously pinned page
+ *
+ * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
+ * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
+ * one provided; in normal running, we generate a new XLOG record and set the
+ * page LSN to that value. cutoff_xid is the largest xmin on the page being
+ * marked all-visible; it is needed for Hot Standby, and can be
+ * InvalidTransactionId if the page contains no tuples. It can also be set
+ * to InvalidTransactionId when a page that is already all-visible is being
+ * marked all-frozen.
+ *
+ * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
+ * this function. Except in recovery, caller should also pass the heap
+ * buffer. When checksums are enabled and we're not in recovery, we must add
+ * the heap buffer to the WAL chain to protect it from being torn.
+ *
+ * You must pass a buffer containing the correct map page to this function.
+ * Call visibilitymap_pin first to pin the right one. This function doesn't do
+ * any I/O.
+ */
+void
+visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
+ XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
+ uint8 flags)
+{
+ BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
+ uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
+ uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
+ Page page;
+ uint8 *map;
+
+#ifdef TRACE_VISIBILITYMAP
+ elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
+#endif
+
+ Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
+ Assert(InRecovery || BufferIsValid(heapBuf));
+ Assert(flags & VISIBILITYMAP_VALID_BITS);
+
+ /* Check that we have the right heap page pinned, if present */
+ if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
+ elog(ERROR, "wrong heap buffer passed to visibilitymap_set");
+
+ /* Check that we have the right VM page pinned */
+ if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
+ elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
+
+ page = BufferGetPage(vmBuf);
+ map = (uint8 *) PageGetContents(page);
+ LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
+
+ if (flags != ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS))
+ {
+ START_CRIT_SECTION();
+
+ map[mapByte] |= (flags << mapOffset);
+ MarkBufferDirty(vmBuf);
+
+ if (RelationNeedsWAL(rel))
+ {
+ if (XLogRecPtrIsInvalid(recptr))
+ {
+ Assert(!InRecovery);
+ recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf,
+ cutoff_xid, flags);
+
+ /*
+ * If data checksums are enabled (or wal_log_hints=on), we
+ * need to protect the heap page from being torn.
+ */
+ if (XLogHintBitIsNeeded())
+ {
+ Page heapPage = BufferGetPage(heapBuf);
+
+ /* caller is expected to set PD_ALL_VISIBLE first */
+ Assert(PageIsAllVisible(heapPage));
+ PageSetLSN(heapPage, recptr);
+ }
+ }
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+ }
+
+ LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
+}
+
+/*
+ * visibilitymap_get_status - get status of bits
+ *
+ * Are all tuples on heapBlk visible to all transactions, or all marked
+ * frozen, according to the visibility map?
+ *
+ * On entry, *buf should be InvalidBuffer or a valid buffer returned by an
+ * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
+ * relation. On return, *buf is a valid buffer with the map page containing
+ * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
+ * releasing *buf after it's done testing and setting bits.
+ *
+ * NOTE: This function is typically called without a lock on the heap page,
+ * so somebody else could change the bit just after we look at it. In fact,
+ * since we don't lock the visibility map page either, it's even possible that
+ * someone else could have changed the bit just before we look at it, but yet
+ * we might see the old value. It is the caller's responsibility to deal with
+ * all concurrency issues!
+ */
+uint8
+visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf)
+{
+ BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
+ uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
+ uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
+ char *map;
+ uint8 result;
+
+#ifdef TRACE_VISIBILITYMAP
+ elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
+#endif
+
+ /* Reuse the old pinned buffer if possible */
+ if (BufferIsValid(*buf))
+ {
+ if (BufferGetBlockNumber(*buf) != mapBlock)
+ {
+ ReleaseBuffer(*buf);
+ *buf = InvalidBuffer;
+ }
+ }
+
+ if (!BufferIsValid(*buf))
+ {
+ *buf = vm_readbuf(rel, mapBlock, false);
+ if (!BufferIsValid(*buf))
+ return 0; /* no VM page: status bits are implicitly clear */
+ }
+
+ map = PageGetContents(BufferGetPage(*buf));
+
+ /*
+ * A single byte read is atomic. There could be memory-ordering effects
+ * here, but for performance reasons we make it the caller's job to worry
+ * about that.
+ */
+ result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
+ return result;
+}
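+
+/*
+ * Example (a sketch): a reader that only cares about all-visibility can
+ * test the returned bits directly, e.g.
+ *
+ *		if (visibilitymap_get_status(rel, heapBlk, &vmbuffer) &
+ *			VISIBILITYMAP_ALL_VISIBLE)
+ *			... all tuples on heapBlk are visible to everyone ...
+ *
+ * (this is what the VM_ALL_VISIBLE() macro in visibilitymap.h wraps).
+ */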
+
+/*
+ * visibilitymap_count - count number of bits set in visibility map
+ *
+ * Note: we ignore the possibility of race conditions when the table is being
+ * extended concurrently with the call. New pages added to the table aren't
+ * going to be marked all-visible or all-frozen, so they won't affect the result.
+ */
+void
+visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
+{
+ BlockNumber mapBlock;
+ BlockNumber nvisible = 0;
+ BlockNumber nfrozen = 0;
+
+ /* all_visible must be specified */
+ Assert(all_visible);
+
+ for (mapBlock = 0;; mapBlock++)
+ {
+ Buffer mapBuffer;
+ uint64 *map;
+ int i;
+
+ /*
+ * Read till we fall off the end of the map. We assume that any extra
+ * bytes in the last page are zeroed, so we don't bother excluding
+ * them from the count.
+ */
+ mapBuffer = vm_readbuf(rel, mapBlock, false);
+ if (!BufferIsValid(mapBuffer))
+ break;
+
+ /*
+ * We choose not to lock the page, since the result is going to be
+ * immediately stale anyway if anyone is concurrently setting or
+ * clearing bits, and we only really need an approximate value.
+ */
+ map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer));
+
+ StaticAssertStmt(MAPSIZE % sizeof(uint64) == 0,
+ "unsupported MAPSIZE");
+ if (all_frozen == NULL)
+ {
+ for (i = 0; i < MAPSIZE / sizeof(uint64); i++)
+ nvisible += pg_popcount64(map[i] & VISIBLE_MASK64);
+ }
+ else
+ {
+ for (i = 0; i < MAPSIZE / sizeof(uint64); i++)
+ {
+ nvisible += pg_popcount64(map[i] & VISIBLE_MASK64);
+ nfrozen += pg_popcount64(map[i] & FROZEN_MASK64);
+ }
+ }
+
+ ReleaseBuffer(mapBuffer);
+ }
+
+ *all_visible = nvisible;
+ if (all_frozen)
+ *all_frozen = nfrozen;
+}
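+
+/*
+ * Example (a sketch): VACUUM- and ANALYZE-style callers use this roughly as
+ *
+ *		BlockNumber all_visible,
+ *					all_frozen;
+ *
+ *		visibilitymap_count(rel, &all_visible, &all_frozen);
+ *
+ * (all_frozen may be NULL if the caller doesn't need it) and feed
+ * all_visible into pg_class.relallvisible, which drives the planner's
+ * index-only-scan costing.
+ */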
+
+/*
+ * visibilitymap_prepare_truncate -
+ * prepare for truncation of the visibility map
+ *
+ * nheapblocks is the new size of the heap.
+ *
+ * Returns the new size of the visibility map, in blocks. If
+ * InvalidBlockNumber is returned, there is nothing to truncate; otherwise
+ * the caller is responsible for calling smgrtruncate() to truncate the
+ * visibility map pages.
+ */
+BlockNumber
+visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
+{
+ BlockNumber newnblocks;
+
+ /* last remaining block, byte, and bit */
+ BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
+ uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
+ uint8 truncOffset = HEAPBLK_TO_OFFSET(nheapblocks);
+
+#ifdef TRACE_VISIBILITYMAP
+ elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
+#endif
+
+ RelationOpenSmgr(rel);
+
+ /*
+ * If no visibility map has been created yet for this relation, there's
+ * nothing to truncate.
+ */
+ if (!smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
+ return InvalidBlockNumber;
+
+ /*
+ * Unless the new size is exactly at a visibility map page boundary, the
+ * tail bits in the last remaining map page, representing truncated heap
+ * blocks, need to be cleared. This is not only tidy, but also necessary
+ * because we don't get a chance to clear the bits if the heap is extended
+ * again.
+ */
+ if (truncByte != 0 || truncOffset != 0)
+ {
+ Buffer mapBuffer;
+ Page page;
+ char *map;
+
+ newnblocks = truncBlock + 1;
+
+ mapBuffer = vm_readbuf(rel, truncBlock, false);
+ if (!BufferIsValid(mapBuffer))
+ {
+ /* nothing to do, the file was already smaller */
+ return InvalidBlockNumber;
+ }
+
+ page = BufferGetPage(mapBuffer);
+ map = PageGetContents(page);
+
+ LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /* NO EREPORT(ERROR) from here till changes are logged */
+ START_CRIT_SECTION();
+
+ /* Clear out the unwanted bytes. */
+ MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));
+
+ /*----
+ * Mask out the unwanted bits of the last remaining byte.
+ *
+ * ((1 << 0) - 1) = 00000000
+ * ((1 << 1) - 1) = 00000001
+ * ...
+ * ((1 << 6) - 1) = 00111111
+ * ((1 << 7) - 1) = 01111111
+ *----
+ */
+ map[truncByte] &= (1 << truncOffset) - 1;
+
+ /*
+ * Truncation of a relation is WAL-logged at a higher level, and we
+ * will be called at WAL replay. But if checksums are enabled, we still
+ * need to write a WAL record to protect against a torn page, in case
+ * the page is flushed to disk before the truncation WAL record. We
+ * cannot use MarkBufferDirtyHint here, because that will not dirty the
+ * page during recovery.
+ */
+ MarkBufferDirty(mapBuffer);
+ if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded())
+ log_newpage_buffer(mapBuffer, false);
+
+ END_CRIT_SECTION();
+
+ UnlockReleaseBuffer(mapBuffer);
+ }
+ else
+ newnblocks = truncBlock;
+
+ if (smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM) <= newnblocks)
+ {
+ /* nothing to do, the file was already smaller than requested size */
+ return InvalidBlockNumber;
+ }
+
+ return newnblocks;
+}
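+
+/*
+ * Example (a sketch of the caller's side, cf. RelationTruncate):
+ *
+ *		BlockNumber vm_nblocks = visibilitymap_prepare_truncate(rel, nblocks);
+ *
+ *		if (BlockNumberIsValid(vm_nblocks))
+ *			... pass VISIBILITYMAP_FORKNUM and vm_nblocks to smgrtruncate() ...
+ */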
+
+/*
+ * Read a visibility map page.
+ *
+ * If the page doesn't exist, InvalidBuffer is returned, unless 'extend' is
+ * true, in which case the visibility map file is extended first.
+ */
+static Buffer
+vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
+{
+ Buffer buf;
+
+ /*
+ * We might not have opened the relation at the smgr level yet, or we
+ * might have been forced to close it by a sinval message. The code below
+ * won't necessarily notice relation extension immediately when extend =
+ * false, so we rely on sinval messages to ensure that our ideas about the
+ * size of the map aren't too far out of date.
+ */
+ RelationOpenSmgr(rel);
+
+ /*
+ * If we haven't cached the size of the visibility map fork yet, check it
+ * first.
+ */
+ if (rel->rd_smgr->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber)
+ {
+ if (smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
+ smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM);
+ else
+ rel->rd_smgr->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
+ }
+
+ /* Handle requests beyond EOF */
+ if (blkno >= rel->rd_smgr->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
+ {
+ if (extend)
+ vm_extend(rel, blkno + 1);
+ else
+ return InvalidBuffer;
+ }
+
+ /*
+ * Use ZERO_ON_ERROR mode, and initialize the page if necessary. It's
+ * always safe to clear bits, so it's better to clear corrupt pages than
+ * error out.
+ *
+ * The initialize-the-page part is trickier than it looks, because of the
+ * possibility of multiple backends doing this concurrently, and our
+ * desire to not uselessly take the buffer lock in the normal path where
+ * the page is OK. We must take the lock to initialize the page, so
+ * recheck page newness after we have the lock, in case someone else
+ * already did it. Also, because we initially check PageIsNew with no
+ * lock, it's possible to fall through and return the buffer while someone
+ * else is still initializing the page (i.e., we might see pd_upper as set
+ * but other page header fields are still zeroes). This is harmless for
+ * callers that will take a buffer lock themselves, but some callers
+ * inspect the page without any lock at all. The latter is OK only so
+ * long as it doesn't depend on the page header having correct contents.
+ * Current usage is safe because PageGetContents() does not require that.
+ */
+ buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
+ RBM_ZERO_ON_ERROR, NULL);
+ if (PageIsNew(BufferGetPage(buf)))
+ {
+ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+ if (PageIsNew(BufferGetPage(buf)))
+ PageInit(BufferGetPage(buf), BLCKSZ, 0);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+ }
+ return buf;
+}
+
+/*
+ * Ensure that the visibility map fork is at least vm_nblocks long, extending
+ * it if necessary with zeroed pages.
+ */
+static void
+vm_extend(Relation rel, BlockNumber vm_nblocks)
+{
+ BlockNumber vm_nblocks_now;
+ PGAlignedBlock pg;
+
+ PageInit((Page) pg.data, BLCKSZ, 0);
+
+ /*
+ * We use the relation extension lock to lock out other backends trying to
+ * extend the visibility map at the same time. It also locks out extension
+ * of the main fork, unnecessarily, but extending the visibility map
+ * happens seldom enough that it doesn't seem worthwhile to have a
+ * separate lock tag type for it.
+ *
+ * Note that another backend might have extended or created the relation
+ * by the time we get the lock.
+ */
+ LockRelationForExtension(rel, ExclusiveLock);
+
+ /* Might have to re-open if a cache flush happened */
+ RelationOpenSmgr(rel);
+
+ /*
+ * Create the file first if it doesn't exist. If the cached size
+ * (smgr_cached_nblocks) is positive then the fork must exist, so there is
+ * no need for an smgrexists call.
+ */
+ if ((rel->rd_smgr->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == 0 ||
+ rel->rd_smgr->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber) &&
+ !smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
+ smgrcreate(rel->rd_smgr, VISIBILITYMAP_FORKNUM, false);
+
+ /* Invalidate cache so that smgrnblocks() asks the kernel. */
+ rel->rd_smgr->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = InvalidBlockNumber;
+ vm_nblocks_now = smgrnblocks(rel->rd_smgr, VISIBILITYMAP_FORKNUM);
+
+ /* Now extend the file */
+ while (vm_nblocks_now < vm_nblocks)
+ {
+ PageSetChecksumInplace((Page) pg.data, vm_nblocks_now);
+
+ smgrextend(rel->rd_smgr, VISIBILITYMAP_FORKNUM, vm_nblocks_now,
+ pg.data, false);
+ vm_nblocks_now++;
+ }
+
+ /*
+ * Send a shared-inval message to force other backends to close any smgr
+ * references they may have for this rel, which we are about to change.
+ * This is a useful optimization because it means that backends don't have
+ * to keep checking for creation or extension of the file, which happens
+ * infrequently.
+ */
+ CacheInvalidateSmgr(rel->rd_smgr->smgr_rnode);
+
+ UnlockRelationForExtension(rel, ExclusiveLock);
+}