Diffstat (limited to 'src/include/access/heapam.h')
-rw-r--r--  src/include/access/heapam.h | 235
 1 file changed, 235 insertions, 0 deletions
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
new file mode 100644
index 0000000..4f1dff9
--- /dev/null
+++ b/src/include/access/heapam.h
@@ -0,0 +1,235 @@
+/*-------------------------------------------------------------------------
+ *
+ * heapam.h
+ * POSTGRES heap access method definitions.
+ *
+ *
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/access/heapam.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef HEAPAM_H
+#define HEAPAM_H
+
+#include "access/relation.h" /* for backward compatibility */
+#include "access/relscan.h"
+#include "access/sdir.h"
+#include "access/skey.h"
+#include "access/table.h" /* for backward compatibility */
+#include "access/tableam.h"
+#include "nodes/lockoptions.h"
+#include "nodes/primnodes.h"
+#include "storage/bufpage.h"
+#include "storage/dsm.h"
+#include "storage/lockdefs.h"
+#include "storage/shm_toc.h"
+#include "utils/relcache.h"
+#include "utils/snapshot.h"
+
+
+/* "options" flag bits for heap_insert */
+#define HEAP_INSERT_SKIP_FSM TABLE_INSERT_SKIP_FSM
+#define HEAP_INSERT_FROZEN TABLE_INSERT_FROZEN
+#define HEAP_INSERT_NO_LOGICAL TABLE_INSERT_NO_LOGICAL
+#define HEAP_INSERT_SPECULATIVE 0x0010
+
+typedef struct BulkInsertStateData *BulkInsertState;
+struct TupleTableSlot;
+
+#define MaxLockTupleMode LockTupleExclusive
+
+/*
+ * Descriptor for heap table scans.
+ */
+typedef struct HeapScanDescData
+{
+ TableScanDescData rs_base; /* AM independent part of the descriptor */
+
+ /* state set up at initscan time */
+ BlockNumber rs_nblocks; /* total number of blocks in rel */
+ BlockNumber rs_startblock; /* block # to start at */
+ BlockNumber rs_numblocks; /* max number of blocks to scan */
+ /* rs_numblocks is usually InvalidBlockNumber, meaning "scan whole rel" */
+
+ /* scan current state */
+ bool rs_inited; /* false = scan not init'd yet */
+ BlockNumber rs_cblock; /* current block # in scan, if any */
+ Buffer rs_cbuf; /* current buffer in scan, if any */
+ /* NB: if rs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
+
+ BufferAccessStrategy rs_strategy; /* access strategy for reads */
+
+ HeapTupleData rs_ctup; /* current tuple in scan, if any */
+
+ /*
+ * For parallel scans to store page allocation data. NULL when not
+ * performing a parallel scan.
+ */
+ ParallelBlockTableScanWorkerData *rs_parallelworkerdata;
+
+ /* these fields only used in page-at-a-time mode and for bitmap scans */
+ int rs_cindex; /* current tuple's index in vistuples */
+ int rs_ntuples; /* number of visible tuples on page */
+ OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]; /* their offsets */
+} HeapScanDescData;
+typedef struct HeapScanDescData *HeapScanDesc;
+
+/*
+ * Descriptor for fetches from heap via an index.
+ */
+typedef struct IndexFetchHeapData
+{
+ IndexFetchTableData xs_base; /* AM independent part of the descriptor */
+
+ Buffer xs_cbuf; /* current heap buffer in scan, if any */
+ /* NB: if xs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
+} IndexFetchHeapData;
+
+/* Result codes for HeapTupleSatisfiesVacuum */
+typedef enum
+{
+ HEAPTUPLE_DEAD, /* tuple is dead and deletable */
+ HEAPTUPLE_LIVE, /* tuple is live (committed, no deleter) */
+ HEAPTUPLE_RECENTLY_DEAD, /* tuple is dead, but not deletable yet */
+ HEAPTUPLE_INSERT_IN_PROGRESS, /* inserting xact is still in progress */
+ HEAPTUPLE_DELETE_IN_PROGRESS /* deleting xact is still in progress */
+} HTSV_Result;
+
+/* ----------------
+ * function prototypes for heap access method
+ *
+ * heap_create, heap_create_with_catalog, and heap_drop_with_catalog
+ * are declared in catalog/heap.h
+ * ----------------
+ */
+
+
+/*
+ * HeapScanIsValid
+ * True iff the heap scan is valid.
+ */
+#define HeapScanIsValid(scan) PointerIsValid(scan)
+
+extern TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot,
+ int nkeys, ScanKey key,
+ ParallelTableScanDesc parallel_scan,
+ uint32 flags);
+extern void heap_setscanlimits(TableScanDesc scan, BlockNumber startBlk,
+ BlockNumber numBlks);
+extern void heapgetpage(TableScanDesc scan, BlockNumber page);
+extern void heap_rescan(TableScanDesc scan, ScanKey key, bool set_params,
+ bool allow_strat, bool allow_sync, bool allow_pagemode);
+extern void heap_endscan(TableScanDesc scan);
+extern HeapTuple heap_getnext(TableScanDesc scan, ScanDirection direction);
+extern bool heap_getnextslot(TableScanDesc sscan,
+ ScanDirection direction, struct TupleTableSlot *slot);
+extern void heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
+ ItemPointer maxtid);
+extern bool heap_getnextslot_tidrange(TableScanDesc sscan,
+ ScanDirection direction,
+ TupleTableSlot *slot);
+extern bool heap_fetch(Relation relation, Snapshot snapshot,
+ HeapTuple tuple, Buffer *userbuf);
+extern bool heap_fetch_extended(Relation relation, Snapshot snapshot,
+ HeapTuple tuple, Buffer *userbuf,
+ bool keep_buf);
+extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
+ Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
+ bool *all_dead, bool first_call);
+
+extern void heap_get_latest_tid(TableScanDesc scan, ItemPointer tid);
+
+extern BulkInsertState GetBulkInsertState(void);
+extern void FreeBulkInsertState(BulkInsertState);
+extern void ReleaseBulkInsertStatePin(BulkInsertState bistate);
+
+extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid,
+ int options, BulkInsertState bistate);
+extern void heap_multi_insert(Relation relation, struct TupleTableSlot **slots,
+ int ntuples, CommandId cid, int options,
+ BulkInsertState bistate);
+extern TM_Result heap_delete(Relation relation, ItemPointer tid,
+ CommandId cid, Snapshot crosscheck, bool wait,
+ struct TM_FailureData *tmfd, bool changingPart);
+extern void heap_finish_speculative(Relation relation, ItemPointer tid);
+extern void heap_abort_speculative(Relation relation, ItemPointer tid);
+extern TM_Result heap_update(Relation relation, ItemPointer otid,
+ HeapTuple newtup,
+ CommandId cid, Snapshot crosscheck, bool wait,
+ struct TM_FailureData *tmfd, LockTupleMode *lockmode);
+extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
+ CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
+ bool follow_update,
+ Buffer *buffer, struct TM_FailureData *tmfd);
+
+extern void heap_inplace_update(Relation relation, HeapTuple tuple);
+extern bool heap_freeze_tuple(HeapTupleHeader tuple,
+ TransactionId relfrozenxid, TransactionId relminmxid,
+ TransactionId cutoff_xid, TransactionId cutoff_multi);
+extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
+ MultiXactId cutoff_multi, Buffer buf);
+extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple);
+
+extern void simple_heap_insert(Relation relation, HeapTuple tup);
+extern void simple_heap_delete(Relation relation, ItemPointer tid);
+extern void simple_heap_update(Relation relation, ItemPointer otid,
+ HeapTuple tup);
+
+extern TransactionId heap_index_delete_tuples(Relation rel,
+ TM_IndexDeleteOp *delstate);
+
+/* in heap/pruneheap.c */
+struct GlobalVisState;
+extern void heap_page_prune_opt(Relation relation, Buffer buffer);
+extern int heap_page_prune(Relation relation, Buffer buffer,
+ struct GlobalVisState *vistest,
+ TransactionId old_snap_xmin,
+ TimestampTz old_snap_ts,
+ bool report_stats,
+ OffsetNumber *off_loc);
+extern void heap_page_prune_execute(Buffer buffer,
+ OffsetNumber *redirected, int nredirected,
+ OffsetNumber *nowdead, int ndead,
+ OffsetNumber *nowunused, int nunused);
+extern void heap_get_root_tuples(Page page, OffsetNumber *root_offsets);
+
+/* in heap/vacuumlazy.c */
+struct VacuumParams;
+extern void heap_vacuum_rel(Relation rel,
+ struct VacuumParams *params, BufferAccessStrategy bstrategy);
+extern void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc);
+
+/* in heap/heapam_visibility.c */
+extern bool HeapTupleSatisfiesVisibility(HeapTuple stup, Snapshot snapshot,
+ Buffer buffer);
+extern TM_Result HeapTupleSatisfiesUpdate(HeapTuple stup, CommandId curcid,
+ Buffer buffer);
+extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple stup, TransactionId OldestXmin,
+ Buffer buffer);
+extern HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple stup, Buffer buffer,
+ TransactionId *dead_after);
+extern void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer,
+ uint16 infomask, TransactionId xid);
+extern bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple);
+extern bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
+extern bool HeapTupleIsSurelyDead(HeapTuple htup,
+ struct GlobalVisState *vistest);
+
+/*
+ * To avoid leaking too much knowledge about reorderbuffer implementation
+ * details this is implemented in reorderbuffer.c not heapam_visibility.c
+ */
+struct HTAB;
+extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data,
+ Snapshot snapshot,
+ HeapTuple htup,
+ Buffer buffer,
+ CommandId *cmin, CommandId *cmax);
+extern void HeapCheckForSerializableConflictOut(bool valid, Relation relation, HeapTuple tuple,
+ Buffer buffer, Snapshot snapshot);
+
+#endif /* HEAPAM_H */
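
For context on how the scan API declared in this header fits together, the sketch below drives a plain sequential scan with heap_beginscan, heap_getnext, and heap_endscan. It is illustrative only and not part of the commit: the helper name, the snapshot choice, and the scan flags are assumptions, and most in-core callers go through the table AM wrappers in access/tableam.h rather than calling heap_beginscan directly.

/* Hypothetical usage sketch; not part of this patch. */
#include "postgres.h"

#include "access/heapam.h"
#include "access/table.h"
#include "utils/snapmgr.h"

/*
 * Count the tuples visible to the current transaction snapshot.
 * Assumes we are already inside a transaction; error handling omitted.
 */
static uint64
count_visible_tuples(Oid relid)
{
	Relation	rel = table_open(relid, AccessShareLock);
	Snapshot	snapshot = GetTransactionSnapshot();
	TableScanDesc scan;
	HeapTuple	tuple;
	uint64		ntuples = 0;

	/* no scan keys, no parallel scan, ordinary sequential-scan flags */
	scan = heap_beginscan(rel, snapshot, 0, NULL, NULL,
						  SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
						  SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);

	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
		ntuples++;

	heap_endscan(scan);
	table_close(rel, AccessShareLock);

	return ntuples;
}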
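
The bulk-insert helpers are meant to be used as a unit: allocate a BulkInsertState, pass it to heap_insert (or heap_multi_insert) for the whole batch, then free it. A minimal sketch, assuming the caller has already built the tuples and that HEAP_INSERT_SKIP_FSM is appropriate for the load:

/* Hypothetical usage sketch; not part of this patch. */
static void
bulk_insert_tuples(Relation rel, HeapTuple *tuples, int ntuples, CommandId cid)
{
	BulkInsertState bistate = GetBulkInsertState();

	for (int i = 0; i < ntuples; i++)
		heap_insert(rel, tuples[i], cid, HEAP_INSERT_SKIP_FSM, bistate);

	FreeBulkInsertState(bistate);
}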
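
The HTSV_Result codes returned by HeapTupleSatisfiesVacuum are normally consumed with a switch that separates "safe to remove" from everything else. A sketch, assuming the caller holds a pin and at least a shared content lock on the tuple's buffer and has computed a suitable OldestXmin horizon:

/* Hypothetical usage sketch; not part of this patch. */
static bool
tuple_is_removable(HeapTuple tuple, TransactionId OldestXmin, Buffer buffer)
{
	switch (HeapTupleSatisfiesVacuum(tuple, OldestXmin, buffer))
	{
		case HEAPTUPLE_DEAD:
			return true;		/* dead to everyone; may be removed */
		case HEAPTUPLE_RECENTLY_DEAD:	/* dead, but maybe still visible */
		case HEAPTUPLE_LIVE:
		case HEAPTUPLE_INSERT_IN_PROGRESS:
		case HEAPTUPLE_DELETE_IN_PROGRESS:
			return false;
	}
	return false;				/* keep the compiler quiet */
}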