/*----------------------------------------------------------------------
*
* tableam.c
* Table access method routines too big to be inline functions.
*
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/access/table/tableam.c
*
* NOTES
 * Note that most functions in here are documented in tableam.h, rather than
 * here.  That's because there are a lot of inline functions in tableam.h and
 * it'd be harder to understand if one constantly had to switch between files.
*
*----------------------------------------------------------------------
*/
#include "postgres.h"
#include <math.h>
#include "access/syncscan.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "optimizer/plancat.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/shmem.h"
#include "storage/smgr.h"
/*
* Constants to control the behavior of block allocation to parallel workers
* during a parallel seqscan. Technically these values do not need to be
* powers of 2, but having them as powers of 2 makes the math more optimal
* and makes the ramp-down stepping more even.
*/
/* The number of I/O chunks we try to break a parallel seqscan down into */
#define PARALLEL_SEQSCAN_NCHUNKS 2048
/* Ramp down size of allocations when we've only this number of chunks left */
#define PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS 64
/* Cap the size of parallel I/O chunks to this number of blocks */
#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE 8192
/* GUC variables */
char *default_table_access_method = DEFAULT_TABLE_ACCESS_METHOD;
bool synchronize_seqscans = true;
/* ----------------------------------------------------------------------------
* Slot functions.
* ----------------------------------------------------------------------------
*/
const TupleTableSlotOps *
table_slot_callbacks(Relation relation)
{
const TupleTableSlotOps *tts_cb;
if (relation->rd_tableam)
tts_cb = relation->rd_tableam->slot_callbacks(relation);
else if (relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
{
/*
* Historically FDWs expect to store heap tuples in slots. Continue
* handing them one, to make it less painful to adapt FDWs to new
* versions. The cost of a heap slot over a virtual slot is pretty
* small.
*/
tts_cb = &TTSOpsHeapTuple;
}
else
{
/*
* These need to be supported, as some parts of the code (like COPY)
* need to create slots for such relations too. It seems better to
		 * centralize the knowledge that a virtual slot is the right thing in
* that case here.
*/
Assert(relation->rd_rel->relkind == RELKIND_VIEW ||
relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE);
tts_cb = &TTSOpsVirtual;
}
return tts_cb;
}
TupleTableSlot *
table_slot_create(Relation relation, List **reglist)
{
const TupleTableSlotOps *tts_cb;
TupleTableSlot *slot;
tts_cb = table_slot_callbacks(relation);
slot = MakeSingleTupleTableSlot(RelationGetDescr(relation), tts_cb);
if (reglist)
*reglist = lappend(*reglist, slot);
return slot;
}
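
/*
 * Typical usage sketch (loosely modeled on COPY FROM; error handling and
 * multi-insert batching elided, names such as estate are illustrative):
 *
 *		TupleTableSlot *slot = table_slot_create(rel, &estate->es_tupleTable);
 *
 *		ExecClearTuple(slot);
 *		... fill slot->tts_values / slot->tts_isnull ...
 *		ExecStoreVirtualTuple(slot);
 *		table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
 *
 * Passing a registration list such as estate->es_tupleTable means the slot
 * is released by ExecResetTupleTable() at executor shutdown rather than by
 * an explicit ExecDropSingleTupleTableSlot() call.
 */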
/* ----------------------------------------------------------------------------
* Table scan functions.
* ----------------------------------------------------------------------------
*/
TableScanDesc
table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_SEQSCAN |
SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
Oid relid = RelationGetRelid(relation);
Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
return relation->rd_tableam->scan_begin(relation, snapshot, nkeys, key,
NULL, flags);
}
void
table_scan_update_snapshot(TableScanDesc scan, Snapshot snapshot)
{
Assert(IsMVCCSnapshot(snapshot));
RegisterSnapshot(snapshot);
scan->rs_snapshot = snapshot;
scan->rs_flags |= SO_TEMP_SNAPSHOT;
}
/* ----------------------------------------------------------------------------
* Parallel table scan related functions.
* ----------------------------------------------------------------------------
*/
Size
table_parallelscan_estimate(Relation rel, Snapshot snapshot)
{
Size sz = 0;
if (IsMVCCSnapshot(snapshot))
sz = add_size(sz, EstimateSnapshotSpace(snapshot));
else
Assert(snapshot == SnapshotAny);
sz = add_size(sz, rel->rd_tableam->parallelscan_estimate(rel));
return sz;
}
void
table_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan,
Snapshot snapshot)
{
Size snapshot_off = rel->rd_tableam->parallelscan_initialize(rel, pscan);
pscan->phs_snapshot_off = snapshot_off;
if (IsMVCCSnapshot(snapshot))
{
SerializeSnapshot(snapshot, (char *) pscan + pscan->phs_snapshot_off);
pscan->phs_snapshot_any = false;
}
else
{
Assert(snapshot == SnapshotAny);
pscan->phs_snapshot_any = true;
}
}
TableScanDesc
table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
{
Snapshot snapshot;
uint32 flags = SO_TYPE_SEQSCAN |
SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
Assert(RelationGetRelid(relation) == pscan->phs_relid);
if (!pscan->phs_snapshot_any)
{
/* Snapshot was serialized -- restore it */
snapshot = RestoreSnapshot((char *) pscan + pscan->phs_snapshot_off);
RegisterSnapshot(snapshot);
flags |= SO_TEMP_SNAPSHOT;
}
else
{
/* SnapshotAny passed by caller (not serialized) */
snapshot = SnapshotAny;
}
return relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
pscan, flags);
}
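
/*
 * Sketch of the expected choreography for a parallel sequential scan
 * (simplified; the DSM/shm_toc plumbing and error handling that the executor
 * normally provides, e.g. in nodeSeqscan.c, are elided):
 *
 *	in the leader, while setting up the parallel DSM:
 *		sz = table_parallelscan_estimate(rel, snapshot);
 *		pscan = shm_toc_allocate(toc, sz);
 *		table_parallelscan_initialize(rel, pscan, snapshot);
 *	in each participating process:
 *		scan = table_beginscan_parallel(rel, pscan);
 *		while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
 *			... process the tuple in slot ...
 *		table_endscan(scan);
 */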
/* ----------------------------------------------------------------------------
* Index scan related functions.
* ----------------------------------------------------------------------------
*/
/*
 * table_index_fetch_tuple_check - check whether the tuple identified by tid
 * is visible according to snapshot.
 *
 * To perform that check we simply start an index fetch, create the necessary
 * slot, do the table lookup, and shut everything down again.  This could be
 * optimized, but is unlikely to matter from a performance POV: even if live
 * index pointers also matching a unique index key are frequent, the CPU
 * overhead of this routine is unlikely to matter.
 *
 * Note that *tid may be modified when we return true, if the AM supports
 * storing multiple row versions reachable via a single index entry (like
 * heap's HOT).
 */
bool
table_index_fetch_tuple_check(Relation rel,
ItemPointer tid,
Snapshot snapshot,
bool *all_dead)
{
IndexFetchTableData *scan;
TupleTableSlot *slot;
bool call_again = false;
bool found;
slot = table_slot_create(rel, NULL);
scan = table_index_fetch_begin(rel);
found = table_index_fetch_tuple(scan, tid, snapshot, slot, &call_again,
all_dead);
table_index_fetch_end(scan);
ExecDropSingleTupleTableSlot(slot);
return found;
}
/* ------------------------------------------------------------------------
* Functions for non-modifying operations on individual tuples
* ------------------------------------------------------------------------
*/
void
table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid)
{
Relation rel = scan->rs_rd;
const TableAmRoutine *tableam = rel->rd_tableam;
/*
* We don't expect direct calls to table_tuple_get_latest_tid with valid
* CheckXidAlive for catalog or regular tables. See detailed comments in
* xact.c where these variables are declared.
*/
if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
elog(ERROR, "unexpected table_tuple_get_latest_tid call during logical decoding");
/*
* Since this can be called with user-supplied TID, don't trust the input
* too much.
*/
if (!tableam->tuple_tid_valid(scan, tid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("tid (%u, %u) is not valid for relation \"%s\"",
ItemPointerGetBlockNumberNoCheck(tid),
ItemPointerGetOffsetNumberNoCheck(tid),
RelationGetRelationName(rel))));
tableam->tuple_get_latest_tid(scan, tid);
}
/* ----------------------------------------------------------------------------
* Functions to make modifications a bit simpler.
* ----------------------------------------------------------------------------
*/
/*
* simple_table_tuple_insert - insert a tuple
*
* Currently, this routine differs from table_tuple_insert only in supplying a
* default command ID and not allowing access to the speedup options.
*/
void
simple_table_tuple_insert(Relation rel, TupleTableSlot *slot)
{
table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
}
/*
* simple_table_tuple_delete - delete a tuple
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
* on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
simple_table_tuple_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
{
TM_Result result;
TM_FailureData tmfd;
result = table_tuple_delete(rel, tid,
GetCurrentCommandId(true),
snapshot, InvalidSnapshot,
true /* wait for commit */ ,
&tmfd, false /* changingPart */ );
switch (result)
{
case TM_SelfModified:
/* Tuple was already updated in current command? */
elog(ERROR, "tuple already updated by self");
break;
case TM_Ok:
/* done successfully */
break;
case TM_Updated:
elog(ERROR, "tuple concurrently updated");
break;
case TM_Deleted:
elog(ERROR, "tuple concurrently deleted");
break;
default:
elog(ERROR, "unrecognized table_tuple_delete status: %u", result);
break;
}
}
/*
* simple_table_tuple_update - replace a tuple
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
* on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
simple_table_tuple_update(Relation rel, ItemPointer otid,
TupleTableSlot *slot,
Snapshot snapshot,
TU_UpdateIndexes *update_indexes)
{
TM_Result result;
TM_FailureData tmfd;
LockTupleMode lockmode;
result = table_tuple_update(rel, otid, slot,
GetCurrentCommandId(true),
snapshot, InvalidSnapshot,
true /* wait for commit */ ,
&tmfd, &lockmode, update_indexes);
switch (result)
{
case TM_SelfModified:
/* Tuple was already updated in current command? */
elog(ERROR, "tuple already updated by self");
break;
case TM_Ok:
/* done successfully */
break;
case TM_Updated:
elog(ERROR, "tuple concurrently updated");
break;
case TM_Deleted:
elog(ERROR, "tuple concurrently deleted");
break;
default:
elog(ERROR, "unrecognized table_tuple_update status: %u", result);
break;
}
}
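
/*
 * For reference (sketch): these simple_* wrappers are aimed at callers such
 * as the logical replication apply path (ExecSimpleRelationInsert/Update/
 * Delete in execReplication.c), where concurrent changes to the target tuple
 * are not expected and any conflict is simply reported as an error.
 */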
/* ----------------------------------------------------------------------------
* Helper functions to implement parallel scans for block oriented AMs.
* ----------------------------------------------------------------------------
*/
Size
table_block_parallelscan_estimate(Relation rel)
{
return sizeof(ParallelBlockTableScanDescData);
}
Size
table_block_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan)
{
ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
bpscan->base.phs_relid = RelationGetRelid(rel);
bpscan->phs_nblocks = RelationGetNumberOfBlocks(rel);
/* compare phs_syncscan initialization to similar logic in initscan */
bpscan->base.phs_syncscan = synchronize_seqscans &&
!RelationUsesLocalBuffers(rel) &&
bpscan->phs_nblocks > NBuffers / 4;
SpinLockInit(&bpscan->phs_mutex);
bpscan->phs_startblock = InvalidBlockNumber;
pg_atomic_init_u64(&bpscan->phs_nallocated, 0);
return sizeof(ParallelBlockTableScanDescData);
}
void
table_block_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
{
ParallelBlockTableScanDesc bpscan = (ParallelBlockTableScanDesc) pscan;
pg_atomic_write_u64(&bpscan->phs_nallocated, 0);
}
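
/*
 * Note for AM authors (sketch): a block-based AM whose parallel scan state
 * begins with a ParallelBlockTableScanDescData can usually plug the three
 * helpers above directly into its TableAmRoutine, e.g.
 *
 *		.parallelscan_estimate = table_block_parallelscan_estimate,
 *		.parallelscan_initialize = table_block_parallelscan_initialize,
 *		.parallelscan_reinitialize = table_block_parallelscan_reinitialize,
 *
 * as heap does.
 */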
/*
* find and set the scan's startblock
*
* Determine where the parallel seq scan should start. This function may be
* called many times, once by each parallel worker. We must be careful only
* to set the startblock once.
*/
void
table_block_parallelscan_startblock_init(Relation rel,
ParallelBlockTableScanWorker pbscanwork,
ParallelBlockTableScanDesc pbscan)
{
BlockNumber sync_startpage = InvalidBlockNumber;
/* Reset the state we use for controlling allocation size. */
memset(pbscanwork, 0, sizeof(*pbscanwork));
StaticAssertStmt(MaxBlockNumber <= 0xFFFFFFFE,
"pg_nextpower2_32 may be too small for non-standard BlockNumber width");
/*
* We determine the chunk size based on the size of the relation. First we
* split the relation into PARALLEL_SEQSCAN_NCHUNKS chunks but we then
* take the next highest power of 2 number of the chunk size. This means
* we split the relation into somewhere between PARALLEL_SEQSCAN_NCHUNKS
* and PARALLEL_SEQSCAN_NCHUNKS / 2 chunks.
*/
pbscanwork->phsw_chunk_size = pg_nextpower2_32(Max(pbscan->phs_nblocks /
PARALLEL_SEQSCAN_NCHUNKS, 1));
/*
* Ensure we don't go over the maximum chunk size with larger tables. This
* means we may get much more than PARALLEL_SEQSCAN_NCHUNKS for larger
* tables. Too large a chunk size has been shown to be detrimental to
* synchronous scan performance.
*/
pbscanwork->phsw_chunk_size = Min(pbscanwork->phsw_chunk_size,
PARALLEL_SEQSCAN_MAX_CHUNK_SIZE);
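	/*
	 * For illustration (example numbers only): a 100000-block relation gives
	 * 100000 / 2048 = 48, rounded up to the next power of 2, i.e. a chunk
	 * size of 64 blocks; a 50-million-block relation would compute 32768 and
	 * be capped at PARALLEL_SEQSCAN_MAX_CHUNK_SIZE (8192).
	 */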
retry:
/* Grab the spinlock. */
SpinLockAcquire(&pbscan->phs_mutex);
/*
* If the scan's startblock has not yet been initialized, we must do so
* now. If this is not a synchronized scan, we just start at block 0, but
* if it is a synchronized scan, we must get the starting position from
* the synchronized scan machinery. We can't hold the spinlock while
* doing that, though, so release the spinlock, get the information we
* need, and retry. If nobody else has initialized the scan in the
* meantime, we'll fill in the value we fetched on the second time
* through.
*/
if (pbscan->phs_startblock == InvalidBlockNumber)
{
if (!pbscan->base.phs_syncscan)
pbscan->phs_startblock = 0;
else if (sync_startpage != InvalidBlockNumber)
pbscan->phs_startblock = sync_startpage;
else
{
SpinLockRelease(&pbscan->phs_mutex);
sync_startpage = ss_get_location(rel, pbscan->phs_nblocks);
goto retry;
}
}
SpinLockRelease(&pbscan->phs_mutex);
}
/*
* get the next page to scan
*
* Get the next page to scan. Even if there are no pages left to scan,
* another backend could have grabbed a page to scan and not yet finished
* looking at it, so it doesn't follow that the scan is done when the first
* backend gets an InvalidBlockNumber return.
*/
BlockNumber
table_block_parallelscan_nextpage(Relation rel,
ParallelBlockTableScanWorker pbscanwork,
ParallelBlockTableScanDesc pbscan)
{
BlockNumber page;
uint64 nallocated;
/*
* The logic below allocates block numbers out to parallel workers in a
* way that each worker will receive a set of consecutive block numbers to
* scan. Earlier versions of this would allocate the next highest block
* number to the next worker to call this function. This would generally
* result in workers never receiving consecutive block numbers. Some
* operating systems would not detect the sequential I/O pattern due to
* each backend being a different process which could result in poor
* performance due to inefficient or no readahead. To work around this
* issue, we now allocate a range of block numbers for each worker and
* when they come back for another block, we give them the next one in
* that range until the range is complete. When the worker completes the
* range of blocks we then allocate another range for it and return the
* first block number from that range.
*
* Here we name these ranges of blocks "chunks". The initial size of
* these chunks is determined in table_block_parallelscan_startblock_init
* based on the size of the relation. Towards the end of the scan, we
* start making reductions in the size of the chunks in order to attempt
* to divide the remaining work over all the workers as evenly as
* possible.
*
* Here pbscanwork is local worker memory. phsw_chunk_remaining tracks
* the number of blocks remaining in the chunk. When that reaches 0 then
* we must allocate a new chunk for the worker.
*
	 * phs_nallocated tracks how many blocks have been allocated to workers
	 * already.  When phs_nallocated >= phs_nblocks, all blocks have been
	 * allocated.
	 *
	 * Because we use an atomic fetch-and-add to fetch the current value, the
	 * phs_nallocated counter can exceed phs_nblocks: workers keep
	 * incrementing it when they try to allocate the next block even after
	 * all blocks have already been handed out.  The counter must be 64 bits
	 * wide because of that, to avoid wrapping around when phs_nblocks is
	 * close to 2^32.
*
* The actual block to return is calculated by adding the counter to the
* starting block number, modulo nblocks.
*/
/*
* First check if we have any remaining blocks in a previous chunk for
* this worker. We must consume all of the blocks from that before we
* allocate a new chunk to the worker.
*/
if (pbscanwork->phsw_chunk_remaining > 0)
{
/*
* Give them the next block in the range and update the remaining
* number of blocks.
*/
nallocated = ++pbscanwork->phsw_nallocated;
pbscanwork->phsw_chunk_remaining--;
}
else
{
/*
* When we've only got PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS chunks
		 * remaining in the scan, we halve the chunk size.  Since we reduce the
* chunk size here, we'll hit this again after doing
* PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS at the new size. After a few
* iterations of this, we'll end up doing the last few blocks with the
* chunk size set to 1.
*/
if (pbscanwork->phsw_chunk_size > 1 &&
pbscanwork->phsw_nallocated > pbscan->phs_nblocks -
(pbscanwork->phsw_chunk_size * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS))
pbscanwork->phsw_chunk_size >>= 1;
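
		/*
		 * For illustration (example numbers only): with a chunk size of 64,
		 * the halving starts once allocation has progressed beyond
		 * phs_nblocks - 64 * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS blocks; the
		 * chunk size then steps down 64 -> 32 -> 16 -> ... -> 1 as the scan
		 * nears its end, so that the final pieces of work are spread more
		 * evenly across the workers.
		 */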
nallocated = pbscanwork->phsw_nallocated =
pg_atomic_fetch_add_u64(&pbscan->phs_nallocated,
pbscanwork->phsw_chunk_size);
/*
* Set the remaining number of blocks in this chunk so that subsequent
* calls from this worker continue on with this chunk until it's done.
*/
pbscanwork->phsw_chunk_remaining = pbscanwork->phsw_chunk_size - 1;
}
if (nallocated >= pbscan->phs_nblocks)
page = InvalidBlockNumber; /* all blocks have been allocated */
else
page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;
/*
* Report scan location. Normally, we report the current page number.
* When we reach the end of the scan, though, we report the starting page,
	 * not the ending page, just so the starting positions for later scans
	 * don't slew backwards.  We only report the position at the end of the
* scan once, though: subsequent callers will report nothing.
*/
if (pbscan->base.phs_syncscan)
{
if (page != InvalidBlockNumber)
ss_report_location(rel, page);
else if (nallocated == pbscan->phs_nblocks)
ss_report_location(rel, pbscan->phs_startblock);
}
return page;
}
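
/*
 * Sketch of how a block-oriented AM's scan loop is expected to drive the two
 * functions above (per worker; block reading and tuple processing elided):
 *
 *		table_block_parallelscan_startblock_init(rel, pbscanwork, pbscan);
 *		while ((blkno = table_block_parallelscan_nextpage(rel, pbscanwork,
 *														  pbscan)) != InvalidBlockNumber)
 *		{
 *			... read block blkno and return its visible tuples ...
 *		}
 */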
/* ----------------------------------------------------------------------------
* Helper functions to implement relation sizing for block oriented AMs.
* ----------------------------------------------------------------------------
*/
/*
* table_block_relation_size
*
* If a table AM uses the various relation forks as the sole place where data
* is stored, and if it uses them in the expected manner (e.g. the actual data
 * is in the main fork rather than some other fork), it can use this
 * implementation of the relation_size callback rather than implementing its
 * own.
*/
uint64
table_block_relation_size(Relation rel, ForkNumber forkNumber)
{
uint64 nblocks = 0;
/* InvalidForkNumber indicates returning the size for all forks */
if (forkNumber == InvalidForkNumber)
{
		for (int i = 0; i <= MAX_FORKNUM; i++)
nblocks += smgrnblocks(RelationGetSmgr(rel), i);
}
else
nblocks = smgrnblocks(RelationGetSmgr(rel), forkNumber);
return nblocks * BLCKSZ;
}
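
/*
 * For example (sketch): a block-based AM meeting the conditions above can
 * simply set
 *
 *		.relation_size = table_block_relation_size,
 *
 * in its TableAmRoutine, as heap does.
 */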
/*
* table_block_relation_estimate_size
*
* This function can't be directly used as the implementation of the
* relation_estimate_size callback, because it has a few additional parameters.
* Instead, it is intended to be used as a helper function; the caller can
* pass through the arguments to its relation_estimate_size function plus the
* additional values required here.
*
* overhead_bytes_per_tuple should contain the approximate number of bytes
* of storage required to store a tuple above and beyond what is required for
* the tuple data proper. Typically, this would include things like the
* size of the tuple header and item pointer. This is only used for query
* planning, so a table AM where the value is not constant could choose to
* pass a "best guess".
*
* usable_bytes_per_page should contain the approximate number of bytes per
* page usable for tuple data, excluding the page header and any anticipated
* special space.
*/
void
table_block_relation_estimate_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples,
double *allvisfrac,
Size overhead_bytes_per_tuple,
Size usable_bytes_per_page)
{
BlockNumber curpages;
BlockNumber relpages;
double reltuples;
BlockNumber relallvisible;
double density;
/* it should have storage, so we can call the smgr */
curpages = RelationGetNumberOfBlocks(rel);
/* coerce values in pg_class to more desirable types */
relpages = (BlockNumber) rel->rd_rel->relpages;
reltuples = (double) rel->rd_rel->reltuples;
relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
/*
* HACK: if the relation has never yet been vacuumed, use a minimum size
* estimate of 10 pages. The idea here is to avoid assuming a
* newly-created table is really small, even if it currently is, because
* that may not be true once some data gets loaded into it. Once a vacuum
* or analyze cycle has been done on it, it's more reasonable to believe
* the size is somewhat stable.
*
* (Note that this is only an issue if the plan gets cached and used again
* after the table has been filled. What we're trying to avoid is using a
* nestloop-type plan on a table that has grown substantially since the
* plan was made. Normally, autovacuum/autoanalyze will occur once enough
* inserts have happened and cause cached-plan invalidation; but that
* doesn't happen instantaneously, and it won't happen at all for cases
* such as temporary tables.)
*
* We test "never vacuumed" by seeing whether reltuples < 0.
*
* If the table has inheritance children, we don't apply this heuristic.
* Totally empty parent tables are quite common, so we should be willing
* to believe that they are empty.
*/
if (curpages < 10 &&
reltuples < 0 &&
!rel->rd_rel->relhassubclass)
curpages = 10;
/* report estimated # pages */
*pages = curpages;
/* quick exit if rel is clearly empty */
if (curpages == 0)
{
*tuples = 0;
*allvisfrac = 0;
return;
}
/* estimate number of tuples from previous tuple density */
if (reltuples >= 0 && relpages > 0)
density = reltuples / (double) relpages;
else
{
/*
* When we have no data because the relation was never yet vacuumed,
* estimate tuple width from attribute datatypes. We assume here that
* the pages are completely full, which is OK for tables but is
* probably an overestimate for indexes. Fortunately
* get_relation_info() can clamp the overestimate to the parent
* table's size.
*
* Note: this code intentionally disregards alignment considerations,
* because (a) that would be gilding the lily considering how crude
* the estimate is, (b) it creates platform dependencies in the
* default plans which are kind of a headache for regression testing,
* and (c) different table AMs might use different padding schemes.
*/
int32 tuple_width;
tuple_width = get_rel_data_width(rel, attr_widths);
tuple_width += overhead_bytes_per_tuple;
/* note: integer division is intentional here */
density = usable_bytes_per_page / tuple_width;
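
		/*
		 * Worked example (illustrative numbers only): a computed data width
		 * of 28 bytes plus overhead_bytes_per_tuple = 28 (roughly what heap
		 * passes) gives tuple_width = 56; with usable_bytes_per_page = 8168,
		 * as for 8kB heap pages, density = 8168 / 56 = 145 tuples per page.
		 */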
}
*tuples = rint(density * (double) curpages);
/*
* We use relallvisible as-is, rather than scaling it up like we do for
* the pages and tuples counts, on the theory that any pages added since
* the last VACUUM are most likely not marked all-visible. But costsize.c
* wants it converted to a fraction.
*/
if (relallvisible == 0 || curpages <= 0)
*allvisfrac = 0;
else if ((double) relallvisible >= curpages)
*allvisfrac = 1;
else
*allvisfrac = (double) relallvisible / curpages;
}