/*-------------------------------------------------------------------------
*
* gistvacuum.c
* vacuuming routines for the postgres GiST index access method.
*
*
* Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/access/gist/gistvacuum.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/genam.h"
#include "access/gist_private.h"
#include "access/transam.h"
#include "commands/vacuum.h"
#include "lib/integerset.h"
#include "miscadmin.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
#include "utils/memutils.h"
/* Working state needed by gistbulkdelete */
typedef struct
{
IndexVacuumInfo *info;
IndexBulkDeleteResult *stats;
IndexBulkDeleteCallback callback;
void *callback_state;
GistNSN startNSN;
/*
 * These are used to memorize all internal and empty leaf pages encountered
 * during the first vacuum stage (the scan). The second stage uses them to
 * delete all the empty pages.
 */
IntegerSet *internal_page_set;
IntegerSet *empty_leaf_set;
MemoryContext page_set_context;
} GistVacState;
static void gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
IndexBulkDeleteCallback callback, void *callback_state);
static void gistvacuumpage(GistVacState *vstate, BlockNumber blkno,
BlockNumber orig_blkno);
static void gistvacuum_delete_empty_pages(IndexVacuumInfo *info,
GistVacState *vstate);
static bool gistdeletepage(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
Buffer buffer, OffsetNumber downlink,
Buffer leafBuffer);
/*
* VACUUM bulkdelete stage: remove index entries.
*/
IndexBulkDeleteResult *
gistbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
IndexBulkDeleteCallback callback, void *callback_state)
{
/* allocate stats if first time through, else re-use existing struct */
if (stats == NULL)
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
gistvacuumscan(info, stats, callback, callback_state);
return stats;
}
/*
* VACUUM cleanup stage: delete empty pages, and update index statistics.
*/
IndexBulkDeleteResult *
gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
{
/* No-op in ANALYZE ONLY mode */
if (info->analyze_only)
return stats;
/*
* If gistbulkdelete was called, we need not do anything, just return the
* stats from the latest gistbulkdelete call. If it wasn't called, we
* still need to do a pass over the index, to obtain index statistics.
*/
if (stats == NULL)
{
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
gistvacuumscan(info, stats, NULL, NULL);
}
/*
* It's quite possible for us to be fooled by concurrent page splits into
* double-counting some index tuples, so disbelieve any total that exceeds
* the underlying heap's count ... if we know that accurately. Otherwise
* this might just make matters worse.
*/
if (!info->estimated_count)
{
if (stats->num_index_tuples > info->num_heap_tuples)
stats->num_index_tuples = info->num_heap_tuples;
}
return stats;
}
/*
* gistvacuumscan --- scan the index for VACUUMing purposes
*
* This scans the index for leaf tuples that are deletable according to the
 * vacuum callback, and updates the stats. Both gistbulkdelete and
 * gistvacuumcleanup invoke this (the latter only if no gistbulkdelete call
 * occurred).
*
* This also makes note of any empty leaf pages, as well as all internal
* pages while looping over all index pages. After scanning all the pages, we
* remove the empty pages so that they can be reused. Any deleted pages are
* added directly to the free space map. (They should've been added there
* when they were originally deleted, already, but it's possible that the FSM
* was lost at a crash, for example.)
*
* The caller is responsible for initially allocating/zeroing a stats struct.
*/
static void
gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
IndexBulkDeleteCallback callback, void *callback_state)
{
Relation rel = info->index;
GistVacState vstate;
BlockNumber num_pages;
bool needLock;
BlockNumber blkno;
MemoryContext oldctx;
/*
* Reset fields that track information about the entire index now. This
* avoids double-counting in the case where a single VACUUM command
* requires multiple scans of the index.
*
* Avoid resetting the tuples_removed and pages_newly_deleted fields here,
* since they track information about the VACUUM command, and so must last
* across each call to gistvacuumscan().
*
* (Note that pages_free is treated as state about the whole index, not
* the current VACUUM. This is appropriate because RecordFreeIndexPage()
* calls are idempotent, and get repeated for the same deleted pages in
* some scenarios. The point for us is to track the number of recyclable
* pages in the index at the end of the VACUUM command.)
*/
stats->num_pages = 0;
stats->estimated_count = false;
stats->num_index_tuples = 0;
stats->pages_deleted = 0;
stats->pages_free = 0;
/*
* Create the integer sets to remember all the internal and the empty leaf
* pages in page_set_context. Internally, the integer set will remember
* this context so that the subsequent allocations for these integer sets
* will be done from the same context.
*/
vstate.page_set_context = GenerationContextCreate(CurrentMemoryContext,
"GiST VACUUM page set context",
16 * 1024);
oldctx = MemoryContextSwitchTo(vstate.page_set_context);
vstate.internal_page_set = intset_create();
vstate.empty_leaf_set = intset_create();
MemoryContextSwitchTo(oldctx);
/* Set up info to pass down to gistvacuumpage */
vstate.info = info;
vstate.stats = stats;
vstate.callback = callback;
vstate.callback_state = callback_state;
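/*
 * Remember the current insert position. Pages whose NSN is newer than this
 * were split after the scan started; gistvacuumpage() uses that to decide
 * whether it must revisit lower-numbered pages.
 */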
if (RelationNeedsWAL(rel))
vstate.startNSN = GetInsertRecPtr();
else
vstate.startNSN = gistGetFakeLSN(rel);
/*
* The outer loop iterates over all index pages, in physical order (we
* hope the kernel will cooperate in providing read-ahead for speed). It
* is critical that we visit all leaf pages, including ones added after we
* start the scan, else we might fail to delete some deletable tuples.
* Hence, we must repeatedly check the relation length. We must acquire
* the relation-extension lock while doing so to avoid a race condition:
* if someone else is extending the relation, there is a window where
* bufmgr/smgr have created a new all-zero page but it hasn't yet been
* write-locked by gistNewBuffer(). If we manage to scan such a page
* here, we'll improperly assume it can be recycled. Taking the lock
* synchronizes things enough to prevent a problem: either num_pages won't
* include the new page, or gistNewBuffer already has write lock on the
* buffer and it will be fully initialized before we can examine it. (See
* also vacuumlazy.c, which has the same issue.) Also, we need not worry
* if a page is added immediately after we look; the page splitting code
* already has write-lock on the left page before it adds a right page, so
* we must already have processed any tuples due to be moved into such a
* page.
*
* We can skip locking for new or temp relations, however, since no one
* else could be accessing them.
*/
needLock = !RELATION_IS_LOCAL(rel);
blkno = GIST_ROOT_BLKNO;
for (;;)
{
/* Get the current relation length */
if (needLock)
LockRelationForExtension(rel, ExclusiveLock);
num_pages = RelationGetNumberOfBlocks(rel);
if (needLock)
UnlockRelationForExtension(rel, ExclusiveLock);
/* Quit if we've scanned the whole relation */
if (blkno >= num_pages)
break;
/* Iterate over pages, then loop back to recheck length */
for (; blkno < num_pages; blkno++)
gistvacuumpage(&vstate, blkno, blkno);
}
/*
* If we found any recyclable pages (and recorded them in the FSM), then
* forcibly update the upper-level FSM pages to ensure that searchers can
* find them. It's possible that the pages were also found during
* previous scans and so this is a waste of time, but it's cheap enough
* relative to scanning the index that it shouldn't matter much, and
* making sure that free pages are available sooner not later seems
* worthwhile.
*
* Note that if no recyclable pages exist, we don't bother vacuuming the
* FSM at all.
*/
if (stats->pages_free > 0)
IndexFreeSpaceMapVacuum(rel);
/* update statistics */
stats->num_pages = num_pages;
/*
* If we saw any empty pages, try to unlink them from the tree so that
* they can be reused.
*/
gistvacuum_delete_empty_pages(info, &vstate);
/* we don't need the internal and empty page sets anymore */
MemoryContextDelete(vstate.page_set_context);
vstate.page_set_context = NULL;
vstate.internal_page_set = NULL;
vstate.empty_leaf_set = NULL;
}
/*
* gistvacuumpage --- VACUUM one page
*
* This processes a single page for gistbulkdelete(). In some cases we
* must go back and re-examine previously-scanned pages; this routine
* recurses when necessary to handle that case.
*
* blkno is the page to process. orig_blkno is the highest block number
* reached by the outer gistvacuumscan loop (the same as blkno, unless we
* are recursing to re-examine a previous page).
*/
static void
gistvacuumpage(GistVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno)
{
IndexVacuumInfo *info = vstate->info;
IndexBulkDeleteCallback callback = vstate->callback;
void *callback_state = vstate->callback_state;
Relation rel = info->index;
Buffer buffer;
Page page;
BlockNumber recurse_to;
restart:
recurse_to = InvalidBlockNumber;
/* call vacuum_delay_point while not holding any buffer lock */
vacuum_delay_point();
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
info->strategy);
/*
 * We are not going to stay here for a long time, so aggressively grab an
 * exclusive lock.
*/
LockBuffer(buffer, GIST_EXCLUSIVE);
page = (Page) BufferGetPage(buffer);
if (gistPageRecyclable(page))
{
/* Okay to recycle this page */
RecordFreeIndexPage(rel, blkno);
vstate->stats->pages_deleted++;
vstate->stats->pages_free++;
}
else if (GistPageIsDeleted(page))
{
/* Already deleted, but can't recycle yet */
vstate->stats->pages_deleted++;
}
else if (GistPageIsLeaf(page))
{
OffsetNumber todelete[MaxOffsetNumber];
int ntodelete = 0;
int nremain;
GISTPageOpaque opaque = GistPageGetOpaque(page);
OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
/*
* Check whether we need to recurse back to earlier pages. What we
* are concerned about is a page split that happened since we started
* the vacuum scan. If the split moved some tuples to a lower page
* then we might have missed 'em. If so, set up for tail recursion.
*
* This is similar to the checks we do during searches, when following
* a downlink, but we don't need to jump to higher-numbered pages,
* because we will process them later, anyway.
*/
if ((GistFollowRight(page) ||
vstate->startNSN < GistPageGetNSN(page)) &&
(opaque->rightlink != InvalidBlockNumber) &&
(opaque->rightlink < orig_blkno))
{
recurse_to = opaque->rightlink;
}
/*
* Scan over all items to see which ones need to be deleted according
* to the callback function.
*/
if (callback)
{
OffsetNumber off;
for (off = FirstOffsetNumber;
off <= maxoff;
off = OffsetNumberNext(off))
{
ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
if (callback(&(idxtuple->t_tid), callback_state))
todelete[ntodelete++] = off;
}
}
/*
* Apply any needed deletes. We issue just one WAL record per page,
* so as to minimize WAL traffic.
*/
if (ntodelete > 0)
{
START_CRIT_SECTION();
MarkBufferDirty(buffer);
PageIndexMultiDelete(page, todelete, ntodelete);
GistMarkTuplesDeleted(page);
if (RelationNeedsWAL(rel))
{
XLogRecPtr recptr;
recptr = gistXLogUpdate(buffer,
todelete, ntodelete,
NULL, 0, InvalidBuffer);
PageSetLSN(page, recptr);
}
else
PageSetLSN(page, gistGetFakeLSN(rel));
END_CRIT_SECTION();
vstate->stats->tuples_removed += ntodelete;
/* must recompute maxoff */
maxoff = PageGetMaxOffsetNumber(page);
}
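/* count the index tuples remaining on the page */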
nremain = maxoff - FirstOffsetNumber + 1;
if (nremain == 0)
{
/*
* The page is now completely empty. Remember its block number,
* so that we will try to delete the page in the second stage.
*
* Skip this when recursing, because IntegerSet requires that the
* values are added in ascending order. The next VACUUM will pick
* it up.
*/
if (blkno == orig_blkno)
intset_add_member(vstate->empty_leaf_set, blkno);
}
else
vstate->stats->num_index_tuples += nremain;
}
else
{
/*
* On an internal page, check for "invalid tuples", left behind by an
* incomplete page split on PostgreSQL 9.0 or below. These are not
* created by newer PostgreSQL versions, but unfortunately, there is
* no version number anywhere in a GiST index, so we don't know
* whether this index might still contain invalid tuples or not.
*/
OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
OffsetNumber off;
for (off = FirstOffsetNumber;
off <= maxoff;
off = OffsetNumberNext(off))
{
ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
if (GistTupleIsInvalid(idxtuple))
ereport(LOG,
(errmsg("index \"%s\" contains an inner tuple marked as invalid",
RelationGetRelationName(rel)),
errdetail("This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1."),
errhint("Please REINDEX it.")));
}
/*
* Remember the block number of this page, so that we can revisit it
* later in gistvacuum_delete_empty_pages(), when we search for
* parents of empty leaf pages.
*/
if (blkno == orig_blkno)
intset_add_member(vstate->internal_page_set, blkno);
}
UnlockReleaseBuffer(buffer);
/*
* This is really tail recursion, but if the compiler is too stupid to
* optimize it as such, we'd eat an uncomfortably large amount of stack
 * space per recursion level (due to the todelete[] array). A failure is
* improbable since the number of levels isn't likely to be large ... but
* just in case, let's hand-optimize into a loop.
*/
if (recurse_to != InvalidBlockNumber)
{
blkno = recurse_to;
goto restart;
}
}
/*
* Scan all internal pages, and try to delete their empty child pages.
*/
static void
gistvacuum_delete_empty_pages(IndexVacuumInfo *info, GistVacState *vstate)
{
Relation rel = info->index;
BlockNumber empty_pages_remaining;
uint64 blkno;
/*
* Rescan all inner pages to find those that have empty child pages.
*/
empty_pages_remaining = intset_num_entries(vstate->empty_leaf_set);
intset_begin_iterate(vstate->internal_page_set);
while (empty_pages_remaining > 0 &&
intset_iterate_next(vstate->internal_page_set, &blkno))
{
Buffer buffer;
Page page;
OffsetNumber off,
maxoff;
OffsetNumber todelete[MaxOffsetNumber];
BlockNumber leafs_to_delete[MaxOffsetNumber];
int ntodelete;
int deleted;
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, (BlockNumber) blkno,
RBM_NORMAL, info->strategy);
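/*
 * A share lock is enough to read the downlinks; the parent is re-locked
 * exclusively below, before any downlink is actually removed.
 */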
LockBuffer(buffer, GIST_SHARE);
page = (Page) BufferGetPage(buffer);
if (PageIsNew(page) || GistPageIsDeleted(page) || GistPageIsLeaf(page))
{
/*
* This page was an internal page earlier, but now it's something
* else. Shouldn't happen...
*/
Assert(false);
UnlockReleaseBuffer(buffer);
continue;
}
/*
* Scan all the downlinks, and see if any of them point to empty leaf
* pages.
*/
maxoff = PageGetMaxOffsetNumber(page);
ntodelete = 0;
for (off = FirstOffsetNumber;
off <= maxoff && ntodelete < maxoff - 1;
off = OffsetNumberNext(off))
{
ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
BlockNumber leafblk;
leafblk = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
if (intset_is_member(vstate->empty_leaf_set, leafblk))
{
leafs_to_delete[ntodelete] = leafblk;
todelete[ntodelete++] = off;
}
}
/*
* In order to avoid deadlock, child page must be locked before
* parent, so we must release the lock on the parent, lock the child,
 * and then re-acquire the lock on the parent. (And we wouldn't want to
* do I/O, while holding a lock, anyway.)
*
* At the instant that we're not holding a lock on the parent, the
* downlink might get moved by a concurrent insert, so we must
* re-check that it still points to the same child page after we have
* acquired both locks. Also, another backend might have inserted a
* tuple to the page, so that it is no longer empty. gistdeletepage()
* re-checks all these conditions.
*/
LockBuffer(buffer, GIST_UNLOCK);
deleted = 0;
for (int i = 0; i < ntodelete; i++)
{
Buffer leafbuf;
/*
* Don't remove the last downlink from the parent. That would
* confuse the insertion code.
*/
if (PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
break;
leafbuf = ReadBufferExtended(rel, MAIN_FORKNUM, leafs_to_delete[i],
RBM_NORMAL, info->strategy);
LockBuffer(leafbuf, GIST_EXCLUSIVE);
gistcheckpage(rel, leafbuf);
LockBuffer(buffer, GIST_EXCLUSIVE);
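/*
 * Each successful deletion has removed one downlink from the parent,
 * shifting the remaining ones one position down; compensate for that
 * when passing the offset of this downlink.
 */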
if (gistdeletepage(info, vstate->stats,
buffer, todelete[i] - deleted,
leafbuf))
deleted++;
LockBuffer(buffer, GIST_UNLOCK);
UnlockReleaseBuffer(leafbuf);
}
ReleaseBuffer(buffer);
/*
* We can stop the scan as soon as we have seen the downlinks, even if
* we were not able to remove them all.
*/
empty_pages_remaining -= ntodelete;
}
}
/*
* gistdeletepage takes a leaf page, and its parent, and tries to delete the
* leaf. Both pages must be locked.
*
* Even if the page was empty when we first saw it, a concurrent inserter might
* have added a tuple to it since. Similarly, the downlink might have moved.
* We re-check all the conditions, to make sure the page is still deletable,
* before modifying anything.
*
 * Returns true if the page was deleted, and false if a concurrent update
* prevented it.
*/
static bool
gistdeletepage(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
Buffer parentBuffer, OffsetNumber downlink,
Buffer leafBuffer)
{
Page parentPage = BufferGetPage(parentBuffer);
Page leafPage = BufferGetPage(leafBuffer);
ItemId iid;
IndexTuple idxtuple;
XLogRecPtr recptr;
FullTransactionId txid;
/*
* Check that the leaf is still empty and deletable.
*/
if (!GistPageIsLeaf(leafPage))
{
/* a leaf page should never become a non-leaf page */
Assert(false);
return false;
}
if (GistFollowRight(leafPage))
return false; /* don't mess with a concurrent page split */
if (PageGetMaxOffsetNumber(leafPage) != InvalidOffsetNumber)
return false; /* not empty anymore */
/*
* Ok, the leaf is deletable. Is the downlink in the parent page still
* valid? It might have been moved by a concurrent insert. We could try
 * to re-find it by scanning the page again, possibly moving right if the
 * page was split. But for now, let's keep it simple and just give up. The
* next VACUUM will pick it up.
*/
if (PageIsNew(parentPage) || GistPageIsDeleted(parentPage) ||
GistPageIsLeaf(parentPage))
{
/* shouldn't happen, internal pages are never deleted */
Assert(false);
return false;
}
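/*
 * Give up if the downlink is no longer on the page, or if it is the only
 * remaining downlink (we never remove the last one from a parent).
 */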
if (PageGetMaxOffsetNumber(parentPage) < downlink
|| PageGetMaxOffsetNumber(parentPage) <= FirstOffsetNumber)
return false;
iid = PageGetItemId(parentPage, downlink);
idxtuple = (IndexTuple) PageGetItem(parentPage, iid);
if (BufferGetBlockNumber(leafBuffer) !=
ItemPointerGetBlockNumber(&(idxtuple->t_tid)))
return false;
/*
* All good, proceed with the deletion.
*
* The page cannot be immediately recycled, because in-progress scans that
* saw the downlink might still visit it. Mark the page with the current
* next-XID counter, so that we know when it can be recycled. Once that
* XID becomes older than GlobalXmin, we know that all scans that are
* currently in progress must have ended. (That's much more conservative
* than needed, but let's keep it safe and simple.)
*/
txid = ReadNextFullTransactionId();
START_CRIT_SECTION();
/* mark the page as deleted */
MarkBufferDirty(leafBuffer);
GistPageSetDeleted(leafPage, txid);
stats->pages_newly_deleted++;
stats->pages_deleted++;
/* remove the downlink from the parent */
MarkBufferDirty(parentBuffer);
PageIndexTupleDelete(parentPage, downlink);
if (RelationNeedsWAL(info->index))
recptr = gistXLogPageDelete(leafBuffer, txid, parentBuffer, downlink);
else
recptr = gistGetFakeLSN(info->index);
PageSetLSN(parentPage, recptr);
PageSetLSN(leafPage, recptr);
END_CRIT_SECTION();
return true;
}