/*
** 2008 August 05
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file implements the page cache.
*/
#include "sqliteInt.h"
/*
** A complete page cache is an instance of this structure. Every
** entry in the cache holds a single page of the database file. The
** btree layer only operates on the cached copy of the database pages.
**
** A page cache entry is "clean" if it exactly matches what is currently
** on disk. A page is "dirty" if it has been modified and needs to be
** persisted to disk.
**
** pDirty, pDirtyTail, pSynced:
** All dirty pages are linked into the doubly linked list using
** PgHdr.pDirtyNext and pDirtyPrev. The list is maintained in LRU order
** such that p was added to the list more recently than p->pDirtyNext.
** PCache.pDirty points to the first (newest) element in the list and
** pDirtyTail to the last (oldest).
**
** The PCache.pSynced variable is used to optimize searching for a dirty
** page to eject from the cache mid-transaction. It is better to eject
** a page that does not require a journal sync than one that does.
** Therefore, pSynced is maintained so that it *almost* always points
** to either the oldest page in the pDirty/pDirtyTail list that has a
** clear PGHDR_NEED_SYNC flag or to a page that is older than this one
** (so that the right page to eject can be found by following pDirtyPrev
** pointers).
*/
struct PCache {
PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */
PgHdr *pSynced; /* Last synced page in dirty page list */
i64 nRefSum; /* Sum of ref counts over all pages */
int szCache; /* Configured cache size */
int szSpill; /* Size before spilling occurs */
int szPage; /* Size of every page in this cache */
int szExtra; /* Size of extra space for each page */
u8 bPurgeable; /* True if pages are on backing store */
u8 eCreate; /* eCreate value for xFetch() */
int (*xStress)(void*,PgHdr*); /* Call to try make a page clean */
void *pStress; /* Argument to xStress */
sqlite3_pcache *pCache; /* Pluggable cache module */
};
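/*
** Illustration (not part of the build) of the dirty-list invariants
** described above, assuming pages were dirtied in the order 7, 3, 9:
**
**     pDirty --> [pg 9] <--> [pg 3] <--> [pg 7] <-- pDirtyTail
**                newest                  oldest
**
** pSynced normally references the oldest page in this list whose
** PGHDR_NEED_SYNC flag is clear, or some page even older than that.
*/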
/********************************** Test and Debug Logic **********************/
/*
** Debug tracing macros. Enable by changing the "0" to "1" and
** recompiling.
**
** When sqlite3PcacheTrace is 1, single line trace messages are issued.
** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries
** is displayed for many operations, resulting in a lot of output.
*/
#if defined(SQLITE_DEBUG) && 0
int sqlite3PcacheTrace = 2; /* 0: off 1: simple 2: cache dumps */
int sqlite3PcacheMxDump = 9999; /* Max cache entries for pcacheDump() */
# define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;}
static void pcachePageTrace(int i, sqlite3_pcache_page *pLower){
PgHdr *pPg;
unsigned char *a;
int j;
if( pLower==0 ){
printf("%3d: NULL\n", i);
}else{
pPg = (PgHdr*)pLower->pExtra;
printf("%3d: nRef %2lld flgs %02x data ", i, pPg->nRef, pPg->flags);
a = (unsigned char *)pLower->pBuf;
for(j=0; j<12; j++) printf("%02x", a[j]);
printf(" ptr %p\n", pPg);
}
}
static void pcacheDump(PCache *pCache){
int N;
int i;
sqlite3_pcache_page *pLower;
if( sqlite3PcacheTrace<2 ) return;
if( pCache->pCache==0 ) return;
N = sqlite3PcachePagecount(pCache);
if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump;
for(i=1; i<=N; i++){
pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0);
pcachePageTrace(i, pLower);
if( pLower && ((PgHdr*)pLower)->pPage==0 ){
sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0);
}
}
}
#else
# define pcacheTrace(X)
# define pcachePageTrace(PGNO, X)
# define pcacheDump(X)
#endif
/*
** Return 1 if pPg is on the dirty list for pCache. Return 0 if not.
** This routine runs inside of assert() statements only.
*/
#if defined(SQLITE_ENABLE_EXPENSIVE_ASSERT)
static int pageOnDirtyList(PCache *pCache, PgHdr *pPg){
PgHdr *p;
for(p=pCache->pDirty; p; p=p->pDirtyNext){
if( p==pPg ) return 1;
}
return 0;
}
static int pageNotOnDirtyList(PCache *pCache, PgHdr *pPg){
PgHdr *p;
for(p=pCache->pDirty; p; p=p->pDirtyNext){
if( p==pPg ) return 0;
}
return 1;
}
#else
# define pageOnDirtyList(A,B) 1
# define pageNotOnDirtyList(A,B) 1
#endif
/*
** Check invariants on a PgHdr entry. Return true if everything is OK.
** Return false if any invariant is violated.
**
** This routine is for use inside of assert() statements only. For
** example:
**
** assert( sqlite3PcachePageSanity(pPg) );
*/
#ifdef SQLITE_DEBUG
int sqlite3PcachePageSanity(PgHdr *pPg){
PCache *pCache;
assert( pPg!=0 );
assert( pPg->pgno>0 || pPg->pPager==0 ); /* Page number is 1 or more */
pCache = pPg->pCache;
assert( pCache!=0 ); /* Every page has an associated PCache */
if( pPg->flags & PGHDR_CLEAN ){
assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */
assert( pageNotOnDirtyList(pCache, pPg) );/* CLEAN pages not on dirtylist */
}else{
assert( (pPg->flags & PGHDR_DIRTY)!=0 );/* If not CLEAN must be DIRTY */
assert( pPg->pDirtyNext==0 || pPg->pDirtyNext->pDirtyPrev==pPg );
assert( pPg->pDirtyPrev==0 || pPg->pDirtyPrev->pDirtyNext==pPg );
assert( pPg->pDirtyPrev!=0 || pCache->pDirty==pPg );
assert( pageOnDirtyList(pCache, pPg) );
}
/* WRITEABLE pages must also be DIRTY */
if( pPg->flags & PGHDR_WRITEABLE ){
assert( pPg->flags & PGHDR_DIRTY ); /* WRITEABLE implies DIRTY */
}
/* NEED_SYNC can be set independently of WRITEABLE. This can happen,
** for example, when using the sqlite3PagerDontWrite() optimization:
** (1) Page X is journalled, and gets WRITEABLE and NEED_SYNC.
** (2) Page X moved to freelist, WRITEABLE is cleared
** (3) Page X reused, WRITEABLE is set again
** If NEED_SYNC had been cleared in step 2, then it would not be reset
** in step 3, and page might be written into the database without first
** syncing the rollback journal, which might cause corruption on a power
** loss.
**
** Another example is when the database page size is smaller than the
** disk sector size. When any page of a sector is journalled, all pages
** in that sector are marked NEED_SYNC even if they are still CLEAN, just
** in case they are later modified, since all pages in the same sector
** must be journalled and synced before any of those pages can be safely
** written.
*/
return 1;
}
#endif /* SQLITE_DEBUG */
/********************************** Linked List Management ********************/
/* Allowed values for second argument to pcacheManageDirtyList() */
#define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */
#define PCACHE_DIRTYLIST_ADD 2 /* Add pPage to the dirty list */
#define PCACHE_DIRTYLIST_FRONT 3 /* Move pPage to the front of the list */
/*
** Manage pPage's participation on the dirty list. Bits of the addRemove
** argument determine what operation to do. The 0x01 bit means first
** remove pPage from the dirty list. The 0x02 bit means add pPage back to
** the dirty list. Doing both moves pPage to the front of the dirty list.
*/
static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){
PCache *p = pPage->pCache;
pcacheTrace(("%p.DIRTYLIST.%s %d\n", p,
addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT",
pPage->pgno));
if( addRemove & PCACHE_DIRTYLIST_REMOVE ){
assert( pPage->pDirtyNext || pPage==p->pDirtyTail );
assert( pPage->pDirtyPrev || pPage==p->pDirty );
/* Update the PCache.pSynced variable if necessary. */
if( p->pSynced==pPage ){
p->pSynced = pPage->pDirtyPrev;
}
if( pPage->pDirtyNext ){
pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev;
}else{
assert( pPage==p->pDirtyTail );
p->pDirtyTail = pPage->pDirtyPrev;
}
if( pPage->pDirtyPrev ){
pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext;
}else{
/* If there are now no dirty pages in the cache, set eCreate to 2.
** This is an optimization that allows sqlite3PcacheFetch() to skip
** searching for a dirty page to eject from the cache when it might
** otherwise have to. */
assert( pPage==p->pDirty );
p->pDirty = pPage->pDirtyNext;
assert( p->bPurgeable || p->eCreate==2 );
if( p->pDirty==0 ){ /*OPTIMIZATION-IF-TRUE*/
assert( p->bPurgeable==0 || p->eCreate==1 );
p->eCreate = 2;
}
}
}
if( addRemove & PCACHE_DIRTYLIST_ADD ){
pPage->pDirtyPrev = 0;
pPage->pDirtyNext = p->pDirty;
if( pPage->pDirtyNext ){
assert( pPage->pDirtyNext->pDirtyPrev==0 );
pPage->pDirtyNext->pDirtyPrev = pPage;
}else{
p->pDirtyTail = pPage;
if( p->bPurgeable ){
assert( p->eCreate==2 );
p->eCreate = 1;
}
}
p->pDirty = pPage;
/* If pSynced is NULL and this page has a clear NEED_SYNC flag, set
** pSynced to point to it. Checking the NEED_SYNC flag is an
** optimization: if pSynced points to a page with the NEED_SYNC
** flag set, sqlite3PcacheFetchStress() searches through all newer
** entries of the dirty-list for a page with NEED_SYNC clear anyway. */
if( !p->pSynced
&& 0==(pPage->flags&PGHDR_NEED_SYNC) /*OPTIMIZATION-IF-FALSE*/
){
p->pSynced = pPage;
}
}
pcacheDump(p);
}
/*
** Wrapper around the pluggable cache's xUnpin method. If the cache is
** being used for an in-memory database, this function is a no-op.
*/
static void pcacheUnpin(PgHdr *p){
if( p->pCache->bPurgeable ){
pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno));
sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0);
pcacheDump(p->pCache);
}
}
/*
** Compute the number of pages of cache requested. p->szCache is the
** cache size requested by the "PRAGMA cache_size" statement.
*/
static int numberOfCachePages(PCache *p){
if( p->szCache>=0 ){
/* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the
** suggested cache size is set to N. */
return p->szCache;
}else{
i64 n;
/* IMPLEMENTATION-OF: R-59858-46238 If the argument N is negative, then the
** number of cache pages is adjusted to be a number of pages that would
** use approximately abs(N*1024) bytes of memory based on the current
** page size. */
n = ((-1024*(i64)p->szCache)/(p->szPage+p->szExtra));
if( n>1000000000 ) n = 1000000000;
return (int)n;
}
}
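/*
** Worked example (assumed figures, for illustration only): after
** "PRAGMA cache_size=-2000", szCache is -2000. With szPage==4096 and
** szExtra==136, the negative branch above computes
**
**     n = (1024*2000)/(4096+136) = 2048000/4232 = 483 pages (approximately).
*/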
/*************************************************** General Interfaces ******
**
** Initialize and shutdown the page cache subsystem. Neither of these
** functions are threadsafe.
*/
int sqlite3PcacheInitialize(void){
if( sqlite3GlobalConfig.pcache2.xInit==0 ){
/* IMPLEMENTATION-OF: R-26801-64137 If the xInit() method is NULL, then the
** built-in default page cache is used instead of the application defined
** page cache. */
sqlite3PCacheSetDefault();
assert( sqlite3GlobalConfig.pcache2.xInit!=0 );
}
return sqlite3GlobalConfig.pcache2.xInit(sqlite3GlobalConfig.pcache2.pArg);
}
void sqlite3PcacheShutdown(void){
if( sqlite3GlobalConfig.pcache2.xShutdown ){
/* IMPLEMENTATION-OF: R-26000-56589 The xShutdown() method may be NULL. */
sqlite3GlobalConfig.pcache2.xShutdown(sqlite3GlobalConfig.pcache2.pArg);
}
}
/*
** Return the size in bytes of a PCache object.
*/
int sqlite3PcacheSize(void){ return sizeof(PCache); }
/*
** Create a new PCache object. Storage space to hold the object
** has already been allocated and is passed in as the p pointer.
** The caller discovers how much space needs to be allocated by
** calling sqlite3PcacheSize().
**
** szExtra is some extra space allocated for each page. The first
** 8 bytes of the extra space will be zeroed as the page is allocated,
** but the remaining content will be uninitialized. Though it is opaque
** to this module, the extra space really ends up being the MemPage
** structure in the pager.
*/
int sqlite3PcacheOpen(
int szPage, /* Size of every page */
int szExtra, /* Extra space associated with each page */
int bPurgeable, /* True if pages are on backing store */
int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */
void *pStress, /* Argument to xStress */
PCache *p /* Preallocated space for the PCache */
){
memset(p, 0, sizeof(PCache));
p->szPage = 1;
p->szExtra = szExtra;
assert( szExtra>=8 ); /* First 8 bytes will be zeroed */
p->bPurgeable = bPurgeable;
p->eCreate = 2;
p->xStress = xStress;
p->pStress = pStress;
p->szCache = 100;
p->szSpill = 1;
pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n",p,szPage,bPurgeable));
return sqlite3PcacheSetPageSize(p, szPage);
}
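/*
** Minimal usage sketch (illustrative only, not part of the build): a
** caller such as the pager layer sizes the allocation with
** sqlite3PcacheSize() and then opens the cache in place. The stress
** callback "xSpill" and its argument "pClient" are hypothetical names:
**
**     PCache *pCache = sqlite3MallocZero(sqlite3PcacheSize());
**     if( pCache==0 ) return SQLITE_NOMEM;
**     rc = sqlite3PcacheOpen(szPage, szExtra, 1, xSpill, pClient, pCache);
**     if( rc!=SQLITE_OK ){ sqlite3_free(pCache); return rc; }
*/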
/*
** Change the page size for PCache object. The caller must ensure that there
** are no outstanding page references when this function is called.
*/
int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){
assert( pCache->nRefSum==0 && pCache->pDirty==0 );
if( pCache->szPage ){
sqlite3_pcache *pNew;
pNew = sqlite3GlobalConfig.pcache2.xCreate(
szPage, pCache->szExtra + ROUND8(sizeof(PgHdr)),
pCache->bPurgeable
);
if( pNew==0 ) return SQLITE_NOMEM_BKPT;
sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache));
if( pCache->pCache ){
sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
}
pCache->pCache = pNew;
pCache->szPage = szPage;
pcacheTrace(("%p.PAGESIZE %d\n",pCache,szPage));
}
return SQLITE_OK;
}
/*
** Try to obtain a page from the cache.
**
** This routine returns a pointer to an sqlite3_pcache_page object if
** such an object is already in cache, or if a new one is created.
** This routine returns a NULL pointer if the object was not in cache
** and could not be created.
**
** The createFlag should be 0 to check for existing pages and should
** be 3 (not 1, but 3) to try to create a new page.
**
** If the createFlag is 0, then NULL is always returned if the page
** is not already in the cache. If createFlag is 1, then a new page
** is created only if that can be done without spilling dirty pages
** and without exceeding the cache size limit.
**
** The caller needs to invoke sqlite3PcacheFetchFinish() to properly
** initialize the sqlite3_pcache_page object and convert it into a
** PgHdr object. The sqlite3PcacheFetch() and sqlite3PcacheFetchFinish()
** routines are split this way for performance reasons. When separated
** they can both (usually) operate without having to push values to
** the stack on entry and pop them back off on exit, which saves a
** lot of pushing and popping.
*/
sqlite3_pcache_page *sqlite3PcacheFetch(
PCache *pCache, /* Obtain the page from this cache */
Pgno pgno, /* Page number to obtain */
int createFlag /* If true, create page if it does not exist already */
){
int eCreate;
sqlite3_pcache_page *pRes;
assert( pCache!=0 );
assert( pCache->pCache!=0 );
assert( createFlag==3 || createFlag==0 );
assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) );
/* eCreate defines what to do if the page does not exist.
** 0 Do not allocate a new page. (createFlag==0)
** 1 Allocate a new page if doing so is inexpensive.
** (createFlag==1 AND bPurgeable AND pDirty)
** 2 Allocate a new page even if doing so is difficult.
** (createFlag==1 AND !(bPurgeable AND pDirty))
*/
eCreate = createFlag & pCache->eCreate;
assert( eCreate==0 || eCreate==1 || eCreate==2 );
assert( createFlag==0 || pCache->eCreate==eCreate );
assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) );
pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate);
pcacheTrace(("%p.FETCH %d%s (result: %p) ",pCache,pgno,
createFlag?" create":"",pRes));
pcachePageTrace(pgno, pRes);
return pRes;
}
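/*
** Typical acquisition sequence (a sketch of how a caller such as the
** pager layer might combine the three fetch routines; not part of the
** build):
**
**     sqlite3_pcache_page *pBase;
**     PgHdr *pPg;
**     pBase = sqlite3PcacheFetch(pCache, pgno, 3);
**     if( pBase==0 ){
**       rc = sqlite3PcacheFetchStress(pCache, pgno, &pBase);
**       if( rc!=SQLITE_OK || pBase==0 ) return rc ? rc : SQLITE_NOMEM;
**     }
**     pPg = sqlite3PcacheFetchFinish(pCache, pgno, pBase);
*/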
/*
** If the sqlite3PcacheFetch() routine is unable to allocate a new
** page because no clean pages are available for reuse and the cache
** size limit has been reached, then this routine can be invoked to
** try harder to allocate a page. This routine might invoke the stress
** callback to spill dirty pages to the journal. It will then try to
** allocate the new page and will only fail to allocate a new page on
** an OOM error.
**
** This routine should be invoked only after sqlite3PcacheFetch() fails.
*/
int sqlite3PcacheFetchStress(
PCache *pCache, /* Obtain the page from this cache */
Pgno pgno, /* Page number to obtain */
sqlite3_pcache_page **ppPage /* Write result here */
){
PgHdr *pPg;
if( pCache->eCreate==2 ) return 0;
if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){
/* Find a dirty page to write-out and recycle. First try to find a
** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
** cleared), but if that is not possible settle for any other
** unreferenced dirty page.
**
** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC
** flag is currently referenced, then the following may leave pSynced
** set incorrectly (pointing to other than the LRU page with NEED_SYNC
** cleared). This is Ok, as pSynced is just an optimization. */
for(pPg=pCache->pSynced;
pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC));
pPg=pPg->pDirtyPrev
);
pCache->pSynced = pPg;
if( !pPg ){
for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev);
}
if( pPg ){
int rc;
#ifdef SQLITE_LOG_CACHE_SPILL
sqlite3_log(SQLITE_FULL,
"spill page %d making room for %d - cache used: %d/%d",
pPg->pgno, pgno,
sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache),
numberOfCachePages(pCache));
#endif
pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno));
rc = pCache->xStress(pCache->pStress, pPg);
pcacheDump(pCache);
if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
return rc;
}
}
}
*ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2);
return *ppPage==0 ? SQLITE_NOMEM_BKPT : SQLITE_OK;
}
/*
** This is a helper routine for sqlite3PcacheFetchFinish()
**
** In the uncommon case where the page being fetched has not been
** initialized, this routine is invoked to do the initialization.
** This routine is broken out into a separate function since it
** requires extra stack manipulation that can be avoided in the common
** case.
*/
static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
PCache *pCache, /* Obtain the page from this cache */
Pgno pgno, /* Page number obtained */
sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */
){
PgHdr *pPgHdr;
assert( pPage!=0 );
pPgHdr = (PgHdr*)pPage->pExtra;
assert( pPgHdr->pPage==0 );
memset(&pPgHdr->pDirty, 0, sizeof(PgHdr) - offsetof(PgHdr,pDirty));
pPgHdr->pPage = pPage;
pPgHdr->pData = pPage->pBuf;
pPgHdr->pExtra = (void *)&pPgHdr[1];
memset(pPgHdr->pExtra, 0, 8);
pPgHdr->pCache = pCache;
pPgHdr->pgno = pgno;
pPgHdr->flags = PGHDR_CLEAN;
return sqlite3PcacheFetchFinish(pCache,pgno,pPage);
}
/*
** This routine converts the sqlite3_pcache_page object returned by
** sqlite3PcacheFetch() into an initialized PgHdr object. This routine
** must be called after sqlite3PcacheFetch() in order to get a usable
** result.
*/
PgHdr *sqlite3PcacheFetchFinish(
PCache *pCache, /* Obtain the page from this cache */
Pgno pgno, /* Page number obtained */
sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */
){
PgHdr *pPgHdr;
assert( pPage!=0 );
pPgHdr = (PgHdr *)pPage->pExtra;
if( !pPgHdr->pPage ){
return pcacheFetchFinishWithInit(pCache, pgno, pPage);
}
pCache->nRefSum++;
pPgHdr->nRef++;
assert( sqlite3PcachePageSanity(pPgHdr) );
return pPgHdr;
}
/*
** Decrement the reference count on a page. If the page is clean and the
** reference count drops to 0, then it is made eligible for recycling.
*/
void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
assert( p->nRef>0 );
p->pCache->nRefSum--;
if( (--p->nRef)==0 ){
if( p->flags&PGHDR_CLEAN ){
pcacheUnpin(p);
}else{
pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
assert( sqlite3PcachePageSanity(p) );
}
}
}
/*
** Increase the reference count of a supplied page by 1.
*/
void sqlite3PcacheRef(PgHdr *p){
assert(p->nRef>0);
assert( sqlite3PcachePageSanity(p) );
p->nRef++;
p->pCache->nRefSum++;
}
/*
** Drop a page from the cache. There must be exactly one reference to the
** page. This function deletes that reference, so after it returns the
** page pointed to by p is invalid.
*/
void sqlite3PcacheDrop(PgHdr *p){
assert( p->nRef==1 );
assert( sqlite3PcachePageSanity(p) );
if( p->flags&PGHDR_DIRTY ){
pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
}
p->pCache->nRefSum--;
sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1);
}
/*
** Make sure the page is marked as dirty. If it isn't dirty already,
** make it so.
*/
void sqlite3PcacheMakeDirty(PgHdr *p){
assert( p->nRef>0 );
assert( sqlite3PcachePageSanity(p) );
if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ /*OPTIMIZATION-IF-FALSE*/
p->flags &= ~PGHDR_DONT_WRITE;
if( p->flags & PGHDR_CLEAN ){
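    /* The page is currently CLEAN, so the exclusive-or below clears the
    ** CLEAN bit and sets the DIRTY bit in a single operation. */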
p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN);
pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno));
assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY );
pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD);
assert( sqlite3PcachePageSanity(p) );
}
assert( sqlite3PcachePageSanity(p) );
}
}
/*
** Make sure the page is marked as clean. If it isn't clean already,
** make it so.
*/
void sqlite3PcacheMakeClean(PgHdr *p){
assert( sqlite3PcachePageSanity(p) );
assert( (p->flags & PGHDR_DIRTY)!=0 );
assert( (p->flags & PGHDR_CLEAN)==0 );
pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
p->flags |= PGHDR_CLEAN;
pcacheTrace(("%p.CLEAN %d\n",p->pCache,p->pgno));
assert( sqlite3PcachePageSanity(p) );
if( p->nRef==0 ){
pcacheUnpin(p);
}
}
/*
** Make every page in the cache clean.
*/
void sqlite3PcacheCleanAll(PCache *pCache){
PgHdr *p;
pcacheTrace(("%p.CLEAN-ALL\n",pCache));
while( (p = pCache->pDirty)!=0 ){
sqlite3PcacheMakeClean(p);
}
}
/*
** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flags from all dirty pages.
*/
void sqlite3PcacheClearWritable(PCache *pCache){
PgHdr *p;
pcacheTrace(("%p.CLEAR-WRITEABLE\n",pCache));
for(p=pCache->pDirty; p; p=p->pDirtyNext){
p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
}
pCache->pSynced = pCache->pDirtyTail;
}
/*
** Clear the PGHDR_NEED_SYNC flag from all dirty pages.
*/
void sqlite3PcacheClearSyncFlags(PCache *pCache){
PgHdr *p;
for(p=pCache->pDirty; p; p=p->pDirtyNext){
p->flags &= ~PGHDR_NEED_SYNC;
}
pCache->pSynced = pCache->pDirtyTail;
}
/*
** Change the page number of page p to newPgno.
*/
void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
PCache *pCache = p->pCache;
sqlite3_pcache_page *pOther;
assert( p->nRef>0 );
assert( newPgno>0 );
assert( sqlite3PcachePageSanity(p) );
pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno));
pOther = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, newPgno, 0);
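/* If some other page is already cached under newPgno, remove it so that
** the xRekey() call below does not collide with it. The temporary
** reference taken here satisfies the nRef==1 precondition of
** sqlite3PcacheDrop(). */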
if( pOther ){
PgHdr *pXPage = (PgHdr*)pOther->pExtra;
assert( pXPage->nRef==0 );
pXPage->nRef++;
pCache->nRefSum++;
sqlite3PcacheDrop(pXPage);
}
sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno);
p->pgno = newPgno;
if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
assert( sqlite3PcachePageSanity(p) );
}
}
/*
** Drop every cache entry whose page number is greater than "pgno". The
** caller must ensure that there are no outstanding references to any pages
** other than page 1 with a page number greater than pgno.
**
** If there is a reference to page 1 and the pgno parameter passed to this
** function is 0, then the data area associated with page 1 is zeroed, but
** the page object is not dropped.
*/
void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
if( pCache->pCache ){
PgHdr *p;
PgHdr *pNext;
pcacheTrace(("%p.TRUNCATE %d\n",pCache,pgno));
for(p=pCache->pDirty; p; p=pNext){
pNext = p->pDirtyNext;
/* This routine never gets called with a positive pgno except right
** after sqlite3PcacheCleanAll(). So if there are dirty pages,
** it must be that pgno==0.
*/
assert( p->pgno>0 );
if( p->pgno>pgno ){
assert( p->flags&PGHDR_DIRTY );
sqlite3PcacheMakeClean(p);
}
}
if( pgno==0 && pCache->nRefSum ){
sqlite3_pcache_page *pPage1;
pPage1 = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache,1,0);
if( ALWAYS(pPage1) ){ /* Page 1 is always available in cache, because
** pCache->nRefSum>0 */
memset(pPage1->pBuf, 0, pCache->szPage);
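/* Setting pgno to 1 makes the xTruncate() call below discard every
** page except page 1, whose content was zeroed above. */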
pgno = 1;
}
}
sqlite3GlobalConfig.pcache2.xTruncate(pCache->pCache, pgno+1);
}
}
/*
** Close a cache.
*/
void sqlite3PcacheClose(PCache *pCache){
assert( pCache->pCache!=0 );
pcacheTrace(("%p.CLOSE\n",pCache));
sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
}
/*
** Discard the contents of the cache.
*/
void sqlite3PcacheClear(PCache *pCache){
sqlite3PcacheTruncate(pCache, 0);
}
/*
** Merge two lists of pages connected by pDirty and in pgno order.
** Do not bother fixing the pDirtyPrev pointers.
*/
static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){
PgHdr result, *pTail;
pTail = &result;
assert( pA!=0 && pB!=0 );
for(;;){
if( pA->pgno<pB->pgno ){
pTail->pDirty = pA;
pTail = pA;
pA = pA->pDirty;
if( pA==0 ){
pTail->pDirty = pB;
break;
}
}else{
pTail->pDirty = pB;
pTail = pB;
pB = pB->pDirty;
if( pB==0 ){
pTail->pDirty = pA;
break;
}
}
}
return result.pDirty;
}
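/*
** Example (illustrative only): merging the two pgno-sorted lists
** (1 -> 4 -> 7) and (2 -> 3 -> 9), each linked through pDirty, yields
** the single list (1 -> 2 -> 3 -> 4 -> 7 -> 9).
*/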
/*
** Sort the list of pages in ascending order by pgno. Pages are
** connected by pDirty pointers. The pDirtyPrev pointers are
** corrupted by this sort.
**
** Since there cannot be more than 2^31 distinct pages in a database,
** there cannot be more than 31 buckets required by the merge sorter.
** One extra bucket is added to catch overflow in case something
** ever changes to make the previous sentence incorrect.
*/
#define N_SORT_BUCKET 32
static PgHdr *pcacheSortDirtyList(PgHdr *pIn){
PgHdr *a[N_SORT_BUCKET], *p;
int i;
memset(a, 0, sizeof(a));
while( pIn ){
p = pIn;
pIn = p->pDirty;
p->pDirty = 0;
for(i=0; ALWAYS(i<N_SORT_BUCKET-1); i++){
if( a[i]==0 ){
a[i] = p;
break;
}else{
p = pcacheMergeDirtyList(a[i], p);
a[i] = 0;
}
}
if( NEVER(i==N_SORT_BUCKET-1) ){
/* To get here, there need to be 2^(N_SORT_BUCKET) elements in
** the input list. But that is impossible.
*/
a[i] = pcacheMergeDirtyList(a[i], p);
}
}
p = a[0];
for(i=1; i<N_SORT_BUCKET; i++){
if( a[i]==0 ) continue;
p = p ? pcacheMergeDirtyList(p, a[i]) : a[i];
}
return p;
}
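/*
** Example trace (illustrative only): feeding a 5-entry dirty list through
** the loop above leaves a[0] holding the 5th entry and a[2] holding a
** sorted 4-entry list (the 1st and 2nd entries merge into a 2-entry list,
** the 3rd and 4th into another, and those two merge into the 4-entry list
** stored in a[2]). The final loop then merges a[0] and a[2] into the
** fully sorted result.
*/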
/*
** Return a list of all dirty pages in the cache, sorted by page number.
*/
PgHdr *sqlite3PcacheDirtyList(PCache *pCache){
PgHdr *p;
for(p=pCache->pDirty; p; p=p->pDirtyNext){
p->pDirty = p->pDirtyNext;
}
return pcacheSortDirtyList(pCache->pDirty);
}
/*
** Return the total number of references to all pages held by the cache.
**
** This is not the total number of pages referenced, but the sum of the
** reference count for all pages.
*/
i64 sqlite3PcacheRefCount(PCache *pCache){
return pCache->nRefSum;
}
/*
** Return the number of references to the page supplied as an argument.
*/
i64 sqlite3PcachePageRefcount(PgHdr *p){
return p->nRef;
}
/*
** Return the total number of pages in the cache.
*/
int sqlite3PcachePagecount(PCache *pCache){
assert( pCache->pCache!=0 );
return sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache);
}
#ifdef SQLITE_TEST
/*
** Get the suggested cache-size value.
*/
int sqlite3PcacheGetCachesize(PCache *pCache){
return numberOfCachePages(pCache);
}
#endif
/*
** Set the suggested cache-size value.
*/
void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){
assert( pCache->pCache!=0 );
pCache->szCache = mxPage;
sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache,
numberOfCachePages(pCache));
}
/*
** Set the suggested cache-spill value. Make no changes if the
** argument is zero. Return the effective cache-spill size, which will
** be the larger of szSpill and szCache.
*/
int sqlite3PcacheSetSpillsize(PCache *p, int mxPage){
int res;
assert( p->pCache!=0 );
if( mxPage ){
if( mxPage<0 ){
mxPage = (int)((-1024*(i64)mxPage)/(p->szPage+p->szExtra));
}
p->szSpill = mxPage;
}
res = numberOfCachePages(p);
if( res<p->szSpill ) res = p->szSpill;
return res;
}
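/*
** Example (assumed figures, illustrative only): a negative argument is
** interpreted like a negative cache_size. With mxPage==-64, szPage==4096
** and szExtra==136, szSpill becomes (1024*64)/(4096+136) = 65536/4232,
** which truncates to 15 pages.
*/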
/*
** Free up as much memory as possible from the page cache.
*/
void sqlite3PcacheShrink(PCache *pCache){
assert( pCache->pCache!=0 );
sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache);
}
/*
** Return the size of the header added by this middleware layer
** in the page-cache hierarchy.
*/
int sqlite3HeaderSizePcache(void){ return ROUND8(sizeof(PgHdr)); }
/*
** Return the number of dirty pages currently in the cache, as a percentage
** of the configured cache size.
*/
int sqlite3PCachePercentDirty(PCache *pCache){
PgHdr *pDirty;
int nDirty = 0;
int nCache = numberOfCachePages(pCache);
for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext) nDirty++;
return nCache ? (int)(((i64)nDirty * 100) / nCache) : 0;
}
#ifdef SQLITE_DIRECT_OVERFLOW_READ
/*
** Return true if there are one or more dirty pages in the cache. Else false.
*/
int sqlite3PCacheIsDirty(PCache *pCache){
return (pCache->pDirty!=0);
}
#endif
#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
/*
** For all dirty pages currently in the cache, invoke the specified
** callback. This is only used if either the SQLITE_CHECK_PAGES or
** SQLITE_DEBUG macro is defined.
*/
void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){
PgHdr *pDirty;
for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){
xIter(pDirty);
}
}
#endif