| OLD | NEW |
| 1 /* | 1 /* |
| 2 ** 2008 August 05 | 2 ** 2008 August 05 |
| 3 ** | 3 ** |
| 4 ** The author disclaims copyright to this source code. In place of | 4 ** The author disclaims copyright to this source code. In place of |
| 5 ** a legal notice, here is a blessing: | 5 ** a legal notice, here is a blessing: |
| 6 ** | 6 ** |
| 7 ** May you do good and not evil. | 7 ** May you do good and not evil. |
| 8 ** May you find forgiveness for yourself and forgive others. | 8 ** May you find forgiveness for yourself and forgive others. |
| 9 ** May you share freely, never taking more than you give. | 9 ** May you share freely, never taking more than you give. |
| 10 ** | 10 ** |
| 11 ************************************************************************* | 11 ************************************************************************* |
| 12 ** This file implements the page cache. | 12 ** This file implements the page cache. |
| 13 */ | 13 */ |
| 14 #include "sqliteInt.h" | 14 #include "sqliteInt.h" |
| 15 | 15 |
| 16 /* | 16 /* |
| 17 ** A complete page cache is an instance of this structure. | 17 ** A complete page cache is an instance of this structure. Every |
| 18 ** entry in the cache holds a single page of the database file. The |
| 19 ** btree layer only operates on the cached copy of the database pages. |
| 20 ** |
| 21 ** A page cache entry is "clean" if it exactly matches what is currently |
| 22 ** on disk. A page is "dirty" if it has been modified and needs to be |
| 23 ** persisted to disk. |
| 24 ** |
| 25 ** pDirty, pDirtyTail, pSynced: |
| 26 ** All dirty pages are linked into the doubly linked list using |
| 27 ** PgHdr.pDirtyNext and pDirtyPrev. The list is maintained in LRU order |
| 28 ** such that p was added to the list more recently than p->pDirtyNext. |
| 29 ** PCache.pDirty points to the first (newest) element in the list and |
| 30 ** pDirtyTail to the last (oldest). |
| 31 ** |
| 32 ** The PCache.pSynced variable is used to optimize searching for a dirty |
| 33 ** page to eject from the cache mid-transaction. It is better to eject |
| 34 ** a page that does not require a journal sync than one that does. |
| 35 ** Therefore, pSynced is maintained so that it *almost* always points |
| 36 ** to either the oldest page in the pDirty/pDirtyTail list that has a |
| 37 ** clear PGHDR_NEED_SYNC flag or to a page that is older than this one |
| 38 ** (so that the right page to eject can be found by following pDirtyPrev |
| 39 ** pointers). |
| 18 */ | 40 */ |
| 19 struct PCache { | 41 struct PCache { |
| 20 PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ | 42 PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ |
| 21 PgHdr *pSynced; /* Last synced page in dirty page list */ | 43 PgHdr *pSynced; /* Last synced page in dirty page list */ |
| 22 int nRefSum; /* Sum of ref counts over all pages */ | 44 int nRefSum; /* Sum of ref counts over all pages */ |
| 23 int szCache; /* Configured cache size */ | 45 int szCache; /* Configured cache size */ |
| 24 int szSpill; /* Size before spilling occurs */ | 46 int szSpill; /* Size before spilling occurs */ |
| 25 int szPage; /* Size of every page in this cache */ | 47 int szPage; /* Size of every page in this cache */ |
| 26 int szExtra; /* Size of extra space for each page */ | 48 int szExtra; /* Size of extra space for each page */ |
| 27 u8 bPurgeable; /* True if pages are on backing store */ | 49 u8 bPurgeable; /* True if pages are on backing store */ |
| 28 u8 eCreate; /* eCreate value for xFetch() */ | 50 u8 eCreate; /* eCreate value for xFetch() */ |
| 29 int (*xStress)(void*,PgHdr*); /* Call to try to make a page clean */ | 51 int (*xStress)(void*,PgHdr*); /* Call to try to make a page clean */ |
| 30 void *pStress; /* Argument to xStress */ | 52 void *pStress; /* Argument to xStress */ |
| 31 sqlite3_pcache *pCache; /* Pluggable cache module */ | 53 sqlite3_pcache *pCache; /* Pluggable cache module */ |
| 32 }; | 54 }; |
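The dirty-list discipline described in the comment above (pDirty at the newest end, pDirtyTail at the oldest, pSynced as an eviction hint) is easier to see in isolation. The following standalone sketch models the same idea with simplified, made-up names (Node, DirtyList, a single NEED_SYNC bit); it is an illustration of the scheme, not the real SQLite structures.

/* A standalone model of the pDirty/pDirtyTail/pSynced bookkeeping described
** above.  Names, flags and fields are simplified stand-ins, not the real
** SQLite structures. */
#include <stdio.h>

#define NEED_SYNC 0x01

typedef struct Node Node;
struct Node {
  int pgno;              /* Page number (illustrative) */
  int flags;             /* NEED_SYNC or 0 */
  Node *pDirtyNext;      /* Next OLDER entry */
  Node *pDirtyPrev;      /* Next NEWER entry */
};

typedef struct DirtyList DirtyList;
struct DirtyList {
  Node *pDirty;          /* Newest dirty page */
  Node *pDirtyTail;      /* Oldest dirty page */
  Node *pSynced;         /* Hint: oldest entry with NEED_SYNC clear, or older */
};

/* Link a newly dirtied page in at the newest end of the list. */
static void addFront(DirtyList *pList, Node *pPg){
  pPg->pDirtyNext = pList->pDirty;
  pPg->pDirtyPrev = 0;
  if( pList->pDirty ){
    pList->pDirty->pDirtyPrev = pPg;
  }else{
    pList->pDirtyTail = pPg;
  }
  pList->pDirty = pPg;
  if( pList->pSynced==0 && (pPg->flags & NEED_SYNC)==0 ){
    pList->pSynced = pPg;
  }
}

int main(void){
  DirtyList list = {0, 0, 0};
  Node a = {1, NEED_SYNC, 0, 0};
  Node b = {2, 0,         0, 0};
  Node c = {3, NEED_SYNC, 0, 0};
  Node *p;
  addFront(&list, &a);   /* oldest dirty page */
  addFront(&list, &b);
  addFront(&list, &c);   /* newest dirty page */

  /* Starting at the pSynced hint, walk toward newer entries (pDirtyPrev)
  ** looking for a page that does not need a journal sync.  This mirrors
  ** the eviction search in this file, minus the reference-count check. */
  for(p=list.pSynced; p && (p->flags & NEED_SYNC); p=p->pDirtyPrev){}
  printf("spill candidate: page %d\n", p ? p->pgno : 0);
  return 0;
}

Walking from the pSynced hint toward newer entries finds the oldest dirty page that can be spilled without first syncing the journal, which is the search sqlite3PcacheFetchStress() performs further down in this file.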
| 33 | 55 |
| 56 /********************************** Test and Debug Logic **********************/ |
| 57 /* |
| 58 ** Debug tracing macros. Enable by changing the "0" to "1" and |
| 59 ** recompiling. |
| 60 ** |
| 61 ** When sqlite3PcacheTrace is 1, single line trace messages are issued. |
| 62 ** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries |
| 63 ** is displayed for many operations, resulting in a lot of output. |
| 64 */ |
| 65 #if defined(SQLITE_DEBUG) && 0 |
| 66 int sqlite3PcacheTrace = 2; /* 0: off 1: simple 2: cache dumps */ |
| 67 int sqlite3PcacheMxDump = 9999; /* Max cache entries for pcacheDump() */ |
| 68 # define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;} |
| 69 void pcacheDump(PCache *pCache){ |
| 70 int N; |
| 71 int i, j; |
| 72 sqlite3_pcache_page *pLower; |
| 73 PgHdr *pPg; |
| 74 unsigned char *a; |
| 75 |
| 76 if( sqlite3PcacheTrace<2 ) return; |
| 77 if( pCache->pCache==0 ) return; |
| 78 N = sqlite3PcachePagecount(pCache); |
| 79 if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump; |
| 80 for(i=1; i<=N; i++){ |
| 81 pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0); |
| 82 if( pLower==0 ) continue; |
| 83 pPg = (PgHdr*)pLower->pExtra; |
| 84 printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags); |
| 85 a = (unsigned char *)pLower->pBuf; |
| 86 for(j=0; j<12; j++) printf("%02x", a[j]); |
| 87 printf("\n"); |
| 88 if( pPg->pPage==0 ){ |
| 89 sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0); |
| 90 } |
| 91 } |
| 92 } |
| 93 #else |
| 94 # define pcacheTrace(X) |
| 95 # define pcacheDump(X) |
| 96 #endif |
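The pcacheTrace() macro above relies on a double-parenthesis idiom: the entire printf-style argument list is passed as a single macro argument, so no variadic macro is needed. A minimal standalone sketch of that idiom, with made-up names (ENABLE_TRACE, traceOn, trace standing in for the SQLITE_DEBUG && 0 test and pcacheTrace):

/* A standalone sketch of the double-parenthesis tracing idiom used by
** pcacheTrace() above: the extra parentheses let one macro argument carry
** a complete printf-style argument list.  ENABLE_TRACE and trace() are
** made-up stand-ins for the real names. */
#include <stdio.h>

#define ENABLE_TRACE 1     /* Flip to 0 to compile all tracing away */

#if ENABLE_TRACE
static int traceOn = 1;
# define trace(X) if( traceOn ){ printf X; }
#else
# define trace(X)
#endif

int main(void){
  int pgno = 7;
  /* Expands to: if( traceOn ){ printf ("FETCH page %d\n", pgno); } */
  trace(("FETCH page %d\n", pgno));
  return 0;
}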
| 97 |
| 98 /* |
| 99 ** Check invariants on a PgHdr entry. Return true if everything is OK. |
| 100 ** Return false if any invariant is violated. |
| 101 ** |
| 102 ** This routine is for use inside of assert() statements only. For |
| 103 ** example: |
| 104 ** |
| 105 ** assert( sqlite3PcachePageSanity(pPg) ); |
| 106 */ |
| 107 #if SQLITE_DEBUG |
| 108 int sqlite3PcachePageSanity(PgHdr *pPg){ |
| 109 PCache *pCache; |
| 110 assert( pPg!=0 ); |
| 111 assert( pPg->pgno>0 || pPg->pPager==0 ); /* Page number is 1 or more */ |
| 112 pCache = pPg->pCache; |
| 113 assert( pCache!=0 ); /* Every page has an associated PCache */ |
| 114 if( pPg->flags & PGHDR_CLEAN ){ |
| 115 assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */ |
| 116 assert( pCache->pDirty!=pPg ); /* CLEAN pages not on dirty list */ |
| 117 assert( pCache->pDirtyTail!=pPg ); |
| 118 } |
| 119 /* WRITEABLE pages must also be DIRTY */ |
| 120 if( pPg->flags & PGHDR_WRITEABLE ){ |
| 121 assert( pPg->flags & PGHDR_DIRTY ); /* WRITEABLE implies DIRTY */ |
| 122 } |
| 123 /* NEED_SYNC can be set independently of WRITEABLE. This can happen, |
| 124 ** for example, when using the sqlite3PagerDontWrite() optimization: |
| 125 ** (1) Page X is journalled, and gets WRITEABLE and NEED_SYNC. |
| 126 ** (2) Page X moved to freelist, WRITEABLE is cleared |
| 127 ** (3) Page X reused, WRITEABLE is set again |
| 128 ** If NEED_SYNC had been cleared in step 2, then it would not be reset |
| 129 ** in step 3, and the page might be written into the database without first |
| 130 ** syncing the rollback journal, which might cause corruption on a power |
| 131 ** loss. |
| 132 ** |
| 133 ** Another example is when the database page size is smaller than the |
| 134 ** disk sector size. When any page of a sector is journalled, all pages |
| 135 ** in that sector are marked NEED_SYNC even if they are still CLEAN, just |
| 136 ** in case they are later modified, since all pages in the same sector |
| 137 ** must be journalled and synced before any of those pages can be safely |
| 138 ** written. |
| 139 */ |
| 140 return 1; |
| 141 } |
| 142 #endif /* SQLITE_DEBUG */ |
| 143 |
| 144 |
| 34 /********************************** Linked List Management ********************/ | 145 /********************************** Linked List Management ********************/ |
| 35 | 146 |
| 36 /* Allowed values for second argument to pcacheManageDirtyList() */ | 147 /* Allowed values for second argument to pcacheManageDirtyList() */ |
| 37 #define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */ | 148 #define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */ |
| 38 #define PCACHE_DIRTYLIST_ADD 2 /* Add pPage to the dirty list */ | 149 #define PCACHE_DIRTYLIST_ADD 2 /* Add pPage to the dirty list */ |
| 39 #define PCACHE_DIRTYLIST_FRONT 3 /* Move pPage to the front of the list */ | 150 #define PCACHE_DIRTYLIST_FRONT 3 /* Move pPage to the front of the list */ |
| 40 | 151 |
| 41 /* | 152 /* |
| 42 ** Manage pPage's participation on the dirty list. Bits of the addRemove | 153 ** Manage pPage's participation on the dirty list. Bits of the addRemove |
| 43 ** argument determine what operation to do. The 0x01 bit means first | 154 ** argument determine what operation to do. The 0x01 bit means first |
| 44 ** remove pPage from the dirty list. The 0x02 means add pPage back to | 155 ** remove pPage from the dirty list. The 0x02 means add pPage back to |
| 45 ** the dirty list. Doing both moves pPage to the front of the dirty list. | 156 ** the dirty list. Doing both moves pPage to the front of the dirty list. |
| 46 */ | 157 */ |
| 47 static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ | 158 static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ |
| 48 PCache *p = pPage->pCache; | 159 PCache *p = pPage->pCache; |
| 49 | 160 |
| 161 pcacheTrace(("%p.DIRTYLIST.%s %d\n", p, |
| 162 addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT", |
| 163 pPage->pgno)); |
| 50 if( addRemove & PCACHE_DIRTYLIST_REMOVE ){ | 164 if( addRemove & PCACHE_DIRTYLIST_REMOVE ){ |
| 51 assert( pPage->pDirtyNext || pPage==p->pDirtyTail ); | 165 assert( pPage->pDirtyNext || pPage==p->pDirtyTail ); |
| 52 assert( pPage->pDirtyPrev || pPage==p->pDirty ); | 166 assert( pPage->pDirtyPrev || pPage==p->pDirty ); |
| 53 | 167 |
| 54 /* Update the PCache.pSynced variable if necessary. */ | 168 /* Update the PCache.pSynced variable if necessary. */ |
| 55 if( p->pSynced==pPage ){ | 169 if( p->pSynced==pPage ){ |
| 56 PgHdr *pSynced = pPage->pDirtyPrev; | 170 p->pSynced = pPage->pDirtyPrev; |
| 57 while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){ | |
| 58 pSynced = pSynced->pDirtyPrev; | |
| 59 } | |
| 60 p->pSynced = pSynced; | |
| 61 } | 171 } |
| 62 | 172 |
| 63 if( pPage->pDirtyNext ){ | 173 if( pPage->pDirtyNext ){ |
| 64 pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev; | 174 pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev; |
| 65 }else{ | 175 }else{ |
| 66 assert( pPage==p->pDirtyTail ); | 176 assert( pPage==p->pDirtyTail ); |
| 67 p->pDirtyTail = pPage->pDirtyPrev; | 177 p->pDirtyTail = pPage->pDirtyPrev; |
| 68 } | 178 } |
| 69 if( pPage->pDirtyPrev ){ | 179 if( pPage->pDirtyPrev ){ |
| 70 pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext; | 180 pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext; |
| 71 }else{ | 181 }else{ |
| 182 /* If there are now no dirty pages in the cache, set eCreate to 2. |
| 183 ** This is an optimization that allows sqlite3PcacheFetch() to skip |
| 184 ** searching for a dirty page to eject from the cache when it might |
| 185 ** otherwise have to. */ |
| 72 assert( pPage==p->pDirty ); | 186 assert( pPage==p->pDirty ); |
| 73 p->pDirty = pPage->pDirtyNext; | 187 p->pDirty = pPage->pDirtyNext; |
| 74 if( p->pDirty==0 && p->bPurgeable ){ | 188 assert( p->bPurgeable || p->eCreate==2 ); |
| 75 assert( p->eCreate==1 ); | 189 if( p->pDirty==0 ){ /*OPTIMIZATION-IF-TRUE*/ |
| 190 assert( p->bPurgeable==0 || p->eCreate==1 ); |
| 76 p->eCreate = 2; | 191 p->eCreate = 2; |
| 77 } | 192 } |
| 78 } | 193 } |
| 79 pPage->pDirtyNext = 0; | 194 pPage->pDirtyNext = 0; |
| 80 pPage->pDirtyPrev = 0; | 195 pPage->pDirtyPrev = 0; |
| 81 } | 196 } |
| 82 if( addRemove & PCACHE_DIRTYLIST_ADD ){ | 197 if( addRemove & PCACHE_DIRTYLIST_ADD ){ |
| 83 assert( pPage->pDirtyNext==0 && pPage->pDirtyPrev==0 && p->pDirty!=pPage ); | 198 assert( pPage->pDirtyNext==0 && pPage->pDirtyPrev==0 && p->pDirty!=pPage ); |
| 84 | 199 |
| 85 pPage->pDirtyNext = p->pDirty; | 200 pPage->pDirtyNext = p->pDirty; |
| 86 if( pPage->pDirtyNext ){ | 201 if( pPage->pDirtyNext ){ |
| 87 assert( pPage->pDirtyNext->pDirtyPrev==0 ); | 202 assert( pPage->pDirtyNext->pDirtyPrev==0 ); |
| 88 pPage->pDirtyNext->pDirtyPrev = pPage; | 203 pPage->pDirtyNext->pDirtyPrev = pPage; |
| 89 }else{ | 204 }else{ |
| 90 p->pDirtyTail = pPage; | 205 p->pDirtyTail = pPage; |
| 91 if( p->bPurgeable ){ | 206 if( p->bPurgeable ){ |
| 92 assert( p->eCreate==2 ); | 207 assert( p->eCreate==2 ); |
| 93 p->eCreate = 1; | 208 p->eCreate = 1; |
| 94 } | 209 } |
| 95 } | 210 } |
| 96 p->pDirty = pPage; | 211 p->pDirty = pPage; |
| 97 if( !p->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){ | 212 |
| 213 /* If pSynced is NULL and this page has a clear NEED_SYNC flag, set |
| 214 ** pSynced to point to it. Checking the NEED_SYNC flag is an |
| 215 ** optimization, as if pSynced points to a page with the NEED_SYNC |
| 216 ** flag set sqlite3PcacheFetchStress() searches through all newer |
| 217 ** entries of the dirty-list for a page with NEED_SYNC clear anyway. */ |
| 218 if( !p->pSynced |
| 219 && 0==(pPage->flags&PGHDR_NEED_SYNC) /*OPTIMIZATION-IF-FALSE*/ |
| 220 ){ |
| 98 p->pSynced = pPage; | 221 p->pSynced = pPage; |
| 99 } | 222 } |
| 100 } | 223 } |
| 224 pcacheDump(p); |
| 101 } | 225 } |
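pcacheManageDirtyList() folds three operations into one routine by treating its second argument as a bit mask, with PCACHE_DIRTYLIST_FRONT simply being REMOVE|ADD, so "move to front" becomes "unlink, then re-link at the head". A minimal sketch of that dispatch pattern, with the actual list surgery replaced by printf() calls and all names made up:

/* A minimal sketch of the bit-flag dispatch used by pcacheManageDirtyList()
** above.  LIST_FRONT is simply LIST_REMOVE|LIST_ADD, so one routine covers
** all three operations. */
#include <stdio.h>

#define LIST_REMOVE 1   /* 0x01: unlink from the dirty list */
#define LIST_ADD    2   /* 0x02: link in at the head of the dirty list */
#define LIST_FRONT  3   /* 0x03: both, i.e. move to the head */

static void manageList(int pgno, unsigned char addRemove){
  if( addRemove & LIST_REMOVE ){
    printf("page %d: unlink from dirty list\n", pgno);
  }
  if( addRemove & LIST_ADD ){
    printf("page %d: link at head of dirty list\n", pgno);
  }
}

int main(void){
  manageList(5, LIST_ADD);     /* page 5 becomes dirty */
  manageList(5, LIST_FRONT);   /* unlink, then re-link at the head */
  manageList(5, LIST_REMOVE);  /* page 5 becomes clean */
  return 0;
}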
| 102 | 226 |
| 103 /* | 227 /* |
| 104 ** Wrapper around the pluggable caches xUnpin method. If the cache is | 228 ** Wrapper around the pluggable caches xUnpin method. If the cache is |
| 105 ** being used for an in-memory database, this function is a no-op. | 229 ** being used for an in-memory database, this function is a no-op. |
| 106 */ | 230 */ |
| 107 static void pcacheUnpin(PgHdr *p){ | 231 static void pcacheUnpin(PgHdr *p){ |
| 108 if( p->pCache->bPurgeable ){ | 232 if( p->pCache->bPurgeable ){ |
| 233 pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno)); |
| 109 sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0); | 234 sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0); |
| 235 pcacheDump(p->pCache); |
| 110 } | 236 } |
| 111 } | 237 } |
| 112 | 238 |
| 113 /* | 239 /* |
| 114 ** Compute the number of pages of cache requested. p->szCache is the | 240 ** Compute the number of pages of cache requested. p->szCache is the |
| 115 ** cache size requested by the "PRAGMA cache_size" statement. | 241 ** cache size requested by the "PRAGMA cache_size" statement. |
| 116 */ | 242 */ |
| 117 static int numberOfCachePages(PCache *p){ | 243 static int numberOfCachePages(PCache *p){ |
| 118 if( p->szCache>=0 ){ | 244 if( p->szCache>=0 ){ |
| 119 /* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the | 245 /* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the |
| (...skipping 31 matching lines...) |
| 151 /* | 277 /* |
| 152 ** Return the size in bytes of a PCache object. | 278 ** Return the size in bytes of a PCache object. |
| 153 */ | 279 */ |
| 154 int sqlite3PcacheSize(void){ return sizeof(PCache); } | 280 int sqlite3PcacheSize(void){ return sizeof(PCache); } |
| 155 | 281 |
| 156 /* | 282 /* |
| 157 ** Create a new PCache object. Storage space to hold the object | 283 ** Create a new PCache object. Storage space to hold the object |
| 158 ** has already been allocated and is passed in as the p pointer. | 284 ** has already been allocated and is passed in as the p pointer. |
| 159 ** The caller discovers how much space needs to be allocated by | 285 ** The caller discovers how much space needs to be allocated by |
| 160 ** calling sqlite3PcacheSize(). | 286 ** calling sqlite3PcacheSize(). |
| 287 ** |
| 288 ** szExtra is some extra space allocated for each page. The first |
| 289 ** 8 bytes of the extra space will be zeroed as the page is allocated, |
| 290 ** but remaining content will be uninitialized. Though it is opaque |
| 291 ** to this module, the extra space really ends up being the MemPage |
| 292 ** structure in the pager. |
| 161 */ | 293 */ |
| 162 int sqlite3PcacheOpen( | 294 int sqlite3PcacheOpen( |
| 163 int szPage, /* Size of every page */ | 295 int szPage, /* Size of every page */ |
| 164 int szExtra, /* Extra space associated with each page */ | 296 int szExtra, /* Extra space associated with each page */ |
| 165 int bPurgeable, /* True if pages are on backing store */ | 297 int bPurgeable, /* True if pages are on backing store */ |
| 166 int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */ | 298 int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */ |
| 167 void *pStress, /* Argument to xStress */ | 299 void *pStress, /* Argument to xStress */ |
| 168 PCache *p /* Preallocated space for the PCache */ | 300 PCache *p /* Preallocated space for the PCache */ |
| 169 ){ | 301 ){ |
| 170 memset(p, 0, sizeof(PCache)); | 302 memset(p, 0, sizeof(PCache)); |
| 171 p->szPage = 1; | 303 p->szPage = 1; |
| 172 p->szExtra = szExtra; | 304 p->szExtra = szExtra; |
| 305 assert( szExtra>=8 ); /* First 8 bytes will be zeroed */ |
| 173 p->bPurgeable = bPurgeable; | 306 p->bPurgeable = bPurgeable; |
| 174 p->eCreate = 2; | 307 p->eCreate = 2; |
| 175 p->xStress = xStress; | 308 p->xStress = xStress; |
| 176 p->pStress = pStress; | 309 p->pStress = pStress; |
| 177 p->szCache = 100; | 310 p->szCache = 100; |
| 178 p->szSpill = 1; | 311 p->szSpill = 1; |
| 312 pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n",p,szPage,bPurgeable)); |
| 179 return sqlite3PcacheSetPageSize(p, szPage); | 313 return sqlite3PcacheSetPageSize(p, szPage); |
| 180 } | 314 } |
| 181 | 315 |
| 182 /* | 316 /* |
| 183 ** Change the page size for PCache object. The caller must ensure that there | 317 ** Change the page size for PCache object. The caller must ensure that there |
| 184 ** are no outstanding page references when this function is called. | 318 ** are no outstanding page references when this function is called. |
| 185 */ | 319 */ |
| 186 int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){ | 320 int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){ |
| 187 assert( pCache->nRefSum==0 && pCache->pDirty==0 ); | 321 assert( pCache->nRefSum==0 && pCache->pDirty==0 ); |
| 188 if( pCache->szPage ){ | 322 if( pCache->szPage ){ |
| 189 sqlite3_pcache *pNew; | 323 sqlite3_pcache *pNew; |
| 190 pNew = sqlite3GlobalConfig.pcache2.xCreate( | 324 pNew = sqlite3GlobalConfig.pcache2.xCreate( |
| 191 szPage, pCache->szExtra + ROUND8(sizeof(PgHdr)), | 325 szPage, pCache->szExtra + ROUND8(sizeof(PgHdr)), |
| 192 pCache->bPurgeable | 326 pCache->bPurgeable |
| 193 ); | 327 ); |
| 194 if( pNew==0 ) return SQLITE_NOMEM; | 328 if( pNew==0 ) return SQLITE_NOMEM_BKPT; |
| 195 sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache)); | 329 sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache)); |
| 196 if( pCache->pCache ){ | 330 if( pCache->pCache ){ |
| 197 sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); | 331 sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); |
| 198 } | 332 } |
| 199 pCache->pCache = pNew; | 333 pCache->pCache = pNew; |
| 200 pCache->szPage = szPage; | 334 pCache->szPage = szPage; |
| 335 pcacheTrace(("%p.PAGESIZE %d\n",pCache,szPage)); |
| 201 } | 336 } |
| 202 return SQLITE_OK; | 337 return SQLITE_OK; |
| 203 } | 338 } |
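The xCreate() call above asks the pluggable cache for szPage bytes of page data plus pCache->szExtra + ROUND8(sizeof(PgHdr)) bytes of per-page extra space, so each entry carries the page image, the PgHdr header rounded to an 8-byte boundary, and the pager's extra area. A small sketch of that size computation; the byte counts are assumptions chosen for illustration, not values from a real build:

/* A sketch of the per-entry size implied by the xCreate() arguments above:
** szPage bytes of page data, plus sizeof(PgHdr) rounded up to 8 bytes, plus
** the pager's szExtra area.  The byte counts below are assumed. */
#include <stdio.h>

#define ROUND8(x) (((x)+7)&~7)

int main(void){
  int szPage  = 4096;    /* database page size */
  int szHdr   = 80;      /* assumed sizeof(PgHdr) */
  int szExtra = 136;     /* assumed pager extra space (the MemPage object) */
  int szEntry = szPage + ROUND8(szHdr) + szExtra;
  printf("each cache entry carries %d bytes (%d + %d + %d)\n",
         szEntry, szPage, ROUND8(szHdr), szExtra);
  return 0;
}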
| 204 | 339 |
| 205 /* | 340 /* |
| 206 ** Try to obtain a page from the cache. | 341 ** Try to obtain a page from the cache. |
| 207 ** | 342 ** |
| 208 ** This routine returns a pointer to an sqlite3_pcache_page object if | 343 ** This routine returns a pointer to an sqlite3_pcache_page object if |
| 209 ** such an object is already in cache, or if a new one is created. | 344 ** such an object is already in cache, or if a new one is created. |
| 210 ** This routine returns a NULL pointer if the object was not in cache | 345 ** This routine returns a NULL pointer if the object was not in cache |
| (...skipping 14 matching lines...) |
| 225 ** they can both (usually) operate without having to push values to | 360 ** they can both (usually) operate without having to push values to |
| 226 ** the stack on entry and pop them back off on exit, which saves a | 361 ** the stack on entry and pop them back off on exit, which saves a |
| 227 ** lot of pushing and popping. | 362 ** lot of pushing and popping. |
| 228 */ | 363 */ |
| 229 sqlite3_pcache_page *sqlite3PcacheFetch( | 364 sqlite3_pcache_page *sqlite3PcacheFetch( |
| 230 PCache *pCache, /* Obtain the page from this cache */ | 365 PCache *pCache, /* Obtain the page from this cache */ |
| 231 Pgno pgno, /* Page number to obtain */ | 366 Pgno pgno, /* Page number to obtain */ |
| 232 int createFlag /* If true, create page if it does not exist already */ | 367 int createFlag /* If true, create page if it does not exist already */ |
| 233 ){ | 368 ){ |
| 234 int eCreate; | 369 int eCreate; |
| 370 sqlite3_pcache_page *pRes; |
| 235 | 371 |
| 236 assert( pCache!=0 ); | 372 assert( pCache!=0 ); |
| 237 assert( pCache->pCache!=0 ); | 373 assert( pCache->pCache!=0 ); |
| 238 assert( createFlag==3 || createFlag==0 ); | 374 assert( createFlag==3 || createFlag==0 ); |
| 239 assert( pgno>0 ); | 375 assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) ); |
| 240 | 376 |
| 241 /* eCreate defines what to do if the page does not exist. | 377 /* eCreate defines what to do if the page does not exist. |
| 242 ** 0 Do not allocate a new page. (createFlag==0) | 378 ** 0 Do not allocate a new page. (createFlag==0) |
| 243 ** 1 Allocate a new page if doing so is inexpensive. | 379 ** 1 Allocate a new page if doing so is inexpensive. |
| 244 ** (createFlag==1 AND bPurgeable AND pDirty) | 380 ** (createFlag==1 AND bPurgeable AND pDirty) |
| 245 ** 2 Allocate a new page even it doing so is difficult. | 381 ** 2 Allocate a new page even it doing so is difficult. |
| 246 ** (createFlag==1 AND !(bPurgeable AND pDirty) | 382 ** (createFlag==1 AND !(bPurgeable AND pDirty) |
| 247 */ | 383 */ |
| 248 eCreate = createFlag & pCache->eCreate; | 384 eCreate = createFlag & pCache->eCreate; |
| 249 assert( eCreate==0 || eCreate==1 || eCreate==2 ); | 385 assert( eCreate==0 || eCreate==1 || eCreate==2 ); |
| 250 assert( createFlag==0 || pCache->eCreate==eCreate ); | 386 assert( createFlag==0 || pCache->eCreate==eCreate ); |
| 251 assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) ); | 387 assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) ); |
| 252 return sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); | 388 pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); |
| 389 pcacheTrace(("%p.FETCH %d%s (result: %p)\n",pCache,pgno, |
| 390 createFlag?" create":"",pRes)); |
| 391 return pRes; |
| 253 } | 392 } |
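The line eCreate = createFlag & pCache->eCreate above works because createFlag is always 0 or 3 (binary 11) and pCache->eCreate is always 1 or 2, so a single AND yields 0, 1, or 2 with no branching. A tiny sketch that prints the resulting truth table:

/* A small sketch of the "createFlag & pCache->eCreate" trick used in
** sqlite3PcacheFetch() above.  eCreate==1 means a purgeable cache with
** dirty pages; eCreate==2 means otherwise. */
#include <stdio.h>

int main(void){
  int createFlags[] = {0, 3};
  int cacheModes[]  = {1, 2};
  int i, j;
  for(i=0; i<2; i++){
    for(j=0; j<2; j++){
      printf("createFlag=%d  pCache->eCreate=%d  ->  eCreate=%d\n",
             createFlags[i], cacheModes[j], createFlags[i] & cacheModes[j]);
    }
  }
  return 0;
}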
| 254 | 393 |
| 255 /* | 394 /* |
| 256 ** If the sqlite3PcacheFetch() routine is unable to allocate a new | 395 ** If the sqlite3PcacheFetch() routine is unable to allocate a new |
| 257 ** page because new clean pages are available for reuse and the cache | 396 ** page because no clean pages are available for reuse and the cache |
| 258 ** size limit has been reached, then this routine can be invoked to | 397 ** size limit has been reached, then this routine can be invoked to |
| 259 ** try harder to allocate a page. This routine might invoke the stress | 398 ** try harder to allocate a page. This routine might invoke the stress |
| 260 ** callback to spill dirty pages to the journal. It will then try to | 399 ** callback to spill dirty pages to the journal. It will then try to |
| 261 ** allocate the new page and will only fail to allocate a new page on | 400 ** allocate the new page and will only fail to allocate a new page on |
| 262 ** an OOM error. | 401 ** an OOM error. |
| 263 ** | 402 ** |
| 264 ** This routine should be invoked only after sqlite3PcacheFetch() fails. | 403 ** This routine should be invoked only after sqlite3PcacheFetch() fails. |
| 265 */ | 404 */ |
| 266 int sqlite3PcacheFetchStress( | 405 int sqlite3PcacheFetchStress( |
| 267 PCache *pCache, /* Obtain the page from this cache */ | 406 PCache *pCache, /* Obtain the page from this cache */ |
| 268 Pgno pgno, /* Page number to obtain */ | 407 Pgno pgno, /* Page number to obtain */ |
| 269 sqlite3_pcache_page **ppPage /* Write result here */ | 408 sqlite3_pcache_page **ppPage /* Write result here */ |
| 270 ){ | 409 ){ |
| 271 PgHdr *pPg; | 410 PgHdr *pPg; |
| 272 if( pCache->eCreate==2 ) return 0; | 411 if( pCache->eCreate==2 ) return 0; |
| 273 | 412 |
| 274 if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){ | 413 if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){ |
| 275 /* Find a dirty page to write-out and recycle. First try to find a | 414 /* Find a dirty page to write-out and recycle. First try to find a |
| 276 ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC | 415 ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC |
| 277 ** cleared), but if that is not possible settle for any other | 416 ** cleared), but if that is not possible settle for any other |
| 278 ** unreferenced dirty page. | 417 ** unreferenced dirty page. |
| 279 */ | 418 ** |
| 419 ** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC |
| 420 ** flag is currently referenced, then the following may leave pSynced |
| 421 ** set incorrectly (pointing to other than the LRU page with NEED_SYNC |
| 422 ** cleared). This is Ok, as pSynced is just an optimization. */ |
| 280 for(pPg=pCache->pSynced; | 423 for(pPg=pCache->pSynced; |
| 281 pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); | 424 pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); |
| 282 pPg=pPg->pDirtyPrev | 425 pPg=pPg->pDirtyPrev |
| 283 ); | 426 ); |
| 284 pCache->pSynced = pPg; | 427 pCache->pSynced = pPg; |
| 285 if( !pPg ){ | 428 if( !pPg ){ |
| 286 for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev); | 429 for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev); |
| 287 } | 430 } |
| 288 if( pPg ){ | 431 if( pPg ){ |
| 289 int rc; | 432 int rc; |
| 290 #ifdef SQLITE_LOG_CACHE_SPILL | 433 #ifdef SQLITE_LOG_CACHE_SPILL |
| 291 sqlite3_log(SQLITE_FULL, | 434 sqlite3_log(SQLITE_FULL, |
| 292 "spill page %d making room for %d - cache used: %d/%d", | 435 "spill page %d making room for %d - cache used: %d/%d", |
| 293 pPg->pgno, pgno, | 436 pPg->pgno, pgno, |
| 294 sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), | 437 sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), |
| 295 numberOfCachePages(pCache)); | 438 numberOfCachePages(pCache)); |
| 296 #endif | 439 #endif |
| 440 pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno)); |
| 297 rc = pCache->xStress(pCache->pStress, pPg); | 441 rc = pCache->xStress(pCache->pStress, pPg); |
| 442 pcacheDump(pCache); |
| 298 if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ | 443 if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ |
| 299 return rc; | 444 return rc; |
| 300 } | 445 } |
| 301 } | 446 } |
| 302 } | 447 } |
| 303 *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2); | 448 *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2); |
| 304 return *ppPage==0 ? SQLITE_NOMEM : SQLITE_OK; | 449 return *ppPage==0 ? SQLITE_NOMEM_BKPT : SQLITE_OK; |
| 305 } | 450 } |
| 306 | 451 |
| 307 /* | 452 /* |
| 308 ** This is a helper routine for sqlite3PcacheFetchFinish() | 453 ** This is a helper routine for sqlite3PcacheFetchFinish() |
| 309 ** | 454 ** |
| 310 ** In the uncommon case where the page being fetched has not been | 455 ** In the uncommon case where the page being fetched has not been |
| 311 ** initialized, this routine is invoked to do the initialization. | 456 ** initialized, this routine is invoked to do the initialization. |
| 312 ** This routine is broken out into a separate function since it | 457 ** This routine is broken out into a separate function since it |
| 313 ** requires extra stack manipulation that can be avoided in the common | 458 ** requires extra stack manipulation that can be avoided in the common |
| 314 ** case. | 459 ** case. |
| 315 */ | 460 */ |
| 316 static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit( | 461 static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit( |
| 317 PCache *pCache, /* Obtain the page from this cache */ | 462 PCache *pCache, /* Obtain the page from this cache */ |
| 318 Pgno pgno, /* Page number obtained */ | 463 Pgno pgno, /* Page number obtained */ |
| 319 sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */ | 464 sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */ |
| 320 ){ | 465 ){ |
| 321 PgHdr *pPgHdr; | 466 PgHdr *pPgHdr; |
| 322 assert( pPage!=0 ); | 467 assert( pPage!=0 ); |
| 323 pPgHdr = (PgHdr*)pPage->pExtra; | 468 pPgHdr = (PgHdr*)pPage->pExtra; |
| 324 assert( pPgHdr->pPage==0 ); | 469 assert( pPgHdr->pPage==0 ); |
| 325 memset(pPgHdr, 0, sizeof(PgHdr)); | 470 memset(&pPgHdr->pDirty, 0, sizeof(PgHdr) - offsetof(PgHdr,pDirty)); |
| 326 pPgHdr->pPage = pPage; | 471 pPgHdr->pPage = pPage; |
| 327 pPgHdr->pData = pPage->pBuf; | 472 pPgHdr->pData = pPage->pBuf; |
| 328 pPgHdr->pExtra = (void *)&pPgHdr[1]; | 473 pPgHdr->pExtra = (void *)&pPgHdr[1]; |
| 329 memset(pPgHdr->pExtra, 0, pCache->szExtra); | 474 memset(pPgHdr->pExtra, 0, 8); |
| 330 pPgHdr->pCache = pCache; | 475 pPgHdr->pCache = pCache; |
| 331 pPgHdr->pgno = pgno; | 476 pPgHdr->pgno = pgno; |
| 332 pPgHdr->flags = PGHDR_CLEAN; | 477 pPgHdr->flags = PGHDR_CLEAN; |
| 333 return sqlite3PcacheFetchFinish(pCache,pgno,pPage); | 478 return sqlite3PcacheFetchFinish(pCache,pgno,pPage); |
| 334 } | 479 } |
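The memset() in pcacheFetchFinishWithInit() clears a PgHdr only from its pDirty member to the end, leaving the leading members to be set explicitly. A minimal sketch of that offsetof()-based partial-clear idiom, using a made-up Entry struct rather than the real PgHdr layout:

/* A sketch of the offsetof()-based partial clear used above: zero a
** structure only from a chosen member to the end, leaving the leading
** members alone.  Entry is a made-up struct, not the real PgHdr. */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

typedef struct Entry Entry;
struct Entry {
  void *pPage;           /* leading members are not touched by the clear */
  void *pData;
  Entry *pDirty;         /* everything from here to the end is zeroed */
  int flags;
  int nRef;
};

int main(void){
  Entry e;
  e.pPage = &e;
  e.pData = &e;
  e.pDirty = &e;
  e.flags = 0xff;
  e.nRef = 3;
  /* Clear only the tail of the struct, starting at the pDirty member. */
  memset(&e.pDirty, 0, sizeof(Entry) - offsetof(Entry, pDirty));
  printf("pData kept: %s, flags now %d, nRef now %d\n",
         e.pData ? "yes" : "no", e.flags, e.nRef);
  return 0;
}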
| 335 | 480 |
| 336 /* | 481 /* |
| 337 ** This routine converts the sqlite3_pcache_page object returned by | 482 ** This routine converts the sqlite3_pcache_page object returned by |
| 338 ** sqlite3PcacheFetch() into an initialized PgHdr object. This routine | 483 ** sqlite3PcacheFetch() into an initialized PgHdr object. This routine |
| 339 ** must be called after sqlite3PcacheFetch() in order to get a usable | 484 ** must be called after sqlite3PcacheFetch() in order to get a usable |
| 340 ** result. | 485 ** result. |
| 341 */ | 486 */ |
| 342 PgHdr *sqlite3PcacheFetchFinish( | 487 PgHdr *sqlite3PcacheFetchFinish( |
| 343 PCache *pCache, /* Obtain the page from this cache */ | 488 PCache *pCache, /* Obtain the page from this cache */ |
| 344 Pgno pgno, /* Page number obtained */ | 489 Pgno pgno, /* Page number obtained */ |
| 345 sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */ | 490 sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */ |
| 346 ){ | 491 ){ |
| 347 PgHdr *pPgHdr; | 492 PgHdr *pPgHdr; |
| 348 | 493 |
| 349 assert( pPage!=0 ); | 494 assert( pPage!=0 ); |
| 350 pPgHdr = (PgHdr *)pPage->pExtra; | 495 pPgHdr = (PgHdr *)pPage->pExtra; |
| 351 | 496 |
| 352 if( !pPgHdr->pPage ){ | 497 if( !pPgHdr->pPage ){ |
| 353 return pcacheFetchFinishWithInit(pCache, pgno, pPage); | 498 return pcacheFetchFinishWithInit(pCache, pgno, pPage); |
| 354 } | 499 } |
| 355 pCache->nRefSum++; | 500 pCache->nRefSum++; |
| 356 pPgHdr->nRef++; | 501 pPgHdr->nRef++; |
| 502 assert( sqlite3PcachePageSanity(pPgHdr) ); |
| 357 return pPgHdr; | 503 return pPgHdr; |
| 358 } | 504 } |
| 359 | 505 |
| 360 /* | 506 /* |
| 361 ** Decrement the reference count on a page. If the page is clean and the | 507 ** Decrement the reference count on a page. If the page is clean and the |
| 362 ** reference count drops to 0, then it is made eligible for recycling. | 508 ** reference count drops to 0, then it is made eligible for recycling. |
| 363 */ | 509 */ |
| 364 void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){ | 510 void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){ |
| 365 assert( p->nRef>0 ); | 511 assert( p->nRef>0 ); |
| 366 p->pCache->nRefSum--; | 512 p->pCache->nRefSum--; |
| 367 if( (--p->nRef)==0 ){ | 513 if( (--p->nRef)==0 ){ |
| 368 if( p->flags&PGHDR_CLEAN ){ | 514 if( p->flags&PGHDR_CLEAN ){ |
| 369 pcacheUnpin(p); | 515 pcacheUnpin(p); |
| 370 }else if( p->pDirtyPrev!=0 ){ | 516 }else if( p->pDirtyPrev!=0 ){ /*OPTIMIZATION-IF-FALSE*/ |
| 371 /* Move the page to the head of the dirty list. */ | 517 /* Move the page to the head of the dirty list. If p->pDirtyPrev==0, |
| 518 ** then page p is already at the head of the dirty list and the |
| 519 ** following call would be a no-op. Hence the OPTIMIZATION-IF-FALSE |
| 520 ** tag above. */ |
| 372 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); | 521 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); |
| 373 } | 522 } |
| 374 } | 523 } |
| 375 } | 524 } |
| 376 | 525 |
| 377 /* | 526 /* |
| 378 ** Increase the reference count of a supplied page by 1. | 527 ** Increase the reference count of a supplied page by 1. |
| 379 */ | 528 */ |
| 380 void sqlite3PcacheRef(PgHdr *p){ | 529 void sqlite3PcacheRef(PgHdr *p){ |
| 381 assert(p->nRef>0); | 530 assert(p->nRef>0); |
| 531 assert( sqlite3PcachePageSanity(p) ); |
| 382 p->nRef++; | 532 p->nRef++; |
| 383 p->pCache->nRefSum++; | 533 p->pCache->nRefSum++; |
| 384 } | 534 } |
| 385 | 535 |
| 386 /* | 536 /* |
| 387 ** Drop a page from the cache. There must be exactly one reference to the | 537 ** Drop a page from the cache. There must be exactly one reference to the |
| 388 ** page. This function deletes that reference, so after it returns the | 538 ** page. This function deletes that reference, so after it returns the |
| 389 ** page pointed to by p is invalid. | 539 ** page pointed to by p is invalid. |
| 390 */ | 540 */ |
| 391 void sqlite3PcacheDrop(PgHdr *p){ | 541 void sqlite3PcacheDrop(PgHdr *p){ |
| 392 assert( p->nRef==1 ); | 542 assert( p->nRef==1 ); |
| 543 assert( sqlite3PcachePageSanity(p) ); |
| 393 if( p->flags&PGHDR_DIRTY ){ | 544 if( p->flags&PGHDR_DIRTY ){ |
| 394 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); | 545 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); |
| 395 } | 546 } |
| 396 p->pCache->nRefSum--; | 547 p->pCache->nRefSum--; |
| 397 sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1); | 548 sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1); |
| 398 } | 549 } |
| 399 | 550 |
| 400 /* | 551 /* |
| 401 ** Make sure the page is marked as dirty. If it isn't dirty already, | 552 ** Make sure the page is marked as dirty. If it isn't dirty already, |
| 402 ** make it so. | 553 ** make it so. |
| 403 */ | 554 */ |
| 404 void sqlite3PcacheMakeDirty(PgHdr *p){ | 555 void sqlite3PcacheMakeDirty(PgHdr *p){ |
| 405 assert( p->nRef>0 ); | 556 assert( p->nRef>0 ); |
| 406 if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ | 557 assert( sqlite3PcachePageSanity(p) ); |
| 558 if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ /*OPTIMIZATION-IF-FALSE*/ |
| 407 p->flags &= ~PGHDR_DONT_WRITE; | 559 p->flags &= ~PGHDR_DONT_WRITE; |
| 408 if( p->flags & PGHDR_CLEAN ){ | 560 if( p->flags & PGHDR_CLEAN ){ |
| 409 p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN); | 561 p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN); |
| 562 pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno)); |
| 410 assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY ); | 563 assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY ); |
| 411 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD); | 564 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD); |
| 412 } | 565 } |
| 566 assert( sqlite3PcachePageSanity(p) ); |
| 413 } | 567 } |
| 414 } | 568 } |
| 415 | 569 |
| 416 /* | 570 /* |
| 417 ** Make sure the page is marked as clean. If it isn't clean already, | 571 ** Make sure the page is marked as clean. If it isn't clean already, |
| 418 ** make it so. | 572 ** make it so. |
| 419 */ | 573 */ |
| 420 void sqlite3PcacheMakeClean(PgHdr *p){ | 574 void sqlite3PcacheMakeClean(PgHdr *p){ |
| 421 if( (p->flags & PGHDR_DIRTY) ){ | 575 assert( sqlite3PcachePageSanity(p) ); |
| 576 if( ALWAYS((p->flags & PGHDR_DIRTY)!=0) ){ |
| 422 assert( (p->flags & PGHDR_CLEAN)==0 ); | 577 assert( (p->flags & PGHDR_CLEAN)==0 ); |
| 423 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); | 578 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); |
| 424 p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE); | 579 p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE); |
| 425 p->flags |= PGHDR_CLEAN; | 580 p->flags |= PGHDR_CLEAN; |
| 581 pcacheTrace(("%p.CLEAN %d\n",p->pCache,p->pgno)); |
| 582 assert( sqlite3PcachePageSanity(p) ); |
| 426 if( p->nRef==0 ){ | 583 if( p->nRef==0 ){ |
| 427 pcacheUnpin(p); | 584 pcacheUnpin(p); |
| 428 } | 585 } |
| 429 } | 586 } |
| 430 } | 587 } |
| 431 | 588 |
| 432 /* | 589 /* |
| 433 ** Make every page in the cache clean. | 590 ** Make every page in the cache clean. |
| 434 */ | 591 */ |
| 435 void sqlite3PcacheCleanAll(PCache *pCache){ | 592 void sqlite3PcacheCleanAll(PCache *pCache){ |
| 436 PgHdr *p; | 593 PgHdr *p; |
| 594 pcacheTrace(("%p.CLEAN-ALL\n",pCache)); |
| 437 while( (p = pCache->pDirty)!=0 ){ | 595 while( (p = pCache->pDirty)!=0 ){ |
| 438 sqlite3PcacheMakeClean(p); | 596 sqlite3PcacheMakeClean(p); |
| 439 } | 597 } |
| 440 } | 598 } |
| 441 | 599 |
| 442 /* | 600 /* |
| 601 ** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages. |
| 602 */ |
| 603 void sqlite3PcacheClearWritable(PCache *pCache){ |
| 604 PgHdr *p; |
| 605 pcacheTrace(("%p.CLEAR-WRITEABLE\n",pCache)); |
| 606 for(p=pCache->pDirty; p; p=p->pDirtyNext){ |
| 607 p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE); |
| 608 } |
| 609 pCache->pSynced = pCache->pDirtyTail; |
| 610 } |
| 611 |
| 612 /* |
| 443 ** Clear the PGHDR_NEED_SYNC flag from all dirty pages. | 613 ** Clear the PGHDR_NEED_SYNC flag from all dirty pages. |
| 444 */ | 614 */ |
| 445 void sqlite3PcacheClearSyncFlags(PCache *pCache){ | 615 void sqlite3PcacheClearSyncFlags(PCache *pCache){ |
| 446 PgHdr *p; | 616 PgHdr *p; |
| 447 for(p=pCache->pDirty; p; p=p->pDirtyNext){ | 617 for(p=pCache->pDirty; p; p=p->pDirtyNext){ |
| 448 p->flags &= ~PGHDR_NEED_SYNC; | 618 p->flags &= ~PGHDR_NEED_SYNC; |
| 449 } | 619 } |
| 450 pCache->pSynced = pCache->pDirtyTail; | 620 pCache->pSynced = pCache->pDirtyTail; |
| 451 } | 621 } |
| 452 | 622 |
| 453 /* | 623 /* |
| 454 ** Change the page number of page p to newPgno. | 624 ** Change the page number of page p to newPgno. |
| 455 */ | 625 */ |
| 456 void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){ | 626 void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){ |
| 457 PCache *pCache = p->pCache; | 627 PCache *pCache = p->pCache; |
| 458 assert( p->nRef>0 ); | 628 assert( p->nRef>0 ); |
| 459 assert( newPgno>0 ); | 629 assert( newPgno>0 ); |
| 630 assert( sqlite3PcachePageSanity(p) ); |
| 631 pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno)); |
| 460 sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno); | 632 sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno); |
| 461 p->pgno = newPgno; | 633 p->pgno = newPgno; |
| 462 if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){ | 634 if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){ |
| 463 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); | 635 pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); |
| 464 } | 636 } |
| 465 } | 637 } |
| 466 | 638 |
| 467 /* | 639 /* |
| 468 ** Drop every cache entry whose page number is greater than "pgno". The | 640 ** Drop every cache entry whose page number is greater than "pgno". The |
| 469 ** caller must ensure that there are no outstanding references to any pages | 641 ** caller must ensure that there are no outstanding references to any pages |
| 470 ** other than page 1 with a page number greater than pgno. | 642 ** other than page 1 with a page number greater than pgno. |
| 471 ** | 643 ** |
| 472 ** If there is a reference to page 1 and the pgno parameter passed to this | 644 ** If there is a reference to page 1 and the pgno parameter passed to this |
| 473 ** function is 0, then the data area associated with page 1 is zeroed, but | 645 ** function is 0, then the data area associated with page 1 is zeroed, but |
| 474 ** the page object is not dropped. | 646 ** the page object is not dropped. |
| 475 */ | 647 */ |
| 476 void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ | 648 void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ |
| 477 if( pCache->pCache ){ | 649 if( pCache->pCache ){ |
| 478 PgHdr *p; | 650 PgHdr *p; |
| 479 PgHdr *pNext; | 651 PgHdr *pNext; |
| 652 pcacheTrace(("%p.TRUNCATE %d\n",pCache,pgno)); |
| 480 for(p=pCache->pDirty; p; p=pNext){ | 653 for(p=pCache->pDirty; p; p=pNext){ |
| 481 pNext = p->pDirtyNext; | 654 pNext = p->pDirtyNext; |
| 482 /* This routine never gets called with a positive pgno except right | 655 /* This routine never gets called with a positive pgno except right |
| 483 ** after sqlite3PcacheCleanAll(). So if there are dirty pages, | 656 ** after sqlite3PcacheCleanAll(). So if there are dirty pages, |
| 484 ** it must be that pgno==0. | 657 ** it must be that pgno==0. |
| 485 */ | 658 */ |
| 486 assert( p->pgno>0 ); | 659 assert( p->pgno>0 ); |
| 487 if( ALWAYS(p->pgno>pgno) ){ | 660 if( p->pgno>pgno ){ |
| 488 assert( p->flags&PGHDR_DIRTY ); | 661 assert( p->flags&PGHDR_DIRTY ); |
| 489 sqlite3PcacheMakeClean(p); | 662 sqlite3PcacheMakeClean(p); |
| 490 } | 663 } |
| 491 } | 664 } |
| 492 if( pgno==0 && pCache->nRefSum ){ | 665 if( pgno==0 && pCache->nRefSum ){ |
| 493 sqlite3_pcache_page *pPage1; | 666 sqlite3_pcache_page *pPage1; |
| 494 pPage1 = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache,1,0); | 667 pPage1 = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache,1,0); |
| 495 if( ALWAYS(pPage1) ){ /* Page 1 is always available in cache, because | 668 if( ALWAYS(pPage1) ){ /* Page 1 is always available in cache, because |
| 496 ** pCache->nRefSum>0 */ | 669 ** pCache->nRefSum>0 */ |
| 497 memset(pPage1->pBuf, 0, pCache->szPage); | 670 memset(pPage1->pBuf, 0, pCache->szPage); |
| 498 pgno = 1; | 671 pgno = 1; |
| 499 } | 672 } |
| 500 } | 673 } |
| 501 sqlite3GlobalConfig.pcache2.xTruncate(pCache->pCache, pgno+1); | 674 sqlite3GlobalConfig.pcache2.xTruncate(pCache->pCache, pgno+1); |
| 502 } | 675 } |
| 503 } | 676 } |
| 504 | 677 |
| 505 /* | 678 /* |
| 506 ** Close a cache. | 679 ** Close a cache. |
| 507 */ | 680 */ |
| 508 void sqlite3PcacheClose(PCache *pCache){ | 681 void sqlite3PcacheClose(PCache *pCache){ |
| 509 assert( pCache->pCache!=0 ); | 682 assert( pCache->pCache!=0 ); |
| 683 pcacheTrace(("%p.CLOSE\n",pCache)); |
| 510 sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); | 684 sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); |
| 511 } | 685 } |
| 512 | 686 |
| 513 /* | 687 /* |
| 514 ** Discard the contents of the cache. | 688 ** Discard the contents of the cache. |
| 515 */ | 689 */ |
| 516 void sqlite3PcacheClear(PCache *pCache){ | 690 void sqlite3PcacheClear(PCache *pCache){ |
| 517 sqlite3PcacheTruncate(pCache, 0); | 691 sqlite3PcacheTruncate(pCache, 0); |
| 518 } | 692 } |
| 519 | 693 |
| 520 /* | 694 /* |
| 521 ** Merge two lists of pages connected by pDirty and in pgno order. | 695 ** Merge two lists of pages connected by pDirty and in pgno order. |
| 522 ** Do not both fixing the pDirtyPrev pointers. | 696 ** Do not bother fixing the pDirtyPrev pointers. |
| 523 */ | 697 */ |
| 524 static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){ | 698 static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){ |
| 525 PgHdr result, *pTail; | 699 PgHdr result, *pTail; |
| 526 pTail = &result; | 700 pTail = &result; |
| 527 while( pA && pB ){ | 701 assert( pA!=0 && pB!=0 ); |
| 702 for(;;){ |
| 528 if( pA->pgno<pB->pgno ){ | 703 if( pA->pgno<pB->pgno ){ |
| 529 pTail->pDirty = pA; | 704 pTail->pDirty = pA; |
| 530 pTail = pA; | 705 pTail = pA; |
| 531 pA = pA->pDirty; | 706 pA = pA->pDirty; |
| 707 if( pA==0 ){ |
| 708 pTail->pDirty = pB; |
| 709 break; |
| 710 } |
| 532 }else{ | 711 }else{ |
| 533 pTail->pDirty = pB; | 712 pTail->pDirty = pB; |
| 534 pTail = pB; | 713 pTail = pB; |
| 535 pB = pB->pDirty; | 714 pB = pB->pDirty; |
| 715 if( pB==0 ){ |
| 716 pTail->pDirty = pA; |
| 717 break; |
| 718 } |
| 536 } | 719 } |
| 537 } | 720 } |
| 538 if( pA ){ | |
| 539 pTail->pDirty = pA; | |
| 540 }else if( pB ){ | |
| 541 pTail->pDirty = pB; | |
| 542 }else{ | |
| 543 pTail->pDirty = 0; | |
| 544 } | |
| 545 return result.pDirty; | 721 return result.pDirty; |
| 546 } | 722 } |
| 547 | 723 |
| 548 /* | 724 /* |
| 549 ** Sort the list of pages in ascending order by pgno. Pages are | 725 ** Sort the list of pages in ascending order by pgno. Pages are |
| 550 ** connected by pDirty pointers. The pDirtyPrev pointers are | 726 ** connected by pDirty pointers. The pDirtyPrev pointers are |
| 551 ** corrupted by this sort. | 727 ** corrupted by this sort. |
| 552 ** | 728 ** |
| 553 ** Since there cannot be more than 2^31 distinct pages in a database, | 729 ** Since there cannot be more than 2^31 distinct pages in a database, |
| 554 ** there cannot be more than 31 buckets required by the merge sorter. | 730 ** there cannot be more than 31 buckets required by the merge sorter. |
| (...skipping 20 matching lines...) |
| 575 } | 751 } |
| 576 if( NEVER(i==N_SORT_BUCKET-1) ){ | 752 if( NEVER(i==N_SORT_BUCKET-1) ){ |
| 577 /* To get here, there need to be 2^(N_SORT_BUCKET) elements in | 753 /* To get here, there need to be 2^(N_SORT_BUCKET) elements in |
| 578 ** the input list. But that is impossible. | 754 ** the input list. But that is impossible. |
| 579 */ | 755 */ |
| 580 a[i] = pcacheMergeDirtyList(a[i], p); | 756 a[i] = pcacheMergeDirtyList(a[i], p); |
| 581 } | 757 } |
| 582 } | 758 } |
| 583 p = a[0]; | 759 p = a[0]; |
| 584 for(i=1; i<N_SORT_BUCKET; i++){ | 760 for(i=1; i<N_SORT_BUCKET; i++){ |
| 585 p = pcacheMergeDirtyList(p, a[i]); | 761 if( a[i]==0 ) continue; |
| 762 p = p ? pcacheMergeDirtyList(p, a[i]) : a[i]; |
| 586 } | 763 } |
| 587 return p; | 764 return p; |
| 588 } | 765 } |
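pcacheMergeDirtyList() and pcacheSortDirtyList() together implement a bottom-up merge sort over a singly linked list: bucket a[i] holds a sorted run of 2^i nodes, each incoming node is merged upward until it finds an empty bucket, and all buckets are merged at the end. The following standalone sketch reproduces that structure with simplified names (Node, key) and a plain integer key; it is a model of the technique, not the SQLite code itself:

/* A self-contained sketch of the bucketed merge sort used by
** pcacheSortDirtyList() above.  Node and key are simplified stand-ins
** for PgHdr and pgno. */
#include <stdio.h>

#define N_BUCKET 32

typedef struct Node Node;
struct Node {
  int key;
  Node *pNext;
};

/* Merge two lists that are already sorted by key. */
static Node *mergeList(Node *pA, Node *pB){
  Node head, *pTail = &head;
  while( pA && pB ){
    if( pA->key < pB->key ){
      pTail->pNext = pA; pTail = pA; pA = pA->pNext;
    }else{
      pTail->pNext = pB; pTail = pB; pB = pB->pNext;
    }
  }
  pTail->pNext = pA ? pA : pB;
  return head.pNext;
}

static Node *sortList(Node *pIn){
  Node *a[N_BUCKET] = {0};
  Node *p;
  int i;
  while( pIn ){
    p = pIn;
    pIn = pIn->pNext;
    p->pNext = 0;
    for(i=0; i<N_BUCKET-1; i++){
      if( a[i]==0 ){ a[i] = p; break; }
      p = mergeList(a[i], p);   /* bucket full: merge and carry upward */
      a[i] = 0;
    }
    if( i==N_BUCKET-1 ) a[i] = mergeList(a[i], p);
  }
  p = a[0];
  for(i=1; i<N_BUCKET; i++){
    if( a[i] ) p = p ? mergeList(p, a[i]) : a[i];
  }
  return p;
}

int main(void){
  Node n[6] = {{42,0},{7,0},{19,0},{3,0},{88,0},{7,0}};
  Node *pList = 0, *p;
  int i;
  for(i=0; i<6; i++){ n[i].pNext = pList; pList = &n[i]; }
  for(p=sortList(pList); p; p=p->pNext) printf("%d ", p->key);
  printf("\n");
  return 0;
}

Because a database cannot hold 2^31 or more distinct pages, the last bucket is never actually needed, which is why the i==N_SORT_BUCKET-1 branch in pcacheSortDirtyList() above is wrapped in NEVER().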
| 589 | 766 |
| 590 /* | 767 /* |
| 591 ** Return a list of all dirty pages in the cache, sorted by page number. | 768 ** Return a list of all dirty pages in the cache, sorted by page number. |
| 592 */ | 769 */ |
| 593 PgHdr *sqlite3PcacheDirtyList(PCache *pCache){ | 770 PgHdr *sqlite3PcacheDirtyList(PCache *pCache){ |
| 594 PgHdr *p; | 771 PgHdr *p; |
| 595 for(p=pCache->pDirty; p; p=p->pDirtyNext){ | 772 for(p=pCache->pDirty; p; p=p->pDirtyNext){ |
| (...skipping 72 matching lines...) |
| 668 assert( pCache->pCache!=0 ); | 845 assert( pCache->pCache!=0 ); |
| 669 sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache); | 846 sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache); |
| 670 } | 847 } |
| 671 | 848 |
| 672 /* | 849 /* |
| 673 ** Return the size of the header added by this middleware layer | 850 ** Return the size of the header added by this middleware layer |
| 674 ** in the page-cache hierarchy. | 851 ** in the page-cache hierarchy. |
| 675 */ | 852 */ |
| 676 int sqlite3HeaderSizePcache(void){ return ROUND8(sizeof(PgHdr)); } | 853 int sqlite3HeaderSizePcache(void){ return ROUND8(sizeof(PgHdr)); } |
| 677 | 854 |
| 855 /* |
| 856 ** Return the number of dirty pages currently in the cache, as a percentage |
| 857 ** of the configured cache size. |
| 858 */ |
| 859 int sqlite3PCachePercentDirty(PCache *pCache){ |
| 860 PgHdr *pDirty; |
| 861 int nDirty = 0; |
| 862 int nCache = numberOfCachePages(pCache); |
| 863 for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext) nDirty++; |
| 864 return nCache ? (int)(((i64)nDirty * 100) / nCache) : 0; |
| 865 } |
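sqlite3PCachePercentDirty() widens the product to 64 bits before dividing, since with a sufficiently large dirty count nDirty*100 could overflow a 32-bit int. A small sketch of the same arithmetic with made-up counts:

/* A small sketch of the percent-dirty arithmetic above.  The counts are
** made up for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void){
  int nDirty = 30000000;     /* hypothetical dirty page count */
  int nCache = 50000000;     /* hypothetical configured cache size */
  int pct = (int)(((int64_t)nDirty * 100) / nCache);
  printf("%d%% of the cache is dirty\n", pct);   /* prints: 60% of the cache is dirty */
  return 0;
}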
| 678 | 866 |
| 679 #if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG) | 867 #if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG) |
| 680 /* | 868 /* |
| 681 ** For all dirty pages currently in the cache, invoke the specified | 869 ** For all dirty pages currently in the cache, invoke the specified |
| 682 ** callback. This is only used if the SQLITE_CHECK_PAGES macro is | 870 ** callback. This is only used if the SQLITE_CHECK_PAGES macro is |
| 683 ** defined. | 871 ** defined. |
| 684 */ | 872 */ |
| 685 void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){ | 873 void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){ |
| 686 PgHdr *pDirty; | 874 PgHdr *pDirty; |
| 687 for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){ | 875 for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){ |
| 688 xIter(pDirty); | 876 xIter(pDirty); |
| 689 } | 877 } |
| 690 } | 878 } |
| 691 #endif | 879 #endif |