sl@0
|
1 |
/*
|
sl@0
|
2 |
** 2008 August 05
|
sl@0
|
3 |
**
|
sl@0
|
4 |
** The author disclaims copyright to this source code. In place of
|
sl@0
|
5 |
** a legal notice, here is a blessing:
|
sl@0
|
6 |
**
|
sl@0
|
7 |
** May you do good and not evil.
|
sl@0
|
8 |
** May you find forgiveness for yourself and forgive others.
|
sl@0
|
9 |
** May you share freely, never taking more than you give.
|
sl@0
|
10 |
**
|
sl@0
|
11 |
*************************************************************************
|
sl@0
|
12 |
** This file implements the page cache.
|
sl@0
|
13 |
**
|
sl@0
|
14 |
** @(#) $Id: pcache.c,v 1.33 2008/09/29 11:49:48 danielk1977 Exp $
|
sl@0
|
15 |
*/
|
sl@0
|
16 |
#include "sqliteInt.h"
|
sl@0
|
17 |
|
sl@0
|
18 |
/*
** A complete page cache is an instance of this structure.
**
** A cache may only be deleted by its owner and while holding the
** SQLITE_MUTEX_STATIC_LRU mutex.
*/
struct PCache {
  /*********************************************************************
  ** The first group of elements may be read or written at any time by
  ** the cache owner without holding the mutex.  No thread other than the
  ** cache owner is permitted to access these elements at any time.
  */
  PgHdr *pDirty, *pDirtyTail;         /* List of dirty pages in LRU order */
  PgHdr *pSynced;                     /* Last synced page in dirty page list */
  int nRef;                           /* Number of pinned pages */
  int nPinned;                        /* Number of pinned and/or dirty pages */
  int nMax;                           /* Configured cache size */
  int nMin;                           /* Configured minimum cache size */
  /**********************************************************************
  ** The next group of elements are fixed when the cache is created and
  ** may not be changed afterwards.  These elements can be read at any time
  ** by the cache owner or by any thread holding the mutex.  Non-owner
  ** threads must hold the mutex when reading these elements to prevent
  ** the entire PCache object from being deleted during the read.
  */
  int szPage;                         /* Size of every page in this cache */
  int szExtra;                        /* Size of extra space for each page */
  int bPurgeable;                     /* True if pages are on backing store */
  int (*xStress)(void*,PgHdr*);       /* Call to try make a page clean */
  void *pStress;                      /* Argument to xStress */
  /**********************************************************************
  ** The final group of elements can only be accessed while holding the
  ** mutex.  Both the cache owner and any other thread must hold the mutex
  ** to read or write any of these elements.
  */
  int nPage;                          /* Total number of pages in apHash */
  int nHash;                          /* Number of slots in apHash[] */
  PgHdr **apHash;                     /* Hash table for fast lookup by pgno */
  PgHdr *pClean;                      /* List of clean pages in use */
};
|
sl@0
|
58 |
|
sl@0
|
59 |
/*
** Free slots in the page block allocator.  Each unused slot in the
** buffer supplied to sqlite3PCacheBufferSetup() is overlaid with one
** of these structures and linked into pcache_g.pFree.
*/
typedef struct PgFreeslot PgFreeslot;
struct PgFreeslot {
  PgFreeslot *pNext;  /* Next free slot */
};
|
sl@0
|
66 |
|
sl@0
|
67 |
/*
** Global data for the page cache.  Shared by all PCache objects in the
** process and protected by the SQLITE_MUTEX_STATIC_LRU mutex stored in
** the "mutex" member.
*/
static SQLITE_WSD struct PCacheGlobal {
  int isInit;                         /* True when initialized */
  sqlite3_mutex *mutex;               /* static mutex MUTEX_STATIC_LRU */

  int nMaxPage;                       /* Sum of nMax for purgeable caches */
  int nMinPage;                       /* Sum of nMin for purgeable caches */
  int nCurrentPage;                   /* Number of purgeable pages allocated */
  PgHdr *pLruHead, *pLruTail;         /* LRU list of unused clean pgs */

  /* Variables related to SQLITE_CONFIG_PAGECACHE settings. */
  int szSlot;                         /* Size of each free slot */
  void *pStart, *pEnd;                /* Bounds of pagecache malloc range */
  PgFreeslot *pFree;                  /* Free page blocks */
} pcache = {0};

/*
** All code in this file should access the global pcache structure via the
** alias "pcache_g". This ensures that the WSD emulation is used when
** compiling for systems that do not support real WSD.
*/
#define pcache_g (GLOBAL(struct PCacheGlobal, pcache))
|
sl@0
|
91 |
|
sl@0
|
92 |
/*
|
sl@0
|
93 |
** All global variables used by this module (all of which are grouped
|
sl@0
|
94 |
** together in global structure "pcache" above) are protected by the static
|
sl@0
|
95 |
** SQLITE_MUTEX_STATIC_LRU mutex. A pointer to this mutex is stored in
|
sl@0
|
96 |
** variable "pcache.mutex".
|
sl@0
|
97 |
**
|
sl@0
|
98 |
** Some elements of the PCache and PgHdr structures are protected by the
|
sl@0
|
99 |
** SQLITE_MUTEX_STATIC_LRU mutex and others are not.  The protected
|
sl@0
|
100 |
** elements are grouped at the end of the structures and are clearly
|
sl@0
|
101 |
** marked.
|
sl@0
|
102 |
**
|
sl@0
|
103 |
** The following macros must surround all access (read or write)
|
sl@0
|
104 |
** of protected elements. The mutex is not recursive and may not be
|
sl@0
|
105 |
** entered more than once. The pcacheMutexHeld() macro should only be
|
sl@0
|
106 |
** used within an assert() to verify that the mutex is being held.
|
sl@0
|
107 |
*/
|
sl@0
|
108 |
/* Enter/leave/assert-held wrappers for the global LRU mutex. */
#define pcacheEnterMutex() sqlite3_mutex_enter(pcache_g.mutex)
#define pcacheExitMutex()  sqlite3_mutex_leave(pcache_g.mutex)
#define pcacheMutexHeld()  sqlite3_mutex_held(pcache_g.mutex)

/*
** Some of the assert() macros in this code are too expensive to run
** even during normal debugging.  Use them only rarely on long-running
** tests.  Enable the expensive asserts using the
** -DSQLITE_ENABLE_EXPENSIVE_ASSERT=1 compile-time option.
*/
#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT
# define expensive_assert(X)  assert(X)
#else
# define expensive_assert(X)
#endif
|
sl@0
|
123 |
|
sl@0
|
124 |
/********************************** Linked List Management ********************/
|
sl@0
|
125 |
|
sl@0
|
126 |
#if !defined(NDEBUG) && defined(SQLITE_ENABLE_EXPENSIVE_ASSERT)
|
sl@0
|
127 |
/*
|
sl@0
|
128 |
** This routine verifies that the number of entries in the hash table
|
sl@0
|
129 |
** is pCache->nPage. This routine is used within assert() statements
|
sl@0
|
130 |
** only and is therefore disabled during production builds.
|
sl@0
|
131 |
*/
|
sl@0
|
132 |
static int pcacheCheckHashCount(PCache *pCache){
|
sl@0
|
133 |
int i;
|
sl@0
|
134 |
int nPage = 0;
|
sl@0
|
135 |
for(i=0; i<pCache->nHash; i++){
|
sl@0
|
136 |
PgHdr *p;
|
sl@0
|
137 |
for(p=pCache->apHash[i]; p; p=p->pNextHash){
|
sl@0
|
138 |
nPage++;
|
sl@0
|
139 |
}
|
sl@0
|
140 |
}
|
sl@0
|
141 |
assert( nPage==pCache->nPage );
|
sl@0
|
142 |
return 1;
|
sl@0
|
143 |
}
|
sl@0
|
144 |
#endif /* !NDEBUG && SQLITE_ENABLE_EXPENSIVE_ASSERT */
|
sl@0
|
145 |
|
sl@0
|
146 |
|
sl@0
|
147 |
#if !defined(NDEBUG) && defined(SQLITE_ENABLE_EXPENSIVE_ASSERT)
|
sl@0
|
148 |
/*
|
sl@0
|
149 |
** Based on the current value of PCache.nRef and the contents of the
|
sl@0
|
150 |
** PCache.pDirty list, return the expected value of the PCache.nPinned
|
sl@0
|
151 |
** counter. This is only used in debugging builds, as follows:
|
sl@0
|
152 |
**
|
sl@0
|
153 |
** expensive_assert( pCache->nPinned==pcachePinnedCount(pCache) );
|
sl@0
|
154 |
*/
|
sl@0
|
155 |
static int pcachePinnedCount(PCache *pCache){
|
sl@0
|
156 |
PgHdr *p;
|
sl@0
|
157 |
int nPinned = pCache->nRef;
|
sl@0
|
158 |
for(p=pCache->pDirty; p; p=p->pNext){
|
sl@0
|
159 |
if( p->nRef==0 ){
|
sl@0
|
160 |
nPinned++;
|
sl@0
|
161 |
}
|
sl@0
|
162 |
}
|
sl@0
|
163 |
return nPinned;
|
sl@0
|
164 |
}
|
sl@0
|
165 |
#endif /* !NDEBUG && SQLITE_ENABLE_EXPENSIVE_ASSERT */
|
sl@0
|
166 |
|
sl@0
|
167 |
|
sl@0
|
168 |
#if !defined(NDEBUG) && defined(SQLITE_ENABLE_EXPENSIVE_ASSERT)
/*
** Check that the pCache->pSynced variable is set correctly. If it
** is not, either fail an assert or return zero. Otherwise, return
** non-zero. This is only used in debugging builds, as follows:
**
**   expensive_assert( pcacheCheckSynced(pCache) );
*/
static int pcacheCheckSynced(PCache *pCache){
  PgHdr *p;
  /* Every page between the dirty-list tail and pSynced (exclusive) must
  ** either be referenced or still need a sync. */
  for(p=pCache->pDirtyTail; p!=pCache->pSynced; p=p->pPrev){
    assert( p->nRef || (p->flags&PGHDR_NEED_SYNC) );
  }
  /* pSynced itself must be 0, referenced, or already synced. */
  return (p==0 || p->nRef || (p->flags&PGHDR_NEED_SYNC)==0);
}
#endif /* !NDEBUG && SQLITE_ENABLE_EXPENSIVE_ASSERT */
|
sl@0
|
184 |
|
sl@0
|
185 |
|
sl@0
|
186 |
|
sl@0
|
187 |
/*
** Remove a page from its hash table (PCache.apHash[]).
**
** The caller must hold the global pcache mutex (asserted below).
*/
static void pcacheRemoveFromHash(PgHdr *pPage){
  assert( pcacheMutexHeld() );
  if( pPage->pPrevHash ){
    pPage->pPrevHash->pNextHash = pPage->pNextHash;
  }else{
    /* pPage is the first entry in its bucket: advance the bucket head. */
    PCache *pCache = pPage->pCache;
    u32 h = pPage->pgno % pCache->nHash;
    assert( pCache->apHash[h]==pPage );
    pCache->apHash[h] = pPage->pNextHash;
  }
  if( pPage->pNextHash ){
    pPage->pNextHash->pPrevHash = pPage->pPrevHash;
  }
  pPage->pCache->nPage--;
  expensive_assert( pcacheCheckHashCount(pPage->pCache) );
}
|
sl@0
|
206 |
|
sl@0
|
207 |
/*
|
sl@0
|
208 |
** Insert a page into the hash table
|
sl@0
|
209 |
**
|
sl@0
|
210 |
** The mutex must be held by the caller.
|
sl@0
|
211 |
*/
|
sl@0
|
212 |
static void pcacheAddToHash(PgHdr *pPage){
|
sl@0
|
213 |
PCache *pCache = pPage->pCache;
|
sl@0
|
214 |
u32 h = pPage->pgno % pCache->nHash;
|
sl@0
|
215 |
assert( pcacheMutexHeld() );
|
sl@0
|
216 |
pPage->pNextHash = pCache->apHash[h];
|
sl@0
|
217 |
pPage->pPrevHash = 0;
|
sl@0
|
218 |
if( pCache->apHash[h] ){
|
sl@0
|
219 |
pCache->apHash[h]->pPrevHash = pPage;
|
sl@0
|
220 |
}
|
sl@0
|
221 |
pCache->apHash[h] = pPage;
|
sl@0
|
222 |
pCache->nPage++;
|
sl@0
|
223 |
expensive_assert( pcacheCheckHashCount(pCache) );
|
sl@0
|
224 |
}
|
sl@0
|
225 |
|
sl@0
|
226 |
/*
** Attempt to increase the size the hash table to contain
** at least nHash buckets.
**
** Returns SQLITE_OK on success or SQLITE_NOMEM if the new table
** cannot be allocated (the old table is left intact in that case).
*/
static int pcacheResizeHash(PCache *pCache, int nHash){
  PgHdr *p;
  PgHdr **pNew;
  assert( pcacheMutexHeld() );
#ifdef SQLITE_MALLOC_SOFT_LIMIT
  /* Cap the table size so the allocation stays within the soft limit. */
  if( nHash*sizeof(PgHdr*)>SQLITE_MALLOC_SOFT_LIMIT ){
    nHash = SQLITE_MALLOC_SOFT_LIMIT/sizeof(PgHdr *);
  }
#endif
  /* Drop the mutex around the allocation so that, if the allocator must
  ** reclaim memory to satisfy it, this module can be re-entered. */
  pcacheExitMutex();
  pNew = (PgHdr **)sqlite3Malloc(sizeof(PgHdr*)*nHash);
  pcacheEnterMutex();
  if( !pNew ){
    return SQLITE_NOMEM;
  }
  memset(pNew, 0, sizeof(PgHdr *)*nHash);
  sqlite3_free(pCache->apHash);
  pCache->apHash = pNew;
  pCache->nHash = nHash;
  pCache->nPage = 0;

  /* Re-insert every clean and dirty page into the new table.
  ** pcacheAddToHash() increments nPage, restoring the count zeroed above. */
  for(p=pCache->pClean; p; p=p->pNext){
    pcacheAddToHash(p);
  }
  for(p=pCache->pDirty; p; p=p->pNext){
    pcacheAddToHash(p);
  }
  return SQLITE_OK;
}
|
sl@0
|
259 |
|
sl@0
|
260 |
/*
** Remove a page from a linked list that is headed by *ppHead.
** *ppHead is either PCache.pClean or PCache.pDirty.
**
** When removing from the dirty list, the PCache.pDirtyTail and
** PCache.pSynced pointers are adjusted as well.
*/
static void pcacheRemoveFromList(PgHdr **ppHead, PgHdr *pPage){
  int isDirtyList = (ppHead==&pPage->pCache->pDirty);
  assert( ppHead==&pPage->pCache->pClean || ppHead==&pPage->pCache->pDirty );
  /* The mutex is only required when operating on the clean list. */
  assert( pcacheMutexHeld() || ppHead!=&pPage->pCache->pClean );

  if( pPage->pPrev ){
    pPage->pPrev->pNext = pPage->pNext;
  }else{
    assert( *ppHead==pPage );
    *ppHead = pPage->pNext;
  }
  if( pPage->pNext ){
    pPage->pNext->pPrev = pPage->pPrev;
  }

  if( isDirtyList ){
    PCache *pCache = pPage->pCache;
    assert( pPage->pNext || pCache->pDirtyTail==pPage );
    if( !pPage->pNext ){
      /* pPage was the tail of the dirty list. */
      pCache->pDirtyTail = pPage->pPrev;
    }
    if( pCache->pSynced==pPage ){
      /* Find the next page toward the head that does not need a sync. */
      PgHdr *pSynced = pPage->pPrev;
      while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){
        pSynced = pSynced->pPrev;
      }
      pCache->pSynced = pSynced;
    }
  }
}
|
sl@0
|
294 |
|
sl@0
|
295 |
/*
** Add a page to a linked list that is headed by *ppHead.
** *ppHead is either PCache.pClean or PCache.pDirty.
** The page is always inserted at the head of the list.
*/
static void pcacheAddToList(PgHdr **ppHead, PgHdr *pPage){
  int isDirtyList = (ppHead==&pPage->pCache->pDirty);
  assert( ppHead==&pPage->pCache->pClean || ppHead==&pPage->pCache->pDirty );

  if( (*ppHead) ){
    (*ppHead)->pPrev = pPage;
  }
  pPage->pNext = *ppHead;
  pPage->pPrev = 0;
  *ppHead = pPage;

  if( isDirtyList ){
    PCache *pCache = pPage->pCache;
    if( !pCache->pDirtyTail ){
      /* List was empty: this page is both head and tail. */
      assert( pPage->pNext==0 );
      pCache->pDirtyTail = pPage;
    }
    if( !pCache->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){
      pCache->pSynced = pPage;
    }
  }
}
|
sl@0
|
321 |
|
sl@0
|
322 |
/*
** Remove a page from the global LRU list.
**
** Pages belonging to non-purgeable caches are never placed on the LRU
** list, so this is a no-op for them.  The page must be clean.
*/
static void pcacheRemoveFromLruList(PgHdr *pPage){
  assert( sqlite3_mutex_held(pcache_g.mutex) );
  assert( (pPage->flags&PGHDR_DIRTY)==0 );
  if( pPage->pCache->bPurgeable==0 ) return;
  if( pPage->pNextLru ){
    assert( pcache_g.pLruTail!=pPage );
    pPage->pNextLru->pPrevLru = pPage->pPrevLru;
  }else{
    /* pPage is the current tail. */
    assert( pcache_g.pLruTail==pPage );
    pcache_g.pLruTail = pPage->pPrevLru;
  }
  if( pPage->pPrevLru ){
    assert( pcache_g.pLruHead!=pPage );
    pPage->pPrevLru->pNextLru = pPage->pNextLru;
  }else{
    /* pPage is the current head. */
    assert( pcache_g.pLruHead==pPage );
    pcache_g.pLruHead = pPage->pNextLru;
  }
}
|
sl@0
|
344 |
|
sl@0
|
345 |
/*
** Add a page to the global LRU list.  The page is normally added
** to the front of the list so that it will be the last page recycled.
** However, if the PGHDR_REUSE_UNLIKELY bit is set, the page is added
** to the end of the LRU list so that it will be the next to be recycled.
**
** No-op for pages of non-purgeable caches.  The page must be clean.
*/
static void pcacheAddToLruList(PgHdr *pPage){
  assert( sqlite3_mutex_held(pcache_g.mutex) );
  assert( (pPage->flags&PGHDR_DIRTY)==0 );
  if( pPage->pCache->bPurgeable==0 ) return;
  if( pcache_g.pLruTail && (pPage->flags & PGHDR_REUSE_UNLIKELY)!=0 ){
    /* If reuse is unlikely.  Put the page at the end of the LRU list
    ** where it will be recycled sooner rather than later.
    */
    assert( pcache_g.pLruHead );
    pPage->pNextLru = 0;
    pPage->pPrevLru = pcache_g.pLruTail;
    pcache_g.pLruTail->pNextLru = pPage;
    pcache_g.pLruTail = pPage;
    /* The hint is one-shot: clear it once it has been honored. */
    pPage->flags &= ~PGHDR_REUSE_UNLIKELY;
  }else{
    /* If reuse is possible. the page goes at the beginning of the LRU
    ** list so that it will be the last to be recycled.
    */
    if( pcache_g.pLruHead ){
      pcache_g.pLruHead->pPrevLru = pPage;
    }
    pPage->pNextLru = pcache_g.pLruHead;
    pcache_g.pLruHead = pPage;
    pPage->pPrevLru = 0;
    if( pcache_g.pLruTail==0 ){
      pcache_g.pLruTail = pPage;
    }
  }
}
|
sl@0
|
380 |
|
sl@0
|
381 |
/*********************************************** Memory Allocation ***********
**
** Initialize the page cache memory pool.
**
** This must be called at start-time when no page cache lines are
** checked out. This function is not threadsafe.
**
** pBuf points to a caller-supplied buffer that is carved into n slots
** of sz bytes each (sz is rounded down to a multiple of 8).
*/
void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){
  PgFreeslot *p;
  sz &= ~7;   /* Round slot size down to an 8-byte boundary */
  pcache_g.szSlot = sz;
  pcache_g.pStart = pBuf;
  pcache_g.pFree = 0;
  /* Thread each slot onto the free list. */
  while( n-- ){
    p = (PgFreeslot*)pBuf;
    p->pNext = pcache_g.pFree;
    pcache_g.pFree = p;
    pBuf = (void*)&((char*)pBuf)[sz];
  }
  pcache_g.pEnd = pBuf;
}
|
sl@0
|
402 |
|
sl@0
|
403 |
/*
** Allocate a page cache line.  Look in the page cache memory pool first
** and use an element from it first if available.  If nothing is available
** in the page cache memory pool, go to the general purpose memory allocator.
**
** The caller must hold the global pcache mutex.  Returns NULL on OOM.
*/
static void *pcacheMalloc(int sz, PCache *pCache){
  assert( sqlite3_mutex_held(pcache_g.mutex) );
  if( sz<=pcache_g.szSlot && pcache_g.pFree ){
    /* Fast path: pop a slot from the SQLITE_CONFIG_PAGECACHE pool. */
    PgFreeslot *p = pcache_g.pFree;
    pcache_g.pFree = p->pNext;
    sqlite3StatusSet(SQLITE_STATUS_PAGECACHE_SIZE, sz);
    sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_USED, 1);
    return (void*)p;
  }else{
    void *p;

    /* Allocate a new buffer using sqlite3Malloc. Before doing so, exit the
    ** global pcache mutex and unlock the pager-cache object pCache. This is
    ** so that if the attempt to allocate a new buffer causes the
    ** configured soft-heap-limit to be breached, it will be possible to
    ** reclaim memory from this pager-cache.
    */
    pcacheExitMutex();
    p = sqlite3Malloc(sz);
    pcacheEnterMutex();

    if( p ){
      sz = sqlite3MallocSize(p);
      sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz);
    }
    return p;
  }
}
|
sl@0
|
436 |
/*
** Public wrapper around pcacheMalloc(): allocate sz bytes of page-cache
** memory while holding the global pcache mutex.
*/
void *sqlite3PageMalloc(int sz){
  void *pRet;
  pcacheEnterMutex();
  pRet = pcacheMalloc(sz, 0);
  pcacheExitMutex();
  return pRet;
}
|
sl@0
|
443 |
|
sl@0
|
444 |
/*
** Release a pager memory allocation.
**
** Allocations that fall within the SQLITE_CONFIG_PAGECACHE buffer are
** returned to the slot free-list; everything else goes back to the
** general-purpose allocator.  The caller must hold the pcache mutex.
*/
static void pcacheFree(void *p){
  assert( sqlite3_mutex_held(pcache_g.mutex) );
  if( p==0 ) return;
  if( p>=pcache_g.pStart && p<pcache_g.pEnd ){
    /* p came from the static buffer: push it back on the free list. */
    PgFreeslot *pSlot;
    sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_USED, -1);
    pSlot = (PgFreeslot*)p;
    pSlot->pNext = pcache_g.pFree;
    pcache_g.pFree = pSlot;
  }else{
    int iSize = sqlite3MallocSize(p);
    sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, -iSize);
    sqlite3_free(p);
  }
}
|
sl@0
|
462 |
/*
** Public wrapper around pcacheFree(): release a page-cache allocation
** obtained from sqlite3PageMalloc(), taking the pcache mutex.
*/
void sqlite3PageFree(void *pMem){
  pcacheEnterMutex();
  pcacheFree(pMem);
  pcacheExitMutex();
}
|
sl@0
|
467 |
|
sl@0
|
468 |
/*
** Allocate a new page.
**
** The PgHdr header, the page data, and the extra space are carved from
** a single allocation: [PgHdr][szPage data][szExtra].  Only the header
** is zeroed here.  The caller must hold the pcache mutex.
** Returns NULL on OOM.
*/
static PgHdr *pcachePageAlloc(PCache *pCache){
  PgHdr *p;
  int sz = sizeof(*p) + pCache->szPage + pCache->szExtra;
  assert( sqlite3_mutex_held(pcache_g.mutex) );
  p = pcacheMalloc(sz, pCache);
  if( p==0 ) return 0;
  memset(p, 0, sizeof(PgHdr));
  p->pData = (void*)&p[1];
  p->pExtra = (void*)&((char*)p->pData)[pCache->szPage];
  if( pCache->bPurgeable ){
    pcache_g.nCurrentPage++;
  }
  return p;
}
|
sl@0
|
485 |
|
sl@0
|
486 |
/*
** Deallocate a page, including any saved-content buffers attached to it.
** The caller must hold the pcache mutex.
*/
static void pcachePageFree(PgHdr *p){
  assert( sqlite3_mutex_held(pcache_g.mutex) );
  if( p->pCache->bPurgeable ){
    pcache_g.nCurrentPage--;
  }
  pcacheFree(p->apSave[0]);
  pcacheFree(p->apSave[1]);
  pcacheFree(p);
}
|
sl@0
|
498 |
|
sl@0
|
499 |
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
|
sl@0
|
500 |
/*
|
sl@0
|
501 |
** Return the number of bytes that will be returned to the heap when
|
sl@0
|
502 |
** the argument is passed to pcachePageFree().
|
sl@0
|
503 |
*/
|
sl@0
|
504 |
static int pcachePageSize(PgHdr *p){
|
sl@0
|
505 |
assert( sqlite3_mutex_held(pcache_g.mutex) );
|
sl@0
|
506 |
assert( !pcache_g.pStart );
|
sl@0
|
507 |
assert( p->apSave[0]==0 );
|
sl@0
|
508 |
assert( p->apSave[1]==0 );
|
sl@0
|
509 |
assert( p && p->pCache );
|
sl@0
|
510 |
return sqlite3MallocSize(p);
|
sl@0
|
511 |
}
|
sl@0
|
512 |
#endif
|
sl@0
|
513 |
|
sl@0
|
514 |
/*
** Attempt to 'recycle' a page from the global LRU list. Only clean,
** unreferenced pages from purgeable caches are eligible for recycling.
**
** This function removes page pcache.pLruTail from the global LRU list,
** and from the hash-table and PCache.pClean list of the owner pcache.
** There should be no other references to the page.
**
** A pointer to the recycled page is returned, or NULL if no page is
** eligible for recycling.
*/
static PgHdr *pcacheRecyclePage(void){
  PgHdr *p = 0;
  assert( sqlite3_mutex_held(pcache_g.mutex) );

  if( (p=pcache_g.pLruTail) ){
    assert( (p->flags&PGHDR_DIRTY)==0 );
    /* Detach the page from every structure that tracks it. */
    pcacheRemoveFromLruList(p);
    pcacheRemoveFromHash(p);
    pcacheRemoveFromList(&p->pCache->pClean, p);
  }

  return p;
}
|
sl@0
|
538 |
|
sl@0
|
539 |
/*
|
sl@0
|
540 |
** Obtain space for a page. Try to recycle an old page if the limit on the
|
sl@0
|
541 |
** number of pages has been reached. If the limit has not been reached or
|
sl@0
|
542 |
** there are no pages eligible for recycling, allocate a new page.
|
sl@0
|
543 |
**
|
sl@0
|
544 |
** Return a pointer to the new page, or NULL if an OOM condition occurs.
|
sl@0
|
545 |
*/
|
sl@0
|
546 |
static int pcacheRecycleOrAlloc(PCache *pCache, PgHdr **ppPage){
|
sl@0
|
547 |
PgHdr *p = 0;
|
sl@0
|
548 |
|
sl@0
|
549 |
int szPage = pCache->szPage;
|
sl@0
|
550 |
int szExtra = pCache->szExtra;
|
sl@0
|
551 |
|
sl@0
|
552 |
assert( pcache_g.isInit );
|
sl@0
|
553 |
assert( sqlite3_mutex_held(pcache_g.mutex) );
|
sl@0
|
554 |
|
sl@0
|
555 |
*ppPage = 0;
|
sl@0
|
556 |
|
sl@0
|
557 |
/* If we have reached either the global or the local limit for
|
sl@0
|
558 |
** pinned+dirty pages, and there is at least one dirty page,
|
sl@0
|
559 |
** invoke the xStress callback to cause a page to become clean.
|
sl@0
|
560 |
*/
|
sl@0
|
561 |
expensive_assert( pCache->nPinned==pcachePinnedCount(pCache) );
|
sl@0
|
562 |
expensive_assert( pcacheCheckSynced(pCache) );
|
sl@0
|
563 |
if( pCache->xStress
|
sl@0
|
564 |
&& pCache->pDirty
|
sl@0
|
565 |
&& (pCache->nPinned>=(pcache_g.nMaxPage+pCache->nMin-pcache_g.nMinPage)
|
sl@0
|
566 |
|| pCache->nPinned>=pCache->nMax)
|
sl@0
|
567 |
){
|
sl@0
|
568 |
PgHdr *pPg;
|
sl@0
|
569 |
assert(pCache->pDirtyTail);
|
sl@0
|
570 |
|
sl@0
|
571 |
for(pPg=pCache->pSynced;
|
sl@0
|
572 |
pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC));
|
sl@0
|
573 |
pPg=pPg->pPrev
|
sl@0
|
574 |
);
|
sl@0
|
575 |
if( !pPg ){
|
sl@0
|
576 |
for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pPrev);
|
sl@0
|
577 |
}
|
sl@0
|
578 |
if( pPg ){
|
sl@0
|
579 |
int rc;
|
sl@0
|
580 |
pcacheExitMutex();
|
sl@0
|
581 |
rc = pCache->xStress(pCache->pStress, pPg);
|
sl@0
|
582 |
pcacheEnterMutex();
|
sl@0
|
583 |
if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
|
sl@0
|
584 |
return rc;
|
sl@0
|
585 |
}
|
sl@0
|
586 |
}
|
sl@0
|
587 |
}
|
sl@0
|
588 |
|
sl@0
|
589 |
/* If either the local or the global page limit has been reached,
|
sl@0
|
590 |
** try to recycle a page.
|
sl@0
|
591 |
*/
|
sl@0
|
592 |
if( pCache->bPurgeable && (pCache->nPage>=pCache->nMax-1 ||
|
sl@0
|
593 |
pcache_g.nCurrentPage>=pcache_g.nMaxPage) ){
|
sl@0
|
594 |
p = pcacheRecyclePage();
|
sl@0
|
595 |
}
|
sl@0
|
596 |
|
sl@0
|
597 |
/* If a page has been recycled but it is the wrong size, free it. */
|
sl@0
|
598 |
if( p && (p->pCache->szPage!=szPage || p->pCache->szPage!=szExtra) ){
|
sl@0
|
599 |
pcachePageFree(p);
|
sl@0
|
600 |
p = 0;
|
sl@0
|
601 |
}
|
sl@0
|
602 |
|
sl@0
|
603 |
if( !p ){
|
sl@0
|
604 |
p = pcachePageAlloc(pCache);
|
sl@0
|
605 |
}
|
sl@0
|
606 |
|
sl@0
|
607 |
*ppPage = p;
|
sl@0
|
608 |
return (p?SQLITE_OK:SQLITE_NOMEM);
|
sl@0
|
609 |
}
|
sl@0
|
610 |
|
sl@0
|
611 |
/*************************************************** General Interfaces ******
**
** Initialize and shutdown the page cache subsystem. Neither of these
** functions are threadsafe.
*/
int sqlite3PcacheInitialize(void){
  assert( pcache_g.isInit==0 );
  memset(&pcache_g, 0, sizeof(pcache));
  if( sqlite3GlobalConfig.bCoreMutex ){
    /* No need to check the return value of sqlite3_mutex_alloc().
    ** Allocating a static mutex cannot fail.
    */
    pcache_g.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU);
  }
  pcache_g.isInit = 1;
  return SQLITE_OK;
}
|
sl@0
|
628 |
/* Shut down the page cache subsystem by clearing all global state.
** Not threadsafe; see the note on sqlite3PcacheInitialize() above. */
void sqlite3PcacheShutdown(void){
  memset(&pcache_g, 0, sizeof(pcache));
}
|
sl@0
|
631 |
|
sl@0
|
632 |
/*
|
sl@0
|
633 |
** Return the size in bytes of a PCache object.
|
sl@0
|
634 |
*/
|
sl@0
|
635 |
int sqlite3PcacheSize(void){ return sizeof(PCache); }
|
sl@0
|
636 |
|
sl@0
|
637 |
/*
** Create a new PCache object.  Storage space to hold the object
** has already been allocated and is passed in as the p pointer.
** The cache starts with default limits of nMax=100 and nMin=10 pages.
*/
void sqlite3PcacheOpen(
  int szPage,                  /* Size of every page */
  int szExtra,                 /* Extra space associated with each page */
  int bPurgeable,              /* True if pages are on backing store */
  int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */
  void *pStress,               /* Argument to xStress */
  PCache *p                    /* Preallocated space for the PCache */
){
  assert( pcache_g.isInit );
  memset(p, 0, sizeof(PCache));
  p->szPage = szPage;
  p->szExtra = szExtra;
  p->bPurgeable = bPurgeable;
  p->xStress = xStress;
  p->pStress = pStress;
  p->nMax = 100;
  p->nMin = 10;

  pcacheEnterMutex();
  if( bPurgeable ){
    /* Purgeable caches contribute their limits to the global totals. */
    pcache_g.nMaxPage += p->nMax;
    pcache_g.nMinPage += p->nMin;
  }

  pcacheExitMutex();
}
|
sl@0
|
667 |
|
sl@0
|
668 |
/*
** Change the page size for PCache object.  This can only happen
** when the cache is empty (asserted via nPage==0).
*/
void sqlite3PcacheSetPageSize(PCache *pCache, int szPage){
  assert(pCache->nPage==0);
  pCache->szPage = szPage;
}
|
sl@0
|
676 |
|
sl@0
|
677 |
/*
** Try to obtain a page from the cache.
**
** If the page is already cached, its reference count is incremented and
** it is returned.  Otherwise, if createFlag is true, a new page is
** obtained (recycled or allocated) and inserted into the cache.
** Returns SQLITE_OK, or SQLITE_NOMEM if a needed allocation fails.
** *ppPage is set to the page, or to 0 if no page is available.
*/
int sqlite3PcacheFetch(
  PCache *pCache,       /* Obtain the page from this cache */
  Pgno pgno,            /* Page number to obtain */
  int createFlag,       /* If true, create page if it does not exist already */
  PgHdr **ppPage        /* Write the page here */
){
  int rc = SQLITE_OK;
  PgHdr *pPage = 0;

  assert( pcache_g.isInit );
  assert( pCache!=0 );
  assert( pgno>0 );
  expensive_assert( pCache->nPinned==pcachePinnedCount(pCache) );

  pcacheEnterMutex();

  /* Search the hash table for the requested page. Exit early if it is found. */
  if( pCache->apHash ){
    u32 h = pgno % pCache->nHash;
    for(pPage=pCache->apHash[h]; pPage; pPage=pPage->pNextHash){
      if( pPage->pgno==pgno ){
        if( pPage->nRef==0 ){
          /* First reference to a previously unpinned page: if it is
          ** clean it leaves the LRU list and becomes pinned. */
          if( 0==(pPage->flags&PGHDR_DIRTY) ){
            pcacheRemoveFromLruList(pPage);
            pCache->nPinned++;
          }
          pCache->nRef++;
        }
        pPage->nRef++;
        break;
      }
    }
  }

  if( !pPage && createFlag ){
    /* Grow the hash table when the page count reaches the bucket count. */
    if( pCache->nHash<=pCache->nPage ){
      rc = pcacheResizeHash(pCache, pCache->nHash<256 ? 256 : pCache->nHash*2);
    }
    if( rc==SQLITE_OK ){
      rc = pcacheRecycleOrAlloc(pCache, &pPage);
    }
    if( rc==SQLITE_OK ){
      /* Initialize the new page and register it as clean and pinned. */
      pPage->pPager = 0;
      pPage->flags = 0;
      pPage->pDirty = 0;
      pPage->pgno = pgno;
      pPage->pCache = pCache;
      pPage->nRef = 1;
      pCache->nRef++;
      pCache->nPinned++;
      pcacheAddToList(&pCache->pClean, pPage);
      pcacheAddToHash(pPage);
    }
  }

  pcacheExitMutex();

  *ppPage = pPage;
  expensive_assert( pCache->nPinned==pcachePinnedCount(pCache) );
  assert( pPage || !createFlag || rc!=SQLITE_OK );
  return rc;
}
|
sl@0
|
742 |
|
sl@0
|
743 |
/*
** Dereference a page.  When the reference count reaches zero,
** move the page to the LRU list if it is clean.
**
** If the global page allocation exceeds its limit, a clean unreferenced
** page is freed outright instead of being kept on the LRU list.
*/
void sqlite3PcacheRelease(PgHdr *p){
  assert( p->nRef>0 );
  p->nRef--;
  if( p->nRef==0 ){
    PCache *pCache = p->pCache;
    pCache->nRef--;
    if( (p->flags&PGHDR_DIRTY)==0 ){
      pCache->nPinned--;
      pcacheEnterMutex();
      if( pcache_g.nCurrentPage>pcache_g.nMaxPage ){
        /* Over the global limit: release the page immediately. */
        pcacheRemoveFromList(&pCache->pClean, p);
        pcacheRemoveFromHash(p);
        pcachePageFree(p);
      }else{
        pcacheAddToLruList(p);
      }
      pcacheExitMutex();
    }else{
      /* Move the page to the head of the cache's dirty list. */
      pcacheRemoveFromList(&pCache->pDirty, p);
      pcacheAddToList(&pCache->pDirty, p);
    }
  }
}
|
sl@0
|
771 |
|
sl@0
|
772 |
/*
** Increment the reference count on page p.  The page must already be
** pinned (nRef>0) when this is called.
*/
void sqlite3PcacheRef(PgHdr *p){
  assert( p->nRef>0 );
  ++p->nRef;
}
|
sl@0
|
776 |
|
sl@0
|
777 |
/*
** Drop a page from the cache. There must be exactly one reference to the
** page. This function deletes that reference, so after it returns the
** page pointed to by p is invalid.
*/
void sqlite3PcacheDrop(PgHdr *p){
  PCache *pCache;
  assert( p->nRef==1 );
  assert( 0==(p->flags&PGHDR_DIRTY) );  /* only clean pages may be dropped */
  pCache = p->pCache;
  /* The page was pinned (nRef==1), so both counters must be decremented. */
  pCache->nRef--;
  pCache->nPinned--;
  pcacheEnterMutex();
  pcacheRemoveFromList(&pCache->pClean, p);
  pcacheRemoveFromHash(p);
  pcachePageFree(p);
  pcacheExitMutex();
}
|
sl@0
|
795 |
|
sl@0
|
796 |
/*
** Make sure the page is marked as dirty.  If it isn't dirty already,
** make it so.  This also clears the PGHDR_DONT_WRITE flag and moves
** the page from the clean list to the dirty list.
*/
void sqlite3PcacheMakeDirty(PgHdr *p){
  PCache *pCache;
  p->flags &= ~PGHDR_DONT_WRITE;
  if( p->flags & PGHDR_DIRTY ) return;   /* already dirty - nothing to do */
  assert( (p->flags & PGHDR_DIRTY)==0 );
  assert( p->nRef>0 );                   /* page must be pinned by caller */
  pCache = p->pCache;
  pcacheEnterMutex();
  pcacheRemoveFromList(&pCache->pClean, p);
  pcacheAddToList(&pCache->pDirty, p);
  pcacheExitMutex();
  p->flags |= PGHDR_DIRTY;
}
|
sl@0
|
813 |
|
sl@0
|
814 |
/*
** Mark the (currently dirty) page p as clean: move it from the dirty
** list to the clean list and clear the PGHDR_DIRTY flag.  If the page
** is unpinned it also becomes eligible for recycling via the LRU list.
**
** The callers visible in this file invoke this routine while holding
** the global page-cache mutex.
*/
static void pcacheMakeClean(PgHdr *p){
  PCache *pCache = p->pCache;
  assert( p->apSave[0]==0 && p->apSave[1]==0 );  /* no preserved copies */
  assert( p->flags & PGHDR_DIRTY );
  pcacheRemoveFromList(&pCache->pDirty, p);
  pcacheAddToList(&pCache->pClean, p);
  p->flags &= ~PGHDR_DIRTY;
  if( p->nRef==0 ){
    /* A clean, unpinned page is recyclable and no longer pinned. */
    pcacheAddToLruList(p);
    pCache->nPinned--;
  }
  expensive_assert( pCache->nPinned==pcachePinnedCount(pCache) );
}
|
sl@0
|
827 |
|
sl@0
|
828 |
/*
|
sl@0
|
829 |
** Make sure the page is marked as clean. If it isn't clean already,
|
sl@0
|
830 |
** make it so.
|
sl@0
|
831 |
*/
|
sl@0
|
832 |
void sqlite3PcacheMakeClean(PgHdr *p){
|
sl@0
|
833 |
if( (p->flags & PGHDR_DIRTY) ){
|
sl@0
|
834 |
pcacheEnterMutex();
|
sl@0
|
835 |
pcacheMakeClean(p);
|
sl@0
|
836 |
pcacheExitMutex();
|
sl@0
|
837 |
}
|
sl@0
|
838 |
}
|
sl@0
|
839 |
|
sl@0
|
840 |
/*
** Make every page in the cache clean.  All pages are moved from the
** dirty list to the clean list; unpinned pages also go onto the LRU
** list and are removed from the pinned count.
*/
void sqlite3PcacheCleanAll(PCache *pCache){
  PgHdr *p;
  pcacheEnterMutex();
  while( (p = pCache->pDirty)!=0 ){
    assert( p->apSave[0]==0 && p->apSave[1]==0 );  /* no preserved copies */
    pcacheRemoveFromList(&pCache->pDirty, p);
    p->flags &= ~PGHDR_DIRTY;
    pcacheAddToList(&pCache->pClean, p);
    if( p->nRef==0 ){
      /* Unpinned pages become recyclable once clean. */
      pcacheAddToLruList(p);
      pCache->nPinned--;
    }
  }
  sqlite3PcacheAssertFlags(pCache, 0, PGHDR_DIRTY);
  expensive_assert( pCache->nPinned==pcachePinnedCount(pCache) );
  pcacheExitMutex();
}
|
sl@0
|
860 |
|
sl@0
|
861 |
/*
** Change the page number of page p to newPgno. If newPgno is 0, then the
** page object is added to the clean-list and the PGHDR_REUSE_UNLIKELY
** flag set.  The page is re-hashed under its new page number.
*/
void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
  assert( p->nRef>0 );   /* caller must hold a reference */
  pcacheEnterMutex();
  pcacheRemoveFromHash(p);
  p->pgno = newPgno;
  if( newPgno==0 ){
    /* Discard any preserved journal copies and force the page clean. */
    pcacheFree(p->apSave[0]);
    pcacheFree(p->apSave[1]);
    p->apSave[0] = 0;
    p->apSave[1] = 0;
    if( (p->flags & PGHDR_DIRTY) ){
      pcacheMakeClean(p);
    }
    /* Replace all flags: the page is now only "unlikely to be reused". */
    p->flags = PGHDR_REUSE_UNLIKELY;
  }
  pcacheAddToHash(p);
  pcacheExitMutex();
}
|
sl@0
|
884 |
|
sl@0
|
885 |
/*
** Remove all content from a page cache: free every clean and dirty
** page, reset the page counters, and zero out the hash table.
**
** The caller must hold the global page-cache mutex.
*/
static void pcacheClear(PCache *pCache){
  PgHdr *p, *pNext;
  assert( sqlite3_mutex_held(pcache_g.mutex) );
  for(p=pCache->pClean; p; p=pNext){
    pNext = p->pNext;
    /* Clean pages may also be on the LRU list - unlink them first. */
    pcacheRemoveFromLruList(p);
    pcachePageFree(p);
  }
  for(p=pCache->pDirty; p; p=pNext){
    pNext = p->pNext;
    pcachePageFree(p);
  }
  pCache->pClean = 0;
  pCache->pDirty = 0;
  pCache->pDirtyTail = 0;
  pCache->nPage = 0;
  pCache->nPinned = 0;
  memset(pCache->apHash, 0, pCache->nHash*sizeof(pCache->apHash[0]));
}
|
sl@0
|
907 |
|
sl@0
|
908 |
|
sl@0
|
909 |
/*
** Drop every cache entry whose page number is greater than "pgno".
**
** Unpinned pages are unlinked and freed.  Pages that still carry
** references cannot be freed, so their content is zeroed instead.
*/
void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
  PgHdr *p, *pNext;
  PgHdr *pDirty = pCache->pDirty;   /* snapshot of the dirty list head */
  pcacheEnterMutex();
  /* Walk the clean list first; when it is exhausted, continue with the
  ** snapshotted dirty list. */
  for(p=pCache->pClean; p||pDirty; p=pNext){
    if( !p ){
      p = pDirty;
      pDirty = 0;
    }
    pNext = p->pNext;
    if( p->pgno>pgno ){
      if( p->nRef==0 ){
        pcacheRemoveFromHash(p);
        if( p->flags&PGHDR_DIRTY ){
          pcacheRemoveFromList(&pCache->pDirty, p);
          /* Dirty pages count as pinned even with nRef==0. */
          pCache->nPinned--;
        }else{
          pcacheRemoveFromList(&pCache->pClean, p);
          pcacheRemoveFromLruList(p);
        }
        pcachePageFree(p);
      }else{
        /* If there are references to the page, it cannot be freed. In this
        ** case, zero the page content instead.
        */
        memset(p->pData, 0, pCache->szPage);
      }
    }
  }
  pcacheExitMutex();
}
|
sl@0
|
943 |
|
sl@0
|
944 |
/*
** If there are currently more than pcache.nMaxPage pages allocated, try
** to recycle pages to reduce the number allocated to pcache.nMaxPage.
**
** The loop stops as soon as the budget is met or no further pages can
** be recycled.  The caller must hold the global page-cache mutex.
*/
static void pcacheEnforceMaxPage(void){
  PgHdr *p;
  assert( sqlite3_mutex_held(pcache_g.mutex) );
  while( pcache_g.nCurrentPage>pcache_g.nMaxPage && (p = pcacheRecyclePage()) ){
    pcachePageFree(p);
  }
}
|
sl@0
|
955 |
|
sl@0
|
956 |
/*
** Close a cache.  All of its pages are freed.  If the cache was
** purgeable, the global page limits are reduced accordingly and any
** now-excess pages are released.
*/
void sqlite3PcacheClose(PCache *pCache){
  pcacheEnterMutex();

  /* Free all the pages used by this pager and remove them from the LRU list. */
  pcacheClear(pCache);
  if( pCache->bPurgeable ){
    pcache_g.nMaxPage -= pCache->nMax;
    pcache_g.nMinPage -= pCache->nMin;
    pcacheEnforceMaxPage();
  }
  sqlite3_free(pCache->apHash);
  pcacheExitMutex();
}
|
sl@0
|
972 |
|
sl@0
|
973 |
/*
** Preserve the content of the page.  It is assumed that the content
** has not been preserved already.
**
** If idJournal==0 then this is for the overall transaction.
** If idJournal==1 then this is for the statement journal.
**
** This routine is used for in-memory databases only.
**
** Return SQLITE_OK or SQLITE_NOMEM if a memory allocation fails.
*/
int sqlite3PcachePreserve(PgHdr *p, int idJournal){
  void *x;
  int sz;
  assert( p->pCache->bPurgeable==0 );  /* in-memory databases only */
  assert( p->apSave[idJournal]==0 );   /* content not already preserved */
  sz = p->pCache->szPage;
  p->apSave[idJournal] = x = sqlite3PageMalloc( sz );
  if( x==0 ) return SQLITE_NOMEM;
  memcpy(x, p->pData, sz);
  return SQLITE_OK;
}
|
sl@0
|
995 |
|
sl@0
|
996 |
/*
** Commit a change previously preserved.  The saved copy of each dirty
** page for journal idJournal is discarded.  When committing the main
** journal (idJournal==0) the PGHDR_IN_JOURNAL flag is also cleared.
*/
void sqlite3PcacheCommit(PCache *pCache, int idJournal){
  PgHdr *p;
  /* For the main journal clear PGHDR_IN_JOURNAL; otherwise keep flags. */
  int mask = idJournal==0 ? ~PGHDR_IN_JOURNAL : 0xffffff;
  pcacheEnterMutex();     /* Mutex is required to call pcacheFree() */
  for(p=pCache->pDirty; p; p=p->pNext){
    if( p->apSave[idJournal] ){
      pcacheFree(p->apSave[idJournal]);
      p->apSave[idJournal] = 0;
    }
    p->flags &= mask;
  }
  pcacheExitMutex();
}
|
sl@0
|
1012 |
|
sl@0
|
1013 |
/*
** Rollback a change previously preserved.  Every dirty page that has a
** saved copy for journal idJournal has that copy restored into pData
** and the copy is then discarded.  xReiniter (if not NULL) is invoked
** on each restored page.  When rolling back the main journal
** (idJournal==0) the PGHDR_IN_JOURNAL flag is also cleared.
*/
void sqlite3PcacheRollback(
  PCache *pCache,                /* Pager cache */
  int idJournal,                 /* Which copy to rollback to */
  void (*xReiniter)(PgHdr*)      /* Called on each rolled back page */
){
  PgHdr *p;
  int sz;
  int mask = idJournal==0 ? ~PGHDR_IN_JOURNAL : 0xffffff;
  pcacheEnterMutex();     /* Mutex is required to call pcacheFree() */
  sz = pCache->szPage;
  for(p=pCache->pDirty; p; p=p->pNext){
    if( p->apSave[idJournal] ){
      memcpy(p->pData, p->apSave[idJournal], sz);
      pcacheFree(p->apSave[idJournal]);
      p->apSave[idJournal] = 0;
      if( xReiniter ){
        xReiniter(p);
      }
    }
    p->flags &= mask;
  }
  pcacheExitMutex();
}
|
sl@0
|
1039 |
|
sl@0
|
1040 |
#ifndef NDEBUG
|
sl@0
|
1041 |
/*
|
sl@0
|
1042 |
** Assert flags settings on all pages. Debugging only.
|
sl@0
|
1043 |
*/
|
sl@0
|
1044 |
void sqlite3PcacheAssertFlags(PCache *pCache, int trueMask, int falseMask){
|
sl@0
|
1045 |
PgHdr *p;
|
sl@0
|
1046 |
for(p=pCache->pDirty; p; p=p->pNext){
|
sl@0
|
1047 |
assert( (p->flags&trueMask)==trueMask );
|
sl@0
|
1048 |
assert( (p->flags&falseMask)==0 );
|
sl@0
|
1049 |
}
|
sl@0
|
1050 |
for(p=pCache->pClean; p; p=p->pNext){
|
sl@0
|
1051 |
assert( (p->flags&trueMask)==trueMask );
|
sl@0
|
1052 |
assert( (p->flags&falseMask)==0 );
|
sl@0
|
1053 |
}
|
sl@0
|
1054 |
}
|
sl@0
|
1055 |
#endif
|
sl@0
|
1056 |
|
sl@0
|
1057 |
/*
|
sl@0
|
1058 |
** Discard the contents of the cache.
|
sl@0
|
1059 |
*/
|
sl@0
|
1060 |
int sqlite3PcacheClear(PCache *pCache){
|
sl@0
|
1061 |
assert(pCache->nRef==0);
|
sl@0
|
1062 |
pcacheEnterMutex();
|
sl@0
|
1063 |
pcacheClear(pCache);
|
sl@0
|
1064 |
pcacheExitMutex();
|
sl@0
|
1065 |
return SQLITE_OK;
|
sl@0
|
1066 |
}
|
sl@0
|
1067 |
|
sl@0
|
1068 |
/*
** Merge two lists of pages connected by pDirty and in pgno order.
** Do not bother fixing the pPrevDirty pointers.
*/
static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){
  PgHdr result, *pTail;
  pTail = &result;   /* "result" acts as a dummy head node */
  while( pA && pB ){
    if( pA->pgno<pB->pgno ){
      pTail->pDirty = pA;
      pTail = pA;
      pA = pA->pDirty;
    }else{
      pTail->pDirty = pB;
      pTail = pB;
      pB = pB->pDirty;
    }
  }
  /* Append whichever list still has elements (or terminate the list). */
  if( pA ){
    pTail->pDirty = pA;
  }else if( pB ){
    pTail->pDirty = pB;
  }else{
    pTail->pDirty = 0;
  }
  return result.pDirty;
}
|
sl@0
|
1095 |
|
sl@0
|
1096 |
/*
** Sort the list of pages in ascending order by pgno.  Pages are
** connected by pDirty pointers.  The pPrevDirty pointers are
** corrupted by this sort.
**
** This is a bottom-up merge sort: bucket a[i] holds a sorted sublist
** of length 2^i; incoming elements are merged upward until a free
** bucket is found.
*/
#define N_SORT_BUCKET_ALLOC 25
#define N_SORT_BUCKET 25
#ifdef SQLITE_TEST
int sqlite3_pager_n_sort_bucket = 0;
#undef N_SORT_BUCKET
#define N_SORT_BUCKET \
 (sqlite3_pager_n_sort_bucket?sqlite3_pager_n_sort_bucket:N_SORT_BUCKET_ALLOC)
#endif
static PgHdr *pcacheSortDirtyList(PgHdr *pIn){
  PgHdr *a[N_SORT_BUCKET_ALLOC], *p;
  int i;
  memset(a, 0, sizeof(a));
  while( pIn ){
    p = pIn;
    pIn = p->pDirty;
    p->pDirty = 0;
    for(i=0; i<N_SORT_BUCKET-1; i++){
      if( a[i]==0 ){
        a[i] = p;
        break;
      }else{
        p = pcacheMergeDirtyList(a[i], p);
        a[i] = 0;
      }
    }
    if( i==N_SORT_BUCKET-1 ){
      /* Coverage: To get here, there need to be 2^(N_SORT_BUCKET)
      ** elements in the input list. This is possible, but impractical.
      ** Testing this line is the point of global variable
      ** sqlite3_pager_n_sort_bucket.
      */
      a[i] = pcacheMergeDirtyList(a[i], p);
    }
  }
  /* Merge all remaining buckets into a single sorted list. */
  p = a[0];
  for(i=1; i<N_SORT_BUCKET; i++){
    p = pcacheMergeDirtyList(p, a[i]);
  }
  return p;
}
|
sl@0
|
1141 |
|
sl@0
|
1142 |
/*
** Return a list of all dirty pages in the cache, sorted by page number.
** The returned list is linked through the pDirty pointers.
*/
PgHdr *sqlite3PcacheDirtyList(PCache *pCache){
  PgHdr *p;
  /* Re-link the dirty pages through pDirty before sorting. */
  for(p=pCache->pDirty; p; p=p->pNext){
    p->pDirty = p->pNext;
  }
  return pcacheSortDirtyList(pCache->pDirty);
}
|
sl@0
|
1152 |
|
sl@0
|
1153 |
/*
|
sl@0
|
1154 |
** Return the total number of outstanding page references.
|
sl@0
|
1155 |
*/
|
sl@0
|
1156 |
int sqlite3PcacheRefCount(PCache *pCache){
|
sl@0
|
1157 |
return pCache->nRef;
|
sl@0
|
1158 |
}
|
sl@0
|
1159 |
|
sl@0
|
1160 |
/*
** Return the number of outstanding references to the single page p.
*/
int sqlite3PcachePageRefcount(PgHdr *p){
  int nRef = p->nRef;
  return nRef;
}
|
sl@0
|
1163 |
|
sl@0
|
1164 |
/*
|
sl@0
|
1165 |
** Return the total number of pages in the cache.
|
sl@0
|
1166 |
*/
|
sl@0
|
1167 |
int sqlite3PcachePagecount(PCache *pCache){
|
sl@0
|
1168 |
assert( pCache->nPage>=0 );
|
sl@0
|
1169 |
return pCache->nPage;
|
sl@0
|
1170 |
}
|
sl@0
|
1171 |
|
sl@0
|
1172 |
#ifdef SQLITE_CHECK_PAGES
|
sl@0
|
1173 |
/*
|
sl@0
|
1174 |
** This function is used by the pager.c module to iterate through all
|
sl@0
|
1175 |
** pages in the cache. At present, this is only required if the
|
sl@0
|
1176 |
** SQLITE_CHECK_PAGES macro (used for debugging) is specified.
|
sl@0
|
1177 |
*/
|
sl@0
|
1178 |
void sqlite3PcacheIterate(PCache *pCache, void (*xIter)(PgHdr *)){
|
sl@0
|
1179 |
PgHdr *p;
|
sl@0
|
1180 |
for(p=pCache->pClean; p; p=p->pNext){
|
sl@0
|
1181 |
xIter(p);
|
sl@0
|
1182 |
}
|
sl@0
|
1183 |
for(p=pCache->pDirty; p; p=p->pNext){
|
sl@0
|
1184 |
xIter(p);
|
sl@0
|
1185 |
}
|
sl@0
|
1186 |
}
|
sl@0
|
1187 |
#endif
|
sl@0
|
1188 |
|
sl@0
|
1189 |
/*
** Clear the bits given in "mask" from the flags of every page in the
** page cache.  (Note: despite its history, this routine clears flags,
** it does not set them - see the "mask = ~mask" below.)
*/
void sqlite3PcacheClearFlags(PCache *pCache, int mask){
  PgHdr *p;

  /* Obtain the global mutex before modifying any PgHdr.flags variables
  ** or traversing the LRU list.
  */
  pcacheEnterMutex();

  mask = ~mask;
  for(p=pCache->pDirty; p; p=p->pNext){
    p->flags &= mask;
  }
  for(p=pCache->pClean; p; p=p->pNext){
    p->flags &= mask;
  }

  if( 0==(mask&PGHDR_NEED_SYNC) ){
    /* PGHDR_NEED_SYNC was just cleared on every page, so the whole
    ** dirty list is now considered synced. */
    pCache->pSynced = pCache->pDirtyTail;
    assert( !pCache->pSynced || (pCache->pSynced->flags&PGHDR_NEED_SYNC)==0 );
  }

  pcacheExitMutex();
}
|
sl@0
|
1215 |
|
sl@0
|
1216 |
/*
** Return the configured (suggested) cache-size value.
*/
int sqlite3PcacheGetCachesize(PCache *pCache){
  return pCache->nMax;
}
|
sl@0
|
1222 |
|
sl@0
|
1223 |
/*
** Set the suggested cache-size value.  Values below 10 are clamped up
** to 10.  For purgeable caches the global page limit is adjusted and
** any pages now over budget are released immediately.
*/
void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){
  if( mxPage<10 ){
    mxPage = 10;   /* enforce a minimum cache size of 10 pages */
  }
  if( pCache->bPurgeable ){
    pcacheEnterMutex();
    /* Replace this cache's old contribution to the global limit. */
    pcache_g.nMaxPage -= pCache->nMax;
    pcache_g.nMaxPage += mxPage;
    pcacheEnforceMaxPage();
    pcacheExitMutex();
  }
  pCache->nMax = mxPage;
}
|
sl@0
|
1239 |
|
sl@0
|
1240 |
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
|
sl@0
|
1241 |
/*
|
sl@0
|
1242 |
** This function is called to free superfluous dynamically allocated memory
|
sl@0
|
1243 |
** held by the pager system. Memory in use by any SQLite pager allocated
|
sl@0
|
1244 |
** by the current thread may be sqlite3_free()ed.
|
sl@0
|
1245 |
**
|
sl@0
|
1246 |
** nReq is the number of bytes of memory required. Once this much has
|
sl@0
|
1247 |
** been released, the function returns. The return value is the total number
|
sl@0
|
1248 |
** of bytes of memory released.
|
sl@0
|
1249 |
*/
|
sl@0
|
1250 |
int sqlite3PcacheReleaseMemory(int nReq){
|
sl@0
|
1251 |
int nFree = 0;
|
sl@0
|
1252 |
if( pcache_g.pStart==0 ){
|
sl@0
|
1253 |
PgHdr *p;
|
sl@0
|
1254 |
pcacheEnterMutex();
|
sl@0
|
1255 |
while( (nReq<0 || nFree<nReq) && (p=pcacheRecyclePage()) ){
|
sl@0
|
1256 |
nFree += pcachePageSize(p);
|
sl@0
|
1257 |
pcachePageFree(p);
|
sl@0
|
1258 |
}
|
sl@0
|
1259 |
pcacheExitMutex();
|
sl@0
|
1260 |
}
|
sl@0
|
1261 |
return nFree;
|
sl@0
|
1262 |
}
|
sl@0
|
1263 |
#endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */
|
sl@0
|
1264 |
|
sl@0
|
1265 |
#ifdef SQLITE_TEST
|
sl@0
|
1266 |
void sqlite3PcacheStats(
|
sl@0
|
1267 |
int *pnCurrent,
|
sl@0
|
1268 |
int *pnMax,
|
sl@0
|
1269 |
int *pnMin,
|
sl@0
|
1270 |
int *pnRecyclable
|
sl@0
|
1271 |
){
|
sl@0
|
1272 |
PgHdr *p;
|
sl@0
|
1273 |
int nRecyclable = 0;
|
sl@0
|
1274 |
for(p=pcache_g.pLruHead; p; p=p->pNextLru){
|
sl@0
|
1275 |
nRecyclable++;
|
sl@0
|
1276 |
}
|
sl@0
|
1277 |
|
sl@0
|
1278 |
*pnCurrent = pcache_g.nCurrentPage;
|
sl@0
|
1279 |
*pnMax = pcache_g.nMaxPage;
|
sl@0
|
1280 |
*pnMin = pcache_g.nMinPage;
|
sl@0
|
1281 |
*pnRecyclable = nRecyclable;
|
sl@0
|
1282 |
}
|
sl@0
|
1283 |
#endif
|