/*
** 2007 October 14
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains the C functions that implement a memory
** allocation subsystem for use by SQLite.
**
** This version of the memory allocation subsystem omits all
** use of malloc(). The SQLite user supplies a block of memory
** before calling sqlite3_initialize() from which allocations
** are made and returned by the xMalloc() and xRealloc()
** implementations. Once sqlite3_initialize() has been called,
** the amount of memory available to SQLite is fixed and cannot
** be changed.
**
** This version of the memory allocation subsystem is included
** in the build only if SQLITE_ENABLE_MEMSYS3 is defined.
**
** $Id: mem3.c,v 1.20 2008/07/18 18:56:17 drh Exp $
*/
#include "sqliteInt.h"

/*
** This version of the memory allocator is only built into the library
** if SQLITE_ENABLE_MEMSYS3 is defined. Defining this symbol does not
** mean that the library will use a memory-pool by default, just that
** it is available. The mempool allocator is activated by calling
** sqlite3_config().
*/
#ifdef SQLITE_ENABLE_MEMSYS3

/*
** Maximum size (in Mem3Blocks) of a "small" chunk.
*/
#define MX_SMALL 10


/*
** Number of freelist hash slots
*/
#define N_HASH 61

/*
** A memory allocation (also called a "chunk") consists of two or
** more blocks where each block is 8 bytes. The first 8 bytes are
** a header that is not returned to the user.
**
** A chunk is two or more blocks and is either checked out or
** free. The first block has format u.hdr. u.hdr.size4x is 4 times the
** size of the allocation in blocks if the allocation is free.
** The u.hdr.size4x&1 bit is true if the chunk is checked out and
** false if the chunk is on the freelist. The u.hdr.size4x&2 bit
** is true if the previous chunk is checked out and false if the
** previous chunk is free. The u.hdr.prevSize field is the size of
** the previous chunk in blocks if the previous chunk is on the
** freelist. If the previous chunk is checked out, then
** u.hdr.prevSize can be part of the data for that chunk and should
** not be read or written.
**
** We often identify a chunk by its index in mem3.aPool[]. When
** this is done, the chunk index refers to the second block of
** the chunk. In this way, the first chunk has an index of 1.
** A chunk index of 0 means "no such chunk" and is the equivalent
** of a NULL pointer.
**
** The second block of a free chunk is of the form u.list. The
** two fields form a double-linked list of chunks of related sizes.
** Pointers to the head of the list are stored in mem3.aiSmall[]
** for smaller chunks and mem3.aiHash[] for larger chunks.
**
** The second block of a chunk is user data if the chunk is checked
** out. If a chunk is checked out, the user data may extend into
** the u.hdr.prevSize value of the following chunk.
*/
typedef struct Mem3Block Mem3Block;
struct Mem3Block {
  union {
    struct {
      u32 prevSize;   /* Size of previous chunk in Mem3Block elements */
      u32 size4x;     /* 4x the size of current chunk in Mem3Block elements */
    } hdr;
    struct {
      u32 next;       /* Index in mem3.aPool[] of next free chunk */
      u32 prev;       /* Index in mem3.aPool[] of previous free chunk */
    } list;
  } u;
};

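/*
** A worked example of the header encoding above (an illustrative
** sketch only, reasoning from the rules stated in the comment): a free
** chunk that spans 3 Mem3Blocks stores u.hdr.size4x = 3*4 = 12 in its
** first block.  Checking the chunk out sets bit 0, giving size4x = 13.
** If the chunk immediately before it is also checked out, bit 1 is set
** as well, giving 15.  In every case size4x/4 recovers the size in
** blocks and size4x&3 carries the two status bits.
*/
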
/*
** All of the static variables used by this module are collected
** into a single structure named "mem3". This is to keep the
** static variables organized and to reduce namespace pollution
** when this module is combined with others in the amalgamation.
*/
static struct {
  /*
  ** True if we are evaluating an out-of-memory callback.
  */
  int alarmBusy;

  /*
  ** Mutex to control access to the memory allocation subsystem.
  */
  sqlite3_mutex *mutex;

  /*
  ** The minimum amount of free space that we have seen.
  */
  u32 mnMaster;

  /*
  ** iMaster is the index of the master chunk. Most new allocations
  ** occur off of this chunk. szMaster is the size (in Mem3Blocks)
  ** of the current master. iMaster is 0 if there is no master chunk.
  ** The master chunk is not in either the aiHash[] or aiSmall[].
  */
  u32 iMaster;
  u32 szMaster;

  /*
  ** Array of lists of free blocks according to the block size
  ** for smaller chunks, or a hash on the block size for larger
  ** chunks.
  */
  u32 aiSmall[MX_SMALL-1];   /* For sizes 2 through MX_SMALL, inclusive */
  u32 aiHash[N_HASH];        /* For sizes MX_SMALL+1 and larger */

  /*
  ** Memory available for allocation. nPool is the size of the array
  ** (in Mem3Blocks) pointed to by aPool less 2.
  */
  u32 nPool;
  Mem3Block *aPool;
} mem3;

/*
** Unlink the chunk at mem3.aPool[i] from the list it is currently
** on.  *pRoot is the list that i is a member of.
*/
static void memsys3UnlinkFromList(u32 i, u32 *pRoot){
  u32 next = mem3.aPool[i].u.list.next;
  u32 prev = mem3.aPool[i].u.list.prev;
  assert( sqlite3_mutex_held(mem3.mutex) );
  if( prev==0 ){
    *pRoot = next;
  }else{
    mem3.aPool[prev].u.list.next = next;
  }
  if( next ){
    mem3.aPool[next].u.list.prev = prev;
  }
  mem3.aPool[i].u.list.next = 0;
  mem3.aPool[i].u.list.prev = 0;
}

/*
** Unlink the chunk at index i from
** whatever list it is currently a member of.
*/
static void memsys3Unlink(u32 i){
  u32 size, hash;
  assert( sqlite3_mutex_held(mem3.mutex) );
  assert( (mem3.aPool[i-1].u.hdr.size4x & 1)==0 );
  assert( i>=1 );
  size = mem3.aPool[i-1].u.hdr.size4x/4;
  assert( size==mem3.aPool[i+size-1].u.hdr.prevSize );
  assert( size>=2 );
  if( size <= MX_SMALL ){
    memsys3UnlinkFromList(i, &mem3.aiSmall[size-2]);
  }else{
    hash = size % N_HASH;
    memsys3UnlinkFromList(i, &mem3.aiHash[hash]);
  }
}

/*
** Link the chunk at mem3.aPool[i] so that it is on the list rooted
** at *pRoot.
*/
static void memsys3LinkIntoList(u32 i, u32 *pRoot){
  assert( sqlite3_mutex_held(mem3.mutex) );
  mem3.aPool[i].u.list.next = *pRoot;
  mem3.aPool[i].u.list.prev = 0;
  if( *pRoot ){
    mem3.aPool[*pRoot].u.list.prev = i;
  }
  *pRoot = i;
}

/*
** Link the chunk at index i into either the appropriate
** small chunk list, or into the large chunk hash table.
*/
static void memsys3Link(u32 i){
  u32 size, hash;
  assert( sqlite3_mutex_held(mem3.mutex) );
  assert( i>=1 );
  assert( (mem3.aPool[i-1].u.hdr.size4x & 1)==0 );
  size = mem3.aPool[i-1].u.hdr.size4x/4;
  assert( size==mem3.aPool[i+size-1].u.hdr.prevSize );
  assert( size>=2 );
  if( size <= MX_SMALL ){
    memsys3LinkIntoList(i, &mem3.aiSmall[size-2]);
  }else{
    hash = size % N_HASH;
    memsys3LinkIntoList(i, &mem3.aiHash[hash]);
  }
}

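/*
** Bucket selection examples (an illustrative sketch based on the logic
** in memsys3Link()/memsys3Unlink() above): a free chunk of 7 blocks goes
** on the small list mem3.aiSmall[7-2] = aiSmall[5]; a free chunk of 100
** blocks is larger than MX_SMALL, so it goes on the hash bucket
** mem3.aiHash[100 % 61] = aiHash[39].
*/
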
/*
** If the STATIC_MEM mutex is not already held, obtain it now. The mutex
** will already be held (obtained by code in malloc.c) if
** sqlite3Config.bMemstat is true.
*/
static void memsys3Enter(void){
  if( sqlite3Config.bMemstat==0 && mem3.mutex==0 ){
    mem3.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM);
  }
  sqlite3_mutex_enter(mem3.mutex);
}
static void memsys3Leave(void){
  sqlite3_mutex_leave(mem3.mutex);
}

/*
** Called when we are unable to satisfy an allocation of nByte bytes.
*/
static void memsys3OutOfMemory(int nByte){
  if( !mem3.alarmBusy ){
    mem3.alarmBusy = 1;
    assert( sqlite3_mutex_held(mem3.mutex) );
    sqlite3_mutex_leave(mem3.mutex);
    sqlite3_release_memory(nByte);
    sqlite3_mutex_enter(mem3.mutex);
    mem3.alarmBusy = 0;
  }
}


/*
** Chunk i is a free chunk that has been unlinked. Adjust its
** size parameters for check-out and return a pointer to the
** user portion of the chunk.
*/
static void *memsys3Checkout(u32 i, int nBlock){
  u32 x;
  assert( sqlite3_mutex_held(mem3.mutex) );
  assert( i>=1 );
  assert( mem3.aPool[i-1].u.hdr.size4x/4==nBlock );
  assert( mem3.aPool[i+nBlock-1].u.hdr.prevSize==nBlock );
  x = mem3.aPool[i-1].u.hdr.size4x;
  mem3.aPool[i-1].u.hdr.size4x = nBlock*4 | 1 | (x&2);
  mem3.aPool[i+nBlock-1].u.hdr.prevSize = nBlock;
  mem3.aPool[i+nBlock-1].u.hdr.size4x |= 2;
  return &mem3.aPool[i];
}

/*
** Carve a piece off of the end of the mem3.iMaster free chunk.
** Return a pointer to the new allocation.  Or, if the master chunk
** is not large enough, return 0.
*/
static void *memsys3FromMaster(int nBlock){
  assert( sqlite3_mutex_held(mem3.mutex) );
  assert( mem3.szMaster>=nBlock );
  if( nBlock>=mem3.szMaster-1 ){
    /* Use the entire master */
    void *p = memsys3Checkout(mem3.iMaster, mem3.szMaster);
    mem3.iMaster = 0;
    mem3.szMaster = 0;
    mem3.mnMaster = 0;
    return p;
  }else{
    /* Split the master block.  Return the tail. */
    u32 newi, x;
    newi = mem3.iMaster + mem3.szMaster - nBlock;
    assert( newi > mem3.iMaster+1 );
    mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = nBlock;
    mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x |= 2;
    mem3.aPool[newi-1].u.hdr.size4x = nBlock*4 + 1;
    mem3.szMaster -= nBlock;
    mem3.aPool[newi-1].u.hdr.prevSize = mem3.szMaster;
    x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2;
    mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x;
    if( mem3.szMaster < mem3.mnMaster ){
      mem3.mnMaster = mem3.szMaster;
    }
    return (void*)&mem3.aPool[newi];
  }
}

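/*
** Split arithmetic example (an illustrative sketch, tracing the "split"
** branch above with made-up numbers): with iMaster==10, szMaster==50 and
** a request of nBlock==8, the tail begins at newi = 10+50-8 = 52 and the
** caller receives &mem3.aPool[52].  Its header block aPool[51] gets
** size4x = 8*4+1 = 33 (8 blocks, checked out) and prevSize = 42, the
** size of the master chunk that remains in front of it; the master
** header aPool[9] is rewritten with size4x = 42*4 = 168 plus its
** existing "previous chunk checked out" bit.
*/
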
/*
** *pRoot is the head of a list of free chunks of the same size
** or same size hash.  In other words, *pRoot is an entry in either
** mem3.aiSmall[] or mem3.aiHash[].
**
** This routine examines all entries on the given list and tries
** to coalesce each entry with adjacent free chunks.
**
** If it sees a chunk that is larger than the current master chunk,
** it makes that chunk the new mem3.iMaster.  In order for
** this mem3.iMaster replacement to work, the master chunk must be
** linked into the hash tables.  That is not the normal state of
** affairs, of course.  The calling routine must link the master
** chunk before invoking this routine, then must unlink the (possibly
** changed) master chunk once this routine has finished.
*/
static void memsys3Merge(u32 *pRoot){
  u32 iNext, prev, size, i, x;

  assert( sqlite3_mutex_held(mem3.mutex) );
  for(i=*pRoot; i>0; i=iNext){
    iNext = mem3.aPool[i].u.list.next;
    size = mem3.aPool[i-1].u.hdr.size4x;
    assert( (size&1)==0 );
    if( (size&2)==0 ){
      memsys3UnlinkFromList(i, pRoot);
      assert( i > mem3.aPool[i-1].u.hdr.prevSize );
      prev = i - mem3.aPool[i-1].u.hdr.prevSize;
      if( prev==iNext ){
        iNext = mem3.aPool[prev].u.list.next;
      }
      memsys3Unlink(prev);
      size = i + size/4 - prev;
      x = mem3.aPool[prev-1].u.hdr.size4x & 2;
      mem3.aPool[prev-1].u.hdr.size4x = size*4 | x;
      mem3.aPool[prev+size-1].u.hdr.prevSize = size;
      memsys3Link(prev);
      i = prev;
    }else{
      size /= 4;
    }
    if( size>mem3.szMaster ){
      mem3.iMaster = i;
      mem3.szMaster = size;
    }
  }
}

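/*
** Coalescing example (an illustrative sketch of one iteration of the
** loop in memsys3Merge() above, with made-up indices): suppose chunk
** i==40 is free, 6 blocks long (size4x==24), and its header says the
** chunk before it is also free (bit 1 clear) with prevSize==4.  Then
** prev = 40-4 = 36, both chunks are unlinked, and the merged chunk at
** index 36 covers size = 40 + 24/4 - 36 = 10 blocks before being
** relinked into the appropriate freelist.
*/
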
/*
** Return a block of memory of at least nBytes in size.
** Return NULL if unable.
**
** This function assumes that the necessary mutexes, if any, are
** already held by the caller. Hence "Unsafe".
*/
static void *memsys3MallocUnsafe(int nByte){
  u32 i;
  int nBlock;
  int toFree;

  assert( sqlite3_mutex_held(mem3.mutex) );
  assert( sizeof(Mem3Block)==8 );
  if( nByte<=12 ){
    nBlock = 2;
  }else{
    nBlock = (nByte + 11)/8;
  }
  assert( nBlock>=2 );

  /* STEP 1:
  ** Look for an entry of the correct size in either the small
  ** chunk table or in the large chunk hash table.  This is
  ** successful most of the time (about 9 times out of 10).
  */
  if( nBlock <= MX_SMALL ){
    i = mem3.aiSmall[nBlock-2];
    if( i>0 ){
      memsys3UnlinkFromList(i, &mem3.aiSmall[nBlock-2]);
      return memsys3Checkout(i, nBlock);
    }
  }else{
    int hash = nBlock % N_HASH;
    for(i=mem3.aiHash[hash]; i>0; i=mem3.aPool[i].u.list.next){
      if( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ){
        memsys3UnlinkFromList(i, &mem3.aiHash[hash]);
        return memsys3Checkout(i, nBlock);
      }
    }
  }

  /* STEP 2:
  ** Try to satisfy the allocation by carving a piece off of the end
  ** of the master chunk.  This step usually works if step 1 fails.
  */
  if( mem3.szMaster>=nBlock ){
    return memsys3FromMaster(nBlock);
  }


  /* STEP 3:
  ** Loop through the entire memory pool.  Coalesce adjacent free
  ** chunks.  Recompute the master chunk as the largest free chunk.
  ** Then try again to satisfy the allocation by carving a piece off
  ** of the end of the master chunk.  This step happens very
  ** rarely (we hope!)
  */
  for(toFree=nBlock*16; toFree<(mem3.nPool*16); toFree *= 2){
    memsys3OutOfMemory(toFree);
    if( mem3.iMaster ){
      memsys3Link(mem3.iMaster);
      mem3.iMaster = 0;
      mem3.szMaster = 0;
    }
    for(i=0; i<N_HASH; i++){
      memsys3Merge(&mem3.aiHash[i]);
    }
    for(i=0; i<MX_SMALL-1; i++){
      memsys3Merge(&mem3.aiSmall[i]);
    }
    if( mem3.szMaster ){
      memsys3Unlink(mem3.iMaster);
      if( mem3.szMaster>=nBlock ){
        return memsys3FromMaster(nBlock);
      }
    }
  }

  /* If none of the above worked, then we fail. */
  return 0;
}

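/*
** Rounding example (an illustrative sketch of the nByte-to-nBlock
** computation at the top of memsys3MallocUnsafe()): a request for 100
** bytes needs nBlock = (100+11)/8 = 13 Mem3Blocks.  The first block of
** the chunk holds the prevSize/size4x header, the remaining 12 blocks
** hold 96 bytes of user data, and the last 4 bytes of the request spill
** into the following chunk's prevSize field, for a usable size of
** 13*8 - 4 = 100 bytes, which is exactly what memsys3Size() reports.
*/
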
/*
** Free an outstanding memory allocation.
**
** This function assumes that the necessary mutexes, if any, are
** already held by the caller. Hence "Unsafe".
*/
void memsys3FreeUnsafe(void *pOld){
  Mem3Block *p = (Mem3Block*)pOld;
  int i;
  u32 size, x;
  assert( sqlite3_mutex_held(mem3.mutex) );
  assert( p>mem3.aPool && p<&mem3.aPool[mem3.nPool] );
  i = p - mem3.aPool;
  assert( (mem3.aPool[i-1].u.hdr.size4x&1)==1 );
  size = mem3.aPool[i-1].u.hdr.size4x/4;
  assert( i+size<=mem3.nPool+1 );
  mem3.aPool[i-1].u.hdr.size4x &= ~1;
  mem3.aPool[i+size-1].u.hdr.prevSize = size;
  mem3.aPool[i+size-1].u.hdr.size4x &= ~2;
  memsys3Link(i);

  /* Try to expand the master using the newly freed chunk */
  if( mem3.iMaster ){
    while( (mem3.aPool[mem3.iMaster-1].u.hdr.size4x&2)==0 ){
      size = mem3.aPool[mem3.iMaster-1].u.hdr.prevSize;
      mem3.iMaster -= size;
      mem3.szMaster += size;
      memsys3Unlink(mem3.iMaster);
      x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2;
      mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x;
      mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster;
    }
    x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2;
    while( (mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x&1)==0 ){
      memsys3Unlink(mem3.iMaster+mem3.szMaster);
      mem3.szMaster += mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x/4;
      mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x;
      mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster;
    }
  }
}

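/*
** Master-expansion example (an illustrative sketch of the backward loop
** in memsys3FreeUnsafe() above, with made-up numbers): if iMaster==100
** with szMaster==20 and the 5-block chunk immediately in front of the
** master has just been freed, the master header's previous-chunk
** checked-out bit is clear and aPool[99].u.hdr.prevSize reads 5, so the
** loop moves iMaster back to 100-5 = 95, grows szMaster to 25, unlinks
** the absorbed chunk from its freelist, and rewrites the boundary
** headers to describe a single 25-block master chunk.
*/
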
/*
** Return the size of an outstanding allocation, in bytes.  The
** size returned omits the 8-byte header overhead.  This only
** works for chunks that are currently checked out.
*/
static int memsys3Size(void *p){
  Mem3Block *pBlock;
  if( p==0 ) return 0;
  pBlock = (Mem3Block*)p;
  assert( (pBlock[-1].u.hdr.size4x&1)!=0 );
  return (pBlock[-1].u.hdr.size4x&~3)*2 - 4;
}

/*
** Round up a request size to the next valid allocation size.
*/
static int memsys3Roundup(int n){
  if( n<=12 ){
    return 12;
  }else{
    return ((n+11)&~7) - 4;
  }
}

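/*
** Consistency example (an illustrative sketch, assuming the 8-byte
** Mem3Block): memsys3Roundup(13) = ((13+11)&~7) - 4 = 24 - 4 = 20, and a
** 13-byte request is carved as nBlock = (13+11)/8 = 3 blocks, for which
** memsys3Size() reports 3*8 - 4 = 20 bytes.  So Roundup() returns exactly
** the usable size that Size() will later report for the same request.
*/
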
/*
** Allocate nBytes of memory.
*/
static void *memsys3Malloc(int nBytes){
  sqlite3_int64 *p;
  assert( nBytes>0 );          /* malloc.c filters out 0 byte requests */
  memsys3Enter();
  p = memsys3MallocUnsafe(nBytes);
  memsys3Leave();
  return (void*)p;
}

/*
** Free memory.
*/
void memsys3Free(void *pPrior){
  assert( pPrior );
  memsys3Enter();
  memsys3FreeUnsafe(pPrior);
  memsys3Leave();
}

/*
** Change the size of an existing memory allocation.
*/
void *memsys3Realloc(void *pPrior, int nBytes){
  int nOld;
  void *p;
  if( pPrior==0 ){
    return sqlite3_malloc(nBytes);
  }
  if( nBytes<=0 ){
    sqlite3_free(pPrior);
    return 0;
  }
  nOld = memsys3Size(pPrior);
  if( nBytes<=nOld && nBytes>=nOld-128 ){
    return pPrior;
  }
  memsys3Enter();
  p = memsys3MallocUnsafe(nBytes);
  if( p ){
    if( nOld<nBytes ){
      memcpy(p, pPrior, nOld);
    }else{
      memcpy(p, pPrior, nBytes);
    }
    memsys3FreeUnsafe(pPrior);
  }
  memsys3Leave();
  return p;
}

/*
** Initialize this module.
*/
static int memsys3Init(void *NotUsed){
  if( !sqlite3Config.pHeap ){
    return SQLITE_ERROR;
  }

  /* Store a pointer to the memory block in global structure mem3. */
  assert( sizeof(Mem3Block)==8 );
  mem3.aPool = (Mem3Block *)sqlite3Config.pHeap;
  mem3.nPool = (sqlite3Config.nHeap / sizeof(Mem3Block)) - 2;

  /* Initialize the master block. */
  mem3.szMaster = mem3.nPool;
  mem3.mnMaster = mem3.szMaster;
  mem3.iMaster = 1;
  mem3.aPool[0].u.hdr.size4x = (mem3.szMaster<<2) + 2;
  mem3.aPool[mem3.nPool].u.hdr.prevSize = mem3.nPool;
  mem3.aPool[mem3.nPool].u.hdr.size4x = 1;

  return SQLITE_OK;
}

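/*
** Initialization example (an illustrative sketch with a made-up heap
** size): if the application supplies a 1 MiB heap, nHeap = 1048576 gives
** nPool = 1048576/8 - 2 = 131070 Mem3Blocks.  The entire pool becomes the
** master chunk at index 1: aPool[0] gets size4x = 131070*4 + 2 (free, with
** the non-existent "previous" chunk treated as checked out), while
** aPool[131070] acts as a sentinel whose size4x = 1 marks it permanently
** checked out so coalescing never runs past the end of the pool.
*/
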
/*
** Deinitialize this module.
*/
static void memsys3Shutdown(void *NotUsed){
  return;
}



/*
** Open the file indicated and write a log of all unfreed memory
** allocations into that log.
*/
#ifdef SQLITE_DEBUG
void sqlite3Memsys3Dump(const char *zFilename){
  FILE *out;
  int i, j;
  u32 size;
  if( zFilename==0 || zFilename[0]==0 ){
    out = stdout;
  }else{
    out = fopen(zFilename, "w");
    if( out==0 ){
      fprintf(stderr, "** Unable to output memory debug output log: %s **\n",
                      zFilename);
      return;
    }
  }
  memsys3Enter();
  fprintf(out, "CHUNKS:\n");
  for(i=1; i<=mem3.nPool; i+=size/4){
    size = mem3.aPool[i-1].u.hdr.size4x;
    if( size/4<=1 ){
      fprintf(out, "%p size error\n", &mem3.aPool[i]);
      assert( 0 );
      break;
    }
    if( (size&1)==0 && mem3.aPool[i+size/4-1].u.hdr.prevSize!=size/4 ){
      fprintf(out, "%p tail size does not match\n", &mem3.aPool[i]);
      assert( 0 );
      break;
    }
    if( ((mem3.aPool[i+size/4-1].u.hdr.size4x&2)>>1)!=(size&1) ){
      fprintf(out, "%p tail checkout bit is incorrect\n", &mem3.aPool[i]);
      assert( 0 );
      break;
    }
    if( size&1 ){
      fprintf(out, "%p %6d bytes checked out\n", &mem3.aPool[i], (size/4)*8-8);
    }else{
      fprintf(out, "%p %6d bytes free%s\n", &mem3.aPool[i], (size/4)*8-8,
                  i==mem3.iMaster ? " **master**" : "");
    }
  }
  for(i=0; i<MX_SMALL-1; i++){
    if( mem3.aiSmall[i]==0 ) continue;
    fprintf(out, "small(%2d):", i);
    for(j = mem3.aiSmall[i]; j>0; j=mem3.aPool[j].u.list.next){
      fprintf(out, " %p(%d)", &mem3.aPool[j],
              (mem3.aPool[j-1].u.hdr.size4x/4)*8-8);
    }
    fprintf(out, "\n");
  }
  for(i=0; i<N_HASH; i++){
    if( mem3.aiHash[i]==0 ) continue;
    fprintf(out, "hash(%2d):", i);
    for(j = mem3.aiHash[i]; j>0; j=mem3.aPool[j].u.list.next){
      fprintf(out, " %p(%d)", &mem3.aPool[j],
              (mem3.aPool[j-1].u.hdr.size4x/4)*8-8);
    }
    fprintf(out, "\n");
  }
  fprintf(out, "master=%d\n", mem3.iMaster);
  fprintf(out, "nowUsed=%d\n", mem3.nPool*8 - mem3.szMaster*8);
  fprintf(out, "mxUsed=%d\n", mem3.nPool*8 - mem3.mnMaster*8);
  sqlite3_mutex_leave(mem3.mutex);
  if( out==stdout ){
    fflush(stdout);
  }else{
    fclose(out);
  }
}
#endif

/*
** This routine is the only routine in this file with external
** linkage.
**
** Populate the low-level memory allocation function pointers in
** sqlite3Config.m with pointers to the routines in this file.  The
** block of memory to manage is supplied separately through
** sqlite3_config().
**
** This routine is only called by sqlite3_config(), and therefore
** is not required to be threadsafe (it is not).
*/
const sqlite3_mem_methods *sqlite3MemGetMemsys3(void){
  static const sqlite3_mem_methods mempoolMethods = {
     memsys3Malloc,
     memsys3Free,
     memsys3Realloc,
     memsys3Size,
     memsys3Roundup,
     memsys3Init,
     memsys3Shutdown,
     0
  };
  return &mempoolMethods;
}

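/*
** Usage sketch (illustrative only; it assumes a build with
** SQLITE_ENABLE_MEMSYS3 defined and that the standard SQLITE_CONFIG_HEAP
** option routes its buffer into sqlite3Config.pHeap/nHeap, which is what
** memsys3Init() above reads).  An application wanting this allocator
** might do roughly the following, where aMemPool is a hypothetical
** application-supplied buffer:
**
**     static char aMemPool[1024*1024];
**     sqlite3_config(SQLITE_CONFIG_HEAP, aMemPool, sizeof(aMemPool), 12);
**     sqlite3_initialize();
**
** after which allocations made through sqlite3_malloc() and friends are
** served out of aMemPool by the routines in this file.
*/
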

#endif /* SQLITE_ENABLE_MEMSYS3 */