/*
** 2008 July 24
**
** The author disclaims copyright to this source code.  In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains an alternative memory allocation system for SQLite.
** This system is implemented as a wrapper around the system provided
** by the operating system - vanilla malloc(), realloc() and free().
**
** This system differentiates between requests for "small" allocations
** (by default those of 256 bytes or less) and "large" allocations (all
** others). The 256 byte threshold is configurable at runtime.
**
** All requests for large allocations are passed through to the
** default system.
**
** Requests for small allocations are met by allocating space within
** one or more larger "chunks" of memory obtained from the default
** memory allocation system. Chunks of memory are usually 64KB or
** larger. The algorithm used to manage space within each chunk is
** the same as that used by mem5.c.
**
** This strategy is designed to prevent the default memory allocation
** system (usually the system malloc) from suffering from heap
** fragmentation. On some systems, heap fragmentation can cause a
** significant real-time slowdown.
**
** $Id: mem6.c,v 1.10 2008/09/02 17:52:52 danielk1977 Exp $
*/

#ifdef SQLITE_ENABLE_MEMSYS6

#include "sqliteInt.h"

/*
** Maximum size of any "small" allocation is ((1<<LOGMAX)*nAtom) bytes,
** where nAtom is the minimum allocation size. This is not a practical
** limitation.
*/
#define LOGMAX 30

/*
** Minimum size, in bytes, of a memory chunk obtained from the system
** allocator.
*/
#define MIN_CHUNKSIZE (1<<16)

/*
** Log2 of the default minimum allocation size (16 bytes).
*/
#define LOG2_MINALLOC 4

typedef struct Mem6Chunk Mem6Chunk;
typedef struct Mem6Link Mem6Link;

/*
** A minimum allocation is an instance of the following structure.
** Larger allocations are an array of these structures where the
** size of the array is a power of 2.
*/
struct Mem6Link {
  int next;       /* Index of next free block */
  int prev;       /* Index of previous free block */
};

/*
** Masks used for the Mem6Chunk.aCtrl[] elements.
*/
#define CTRL_LOGSIZE  0x1f    /* Log2 size of this block, in nAtom units */
#define CTRL_FREE     0x20    /* True if not checked out */

/*
** A memory chunk obtained from the system allocator. Small allocations
** are carved out of the zPool[] array using a buddy algorithm.
*/
struct Mem6Chunk {
  Mem6Chunk *pNext;             /* Next chunk in the list of all chunks */

  int aiFreelist[LOGMAX+1];     /* Lists of free blocks of various sizes */
  int nCheckedOut;              /* Number of currently outstanding allocations */

  u8 *aCtrl;                    /* One status byte for each block in zPool */

  int nAtom;                    /* Smallest possible allocation in bytes */
  int nBlock;                   /* Number of nAtom sized blocks in zPool */
  u8 *zPool;                    /* Memory from which allocations are made */
};

#define MEM6LINK(idx) ((Mem6Link *)(&pChunk->zPool[(idx)*pChunk->nAtom]))

static SQLITE_WSD struct Mem6Global {
  int nMinAlloc;                /* Minimum allowed allocation size */
  int nThreshold;               /* Allocs larger than this go to malloc() */
  int nLogThreshold;            /* log2 of (nThreshold/nMinAlloc) */
  sqlite3_mutex *mutex;
  Mem6Chunk *pChunk;            /* Singly linked list of all memory chunks */
} mem6 = { 48642791 };

#define mem6 GLOBAL(struct Mem6Global, mem6)
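
/*
** A rough worked example of the chunk layout (illustrative figures only):
** chunkInit() below lays a chunk out as the Mem6Chunk header, then
** nBlock*nAtom bytes of pool space, then one aCtrl[] status byte per
** block, so that
**
**   nBlock = (nChunk - sizeof(Mem6Chunk)) / (nAtom + sizeof(u8))
**
** Assuming a 64KB chunk (MIN_CHUNKSIZE), a 16 byte nAtom and a header of
** roughly 150 bytes on a 32-bit build, this gives a little under 3850
** blocks, or roughly 60KB of usable pool space per chunk.
*/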

/*
** Unlink the block at index i of chunk pChunk from the free list it is
** currently on. It should be found on pChunk->aiFreelist[iLogsize].
*/
static void memsys6Unlink(Mem6Chunk *pChunk, int i, int iLogsize){
  int next, prev;
  assert( i>=0 && i<pChunk->nBlock );
  assert( iLogsize>=0 && iLogsize<=mem6.nLogThreshold );
  assert( (pChunk->aCtrl[i] & CTRL_LOGSIZE)==iLogsize );

  next = MEM6LINK(i)->next;
  prev = MEM6LINK(i)->prev;
  if( prev<0 ){
    pChunk->aiFreelist[iLogsize] = next;
  }else{
    MEM6LINK(prev)->next = next;
  }
  if( next>=0 ){
    MEM6LINK(next)->prev = prev;
  }
}

/*
** Link the block at index i of chunk pChunk so that it is on the
** iLogsize free list.
*/
static void memsys6Link(Mem6Chunk *pChunk, int i, int iLogsize){
  int x;
  assert( i>=0 && i<pChunk->nBlock );
  assert( iLogsize>=0 && iLogsize<=mem6.nLogThreshold );
  assert( (pChunk->aCtrl[i] & CTRL_LOGSIZE)==iLogsize );

  x = MEM6LINK(i)->next = pChunk->aiFreelist[iLogsize];
  MEM6LINK(i)->prev = -1;
  if( x>=0 ){
    assert( x<pChunk->nBlock );
    MEM6LINK(x)->prev = i;
  }
  pChunk->aiFreelist[iLogsize] = i;
}


/*
** Find the first entry on the freelist iLogsize. Unlink that
** entry and return its index.
*/
static int memsys6UnlinkFirst(Mem6Chunk *pChunk, int iLogsize){
  int i;
  int iFirst;

  assert( iLogsize>=0 && iLogsize<=mem6.nLogThreshold );
  i = iFirst = pChunk->aiFreelist[iLogsize];
  assert( iFirst>=0 );
  memsys6Unlink(pChunk, iFirst, iLogsize);
  return iFirst;
}

/*
** Return the smallest value x such that (1<<x) is greater than or equal
** to n. The argument must satisfy 0 < n <= 65536.
*/
static int roundupLog2(int n){
  static const char LogTable256[256] = {
    0,                                                    /* 1 */
    1,                                                    /* 2 */
    2, 2,                                                 /* 3..4 */
    3, 3, 3, 3,                                           /* 5..8 */
    4, 4, 4, 4, 4, 4, 4, 4,                               /* 9..16 */
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,       /* 17..32 */
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,       /* 33..64 */
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,       /* 65..128 */
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,       /* 129..256 */
  };

  assert( n<=(1<<16) && n>0 );
  if( n<=256 ) return LogTable256[n-1];
  return LogTable256[(n>>8) - ((n&0xFF)?0:1)] + 8;
}

/*
** Allocate and return a block of (pChunk->nAtom << iLogsize) bytes from chunk
** pChunk. If the allocation request cannot be satisfied, return 0.
*/
static void *chunkMalloc(Mem6Chunk *pChunk, int iLogsize){
  int i;           /* Index of a block within pChunk->zPool */
  int iBin;        /* Index into pChunk->aiFreelist[] */

  /* Make sure pChunk->aiFreelist[iLogsize] contains at least one free
  ** block.  If not, then split a block of the next larger power of
  ** two in order to create a new free block of size iLogsize.
  */
  for(iBin=iLogsize; pChunk->aiFreelist[iBin]<0 && iBin<=mem6.nLogThreshold; iBin++){}
  if( iBin>mem6.nLogThreshold ) return 0;
  i = memsys6UnlinkFirst(pChunk, iBin);
  while( iBin>iLogsize ){
    int newSize;
    iBin--;
    newSize = 1 << iBin;
    pChunk->aCtrl[i+newSize] = CTRL_FREE | iBin;
    memsys6Link(pChunk, i+newSize, iBin);
  }
  pChunk->aCtrl[i] = iLogsize;

  /* Return a pointer to the allocated memory. */
  pChunk->nCheckedOut++;
  return (void*)&pChunk->zPool[i*pChunk->nAtom];
}
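
/*
** A worked example of the buddy scheme (illustrative): suppose the only
** free block in pChunk is a logsize-3 block (8 atoms) starting at block
** index 0, and chunkMalloc(pChunk, 0) is called.  The splitting loop in
** chunkMalloc() leaves free blocks of logsize 2 at index 4, logsize 1 at
** index 2 and logsize 0 at index 1, and returns the single-atom block at
** index 0.  Freeing that block again (see chunkFree() below) merges it
** with each of those buddies in turn, restoring the original logsize-3
** free block at index 0.
*/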

/*
** Free the allocation pointed to by pOld, which is guaranteed to be non-zero
** and a part of chunk object pChunk.
*/
static void chunkFree(Mem6Chunk *pChunk, void *pOld){
  u32 size, iLogsize;
  int iBlock;

  /* Set iBlock to the index of the block pointed to by pOld in
  ** the array of pChunk->nAtom byte blocks pointed to by pChunk->zPool.
  */
  iBlock = ((u8 *)pOld-pChunk->zPool)/pChunk->nAtom;

  /* Check that the pointer pOld points to a valid, non-free block. */
  assert( iBlock>=0 && iBlock<pChunk->nBlock );
  assert( ((u8 *)pOld-pChunk->zPool)%pChunk->nAtom==0 );
  assert( (pChunk->aCtrl[iBlock] & CTRL_FREE)==0 );

  iLogsize = pChunk->aCtrl[iBlock] & CTRL_LOGSIZE;
  size = 1<<iLogsize;
  assert( iBlock+size-1<pChunk->nBlock );

  pChunk->aCtrl[iBlock] |= CTRL_FREE;
  pChunk->aCtrl[iBlock+size-1] |= CTRL_FREE;

  /* Merge the freed block with its buddy, repeatedly, until no free
  ** buddy of the current size remains.
  */
  pChunk->aCtrl[iBlock] = CTRL_FREE | iLogsize;
  while( iLogsize<mem6.nLogThreshold ){
    int iBuddy;
    if( (iBlock>>iLogsize) & 1 ){
      iBuddy = iBlock - size;
    }else{
      iBuddy = iBlock + size;
    }
    assert( iBuddy>=0 );
    if( (iBuddy+(1<<iLogsize))>pChunk->nBlock ) break;
    if( pChunk->aCtrl[iBuddy]!=(CTRL_FREE | iLogsize) ) break;
    memsys6Unlink(pChunk, iBuddy, iLogsize);
    iLogsize++;
    if( iBuddy<iBlock ){
      pChunk->aCtrl[iBuddy] = CTRL_FREE | iLogsize;
      pChunk->aCtrl[iBlock] = 0;
      iBlock = iBuddy;
    }else{
      pChunk->aCtrl[iBlock] = CTRL_FREE | iLogsize;
      pChunk->aCtrl[iBuddy] = 0;
    }
    size *= 2;
  }
  pChunk->nCheckedOut--;
  memsys6Link(pChunk, iBlock, iLogsize);
}

/*
** Return the actual size of the block pointed to by p, which is guaranteed
** to have been allocated from chunk pChunk.
*/
static int chunkSize(Mem6Chunk *pChunk, void *p){
  int iSize = 0;
  if( p ){
    int i = ((u8 *)p-pChunk->zPool)/pChunk->nAtom;
    assert( i>=0 && i<pChunk->nBlock );
    iSize = pChunk->nAtom * (1 << (pChunk->aCtrl[i]&CTRL_LOGSIZE));
  }
  return iSize;
}

/*
** Return true if there are currently no outstanding allocations.
*/
static int chunkIsEmpty(Mem6Chunk *pChunk){
  return (pChunk->nCheckedOut==0);
}

/*
** Initialize the buffer zChunk, which is nChunk bytes in size, as
** a Mem6Chunk object. Return a copy of the zChunk pointer.
*/
static Mem6Chunk *chunkInit(u8 *zChunk, int nChunk, int nMinAlloc){
  int ii;
  int iOffset;
  Mem6Chunk *pChunk = (Mem6Chunk *)zChunk;

  assert( nChunk>sizeof(Mem6Chunk) );
  assert( nMinAlloc>sizeof(Mem6Link) );

  memset(pChunk, 0, sizeof(Mem6Chunk));
  pChunk->nAtom = nMinAlloc;
  pChunk->nBlock = ((nChunk-sizeof(Mem6Chunk)) / (pChunk->nAtom+sizeof(u8)));

  pChunk->zPool = (u8 *)&pChunk[1];
  pChunk->aCtrl = &pChunk->zPool[pChunk->nBlock*pChunk->nAtom];

  for(ii=0; ii<=mem6.nLogThreshold; ii++){
    pChunk->aiFreelist[ii] = -1;
  }

  /* Seed the free lists by carving the pool into the largest possible
  ** power-of-two blocks, working down from the threshold size.
  */
  iOffset = 0;
  for(ii=mem6.nLogThreshold; ii>=0; ii--){
    int nAlloc = (1<<ii);
    while( (iOffset+nAlloc)<=pChunk->nBlock ){
      pChunk->aCtrl[iOffset] = ii | CTRL_FREE;
      memsys6Link(pChunk, iOffset, ii);
      iOffset += nAlloc;
    }
  }

  return pChunk;
}
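
/*
** A worked example of the seeding loop in chunkInit() (illustrative):
** with nBlock==1000 and mem6.nLogThreshold==4, the loop links 62 free
** blocks of 16 atoms each (logsize 4, covering blocks 0..991) followed
** by one free block of 8 atoms (logsize 3, blocks 992..999), so the
** entire pool starts out on the free lists.
*/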

/*
** Enter and leave the mutex protecting the mem6 global state.
*/
static void mem6Enter(void){
  sqlite3_mutex_enter(mem6.mutex);
}

static void mem6Leave(void){
  sqlite3_mutex_leave(mem6.mutex);
}

/*
** Based on the number and size of the currently allocated chunks, return
** the size of the next chunk to allocate, in bytes.
*/
static int nextChunkSize(void){
  int iTotal = MIN_CHUNKSIZE;
  Mem6Chunk *p;
  for(p=mem6.pChunk; p; p=p->pNext){
    iTotal = iTotal*2;
  }
  return iTotal;
}

/*
** Remove pChunk from the list of chunks and return it to the system
** allocator. pChunk is guaranteed to be on the list.
*/
static void freeChunk(Mem6Chunk *pChunk){
  Mem6Chunk **pp = &mem6.pChunk;
  for( pp=&mem6.pChunk; *pp!=pChunk; pp = &(*pp)->pNext );
  *pp = (*pp)->pNext;
  free(pChunk);
}

/*
** Allocate nByte bytes of memory. An 8 byte header records the offset of
** the allocation within its chunk (zero for allocations passed through to
** the system malloc()) and the requested size.
*/
static void *memsys6Malloc(int nByte){
  Mem6Chunk *pChunk;
  void *p = 0;
  int nTotal = nByte+8;
  int iOffset = 0;

  if( nTotal>mem6.nThreshold ){
    p = malloc(nTotal);
  }else{
    int iLogsize = 0;
    if( nTotal>(1<<LOG2_MINALLOC) ){
      iLogsize = roundupLog2(nTotal) - LOG2_MINALLOC;
    }
    mem6Enter();
    for(pChunk=mem6.pChunk; pChunk; pChunk=pChunk->pNext){
      p = chunkMalloc(pChunk, iLogsize);
      if( p ){
        break;
      }
    }
    if( !p ){
      int iSize = nextChunkSize();
      p = malloc(iSize);
      if( p ){
        pChunk = chunkInit((u8 *)p, iSize, mem6.nMinAlloc);
        pChunk->pNext = mem6.pChunk;
        mem6.pChunk = pChunk;
        p = chunkMalloc(pChunk, iLogsize);
        assert(p);
      }
    }
    iOffset = ((u8*)p - (u8*)pChunk);
    mem6Leave();
  }

  if( !p ){
    return 0;
  }
  ((u32 *)p)[0] = iOffset;
  ((u32 *)p)[1] = nByte;
  return &((u32 *)p)[2];
}

/*
** Return the size of an outstanding allocation, as recorded in its header.
*/
static int memsys6Size(void *pPrior){
  if( pPrior==0 ) return 0;
  return ((u32*)pPrior)[-1];
}

/*
** Free an outstanding allocation. Small allocations are returned to their
** chunk; if the chunk then has no outstanding allocations it is released
** back to the system. Large allocations are passed to free() directly.
*/
static void memsys6Free(void *pPrior){
  int iSlot;
  void *p = &((u32 *)pPrior)[-2];
  iSlot = ((u32 *)p)[0];
  if( iSlot ){
    Mem6Chunk *pChunk;
    mem6Enter();
    pChunk = (Mem6Chunk *)(&((u8 *)p)[-1 * iSlot]);
    chunkFree(pChunk, p);
    if( chunkIsEmpty(pChunk) ){
      freeChunk(pChunk);
    }
    mem6Leave();
  }else{
    free(p);
  }
}

/*
** Resize an allocation. If the new size is no larger than the size
** recorded for the existing allocation, the original pointer is returned
** unchanged; otherwise a new allocation is made and the contents copied.
*/
static void *memsys6Realloc(void *p, int nByte){
  void *p2;

  if( p && nByte<=memsys6Size(p) ){
    p2 = p;
  }else{
    p2 = memsys6Malloc(nByte);
    if( p && p2 ){
      memcpy(p2, p, memsys6Size(p));
      memsys6Free(p);
    }
  }

  return p2;
}

/*
** Round a request size up to the next valid allocation size.
*/
static int memsys6Roundup(int n){
  if( n>mem6.nThreshold ){
    return n;
  }else{
    return (1<<roundupLog2(n));
  }
}
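
/*
** Usage sketch (illustrative, not part of the excerpt above): SQLite
** selects a low-level allocator through an sqlite3_mem_methods object
** installed with sqlite3_config(SQLITE_CONFIG_MALLOC, ...) before
** sqlite3_initialize() is called.  The routines defined in this file map
** onto that interface roughly as follows; the xInit/xShutdown members
** would be supplied by initialization and shutdown code that falls
** outside this excerpt (shown here as hypothetical memsys6Init() and
** memsys6Shutdown() placeholders):
**
**   static const sqlite3_mem_methods memsys6Methods = {
**      memsys6Malloc,     /* xMalloc */
**      memsys6Free,       /* xFree */
**      memsys6Realloc,    /* xRealloc */
**      memsys6Size,       /* xSize */
**      memsys6Roundup,    /* xRoundup */
**      memsys6Init,       /* xInit */
**      memsys6Shutdown,   /* xShutdown */
**      0                  /* pAppData */
**   };
**   sqlite3_config(SQLITE_CONFIG_MALLOC, &memsys6Methods);
*/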