/*
 * tclThreadAlloc.c --
 *
 *	This is a very fast storage allocator for use with threads (designed
 *	to avoid lock contention). The basic strategy is to allocate memory
 *	in fixed-size blocks from block caches.
 *
 * The Initial Developer of the Original Code is America Online, Inc.
 * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclThreadAlloc.c,v 1.4.2.7 2005/12/20 22:16:34 dkf Exp $
 */

#include "tclInt.h"

#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC) && !defined(TCL_MEM_DEBUG)

#ifdef WIN32
#include "tclWinInt.h"
#else
extern Tcl_Mutex *TclpNewAllocMutex(void);
extern void *TclpGetAllocCache(void);
extern void TclpSetAllocCache(void *);
#endif

/*
 * If range checking is enabled, an additional byte will be allocated to
 * store the magic number at the end of the requested memory.
 */

#ifndef RCHECK
#ifdef NDEBUG
#define RCHECK 0
#else
#define RCHECK 1
#endif
#endif

/*
 * The following defines the number of Tcl_Obj's to allocate/move at a time
 * and the high water mark to prune a per-thread cache. On a 32 bit system,
 * sizeof(Tcl_Obj) = 24, so 800 * 24 = 19200 bytes (about 19k) per batch.
 */

#define NOBJALLOC  800
#define NOBJHIGH  1200

/*
 * The following defines the number of buckets in the bucket cache and their
 * block sizes, ranging from (1<<4) to (1<<(3+NBUCKETS)).
 */

#define NBUCKETS  11
#define MAXALLOC  16384

/*
 * The following union stores accounting information for each block,
 * including two small magic numbers and a bucket number when in use, or a
 * next pointer when free. The original requested size (not including the
 * Block overhead) is also maintained.
 */

typedef struct Block {
    union {
        struct Block *next;             /* Next in free list. */
        struct {
            unsigned char magic1;       /* First magic number. */
            unsigned char bucket;       /* Bucket block allocated from. */
            unsigned char unused;       /* Padding. */
            unsigned char magic2;       /* Second magic number. */
        } b_s;
    } b_u;
    size_t b_reqsize;                   /* Requested allocation size. */
} Block;
#define b_next          b_u.next
#define b_bucket        b_u.b_s.bucket
#define b_magic1        b_u.b_s.magic1
#define b_magic2        b_u.b_s.magic2
#define MAGIC 0xef

/*
 * The following structure defines a bucket of blocks with various
 * accounting and statistics information.
 */

typedef struct Bucket {
    Block *firstPtr;
    long nfree;
    long nget;
    long nput;
    long nwait;
    long nlock;
    long nrequest;
} Bucket;

/*
 * The following structure defines a cache of buckets and objs.
 */

typedef struct Cache {
    struct Cache *nextPtr;
    Tcl_ThreadId owner;
    Tcl_Obj *firstObjPtr;
    int nobjs;
    int nsysalloc;
    Bucket buckets[NBUCKETS];
} Cache;
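
/*
 * Illustrative sketch (compiled out, not part of the allocator): how a
 * request maps onto a bucket. A request of reqsize bytes actually consumes
 * reqsize + sizeof(Block) bytes (+1 trailing guard byte when RCHECK is on)
 * and is served from the smallest bucket whose blocksize, 1<<(4+bucket),
 * covers that total. The function framing and printf output are assumptions
 * for demonstration only.
 */
#if 0
#include <stdio.h>

static void
ExampleShowBucketSizes(void)
{
    int bucket;

    for (bucket = 0; bucket < NBUCKETS; ++bucket) {
        size_t blocksize = (size_t) 1 << (4 + bucket);
        size_t overhead = sizeof(Block) + RCHECK;

        /* Largest user request this bucket can satisfy. */
        printf("bucket %2d: blocksize %5lu, max reqsize %5lu\n",
                bucket, (unsigned long) blocksize,
                (unsigned long) (blocksize - overhead));
    }
}
#endif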
/*
 * The following array specifies various per-bucket limits and locks. The
 * values are statically initialized to avoid calculating them repeatedly.
 */

struct binfo {
    size_t blocksize;           /* Bucket blocksize. */
    int maxblocks;              /* Max blocks before move to share. */
    int nmove;                  /* Num blocks to move to share. */
    Tcl_Mutex *lockPtr;         /* Share bucket lock. */
} binfo[NBUCKETS] = {
    {   16, 1024, 512, NULL},
    {   32,  512, 256, NULL},
    {   64,  256, 128, NULL},
    {  128,  128,  64, NULL},
    {  256,   64,  32, NULL},
    {  512,   32,  16, NULL},
    { 1024,   16,   8, NULL},
    { 2048,    8,   4, NULL},
    { 4096,    4,   2, NULL},
    { 8192,    2,   1, NULL},
    {16384,    1,   1, NULL},
};

/*
 * Static functions defined in this file.
 */

static void     LockBucket(Cache *cachePtr, int bucket);
static void     UnlockBucket(Cache *cachePtr, int bucket);
static void     PutBlocks(Cache *cachePtr, int bucket, int nmove);
static int      GetBlocks(Cache *cachePtr, int bucket);
static Block *  Ptr2Block(char *ptr);
static char *   Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize);
static void     MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove);

/*
 * Local variables defined in this file and initialized at startup.
 */

static Tcl_Mutex *listLockPtr;
static Tcl_Mutex *objLockPtr;
static Cache      sharedCache;
static Cache     *sharedPtr = &sharedCache;
static Cache     *firstCachePtr = &sharedCache;

/*
 *----------------------------------------------------------------------
 *
 * GetCache --
 *
 *	Gets per-thread memory cache, allocating it if necessary.
 *
 * Results:
 *	Pointer to cache.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static Cache *
GetCache(void)
{
    Cache *cachePtr;

    /*
     * Check for first-time initialization.
     */

    if (listLockPtr == NULL) {
        Tcl_Mutex *initLockPtr;
        int i;

        initLockPtr = Tcl_GetAllocMutex();
        Tcl_MutexLock(initLockPtr);
        if (listLockPtr == NULL) {
            listLockPtr = TclpNewAllocMutex();
            objLockPtr = TclpNewAllocMutex();
            for (i = 0; i < NBUCKETS; ++i) {
                binfo[i].lockPtr = TclpNewAllocMutex();
            }
        }
        Tcl_MutexUnlock(initLockPtr);
    }

    /*
     * Get this thread's cache, allocating if necessary.
     */

    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
        cachePtr = calloc(1, sizeof(Cache));
        if (cachePtr == NULL) {
            panic("alloc: could not allocate new cache");
        }
        Tcl_MutexLock(listLockPtr);
        cachePtr->nextPtr = firstCachePtr;
        firstCachePtr = cachePtr;
        Tcl_MutexUnlock(listLockPtr);
        cachePtr->owner = Tcl_GetCurrentThread();
        TclpSetAllocCache(cachePtr);
    }
    return cachePtr;
}
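
/*
 * Illustrative sketch (compiled out): the per-thread lookup pattern used
 * throughout this file. Every entry point first tries the thread-local
 * cache and falls back to GetCache() only on a thread's first allocation.
 * The function framing is an assumption for demonstration only.
 */
#if 0
static Cache *
ExampleCurrentCache(void)
{
    Cache *cachePtr = TclpGetAllocCache();      /* Fast path: TLS lookup. */

    if (cachePtr == NULL) {
        cachePtr = GetCache();                  /* Slow path: first use. */
    }
    return cachePtr;
}
#endif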
/*
 *----------------------------------------------------------------------
 *
 * TclFreeAllocCache --
 *
 *	Flush and delete a cache, removing from list of caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFreeAllocCache(void *arg)
{
    Cache *cachePtr = arg;
    Cache **nextPtrPtr;
    register int bucket;

    /*
     * Flush blocks.
     */

    for (bucket = 0; bucket < NBUCKETS; ++bucket) {
        if (cachePtr->buckets[bucket].nfree > 0) {
            PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].nfree);
        }
    }

    /*
     * Flush objs.
     */

    if (cachePtr->nobjs > 0) {
        Tcl_MutexLock(objLockPtr);
        MoveObjs(cachePtr, sharedPtr, cachePtr->nobjs);
        Tcl_MutexUnlock(objLockPtr);
    }

    /*
     * Remove from pool list.
     */

    Tcl_MutexLock(listLockPtr);
    nextPtrPtr = &firstCachePtr;
    while (*nextPtrPtr != cachePtr) {
        nextPtrPtr = &(*nextPtrPtr)->nextPtr;
    }
    *nextPtrPtr = cachePtr->nextPtr;
    cachePtr->nextPtr = NULL;
    Tcl_MutexUnlock(listLockPtr);
    free(cachePtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *	Allocate memory.
 *
 * Results:
 *	Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *	May allocate more blocks for a bucket.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(unsigned int reqsize)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    register int bucket;
    size_t size;

    if (cachePtr == NULL) {
        cachePtr = GetCache();
    }

    /*
     * Increment the requested size to include room for the Block structure.
     * Call malloc() directly if the required amount is greater than the
     * largest block, otherwise pop the smallest block large enough,
     * allocating more blocks if necessary.
     */

    blockPtr = NULL;
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    if (size > MAXALLOC) {
        bucket = NBUCKETS;
        blockPtr = malloc(size);
        if (blockPtr != NULL) {
            cachePtr->nsysalloc += reqsize;
        }
    } else {
        bucket = 0;
        while (binfo[bucket].blocksize < size) {
            ++bucket;
        }
        if (cachePtr->buckets[bucket].nfree || GetBlocks(cachePtr, bucket)) {
            blockPtr = cachePtr->buckets[bucket].firstPtr;
            cachePtr->buckets[bucket].firstPtr = blockPtr->b_next;
            --cachePtr->buckets[bucket].nfree;
            ++cachePtr->buckets[bucket].nget;
            cachePtr->buckets[bucket].nrequest += reqsize;
        }
    }
    if (blockPtr == NULL) {
        return NULL;
    }
    return Block2Ptr(blockPtr, bucket, reqsize);
}
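
/*
 * Illustrative sketch (compiled out): what TclpAlloc() hands back. The user
 * pointer sits immediately after a hidden Block header, so the header (and,
 * with RCHECK, the trailing guard byte) can be recovered from the pointer.
 * The function framing and assertions are assumptions for demonstration;
 * the bucket index 3 assumes the common 8- or 16-byte Block header.
 */
#if 0
#include <assert.h>

static void
ExampleInspectAllocation(void)
{
    char *ptr = TclpAlloc(100);         /* 100 + sizeof(Block) fits the */
    Block *blockPtr;                    /* 128-byte bucket (bucket 3).  */

    assert(ptr != NULL);
    blockPtr = Ptr2Block(ptr);          /* Also validates both magics. */
    assert(blockPtr->b_bucket == 3);
    assert(blockPtr->b_reqsize == 100);
    TclpFree(ptr);
}
#endif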
/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *	Return blocks to the thread block cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move blocks to shared cache.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(char *ptr)
{
    if (ptr != NULL) {
        Cache *cachePtr = TclpGetAllocCache();
        Block *blockPtr;
        int bucket;

        if (cachePtr == NULL) {
            cachePtr = GetCache();
        }

        /*
         * Get the block back from the user pointer and call system free
         * directly for large blocks. Otherwise, push the block back on the
         * bucket and move blocks to the shared cache if there are now too
         * many free.
         */

        blockPtr = Ptr2Block(ptr);
        bucket = blockPtr->b_bucket;
        if (bucket == NBUCKETS) {
            cachePtr->nsysalloc -= blockPtr->b_reqsize;
            free(blockPtr);
        } else {
            cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
            blockPtr->b_next = cachePtr->buckets[bucket].firstPtr;
            cachePtr->buckets[bucket].firstPtr = blockPtr;
            ++cachePtr->buckets[bucket].nfree;
            ++cachePtr->buckets[bucket].nput;
            if (cachePtr != sharedPtr &&
                    cachePtr->buckets[bucket].nfree > binfo[bucket].maxblocks) {
                PutBlocks(cachePtr, bucket, binfo[bucket].nmove);
            }
        }
    }
}

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *	Re-allocate memory to a larger or smaller size.
 *
 * Results:
 *	Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *	Previous memory, if any, may be freed.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(char *ptr, unsigned int reqsize)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    void *new;
    size_t size, min;
    int bucket;

    if (ptr == NULL) {
        return TclpAlloc(reqsize);
    }

    if (cachePtr == NULL) {
        cachePtr = GetCache();
    }

    /*
     * If the block is not a system block and fits in place, simply return
     * the existing pointer. Otherwise, if the block is a system block and
     * the new size would also require a system block, call realloc()
     * directly.
     */

    blockPtr = Ptr2Block(ptr);
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    bucket = blockPtr->b_bucket;
    if (bucket != NBUCKETS) {
        if (bucket > 0) {
            min = binfo[bucket-1].blocksize;
        } else {
            min = 0;
        }
        if (size > min && size <= binfo[bucket].blocksize) {
            cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
            cachePtr->buckets[bucket].nrequest += reqsize;
            return Block2Ptr(blockPtr, bucket, reqsize);
        }
    } else if (size > MAXALLOC) {
        cachePtr->nsysalloc -= blockPtr->b_reqsize;
        cachePtr->nsysalloc += reqsize;
        blockPtr = realloc(blockPtr, size);
        if (blockPtr == NULL) {
            return NULL;
        }
        return Block2Ptr(blockPtr, NBUCKETS, reqsize);
    }

    /*
     * Finally, perform an expensive malloc/copy/free.
     */

    new = TclpAlloc(reqsize);
    if (new != NULL) {
        if (reqsize > blockPtr->b_reqsize) {
            reqsize = blockPtr->b_reqsize;
        }
        memcpy(new, ptr, reqsize);
        TclpFree(ptr);
    }
    return new;
}
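
/*
 * Illustrative sketch (compiled out): the in-place fast path of
 * TclpRealloc(). Growing a request within the limits of its current bucket
 * returns the same pointer; only the accounting and the stored request size
 * change. The function framing and the particular sizes (chosen so both
 * requests land in the 128-byte bucket with a typical Block header) are
 * assumptions for demonstration only.
 */
#if 0
#include <assert.h>

static void
ExampleReallocInPlace(void)
{
    char *ptr = TclpAlloc(100);                 /* 128-byte bucket.        */
    char *newPtr = TclpRealloc(ptr, 104);       /* Still fits that bucket: */
                                                /* no copy, same address.  */
    assert(newPtr == ptr);
    TclpFree(newPtr);
}
#endif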
/*
 *----------------------------------------------------------------------
 *
 * TclThreadAllocObj --
 *
 *	Allocate a Tcl_Obj from the per-thread cache.
 *
 * Results:
 *	Pointer to uninitialized Tcl_Obj.
 *
 * Side effects:
 *	May move Tcl_Obj's from the shared cache or allocate new Tcl_Obj's
 *	if the list is empty.
 *
 *----------------------------------------------------------------------
 */

Tcl_Obj *
TclThreadAllocObj(void)
{
    register Cache *cachePtr = TclpGetAllocCache();
    register int nmove;
    register Tcl_Obj *objPtr;
    Tcl_Obj *newObjsPtr;

    if (cachePtr == NULL) {
        cachePtr = GetCache();
    }

    /*
     * Get this thread's obj list structure and move or allocate new objs if
     * necessary.
     */

    if (cachePtr->nobjs == 0) {
        Tcl_MutexLock(objLockPtr);
        nmove = sharedPtr->nobjs;
        if (nmove > 0) {
            if (nmove > NOBJALLOC) {
                nmove = NOBJALLOC;
            }
            MoveObjs(sharedPtr, cachePtr, nmove);
        }
        Tcl_MutexUnlock(objLockPtr);
        if (cachePtr->nobjs == 0) {
            cachePtr->nobjs = nmove = NOBJALLOC;
            newObjsPtr = malloc(sizeof(Tcl_Obj) * nmove);
            if (newObjsPtr == NULL) {
                panic("alloc: could not allocate %d new objects", nmove);
            }
            while (--nmove >= 0) {
                objPtr = &newObjsPtr[nmove];
                objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
                cachePtr->firstObjPtr = objPtr;
            }
        }
    }

    /*
     * Pop the first object.
     */

    objPtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
    --cachePtr->nobjs;
    return objPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * TclThreadFreeObj --
 *
 *	Return a free Tcl_Obj to the per-thread cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move free Tcl_Obj's to the shared list upon hitting the high
 *	water mark.
 *
 *----------------------------------------------------------------------
 */

void
TclThreadFreeObj(Tcl_Obj *objPtr)
{
    Cache *cachePtr = TclpGetAllocCache();

    if (cachePtr == NULL) {
        cachePtr = GetCache();
    }

    /*
     * Get this thread's list and push on the free Tcl_Obj.
     */

    objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr;
    ++cachePtr->nobjs;

    /*
     * If the number of free objects has exceeded the high water mark, move
     * some blocks to the shared list.
     */

    if (cachePtr->nobjs > NOBJHIGH) {
        Tcl_MutexLock(objLockPtr);
        MoveObjs(cachePtr, sharedPtr, NOBJALLOC);
        Tcl_MutexUnlock(objLockPtr);
    }
}
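
/*
 * Illustrative sketch (compiled out): free Tcl_Obj's are chained through
 * their own internalRep.otherValuePtr field, so the per-thread free list
 * needs no extra memory. The walk below just counts the list; the function
 * framing is an assumption for demonstration only.
 */
#if 0
static int
ExampleCountFreeObjs(Cache *cachePtr)
{
    Tcl_Obj *objPtr = cachePtr->firstObjPtr;
    int n = 0;

    while (objPtr != NULL) {
        objPtr = objPtr->internalRep.otherValuePtr;
        ++n;
    }
    return n;                           /* Should equal cachePtr->nobjs. */
}
#endif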
/*
 *----------------------------------------------------------------------
 *
 * Tcl_GetMemoryInfo --
 *
 *	Return a list-of-lists of memory stats.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	List appended to given dstring.
 *
 *----------------------------------------------------------------------
 */

void
Tcl_GetMemoryInfo(Tcl_DString *dsPtr)
{
    Cache *cachePtr;
    char buf[200];
    int n;

    Tcl_MutexLock(listLockPtr);
    cachePtr = firstCachePtr;
    while (cachePtr != NULL) {
        Tcl_DStringStartSublist(dsPtr);
        if (cachePtr == sharedPtr) {
            Tcl_DStringAppendElement(dsPtr, "shared");
        } else {
            sprintf(buf, "thread%d", (int) cachePtr->owner);
            Tcl_DStringAppendElement(dsPtr, buf);
        }
        for (n = 0; n < NBUCKETS; ++n) {
            sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
                    (unsigned long) binfo[n].blocksize,
                    cachePtr->buckets[n].nfree,
                    cachePtr->buckets[n].nget,
                    cachePtr->buckets[n].nput,
                    cachePtr->buckets[n].nrequest,
                    cachePtr->buckets[n].nlock,
                    cachePtr->buckets[n].nwait);
            Tcl_DStringAppendElement(dsPtr, buf);
        }
        Tcl_DStringEndSublist(dsPtr);
        cachePtr = cachePtr->nextPtr;
    }
    Tcl_MutexUnlock(listLockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * MoveObjs --
 *
 *	Move Tcl_Obj's between caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove)
{
    register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
    Tcl_Obj *fromFirstObjPtr = objPtr;

    toPtr->nobjs += nmove;
    fromPtr->nobjs -= nmove;

    /*
     * Find the last object to be moved; set the next one (the first one not
     * to be moved) as the first object in the 'from' cache.
     */

    while (--nmove) {
        objPtr = objPtr->internalRep.otherValuePtr;
    }
    fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;

    /*
     * Move all objects as a block - they are already linked to each other,
     * we just have to update the first and last.
     */

    objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
    toPtr->firstObjPtr = fromFirstObjPtr;
}
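
/*
 * Illustrative sketch (compiled out): reading the allocator statistics.
 * Each sublist produced by Tcl_GetMemoryInfo() starts with a cache label
 * ("shared" or "thread<id>") followed by, per bucket, the blocksize and the
 * nfree/nget/nput/nrequest/nlock/nwait counters. The framing here is an
 * assumption for demonstration only.
 */
#if 0
#include <stdio.h>

static void
ExampleDumpMemoryInfo(void)
{
    Tcl_DString ds;

    Tcl_DStringInit(&ds);
    Tcl_GetMemoryInfo(&ds);
    printf("%s\n", Tcl_DStringValue(&ds));
    Tcl_DStringFree(&ds);
}
#endif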
/*
 *----------------------------------------------------------------------
 *
 * Block2Ptr, Ptr2Block --
 *
 *	Convert between internal blocks and user pointers.
 *
 * Results:
 *	User pointer or internal block.
 *
 * Side effects:
 *	Invalid blocks will abort the process.
 *
 *----------------------------------------------------------------------
 */

static char *
Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize)
{
    register void *ptr;

    blockPtr->b_magic1 = blockPtr->b_magic2 = MAGIC;
    blockPtr->b_bucket = bucket;
    blockPtr->b_reqsize = reqsize;
    ptr = ((void *) (blockPtr + 1));
#if RCHECK
    ((unsigned char *)(ptr))[reqsize] = MAGIC;
#endif
    return (char *) ptr;
}

static Block *
Ptr2Block(char *ptr)
{
    register Block *blockPtr;

    blockPtr = (((Block *) ptr) - 1);
    if (blockPtr->b_magic1 != MAGIC
#if RCHECK
            || ((unsigned char *) ptr)[blockPtr->b_reqsize] != MAGIC
#endif
            || blockPtr->b_magic2 != MAGIC) {
        panic("alloc: invalid block: %p: %x %x %x\n",
                blockPtr, blockPtr->b_magic1, blockPtr->b_magic2,
                ((unsigned char *) ptr)[blockPtr->b_reqsize]);
    }
    return blockPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * LockBucket, UnlockBucket --
 *
 *	Set/unset the lock to access a bucket in the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Lock activity and contention are monitored globally and on a
 *	per-cache basis.
 *
 *----------------------------------------------------------------------
 */

static void
LockBucket(Cache *cachePtr, int bucket)
{
#if 0
    if (Tcl_MutexTryLock(binfo[bucket].lockPtr) != TCL_OK) {
        Tcl_MutexLock(binfo[bucket].lockPtr);
        ++cachePtr->buckets[bucket].nwait;
        ++sharedPtr->buckets[bucket].nwait;
    }
#else
    Tcl_MutexLock(binfo[bucket].lockPtr);
#endif
    ++cachePtr->buckets[bucket].nlock;
    ++sharedPtr->buckets[bucket].nlock;
}

static void
UnlockBucket(Cache *cachePtr, int bucket)
{
    Tcl_MutexUnlock(binfo[bucket].lockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * PutBlocks --
 *
 *	Return unused blocks to the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
PutBlocks(Cache *cachePtr, int bucket, int nmove)
{
    register Block *lastPtr, *firstPtr;
    register int n = nmove;

    /*
     * Before acquiring the lock, walk the block list to find the last block
     * to be moved.
     */

    firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
    while (--n > 0) {
        lastPtr = lastPtr->b_next;
    }
    cachePtr->buckets[bucket].firstPtr = lastPtr->b_next;
    cachePtr->buckets[bucket].nfree -= nmove;

    /*
     * Acquire the lock and place the list of blocks at the front of the
     * shared cache bucket.
     */

    LockBucket(cachePtr, bucket);
    lastPtr->b_next = sharedPtr->buckets[bucket].firstPtr;
    sharedPtr->buckets[bucket].firstPtr = firstPtr;
    sharedPtr->buckets[bucket].nfree += nmove;
    UnlockBucket(cachePtr, bucket);
}
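
/*
 * Illustrative sketch (compiled out): how blocks migrate to the shared
 * cache. Freeing more blocks of one size than the bucket's maxblocks limit
 * makes TclpFree() hand nmove of them to the shared cache via PutBlocks(),
 * bounding per-thread hoarding. The loop bounds and function framing are
 * assumptions for demonstration only.
 */
#if 0
static void
ExampleTriggerSharing(void)
{
    char *ptrs[1100];
    int i;

    for (i = 0; i < 1100; ++i) {
        ptrs[i] = TclpAlloc(1);         /* All from the smallest bucket. */
    }
    for (i = 0; i < 1100; ++i) {
        /*
         * Once this thread holds more than that bucket's maxblocks free
         * blocks, the next free moves its nmove blocks to the shared cache.
         */
        TclpFree(ptrs[i]);
    }
}
#endif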
/*
 *----------------------------------------------------------------------
 *
 * GetBlocks --
 *
 *	Get more blocks for a bucket.
 *
 * Results:
 *	1 if blocks were allocated, 0 otherwise.
 *
 * Side effects:
 *	Cache may be filled with available blocks.
 *
 *----------------------------------------------------------------------
 */

static int
GetBlocks(Cache *cachePtr, int bucket)
{
    register Block *blockPtr;
    register int n;
    register size_t size;

    /*
     * First, attempt to move blocks from the shared cache. Note the
     * potentially dirty read of nfree before acquiring the lock which is a
     * slight performance enhancement. The value is verified after the lock
     * is actually acquired.
     */

    if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].nfree > 0) {
        LockBucket(cachePtr, bucket);
        if (sharedPtr->buckets[bucket].nfree > 0) {

            /*
             * Either move the entire list or walk the list to find the last
             * block to move.
             */

            n = binfo[bucket].nmove;
            if (n >= sharedPtr->buckets[bucket].nfree) {
                cachePtr->buckets[bucket].firstPtr =
                        sharedPtr->buckets[bucket].firstPtr;
                cachePtr->buckets[bucket].nfree =
                        sharedPtr->buckets[bucket].nfree;
                sharedPtr->buckets[bucket].firstPtr = NULL;
                sharedPtr->buckets[bucket].nfree = 0;
            } else {
                blockPtr = sharedPtr->buckets[bucket].firstPtr;
                cachePtr->buckets[bucket].firstPtr = blockPtr;
                sharedPtr->buckets[bucket].nfree -= n;
                cachePtr->buckets[bucket].nfree = n;
                while (--n > 0) {
                    blockPtr = blockPtr->b_next;
                }
                sharedPtr->buckets[bucket].firstPtr = blockPtr->b_next;
                blockPtr->b_next = NULL;
            }
        }
        UnlockBucket(cachePtr, bucket);
    }

    if (cachePtr->buckets[bucket].nfree == 0) {

        /*
         * If no blocks could be moved from shared, first look for a larger
         * block in this cache to split up.
         */

        blockPtr = NULL;
        n = NBUCKETS;
        size = 0;       /* lint */
        while (--n > bucket) {
            if (cachePtr->buckets[n].nfree > 0) {
                size = binfo[n].blocksize;
                blockPtr = cachePtr->buckets[n].firstPtr;
                cachePtr->buckets[n].firstPtr = blockPtr->b_next;
                --cachePtr->buckets[n].nfree;
                break;
            }
        }

        /*
         * Otherwise, allocate a big new block directly.
         */

        if (blockPtr == NULL) {
            size = MAXALLOC;
            blockPtr = malloc(size);
            if (blockPtr == NULL) {
                return 0;
            }
        }

        /*
         * Split the larger block into smaller blocks for this bucket.
         */

        n = size / binfo[bucket].blocksize;
        cachePtr->buckets[bucket].nfree = n;
        cachePtr->buckets[bucket].firstPtr = blockPtr;
        while (--n > 0) {
            blockPtr->b_next = (Block *)
                    ((char *) blockPtr + binfo[bucket].blocksize);
            blockPtr = blockPtr->b_next;
        }
        blockPtr->b_next = NULL;
    }
    return 1;
}
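
/*
 * Illustrative sketch (compiled out): the splitting arithmetic at the end
 * of GetBlocks(). A larger free block of `size` bytes is carved into
 * size/blocksize chained blocks for the requested bucket; with the
 * power-of-two sizes above, the division is exact (e.g. one MAXALLOC-byte
 * block yields 16384/16 = 1024 blocks for the 16-byte bucket). The function
 * framing is an assumption for demonstration only.
 */
#if 0
#include <stdio.h>

static void
ExampleShowSplitCounts(void)
{
    int bucket;

    for (bucket = 0; bucket < NBUCKETS; ++bucket) {
        printf("bucket %2d: one MAXALLOC block -> %lu blocks\n",
                bucket, (unsigned long) (MAXALLOC / binfo[bucket].blocksize));
    }
}
#endif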
/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in this
 *	file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)
{
    int i;

    for (i = 0; i < NBUCKETS; ++i) {
        TclpFreeAllocMutex(binfo[i].lockPtr);
        binfo[i].lockPtr = NULL;
    }

    TclpFreeAllocMutex(objLockPtr);
    objLockPtr = NULL;

    TclpFreeAllocMutex(listLockPtr);
    listLockPtr = NULL;

    TclpFreeAllocCache(NULL);
}

#else /* !(TCL_THREADS && USE_THREAD_ALLOC) || TCL_MEM_DEBUG */

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in this
 *	file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)
{
    Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use.");
}

#endif /* TCL_THREADS && USE_THREAD_ALLOC */