/*
 * tclThreadAlloc.c --
 *
 *	This is a very fast storage allocator for use with threads (designed
 *	to avoid lock contention). The basic strategy is to allocate memory
 *	in fixed-size blocks from block caches.
 *
 * The Initial Developer of the Original Code is America Online, Inc.
 * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclThreadAlloc.c,v 1.4.2.7 2005/12/20 22:16:34 dkf Exp $
 */

#include "tclInt.h"

#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC) && !defined(TCL_MEM_DEBUG)

#ifdef WIN32
#include "tclWinInt.h"
#else
extern Tcl_Mutex *TclpNewAllocMutex(void);
extern void *TclpGetAllocCache(void);
extern void TclpSetAllocCache(void *);
#endif

/*
 * If range checking is enabled, an additional byte will be allocated
 * to store the magic number at the end of the requested memory.
 */

#ifndef RCHECK
#ifdef  NDEBUG
#define RCHECK		0
#else
#define RCHECK		1
#endif
#endif

/*
 * The following define the number of Tcl_Obj's to allocate/move
 * at a time and the high water mark to prune a per-thread cache.
 * On a 32 bit system, sizeof(Tcl_Obj) = 24 so 800 * 24 = ~16k.
 */

#define NOBJALLOC	 800
#define NOBJHIGH	1200

/*
 * The following defines the number of buckets in the bucket
 * cache and those block sizes from (1<<4) to (1<<(3+NBUCKETS)).
 */

#define NBUCKETS	  11
#define MAXALLOC	  16384

/*
 * The following union stores accounting information for
 * each block including two small magic numbers and
 * a bucket number when in use or a next pointer when
 * free. The original requested size (not including
 * the Block overhead) is also maintained.
 */

typedef struct Block {
    union {
	struct Block *next;		/* Next in free list. */
	struct {
	    unsigned char magic1;	/* First magic number. */
	    unsigned char bucket;	/* Bucket block allocated from. */
	    unsigned char unused;	/* Padding. */
	    unsigned char magic2;	/* Second magic number. */
	} b_s;
    } b_u;
    size_t b_reqsize;			/* Requested allocation size. */
} Block;
#define b_next		b_u.next
#define b_bucket	b_u.b_s.bucket
#define b_magic1	b_u.b_s.magic1
#define b_magic2	b_u.b_s.magic2
#define MAGIC		0xef
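
/*
 * Illustrative layout of an in-use block as produced by Block2Ptr()
 * below (a sketch, not part of the original sources; assumes RCHECK
 * is enabled):
 *
 *	+---------------------------+ <- Block *blockPtr
 *	| b_u: 'next' (when free)   |
 *	|  or magic1/bucket/        |
 *	|     unused/magic2         |
 *	+---------------------------+
 *	| b_reqsize                 |
 *	+---------------------------+ <- user pointer (blockPtr + 1)
 *	| reqsize bytes of data     |
 *	+---------------------------+
 *	| MAGIC guard byte          | <- written only when RCHECK is on
 *	+---------------------------+
 */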

/*
 * The following structure defines a bucket of blocks with
 * various accounting and statistics information.
 */

typedef struct Bucket {
    Block *firstPtr;		/* First free block in the bucket. */
    long   nfree;		/* Number of free blocks in the bucket. */
    long   nget;		/* Number of blocks handed out. */
    long   nput;		/* Number of blocks returned. */
    long   nwait;		/* Number of waits for the shared lock. */
    long   nlock;		/* Number of acquisitions of the shared lock. */
    long   nrequest;		/* Total bytes requested from the bucket. */
} Bucket;

/*
 * The following structure defines a cache of buckets and objs.
 */

typedef struct Cache {
    struct Cache *nextPtr;	/* Next cache in the linked list. */
    Tcl_ThreadId  owner;	/* Thread that owns this cache. */
    Tcl_Obj      *firstObjPtr;	/* First free Tcl_Obj for this thread. */
    int           nobjs;	/* Number of free Tcl_Obj's. */
    int           nsysalloc;	/* Bytes allocated directly from the system. */
    Bucket        buckets[NBUCKETS];	/* Per-size block buckets. */
} Cache;

/*
 * The following array specifies various per-bucket
 * limits and locks. The values are statically initialized
 * to avoid calculating them repeatedly.
 */

static struct binfo {
    size_t blocksize;		/* Bucket blocksize. */
    int maxblocks;		/* Max blocks before move to share. */
    int nmove;			/* Num blocks to move to share. */
    Tcl_Mutex *lockPtr;		/* Share bucket lock. */
} binfo[NBUCKETS] = {
    {   16, 1024, 512, NULL},
    {   32,  512, 256, NULL},
    {   64,  256, 128, NULL},
    {  128,  128,  64, NULL},
    {  256,   64,  32, NULL},
    {  512,   32,  16, NULL},
    { 1024,   16,   8, NULL},
    { 2048,    8,   4, NULL},
    { 4096,    4,   2, NULL},
    { 8192,    2,   1, NULL},
    {16384,    1,   1, NULL},
};
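
/*
 * Worked example (illustrative, not part of the original sources): a
 * TclpAlloc(100) request needs 100 + sizeof(Block) bytes, plus one
 * guard byte when RCHECK is enabled. With an 8-byte Block on a 32-bit
 * build that is 108-109 bytes, so the linear search in TclpAlloc()
 * stops at the 128-byte bucket (index 3).
 */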

/*
 * Static functions defined in this file.
 */

static void	LockBucket(Cache *cachePtr, int bucket);
static void	UnlockBucket(Cache *cachePtr, int bucket);
static void	PutBlocks(Cache *cachePtr, int bucket, int nmove);
static int	GetBlocks(Cache *cachePtr, int bucket);
static Block *	Ptr2Block(char *ptr);
static char *	Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize);
static void	MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove);

/*
 * Local variables defined in this file and initialized at
 * startup.
 */

static Tcl_Mutex *listLockPtr;
static Tcl_Mutex *objLockPtr;
static Cache	  sharedCache;
static Cache	 *sharedPtr = &sharedCache;
static Cache	 *firstCachePtr = &sharedCache;

/*
 *----------------------------------------------------------------------
 *
 * GetCache ---
 *
 *	Gets per-thread memory cache, allocating it if necessary.
 *
 * Results:
 *	Pointer to cache.
 *
 * Side effects:
 *	May allocate new cache.
 *
 *----------------------------------------------------------------------
 */

static Cache *
GetCache(void)
{
    Cache *cachePtr;

    /*
     * Check for first-time initialization.
     */

    if (listLockPtr == NULL) {
	Tcl_Mutex *initLockPtr;
	int i;

	initLockPtr = Tcl_GetAllocMutex();
	Tcl_MutexLock(initLockPtr);
	if (listLockPtr == NULL) {
	    listLockPtr = TclpNewAllocMutex();
	    objLockPtr = TclpNewAllocMutex();
	    for (i = 0; i < NBUCKETS; ++i) {
		binfo[i].lockPtr = TclpNewAllocMutex();
	    }
	}
	Tcl_MutexUnlock(initLockPtr);
    }

    /*
     * Get this thread's cache, allocating if necessary.
     */

    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
	cachePtr = calloc(1, sizeof(Cache));
	if (cachePtr == NULL) {
	    panic("alloc: could not allocate new cache");
	}
	Tcl_MutexLock(listLockPtr);
	cachePtr->nextPtr = firstCachePtr;
	firstCachePtr = cachePtr;
	Tcl_MutexUnlock(listLockPtr);
	cachePtr->owner = Tcl_GetCurrentThread();
	TclpSetAllocCache(cachePtr);
    }
    return cachePtr;
}

/*
 *----------------------------------------------------------------------
 *
 * TclFreeAllocCache --
 *
 *	Flush and delete a cache, removing from list of caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFreeAllocCache(void *arg)
{
    Cache *cachePtr = arg;
    Cache **nextPtrPtr;
    register int bucket;

    /*
     * Flush blocks.
     */

    for (bucket = 0; bucket < NBUCKETS; ++bucket) {
	if (cachePtr->buckets[bucket].nfree > 0) {
	    PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].nfree);
	}
    }

    /*
     * Flush objs.
     */

    if (cachePtr->nobjs > 0) {
	Tcl_MutexLock(objLockPtr);
	MoveObjs(cachePtr, sharedPtr, cachePtr->nobjs);
	Tcl_MutexUnlock(objLockPtr);
    }

    /*
     * Remove from pool list.
     */

    Tcl_MutexLock(listLockPtr);
    nextPtrPtr = &firstCachePtr;
    while (*nextPtrPtr != cachePtr) {
	nextPtrPtr = &(*nextPtrPtr)->nextPtr;
    }
    *nextPtrPtr = cachePtr->nextPtr;
    cachePtr->nextPtr = NULL;
    Tcl_MutexUnlock(listLockPtr);
    free(cachePtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *	Allocate memory.
 *
 * Results:
 *	Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *	May allocate more blocks for a bucket.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(unsigned int reqsize)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    register int bucket;
    size_t size;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Increment the requested size to include room for
     * the Block structure. Call malloc() directly if the
     * required amount is greater than the largest block,
     * otherwise pop the smallest block large enough,
     * allocating more blocks if necessary.
     */

    blockPtr = NULL;
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    if (size > MAXALLOC) {
	bucket = NBUCKETS;
	blockPtr = malloc(size);
	if (blockPtr != NULL) {
	    cachePtr->nsysalloc += reqsize;
	}
    } else {
	bucket = 0;
	while (binfo[bucket].blocksize < size) {
	    ++bucket;
	}
	if (cachePtr->buckets[bucket].nfree || GetBlocks(cachePtr, bucket)) {
	    blockPtr = cachePtr->buckets[bucket].firstPtr;
	    cachePtr->buckets[bucket].firstPtr = blockPtr->b_next;
	    --cachePtr->buckets[bucket].nfree;
	    ++cachePtr->buckets[bucket].nget;
	    cachePtr->buckets[bucket].nrequest += reqsize;
	}
    }
    if (blockPtr == NULL) {
	return NULL;
    }
    return Block2Ptr(blockPtr, bucket, reqsize);
}
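
/*
 * Usage sketch (illustrative, not part of the original sources):
 * TclpAlloc() and TclpFree() pair up like malloc()/free(), but the
 * fast path touches only this thread's cache and takes no lock:
 *
 *	char *p = TclpAlloc(100);	popped from the 128-byte bucket
 *	...
 *	TclpFree(p);			pushed back on the same bucket
 */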

/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *	Return blocks to the thread block cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move blocks to shared cache.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(char *ptr)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    int bucket;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }
    if (ptr == NULL) {
	return;
    }

    /*
     * Get the block back from the user pointer and
     * call system free directly for large blocks.
     * Otherwise, push the block back on the bucket and
     * move blocks to the shared cache if there are now
     * too many free.
     */

    blockPtr = Ptr2Block(ptr);
    bucket = blockPtr->b_bucket;
    if (bucket == NBUCKETS) {
	cachePtr->nsysalloc -= blockPtr->b_reqsize;
	free(blockPtr);
    } else {
	cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
	blockPtr->b_next = cachePtr->buckets[bucket].firstPtr;
	cachePtr->buckets[bucket].firstPtr = blockPtr;
	++cachePtr->buckets[bucket].nfree;
	++cachePtr->buckets[bucket].nput;
	if (cachePtr != sharedPtr &&
		cachePtr->buckets[bucket].nfree > binfo[bucket].maxblocks) {
	    PutBlocks(cachePtr, bucket, binfo[bucket].nmove);
	}
    }
}

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *	Re-allocate memory to a larger or smaller size.
 *
 * Results:
 *	Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *	Previous memory, if any, may be freed.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(char *ptr, unsigned int reqsize)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    void *new;
    size_t size, min;
    int bucket;

    if (ptr == NULL) {
	return TclpAlloc(reqsize);
    }
    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * If the block is not a system block and fits in place,
     * simply return the existing pointer. Otherwise, if the block
     * is a system block and the new size would also require a system
     * block, call realloc() directly.
     */

    blockPtr = Ptr2Block(ptr);
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    bucket = blockPtr->b_bucket;
    if (bucket != NBUCKETS) {
	if (bucket > 0) {
	    min = binfo[bucket-1].blocksize;
	} else {
	    min = 0;
	}
	if (size > min && size <= binfo[bucket].blocksize) {
	    cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
	    cachePtr->buckets[bucket].nrequest += reqsize;
	    return Block2Ptr(blockPtr, bucket, reqsize);
	}
    } else if (size > MAXALLOC) {
	cachePtr->nsysalloc -= blockPtr->b_reqsize;
	cachePtr->nsysalloc += reqsize;
	blockPtr = realloc(blockPtr, size);
	if (blockPtr == NULL) {
	    return NULL;
	}
	return Block2Ptr(blockPtr, NBUCKETS, reqsize);
    }

    /*
     * Finally, perform an expensive malloc/copy/free.
     */

    new = TclpAlloc(reqsize);
    if (new != NULL) {
	if (reqsize > blockPtr->b_reqsize) {
	    reqsize = blockPtr->b_reqsize;
	}
	memcpy(new, ptr, reqsize);
	TclpFree(ptr);
    }
    return new;
}
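
/*
 * Worked example (illustrative, not part of the original sources):
 * growing a 100-byte allocation to 110 bytes needs at most 119 bytes
 * including overhead, which still fits the original 128-byte bucket,
 * so the pointer is returned unchanged. Growing it to 200 bytes
 * exceeds the bucket and falls through to the malloc/copy/free path
 * above.
 */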

/*
 *----------------------------------------------------------------------
 *
 * TclThreadAllocObj --
 *
 *	Allocate a Tcl_Obj from the per-thread cache.
 *
 * Results:
 *	Pointer to uninitialized Tcl_Obj.
 *
 * Side effects:
 *	May move Tcl_Obj's from shared cache or allocate new Tcl_Obj's
 *	if list is empty.
 *
 *----------------------------------------------------------------------
 */

Tcl_Obj *
TclThreadAllocObj(void)
{
    register Cache *cachePtr = TclpGetAllocCache();
    register int nmove;
    register Tcl_Obj *objPtr;
    Tcl_Obj *newObjsPtr;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Get this thread's obj list structure and move
     * or allocate new objs if necessary.
     */

    if (cachePtr->nobjs == 0) {
	Tcl_MutexLock(objLockPtr);
	nmove = sharedPtr->nobjs;
	if (nmove > 0) {
	    if (nmove > NOBJALLOC) {
		nmove = NOBJALLOC;
	    }
	    MoveObjs(sharedPtr, cachePtr, nmove);
	}
	Tcl_MutexUnlock(objLockPtr);
	if (cachePtr->nobjs == 0) {
	    cachePtr->nobjs = nmove = NOBJALLOC;
	    newObjsPtr = malloc(sizeof(Tcl_Obj) * nmove);
	    if (newObjsPtr == NULL) {
		panic("alloc: could not allocate %d new objects", nmove);
	    }
	    while (--nmove >= 0) {
		objPtr = &newObjsPtr[nmove];
		objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
		cachePtr->firstObjPtr = objPtr;
	    }
	}
    }

    /*
     * Pop the first object.
     */

    objPtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
    --cachePtr->nobjs;
    return objPtr;
}
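
/*
 * Usage sketch (illustrative, not part of the original sources):
 * callers receive an uninitialized Tcl_Obj and must set it up before
 * use. The free list is threaded through internalRep.otherValuePtr,
 * which is safe because a free object's internal representation is no
 * longer meaningful:
 *
 *	Tcl_Obj *objPtr = TclThreadAllocObj();
 *	objPtr->refCount = 0;
 *	objPtr->bytes = NULL;
 *	objPtr->typePtr = NULL;
 *	...
 *	TclThreadFreeObj(objPtr);
 */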

/*
 *----------------------------------------------------------------------
 *
 * TclThreadFreeObj --
 *
 *	Return a free Tcl_Obj to the per-thread cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move free Tcl_Obj's to shared list upon hitting high
 *	water mark.
 *
 *----------------------------------------------------------------------
 */

void
TclThreadFreeObj(Tcl_Obj *objPtr)
{
    Cache *cachePtr = TclpGetAllocCache();

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Get this thread's list and push on the free Tcl_Obj.
     */

    objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr;
    ++cachePtr->nobjs;

    /*
     * If the number of free objects has exceeded the high
     * water mark, move some blocks to the shared list.
     */

    if (cachePtr->nobjs > NOBJHIGH) {
	Tcl_MutexLock(objLockPtr);
	MoveObjs(cachePtr, sharedPtr, NOBJALLOC);
	Tcl_MutexUnlock(objLockPtr);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_GetMemoryInfo --
 *
 *	Return a list-of-lists of memory stats.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	List appended to given dstring.
 *
 *----------------------------------------------------------------------
 */

void
Tcl_GetMemoryInfo(Tcl_DString *dsPtr)
{
    Cache *cachePtr;
    char buf[200];
    int n;

    Tcl_MutexLock(listLockPtr);
    cachePtr = firstCachePtr;
    while (cachePtr != NULL) {
	Tcl_DStringStartSublist(dsPtr);
	if (cachePtr == sharedPtr) {
	    Tcl_DStringAppendElement(dsPtr, "shared");
	} else {
	    sprintf(buf, "thread%d", (int) cachePtr->owner);
	    Tcl_DStringAppendElement(dsPtr, buf);
	}
	for (n = 0; n < NBUCKETS; ++n) {
	    sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
		    (unsigned long) binfo[n].blocksize,
		    cachePtr->buckets[n].nfree,
		    cachePtr->buckets[n].nget,
		    cachePtr->buckets[n].nput,
		    cachePtr->buckets[n].nrequest,
		    cachePtr->buckets[n].nlock,
		    cachePtr->buckets[n].nwait);
	    Tcl_DStringAppendElement(dsPtr, buf);
	}
	Tcl_DStringEndSublist(dsPtr);
	cachePtr = cachePtr->nextPtr;
    }
    Tcl_MutexUnlock(listLockPtr);
}
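
/*
 * The resulting dstring is one sublist per cache; each bucket tuple is
 * "blocksize nfree nget nput nrequest nlock nwait". For example
 * (illustrative, values invented):
 *
 *	{shared {16 0 0 0 0 0 0} {32 5 120 125 8192 3 0} ...}
 *	{thread1 {16 10 50 40 640 0 0} ...}
 */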

/*
 *----------------------------------------------------------------------
 *
 * MoveObjs --
 *
 *	Move Tcl_Obj's between caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove)
{
    register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
    Tcl_Obj *fromFirstObjPtr = objPtr;

    toPtr->nobjs += nmove;
    fromPtr->nobjs -= nmove;

    /*
     * Find the last object to be moved; set the next one
     * (the first one not to be moved) as the first object
     * in the 'from' cache.
     */

    while (--nmove) {
	objPtr = objPtr->internalRep.otherValuePtr;
    }
    fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;

    /*
     * Move all objects as a block - they are already linked to
     * each other, we just have to update the first and last.
     */

    objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
    toPtr->firstObjPtr = fromFirstObjPtr;
}
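
/*
 * Sketch of the splice above (illustrative, not part of the original
 * sources): moving nmove = 2 objects from a cache holding A -> B -> C
 * to one holding X:
 *
 *	before:  from: A -> B -> C	to: X
 *	after:   from: C		to: A -> B -> X
 *
 * Only the first and last links change; A and B stay linked.
 */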

/*
 *----------------------------------------------------------------------
 *
 * Block2Ptr, Ptr2Block --
 *
 *	Convert between internal blocks and user pointers.
 *
 * Results:
 *	User pointer or internal block.
 *
 * Side effects:
 *	Invalid blocks will abort the server.
 *
 *----------------------------------------------------------------------
 */

static char *
Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize)
{
    register void *ptr;

    blockPtr->b_magic1 = blockPtr->b_magic2 = MAGIC;
    blockPtr->b_bucket = bucket;
    blockPtr->b_reqsize = reqsize;
    ptr = ((void *) (blockPtr + 1));
#if RCHECK
    ((unsigned char *)(ptr))[reqsize] = MAGIC;
#endif
    return (char *) ptr;
}

static Block *
Ptr2Block(char *ptr)
{
    register Block *blockPtr;

    blockPtr = (((Block *) ptr) - 1);
    if (blockPtr->b_magic1 != MAGIC
#if RCHECK
	    || ((unsigned char *) ptr)[blockPtr->b_reqsize] != MAGIC
#endif
	    || blockPtr->b_magic2 != MAGIC) {
	panic("alloc: invalid block: %p: %x %x %x\n",
		blockPtr, blockPtr->b_magic1, blockPtr->b_magic2,
		((unsigned char *) ptr)[blockPtr->b_reqsize]);
    }
    return blockPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * LockBucket, UnlockBucket --
 *
 *	Set/unset the lock to access a bucket in the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Lock activity and contention are monitored globally and on
 *	a per-cache basis.
 *
 *----------------------------------------------------------------------
 */

static void
LockBucket(Cache *cachePtr, int bucket)
{
#if 0
    /*
     * Contention-counting variant; disabled because Tcl_MutexTryLock
     * is not part of the supported Tcl mutex API.
     */
    if (Tcl_MutexTryLock(binfo[bucket].lockPtr) != TCL_OK) {
	Tcl_MutexLock(binfo[bucket].lockPtr);
	++cachePtr->buckets[bucket].nwait;
	++sharedPtr->buckets[bucket].nwait;
    }
#else
    Tcl_MutexLock(binfo[bucket].lockPtr);
#endif
    ++cachePtr->buckets[bucket].nlock;
    ++sharedPtr->buckets[bucket].nlock;
}

static void
UnlockBucket(Cache *cachePtr, int bucket)
{
    Tcl_MutexUnlock(binfo[bucket].lockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * PutBlocks --
 *
 *	Return unused blocks to the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
PutBlocks(Cache *cachePtr, int bucket, int nmove)
{
    register Block *lastPtr, *firstPtr;
    register int n = nmove;

    /*
     * Before acquiring the lock, walk the block list to find
     * the last block to be moved.
     */

    firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
    while (--n > 0) {
	lastPtr = lastPtr->b_next;
    }
    cachePtr->buckets[bucket].firstPtr = lastPtr->b_next;
    cachePtr->buckets[bucket].nfree -= nmove;

    /*
     * Acquire the lock and place the list of blocks at the front
     * of the shared cache bucket.
     */

    LockBucket(cachePtr, bucket);
    lastPtr->b_next = sharedPtr->buckets[bucket].firstPtr;
    sharedPtr->buckets[bucket].firstPtr = firstPtr;
    sharedPtr->buckets[bucket].nfree += nmove;
    UnlockBucket(cachePtr, bucket);
}

/*
 *----------------------------------------------------------------------
 *
 * GetBlocks --
 *
 *	Get more blocks for a bucket.
 *
 * Results:
 *	1 if blocks were allocated, 0 otherwise.
 *
 * Side effects:
 *	Cache may be filled with available blocks.
 *
 *----------------------------------------------------------------------
 */

static int
GetBlocks(Cache *cachePtr, int bucket)
{
    register Block *blockPtr;
    register int n;
    register size_t size;

    /*
     * First, attempt to move blocks from the shared cache. Note
     * the potentially dirty read of nfree before acquiring the lock
     * which is a slight performance enhancement. The value is
     * verified after the lock is actually acquired.
     */

    if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].nfree > 0) {
	LockBucket(cachePtr, bucket);
	if (sharedPtr->buckets[bucket].nfree > 0) {

	    /*
	     * Either move the entire list or walk the list to find
	     * the last block to move.
	     */

	    n = binfo[bucket].nmove;
	    if (n >= sharedPtr->buckets[bucket].nfree) {
		cachePtr->buckets[bucket].firstPtr =
			sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].nfree =
			sharedPtr->buckets[bucket].nfree;
		sharedPtr->buckets[bucket].firstPtr = NULL;
		sharedPtr->buckets[bucket].nfree = 0;
	    } else {
		blockPtr = sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].firstPtr = blockPtr;
		sharedPtr->buckets[bucket].nfree -= n;
		cachePtr->buckets[bucket].nfree = n;
		while (--n > 0) {
		    blockPtr = blockPtr->b_next;
		}
		sharedPtr->buckets[bucket].firstPtr = blockPtr->b_next;
		blockPtr->b_next = NULL;
	    }
	}
	UnlockBucket(cachePtr, bucket);
    }

    if (cachePtr->buckets[bucket].nfree == 0) {

	/*
	 * If no blocks could be moved from shared, first look for a
	 * larger block in this cache to split up.
	 */

	blockPtr = NULL;
	n = NBUCKETS;
	size = 0;
	while (--n > bucket) {
	    if (cachePtr->buckets[n].nfree > 0) {
		size = binfo[n].blocksize;
		blockPtr = cachePtr->buckets[n].firstPtr;
		cachePtr->buckets[n].firstPtr = blockPtr->b_next;
		--cachePtr->buckets[n].nfree;
		break;
	    }
	}

	/*
	 * Otherwise, allocate a big new block directly.
	 */

	if (blockPtr == NULL) {
	    size = MAXALLOC;
	    blockPtr = malloc(size);
	    if (blockPtr == NULL) {
		return 0;
	    }
	}

	/*
	 * Split the larger block into smaller blocks for this bucket.
	 */

	n = size / binfo[bucket].blocksize;
	cachePtr->buckets[bucket].nfree = n;
	cachePtr->buckets[bucket].firstPtr = blockPtr;
	while (--n > 0) {
	    blockPtr->b_next = (Block *)
		    ((char *) blockPtr + binfo[bucket].blocksize);
	    blockPtr = blockPtr->b_next;
	}
	blockPtr->b_next = NULL;
    }
    return 1;
}
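
/*
 * Worked example (illustrative, not part of the original sources):
 * with MAXALLOC 16384 and an empty cache, filling the 1024-byte bucket
 * allocates one 16384-byte system block and carves it into
 * 16384 / 1024 = 16 free blocks linked through b_next.
 */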

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in
 *	this file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc()
{
    int i;

    for (i = 0; i < NBUCKETS; ++i) {
	TclpFreeAllocMutex(binfo[i].lockPtr);
	binfo[i].lockPtr = NULL;
    }

    TclpFreeAllocMutex(objLockPtr);
    objLockPtr = NULL;

    TclpFreeAllocMutex(listLockPtr);
    listLockPtr = NULL;

    TclpFreeAllocCache(NULL);
}

#else /* ! defined(TCL_THREADS) && ! defined(USE_THREAD_ALLOC) */

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in
 *	this file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc()
{
    Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use.");
}

#endif /* TCL_THREADS */