/*
 * tclThreadAlloc.c --
 *
 *	This is a very fast storage allocator for use with threads (designed
 *	to avoid lock contention).  The basic strategy is to allocate memory
 *	in fixed-size blocks from block caches.
 *
 * The Initial Developer of the Original Code is America Online, Inc.
 * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclThreadAlloc.c,v 1.4.2.7 2005/12/20 22:16:34 dkf Exp $
 */
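
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * small requests are served from the calling thread's bucket cache via
 * TclpAlloc()/TclpFree(), while requests larger than MAXALLOC fall
 * through to the system allocator.  Sizes below assume a 64-bit build
 * (sizeof(Block) == 16) with RCHECK enabled:
 *
 *	char *p = TclpAlloc(100);	100+16+1 = 117 bytes -> 128-byte bucket
 *	p = TclpRealloc(p, 104);	still fits; same block returned
 *	TclpFree(p);			pushed back on the thread's bucket
 */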

#include "tclInt.h"

#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC) && !defined(TCL_MEM_DEBUG)

#ifdef WIN32
#include "tclWinInt.h"
#else
extern Tcl_Mutex *TclpNewAllocMutex(void);
extern void *TclpGetAllocCache(void);
extern void TclpSetAllocCache(void *);
#endif

/*
 * If range checking is enabled, an additional byte will be allocated
 * to store the magic number at the end of the requested memory.
 */

#ifndef RCHECK
#ifdef  NDEBUG
#define RCHECK		0
#else
#define RCHECK		1
#endif
#endif

/*
 * The following define the number of Tcl_Obj's to allocate/move
 * at a time and the high water mark to prune a per-thread cache.
 * On a 32 bit system, sizeof(Tcl_Obj) = 24, so 800 * 24 = 19200
 * bytes, roughly 19K.
 */

#define NOBJALLOC	 800
#define NOBJHIGH	1200

/*
 * The following define the number of buckets in the bucket
 * cache and the block sizes they serve, from (1<<4) up to
 * (1<<(3+NBUCKETS)).
 */

#define NBUCKETS	  11
#define MAXALLOC	  16384

/*
 * The following structure stores accounting information for each
 * block: two small magic numbers plus a bucket number while the
 * block is in use, or a next pointer while it is on a free list.
 * The originally requested size (not including the Block overhead)
 * is also maintained.
 */

typedef struct Block {
    union {
    	struct Block *next;	  /* Next in free list. */
    	struct {
	    unsigned char magic1; /* First magic number. */
	    unsigned char bucket; /* Bucket block allocated from. */
	    unsigned char unused; /* Padding. */
	    unsigned char magic2; /* Second magic number. */
        } b_s;
    } b_u;
    size_t b_reqsize;		  /* Requested allocation size. */
} Block;
#define b_next		b_u.next
#define b_bucket	b_u.b_s.bucket
#define b_magic1	b_u.b_s.magic1
#define b_magic2	b_u.b_s.magic2
#define MAGIC		0xef
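
/*
 * Sketch of the resulting block layout (derived from Block2Ptr() and
 * Ptr2Block() below; the trailing guard byte exists only when RCHECK
 * is 1):
 *
 *	+-------------------------------+ <- address returned by malloc()
 *	| magic1 | bucket | pad | magic2|    b_u while the block is in use
 *	| b_reqsize                     |    caller's original request size
 *	+-------------------------------+ <- pointer handed to the caller
 *	| b_reqsize bytes of user data  |
 *	+-------------------------------+
 *	| MAGIC guard byte (RCHECK)     |
 *	+-------------------------------+
 *
 * While the block sits on a free list, the header word holds b_next
 * instead of the magic and bucket bytes.
 */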

/*
 * The following structure defines a bucket of blocks with
 * various accounting and statistics information.
 */

typedef struct Bucket {
    Block *firstPtr;
    long nfree;
    long nget;
    long nput;
    long nwait;
    long nlock;
    long nrequest;
} Bucket;

/*
 * The following structure defines a cache of buckets and objs.
 */

typedef struct Cache {
    struct Cache  *nextPtr;
    Tcl_ThreadId   owner;
    Tcl_Obj       *firstObjPtr;
    int            nobjs;
    int            nsysalloc;
    Bucket         buckets[NBUCKETS];
} Cache;

/*
 * The following array specifies various per-bucket
 * limits and locks.  The values are statically initialized
 * to avoid calculating them repeatedly.
 */

struct binfo {
    size_t blocksize;	/* Bucket blocksize. */
    int maxblocks;	/* Max blocks before move to share. */
    int nmove;		/* Num blocks to move to share. */
    Tcl_Mutex *lockPtr; /* Share bucket lock. */
} binfo[NBUCKETS] = {
    {   16, 1024, 512, NULL},
    {   32,  512, 256, NULL},
    {   64,  256, 128, NULL},
    {  128,  128,  64, NULL},
    {  256,   64,  32, NULL},
    {  512,   32,  16, NULL},
    { 1024,   16,   8, NULL},
    { 2048,    8,   4, NULL},
    { 4096,    4,   2, NULL},
    { 8192,    2,   1, NULL},
    {16384,    1,   1, NULL},
};
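
/*
 * Worked example of the limits above: a TclpAlloc(100) call needs
 * 100 bytes plus the Block overhead, which the loop in TclpAlloc()
 * maps to the 128-byte bucket (index 3).  That bucket may hold up to
 * 128 free blocks per thread; once the count exceeds that, 64 of them
 * are handed to the shared cache under binfo[3].lockPtr.
 */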

/*
 * Static functions defined in this file.
 */

static void LockBucket(Cache *cachePtr, int bucket);
static void UnlockBucket(Cache *cachePtr, int bucket);
static void PutBlocks(Cache *cachePtr, int bucket, int nmove);
static int  GetBlocks(Cache *cachePtr, int bucket);
static Block *Ptr2Block(char *ptr);
static char *Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize);
static void MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove);

/*
 * Local variables defined in this file and initialized at
 * startup.
 */

static Tcl_Mutex *listLockPtr;
static Tcl_Mutex *objLockPtr;
static Cache     sharedCache;
static Cache    *sharedPtr = &sharedCache;
static Cache    *firstCachePtr = &sharedCache;


/*
 *----------------------------------------------------------------------
 *
 * GetCache --
 *
 *	Gets per-thread memory cache, allocating it if necessary.
 *
 * Results:
 *	Pointer to cache.
 *
 * Side effects:
 *	May allocate a new cache and link it onto the global list.
 *
 *----------------------------------------------------------------------
 */

static Cache *
GetCache(void)
{
    Cache *cachePtr;

    /*
     * Check for first-time initialization.
     */

    if (listLockPtr == NULL) {
	Tcl_Mutex *initLockPtr;
    	int i;

	initLockPtr = Tcl_GetAllocMutex();
	Tcl_MutexLock(initLockPtr);
	if (listLockPtr == NULL) {
	    listLockPtr = TclpNewAllocMutex();
	    objLockPtr = TclpNewAllocMutex();
	    for (i = 0; i < NBUCKETS; ++i) {
	        binfo[i].lockPtr = TclpNewAllocMutex();
	    }
	}
	Tcl_MutexUnlock(initLockPtr);
    }

    /*
     * Get this thread's cache, allocating if necessary.
     */

    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
    	cachePtr = calloc(1, sizeof(Cache));
    	if (cachePtr == NULL) {
	    panic("alloc: could not allocate new cache");
    	}
    	Tcl_MutexLock(listLockPtr);
    	cachePtr->nextPtr = firstCachePtr;
    	firstCachePtr = cachePtr;
    	Tcl_MutexUnlock(listLockPtr);
    	cachePtr->owner = Tcl_GetCurrentThread();
	TclpSetAllocCache(cachePtr);
    }
    return cachePtr;
}


/*
 *----------------------------------------------------------------------
 *
 * TclFreeAllocCache --
 *
 *	Flush and delete a cache, removing from list of caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The cache's blocks and objs are flushed to the shared cache
 *	and the cache's storage is freed.
 *
 *----------------------------------------------------------------------
 */

void
TclFreeAllocCache(void *arg)
{
    Cache *cachePtr = arg;
    Cache **nextPtrPtr;
    register int   bucket;

    /*
     * Flush blocks.
     */

    for (bucket = 0; bucket < NBUCKETS; ++bucket) {
	if (cachePtr->buckets[bucket].nfree > 0) {
	    PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].nfree);
	}
    }

    /*
     * Flush objs.
     */

    if (cachePtr->nobjs > 0) {
    	Tcl_MutexLock(objLockPtr);
    	MoveObjs(cachePtr, sharedPtr, cachePtr->nobjs);
    	Tcl_MutexUnlock(objLockPtr);
    }

    /*
     * Remove from pool list.
     */

    Tcl_MutexLock(listLockPtr);
    nextPtrPtr = &firstCachePtr;
    while (*nextPtrPtr != cachePtr) {
	nextPtrPtr = &(*nextPtrPtr)->nextPtr;
    }
    *nextPtrPtr = cachePtr->nextPtr;
    cachePtr->nextPtr = NULL;
    Tcl_MutexUnlock(listLockPtr);
    free(cachePtr);
}


/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *	Allocate memory.
 *
 * Results:
 *	Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *	May allocate more blocks for a bucket.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(unsigned int reqsize)
{
    Cache         *cachePtr = TclpGetAllocCache();
    Block         *blockPtr;
    register int   bucket;
    size_t         size;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Increment the requested size to include room for
     * the Block structure.  Call malloc() directly if the
     * required amount is greater than the largest block,
     * otherwise pop the smallest block large enough,
     * allocating more blocks if necessary.
     */

    blockPtr = NULL;
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    if (size > MAXALLOC) {
	bucket = NBUCKETS;
    	blockPtr = malloc(size);
	if (blockPtr != NULL) {
	    cachePtr->nsysalloc += reqsize;
	}
    } else {
    	bucket = 0;
    	while (binfo[bucket].blocksize < size) {
    	    ++bucket;
    	}
    	if (cachePtr->buckets[bucket].nfree || GetBlocks(cachePtr, bucket)) {
	    blockPtr = cachePtr->buckets[bucket].firstPtr;
	    cachePtr->buckets[bucket].firstPtr = blockPtr->b_next;
	    --cachePtr->buckets[bucket].nfree;
    	    ++cachePtr->buckets[bucket].nget;
	    cachePtr->buckets[bucket].nrequest += reqsize;
	}
    }
    if (blockPtr == NULL) {
    	return NULL;
    }
    return Block2Ptr(blockPtr, bucket, reqsize);
}
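
/*
 * Illustration of the two paths above (sizes assume a 64-bit build
 * with RCHECK enabled): TclpAlloc(64) needs 64+16+1 = 81 bytes and
 * pops a block from the 128-byte bucket, while TclpAlloc(32000)
 * exceeds MAXALLOC, goes straight to malloc(), and is tagged with
 * bucket number NBUCKETS so that TclpFree() later hands it to free()
 * rather than to a bucket.
 */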


/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *	Return blocks to the thread block cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move blocks to shared cache.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(char *ptr)
{
    if (ptr != NULL) {
	Cache  *cachePtr = TclpGetAllocCache();
	Block *blockPtr;
	int bucket;

	if (cachePtr == NULL) {
	    cachePtr = GetCache();
	}

	/*
	 * Get the block back from the user pointer and
	 * call system free directly for large blocks.
	 * Otherwise, push the block back on the bucket and
	 * move blocks to the shared cache if there are now
	 * too many free.
	 */

	blockPtr = Ptr2Block(ptr);
	bucket = blockPtr->b_bucket;
	if (bucket == NBUCKETS) {
	    cachePtr->nsysalloc -= blockPtr->b_reqsize;
	    free(blockPtr);
	} else {
	    cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
	    blockPtr->b_next = cachePtr->buckets[bucket].firstPtr;
	    cachePtr->buckets[bucket].firstPtr = blockPtr;
	    ++cachePtr->buckets[bucket].nfree;
	    ++cachePtr->buckets[bucket].nput;
	    if (cachePtr != sharedPtr &&
		    cachePtr->buckets[bucket].nfree > binfo[bucket].maxblocks) {
		PutBlocks(cachePtr, bucket, binfo[bucket].nmove);
	    }
	}
    }
}


/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *	Re-allocate memory to a larger or smaller size.
 *
 * Results:
 *	Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *	Previous memory, if any, may be freed.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(char *ptr, unsigned int reqsize)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    void *new;
    size_t size, min;
    int bucket;

    if (ptr == NULL) {
	return TclpAlloc(reqsize);
    }

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * If the block is not a system block and fits in place,
     * simply return the existing pointer.  Otherwise, if the block
     * is a system block and the new size would also require a system
     * block, call realloc() directly.
     */

    blockPtr = Ptr2Block(ptr);
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    bucket = blockPtr->b_bucket;
    if (bucket != NBUCKETS) {
	if (bucket > 0) {
	    min = binfo[bucket-1].blocksize;
	} else {
	    min = 0;
	}
	if (size > min && size <= binfo[bucket].blocksize) {
	    cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
	    cachePtr->buckets[bucket].nrequest += reqsize;
	    return Block2Ptr(blockPtr, bucket, reqsize);
	}
    } else if (size > MAXALLOC) {
	cachePtr->nsysalloc -= blockPtr->b_reqsize;
	cachePtr->nsysalloc += reqsize;
	blockPtr = realloc(blockPtr, size);
	if (blockPtr == NULL) {
	    return NULL;
	}
	return Block2Ptr(blockPtr, NBUCKETS, reqsize);
    }

    /*
     * Finally, perform an expensive malloc/copy/free.
     */

    new = TclpAlloc(reqsize);
    if (new != NULL) {
	if (reqsize > blockPtr->b_reqsize) {
	    reqsize = blockPtr->b_reqsize;
	}
    	memcpy(new, ptr, reqsize);
    	TclpFree(ptr);
    }
    return new;
}
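
/*
 * Example of the in-place test above: a block living in the 256-byte
 * bucket is returned unchanged for any new size that still needs more
 * than 128 and at most 256 bytes including overhead.  Shrinking below
 * that range or growing beyond it falls through to the alloc/copy/free
 * path (or to realloc() when both the old and new sizes are system
 * blocks).
 */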


/*
 *----------------------------------------------------------------------
 *
 * TclThreadAllocObj --
 *
 *	Allocate a Tcl_Obj from the per-thread cache.
 *
 * Results:
 *	Pointer to uninitialized Tcl_Obj.
 *
 * Side effects:
 *	May move Tcl_Obj's from shared cache or allocate new Tcl_Obj's
 *	if list is empty.
 *
 *----------------------------------------------------------------------
 */

Tcl_Obj *
TclThreadAllocObj(void)
{
    register Cache *cachePtr = TclpGetAllocCache();
    register int nmove;
    register Tcl_Obj *objPtr;
    Tcl_Obj *newObjsPtr;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Get this thread's obj list structure and move
     * or allocate new objs if necessary.
     */

    if (cachePtr->nobjs == 0) {
    	Tcl_MutexLock(objLockPtr);
	nmove = sharedPtr->nobjs;
	if (nmove > 0) {
	    if (nmove > NOBJALLOC) {
		nmove = NOBJALLOC;
	    }
	    MoveObjs(sharedPtr, cachePtr, nmove);
	}
    	Tcl_MutexUnlock(objLockPtr);
	if (cachePtr->nobjs == 0) {
	    cachePtr->nobjs = nmove = NOBJALLOC;
	    newObjsPtr = malloc(sizeof(Tcl_Obj) * nmove);
	    if (newObjsPtr == NULL) {
		panic("alloc: could not allocate %d new objects", nmove);
	    }
	    while (--nmove >= 0) {
		objPtr = &newObjsPtr[nmove];
		objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
		cachePtr->firstObjPtr = objPtr;
	    }
	}
    }

    /*
     * Pop the first object.
     */

    objPtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
    --cachePtr->nobjs;
    return objPtr;
}
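
/*
 * Note on the linkage above: free Tcl_Obj's are threaded through their
 * own internalRep.otherValuePtr fields, so the free list needs no
 * extra storage and both push and pop are O(1):
 *
 *	objPtr = cachePtr->firstObjPtr;				pop
 *	cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
 */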


/*
 *----------------------------------------------------------------------
 *
 * TclThreadFreeObj --
 *
 *	Return a free Tcl_Obj to the per-thread cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move free Tcl_Obj's to shared list upon hitting high
 *	water mark.
 *
 *----------------------------------------------------------------------
 */

void
TclThreadFreeObj(Tcl_Obj *objPtr)
{
    Cache *cachePtr = TclpGetAllocCache();

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Get this thread's list and push on the free Tcl_Obj.
     */

    objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr;
    ++cachePtr->nobjs;

    /*
     * If the number of free objects has exceeded the high
     * water mark, move some blocks to the shared list.
     */

    if (cachePtr->nobjs > NOBJHIGH) {
	Tcl_MutexLock(objLockPtr);
	MoveObjs(cachePtr, sharedPtr, NOBJALLOC);
	Tcl_MutexUnlock(objLockPtr);
    }
}


/*
 *----------------------------------------------------------------------
 *
 * Tcl_GetMemoryInfo --
 *
 *	Return a list-of-lists of memory stats.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	List appended to given dstring.
 *
 *----------------------------------------------------------------------
 */

void
Tcl_GetMemoryInfo(Tcl_DString *dsPtr)
{
    Cache *cachePtr;
    char buf[200];
    int n;

    Tcl_MutexLock(listLockPtr);
    cachePtr = firstCachePtr;
    while (cachePtr != NULL) {
	Tcl_DStringStartSublist(dsPtr);
	if (cachePtr == sharedPtr) {
    	    Tcl_DStringAppendElement(dsPtr, "shared");
	} else {
	    sprintf(buf, "thread%d", (int) cachePtr->owner);
    	    Tcl_DStringAppendElement(dsPtr, buf);
	}
	for (n = 0; n < NBUCKETS; ++n) {
    	    sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
		(unsigned long) binfo[n].blocksize,
		cachePtr->buckets[n].nfree,
		cachePtr->buckets[n].nget,
		cachePtr->buckets[n].nput,
		cachePtr->buckets[n].nrequest,
		cachePtr->buckets[n].nlock,
		cachePtr->buckets[n].nwait);
	    Tcl_DStringAppendElement(dsPtr, buf);
	}
	Tcl_DStringEndSublist(dsPtr);
	cachePtr = cachePtr->nextPtr;
    }
    Tcl_MutexUnlock(listLockPtr);
}


/*
 *----------------------------------------------------------------------
 *
 * MoveObjs --
 *
 *	Move Tcl_Obj's between caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove)
{
    register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
    Tcl_Obj *fromFirstObjPtr = objPtr;

    toPtr->nobjs += nmove;
    fromPtr->nobjs -= nmove;

    /*
     * Find the last object to be moved; set the next one
     * (the first one not to be moved) as the first object
     * in the 'from' cache.
     */

    while (--nmove) {
	objPtr = objPtr->internalRep.otherValuePtr;
    }
    fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;

    /*
     * Move all objects as a block - they are already linked to
     * each other, we just have to update the first and last.
     */

    objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
    toPtr->firstObjPtr = fromFirstObjPtr;
}


/*
 *----------------------------------------------------------------------
 *
 * Block2Ptr, Ptr2Block --
 *
 *	Convert between internal blocks and user pointers.
 *
 * Results:
 *	User pointer or internal block.
 *
 * Side effects:
 *	Invalid blocks will abort the process.
 *
 *----------------------------------------------------------------------
 */

static char *
Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize)
{
    register void *ptr;

    blockPtr->b_magic1 = blockPtr->b_magic2 = MAGIC;
    blockPtr->b_bucket = bucket;
    blockPtr->b_reqsize = reqsize;
    ptr = ((void *) (blockPtr + 1));
#if RCHECK
    ((unsigned char *)(ptr))[reqsize] = MAGIC;
#endif
    return (char *) ptr;
}

static Block *
Ptr2Block(char *ptr)
{
    register Block *blockPtr;

    blockPtr = (((Block *) ptr) - 1);
    if (blockPtr->b_magic1 != MAGIC
#if RCHECK
	|| ((unsigned char *) ptr)[blockPtr->b_reqsize] != MAGIC
#endif
	|| blockPtr->b_magic2 != MAGIC) {
	panic("alloc: invalid block: %p: %x %x %x\n",
	    blockPtr, blockPtr->b_magic1, blockPtr->b_magic2,
	    ((unsigned char *) ptr)[blockPtr->b_reqsize]);
    }
    return blockPtr;
}
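
/*
 * Example of what Ptr2Block() catches when RCHECK is 1: a single-byte
 * overrun clobbers the trailing MAGIC guard, so the next free or
 * realloc of that pointer panics instead of silently corrupting a
 * bucket:
 *
 *	char *p = TclpAlloc(10);
 *	p[10] = 0;		overwrites the guard byte
 *	TclpFree(p);		panic("alloc: invalid block ...")
 */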


/*
 *----------------------------------------------------------------------
 *
 * LockBucket, UnlockBucket --
 *
 *	Set/unset the lock to access a bucket in the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Lock activity and contention are monitored globally and on
 *	a per-cache basis.
 *
 *----------------------------------------------------------------------
 */

static void
LockBucket(Cache *cachePtr, int bucket)
{
#if 0
    if (Tcl_MutexTryLock(binfo[bucket].lockPtr) != TCL_OK) {
	Tcl_MutexLock(binfo[bucket].lockPtr);
    	++cachePtr->buckets[bucket].nwait;
    	++sharedPtr->buckets[bucket].nwait;
    }
#else
    Tcl_MutexLock(binfo[bucket].lockPtr);
#endif
    ++cachePtr->buckets[bucket].nlock;
    ++sharedPtr->buckets[bucket].nlock;
}


static void
UnlockBucket(Cache *cachePtr, int bucket)
{
    Tcl_MutexUnlock(binfo[bucket].lockPtr);
}


/*
 *----------------------------------------------------------------------
 *
 * PutBlocks --
 *
 *	Return unused blocks to the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
PutBlocks(Cache *cachePtr, int bucket, int nmove)
{
    register Block *lastPtr, *firstPtr;
    register int n = nmove;

    /*
     * Before acquiring the lock, walk the block list to find
     * the last block to be moved.
     */

    firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
    while (--n > 0) {
	lastPtr = lastPtr->b_next;
    }
    cachePtr->buckets[bucket].firstPtr = lastPtr->b_next;
    cachePtr->buckets[bucket].nfree -= nmove;

    /*
     * Acquire the lock and place the list of blocks at the front
     * of the shared cache bucket.
     */

    LockBucket(cachePtr, bucket);
    lastPtr->b_next = sharedPtr->buckets[bucket].firstPtr;
    sharedPtr->buckets[bucket].firstPtr = firstPtr;
    sharedPtr->buckets[bucket].nfree += nmove;
    UnlockBucket(cachePtr, bucket);
}


/*
 *----------------------------------------------------------------------
 *
 * GetBlocks --
 *
 *	Get more blocks for a bucket.
 *
 * Results:
 *	1 if blocks were allocated, 0 otherwise.
 *
 * Side effects:
 *	Cache may be filled with available blocks.
 *
 *----------------------------------------------------------------------
 */

static int
GetBlocks(Cache *cachePtr, int bucket)
{
    register Block *blockPtr;
    register int n;
    register size_t size;

    /*
     * First, attempt to move blocks from the shared cache.  Note
     * the potentially dirty read of nfree before acquiring the lock
     * which is a slight performance enhancement.  The value is
     * verified after the lock is actually acquired.
     */

    if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].nfree > 0) {
	LockBucket(cachePtr, bucket);
	if (sharedPtr->buckets[bucket].nfree > 0) {

	    /*
	     * Either move the entire list or walk the list to find
	     * the last block to move.
	     */

	    n = binfo[bucket].nmove;
	    if (n >= sharedPtr->buckets[bucket].nfree) {
		cachePtr->buckets[bucket].firstPtr =
		    sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].nfree =
		    sharedPtr->buckets[bucket].nfree;
		sharedPtr->buckets[bucket].firstPtr = NULL;
		sharedPtr->buckets[bucket].nfree = 0;
	    } else {
		blockPtr = sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].firstPtr = blockPtr;
		sharedPtr->buckets[bucket].nfree -= n;
		cachePtr->buckets[bucket].nfree = n;
		while (--n > 0) {
    		    blockPtr = blockPtr->b_next;
		}
		sharedPtr->buckets[bucket].firstPtr = blockPtr->b_next;
		blockPtr->b_next = NULL;
	    }
	}
	UnlockBucket(cachePtr, bucket);
    }

    if (cachePtr->buckets[bucket].nfree == 0) {

	/*
	 * If no blocks could be moved from shared, first look for a
	 * larger block in this cache to split up.
	 */

    	blockPtr = NULL;
	n = NBUCKETS;
	size = 0; /* lint */
	while (--n > bucket) {
    	    if (cachePtr->buckets[n].nfree > 0) {
		size = binfo[n].blocksize;
		blockPtr = cachePtr->buckets[n].firstPtr;
		cachePtr->buckets[n].firstPtr = blockPtr->b_next;
		--cachePtr->buckets[n].nfree;
		break;
	    }
	}

	/*
	 * Otherwise, allocate a big new block directly.
	 */

	if (blockPtr == NULL) {
	    size = MAXALLOC;
	    blockPtr = malloc(size);
	    if (blockPtr == NULL) {
		return 0;
	    }
	}

	/*
	 * Split the larger block into smaller blocks for this bucket.
	 */

	n = size / binfo[bucket].blocksize;
	cachePtr->buckets[bucket].nfree = n;
	cachePtr->buckets[bucket].firstPtr = blockPtr;
	while (--n > 0) {
	    blockPtr->b_next = (Block *)
		((char *) blockPtr + binfo[bucket].blocksize);
	    blockPtr = blockPtr->b_next;
	}
	blockPtr->b_next = NULL;
    }
    return 1;
}
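
/*
 * Splitting arithmetic for the code above, with MAXALLOC at 16384:
 * a fresh chunk carved for the 16-byte bucket yields 16384/16 = 1024
 * blocks, while one carved for the 8192-byte bucket yields 2.  A block
 * borrowed from a larger bucket splits the same way, e.g. one
 * 4096-byte block becomes 16 blocks for the 256-byte bucket.
 */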

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in
 *	this file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)
{
    int i;
    for (i = 0; i < NBUCKETS; ++i) {
        TclpFreeAllocMutex(binfo[i].lockPtr);
        binfo[i].lockPtr = NULL;
    }

    TclpFreeAllocMutex(objLockPtr);
    objLockPtr = NULL;

    TclpFreeAllocMutex(listLockPtr);
    listLockPtr = NULL;

    TclpFreeAllocCache(NULL);
}

#else /* !(TCL_THREADS && USE_THREAD_ALLOC) || TCL_MEM_DEBUG */

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in
 *	this file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)
{
    Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use.");
}

#endif /* TCL_THREADS && USE_THREAD_ALLOC && !TCL_MEM_DEBUG */