os/persistentdata/persistentstorage/sqlite3api/TEST/TCL/tcldistribution/generic/tclThreadAlloc.c
1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/persistentdata/persistentstorage/sqlite3api/TEST/TCL/tcldistribution/generic/tclThreadAlloc.c Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,1012 @@
/*
 * tclThreadAlloc.c --
 *
 *	This is a very fast storage allocator for use with threads (designed
 *	to avoid lock contention). The basic strategy is to allocate memory in
 *	fixed-size blocks from block caches.
 *
 * The Initial Developer of the Original Code is America Online, Inc.
 * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclThreadAlloc.c,v 1.4.2.7 2005/12/20 22:16:34 dkf Exp $
 */
1.19 +
1.20 +#include "tclInt.h"
1.21 +
1.22 +#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC) && !defined(TCL_MEM_DEBUG)
1.23 +
1.24 +#ifdef WIN32
1.25 +#include "tclWinInt.h"
1.26 +#else
1.27 +extern Tcl_Mutex *TclpNewAllocMutex(void);
1.28 +extern void *TclpGetAllocCache(void);
1.29 +extern void TclpSetAllocCache(void *);
1.30 +#endif
1.31 +
1.32 +/*
1.33 + * If range checking is enabled, an additional byte will be allocated
1.34 + * to store the magic number at the end of the requested memory.
1.35 + */
1.36 +
1.37 +#ifndef RCHECK
1.38 +#ifdef NDEBUG
1.39 +#define RCHECK 0
1.40 +#else
1.41 +#define RCHECK 1
1.42 +#endif
1.43 +#endif
1.44 +
1.45 +/*
1.46 + * The following define the number of Tcl_Obj's to allocate/move
1.47 + * at a time and the high water mark to prune a per-thread cache.
1.48 + * On a 32 bit system, sizeof(Tcl_Obj) = 24 so 800 * 24 = ~16k.
1.49 + *
1.50 + */
1.51 +
1.52 +#define NOBJALLOC 800
1.53 +#define NOBJHIGH 1200
1.54 +
1.55 +/*
1.56 + * The following defines the number of buckets in the bucket
1.57 + * cache and those block sizes from (1<<4) to (1<<(3+NBUCKETS))
1.58 + */
1.59 +
1.60 +#define NBUCKETS 11
1.61 +#define MAXALLOC 16284
1.62 +
1.63 +/*
1.64 + * The following union stores accounting information for
1.65 + * each block including two small magic numbers and
1.66 + * a bucket number when in use or a next pointer when
1.67 + * free. The original requested size (not including
1.68 + * the Block overhead) is also maintained.
1.69 + */
1.70 +
1.71 +typedef struct Block {
1.72 + union {
1.73 + struct Block *next; /* Next in free list. */
1.74 + struct {
1.75 + unsigned char magic1; /* First magic number. */
1.76 + unsigned char bucket; /* Bucket block allocated from. */
1.77 + unsigned char unused; /* Padding. */
1.78 + unsigned char magic2; /* Second magic number. */
1.79 + } b_s;
1.80 + } b_u;
1.81 + size_t b_reqsize; /* Requested allocation size. */
1.82 +} Block;
1.83 +#define b_next b_u.next
1.84 +#define b_bucket b_u.b_s.bucket
1.85 +#define b_magic1 b_u.b_s.magic1
1.86 +#define b_magic2 b_u.b_s.magic2
1.87 +#define MAGIC 0xef
1.88 +
1.89 +/*
1.90 + * The following structure defines a bucket of blocks with
1.91 + * various accounting and statistics information.
1.92 + */
1.93 +
1.94 +typedef struct Bucket {
1.95 + Block *firstPtr;
1.96 + long nfree;
1.97 + long nget;
1.98 + long nput;
1.99 + long nwait;
1.100 + long nlock;
1.101 + long nrequest;
1.102 +} Bucket;
1.103 +
1.104 +/*
1.105 + * The following structure defines a cache of buckets and objs.
1.106 + */
1.107 +
1.108 +typedef struct Cache {
1.109 + struct Cache *nextPtr;
1.110 + Tcl_ThreadId owner;
1.111 + Tcl_Obj *firstObjPtr;
1.112 + int nobjs;
1.113 + int nsysalloc;
1.114 + Bucket buckets[NBUCKETS];
1.115 +} Cache;
1.116 +
1.117 +/*
1.118 + * The following array specifies various per-bucket
1.119 + * limits and locks. The values are statically initialized
1.120 + * to avoid calculating them repeatedly.
1.121 + */
1.122 +
1.123 +struct binfo {
1.124 + size_t blocksize; /* Bucket blocksize. */
1.125 + int maxblocks; /* Max blocks before move to share. */
1.126 + int nmove; /* Num blocks to move to share. */
1.127 + Tcl_Mutex *lockPtr; /* Share bucket lock. */
1.128 +} binfo[NBUCKETS] = {
1.129 + { 16, 1024, 512, NULL},
1.130 + { 32, 512, 256, NULL},
1.131 + { 64, 256, 128, NULL},
1.132 + { 128, 128, 64, NULL},
1.133 + { 256, 64, 32, NULL},
1.134 + { 512, 32, 16, NULL},
1.135 + { 1024, 16, 8, NULL},
1.136 + { 2048, 8, 4, NULL},
1.137 + { 4096, 4, 2, NULL},
1.138 + { 8192, 2, 1, NULL},
1.139 + {16284, 1, 1, NULL},
1.140 +};
1.141 +
/*
 * Static functions defined in this file.
 */

static void LockBucket(Cache *cachePtr, int bucket);
static void UnlockBucket(Cache *cachePtr, int bucket);
static void PutBlocks(Cache *cachePtr, int bucket, int nmove);
static int GetBlocks(Cache *cachePtr, int bucket);
static Block *Ptr2Block(char *ptr);
static char *Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize);
static void MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove);

/*
 * Local variables defined in this file and initialized at
 * startup.
 */

static Tcl_Mutex *listLockPtr;		/* Guards the global cache list; also
					 * doubles as the one-time init flag
					 * (see GetCache). */
static Tcl_Mutex *objLockPtr;		/* Guards the shared Tcl_Obj list. */
static Cache sharedCache;		/* The process-wide shared cache. */
static Cache *sharedPtr = &sharedCache;	/* Shorthand for &sharedCache. */
static Cache *firstCachePtr = &sharedCache;
					/* Head of the list of all caches;
					 * the shared cache is always last. */
1.164 +
1.165 +
/*
 *----------------------------------------------------------------------
 *
 * GetCache --
 *
 *	Gets per-thread memory cache, allocating it if necessary.
 *
 * Results:
 *	Pointer to cache.
 *
 * Side effects:
 *	On first use anywhere in the process, creates the global list,
 *	obj, and per-bucket mutexes.  May allocate a new per-thread
 *	cache, link it onto the global list, and store it in
 *	thread-specific storage.
 *
 *----------------------------------------------------------------------
 */

static Cache *
GetCache(void)
{
    Cache *cachePtr;

    /*
     * Check for first-time initialization.  listLockPtr serves as the
     * "initialized" flag: it is read unlocked here and re-checked under
     * the master allocator mutex (double-checked locking).
     * NOTE(review): this assumes the unlocked read of the pointer is
     * safe on the target platform -- confirm for weakly ordered CPUs.
     */

    if (listLockPtr == NULL) {
	Tcl_Mutex *initLockPtr;
	int i;

	initLockPtr = Tcl_GetAllocMutex();
	Tcl_MutexLock(initLockPtr);
	if (listLockPtr == NULL) {
	    listLockPtr = TclpNewAllocMutex();
	    objLockPtr = TclpNewAllocMutex();
	    for (i = 0; i < NBUCKETS; ++i) {
		binfo[i].lockPtr = TclpNewAllocMutex();
	    }
	}
	Tcl_MutexUnlock(initLockPtr);
    }

    /*
     * Get this thread's cache, allocating if necessary.  A new cache is
     * zero-filled by calloc(), pushed onto the global list under
     * listLockPtr, then published via thread-specific storage.
     */

    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
	cachePtr = calloc(1, sizeof(Cache));
	if (cachePtr == NULL) {
	    panic("alloc: could not allocate new cache");
	}
	Tcl_MutexLock(listLockPtr);
	cachePtr->nextPtr = firstCachePtr;
	firstCachePtr = cachePtr;
	Tcl_MutexUnlock(listLockPtr);
	cachePtr->owner = Tcl_GetCurrentThread();
	TclpSetAllocCache(cachePtr);
    }
    return cachePtr;
}
1.226 +
1.227 +
1.228 +/*
1.229 + *----------------------------------------------------------------------
1.230 + *
1.231 + * TclFreeAllocCache --
1.232 + *
1.233 + * Flush and delete a cache, removing from list of caches.
1.234 + *
1.235 + * Results:
1.236 + * None.
1.237 + *
1.238 + * Side effects:
1.239 + * None.
1.240 + *
1.241 + *----------------------------------------------------------------------
1.242 + */
1.243 +
1.244 +void
1.245 +TclFreeAllocCache(void *arg)
1.246 +{
1.247 + Cache *cachePtr = arg;
1.248 + Cache **nextPtrPtr;
1.249 + register int bucket;
1.250 +
1.251 + /*
1.252 + * Flush blocks.
1.253 + */
1.254 +
1.255 + for (bucket = 0; bucket < NBUCKETS; ++bucket) {
1.256 + if (cachePtr->buckets[bucket].nfree > 0) {
1.257 + PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].nfree);
1.258 + }
1.259 + }
1.260 +
1.261 + /*
1.262 + * Flush objs.
1.263 + */
1.264 +
1.265 + if (cachePtr->nobjs > 0) {
1.266 + Tcl_MutexLock(objLockPtr);
1.267 + MoveObjs(cachePtr, sharedPtr, cachePtr->nobjs);
1.268 + Tcl_MutexUnlock(objLockPtr);
1.269 + }
1.270 +
1.271 + /*
1.272 + * Remove from pool list.
1.273 + */
1.274 +
1.275 + Tcl_MutexLock(listLockPtr);
1.276 + nextPtrPtr = &firstCachePtr;
1.277 + while (*nextPtrPtr != cachePtr) {
1.278 + nextPtrPtr = &(*nextPtrPtr)->nextPtr;
1.279 + }
1.280 + *nextPtrPtr = cachePtr->nextPtr;
1.281 + cachePtr->nextPtr = NULL;
1.282 + Tcl_MutexUnlock(listLockPtr);
1.283 + free(cachePtr);
1.284 +}
1.285 +
1.286 +
1.287 +/*
1.288 + *----------------------------------------------------------------------
1.289 + *
1.290 + * TclpAlloc --
1.291 + *
1.292 + * Allocate memory.
1.293 + *
1.294 + * Results:
1.295 + * Pointer to memory just beyond Block pointer.
1.296 + *
1.297 + * Side effects:
1.298 + * May allocate more blocks for a bucket.
1.299 + *
1.300 + *----------------------------------------------------------------------
1.301 + */
1.302 +
1.303 +char *
1.304 +TclpAlloc(unsigned int reqsize)
1.305 +{
1.306 + Cache *cachePtr = TclpGetAllocCache();
1.307 + Block *blockPtr;
1.308 + register int bucket;
1.309 + size_t size;
1.310 +
1.311 + if (cachePtr == NULL) {
1.312 + cachePtr = GetCache();
1.313 + }
1.314 +
1.315 + /*
1.316 + * Increment the requested size to include room for
1.317 + * the Block structure. Call malloc() directly if the
1.318 + * required amount is greater than the largest block,
1.319 + * otherwise pop the smallest block large enough,
1.320 + * allocating more blocks if necessary.
1.321 + */
1.322 +
1.323 + blockPtr = NULL;
1.324 + size = reqsize + sizeof(Block);
1.325 +#if RCHECK
1.326 + ++size;
1.327 +#endif
1.328 + if (size > MAXALLOC) {
1.329 + bucket = NBUCKETS;
1.330 + blockPtr = malloc(size);
1.331 + if (blockPtr != NULL) {
1.332 + cachePtr->nsysalloc += reqsize;
1.333 + }
1.334 + } else {
1.335 + bucket = 0;
1.336 + while (binfo[bucket].blocksize < size) {
1.337 + ++bucket;
1.338 + }
1.339 + if (cachePtr->buckets[bucket].nfree || GetBlocks(cachePtr, bucket)) {
1.340 + blockPtr = cachePtr->buckets[bucket].firstPtr;
1.341 + cachePtr->buckets[bucket].firstPtr = blockPtr->b_next;
1.342 + --cachePtr->buckets[bucket].nfree;
1.343 + ++cachePtr->buckets[bucket].nget;
1.344 + cachePtr->buckets[bucket].nrequest += reqsize;
1.345 + }
1.346 + }
1.347 + if (blockPtr == NULL) {
1.348 + return NULL;
1.349 + }
1.350 + return Block2Ptr(blockPtr, bucket, reqsize);
1.351 +}
1.352 +
1.353 +
1.354 +/*
1.355 + *----------------------------------------------------------------------
1.356 + *
1.357 + * TclpFree --
1.358 + *
1.359 + * Return blocks to the thread block cache.
1.360 + *
1.361 + * Results:
1.362 + * None.
1.363 + *
1.364 + * Side effects:
1.365 + * May move blocks to shared cache.
1.366 + *
1.367 + *----------------------------------------------------------------------
1.368 + */
1.369 +
1.370 +void
1.371 +TclpFree(char *ptr)
1.372 +{
1.373 + if (ptr != NULL) {
1.374 + Cache *cachePtr = TclpGetAllocCache();
1.375 + Block *blockPtr;
1.376 + int bucket;
1.377 +
1.378 + if (cachePtr == NULL) {
1.379 + cachePtr = GetCache();
1.380 + }
1.381 +
1.382 + /*
1.383 + * Get the block back from the user pointer and
1.384 + * call system free directly for large blocks.
1.385 + * Otherwise, push the block back on the bucket and
1.386 + * move blocks to the shared cache if there are now
1.387 + * too many free.
1.388 + */
1.389 +
1.390 + blockPtr = Ptr2Block(ptr);
1.391 + bucket = blockPtr->b_bucket;
1.392 + if (bucket == NBUCKETS) {
1.393 + cachePtr->nsysalloc -= blockPtr->b_reqsize;
1.394 + free(blockPtr);
1.395 + } else {
1.396 + cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
1.397 + blockPtr->b_next = cachePtr->buckets[bucket].firstPtr;
1.398 + cachePtr->buckets[bucket].firstPtr = blockPtr;
1.399 + ++cachePtr->buckets[bucket].nfree;
1.400 + ++cachePtr->buckets[bucket].nput;
1.401 + if (cachePtr != sharedPtr &&
1.402 + cachePtr->buckets[bucket].nfree > binfo[bucket].maxblocks) {
1.403 + PutBlocks(cachePtr, bucket, binfo[bucket].nmove);
1.404 + }
1.405 + }
1.406 + }
1.407 +}
1.408 +
1.409 +
1.410 +/*
1.411 + *----------------------------------------------------------------------
1.412 + *
1.413 + * TclpRealloc --
1.414 + *
1.415 + * Re-allocate memory to a larger or smaller size.
1.416 + *
1.417 + * Results:
1.418 + * Pointer to memory just beyond Block pointer.
1.419 + *
1.420 + * Side effects:
1.421 + * Previous memory, if any, may be freed.
1.422 + *
1.423 + *----------------------------------------------------------------------
1.424 + */
1.425 +
1.426 +char *
1.427 +TclpRealloc(char *ptr, unsigned int reqsize)
1.428 +{
1.429 + Cache *cachePtr = TclpGetAllocCache();
1.430 + Block *blockPtr;
1.431 + void *new;
1.432 + size_t size, min;
1.433 + int bucket;
1.434 +
1.435 + if (ptr == NULL) {
1.436 + return TclpAlloc(reqsize);
1.437 + }
1.438 +
1.439 + if (cachePtr == NULL) {
1.440 + cachePtr = GetCache();
1.441 + }
1.442 +
1.443 + /*
1.444 + * If the block is not a system block and fits in place,
1.445 + * simply return the existing pointer. Otherwise, if the block
1.446 + * is a system block and the new size would also require a system
1.447 + * block, call realloc() directly.
1.448 + */
1.449 +
1.450 + blockPtr = Ptr2Block(ptr);
1.451 + size = reqsize + sizeof(Block);
1.452 +#if RCHECK
1.453 + ++size;
1.454 +#endif
1.455 + bucket = blockPtr->b_bucket;
1.456 + if (bucket != NBUCKETS) {
1.457 + if (bucket > 0) {
1.458 + min = binfo[bucket-1].blocksize;
1.459 + } else {
1.460 + min = 0;
1.461 + }
1.462 + if (size > min && size <= binfo[bucket].blocksize) {
1.463 + cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
1.464 + cachePtr->buckets[bucket].nrequest += reqsize;
1.465 + return Block2Ptr(blockPtr, bucket, reqsize);
1.466 + }
1.467 + } else if (size > MAXALLOC) {
1.468 + cachePtr->nsysalloc -= blockPtr->b_reqsize;
1.469 + cachePtr->nsysalloc += reqsize;
1.470 + blockPtr = realloc(blockPtr, size);
1.471 + if (blockPtr == NULL) {
1.472 + return NULL;
1.473 + }
1.474 + return Block2Ptr(blockPtr, NBUCKETS, reqsize);
1.475 + }
1.476 +
1.477 + /*
1.478 + * Finally, perform an expensive malloc/copy/free.
1.479 + */
1.480 +
1.481 + new = TclpAlloc(reqsize);
1.482 + if (new != NULL) {
1.483 + if (reqsize > blockPtr->b_reqsize) {
1.484 + reqsize = blockPtr->b_reqsize;
1.485 + }
1.486 + memcpy(new, ptr, reqsize);
1.487 + TclpFree(ptr);
1.488 + }
1.489 + return new;
1.490 +}
1.491 +
1.492 +
/*
 *----------------------------------------------------------------------
 *
 * TclThreadAllocObj --
 *
 *	Allocate a Tcl_Obj from the per-thread cache.
 *
 * Results:
 *	Pointer to uninitialized Tcl_Obj.
 *
 * Side effects:
 *	May move Tcl_Obj's from the shared cache or allocate new
 *	Tcl_Obj's if the list is empty.
 *
 *----------------------------------------------------------------------
 */

Tcl_Obj *
TclThreadAllocObj(void)
{
    register Cache *cachePtr = TclpGetAllocCache();
    register int nmove;
    register Tcl_Obj *objPtr;
    Tcl_Obj *newObjsPtr;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * If the per-thread list is empty, first try to refill it from the
     * shared cache (at most NOBJALLOC objects), and only fall back to a
     * bulk malloc() of NOBJALLOC fresh objects if the shared cache was
     * empty too.  The free list is threaded through each object's
     * internalRep.otherValuePtr field.
     */

    if (cachePtr->nobjs == 0) {
	Tcl_MutexLock(objLockPtr);
	nmove = sharedPtr->nobjs;
	if (nmove > 0) {
	    if (nmove > NOBJALLOC) {
		nmove = NOBJALLOC;
	    }
	    MoveObjs(sharedPtr, cachePtr, nmove);
	}
	Tcl_MutexUnlock(objLockPtr);
	if (cachePtr->nobjs == 0) {
	    /*
	     * nobjs is set before the malloc(); consistent because a
	     * failed malloc() panics rather than returning.
	     */
	    cachePtr->nobjs = nmove = NOBJALLOC;
	    newObjsPtr = malloc(sizeof(Tcl_Obj) * nmove);
	    if (newObjsPtr == NULL) {
		panic("alloc: could not allocate %d new objects", nmove);
	    }
	    /* Push the fresh objects onto the free list in reverse. */
	    while (--nmove >= 0) {
		objPtr = &newObjsPtr[nmove];
		objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
		cachePtr->firstObjPtr = objPtr;
	    }
	}
    }

    /*
     * Pop the first object.
     */

    objPtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
    --cachePtr->nobjs;
    return objPtr;
}
1.560 +
1.561 +
/*
 *----------------------------------------------------------------------
 *
 * TclThreadFreeObj --
 *
 *	Return a free Tcl_Obj to the per-thread cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move free Tcl_Obj's to shared list upon hitting high
 *	water mark.
 *
 *----------------------------------------------------------------------
 */

void
TclThreadFreeObj(Tcl_Obj *objPtr)
{
    Cache *cachePtr = TclpGetAllocCache();

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Push the object onto this thread's free list, using the
     * internalRep.otherValuePtr field as the link.
     */

    objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr;
    ++cachePtr->nobjs;

    /*
     * If the number of free objects has exceeded the high water mark,
     * move NOBJALLOC of them back to the shared list.
     */

    if (cachePtr->nobjs > NOBJHIGH) {
	Tcl_MutexLock(objLockPtr);
	MoveObjs(cachePtr, sharedPtr, NOBJALLOC);
	Tcl_MutexUnlock(objLockPtr);
    }
}
1.607 +
1.608 +
1.609 +/*
1.610 + *----------------------------------------------------------------------
1.611 + *
1.612 + * Tcl_GetMemoryInfo --
1.613 + *
1.614 + * Return a list-of-lists of memory stats.
1.615 + *
1.616 + * Results:
1.617 + * None.
1.618 + *
1.619 + * Side effects:
1.620 + * List appended to given dstring.
1.621 + *
1.622 + *----------------------------------------------------------------------
1.623 + */
1.624 +
1.625 +void
1.626 +Tcl_GetMemoryInfo(Tcl_DString *dsPtr)
1.627 +{
1.628 + Cache *cachePtr;
1.629 + char buf[200];
1.630 + int n;
1.631 +
1.632 + Tcl_MutexLock(listLockPtr);
1.633 + cachePtr = firstCachePtr;
1.634 + while (cachePtr != NULL) {
1.635 + Tcl_DStringStartSublist(dsPtr);
1.636 + if (cachePtr == sharedPtr) {
1.637 + Tcl_DStringAppendElement(dsPtr, "shared");
1.638 + } else {
1.639 + sprintf(buf, "thread%d", (int) cachePtr->owner);
1.640 + Tcl_DStringAppendElement(dsPtr, buf);
1.641 + }
1.642 + for (n = 0; n < NBUCKETS; ++n) {
1.643 + sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
1.644 + (unsigned long) binfo[n].blocksize,
1.645 + cachePtr->buckets[n].nfree,
1.646 + cachePtr->buckets[n].nget,
1.647 + cachePtr->buckets[n].nput,
1.648 + cachePtr->buckets[n].nrequest,
1.649 + cachePtr->buckets[n].nlock,
1.650 + cachePtr->buckets[n].nwait);
1.651 + Tcl_DStringAppendElement(dsPtr, buf);
1.652 + }
1.653 + Tcl_DStringEndSublist(dsPtr);
1.654 + cachePtr = cachePtr->nextPtr;
1.655 + }
1.656 + Tcl_MutexUnlock(listLockPtr);
1.657 +}
1.658 +
1.659 +
/*
 *----------------------------------------------------------------------
 *
 * MoveObjs --
 *
 *	Move Tcl_Obj's between caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 * Caller must hold objLockPtr when either cache is the shared cache;
 * nmove must be > 0 and <= fromPtr->nobjs (callers guarantee this).
 *
 *----------------------------------------------------------------------
 */

static void
MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove)
{
    register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
    Tcl_Obj *fromFirstObjPtr = objPtr;

    toPtr->nobjs += nmove;
    fromPtr->nobjs -= nmove;

    /*
     * Find the last object to be moved; set the next one
     * (the first one not to be moved) as the first object
     * in the 'from' cache.
     */

    while (--nmove) {
	objPtr = objPtr->internalRep.otherValuePtr;
    }
    fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;

    /*
     * Move all objects as a block - they are already linked to
     * each other, we just have to update the first and last.
     */

    objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
    toPtr->firstObjPtr = fromFirstObjPtr;
}
1.704 +
1.705 +
1.706 +/*
1.707 + *----------------------------------------------------------------------
1.708 + *
1.709 + * Block2Ptr, Ptr2Block --
1.710 + *
1.711 + * Convert between internal blocks and user pointers.
1.712 + *
1.713 + * Results:
1.714 + * User pointer or internal block.
1.715 + *
1.716 + * Side effects:
1.717 + * Invalid blocks will abort the server.
1.718 + *
1.719 + *----------------------------------------------------------------------
1.720 + */
1.721 +
1.722 +static char *
1.723 +Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize)
1.724 +{
1.725 + register void *ptr;
1.726 +
1.727 + blockPtr->b_magic1 = blockPtr->b_magic2 = MAGIC;
1.728 + blockPtr->b_bucket = bucket;
1.729 + blockPtr->b_reqsize = reqsize;
1.730 + ptr = ((void *) (blockPtr + 1));
1.731 +#if RCHECK
1.732 + ((unsigned char *)(ptr))[reqsize] = MAGIC;
1.733 +#endif
1.734 + return (char *) ptr;
1.735 +}
1.736 +
static Block *
Ptr2Block(char *ptr)
{
    register Block *blockPtr;

    /* The Block header sits immediately before the user pointer. */
    blockPtr = (((Block *) ptr) - 1);

    /*
     * Validate both header magic bytes (and, with RCHECK, the trailing
     * guard byte written by Block2Ptr); any mismatch indicates heap
     * corruption or a foreign pointer, which is fatal.
     */

    if (blockPtr->b_magic1 != MAGIC
#if RCHECK
	|| ((unsigned char *) ptr)[blockPtr->b_reqsize] != MAGIC
#endif
	|| blockPtr->b_magic2 != MAGIC) {
	panic("alloc: invalid block: %p: %x %x %x\n",
	    blockPtr, blockPtr->b_magic1, blockPtr->b_magic2,
	    ((unsigned char *) ptr)[blockPtr->b_reqsize]);
    }
    return blockPtr;
}
1.754 +
1.755 +
/*
 *----------------------------------------------------------------------
 *
 * LockBucket, UnlockBucket --
 *
 *	Set/unset the lock to access a bucket in the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Lock activity and contention are monitored globally and on
 *	a per-cache basis.
 *
 *----------------------------------------------------------------------
 */

static void
LockBucket(Cache *cachePtr, int bucket)
{
#if 0
    /*
     * Disabled contention accounting: try-lock first and count a wait
     * only when the lock was not immediately available.
     */
    if (Tcl_MutexTryLock(binfo[bucket].lockPtr) != TCL_OK) {
	Tcl_MutexLock(binfo[bucket].lockPtr);
	++cachePtr->buckets[bucket].nwait;
	++sharedPtr->buckets[bucket].nwait;
    }
#else
    Tcl_MutexLock(binfo[bucket].lockPtr);
#endif
    /* Count the acquisition both per-cache and globally. */
    ++cachePtr->buckets[bucket].nlock;
    ++sharedPtr->buckets[bucket].nlock;
}
1.788 +
1.789 +
static void
UnlockBucket(Cache *cachePtr, int bucket)
{
    /* cachePtr is unused; kept for symmetry with LockBucket(). */
    Tcl_MutexUnlock(binfo[bucket].lockPtr);
}
1.795 +
1.796 +
/*
 *----------------------------------------------------------------------
 *
 * PutBlocks --
 *
 *	Return unused blocks to the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 * nmove must be > 0 and <= the bucket's nfree (callers guarantee
 * this).
 *
 *----------------------------------------------------------------------
 */

static void
PutBlocks(Cache *cachePtr, int bucket, int nmove)
{
    register Block *lastPtr, *firstPtr;
    register int n = nmove;

    /*
     * Before acquiring the lock, walk the block list to find
     * the last block to be moved.
     */

    firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
    while (--n > 0) {
	lastPtr = lastPtr->b_next;
    }
    cachePtr->buckets[bucket].firstPtr = lastPtr->b_next;
    cachePtr->buckets[bucket].nfree -= nmove;

    /*
     * Acquire the lock and place the list of blocks at the front
     * of the shared cache bucket.
     */

    LockBucket(cachePtr, bucket);
    lastPtr->b_next = sharedPtr->buckets[bucket].firstPtr;
    sharedPtr->buckets[bucket].firstPtr = firstPtr;
    sharedPtr->buckets[bucket].nfree += nmove;
    UnlockBucket(cachePtr, bucket);
}
1.842 +
1.843 +
/*
 *----------------------------------------------------------------------
 *
 * GetBlocks --
 *
 *	Get more blocks for a bucket.
 *
 * Results:
 *	1 if blocks were allocated, 0 otherwise.
 *
 * Side effects:
 *	Cache may be filled with available blocks.
 *
 *----------------------------------------------------------------------
 */

static int
GetBlocks(Cache *cachePtr, int bucket)
{
    register Block *blockPtr;
    register int n;
    register size_t size;

    /*
     * First, attempt to move blocks from the shared cache. Note
     * the potentially dirty read of nfree before acquiring the lock
     * which is a slight performance enhancement. The value is
     * verified after the lock is actually acquired.
     */

    if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].nfree > 0) {
	LockBucket(cachePtr, bucket);
	if (sharedPtr->buckets[bucket].nfree > 0) {

	    /*
	     * Either move the entire list or walk the list to find
	     * the last block to move.
	     */

	    n = binfo[bucket].nmove;
	    if (n >= sharedPtr->buckets[bucket].nfree) {
		/* Take everything the shared bucket has. */
		cachePtr->buckets[bucket].firstPtr =
		    sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].nfree =
		    sharedPtr->buckets[bucket].nfree;
		sharedPtr->buckets[bucket].firstPtr = NULL;
		sharedPtr->buckets[bucket].nfree = 0;
	    } else {
		/* Take the first n blocks, leave the rest shared. */
		blockPtr = sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].firstPtr = blockPtr;
		sharedPtr->buckets[bucket].nfree -= n;
		cachePtr->buckets[bucket].nfree = n;
		while (--n > 0) {
		    blockPtr = blockPtr->b_next;
		}
		sharedPtr->buckets[bucket].firstPtr = blockPtr->b_next;
		blockPtr->b_next = NULL;
	    }
	}
	UnlockBucket(cachePtr, bucket);
    }

    if (cachePtr->buckets[bucket].nfree == 0) {

	/*
	 * If no blocks could be moved from shared, first look for a
	 * larger block in this cache to split up.
	 */

	blockPtr = NULL;
	n = NBUCKETS;
	size = 0; /* lint */
	while (--n > bucket) {
	    if (cachePtr->buckets[n].nfree > 0) {
		size = binfo[n].blocksize;
		blockPtr = cachePtr->buckets[n].firstPtr;
		cachePtr->buckets[n].firstPtr = blockPtr->b_next;
		--cachePtr->buckets[n].nfree;
		break;
	    }
	}

	/*
	 * Otherwise, allocate a big new block directly.
	 */

	if (blockPtr == NULL) {
	    size = MAXALLOC;
	    blockPtr = malloc(size);
	    if (blockPtr == NULL) {
		return 0;
	    }
	}

	/*
	 * Split the larger block into smaller blocks for this bucket,
	 * chaining them through b_next at blocksize intervals.
	 */

	n = size / binfo[bucket].blocksize;
	cachePtr->buckets[bucket].nfree = n;
	cachePtr->buckets[bucket].firstPtr = blockPtr;
	while (--n > 0) {
	    blockPtr->b_next = (Block *)
		((char *) blockPtr + binfo[bucket].blocksize);
	    blockPtr = blockPtr->b_next;
	}
	blockPtr->b_next = NULL;
    }
    return 1;
}
1.954 +
1.955 +/*
1.956 + *----------------------------------------------------------------------
1.957 + *
1.958 + * TclFinalizeThreadAlloc --
1.959 + *
1.960 + * This procedure is used to destroy all private resources used in
1.961 + * this file.
1.962 + *
1.963 + * Results:
1.964 + * None.
1.965 + *
1.966 + * Side effects:
1.967 + * None.
1.968 + *
1.969 + *----------------------------------------------------------------------
1.970 + */
1.971 +
1.972 +void
1.973 +TclFinalizeThreadAlloc()
1.974 +{
1.975 + int i;
1.976 + for (i = 0; i < NBUCKETS; ++i) {
1.977 + TclpFreeAllocMutex(binfo[i].lockPtr);
1.978 + binfo[i].lockPtr = NULL;
1.979 + }
1.980 +
1.981 + TclpFreeAllocMutex(objLockPtr);
1.982 + objLockPtr = NULL;
1.983 +
1.984 + TclpFreeAllocMutex(listLockPtr);
1.985 + listLockPtr = NULL;
1.986 +
1.987 + TclpFreeAllocCache(NULL);
1.988 +}
1.989 +
1.990 +#else /* ! defined(TCL_THREADS) && ! defined(USE_THREAD_ALLOC) */
1.991 +
/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in
 *	this file.  In this (non-threaded-allocator) build it must
 *	never actually be reached.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Panics unconditionally.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)	/* (void) instead of old-style () so this is
				 * a proper prototype-form definition. */
{
    Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use.");
}
1.1014 +
1.1015 +#endif /* TCL_THREADS */