/*
 * tclThreadAlloc.c --
 *
 *	This is a very fast storage allocator for use with threads, designed
 *	to avoid lock contention.  The basic strategy is to allocate memory in
 *	fixed-size blocks from block caches.
 *
 * The Initial Developer of the Original Code is America Online, Inc.
 * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclThreadAlloc.c,v 1.4.2.7 2005/12/20 22:16:34 dkf Exp $
 */

#include "tclInt.h"

#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC) && !defined(TCL_MEM_DEBUG)

#ifdef WIN32
#include "tclWinInt.h"
#else
extern Tcl_Mutex *TclpNewAllocMutex(void);
extern void *TclpGetAllocCache(void);
extern void TclpSetAllocCache(void *);
#endif

/*
 * If range checking is enabled, an additional byte will be allocated
 * to store the magic number at the end of the requested memory.
 */

#ifndef RCHECK
#ifdef  NDEBUG
#define RCHECK		0
#else
#define RCHECK		1
#endif
#endif

/*
 * The following defines set the number of Tcl_Obj's to allocate/move
 * at a time and the high water mark at which a per-thread cache is
 * pruned.  On a 32-bit system, sizeof(Tcl_Obj) = 24, so 800 * 24 is
 * roughly 19k.
 */

#define NOBJALLOC	 800
#define NOBJHIGH	1200
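
/*
 * Tcl_Obj's migrate between the per-thread and shared free lists
 * NOBJALLOC at a time; TclThreadFreeObj() returns a batch to the
 * shared list only once a thread's free list has grown beyond
 * NOBJHIGH, which keeps contention on the shared obj lock low.
 */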

/*
 * The following defines the number of buckets in the bucket
 * cache; the block sizes range from (1<<4) to (1<<(3+NBUCKETS)).
 */

#define NBUCKETS	  11
#define MAXALLOC	  16384

/*
 * The following union stores accounting information for
 * each block including two small magic numbers and
 * a bucket number when in use or a next pointer when
 * free.  The original requested size (not including
 * the Block overhead) is also maintained.
 */

typedef struct Block {
    union {
    	struct Block *next;	  /* Next in free list. */
    	struct {
	    unsigned char magic1; /* First magic number. */
	    unsigned char bucket; /* Bucket block allocated from. */
	    unsigned char unused; /* Padding. */
	    unsigned char magic2; /* Second magic number. */
        } b_s;
    } b_u;
    size_t b_reqsize;		  /* Requested allocation size. */
} Block;
#define b_next		b_u.next
#define b_bucket	b_u.b_s.bucket
#define b_magic1	b_u.b_s.magic1
#define b_magic2	b_u.b_s.magic2
#define MAGIC		0xef
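
/*
 * An in-use block is laid out as follows; Block2Ptr() hands the
 * caller a pointer just past the Block header, and when RCHECK is
 * enabled a trailing MAGIC byte guards against overruns:
 *
 *	+--------------+--------------------------+----------------+
 *	| Block header | b_reqsize bytes for user | MAGIC (RCHECK) |
 *	+--------------+--------------------------+----------------+
 */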

/*
 * The following structure defines a bucket of blocks with
 * various accounting and statistics information.
 */

typedef struct Bucket {
    Block *firstPtr;
    long nfree;
    long nget;
    long nput;
    long nwait;
    long nlock;
    long nrequest;
} Bucket;

/*
 * The following structure defines a cache of buckets and objs.
 */

typedef struct Cache {
    struct Cache  *nextPtr;
    Tcl_ThreadId   owner;
    Tcl_Obj       *firstObjPtr;
    int            nobjs;
    int	           nsysalloc;
    Bucket         buckets[NBUCKETS];
} Cache;

/*
 * The following array specifies various per-bucket
 * limits and locks.  The values are statically initialized
 * to avoid calculating them repeatedly.
 */

struct binfo {
    size_t blocksize;	/* Bucket blocksize. */
    int maxblocks;	/* Max blocks before move to share. */
    int nmove;		/* Num blocks to move to share. */
    Tcl_Mutex *lockPtr; /* Share bucket lock. */
} binfo[NBUCKETS] = {
    {   16, 1024, 512, NULL},
    {   32,  512, 256, NULL},
    {   64,  256, 128, NULL},
    {  128,  128,  64, NULL},
    {  256,   64,  32, NULL},
    {  512,   32,  16, NULL},
    { 1024,   16,   8, NULL},
    { 2048,    8,   4, NULL},
    { 4096,    4,   2, NULL},
    { 8192,    2,   1, NULL},
    {16384,    1,   1, NULL},
};
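
/*
 * A worked example of bucket selection, assuming a 32-bit build where
 * sizeof(Block) == 8 and RCHECK is 1: a 100-byte request becomes
 * 100 + 8 + 1 = 109 bytes, which is served from the 128-byte bucket
 * (bucket 3).  Requests needing more than MAXALLOC bytes bypass the
 * buckets entirely and go straight to malloc().
 */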

/*
 * Static functions defined in this file.
 */

static void LockBucket(Cache *cachePtr, int bucket);
static void UnlockBucket(Cache *cachePtr, int bucket);
static void PutBlocks(Cache *cachePtr, int bucket, int nmove);
static int  GetBlocks(Cache *cachePtr, int bucket);
static Block *Ptr2Block(char *ptr);
static char *Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize);
static void MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove);

/*
 * Local variables defined in this file and initialized at
 * startup.
 */

static Tcl_Mutex *listLockPtr;
static Tcl_Mutex *objLockPtr;
static Cache     sharedCache;
static Cache    *sharedPtr = &sharedCache;
static Cache    *firstCachePtr = &sharedCache;

/*
 *----------------------------------------------------------------------
 *
 *  GetCache --
 *
 *	Gets per-thread memory cache, allocating it if necessary.
 *
 * Results:
 *	Pointer to cache.
 *
 * Side effects:
 *  	None.
 *
 *----------------------------------------------------------------------
 */

static Cache *
GetCache(void)
{
    Cache *cachePtr;

    /*
     * Check for first-time initialization.
     */

    if (listLockPtr == NULL) {
	Tcl_Mutex *initLockPtr;
    	int i;

	initLockPtr = Tcl_GetAllocMutex();
	Tcl_MutexLock(initLockPtr);
	if (listLockPtr == NULL) {
	    listLockPtr = TclpNewAllocMutex();
	    objLockPtr = TclpNewAllocMutex();
	    for (i = 0; i < NBUCKETS; ++i) {
	        binfo[i].lockPtr = TclpNewAllocMutex();
	    }
	}
	Tcl_MutexUnlock(initLockPtr);
    }

    /*
     * Get this thread's cache, allocating if necessary.
     */

    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
    	cachePtr = calloc(1, sizeof(Cache));
    	if (cachePtr == NULL) {
	    panic("alloc: could not allocate new cache");
    	}
    	Tcl_MutexLock(listLockPtr);
    	cachePtr->nextPtr = firstCachePtr;
    	firstCachePtr = cachePtr;
    	Tcl_MutexUnlock(listLockPtr);
    	cachePtr->owner = Tcl_GetCurrentThread();
	TclpSetAllocCache(cachePtr);
    }
    return cachePtr;
}

/*
 *----------------------------------------------------------------------
 *
 *  TclFreeAllocCache --
 *
 *	Flush and delete a cache, removing from list of caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFreeAllocCache(void *arg)
{
    Cache *cachePtr = arg;
    Cache **nextPtrPtr;
    register int   bucket;

    /*
     * Flush blocks.
     */

    for (bucket = 0; bucket < NBUCKETS; ++bucket) {
	if (cachePtr->buckets[bucket].nfree > 0) {
	    PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].nfree);
	}
    }

    /*
     * Flush objs.
     */

    if (cachePtr->nobjs > 0) {
    	Tcl_MutexLock(objLockPtr);
    	MoveObjs(cachePtr, sharedPtr, cachePtr->nobjs);
    	Tcl_MutexUnlock(objLockPtr);
    }

    /*
     * Remove from pool list.
     */

    Tcl_MutexLock(listLockPtr);
    nextPtrPtr = &firstCachePtr;
    while (*nextPtrPtr != cachePtr) {
	nextPtrPtr = &(*nextPtrPtr)->nextPtr;
    }
    *nextPtrPtr = cachePtr->nextPtr;
    cachePtr->nextPtr = NULL;
    Tcl_MutexUnlock(listLockPtr);
    free(cachePtr);
}

/*
 *----------------------------------------------------------------------
 *
 *  TclpAlloc --
 *
 *	Allocate memory.
 *
 * Results:
 *	Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *	May allocate more blocks for a bucket.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(unsigned int reqsize)
{
    Cache         *cachePtr = TclpGetAllocCache();
    Block         *blockPtr;
    register int   bucket;
    size_t         size;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Increment the requested size to include room for
     * the Block structure.  Call malloc() directly if the
     * required amount is greater than the largest block,
     * otherwise pop the smallest block large enough,
     * allocating more blocks if necessary.
     */

    blockPtr = NULL;
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    if (size > MAXALLOC) {
	bucket = NBUCKETS;
    	blockPtr = malloc(size);
	if (blockPtr != NULL) {
	    cachePtr->nsysalloc += reqsize;
	}
    } else {
    	bucket = 0;
    	while (binfo[bucket].blocksize < size) {
    	    ++bucket;
    	}
    	if (cachePtr->buckets[bucket].nfree || GetBlocks(cachePtr, bucket)) {
	    blockPtr = cachePtr->buckets[bucket].firstPtr;
	    cachePtr->buckets[bucket].firstPtr = blockPtr->b_next;
	    --cachePtr->buckets[bucket].nfree;
    	    ++cachePtr->buckets[bucket].nget;
	    cachePtr->buckets[bucket].nrequest += reqsize;
	}
    }
    if (blockPtr == NULL) {
    	return NULL;
    }
    return Block2Ptr(blockPtr, bucket, reqsize);
}

/*
 *----------------------------------------------------------------------
 *
 *  TclpFree --
 *
 *	Return blocks to the thread block cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move blocks to shared cache.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(char *ptr)
{
    if (ptr != NULL) {
	Cache  *cachePtr = TclpGetAllocCache();
	Block *blockPtr;
	int bucket;

	if (cachePtr == NULL) {
	    cachePtr = GetCache();
	}

	/*
	 * Get the block back from the user pointer and
	 * call system free directly for large blocks.
	 * Otherwise, push the block back on the bucket and
	 * move blocks to the shared cache if there are now
	 * too many free.
	 */

	blockPtr = Ptr2Block(ptr);
	bucket = blockPtr->b_bucket;
	if (bucket == NBUCKETS) {
	    cachePtr->nsysalloc -= blockPtr->b_reqsize;
	    free(blockPtr);
	} else {
	    cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
	    blockPtr->b_next = cachePtr->buckets[bucket].firstPtr;
	    cachePtr->buckets[bucket].firstPtr = blockPtr;
	    ++cachePtr->buckets[bucket].nfree;
	    ++cachePtr->buckets[bucket].nput;
	    if (cachePtr != sharedPtr &&
		    cachePtr->buckets[bucket].nfree > binfo[bucket].maxblocks) {
		PutBlocks(cachePtr, bucket, binfo[bucket].nmove);
	    }
	}
    }
}
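
/*
 * In a threaded build these routines sit behind the public memory
 * calls, so ordinary code reaches the caches indirectly; a minimal
 * caller-side sketch (not part of this file):
 *
 *	char *buf = Tcl_Alloc(100);
 *	...
 *	Tcl_Free(buf);
 */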

/*
 *----------------------------------------------------------------------
 *
 *  TclpRealloc --
 *
 *	Re-allocate memory to a larger or smaller size.
 *
 * Results:
 *	Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *	Previous memory, if any, may be freed.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(char *ptr, unsigned int reqsize)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    void *new;
    size_t size, min;
    int bucket;

    if (ptr == NULL) {
	return TclpAlloc(reqsize);
    }

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * If the block is not a system block and fits in place,
     * simply return the existing pointer.  Otherwise, if the block
     * is a system block and the new size would also require a system
     * block, call realloc() directly.
     */

    blockPtr = Ptr2Block(ptr);
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    bucket = blockPtr->b_bucket;
    if (bucket != NBUCKETS) {
	if (bucket > 0) {
	    min = binfo[bucket-1].blocksize;
	} else {
	    min = 0;
	}
	if (size > min && size <= binfo[bucket].blocksize) {
	    cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
	    cachePtr->buckets[bucket].nrequest += reqsize;
	    return Block2Ptr(blockPtr, bucket, reqsize);
	}
    } else if (size > MAXALLOC) {
	cachePtr->nsysalloc -= blockPtr->b_reqsize;
	cachePtr->nsysalloc += reqsize;
	blockPtr = realloc(blockPtr, size);
	if (blockPtr == NULL) {
	    return NULL;
	}
	return Block2Ptr(blockPtr, NBUCKETS, reqsize);
    }

    /*
     * Finally, perform an expensive malloc/copy/free.
     */

    new = TclpAlloc(reqsize);
    if (new != NULL) {
	if (reqsize > blockPtr->b_reqsize) {
	    reqsize = blockPtr->b_reqsize;
	}
    	memcpy(new, ptr, reqsize);
    	TclpFree(ptr);
    }
    return new;
}

/*
 *----------------------------------------------------------------------
 *
 * TclThreadAllocObj --
 *
 *	Allocate a Tcl_Obj from the per-thread cache.
 *
 * Results:
 *	Pointer to uninitialized Tcl_Obj.
 *
 * Side effects:
 *	May move Tcl_Obj's from the shared cache or allocate new Tcl_Obj's
 *  	if the list is empty.
 *
 *----------------------------------------------------------------------
 */

Tcl_Obj *
TclThreadAllocObj(void)
{
    register Cache *cachePtr = TclpGetAllocCache();
    register int nmove;
    register Tcl_Obj *objPtr;
    Tcl_Obj *newObjsPtr;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Get this thread's obj list structure and move
     * or allocate new objs if necessary.
     */

    if (cachePtr->nobjs == 0) {
    	Tcl_MutexLock(objLockPtr);
	nmove = sharedPtr->nobjs;
	if (nmove > 0) {
	    if (nmove > NOBJALLOC) {
		nmove = NOBJALLOC;
	    }
	    MoveObjs(sharedPtr, cachePtr, nmove);
	}
    	Tcl_MutexUnlock(objLockPtr);
	if (cachePtr->nobjs == 0) {
	    cachePtr->nobjs = nmove = NOBJALLOC;
	    newObjsPtr = malloc(sizeof(Tcl_Obj) * nmove);
	    if (newObjsPtr == NULL) {
		panic("alloc: could not allocate %d new objects", nmove);
	    }
	    while (--nmove >= 0) {
		objPtr = &newObjsPtr[nmove];
		objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
		cachePtr->firstObjPtr = objPtr;
	    }
	}
    }

    /*
     * Pop the first object.
     */

    objPtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
    --cachePtr->nobjs;
    return objPtr;
}
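
/*
 * Free Tcl_Obj's are chained through their internalRep.otherValuePtr
 * field, so the fields of a popped Tcl_Obj hold stale list data until
 * the caller initializes them.
 */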

/*
 *----------------------------------------------------------------------
 *
 * TclThreadFreeObj --
 *
 *	Return a free Tcl_Obj to the per-thread cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move free Tcl_Obj's to shared list upon hitting high
 *  	water mark.
 *
 *----------------------------------------------------------------------
 */

void
TclThreadFreeObj(Tcl_Obj *objPtr)
{
    Cache *cachePtr = TclpGetAllocCache();

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Get this thread's list and push on the free Tcl_Obj.
     */

    objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr;
    ++cachePtr->nobjs;

    /*
     * If the number of free objects has exceeded the high
     * water mark, move some blocks to the shared list.
     */

    if (cachePtr->nobjs > NOBJHIGH) {
	Tcl_MutexLock(objLockPtr);
	MoveObjs(cachePtr, sharedPtr, NOBJALLOC);
	Tcl_MutexUnlock(objLockPtr);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_GetMemoryInfo --
 *
 *	Return a list-of-lists of memory stats.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *  	List appended to given dstring.
 *
 *----------------------------------------------------------------------
 */

void
Tcl_GetMemoryInfo(Tcl_DString *dsPtr)
{
    Cache *cachePtr;
    char buf[200];
    int n;

    Tcl_MutexLock(listLockPtr);
    cachePtr = firstCachePtr;
    while (cachePtr != NULL) {
	Tcl_DStringStartSublist(dsPtr);
	if (cachePtr == sharedPtr) {
    	    Tcl_DStringAppendElement(dsPtr, "shared");
	} else {
	    sprintf(buf, "thread%d", (int) cachePtr->owner);
    	    Tcl_DStringAppendElement(dsPtr, buf);
	}
	for (n = 0; n < NBUCKETS; ++n) {
    	    sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
		(unsigned long) binfo[n].blocksize,
		cachePtr->buckets[n].nfree,
		cachePtr->buckets[n].nget,
		cachePtr->buckets[n].nput,
		cachePtr->buckets[n].nrequest,
		cachePtr->buckets[n].nlock,
		cachePtr->buckets[n].nwait);
	    Tcl_DStringAppendElement(dsPtr, buf);
	}
	Tcl_DStringEndSublist(dsPtr);
	cachePtr = cachePtr->nextPtr;
    }
    Tcl_MutexUnlock(listLockPtr);
}
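
/*
 * A minimal caller-side sketch of dumping the stats; each sublist
 * holds a cache name followed by one element of counters per bucket:
 *
 *	Tcl_DString ds;
 *
 *	Tcl_DStringInit(&ds);
 *	Tcl_GetMemoryInfo(&ds);
 *	printf("%s\n", Tcl_DStringValue(&ds));
 *	Tcl_DStringFree(&ds);
 */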

/*
 *----------------------------------------------------------------------
 *
 * MoveObjs --
 *
 *	Move Tcl_Obj's between caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *  	None.
 *
 *----------------------------------------------------------------------
 */

static void
MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove)
{
    register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
    Tcl_Obj *fromFirstObjPtr = objPtr;

    toPtr->nobjs += nmove;
    fromPtr->nobjs -= nmove;

    /*
     * Find the last object to be moved; set the next one
     * (the first one not to be moved) as the first object
     * in the 'from' cache.
     */

    while (--nmove) {
	objPtr = objPtr->internalRep.otherValuePtr;
    }
    fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;

    /*
     * Move all objects as a block - they are already linked to
     * each other, we just have to update the first and last.
     */

    objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
    toPtr->firstObjPtr = fromFirstObjPtr;
}

/*
 *----------------------------------------------------------------------
 *
 *  Block2Ptr, Ptr2Block --
 *
 *	Convert between internal blocks and user pointers.
 *
 * Results:
 *	User pointer or internal block.
 *
 * Side effects:
 *	Invalid blocks will cause a panic.
 *
 *----------------------------------------------------------------------
 */

static char *
Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize)
{
    register void *ptr;

    blockPtr->b_magic1 = blockPtr->b_magic2 = MAGIC;
    blockPtr->b_bucket = bucket;
    blockPtr->b_reqsize = reqsize;
    ptr = ((void *) (blockPtr + 1));
#if RCHECK
    ((unsigned char *)(ptr))[reqsize] = MAGIC;
#endif
    return (char *) ptr;
}

static Block *
Ptr2Block(char *ptr)
{
    register Block *blockPtr;

    blockPtr = (((Block *) ptr) - 1);
    if (blockPtr->b_magic1 != MAGIC
#if RCHECK
	|| ((unsigned char *) ptr)[blockPtr->b_reqsize] != MAGIC
#endif
	|| blockPtr->b_magic2 != MAGIC) {
	panic("alloc: invalid block: %p: %x %x %x\n",
	    blockPtr, blockPtr->b_magic1, blockPtr->b_magic2,
	    ((unsigned char *) ptr)[blockPtr->b_reqsize]);
    }
    return blockPtr;
}

/*
 *----------------------------------------------------------------------
 *
 *  LockBucket, UnlockBucket --
 *
 *	Set/unset the lock to access a bucket in the shared cache.
 *
 * Results:
 *  	None.
 *
 * Side effects:
 *	Lock activity and contention are monitored globally and on
 *  	a per-cache basis.
 *
 *----------------------------------------------------------------------
 */

static void
LockBucket(Cache *cachePtr, int bucket)
{
#if 0
    if (Tcl_MutexTryLock(binfo[bucket].lockPtr) != TCL_OK) {
	Tcl_MutexLock(binfo[bucket].lockPtr);
    	++cachePtr->buckets[bucket].nwait;
    	++sharedPtr->buckets[bucket].nwait;
    }
#else
    Tcl_MutexLock(binfo[bucket].lockPtr);
#endif
    ++cachePtr->buckets[bucket].nlock;
    ++sharedPtr->buckets[bucket].nlock;
}

static void
UnlockBucket(Cache *cachePtr, int bucket)
{
    Tcl_MutexUnlock(binfo[bucket].lockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 *  PutBlocks --
 *
 *	Return unused blocks to the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
PutBlocks(Cache *cachePtr, int bucket, int nmove)
{
    register Block *lastPtr, *firstPtr;
    register int n = nmove;

    /*
     * Before acquiring the lock, walk the block list to find
     * the last block to be moved.
     */

    firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
    while (--n > 0) {
	lastPtr = lastPtr->b_next;
    }
    cachePtr->buckets[bucket].firstPtr = lastPtr->b_next;
    cachePtr->buckets[bucket].nfree -= nmove;

    /*
     * Acquire the lock and place the list of blocks at the front
     * of the shared cache bucket.
     */

    LockBucket(cachePtr, bucket);
    lastPtr->b_next = sharedPtr->buckets[bucket].firstPtr;
    sharedPtr->buckets[bucket].firstPtr = firstPtr;
    sharedPtr->buckets[bucket].nfree += nmove;
    UnlockBucket(cachePtr, bucket);
}

/*
 *----------------------------------------------------------------------
 *
 *  GetBlocks --
 *
 *	Get more blocks for a bucket.
 *
 * Results:
 *	1 if blocks were allocated, 0 otherwise.
 *
 * Side effects:
 *	Cache may be filled with available blocks.
 *
 *----------------------------------------------------------------------
 */

static int
GetBlocks(Cache *cachePtr, int bucket)
{
    register Block *blockPtr;
    register int n;
    register size_t size;

    /*
     * First, attempt to move blocks from the shared cache.  Note
     * the potentially dirty read of nfree before acquiring the lock
     * which is a slight performance enhancement.  The value is
     * verified after the lock is actually acquired.
     */

    if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].nfree > 0) {
	LockBucket(cachePtr, bucket);
	if (sharedPtr->buckets[bucket].nfree > 0) {

	    /*
	     * Either move the entire list or walk the list to find
	     * the last block to move.
	     */

	    n = binfo[bucket].nmove;
	    if (n >= sharedPtr->buckets[bucket].nfree) {
		cachePtr->buckets[bucket].firstPtr =
		    sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].nfree =
		    sharedPtr->buckets[bucket].nfree;
		sharedPtr->buckets[bucket].firstPtr = NULL;
		sharedPtr->buckets[bucket].nfree = 0;
	    } else {
		blockPtr = sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].firstPtr = blockPtr;
		sharedPtr->buckets[bucket].nfree -= n;
		cachePtr->buckets[bucket].nfree = n;
		while (--n > 0) {
    		    blockPtr = blockPtr->b_next;
		}
		sharedPtr->buckets[bucket].firstPtr = blockPtr->b_next;
		blockPtr->b_next = NULL;
	    }
	}
	UnlockBucket(cachePtr, bucket);
    }

    if (cachePtr->buckets[bucket].nfree == 0) {

	/*
	 * If no blocks could be moved from shared, first look for a
	 * larger block in this cache to split up.
	 */

    	blockPtr = NULL;
	n = NBUCKETS;
	size = 0; /* lint */
	while (--n > bucket) {
    	    if (cachePtr->buckets[n].nfree > 0) {
		size = binfo[n].blocksize;
		blockPtr = cachePtr->buckets[n].firstPtr;
		cachePtr->buckets[n].firstPtr = blockPtr->b_next;
		--cachePtr->buckets[n].nfree;
		break;
	    }
	}

	/*
	 * Otherwise, allocate a big new block directly.
	 */

	if (blockPtr == NULL) {
	    size = MAXALLOC;
	    blockPtr = malloc(size);
	    if (blockPtr == NULL) {
		return 0;
	    }
	}

	/*
	 * Split the larger block into smaller blocks for this bucket.
	 */

	n = size / binfo[bucket].blocksize;
	cachePtr->buckets[bucket].nfree = n;
	cachePtr->buckets[bucket].firstPtr = blockPtr;
	while (--n > 0) {
	    blockPtr->b_next = (Block *)
		((char *) blockPtr + binfo[bucket].blocksize);
	    blockPtr = blockPtr->b_next;
	}
	blockPtr->b_next = NULL;
    }
    return 1;
}
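
/*
 * Note on the split arithmetic above: with MAXALLOC at 16384, a fresh
 * system block yields 16384/16 = 1024 blocks for the smallest bucket,
 * matching binfo[0].maxblocks, and proportionally fewer blocks for
 * each larger bucket.
 */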

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in
 *	this file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)
{
    int i;
    for (i = 0; i < NBUCKETS; ++i) {
        TclpFreeAllocMutex(binfo[i].lockPtr);
        binfo[i].lockPtr = NULL;
    }

    TclpFreeAllocMutex(objLockPtr);
    objLockPtr = NULL;

    TclpFreeAllocMutex(listLockPtr);
    listLockPtr = NULL;

    TclpFreeAllocCache(NULL);
}

#else /* ! defined(TCL_THREADS) && ! defined(USE_THREAD_ALLOC) */

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in
 *	this file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)
{
    Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use.");
}

#endif /* TCL_THREADS */