/*
 * tclAlloc.c --
 *
 *	This is a very fast storage allocator.  It allocates blocks of a
 *	small number of different sizes, and keeps free lists of each size.
 *	Blocks that don't exactly fit are passed up to the next larger size.
 *	Blocks over a certain size are directly allocated from the system.
 *
 * Copyright (c) 1983 Regents of the University of California.
 * Copyright (c) 1996-1997 Sun Microsystems, Inc.
 * Copyright (c) 1998-1999 by Scriptics Corporation.
 *
 * Portions contributed by Chris Kingsley, Jack Jansen and Ray Johnson.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclAlloc.c,v 1.16.2.1 2004/10/28 21:12:37 andreas_kupries Exp $
 */

/*
 * Windows and Unix use an alternative allocator when building with threads
 * that has significantly reduced lock contention.  This whole file is
 * therefore compiled out when the thread allocator is in use (unless
 * memory debugging is on, which needs the serial allocator).
 */

#if !defined(TCL_THREADS) || !defined(USE_THREAD_ALLOC) || defined(TCL_MEM_DEBUG)

#include "tclInt.h"
#include "tclPort.h"

#if USE_TCLALLOC

#ifdef TCL_DEBUG
#   define DEBUG
/*  #define MSTATS  */
#   define RCHECK
#endif

/*
 * We should really make use of AC_CHECK_TYPE(caddr_t)
 * here, but it can wait until Tcl uses config.h properly.
 */

#if defined(MAC_TCL) || defined(_MSC_VER) || defined(__MINGW32__) || defined(__BORLANDC__)
typedef unsigned long caddr_t;
#endif

/*
 * The overhead on a block is at least 8 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */

union overhead {
    union overhead *ov_next;	/* when free */
    unsigned char ov_padding[8]; /* Ensure the structure is 8-byte aligned. */
    struct {
	unsigned char	ovu_magic0;	/* magic number */
	unsigned char	ovu_index;	/* bucket #, or 0xff for big blocks */
	unsigned char	ovu_unused;	/* unused */
	unsigned char	ovu_magic1;	/* other magic number */
#ifdef RCHECK
	unsigned short	ovu_rmagic;	/* range magic number */
	unsigned long	ovu_size;	/* actual block size */
	unsigned short	ovu_unused2;	/* padding to 8-byte align */
#endif
    } ovu;
#define ov_magic0	ovu.ovu_magic0
#define ov_magic1	ovu.ovu_magic1
#define ov_index	ovu.ovu_index
#define ov_rmagic	ovu.ovu_rmagic
#define ov_size		ovu.ovu_size
};


#define MAGIC		0xef	/* magic # on accounting info */
#define RMAGIC		0x5555	/* magic # on range info */

#ifdef RCHECK
#define	RSLOP		sizeof (unsigned short)
#else
#define	RSLOP		0
#endif

/*
 * Per-block bookkeeping cost: the overhead union plus, with range
 * checking, the trailing RMAGIC guard word.
 */

#define OVERHEAD (sizeof(union overhead) + RSLOP)

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */

#define NBUCKETS	13
#define MAXMALLOC	(1<<(NBUCKETS+2))
static	union overhead *nextf[NBUCKETS];
sl@0: */ sl@0: sl@0: struct block { sl@0: struct block *nextPtr; /* Linked list. */ sl@0: struct block *prevPtr; /* Linked list for big blocks, ensures 8-byte sl@0: * alignment for suballocated blocks. */ sl@0: }; sl@0: sl@0: static struct block *blockList; /* Tracks the suballocated blocks. */ sl@0: static struct block bigBlocks = { /* Big blocks aren't suballocated. */ sl@0: &bigBlocks, &bigBlocks sl@0: }; sl@0: sl@0: /* sl@0: * The allocator is protected by a special mutex that must be sl@0: * explicitly initialized. Futhermore, because Tcl_Alloc may be sl@0: * used before anything else in Tcl, we make this module self-initializing sl@0: * after all with the allocInit variable. sl@0: */ sl@0: sl@0: #ifdef TCL_THREADS sl@0: static Tcl_Mutex *allocMutexPtr; sl@0: #endif sl@0: static int allocInit = 0; sl@0: sl@0: sl@0: #ifdef MSTATS sl@0: sl@0: /* sl@0: * nmalloc[i] is the difference between the number of mallocs and frees sl@0: * for a given block size. sl@0: */ sl@0: sl@0: static unsigned int nmalloc[NBUCKETS+1]; sl@0: #include sl@0: #endif sl@0: sl@0: #if defined(DEBUG) || defined(RCHECK) sl@0: #define ASSERT(p) if (!(p)) panic(# p) sl@0: #define RANGE_ASSERT(p) if (!(p)) panic(# p) sl@0: #else sl@0: #define ASSERT(p) sl@0: #define RANGE_ASSERT(p) sl@0: #endif sl@0: sl@0: /* sl@0: * Prototypes for functions used only in this file. sl@0: */ sl@0: sl@0: static void MoreCore _ANSI_ARGS_((int bucket)); sl@0: sl@0: sl@0: /* sl@0: *------------------------------------------------------------------------- sl@0: * sl@0: * TclInitAlloc -- sl@0: * sl@0: * Initialize the memory system. sl@0: * sl@0: * Results: sl@0: * None. sl@0: * sl@0: * Side effects: sl@0: * Initialize the mutex used to serialize allocations. 
sl@0: * sl@0: *------------------------------------------------------------------------- sl@0: */ sl@0: sl@0: void sl@0: TclInitAlloc() sl@0: { sl@0: if (!allocInit) { sl@0: allocInit = 1; sl@0: #ifdef TCL_THREADS sl@0: allocMutexPtr = Tcl_GetAllocMutex(); sl@0: #endif sl@0: } sl@0: } sl@0: sl@0: /* sl@0: *------------------------------------------------------------------------- sl@0: * sl@0: * TclFinalizeAllocSubsystem -- sl@0: * sl@0: * Release all resources being used by this subsystem, including sl@0: * aggressively freeing all memory allocated by TclpAlloc() that sl@0: * has not yet been released with TclpFree(). sl@0: * sl@0: * After this function is called, all memory allocated with sl@0: * TclpAlloc() should be considered unusable. sl@0: * sl@0: * Results: sl@0: * None. sl@0: * sl@0: * Side effects: sl@0: * This subsystem is self-initializing, since memory can be sl@0: * allocated before Tcl is formally initialized. After this call, sl@0: * this subsystem has been reset to its initial state and is sl@0: * usable again. 
sl@0: * sl@0: *------------------------------------------------------------------------- sl@0: */ sl@0: sl@0: void sl@0: TclFinalizeAllocSubsystem() sl@0: { sl@0: int i; sl@0: struct block *blockPtr, *nextPtr; sl@0: sl@0: Tcl_MutexLock(allocMutexPtr); sl@0: for (blockPtr = blockList; blockPtr != NULL; blockPtr = nextPtr) { sl@0: nextPtr = blockPtr->nextPtr; sl@0: TclpSysFree(blockPtr); sl@0: } sl@0: blockList = NULL; sl@0: sl@0: for (blockPtr = bigBlocks.nextPtr; blockPtr != &bigBlocks; ) { sl@0: nextPtr = blockPtr->nextPtr; sl@0: TclpSysFree(blockPtr); sl@0: blockPtr = nextPtr; sl@0: } sl@0: bigBlocks.nextPtr = &bigBlocks; sl@0: bigBlocks.prevPtr = &bigBlocks; sl@0: sl@0: for (i = 0; i < NBUCKETS; i++) { sl@0: nextf[i] = NULL; sl@0: #ifdef MSTATS sl@0: nmalloc[i] = 0; sl@0: #endif sl@0: } sl@0: #ifdef MSTATS sl@0: nmalloc[i] = 0; sl@0: #endif sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: } sl@0: sl@0: /* sl@0: *---------------------------------------------------------------------- sl@0: * sl@0: * TclpAlloc -- sl@0: * sl@0: * Allocate more memory. sl@0: * sl@0: * Results: sl@0: * None. sl@0: * sl@0: * Side effects: sl@0: * None. sl@0: * sl@0: *---------------------------------------------------------------------- sl@0: */ sl@0: sl@0: char * sl@0: TclpAlloc(nbytes) sl@0: unsigned int nbytes; /* Number of bytes to allocate. */ sl@0: { sl@0: register union overhead *op; sl@0: register long bucket; sl@0: register unsigned amt; sl@0: struct block *bigBlockPtr; sl@0: sl@0: if (!allocInit) { sl@0: /* sl@0: * We have to make the "self initializing" because Tcl_Alloc sl@0: * may be used before any other part of Tcl. E.g., see sl@0: * main() for tclsh! 
sl@0: */ sl@0: TclInitAlloc(); sl@0: } sl@0: Tcl_MutexLock(allocMutexPtr); sl@0: /* sl@0: * First the simple case: we simple allocate big blocks directly sl@0: */ sl@0: if (nbytes + OVERHEAD >= MAXMALLOC) { sl@0: bigBlockPtr = (struct block *) TclpSysAlloc((unsigned) sl@0: (sizeof(struct block) + OVERHEAD + nbytes), 0); sl@0: if (bigBlockPtr == NULL) { sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return NULL; sl@0: } sl@0: bigBlockPtr->nextPtr = bigBlocks.nextPtr; sl@0: bigBlocks.nextPtr = bigBlockPtr; sl@0: bigBlockPtr->prevPtr = &bigBlocks; sl@0: bigBlockPtr->nextPtr->prevPtr = bigBlockPtr; sl@0: sl@0: op = (union overhead *) (bigBlockPtr + 1); sl@0: op->ov_magic0 = op->ov_magic1 = MAGIC; sl@0: op->ov_index = 0xff; sl@0: #ifdef MSTATS sl@0: nmalloc[NBUCKETS]++; sl@0: #endif sl@0: #ifdef RCHECK sl@0: /* sl@0: * Record allocated size of block and sl@0: * bound space with magic numbers. sl@0: */ sl@0: op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); sl@0: op->ov_rmagic = RMAGIC; sl@0: *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; sl@0: #endif sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return (void *)(op+1); sl@0: } sl@0: /* sl@0: * Convert amount of memory requested into closest block size sl@0: * stored in hash buckets which satisfies request. sl@0: * Account for space used per block for accounting. sl@0: */ sl@0: #ifndef RCHECK sl@0: amt = 8; /* size of first bucket */ sl@0: bucket = 0; sl@0: #else sl@0: amt = 16; /* size of first bucket */ sl@0: bucket = 1; sl@0: #endif sl@0: while (nbytes + OVERHEAD > amt) { sl@0: amt <<= 1; sl@0: if (amt == 0) { sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return (NULL); sl@0: } sl@0: bucket++; sl@0: } sl@0: ASSERT( bucket < NBUCKETS ); sl@0: sl@0: /* sl@0: * If nothing in hash bucket right now, sl@0: * request more memory from the system. 
sl@0: */ sl@0: if ((op = nextf[bucket]) == NULL) { sl@0: MoreCore(bucket); sl@0: if ((op = nextf[bucket]) == NULL) { sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return (NULL); sl@0: } sl@0: } sl@0: /* sl@0: * Remove from linked list sl@0: */ sl@0: nextf[bucket] = op->ov_next; sl@0: op->ov_magic0 = op->ov_magic1 = MAGIC; sl@0: op->ov_index = (unsigned char) bucket; sl@0: #ifdef MSTATS sl@0: nmalloc[bucket]++; sl@0: #endif sl@0: #ifdef RCHECK sl@0: /* sl@0: * Record allocated size of block and sl@0: * bound space with magic numbers. sl@0: */ sl@0: op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); sl@0: op->ov_rmagic = RMAGIC; sl@0: *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; sl@0: #endif sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return ((char *)(op + 1)); sl@0: } sl@0: sl@0: /* sl@0: *---------------------------------------------------------------------- sl@0: * sl@0: * MoreCore -- sl@0: * sl@0: * Allocate more memory to the indicated bucket. sl@0: * sl@0: * Assumes Mutex is already held. sl@0: * sl@0: * Results: sl@0: * None. sl@0: * sl@0: * Side effects: sl@0: * Attempts to get more memory from the system. sl@0: * sl@0: *---------------------------------------------------------------------- sl@0: */ sl@0: sl@0: static void sl@0: MoreCore(bucket) sl@0: int bucket; /* What bucket to allocat to. */ sl@0: { sl@0: register union overhead *op; sl@0: register long sz; /* size of desired block */ sl@0: long amt; /* amount to allocate */ sl@0: int nblks; /* how many blocks we get */ sl@0: struct block *blockPtr; sl@0: sl@0: /* sl@0: * sbrk_size <= 0 only for big, FLUFFY, requests (about sl@0: * 2^30 bytes on a VAX, I think) or for a negative arg. sl@0: */ sl@0: sz = 1 << (bucket + 3); sl@0: ASSERT(sz > 0); sl@0: sl@0: amt = MAXMALLOC; sl@0: nblks = amt / sz; sl@0: ASSERT(nblks*sz == amt); sl@0: sl@0: blockPtr = (struct block *) TclpSysAlloc((unsigned) sl@0: (sizeof(struct block) + amt), 1); sl@0: /* no more room! 
*/ sl@0: if (blockPtr == NULL) { sl@0: return; sl@0: } sl@0: blockPtr->nextPtr = blockList; sl@0: blockList = blockPtr; sl@0: sl@0: op = (union overhead *) (blockPtr + 1); sl@0: sl@0: /* sl@0: * Add new memory allocated to that on sl@0: * free list for this hash bucket. sl@0: */ sl@0: nextf[bucket] = op; sl@0: while (--nblks > 0) { sl@0: op->ov_next = (union overhead *)((caddr_t)op + sz); sl@0: op = (union overhead *)((caddr_t)op + sz); sl@0: } sl@0: op->ov_next = (union overhead *)NULL; sl@0: } sl@0: sl@0: /* sl@0: *---------------------------------------------------------------------- sl@0: * sl@0: * TclpFree -- sl@0: * sl@0: * Free memory. sl@0: * sl@0: * Results: sl@0: * None. sl@0: * sl@0: * Side effects: sl@0: * None. sl@0: * sl@0: *---------------------------------------------------------------------- sl@0: */ sl@0: sl@0: void sl@0: TclpFree(cp) sl@0: char *cp; /* Pointer to memory to free. */ sl@0: { sl@0: register long size; sl@0: register union overhead *op; sl@0: struct block *bigBlockPtr; sl@0: sl@0: if (cp == NULL) { sl@0: return; sl@0: } sl@0: sl@0: Tcl_MutexLock(allocMutexPtr); sl@0: op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); sl@0: sl@0: ASSERT(op->ov_magic0 == MAGIC); /* make sure it was in use */ sl@0: ASSERT(op->ov_magic1 == MAGIC); sl@0: if (op->ov_magic0 != MAGIC || op->ov_magic1 != MAGIC) { sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return; sl@0: } sl@0: sl@0: RANGE_ASSERT(op->ov_rmagic == RMAGIC); sl@0: RANGE_ASSERT(*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); sl@0: size = op->ov_index; sl@0: if ( size == 0xff ) { sl@0: #ifdef MSTATS sl@0: nmalloc[NBUCKETS]--; sl@0: #endif sl@0: bigBlockPtr = (struct block *) op - 1; sl@0: bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr; sl@0: bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr; sl@0: TclpSysFree(bigBlockPtr); sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return; sl@0: } sl@0: ASSERT(size < NBUCKETS); sl@0: op->ov_next = nextf[size]; /* also 
clobbers ov_magic */ sl@0: nextf[size] = op; sl@0: #ifdef MSTATS sl@0: nmalloc[size]--; sl@0: #endif sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: } sl@0: sl@0: /* sl@0: *---------------------------------------------------------------------- sl@0: * sl@0: * TclpRealloc -- sl@0: * sl@0: * Reallocate memory. sl@0: * sl@0: * Results: sl@0: * None. sl@0: * sl@0: * Side effects: sl@0: * None. sl@0: * sl@0: *---------------------------------------------------------------------- sl@0: */ sl@0: sl@0: char * sl@0: TclpRealloc(cp, nbytes) sl@0: char *cp; /* Pointer to alloced block. */ sl@0: unsigned int nbytes; /* New size of memory. */ sl@0: { sl@0: int i; sl@0: union overhead *op; sl@0: struct block *bigBlockPtr; sl@0: int expensive; sl@0: unsigned long maxsize; sl@0: sl@0: if (cp == NULL) { sl@0: return (TclpAlloc(nbytes)); sl@0: } sl@0: sl@0: Tcl_MutexLock(allocMutexPtr); sl@0: sl@0: op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); sl@0: sl@0: ASSERT(op->ov_magic0 == MAGIC); /* make sure it was in use */ sl@0: ASSERT(op->ov_magic1 == MAGIC); sl@0: if (op->ov_magic0 != MAGIC || op->ov_magic1 != MAGIC) { sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return NULL; sl@0: } sl@0: sl@0: RANGE_ASSERT(op->ov_rmagic == RMAGIC); sl@0: RANGE_ASSERT(*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); sl@0: i = op->ov_index; sl@0: sl@0: /* sl@0: * If the block isn't in a bin, just realloc it. sl@0: */ sl@0: sl@0: if (i == 0xff) { sl@0: struct block *prevPtr, *nextPtr; sl@0: bigBlockPtr = (struct block *) op - 1; sl@0: prevPtr = bigBlockPtr->prevPtr; sl@0: nextPtr = bigBlockPtr->nextPtr; sl@0: bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr, sl@0: sizeof(struct block) + OVERHEAD + nbytes); sl@0: if (bigBlockPtr == NULL) { sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return NULL; sl@0: } sl@0: sl@0: if (prevPtr->nextPtr != bigBlockPtr) { sl@0: /* sl@0: * If the block has moved, splice the new block into the list where sl@0: * the old block used to be. 
sl@0: */ sl@0: sl@0: prevPtr->nextPtr = bigBlockPtr; sl@0: nextPtr->prevPtr = bigBlockPtr; sl@0: } sl@0: sl@0: op = (union overhead *) (bigBlockPtr + 1); sl@0: #ifdef MSTATS sl@0: nmalloc[NBUCKETS]++; sl@0: #endif sl@0: #ifdef RCHECK sl@0: /* sl@0: * Record allocated size of block and update magic number bounds. sl@0: */ sl@0: sl@0: op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); sl@0: *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; sl@0: #endif sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return (char *)(op+1); sl@0: } sl@0: maxsize = 1 << (i+3); sl@0: expensive = 0; sl@0: if ( nbytes + OVERHEAD > maxsize ) { sl@0: expensive = 1; sl@0: } else if ( i > 0 && nbytes + OVERHEAD < (maxsize/2) ) { sl@0: expensive = 1; sl@0: } sl@0: sl@0: if (expensive) { sl@0: void *newp; sl@0: sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: sl@0: newp = TclpAlloc(nbytes); sl@0: if ( newp == NULL ) { sl@0: return NULL; sl@0: } sl@0: maxsize -= OVERHEAD; sl@0: if ( maxsize < nbytes ) sl@0: nbytes = maxsize; sl@0: memcpy((VOID *) newp, (VOID *) cp, (size_t) nbytes); sl@0: TclpFree(cp); sl@0: return newp; sl@0: } sl@0: sl@0: /* sl@0: * Ok, we don't have to copy, it fits as-is sl@0: */ sl@0: #ifdef RCHECK sl@0: op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); sl@0: *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; sl@0: #endif sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: return(cp); sl@0: } sl@0: sl@0: /* sl@0: *---------------------------------------------------------------------- sl@0: * sl@0: * mstats -- sl@0: * sl@0: * Prints two lines of numbers, one showing the length of the sl@0: * free list for each size category, the second showing the sl@0: * number of mallocs - frees for each size category. sl@0: * sl@0: * Results: sl@0: * None. sl@0: * sl@0: * Side effects: sl@0: * None. 
sl@0: * sl@0: *---------------------------------------------------------------------- sl@0: */ sl@0: sl@0: #ifdef MSTATS sl@0: void sl@0: mstats(s) sl@0: char *s; /* Where to write info. */ sl@0: { sl@0: register int i, j; sl@0: register union overhead *p; sl@0: int totfree = 0, sl@0: totused = 0; sl@0: sl@0: Tcl_MutexLock(allocMutexPtr); sl@0: fprintf(stderr, "Memory allocation statistics %s\nTclpFree:\t", s); sl@0: for (i = 0; i < NBUCKETS; i++) { sl@0: for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) sl@0: fprintf(stderr, " %d", j); sl@0: totfree += j * (1 << (i + 3)); sl@0: } sl@0: fprintf(stderr, "\nused:\t"); sl@0: for (i = 0; i < NBUCKETS; i++) { sl@0: fprintf(stderr, " %d", nmalloc[i]); sl@0: totused += nmalloc[i] * (1 << (i + 3)); sl@0: } sl@0: fprintf(stderr, "\n\tTotal small in use: %d, total free: %d\n", sl@0: totused, totfree); sl@0: fprintf(stderr, "\n\tNumber of big (>%d) blocks in use: %d\n", sl@0: MAXMALLOC, nmalloc[NBUCKETS]); sl@0: Tcl_MutexUnlock(allocMutexPtr); sl@0: } sl@0: #endif sl@0: sl@0: #else /* !USE_TCLALLOC */ sl@0: sl@0: /* sl@0: *---------------------------------------------------------------------- sl@0: * sl@0: * TclpAlloc -- sl@0: * sl@0: * Allocate more memory. sl@0: * sl@0: * Results: sl@0: * None. sl@0: * sl@0: * Side effects: sl@0: * None. sl@0: * sl@0: *---------------------------------------------------------------------- sl@0: */ sl@0: sl@0: char * sl@0: TclpAlloc(nbytes) sl@0: unsigned int nbytes; /* Number of bytes to allocate. */ sl@0: { sl@0: return (char*) malloc(nbytes); sl@0: } sl@0: sl@0: /* sl@0: *---------------------------------------------------------------------- sl@0: * sl@0: * TclpFree -- sl@0: * sl@0: * Free memory. sl@0: * sl@0: * Results: sl@0: * None. sl@0: * sl@0: * Side effects: sl@0: * None. sl@0: * sl@0: *---------------------------------------------------------------------- sl@0: */ sl@0: sl@0: void sl@0: TclpFree(cp) sl@0: char *cp; /* Pointer to memory to free. 
*/ sl@0: { sl@0: free(cp); sl@0: return; sl@0: } sl@0: sl@0: /* sl@0: *---------------------------------------------------------------------- sl@0: * sl@0: * TclpRealloc -- sl@0: * sl@0: * Reallocate memory. sl@0: * sl@0: * Results: sl@0: * None. sl@0: * sl@0: * Side effects: sl@0: * None. sl@0: * sl@0: *---------------------------------------------------------------------- sl@0: */ sl@0: sl@0: char * sl@0: TclpRealloc(cp, nbytes) sl@0: char *cp; /* Pointer to alloced block. */ sl@0: unsigned int nbytes; /* New size of memory. */ sl@0: { sl@0: return (char*) realloc(cp, nbytes); sl@0: } sl@0: sl@0: #endif /* !USE_TCLALLOC */ sl@0: #endif /* !TCL_THREADS */