os/persistentdata/persistentstorage/sqlite3api/TEST/TCL/tcldistribution/generic/tclAlloc.c
/* 
 * tclAlloc.c --
 *
 *	This is a very fast storage allocator.  It allocates blocks of a
 *	small number of different sizes, and keeps free lists of each size.
 *	Blocks that don't exactly fit are passed up to the next larger size.
 *	Blocks over a certain size are directly allocated from the system.
 *
 * Copyright (c) 1983 Regents of the University of California.
 * Copyright (c) 1996-1997 Sun Microsystems, Inc.
 * Copyright (c) 1998-1999 by Scriptics Corporation.
 *
 * Portions contributed by Chris Kingsley, Jack Jansen and Ray Johnson.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclAlloc.c,v 1.16.2.1 2004/10/28 21:12:37 andreas_kupries Exp $
 */

/*
 * Windows and Unix use an alternative allocator when building with threads
 * that has significantly reduced lock contention.
 */

#if !defined(TCL_THREADS) || !defined(USE_THREAD_ALLOC) || defined(TCL_MEM_DEBUG)

#include "tclInt.h"
#include "tclPort.h"

#if USE_TCLALLOC

#ifdef TCL_DEBUG
#   define DEBUG
/* #define MSTATS */
#   define RCHECK
#endif

/*
 * We should really make use of AC_CHECK_TYPE(caddr_t)
 * here, but it can wait until Tcl uses config.h properly.
 */
#if defined(MAC_TCL) || defined(_MSC_VER) || defined(__MINGW32__) || defined(__BORLANDC__)
typedef unsigned long caddr_t;
#endif

/*
 * The overhead on a block is at least 8 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */

union overhead {
    union overhead *ov_next;	/* when free */
    unsigned char ov_padding[8]; /* Ensure the structure is 8-byte aligned. */
    struct {
	unsigned char	ovu_magic0;	/* magic number */
	unsigned char	ovu_index;	/* bucket # */
	unsigned char	ovu_unused;	/* unused */
	unsigned char	ovu_magic1;	/* other magic number */
#ifdef RCHECK
	unsigned short	ovu_rmagic;	/* range magic number */
	unsigned long	ovu_size;	/* actual block size */
	unsigned short  ovu_unused2;    /* padding to 8-byte align */
#endif
    } ovu;
#define ov_magic0	ovu.ovu_magic0
#define ov_magic1	ovu.ovu_magic1
#define ov_index	ovu.ovu_index
#define ov_rmagic	ovu.ovu_rmagic
#define ov_size		ovu.ovu_size
};


#define MAGIC		0xef		/* magic # on accounting info */
#define RMAGIC		0x5555		/* magic # on range info */

#ifdef RCHECK
#define	RSLOP		sizeof (unsigned short)
#else
#define	RSLOP		0
#endif

#define OVERHEAD (sizeof(union overhead) + RSLOP)

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */

#define NBUCKETS	13
#define MAXMALLOC	(1<<(NBUCKETS+2))
static	union overhead *nextf[NBUCKETS];
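
/*
 * For illustration: bucket i holds blocks of 2^(i+3) bytes, so with
 * NBUCKETS == 13 the bucket sizes run 8, 16, 32, ..., 32768, and
 * MAXMALLOC is 1 << 15 == 32768.  A request whose size plus OVERHEAD
 * reaches MAXMALLOC bypasses the buckets entirely and is passed
 * straight to TclpSysAlloc() below.
 */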

/* 
 * The following structure is used to keep track of all system memory 
 * currently owned by Tcl.  When finalizing, all this memory will
 * be returned to the system.
 */

struct block {
    struct block *nextPtr;	/* Linked list. */
    struct block *prevPtr;	/* Linked list for big blocks, ensures 8-byte 
				 * alignment for suballocated blocks. */
};

static struct block *blockList;		/* Tracks the suballocated blocks. */
static struct block bigBlocks = {	/* Big blocks aren't suballocated. */
    &bigBlocks, &bigBlocks
};
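
/*
 * bigBlocks is a circular, doubly linked list with a sentinel: when the
 * list is empty the sentinel points at itself, so blocks can be spliced
 * in and out without special-casing the ends of the list.
 */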

/*
 * The allocator is protected by a special mutex that must be
 * explicitly initialized.  Furthermore, because Tcl_Alloc may be
 * used before anything else in Tcl, we make this module
 * self-initializing as well, guarded by the allocInit variable.
 */

#ifdef TCL_THREADS
static Tcl_Mutex *allocMutexPtr;
#endif
static int allocInit = 0;


#ifdef MSTATS

/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */

static	unsigned int nmalloc[NBUCKETS+1];
#include <stdio.h>
#endif

#if defined(DEBUG) || defined(RCHECK)
#define	ASSERT(p)   if (!(p)) panic(# p)
#define RANGE_ASSERT(p) if (!(p)) panic(# p)
#else
#define	ASSERT(p)
#define RANGE_ASSERT(p)
#endif

/*
 * Prototypes for functions used only in this file.
 */

static void 		MoreCore _ANSI_ARGS_((int bucket));
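
/*
 * Illustrative call sequence (a sketch; in the Tcl core these routines
 * are normally reached through Tcl_Alloc/Tcl_Free and the ckalloc/ckfree
 * macros rather than called directly).  Sizes assume RCHECK is not
 * defined, so that OVERHEAD is sizeof(union overhead) == 8:
 *
 *	char *p = TclpAlloc(100);	-- 100 + 8 <= 128: served from bucket 4
 *	p = TclpRealloc(p, 40);		-- 40 + 8 < 64: copied into bucket 3
 *	TclpFree(p);			-- pushed back onto bucket 3's free list
 *	TclFinalizeAllocSubsystem();	-- all memory returned to the system
 */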


/*
 *-------------------------------------------------------------------------
 *
 * TclInitAlloc --
 *
 *	Initialize the memory system.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Initialize the mutex used to serialize allocations.
 *
 *-------------------------------------------------------------------------
 */

void
TclInitAlloc()
{
    if (!allocInit) {
	allocInit = 1;
#ifdef TCL_THREADS
	allocMutexPtr = Tcl_GetAllocMutex();
#endif
    }
}

/*
 *-------------------------------------------------------------------------
 *
 * TclFinalizeAllocSubsystem --
 *
 *	Release all resources being used by this subsystem, including 
 *	aggressively freeing all memory allocated by TclpAlloc() that 
 *	has not yet been released with TclpFree().
 *	
 *	After this function is called, all memory allocated with 
 *	TclpAlloc() should be considered unusable.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	This subsystem is self-initializing, since memory can be 
 *	allocated before Tcl is formally initialized.  After this call,
 *	this subsystem has been reset to its initial state and is 
 *	usable again.
 *
 *-------------------------------------------------------------------------
 */

void
TclFinalizeAllocSubsystem()
{
    int i;
    struct block *blockPtr, *nextPtr;

    Tcl_MutexLock(allocMutexPtr);
    for (blockPtr = blockList; blockPtr != NULL; blockPtr = nextPtr) {
	nextPtr = blockPtr->nextPtr;
	TclpSysFree(blockPtr);
    }
    blockList = NULL;

    for (blockPtr = bigBlocks.nextPtr; blockPtr != &bigBlocks; ) {
	nextPtr = blockPtr->nextPtr;
	TclpSysFree(blockPtr);
	blockPtr = nextPtr;
    }
    bigBlocks.nextPtr = &bigBlocks;
    bigBlocks.prevPtr = &bigBlocks;

    for (i = 0; i < NBUCKETS; i++) {
	nextf[i] = NULL;
#ifdef MSTATS
	nmalloc[i] = 0;
#endif
    }
#ifdef MSTATS
    nmalloc[i] = 0;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *	Allocate more memory.
 *
 * Results:
 *	Returns a pointer to the allocated memory, or NULL if the
 *	allocation fails.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(nbytes)
    unsigned int nbytes;	/* Number of bytes to allocate. */
{
    register union overhead *op;
    register long bucket;
    register unsigned amt;
    struct block *bigBlockPtr;

    if (!allocInit) {
	/*
	 * We have to make this module self-initializing because Tcl_Alloc
	 * may be used before any other part of Tcl.  E.g., see
	 * main() for tclsh!
	 */
	TclInitAlloc();
    }
    Tcl_MutexLock(allocMutexPtr);
    /*
     * First the simple case: we simply allocate big blocks directly.
     */
    if (nbytes + OVERHEAD >= MAXMALLOC) {
	bigBlockPtr = (struct block *) TclpSysAlloc((unsigned) 
		(sizeof(struct block) + OVERHEAD + nbytes), 0);
	if (bigBlockPtr == NULL) {
	    Tcl_MutexUnlock(allocMutexPtr);
	    return NULL;
	}
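
	/*
	 * Splice the new block into the bigBlocks list just after the
	 * sentinel, so that TclpFree() and TclFinalizeAllocSubsystem()
	 * can find and unlink it later.
	 */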
	bigBlockPtr->nextPtr = bigBlocks.nextPtr;
	bigBlocks.nextPtr = bigBlockPtr;
	bigBlockPtr->prevPtr = &bigBlocks;
	bigBlockPtr->nextPtr->prevPtr = bigBlockPtr;

	op = (union overhead *) (bigBlockPtr + 1);
	op->ov_magic0 = op->ov_magic1 = MAGIC;
	op->ov_index = 0xff;
#ifdef MSTATS
	nmalloc[NBUCKETS]++;
#endif
#ifdef RCHECK
	/*
	 * Record allocated size of block and
	 * bound space with magic numbers.
	 */
	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
	op->ov_rmagic = RMAGIC;
	*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
	Tcl_MutexUnlock(allocMutexPtr);
	return (void *)(op+1);
    }
    /*
     * Convert amount of memory requested into closest block size
     * stored in hash buckets which satisfies request.
     * Account for space used per block for accounting.
     */
#ifndef RCHECK
    amt = 8;	/* size of first bucket */
    bucket = 0;
#else
    amt = 16;	/* size of first bucket */
    bucket = 1;
#endif
    while (nbytes + OVERHEAD > amt) {
	amt <<= 1;
	if (amt == 0) {
	    Tcl_MutexUnlock(allocMutexPtr);
	    return (NULL);
	}
	bucket++;
    }
    ASSERT( bucket < NBUCKETS );
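
    /*
     * For example, a 100-byte request with an 8-byte OVERHEAD (RCHECK
     * disabled) needs 108 bytes, so amt doubles 8 -> 16 -> 32 -> 64 -> 128
     * and the request is served from bucket 4, the 128-byte bucket.  The
     * amt == 0 test above guards against the left shift wrapping to zero.
     */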

    /*
     * If nothing in hash bucket right now,
     * request more memory from the system.
     */
    if ((op = nextf[bucket]) == NULL) {
	MoreCore(bucket);
	if ((op = nextf[bucket]) == NULL) {
	    Tcl_MutexUnlock(allocMutexPtr);
	    return (NULL);
	}
    }
    /*
     * Remove from linked list
     */
    nextf[bucket] = op->ov_next;
    op->ov_magic0 = op->ov_magic1 = MAGIC;
    op->ov_index = (unsigned char) bucket;
#ifdef MSTATS
    nmalloc[bucket]++;
#endif
#ifdef RCHECK
    /*
     * Record allocated size of block and
     * bound space with magic numbers.
     */
    op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
    op->ov_rmagic = RMAGIC;
    *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
    return ((char *)(op + 1));
}

/*
 *----------------------------------------------------------------------
 *
 * MoreCore --
 *
 *	Allocate more memory to the indicated bucket.
 *
 *	Assumes Mutex is already held.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Attempts to get more memory from the system.
 *
 *----------------------------------------------------------------------
 */

static void
MoreCore(bucket)
    int bucket;		/* Which bucket to allocate to. */
{
    register union overhead *op;
    register long sz;		/* size of desired block */
    long amt;			/* amount to allocate */
    int nblks;			/* how many blocks we get */
    struct block *blockPtr;

    /*
     * sbrk_size <= 0 only for big, FLUFFY, requests (about
     * 2^30 bytes on a VAX, I think) or for a negative arg.
     */
    sz = 1 << (bucket + 3);
    ASSERT(sz > 0);

    amt = MAXMALLOC;
    nblks = amt / sz;
    ASSERT(nblks*sz == amt);
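
    /*
     * Continuing the example above: for bucket 4, sz is 128 and amt is
     * MAXMALLOC (32768), so a single system allocation is carved into
     * nblks == 256 blocks that get chained onto nextf[bucket] below.
     */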

    blockPtr = (struct block *) TclpSysAlloc((unsigned) 
	    (sizeof(struct block) + amt), 1);
    /* no more room! */
    if (blockPtr == NULL) {
	return;
    }
    blockPtr->nextPtr = blockList;
    blockList = blockPtr;

    op = (union overhead *) (blockPtr + 1);

    /*
     * Add new memory allocated to that on
     * free list for this hash bucket.
     */
    nextf[bucket] = op;
    while (--nblks > 0) {
	op->ov_next = (union overhead *)((caddr_t)op + sz);
	op = (union overhead *)((caddr_t)op + sz);
    }
    op->ov_next = (union overhead *)NULL;
}

/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *	Free memory.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(cp)
    char *cp;		/* Pointer to memory to free. */
{
    register long size;
    register union overhead *op;
    struct block *bigBlockPtr;

    if (cp == NULL) {
	return;
    }

    Tcl_MutexLock(allocMutexPtr);
    op = (union overhead *)((caddr_t)cp - sizeof (union overhead));

    ASSERT(op->ov_magic0 == MAGIC);		/* make sure it was in use */
    ASSERT(op->ov_magic1 == MAGIC);
    if (op->ov_magic0 != MAGIC || op->ov_magic1 != MAGIC) {
	Tcl_MutexUnlock(allocMutexPtr);
	return;
    }

    RANGE_ASSERT(op->ov_rmagic == RMAGIC);
    RANGE_ASSERT(*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
    size = op->ov_index;
    if ( size == 0xff ) {
#ifdef MSTATS
	nmalloc[NBUCKETS]--;
#endif
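	/*
	 * An index of 0xff marks a block that was allocated directly from
	 * the system: recover the struct block header that precedes the
	 * overhead, unlink it from the bigBlocks list and hand it back.
	 */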
	bigBlockPtr = (struct block *) op - 1;
	bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr;
	bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr;
	TclpSysFree(bigBlockPtr);
	Tcl_MutexUnlock(allocMutexPtr);
	return;
    }
    ASSERT(size < NBUCKETS);
    op->ov_next = nextf[size];	/* also clobbers ov_magic */
    nextf[size] = op;
#ifdef MSTATS
    nmalloc[size]--;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *	Reallocate memory.
 *
 * Results:
 *	Returns a pointer to the reallocated memory, which may have moved,
 *	or NULL if the reallocation fails.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(cp, nbytes)
    char *cp;			/* Pointer to alloced block. */
    unsigned int nbytes;	/* New size of memory. */
{
    int i;
    union overhead *op;
    struct block *bigBlockPtr;
    int expensive;
    unsigned long maxsize;

    if (cp == NULL) {
	return (TclpAlloc(nbytes));
    }

    Tcl_MutexLock(allocMutexPtr);

    op = (union overhead *)((caddr_t)cp - sizeof (union overhead));

    ASSERT(op->ov_magic0 == MAGIC);		/* make sure it was in use */
    ASSERT(op->ov_magic1 == MAGIC);
    if (op->ov_magic0 != MAGIC || op->ov_magic1 != MAGIC) {
	Tcl_MutexUnlock(allocMutexPtr);
	return NULL;
    }

    RANGE_ASSERT(op->ov_rmagic == RMAGIC);
    RANGE_ASSERT(*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
    i = op->ov_index;

    /*
     * If the block isn't in a bin, just realloc it.
     */

    if (i == 0xff) {
	struct block *prevPtr, *nextPtr;
	bigBlockPtr = (struct block *) op - 1;
	prevPtr = bigBlockPtr->prevPtr;
	nextPtr = bigBlockPtr->nextPtr;
	bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr, 
		sizeof(struct block) + OVERHEAD + nbytes);
	if (bigBlockPtr == NULL) {
	    Tcl_MutexUnlock(allocMutexPtr);
	    return NULL;
	}

	if (prevPtr->nextPtr != bigBlockPtr) {
	    /*
	     * If the block has moved, splice the new block into the list where
	     * the old block used to be. 
	     */

	    prevPtr->nextPtr = bigBlockPtr;
	    nextPtr->prevPtr = bigBlockPtr;
	}

	op = (union overhead *) (bigBlockPtr + 1);
#ifdef MSTATS
	nmalloc[NBUCKETS]++;
#endif
#ifdef RCHECK
	/*
	 * Record allocated size of block and update magic number bounds.
	 */

	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
	*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
	Tcl_MutexUnlock(allocMutexPtr);
	return (char *)(op+1);
    }
    maxsize = 1 << (i+3);
    expensive = 0;
    if ( nbytes + OVERHEAD > maxsize ) {
	expensive = 1;
    } else if ( i > 0 && nbytes + OVERHEAD < (maxsize/2) ) {
	expensive = 1;
    }
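
    /*
     * Example: a block from bucket 4 (128 bytes) is resized in place as
     * long as nbytes + OVERHEAD is at least 64 and at most 128.  Growing
     * past 128 or shrinking below half the bucket size falls through to
     * the alloc/copy/free path below.
     */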

    if (expensive) {
	void *newp;

	Tcl_MutexUnlock(allocMutexPtr);

	newp = TclpAlloc(nbytes);
	if ( newp == NULL ) {
	    return NULL;
	}
	maxsize -= OVERHEAD;
	if ( maxsize < nbytes )
	    nbytes = maxsize;
	memcpy((VOID *) newp, (VOID *) cp, (size_t) nbytes);
	TclpFree(cp);
	return newp;
    }

    /*
     * Ok, we don't have to copy, it fits as-is
     */
#ifdef RCHECK
    op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
    *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
    return(cp);
}

/*
 *----------------------------------------------------------------------
 *
 * mstats --
 *
 *	Prints two lines of numbers, one showing the length of the 
 *	free list for each size category, the second showing the 
 *	number of mallocs - frees for each size category.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

#ifdef MSTATS
void
mstats(s)
    char *s;	/* Where to write info. */
{
    register int i, j;
    register union overhead *p;
    int totfree = 0,
	totused = 0;

    Tcl_MutexLock(allocMutexPtr);
    fprintf(stderr, "Memory allocation statistics %s\nTclpFree:\t", s);
    for (i = 0; i < NBUCKETS; i++) {
	for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
	    fprintf(stderr, " %d", j);
	totfree += j * (1 << (i + 3));
    }
    fprintf(stderr, "\nused:\t");
    for (i = 0; i < NBUCKETS; i++) {
	fprintf(stderr, " %d", nmalloc[i]);
	totused += nmalloc[i] * (1 << (i + 3));
    }
    fprintf(stderr, "\n\tTotal small in use: %d, total free: %d\n",
	    totused, totfree);
    fprintf(stderr, "\n\tNumber of big (>%d) blocks in use: %d\n", 
	    MAXMALLOC, nmalloc[NBUCKETS]);
    Tcl_MutexUnlock(allocMutexPtr);
}
#endif

#else  /* !USE_TCLALLOC */

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *	Allocate more memory.
 *
 * Results:
 *	Returns a pointer to the allocated memory, or NULL if the
 *	allocation fails.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(nbytes)
    unsigned int nbytes;	/* Number of bytes to allocate. */
{
    return (char*) malloc(nbytes);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *	Free memory.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(cp)
    char *cp;		/* Pointer to memory to free. */
{
    free(cp);
    return;
}

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *	Reallocate memory.
 *
 * Results:
 *	Returns a pointer to the reallocated memory, which may have moved,
 *	or NULL if the reallocation fails.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(cp, nbytes)
    char *cp;			/* Pointer to alloced block. */
    unsigned int nbytes;	/* New size of memory. */
{
    return (char*) realloc(cp, nbytes);
}

#endif /* !USE_TCLALLOC */
#endif /* !TCL_THREADS */