os/persistentdata/persistentstorage/sqlite3api/TEST/TCL/tcldistribution/generic/tclAlloc.c
 * This is a very fast storage allocator. It allocates blocks of a
 * small number of different sizes, and keeps free lists of each size.
 * Blocks that don't exactly fit are passed up to the next larger size.
 * Blocks over a certain size are directly allocated from the system.
 *
 * Copyright (c) 1983 Regents of the University of California.
 * Copyright (c) 1996-1997 Sun Microsystems, Inc.
 * Copyright (c) 1998-1999 by Scriptics Corporation.
 *
 * Portions contributed by Chris Kingsley, Jack Jansen and Ray Johnson.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclAlloc.c,v 1.16.2.1 2004/10/28 21:12:37 andreas_kupries Exp $
 * Windows and Unix use an alternative allocator when building with threads
 * that has significantly reduced lock contention.

#if !defined(TCL_THREADS) || !defined(USE_THREAD_ALLOC) || defined(TCL_MEM_DEBUG)

 * We should really make use of AC_CHECK_TYPE(caddr_t)
 * here, but it can wait until Tcl uses config.h properly.

#if defined(MAC_TCL) || defined(_MSC_VER) || defined(__MINGW32__) || defined(__BORLANDC__)
typedef unsigned long caddr_t;
 * The overhead on a block is at least 8 bytes. When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero. When in use, the first byte is set to MAGIC, and the second
 * byte is the size index. The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
    union overhead *ov_next;		/* when free */
    unsigned char ov_padding[8];	/* Ensure the structure is 8-byte aligned. */

        unsigned char ovu_magic0;	/* magic number */
        unsigned char ovu_index;	/* bucket # */
        unsigned char ovu_unused;	/* unused */
        unsigned char ovu_magic1;	/* other magic number */

        unsigned short ovu_rmagic;	/* range magic number */
        unsigned long ovu_size;		/* actual block size */
        unsigned short ovu_unused2;	/* padding to 8-byte align */

#define ov_magic0	ovu.ovu_magic0
#define ov_magic1	ovu.ovu_magic1
#define ov_index	ovu.ovu_index
#define ov_rmagic	ovu.ovu_rmagic
#define ov_size		ovu.ovu_size
#define MAGIC		0xef		/* magic # on accounting info */
#define RMAGIC		0x5555		/* magic # on range info */

#define RSLOP		sizeof (unsigned short)

#define OVERHEAD	(sizeof(union overhead) + RSLOP)
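/*
 * Illustrative sketch (not part of the original sources): how the macros
 * above bracket an in-use block. The helper name and the 13-byte request
 * are hypothetical; the expressions mirror the RCHECK code used in
 * TclpAlloc/TclpRealloc below. The header occupies the first bytes of the
 * block, and the trailing RMAGIC guard sits just past the rounded-up
 * data area.
 */

#if 0
static void
OverheadLayoutExample(union overhead *op)	/* header of an in-use block */
{
    unsigned int nbytes = 13;			/* hypothetical request */

    op->ov_magic0 = op->ov_magic1 = MAGIC;	/* marks the block in use */
    op->ov_index = 2;				/* 32-byte bucket: 2^(2+3) */
#ifdef RCHECK
    op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);	/* 13 rounds up to 14 */
    op->ov_rmagic = RMAGIC;
    *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
}
#endif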
 * nextf[i] is the pointer to the next free block of size 2^(i+3). The
 * smallest allocatable block is 8 bytes. The overhead information
 * precedes the data area returned to the user.

#define MAXMALLOC	(1<<(NBUCKETS+2))
static union overhead *nextf[NBUCKETS];
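/*
 * Illustrative sketch (not part of the original sources): how a request
 * size maps to a bucket, restating the search loop in TclpAlloc below for
 * the non-RCHECK case. The helper name is hypothetical. nextf[bucket]
 * supplies blocks of size 2^(bucket+3), so e.g. a 100-byte request lands
 * in the 128-byte bucket; anything at or above MAXMALLOC bypasses the
 * buckets entirely.
 */

#if 0
static int
SizeToBucket(unsigned int nbytes)
{
    unsigned amt = 8;		/* size of the smallest bucket */
    int bucket = 0;

    while (nbytes + OVERHEAD > amt) {
        amt <<= 1;		/* try the next power-of-two block size */
        bucket++;
    }
    return (bucket < NBUCKETS) ? bucket : -1;	/* -1: allocate directly */
}
#endif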
 * The following structure is used to keep track of all system memory
 * currently owned by Tcl. When finalizing, all this memory will
 * be returned to the system.

    struct block *nextPtr;	/* Linked list. */
    struct block *prevPtr;	/* Linked list for big blocks, ensures 8-byte
				 * alignment for suballocated blocks. */

static struct block *blockList;		/* Tracks the suballocated blocks. */
static struct block bigBlocks = {	/* Big blocks aren't suballocated. */
    &bigBlocks, &bigBlocks
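/*
 * Illustrative sketch (not part of the original sources): bigBlocks above
 * is the sentinel of a circular doubly-linked list, which is why it is
 * initialized to point at itself. The hypothetical helpers below restate
 * the link and unlink steps that TclpAlloc and TclpFree perform inline for
 * big blocks; with a sentinel, neither operation needs a NULL check.
 */

#if 0
static void
LinkBigBlock(struct block *bigBlockPtr)
{
    bigBlockPtr->nextPtr = bigBlocks.nextPtr;	/* splice in right after */
    bigBlocks.nextPtr = bigBlockPtr;		/* the sentinel */
    bigBlockPtr->prevPtr = &bigBlocks;
    bigBlockPtr->nextPtr->prevPtr = bigBlockPtr;
}

static void
UnlinkBigBlock(struct block *bigBlockPtr)
{
    bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr;
    bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr;
}
#endif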
 * The allocator is protected by a special mutex that must be
 * explicitly initialized. Furthermore, because Tcl_Alloc may be
 * used before anything else in Tcl, we make this module self-initializing
 * with the allocInit variable.

static Tcl_Mutex *allocMutexPtr;

static int allocInit = 0;
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.

static unsigned int nmalloc[NBUCKETS+1];

#if defined(DEBUG) || defined(RCHECK)
#define ASSERT(p)	if (!(p)) panic(# p)
#define RANGE_ASSERT(p)	if (!(p)) panic(# p)

#define RANGE_ASSERT(p)
 * Prototypes for functions used only in this file.

static void MoreCore _ANSI_ARGS_((int bucket));

 *-------------------------------------------------------------------------
 *
 * Initialize the memory system.
 *
 * Initialize the mutex used to serialize allocations.
 *
 *-------------------------------------------------------------------------

    allocMutexPtr = Tcl_GetAllocMutex();
 *-------------------------------------------------------------------------
 *
 * TclFinalizeAllocSubsystem --
 *
 * Release all resources being used by this subsystem, including
 * aggressively freeing all memory allocated by TclpAlloc() that
 * has not yet been released with TclpFree().
 *
 * After this function is called, all memory allocated with
 * TclpAlloc() should be considered unusable.
 *
 * This subsystem is self-initializing, since memory can be
 * allocated before Tcl is formally initialized. After this call,
 * this subsystem has been reset to its initial state and is
 *
 *-------------------------------------------------------------------------

TclFinalizeAllocSubsystem()

    struct block *blockPtr, *nextPtr;

    Tcl_MutexLock(allocMutexPtr);
    for (blockPtr = blockList; blockPtr != NULL; blockPtr = nextPtr) {
        nextPtr = blockPtr->nextPtr;
        TclpSysFree(blockPtr);

    for (blockPtr = bigBlocks.nextPtr; blockPtr != &bigBlocks; ) {
        nextPtr = blockPtr->nextPtr;
        TclpSysFree(blockPtr);

    bigBlocks.nextPtr = &bigBlocks;
    bigBlocks.prevPtr = &bigBlocks;

    for (i = 0; i < NBUCKETS; i++) {

    Tcl_MutexUnlock(allocMutexPtr);
 *----------------------------------------------------------------------
 *
 * Allocate more memory.
 *
 *----------------------------------------------------------------------

    unsigned int nbytes;		/* Number of bytes to allocate. */

    register union overhead *op;
    register long bucket;
    register unsigned amt;
    struct block *bigBlockPtr;
     * We have to make this module self-initializing because Tcl_Alloc
     * may be used before any other part of Tcl. E.g., see

    Tcl_MutexLock(allocMutexPtr);

     * First the simple case: we simply allocate big blocks directly.
    if (nbytes + OVERHEAD >= MAXMALLOC) {
        bigBlockPtr = (struct block *) TclpSysAlloc((unsigned)
                (sizeof(struct block) + OVERHEAD + nbytes), 0);
        if (bigBlockPtr == NULL) {
            Tcl_MutexUnlock(allocMutexPtr);

        bigBlockPtr->nextPtr = bigBlocks.nextPtr;
        bigBlocks.nextPtr = bigBlockPtr;
        bigBlockPtr->prevPtr = &bigBlocks;
        bigBlockPtr->nextPtr->prevPtr = bigBlockPtr;

        op = (union overhead *) (bigBlockPtr + 1);
        op->ov_magic0 = op->ov_magic1 = MAGIC;

         * Record allocated size of block and
         * bound space with magic numbers.

        op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
        op->ov_rmagic = RMAGIC;
        *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;

        Tcl_MutexUnlock(allocMutexPtr);
        return (void *)(op+1);
     * Convert amount of memory requested into closest block size
     * stored in hash buckets that satisfies the request.
     * Account for space used per block for accounting.
    amt = 8;		/* size of first bucket */

    amt = 16;		/* size of first bucket */

    while (nbytes + OVERHEAD > amt) {

        Tcl_MutexUnlock(allocMutexPtr);

    ASSERT(bucket < NBUCKETS);

     * If nothing in hash bucket right now,
     * request more memory from the system.

    if ((op = nextf[bucket]) == NULL) {
        if ((op = nextf[bucket]) == NULL) {
            Tcl_MutexUnlock(allocMutexPtr);

     * Remove from linked list

    nextf[bucket] = op->ov_next;
    op->ov_magic0 = op->ov_magic1 = MAGIC;
    op->ov_index = (unsigned char) bucket;

     * Record allocated size of block and
     * bound space with magic numbers.

    op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
    op->ov_rmagic = RMAGIC;
    *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;

    Tcl_MutexUnlock(allocMutexPtr);
    return ((char *)(op + 1));
 *----------------------------------------------------------------------
 *
 * Allocate more memory to the indicated bucket.
 *
 * Assumes Mutex is already held.
 *
 * Attempts to get more memory from the system.
 *
 *----------------------------------------------------------------------

    int bucket;			/* What bucket to allocate to. */
    register union overhead *op;
    register long sz;		/* size of desired block */
    long amt;			/* amount to allocate */
    int nblks;			/* how many blocks we get */
    struct block *blockPtr;

     * sbrk_size <= 0 only for big, FLUFFY, requests (about
     * 2^30 bytes on a VAX, I think) or for a negative arg.

    sz = 1 << (bucket + 3);

    ASSERT(nblks*sz == amt);

    blockPtr = (struct block *) TclpSysAlloc((unsigned)
            (sizeof(struct block) + amt), 1);
    if (blockPtr == NULL) {

    blockPtr->nextPtr = blockList;
    blockList = blockPtr;

    op = (union overhead *) (blockPtr + 1);

     * Add new memory allocated to that on
     * free list for this hash bucket.

    while (--nblks > 0) {
        op->ov_next = (union overhead *)((caddr_t)op + sz);
        op = (union overhead *)((caddr_t)op + sz);

    op->ov_next = (union overhead *)NULL;
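/*
 * Illustrative sketch (not part of the original sources): the carving that
 * MoreCore performs above, restated as a hypothetical standalone helper.
 * One system chunk is split into nblks equal blocks of sz bytes each,
 * threaded together through ov_next; for bucket 2, for example, sz is
 * 1 << (2 + 3) = 32 bytes.
 */

#if 0
static union overhead *
CarveChunk(char *chunk, long sz, int nblks)
{
    union overhead *op = (union overhead *) chunk;
    union overhead *first = op;

    while (--nblks > 0) {
        op->ov_next = (union overhead *)((caddr_t)op + sz);	/* link ... */
        op = (union overhead *)((caddr_t)op + sz);		/* ... advance */
    }
    op->ov_next = NULL;		/* last block terminates the free list */
    return first;		/* head to be stored in nextf[bucket] */
}
#endif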
 *----------------------------------------------------------------------
 *
 *----------------------------------------------------------------------

    char *cp;			/* Pointer to memory to free. */

    register union overhead *op;
    struct block *bigBlockPtr;

    Tcl_MutexLock(allocMutexPtr);
    op = (union overhead *)((caddr_t)cp - sizeof (union overhead));

    ASSERT(op->ov_magic0 == MAGIC);	/* make sure it was in use */
    ASSERT(op->ov_magic1 == MAGIC);
    if (op->ov_magic0 != MAGIC || op->ov_magic1 != MAGIC) {
        Tcl_MutexUnlock(allocMutexPtr);

    RANGE_ASSERT(op->ov_rmagic == RMAGIC);
    RANGE_ASSERT(*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);

    if (size == 0xff) {
        bigBlockPtr = (struct block *) op - 1;
        bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr;
        bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr;
        TclpSysFree(bigBlockPtr);
        Tcl_MutexUnlock(allocMutexPtr);

    ASSERT(size < NBUCKETS);
    op->ov_next = nextf[size];	/* also clobbers ov_magic */

    Tcl_MutexUnlock(allocMutexPtr);
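/*
 * Illustrative usage sketch (not part of the original sources): the three
 * entry points pair up like malloc/realloc/free. The sizes below are
 * hypothetical; with the at-least-8-byte overhead, a 100-byte request is
 * served from the 128-byte bucket and the 200-byte resize moves it to a
 * 256-byte block.
 */

#if 0
static void
UsageExample(void)
{
    char *buf = TclpAlloc(100);		/* 128-byte bucket */

    if (buf != NULL) {
        buf = TclpRealloc(buf, 200);	/* copied into a 256-byte block */
    }
    if (buf != NULL) {
        TclpFree(buf);			/* returned to its bucket's free list */
    }
}
#endif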
 *----------------------------------------------------------------------
 *
 *----------------------------------------------------------------------

TclpRealloc(cp, nbytes)
    char *cp;			/* Pointer to alloced block. */
    unsigned int nbytes;	/* New size of memory. */

    struct block *bigBlockPtr;

    unsigned long maxsize;

        return (TclpAlloc(nbytes));

    Tcl_MutexLock(allocMutexPtr);

    op = (union overhead *)((caddr_t)cp - sizeof (union overhead));

    ASSERT(op->ov_magic0 == MAGIC);	/* make sure it was in use */
    ASSERT(op->ov_magic1 == MAGIC);
    if (op->ov_magic0 != MAGIC || op->ov_magic1 != MAGIC) {
        Tcl_MutexUnlock(allocMutexPtr);

    RANGE_ASSERT(op->ov_rmagic == RMAGIC);
    RANGE_ASSERT(*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);

     * If the block isn't in a bin, just realloc it.

        struct block *prevPtr, *nextPtr;
        bigBlockPtr = (struct block *) op - 1;
        prevPtr = bigBlockPtr->prevPtr;
        nextPtr = bigBlockPtr->nextPtr;
        bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr,
                sizeof(struct block) + OVERHEAD + nbytes);
        if (bigBlockPtr == NULL) {
            Tcl_MutexUnlock(allocMutexPtr);

        if (prevPtr->nextPtr != bigBlockPtr) {
             * If the block has moved, splice the new block into the list where
             * the old block used to be.

            prevPtr->nextPtr = bigBlockPtr;
            nextPtr->prevPtr = bigBlockPtr;

        op = (union overhead *) (bigBlockPtr + 1);

         * Record allocated size of block and update magic number bounds.

        op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
        *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;

        Tcl_MutexUnlock(allocMutexPtr);
        return (char *)(op+1);

    maxsize = 1 << (i+3);

    if (nbytes + OVERHEAD > maxsize) {
    } else if (i > 0 && nbytes + OVERHEAD < (maxsize/2)) {

        Tcl_MutexUnlock(allocMutexPtr);

        newp = TclpAlloc(nbytes);
        if (newp == NULL) {

        if (maxsize < nbytes)

        memcpy((VOID *) newp, (VOID *) cp, (size_t) nbytes);

     * Ok, we don't have to copy, it fits as-is.

    op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
    *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;

    Tcl_MutexUnlock(allocMutexPtr);
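/*
 * Illustrative sketch (not part of the original sources): the fit test that
 * TclpRealloc applies above, restated as a hypothetical predicate. A resize
 * is "expensive" (allocate + copy + free) when the new size no longer fits
 * the current bucket, or when it would comfortably fit a smaller one;
 * otherwise the block is reused in place and only its recorded size is
 * updated.
 */

#if 0
static int
ReallocNeedsCopy(int bucket, unsigned int nbytes)
{
    unsigned long maxsize = 1 << (bucket + 3);	/* capacity of this bucket */

    if (nbytes + OVERHEAD > maxsize) {
        return 1;			/* too big for the current bucket */
    }
    if (bucket > 0 && nbytes + OVERHEAD < maxsize / 2) {
        return 1;			/* would waste at least half the block */
    }
    return 0;				/* fits: adjust header in place */
}
#endif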
 *----------------------------------------------------------------------
 *
 * Prints two lines of numbers, one showing the length of the
 * free list for each size category, the second showing the
 * number of mallocs - frees for each size category.
 *
 *----------------------------------------------------------------------

    char *s;			/* Where to write info. */

    register union overhead *p;

    Tcl_MutexLock(allocMutexPtr);
    fprintf(stderr, "Memory allocation statistics %s\nTclpFree:\t", s);
    for (i = 0; i < NBUCKETS; i++) {
        for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
            fprintf(stderr, " %d", j);
        totfree += j * (1 << (i + 3));

    fprintf(stderr, "\nused:\t");
    for (i = 0; i < NBUCKETS; i++) {
        fprintf(stderr, " %d", nmalloc[i]);
        totused += nmalloc[i] * (1 << (i + 3));

    fprintf(stderr, "\n\tTotal small in use: %d, total free: %d\n",

    fprintf(stderr, "\n\tNumber of big (>%d) blocks in use: %d\n",
            MAXMALLOC, nmalloc[NBUCKETS]);
    Tcl_MutexUnlock(allocMutexPtr);
#else /* !USE_TCLALLOC */

 *----------------------------------------------------------------------
 *
 * Allocate more memory.
 *
 *----------------------------------------------------------------------

    unsigned int nbytes;	/* Number of bytes to allocate. */

    return (char *) malloc(nbytes);

 *----------------------------------------------------------------------
 *
 *----------------------------------------------------------------------

    char *cp;			/* Pointer to memory to free. */

 *----------------------------------------------------------------------
 *
 *----------------------------------------------------------------------

TclpRealloc(cp, nbytes)
    char *cp;			/* Pointer to alloced block. */
    unsigned int nbytes;	/* New size of memory. */

    return (char *) realloc(cp, nbytes);

#endif /* !USE_TCLALLOC */
#endif /* !TCL_THREADS */