/*
 * tclAlloc.c --
 *
 *      This is a very fast storage allocator.  It allocates blocks of a
 *      small number of different sizes, and keeps free lists of each size.
 *      Blocks that don't exactly fit are passed up to the next larger size.
 *      Blocks over a certain size are directly allocated from the system.
 *
 * Copyright (c) 1983 Regents of the University of California.
 * Copyright (c) 1996-1997 Sun Microsystems, Inc.
 * Copyright (c) 1998-1999 by Scriptics Corporation.
 *
 * Portions contributed by Chris Kingsley, Jack Jansen and Ray Johnson.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclAlloc.c,v 1.16.2.1 2004/10/28 21:12:37 andreas_kupries Exp $
 */

/*
 * Windows and Unix use an alternative allocator when building with threads
 * that has significantly reduced lock contention.
 */

#if !defined(TCL_THREADS) || !defined(USE_THREAD_ALLOC) || defined(TCL_MEM_DEBUG)

#include "tclInt.h"
#include "tclPort.h"

#if USE_TCLALLOC

#ifdef TCL_DEBUG
#   define DEBUG
/* #define MSTATS */
#   define RCHECK
#endif

/*
 * We should really make use of AC_CHECK_TYPE(caddr_t)
 * here, but it can wait until Tcl uses config.h properly.
 */

#if defined(MAC_TCL) || defined(_MSC_VER) || defined(__MINGW32__) || defined(__BORLANDC__)
typedef unsigned long caddr_t;
#endif

/*
 * The overhead on a block is at least 8 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */

union overhead {
    union overhead *ov_next;        /* when free */
    unsigned char ov_padding[8];    /* Ensure the structure is 8-byte aligned. */
    struct {
        unsigned char ovu_magic0;   /* magic number */
        unsigned char ovu_index;    /* bucket # */
        unsigned char ovu_unused;   /* unused */
        unsigned char ovu_magic1;   /* other magic number */
#ifdef RCHECK
        unsigned short ovu_rmagic;  /* range magic number */
        unsigned long ovu_size;     /* actual block size */
        unsigned short ovu_unused2; /* padding to 8-byte align */
#endif
    } ovu;
#define ov_magic0   ovu.ovu_magic0
#define ov_magic1   ovu.ovu_magic1
#define ov_index    ovu.ovu_index
#define ov_rmagic   ovu.ovu_rmagic
#define ov_size     ovu.ovu_size
};


#define MAGIC       0xef    /* magic # on accounting info */
#define RMAGIC      0x5555  /* magic # on range info */

#ifdef RCHECK
#define RSLOP       sizeof (unsigned short)
#else
#define RSLOP       0
#endif

#define OVERHEAD (sizeof(union overhead) + RSLOP)
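
/*
 * OVERHEAD is the per-block bookkeeping cost: the overhead union that
 * precedes every block handed back to the caller, plus (when RCHECK is
 * defined) room for a trailing RMAGIC word written just past the rounded-up
 * request size so that overruns can be detected on free and realloc.
 */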

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */

#define NBUCKETS    13
#define MAXMALLOC   (1<<(NBUCKETS+2))
static union overhead *nextf[NBUCKETS];
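
/*
 * With NBUCKETS == 13 the buckets hold blocks of 8, 16, 32, ... up to
 * 2^(12+3) == 32768 bytes, and MAXMALLOC == 1<<15 == 32768.  Any request
 * whose total size (nbytes + OVERHEAD) reaches MAXMALLOC bypasses the
 * buckets entirely and is allocated directly from the system (see
 * TclpAlloc below).
 */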

/*
 * The following structure is used to keep track of all system memory
 * currently owned by Tcl.  When finalizing, all this memory will
 * be returned to the system.
 */

struct block {
    struct block *nextPtr;  /* Linked list. */
    struct block *prevPtr;  /* Linked list for big blocks, ensures 8-byte
                             * alignment for suballocated blocks. */
};

static struct block *blockList;     /* Tracks the suballocated blocks. */
static struct block bigBlocks = {   /* Big blocks aren't suballocated. */
    &bigBlocks, &bigBlocks
};
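
/*
 * bigBlocks is the sentinel of a circular doubly-linked list: it is
 * statically initialized to point at itself, each block allocated directly
 * from the system is spliced in right after it, and
 * TclFinalizeAllocSubsystem walks the list to release every entry.
 */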

/*
 * The allocator is protected by a special mutex that must be
 * explicitly initialized.  Furthermore, because Tcl_Alloc may be
 * used before anything else in Tcl, we make this module
 * self-initializing by means of the allocInit variable.
 */

#ifdef TCL_THREADS
static Tcl_Mutex *allocMutexPtr;
#endif
static int allocInit = 0;


#ifdef MSTATS

/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */

static unsigned int nmalloc[NBUCKETS+1];
#include <stdio.h>
#endif

#if defined(DEBUG) || defined(RCHECK)
#define ASSERT(p) if (!(p)) panic(# p)
#define RANGE_ASSERT(p) if (!(p)) panic(# p)
#else
#define ASSERT(p)
#define RANGE_ASSERT(p)
#endif

/*
 * Prototypes for functions used only in this file.
 */

static void MoreCore _ANSI_ARGS_((int bucket));


/*
 *-------------------------------------------------------------------------
 *
 * TclInitAlloc --
 *
 *      Initialize the memory system.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Initialize the mutex used to serialize allocations.
 *
 *-------------------------------------------------------------------------
 */

void
TclInitAlloc()
{
    if (!allocInit) {
        allocInit = 1;
#ifdef TCL_THREADS
        allocMutexPtr = Tcl_GetAllocMutex();
#endif
    }
}

/*
 *-------------------------------------------------------------------------
 *
 * TclFinalizeAllocSubsystem --
 *
 *      Release all resources being used by this subsystem, including
 *      aggressively freeing all memory allocated by TclpAlloc() that
 *      has not yet been released with TclpFree().
 *
 *      After this function is called, all memory allocated with
 *      TclpAlloc() should be considered unusable.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      This subsystem is self-initializing, since memory can be
 *      allocated before Tcl is formally initialized.  After this call,
 *      this subsystem has been reset to its initial state and is
 *      usable again.
 *
 *-------------------------------------------------------------------------
 */

void
TclFinalizeAllocSubsystem()
{
    int i;
    struct block *blockPtr, *nextPtr;

    Tcl_MutexLock(allocMutexPtr);
    for (blockPtr = blockList; blockPtr != NULL; blockPtr = nextPtr) {
        nextPtr = blockPtr->nextPtr;
        TclpSysFree(blockPtr);
    }
    blockList = NULL;

    for (blockPtr = bigBlocks.nextPtr; blockPtr != &bigBlocks; ) {
        nextPtr = blockPtr->nextPtr;
        TclpSysFree(blockPtr);
        blockPtr = nextPtr;
    }
    bigBlocks.nextPtr = &bigBlocks;
    bigBlocks.prevPtr = &bigBlocks;

    for (i = 0; i < NBUCKETS; i++) {
        nextf[i] = NULL;
#ifdef MSTATS
        nmalloc[i] = 0;
#endif
    }
#ifdef MSTATS
    nmalloc[i] = 0;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *      Allocate more memory.
 *
 * Results:
 *      A pointer to the allocated block, or NULL if the allocation failed.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(nbytes)
    unsigned int nbytes;    /* Number of bytes to allocate. */
{
    register union overhead *op;
    register long bucket;
    register unsigned amt;
    struct block *bigBlockPtr;

    if (!allocInit) {
        /*
         * We have to make this module self-initializing because Tcl_Alloc
         * may be used before any other part of Tcl.  E.g., see
         * main() for tclsh!
         */
        TclInitAlloc();
    }
    Tcl_MutexLock(allocMutexPtr);
    /*
     * First the simple case: we simply allocate big blocks directly.
     */
    if (nbytes + OVERHEAD >= MAXMALLOC) {
        bigBlockPtr = (struct block *) TclpSysAlloc((unsigned)
                (sizeof(struct block) + OVERHEAD + nbytes), 0);
        if (bigBlockPtr == NULL) {
            Tcl_MutexUnlock(allocMutexPtr);
            return NULL;
        }
        bigBlockPtr->nextPtr = bigBlocks.nextPtr;
        bigBlocks.nextPtr = bigBlockPtr;
        bigBlockPtr->prevPtr = &bigBlocks;
        bigBlockPtr->nextPtr->prevPtr = bigBlockPtr;
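
        /*
         * The new block now sits immediately after the bigBlocks sentinel,
         * which is how TclFinalizeAllocSubsystem later finds and frees it if
         * the caller never does.
         */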

        op = (union overhead *) (bigBlockPtr + 1);
        op->ov_magic0 = op->ov_magic1 = MAGIC;
        op->ov_index = 0xff;
#ifdef MSTATS
        nmalloc[NBUCKETS]++;
#endif
#ifdef RCHECK
        /*
         * Record allocated size of block and
         * bound space with magic numbers.
         */
        op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
        op->ov_rmagic = RMAGIC;
        *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
        Tcl_MutexUnlock(allocMutexPtr);
        return (void *)(op+1);
    }
    /*
     * Convert amount of memory requested into closest block size
     * stored in hash buckets which satisfies request.
     * Account for space used per block for accounting.
     */
#ifndef RCHECK
    amt = 8;        /* size of first bucket */
    bucket = 0;
#else
    amt = 16;       /* size of first bucket */
    bucket = 1;
#endif
    while (nbytes + OVERHEAD > amt) {
        amt <<= 1;
        if (amt == 0) {
            Tcl_MutexUnlock(allocMutexPtr);
            return (NULL);
        }
        bucket++;
    }
    ASSERT( bucket < NBUCKETS );
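
    /*
     * Worked example (assuming OVERHEAD comes to 16 bytes on this platform):
     * a 100-byte request needs 116 bytes, so the loop doubles amt until it
     * reaches 128 and stops at bucket 4, consistent with 2^(4+3) == 128.
     */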

    /*
     * If nothing in hash bucket right now,
     * request more memory from the system.
     */
    if ((op = nextf[bucket]) == NULL) {
        MoreCore(bucket);
        if ((op = nextf[bucket]) == NULL) {
            Tcl_MutexUnlock(allocMutexPtr);
            return (NULL);
        }
    }
    /*
     * Remove from linked list
     */
    nextf[bucket] = op->ov_next;
    op->ov_magic0 = op->ov_magic1 = MAGIC;
    op->ov_index = (unsigned char) bucket;
#ifdef MSTATS
    nmalloc[bucket]++;
#endif
#ifdef RCHECK
    /*
     * Record allocated size of block and
     * bound space with magic numbers.
     */
    op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
    op->ov_rmagic = RMAGIC;
    *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
    return ((char *)(op + 1));
}

/*
 *----------------------------------------------------------------------
 *
 * MoreCore --
 *
 *      Allocate more memory to the indicated bucket.
 *
 *      Assumes Mutex is already held.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Attempts to get more memory from the system.
 *
 *----------------------------------------------------------------------
 */

static void
MoreCore(bucket)
    int bucket;     /* Which bucket to allocate to. */
{
    register union overhead *op;
    register long sz;       /* size of desired block */
    long amt;               /* amount to allocate */
    int nblks;              /* how many blocks we get */
    struct block *blockPtr;

    /*
     * sbrk_size <= 0 only for big, FLUFFY, requests (about
     * 2^30 bytes on a VAX, I think) or for a negative arg.
     */
    sz = 1 << (bucket + 3);
    ASSERT(sz > 0);

    amt = MAXMALLOC;
    nblks = amt / sz;
    ASSERT(nblks*sz == amt);
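
    /*
     * For example, bucket 4 holds 128-byte blocks, so one MAXMALLOC (32768
     * byte) chunk yields 256 of them; the loop below threads them all onto
     * nextf[bucket] as a singly linked free list.
     */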

    blockPtr = (struct block *) TclpSysAlloc((unsigned)
            (sizeof(struct block) + amt), 1);
    /* no more room! */
    if (blockPtr == NULL) {
        return;
    }
    blockPtr->nextPtr = blockList;
    blockList = blockPtr;

    op = (union overhead *) (blockPtr + 1);

    /*
     * Add new memory allocated to that on
     * free list for this hash bucket.
     */
    nextf[bucket] = op;
    while (--nblks > 0) {
        op->ov_next = (union overhead *)((caddr_t)op + sz);
        op = (union overhead *)((caddr_t)op + sz);
    }
    op->ov_next = (union overhead *)NULL;
}

/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *      Free memory.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(cp)
    char *cp;       /* Pointer to memory to free. */
{
    register long size;
    register union overhead *op;
    struct block *bigBlockPtr;

    if (cp == NULL) {
        return;
    }

    Tcl_MutexLock(allocMutexPtr);
    op = (union overhead *)((caddr_t)cp - sizeof (union overhead));

    ASSERT(op->ov_magic0 == MAGIC);     /* make sure it was in use */
    ASSERT(op->ov_magic1 == MAGIC);
    if (op->ov_magic0 != MAGIC || op->ov_magic1 != MAGIC) {
        Tcl_MutexUnlock(allocMutexPtr);
        return;
    }

    RANGE_ASSERT(op->ov_rmagic == RMAGIC);
    RANGE_ASSERT(*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
    size = op->ov_index;
    if ( size == 0xff ) {
#ifdef MSTATS
        nmalloc[NBUCKETS]--;
#endif
        bigBlockPtr = (struct block *) op - 1;
        bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr;
        bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr;
        TclpSysFree(bigBlockPtr);
        Tcl_MutexUnlock(allocMutexPtr);
        return;
    }
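
    /*
     * Small blocks are never returned to the system here; they are simply
     * pushed back onto the head of their bucket's free list (LIFO), ready
     * for the next TclpAlloc of the same size class.
     */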
    ASSERT(size < NBUCKETS);
    op->ov_next = nextf[size];  /* also clobbers ov_magic */
    nextf[size] = op;
#ifdef MSTATS
    nmalloc[size]--;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *      Reallocate memory.
 *
 * Results:
 *      A pointer to the resized block, or NULL if the reallocation failed.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(cp, nbytes)
    char *cp;               /* Pointer to alloced block. */
    unsigned int nbytes;    /* New size of memory. */
{
    int i;
    union overhead *op;
    struct block *bigBlockPtr;
    int expensive;
    unsigned long maxsize;

    if (cp == NULL) {
        return (TclpAlloc(nbytes));
    }

    Tcl_MutexLock(allocMutexPtr);

    op = (union overhead *)((caddr_t)cp - sizeof (union overhead));

    ASSERT(op->ov_magic0 == MAGIC);     /* make sure it was in use */
    ASSERT(op->ov_magic1 == MAGIC);
    if (op->ov_magic0 != MAGIC || op->ov_magic1 != MAGIC) {
        Tcl_MutexUnlock(allocMutexPtr);
        return NULL;
    }

    RANGE_ASSERT(op->ov_rmagic == RMAGIC);
    RANGE_ASSERT(*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
    i = op->ov_index;

    /*
     * If the block isn't in a bin, just realloc it.
     */

    if (i == 0xff) {
        struct block *prevPtr, *nextPtr;
        bigBlockPtr = (struct block *) op - 1;
        prevPtr = bigBlockPtr->prevPtr;
        nextPtr = bigBlockPtr->nextPtr;
        bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr,
                sizeof(struct block) + OVERHEAD + nbytes);
        if (bigBlockPtr == NULL) {
            Tcl_MutexUnlock(allocMutexPtr);
            return NULL;
        }

        if (prevPtr->nextPtr != bigBlockPtr) {
            /*
             * If the block has moved, splice the new block into the list
             * where the old block used to be.
             */

            prevPtr->nextPtr = bigBlockPtr;
            nextPtr->prevPtr = bigBlockPtr;
        }

        op = (union overhead *) (bigBlockPtr + 1);
#ifdef MSTATS
        nmalloc[NBUCKETS]++;
#endif
#ifdef RCHECK
        /*
         * Record allocated size of block and update magic number bounds.
         */

        op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
        *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
        Tcl_MutexUnlock(allocMutexPtr);
        return (char *)(op+1);
    }
    maxsize = 1 << (i+3);
    expensive = 0;
    if ( nbytes + OVERHEAD > maxsize ) {
        expensive = 1;
    } else if ( i > 0 && nbytes + OVERHEAD < (maxsize/2) ) {
        expensive = 1;
    }
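
    /*
     * "Expensive" means the block cannot be resized in place: either the new
     * size no longer fits its current bucket, or it has shrunk enough to fit
     * a bucket half the size, in which case it is worth allocating a smaller
     * block, copying the data, and freeing the old one.
     */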

    if (expensive) {
        void *newp;

        Tcl_MutexUnlock(allocMutexPtr);

        newp = TclpAlloc(nbytes);
        if ( newp == NULL ) {
            return NULL;
        }
        maxsize -= OVERHEAD;
        if ( maxsize < nbytes )
            nbytes = maxsize;
        memcpy((VOID *) newp, (VOID *) cp, (size_t) nbytes);
        TclpFree(cp);
        return newp;
    }

    /*
     * Ok, we don't have to copy, it fits as-is.
     */
#ifdef RCHECK
    op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
    *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
    return(cp);
}

/*
 *----------------------------------------------------------------------
 *
 * mstats --
 *
 *      Prints two lines of numbers, one showing the length of the
 *      free list for each size category, the second showing the
 *      number of mallocs - frees for each size category.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

#ifdef MSTATS
void
mstats(s)
    char *s;        /* Where to write info. */
{
    register int i, j;
    register union overhead *p;
    int totfree = 0,
        totused = 0;

    Tcl_MutexLock(allocMutexPtr);
    fprintf(stderr, "Memory allocation statistics %s\nTclpFree:\t", s);
    for (i = 0; i < NBUCKETS; i++) {
        for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
            fprintf(stderr, " %d", j);
        totfree += j * (1 << (i + 3));
    }
    fprintf(stderr, "\nused:\t");
    for (i = 0; i < NBUCKETS; i++) {
        fprintf(stderr, " %d", nmalloc[i]);
        totused += nmalloc[i] * (1 << (i + 3));
    }
    fprintf(stderr, "\n\tTotal small in use: %d, total free: %d\n",
            totused, totfree);
    fprintf(stderr, "\n\tNumber of big (>%d) blocks in use: %d\n",
            MAXMALLOC, nmalloc[NBUCKETS]);
    Tcl_MutexUnlock(allocMutexPtr);
}
#endif

#else /* !USE_TCLALLOC */

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *      Allocate more memory.
 *
 * Results:
 *      A pointer to the allocated block, or NULL if the allocation failed.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(nbytes)
    unsigned int nbytes;    /* Number of bytes to allocate. */
{
    return (char*) malloc(nbytes);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *      Free memory.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(cp)
    char *cp;       /* Pointer to memory to free. */
{
    free(cp);
    return;
}

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *      Reallocate memory.
 *
 * Results:
 *      A pointer to the resized block, or NULL if the reallocation failed.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(cp, nbytes)
    char *cp;               /* Pointer to alloced block. */
    unsigned int nbytes;    /* New size of memory. */
{
    return (char*) realloc(cp, nbytes);
}

#endif /* !USE_TCLALLOC */
#endif /* !TCL_THREADS */