os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mvalloc.cpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
//
sl@0
    15
sl@0
    16
#include <plat_priv.h>
sl@0
    17
#include "mm.h"
sl@0
    18
#include "mmu.h"
sl@0
    19
#include "mvalloc.h"
sl@0
    20
#include "maddrcont.h"
sl@0
    21
sl@0
    22
sl@0
    23
/**
Log2 of the minimum granularity and alignment of virtual address allocation.
Must be greater than or equal to #KPageShift+#KPageColourShift.
*/
const TUint KVirtualAllocShift = KPageShift+KPageColourShift;

/**
Log2 of the size of the region covered by a single 'slab' of virtual addresses.
Must be greater than or equal to KChunkShift.
*/
const TUint KVirtualAllocSlabShift = KChunkShift;

/**
Size, in bytes, of the size of the region covered by a single 'slab' of virtual addresses.
*/
const TUint KVirtualAllocSlabSize = 1<<KVirtualAllocSlabShift;

/**
Mask for extracting the offset of an address within its slab.
*/
const TUint KVirtualAllocSlabMask = KVirtualAllocSlabSize-1;

// Compile-time enforcement of the constraints documented above.
__ASSERT_COMPILE(KVirtualAllocShift>=KPageShift+KPageColourShift);
__ASSERT_COMPILE(KVirtualAllocSlabShift>=TUint(KChunkShift));
sl@0
    44
sl@0
    45
sl@0
    46
#if defined(__GCCXML__)
// Stub used only for the GCCXML documentation/parsing build; never executed.
FORCE_INLINE TUint CountLeadingZeroes(TUint32 /*aValue*/)
	{
	// empty
	return 0;
	}

#elif defined(__MARM__)

#ifdef __ARMCC__
// ARM RVCT build: use the CLZ instruction directly.
FORCE_INLINE TUint CountLeadingZeroes(TUint32 aValue)
	{
	#if __ARMCC_VERSION < 310000
		TUint r;
		asm("clz r,aValue");
		return r;
	#else
		// Inline assembler is deprecated in RVCT 3.1 so we use an intrinsic.
		return __clz(aValue);
	#endif
	}
#endif // __ARMCC__

#ifdef __MARM_ARM4__
// ARM4 build: CLZ macro expands to a suitable instruction sequence;
// naked function so the result is left in r0 for the caller.
__declspec(naked) static TUint CountLeadingZeroes(TUint32)
	{
	CLZ(0,0);
	__JUMP(,lr);
	}

#elif defined(__GNUC__)
// GCC ARM build: emit the CLZ instruction via inline assembler.
FORCE_INLINE TUint CountLeadingZeroes(TUint32 aValue)
	{
	TUint r;
	asm("clz %0,%1" : "=r"(r) : "r"(aValue));
	return r;
	}
#endif //  __GNUC__

#else // !__MARM__

// Generic fallback: binary-search the top set bit, narrowing by 16, 8, 4
// and 2 bit halves, then adjust for the final bit. Returns 32 for zero.
inline TUint CountLeadingZeroes(TUint32 aValue)
	{
	if(!aValue)
		return 32;
	TUint count = 31;
	if(aValue>=(1<<16))
		{
		count -= 16;
		aValue >>= 16;
		}
	if(aValue>=(1<<8))
		{
		count -= 8;
		aValue >>= 8;
		}
	if(aValue>=(1<<4))
		{
		count -= 4;
		aValue >>= 4;
		}
	if(aValue>=(1<<2))
		{
		count -= 2;
		aValue >>= 2;
		}
	count -= aValue>>1;
	return count;
	}

#endif // __MARM__
sl@0
   117
sl@0
   118
sl@0
   119
sl@0
   120
//
sl@0
   121
// TLogAllocator
sl@0
   122
//
sl@0
   123
sl@0
   124
/**
Bitmap allocator for allocating regions which have size and alignment which
are a power-of-two.
*/
class TLogAllocator
	{
public:
	TLogAllocator();

	/**
	Find and allocate a free region in the bitmap.

	@param aSizeShift	Log2 of the number of bits to allocate.

	@return If successful, the index of the first bit allocated.
			Otherwise, -1.
	*/
	TInt Alloc(TUint aSizeShift);

	/**
	Allocate a specific region of bits.

	@param aIndex		The index of the first bit to allocated.
						Must be a integer multiple of 2^aSizeShift.
	@param aSizeShift	Log2 of the number of bits to allocate.

	@return KErrNone, if successful;
			KErrAlreadyExists, if any part of the region was already allocated.
	*/
	TInt Alloc(TUint aIndex, TUint aSizeShift);

	/**
	Free a specific region of bits.

	@param aIndex		The index of the first bit to free.
						Must be a integer multiple of 2^aSizeShift.

	@param aSizeShift	Log2 of the number of bits to free.

	@return True, if the slab no longer has any bits allocated.
	*/
	TBool Free(TUint aIndex, TUint aSizeShift);
private:
	enum
		{
		// One bit per minimum-granularity allocation unit within a slab.
		ENumBits = 1<<(KVirtualAllocSlabShift-KVirtualAllocShift),
		ENumWords = (ENumBits+31)/32
		};

	/**
	Number of bits which have been allocated.
	*/
	TUint iAllocCount;

	/**
	Bitmap where a bit set to one indicates 'free' and a bit cleared to zero
	indicates 'allocated'. The most significant bit in each word has the lowest
	index value. E.g.
	- Index 0 is bit 31 of iBits[0]
	- Index 31 is bit 0 of iBits[0]
	- Index 32 is bit 31 of iBits[1]
	*/
	TUint32 iBits[ENumWords];
	};
sl@0
   188
sl@0
   189
sl@0
   190
TLogAllocator::TLogAllocator()
	{
	// Nothing allocated yet; a one-bit means 'free', so start with every
	// word in the bitmap fully set.
	iAllocCount = 0;
	TUint32* word = iBits;
	TUint32* wordEnd = iBits+ENumWords;
	while(word<wordEnd)
		*word++ = 0xffffffffu;
	}
sl@0
   195
sl@0
   196
sl@0
   197
TInt TLogAllocator::Alloc(TUint aSizeShift)
	{
	// Search the bitmap for 2^aSizeShift adjacent free (set) bits, naturally
	// aligned to their size. Small sizes (<32 bits) are found within a single
	// word by bit-smearing tricks; sizes >=32 bits scan whole words.
	TUint size = 1<<aSizeShift;

	__NK_ASSERT_DEBUG(size<=ENumBits); // check in range

	TUint32* bits = iBits;
	TUint32* bitsEnd = bits+ENumWords;
	TUint32 b;
	switch(aSizeShift)
		{
	case 0: // find word with any unallocated bits...
		do
			{
			b = *bits++;
			if(b)
				goto small_found;
			}
		while(bits<bitsEnd);
		break;

	case 1: // find word with 2 adjacent unallocated bits...
		do
			{
			b = *bits++;
			b &= b<<1;			// bit survives only if its neighbour is also free
			b &= 0xaaaaaaaa;	// keep only pair-aligned positions
			if(b)
				goto small_found;
			}
		while(bits<bitsEnd);
		break;

	case 2: // find word with 4 adjacent unallocated bits...
		do
			{
			b = *bits++;
			b &= b<<1;
			b &= b<<2;
			b &= 0x88888888;	// keep only nibble-aligned positions
			if(b)
				goto small_found;
			}
		while(bits<bitsEnd);
		break;

	case 3: // find word with 8 adjacent unallocated bits...
		do
			{
			b = *bits++;
			b &= b<<1;
			b &= b<<2;
			b &= b<<4;
			b &= 0x80808080;	// keep only byte-aligned positions
			if(b)
				goto small_found;
			}
		while(bits<bitsEnd);
		break;

	case 4: // find word with 16 adjacent unallocated bits...
		do
			{
			b = *bits++;
			b &= b<<1;
			b &= b<<2;
			b &= b<<4;
			b &= b<<8;
			b &= 0x80008000;	// keep only halfword-aligned positions
			if(b)
				goto small_found;
			}
		while(bits<bitsEnd);
		break;

	case 5: // find word which is totally unallocated (has 32 bits free)...
		do
			{
			b = *bits++;
			if(b==0xffffffffu)
				goto big_found;
			}
		while(bits<bitsEnd);
		break;

	default: // find relevant number of words which are unallocated...
		{
		do
			{
			// AND words together...
			// size>>3 is the region size in bytes, i.e. size>>5 words.
			TUint32* end = (TUint32*)((TUint8*)bits+(size>>3));
			TUint32 b = 0xffffffffu;
			do b &= *bits++;
			while(bits<end);

			if(b==0xffffffffu)
				goto big_found; // all were free
			}
		while(bits<bitsEnd);
		break;
		}

		}
	__NK_ASSERT_DEBUG(bits==bitsEnd);
	return -1; // no free region of the requested size

small_found:
	{
	// find first position in word which have free region (a bit set to one)...
	TUint offset = CountLeadingZeroes(b);

	// clear bits...
	TUint32 mask = 0xffffffffu;
	mask >>= size;
	mask = ~mask;		// 'size' ones at the top of the word...
	mask >>= offset;	// ...moved down to the found position
	*--bits &= ~mask;	// note: bits was post-incremented past the word

	// calculate index for allocated region...
	TUint index = (bits-iBits)*32+offset;

	iAllocCount += size;
	return index;
	}

big_found:
	{
	// clear bits...
	// step back over the whole-word span just checked and zero it
	TUint32* start = (TUint32*)((TUint8*)bits-(size>>3));
	do *--bits = 0;
	while(bits>start);

	// calculate index for allocated region...
	TUint index = (bits-iBits)*32;

	iAllocCount += size;
	return index;
	}

	}
sl@0
   337
sl@0
   338
sl@0
   339
TInt TLogAllocator::Alloc(TUint aIndex, TUint aSizeShift)
	{
	// Claim the specific naturally-aligned region [aIndex,aIndex+2^aSizeShift).
	TUint size = 1<<aSizeShift;

	__NK_ASSERT_DEBUG(aIndex+size>aIndex); // check overflow
	__NK_ASSERT_DEBUG(aIndex+size<=ENumBits); // check in range
	__NK_ASSERT_DEBUG(((aIndex>>aSizeShift)<<aSizeShift)==aIndex); // check alignment

	TUint32* word = iBits+(aIndex>>5);
	if(size<32)
		{
		// Region lies within a single word; build a mask of 'size' bits
		// positioned at the region's offset within the word...
		TUint32 region = ~(0xffffffffu>>size);
		region >>= (aIndex&31);
		TUint32 current = *word;
		if((current&region)!=region)
			return KErrAlreadyExists; // some bit already allocated
		*word = current&~region;
		}
	else
		{
		// Whole words; verify every one is fully free before touching any...
		TUint wordCount = size>>5;
		TUint i;
		for(i=0; i<wordCount; ++i)
			if(word[i]!=0xffffffffu)
				return KErrAlreadyExists;
		// ...then mark them all allocated.
		for(i=0; i<wordCount; ++i)
			word[i] = 0;
		}

	iAllocCount += size;
	return KErrNone;
	}
sl@0
   374
sl@0
   375
sl@0
   376
TBool TLogAllocator::Free(TUint aIndex, TUint aSizeShift)
	{
	// Release the naturally-aligned region [aIndex,aIndex+2^aSizeShift);
	// the whole region must currently be allocated.
	TUint size = 1<<aSizeShift;

	__NK_ASSERT_DEBUG(aIndex+size>aIndex); // check overflow
	__NK_ASSERT_DEBUG(aIndex+size<=ENumBits); // check in range
	__NK_ASSERT_DEBUG(((aIndex>>aSizeShift)<<aSizeShift)==aIndex); // check alignment

	TUint32* bits = iBits+(aIndex>>5);
	if(size<32)
		{
		// region lies within a single word; build mask of bits to set free...
		TUint32 mask = 0xffffffffu;
		mask >>= size;
		mask = ~mask;
		mask >>= aIndex&31;
		TUint32 b = *bits;
		__NK_ASSERT_DEBUG((b&mask)==0); // check was allocated
		*bits = b|mask;
		}
	else
		{
		// whole words; each must currently be fully allocated (all zero)...
		TUint wordCount = size>>5;
		do
			{
			__NK_ASSERT_DEBUG(bits[0]==0);
			*bits++ = 0xffffffffu;
			}
		while(--wordCount);
		}

	iAllocCount -= size;
	return !iAllocCount; // true when nothing left allocated in this slab
	}
sl@0
   409
sl@0
   410
sl@0
   411
sl@0
   412
//
sl@0
   413
// TVirtualSlab
sl@0
   414
//
sl@0
   415
sl@0
   416
/**
Class for allocating virtual addresses contained in a single 'slab'.
@see RVirtualAllocSlabSet.
*/
class TVirtualSlab
	{
public:
	/**
	@param aHead		The head of a linked list of slabs to which this one should be added.
	@param aBase		The starting virtual address of the region covered by this slab.
	@param aSlabType	The 'slab type'.
	*/
	TVirtualSlab(SDblQue& aHead, TUint aBase, TUint aSlabType);

	~TVirtualSlab();

	/**
	Find an allocate a free region of virtual addresses.

	@param aSizeShift	Log2 of the size, in bytes, of the region.

	@return If successful, the allocated virtual address.
			Otherwise, 0 (zero).
	*/
	TLinAddr Alloc(TUint aSizeShift);

	/**
	Allocate a specific region of virtual addresses.

	@param aAddr		The start address of the region.
						Must be a integer multiple of 2^aSizeShift.
	@param aSizeShift	Log2 of the size, in bytes, of the region.


	@return KErrNone, if successful;
			KErrAlreadyExists, if any part of the region was already allocated.
	*/
	TInt Alloc(TLinAddr aAddr, TUint aSizeShift);

	/**
	Free a specific region of virtual addresses.

	@param aAddr		The start address of the region.
						Must be a integer multiple of 2^aSizeShift.
	@param aSizeShift	Log2 of the size, in bytes, of the region.

	@return True, if the slab no longer has any addresses allocated.
	*/
	TBool Free(TLinAddr aAddr, TUint aSizeShift);

	/**
	Return the starting virtual address of the region covered by this slab.
	*/
	FORCE_INLINE TLinAddr Base() { return iBase; }

	/**
	Return this objects 'slab type'. 
	*/
	FORCE_INLINE TUint SlabType() { return iSlabType; }
private:
	/**
	Link object used to insert this slab into lists.
	*/
	SDblQueLink iLink;

	/**
	The starting virtual address of the region covered by this slab.
	*/
	TLinAddr iBase;

	/**
	This objects 'slab type'. 
	*/
	TUint8 iSlabType;

	/**
	Bitmap allocator used to allocated pages in this slab's virtual address region.
	*/
	TLogAllocator iAllocator;

	friend class RVirtualAllocSlabSet;
	};
sl@0
   498
sl@0
   499
sl@0
   500
TVirtualSlab::TVirtualSlab(SDblQue& aHead, TUint aBase, TUint aSlabType)
	: iBase(aBase),iSlabType(aSlabType)
	{
	TRACE2(("TVirtualSlab::TVirtualSlab(?,0x%08x,%d)",aBase, aSlabType));
	// Link this slab onto the owning slab set's per-type list.
	aHead.Add(&iLink);
	}
sl@0
   506
sl@0
   507
sl@0
   508
TVirtualSlab::~TVirtualSlab()
	{
	TRACE2(("TVirtualSlab::~TVirtualSlab base=0x%08x",iBase));
	// Unlink from the owning slab set's per-type list.
	iLink.Deque();
	}
sl@0
   513
sl@0
   514
sl@0
   515
TLinAddr TVirtualSlab::Alloc(TUint aSizeShift)
	{
	// Allocate a 2^aSizeShift byte region anywhere in this slab; returns the
	// address, or zero if the slab has no free region of that size.
	TRACE2(("TVirtualSlab::Alloc(%d)",aSizeShift));
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
	// convert byte size to bitmap allocation units...
	TUint bitmapShift = aSizeShift-KVirtualAllocShift;
	TInt bitIndex = iAllocator.Alloc(bitmapShift);
	TLinAddr result = (bitIndex>=0) ? iBase+(bitIndex<<KVirtualAllocShift) : 0;
	TRACE2(("TVirtualSlab::Alloc returns 0x%08x",result));
	return result;
	}
sl@0
   527
sl@0
   528
sl@0
   529
TInt TVirtualSlab::Alloc(TLinAddr aAddr, TUint aSizeShift)
	{
	// Allocate the specific 2^aSizeShift byte region starting at aAddr.
	TRACE2(("TVirtualSlab::Alloc(0x%08x,%d)",aAddr,aSizeShift));
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
	// convert to bitmap units...
	TUint bitmapShift = aSizeShift-KVirtualAllocShift;
	TUint bitIndex = (aAddr-iBase)>>KVirtualAllocShift;
	__NK_ASSERT_DEBUG(iBase+(bitIndex<<KVirtualAllocShift)==aAddr);
	TInt r = iAllocator.Alloc(bitIndex,bitmapShift);
	if(r<0)
		return r;
	TRACE2(("TVirtualSlab::Alloc returns 0x%08x",iBase+(r<<KVirtualAllocShift)));
	return r;
	}
sl@0
   542
sl@0
   543
sl@0
   544
TBool TVirtualSlab::Free(TLinAddr aAddr, TUint aSizeShift)
	{
	// Free the 2^aSizeShift byte region at aAddr; returns true when the slab
	// is left completely empty (so the caller may delete it).
	TRACE2(("TVirtualSlab::Free(0x%08x,%d)",aAddr,aSizeShift));
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
	TUint bitmapShift = aSizeShift-KVirtualAllocShift;
	TUint byteOffset = aAddr-iBase;
	TUint bitIndex = byteOffset>>KVirtualAllocShift;
	__NK_ASSERT_DEBUG((bitIndex<<KVirtualAllocShift)==byteOffset);
	return iAllocator.Free(bitIndex,bitmapShift);
	}
sl@0
   554
sl@0
   555
sl@0
   556
//
sl@0
   557
// RVirtualAllocSet
sl@0
   558
//
sl@0
   559
sl@0
   560
sl@0
   561
/**
Class used by #RVirtualAllocator for allocating virtual addresses which
have a size less than a 'chunk' (#KChunkSize).

This consists of a set of #TVirtualSlab objects.
*/
class RVirtualAllocSlabSet
	{
public:
	/**
	Create a new slab set for use with the specified allocator.

	@param aAllocator		The virtual address allocator which will use the slab set.
	@param aNumSlabTypes	The number of slab types this allocator will support.
	@param aWriteLock		Reference to the mutex which is being used to protect allocations
							with this object. This is only used for debug checks and may be
							a mutex assigned by #DMutexPool. In practice, this will usually be an
							address space lock DAddressSpace::iLock.

	@return The newly created #RVirtualAllocSlabSet or the null pointer if there was
			insufficient memory.
	*/
	static RVirtualAllocSlabSet* New(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock);

	~RVirtualAllocSlabSet();

	/**
	Allocate a region of virtual addresses.

	@param[in,out] aAddr	On entry, if this is non-zero it represents
							the start address a specific region to allocate.
							On exit, this is set to the start address of the region allocated.
	@param aSizeShift		Log2 of the size, in bytes, of the region.
	@param aSlabType		The 'slab type' of the address to be allocated.		

	@return KErrNone, if successful;
			KErrAlreadyExists, if any part of the region was already allocated.

	@pre The write lock must be held. (The \a aWriteLock argument for the constructor
		 #RVirtualAllocSlabSet::RVirtualAllocSlabSet.)
	*/
	TInt Alloc(TLinAddr& aAddr, TUint aSizeShift, TUint aSlabType);

	/**
	Free a region of virtual addresses.

	@param aAddr			The start address of the region.
	@param aSizeShift		Log2 of the size, in bytes, of the region.

	@pre The write lock must be held. (The \a aWriteLock argument for the constructor
		 #RVirtualAllocSlabSet::RVirtualAllocSlabSet.)
	*/
	void Free(TLinAddr aAddr, TUint aSizeShift);

	/**
	Return true if the the address region specified by \a aAddr and \a aSizeShift was
	allocated by this allocator using the specified \a aSlabType.

	@pre The write lock must be held. (The \a aWriteLock argument for the constructor
		 #RVirtualAllocSlabSet::RVirtualAllocSlabSet.)
	*/
	TBool CheckSlabType(TLinAddr aAddr, TUint aSizeShift, TUint aSlabType);

private:
	/**
	Create a new slab (#TVirtualSlab) for use by this slab set.
	Newly allocated slabs are added to #iLists[\a aSlabType].

	The virtual address range used by the slab is obtained by
	by allocating a slab sized region from #iAllocator.

	@param aAddr		A virtual address which must be in the region to be covered by the slab.
	@param aSlabType	The 'slab type'.
	*/
	TVirtualSlab* NewSlab(TLinAddr aAddr, TUint aSlabType);

	/**
	Delete a slab created with #NewSlab.
	*/
	void DeleteSlab(TVirtualSlab* aSlab);

	/**
	Constructor, for arguments see #New.
	*/
	RVirtualAllocSlabSet(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock);

private:
	/**
	The virtual allocator which is using this slab set.
	*/
	RVirtualAllocator* iAllocator;

	/**
	Container for all slabs owned by this slab set. This is keyed on the starting
	virtual address of the region each slab covers.

	Each slab in this container is also linked into the #iLists member appropriate
	to its slab type..
	*/
	RAddressedContainer iSlabs;

	/**
	The number of different 'slab types' this object can allocate addresses for.
	*/
	TUint iNumSlabTypes;

	/**
	An array of lists which each contain slabs of a single 'slab type'
	which this object has created. Slabs are linked by their TVirtualSlab::iLink 
	member.

	This may extend into memory beyond the end of this object and contains
	#iNumSlabTypes entries.

	Each slab in these lists is also contained in #iSlabs.
	*/
	SDblQue iLists[1];
	};
sl@0
   679
sl@0
   680
sl@0
   681
FORCE_INLINE RVirtualAllocSlabSet::RVirtualAllocSlabSet(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock)
	: iAllocator(aAllocator), iSlabs(0,aWriteLock), iNumSlabTypes(aNumSlabTypes)
	{
	// Construct one list head per slab type. Entries beyond iLists[0] occupy
	// the extra memory New() allocated past the end of this object.
	for(TUint type=0; type<aNumSlabTypes; ++type)
		new (&iLists[type]) SDblQue;
	}
sl@0
   687
sl@0
   688
sl@0
   689
RVirtualAllocSlabSet* RVirtualAllocSlabSet::New(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock)
	{
	// The object declares iLists[1]; the remaining aNumSlabTypes-1 list heads
	// live in extra memory allocated past the end of the object, so size the
	// extension by the iLists element type (SDblQue).
	// (Fixed: the size was previously computed from sizeof(iSlabs), which is
	// the wrong member - the extension holds SDblQue entries, not an
	// RAddressedContainer.)
	TUint size = sizeof(RVirtualAllocSlabSet)+sizeof(((RVirtualAllocSlabSet*)0x100)->iLists[0])*(aNumSlabTypes-1);
	RVirtualAllocSlabSet* set = (RVirtualAllocSlabSet*)Kern::AllocZ(size);
	if(set)
		new (set) RVirtualAllocSlabSet(aAllocator,aNumSlabTypes,aWriteLock);
	return set;
	}
sl@0
   697
sl@0
   698
sl@0
   699
RVirtualAllocSlabSet::~RVirtualAllocSlabSet()
	{
	// All slabs must have been freed (and hence deleted) before destruction.
	__NK_ASSERT_DEBUG(iSlabs.Count()==0);
	}
sl@0
   703
sl@0
   704
sl@0
   705
TVirtualSlab* RVirtualAllocSlabSet::NewSlab(TLinAddr aAddr, TUint aSlabType)
	{
	// (Fixed: trace format previously had three specifiers but only two
	// arguments, causing garbage to be read in trace-enabled builds.)
	TRACE2(("RVirtualAllocSlabSet::NewSlab(0x%08x,%d)",aAddr,aSlabType));
	__NK_ASSERT_DEBUG(aSlabType<iNumSlabTypes);

	TVirtualSlab* slab = 0;
	TLinAddr base;
	TUint size;
	// Reserve a whole slab-sized, slab-aligned region of address space,
	// covering aAddr, from the owning allocator.
	TInt r = iAllocator->Alloc(base,size,aAddr&~KVirtualAllocSlabMask,KVirtualAllocSlabSize,aSlabType);
	if(r==KErrNone)
		{
		slab = new TVirtualSlab(iLists[aSlabType],base,aSlabType);
		if(slab && iSlabs.Add(base,slab)!=KErrNone)
			{
			delete slab;
			slab = 0;
			}
		if(!slab)
			iAllocator->Free(base,KVirtualAllocSlabSize); // undo the reservation on failure
		}

	TRACE2(("RVirtualAllocSlabSet::NewSlab returns 0x%08x",slab));
	return slab;
	}
sl@0
   729
sl@0
   730
sl@0
   731
void RVirtualAllocSlabSet::DeleteSlab(TVirtualSlab* aSlab)
	{
	TLinAddr base = aSlab->Base();
	// In debug builds capture Remove()'s result so we can assert the
	// container held the expected slab; in release the call stands alone.
#ifdef _DEBUG
	TAny* removedSlab = 
#endif
	iSlabs.Remove(base);
	__NK_ASSERT_DEBUG(removedSlab==aSlab);
	delete aSlab;
	// Return the slab's whole address region to the owning allocator.
	iAllocator->Free(base,KVirtualAllocSlabSize);
	}
sl@0
   742
sl@0
   743
sl@0
   744
TInt RVirtualAllocSlabSet::Alloc(TLinAddr& aAddr, TUint aSizeShift, TUint aSlabType)
	{
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);
	__NK_ASSERT_DEBUG(aSlabType<iNumSlabTypes);

	if(!aAddr)
		{
		// No specific address requested: try each existing slab of this type...
		SDblQueLink* head = &iLists[aSlabType].iA;
		SDblQueLink* link = head;
		while((link=link->iNext)!=head)
			{
			TVirtualSlab* slab = _LOFF(link,TVirtualSlab,iLink);
			TLinAddr addr = slab->Alloc(aSizeShift);
			if(addr)
				{
				aAddr = addr;
				return KErrNone;
				}
			}
		// ...none had room, so create a new slab and allocate from it.
		TVirtualSlab* slab = NewSlab(0,aSlabType);
		if(!slab)
			return KErrNoMemory;
		TLinAddr addr = slab->Alloc(aSizeShift);
		if(!addr)
			return KErrNoMemory;
		aAddr = addr;
		return KErrNone;
		}

	// Specific address requested: find the slab covering it, creating one
	// if no slab covers that region yet...
	TVirtualSlab* slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
	if(!slab)
		{
		slab = NewSlab(aAddr,aSlabType);
		if(!slab)
			return KErrNoMemory;
		}
	else
		{
		if(slab->SlabType()!=aSlabType)
			return KErrAlreadyExists; // slab is of incompatible type
		}
	return slab->Alloc(aAddr,aSizeShift);
	}
sl@0
   787
sl@0
   788
sl@0
   789
void RVirtualAllocSlabSet::Free(TLinAddr aAddr, TUint aSizeShift)
	{
	// Free a sub-slab region; releasing the last allocation in a slab
	// deletes the slab and returns its address range to the allocator.
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);

	TVirtualSlab* slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
	if(!slab)
		return;
	TBool slabNowEmpty = slab->Free(aAddr,aSizeShift);
	if(slabNowEmpty)
		DeleteSlab(slab);
	}
sl@0
   798
sl@0
   799
sl@0
   800
TBool RVirtualAllocSlabSet::CheckSlabType(TLinAddr aAddr, TUint aSizeShift, TUint aSlabType)
	{
	// True only if a slab covers aAddr and it was created with aSlabType.
	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);

	TVirtualSlab* slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
	if(slab && slab->iSlabType==aSlabType)
		return true;

	if(!slab)
		{
		TRACE2(("RVirtualAllocSlabSet::CheckSlabType returns No Slab"));
		}
	else
		{
		TRACE2(("RVirtualAllocSlabSet::CheckSlabType returns Wrong Type"));
		}
	return false;
	}
sl@0
   819
sl@0
   820
sl@0
   821
//
sl@0
   822
// RVirtualAllocator
sl@0
   823
//
sl@0
   824
sl@0
   825
// Construct an empty allocator; the address range and sub-allocators are
// set up later by Construct().
RVirtualAllocator::RVirtualAllocator()
	: iBase(0), iSize(0), iAllocator(0), iSlabSet(0)
	{}
sl@0
   828
sl@0
   829
sl@0
   830
RVirtualAllocator::~RVirtualAllocator()
	{
	__NK_ASSERT_DEBUG(iAllocator==0 || iAllocator->iAvail==iAllocator->iSize); // should be empty
	// Kern::Free accepts null, so partially-constructed allocators are safe.
	Kern::Free(iAllocator);
	Kern::Free(iSlabSet);
	}
sl@0
   836
sl@0
   837
sl@0
   838
TInt RVirtualAllocator::Construct(TLinAddr aStart, TLinAddr aEnd, TUint aNumSlabTypes, DMutex*& aWriteLock)
	{
	// Both ends of the managed range must be slab aligned...
	if((aStart|aEnd)&KVirtualAllocSlabMask)
		return KErrArgument; // region not aligned to KVirtualAllocSlabSize
	// One bitmap bit per slab-sized region, all initially free...
	TUint slabCount = (aEnd-aStart)>>KVirtualAllocSlabShift;
	iAllocator = TBitMapAllocator::New(slabCount, ETrue);
	if(!iAllocator)
		return KErrNoMemory;
	// Sub-slab allocations are delegated to the slab set...
	iSlabSet = RVirtualAllocSlabSet::New(this,aNumSlabTypes,aWriteLock);
	if(!iSlabSet)
		return KErrNoMemory;
	iBase = aStart;
	iSize = aEnd-aStart;
	return KErrNone;
	}
sl@0
   853
sl@0
   854
sl@0
   855
TUint RVirtualAllocator::AdjustRegion(TLinAddr& aAddr, TUint& aSize)
	{
	// Expand the region [aAddr,aAddr+aSize) outwards to a granularity chosen
	// so that first and last addresses fall in the same granularity-sized
	// block (or the granularity reaches the slab size). Returns the log2
	// granularity; aAddr/aSize are updated to the aligned, rounded region.
	TLinAddr first = aAddr;
	TLinAddr last = (aAddr+aSize-1);
	TLinAddr dif = first^last;
	TUint granularity = KVirtualAllocShift;
	// Grow granularity while first and last still differ above it,
	// capped at KVirtualAllocSlabShift...
	while(dif>>granularity && ++granularity<KVirtualAllocSlabShift)
		{}
	first >>= granularity;
	last >>= granularity;
	aAddr = first<<granularity;
	aSize = (last-first+1)<<granularity;
	return granularity;
	}
sl@0
   869
sl@0
   870
sl@0
   871
TInt RVirtualAllocator::Alloc(TLinAddr& aAddr, TUint& aSize, TLinAddr aRequestedAddr, TUint aRequestedSize, TUint aSlabType)
	{
	TRACE2(("RVirtualAllocator::Alloc(?,?,0x%08x,0x%08x,%d)",aRequestedAddr,aRequestedSize,aSlabType));

	if(!aRequestedSize)
		{
		TRACE2(("RVirtualAllocator::Alloc zero size"));
		return KErrArgument;
		}

	// Round the request out to an aligned power-of-two-granularity region...
	aAddr = aRequestedAddr;
	aSize = aRequestedSize;
	TUint align = AdjustRegion(aAddr,aSize);
	TRACE2(("RVirtualAllocator::Alloc adjusted to 0x%08x+0x%08x, align=%d",aAddr,aSize,align));

	// Sub-slab sized regions come from the slab set...
	if(align<KVirtualAllocSlabShift)
		return iSlabSet->Alloc(aAddr,align,aSlabType);

	// Slab sized (or larger) regions come straight from the bitmap allocator;
	// such regions are not typed, so aSlabType is irrelevant here...
	__NK_ASSERT_DEBUG(align==KVirtualAllocSlabShift);
	TUint size = aSize>>KVirtualAllocSlabShift;

	if(!aAddr)
		{
		// no specific address requested; find any free run of slabs...
		TInt r = iAllocator->AllocConsecutive(size, EFalse);
		if(r>=0)
			{
			iAllocator->Alloc(r, size);
			aAddr = iBase+(r<<KVirtualAllocSlabShift);
			return KErrNone;
			}
		return KErrNoMemory;
		}

	// specific address requested...
	if(!InRange(aAddr,aSize))
		{
		TRACE2(("RVirtualAllocator::Alloc not in range"));
		return KErrArgument;
		}

	TUint offset = TUint(aAddr-iBase)>>KVirtualAllocSlabShift;
	if(!iAllocator->NotFree(offset,size))
		{
		iAllocator->Alloc(offset,size);
		return KErrNone;
		}
	else
		{
		TRACE2(("RVirtualAllocator::Alloc already allocated!"));
		return KErrAlreadyExists;
		}
	}
sl@0
   923
sl@0
   924
sl@0
   925
void RVirtualAllocator::Free(TLinAddr aAddr, TUint aSize)
	{
	if(!aSize)
		return;

	TRACE2(("RVirtualAllocator::Free(0x%08x,0x%08x)",aAddr,aSize));

	// Recompute the same adjusted region that Alloc() produced so the free
	// exactly matches the earlier allocation...
	TUint align = AdjustRegion(aAddr,aSize);
	TRACE2(("RVirtualAllocator::Free adjusted to 0x%08x+0x%08x, align=%d",aAddr,aSize,align));

	if(!InRange(aAddr,aSize))
		{
		TRACE2(("RVirtualAllocator::Free invalid region"));
		__NK_ASSERT_ALWAYS(0);
		return; // invalid region
		}

	// Sub-slab regions were allocated from the slab set...
	if(align<KVirtualAllocSlabShift)
		{
		iSlabSet->Free(aAddr,align);
		return;
		}

	// ...slab sized (or larger) regions from the bitmap allocator.
	__NK_ASSERT_DEBUG(align==KVirtualAllocSlabShift);
	TUint offset = (aAddr-iBase)>>KVirtualAllocSlabShift;
	TUint size = aSize>>KVirtualAllocSlabShift;
	iAllocator->Free(offset,size);
	}
sl@0
   953
sl@0
   954
sl@0
   955
TBool RVirtualAllocator::CheckSlabType(TLinAddr aAddr, TUint aSize, TUint aSlabType)
	{
	// Verify the region was allocated with the given slab type. Regions of
	// slab size or larger are untyped, so any type is accepted for them.
	TRACE2(("RVirtualAllocator::CheckSlabType(0x%08x,0x%08x,%d)",aAddr,aSize,aSlabType));
	if(!aSize)
		return false;

	TUint align = AdjustRegion(aAddr,aSize);

	if(!InRange(aAddr,aSize))
		{
		TRACE2(("RVirtualAllocator::CheckSlabType not in range"));
		return false;
		}

	if(align>=KVirtualAllocSlabShift)
		return true;
	return iSlabSet->CheckSlabType(aAddr,align,aSlabType);
	}
sl@0
   978
sl@0
   979
sl@0
   980
//
sl@0
   981
// RBackwardsVirtualAllocator
sl@0
   982
//
sl@0
   983
sl@0
   984
TInt RBackwardsVirtualAllocator::Alloc(TLinAddr& aAddr, TUint& aSize, TLinAddr aRequestedAddr, TUint aRequestedSize, TUint aSlabType)
	{
	// Mirror the requested region about the centre of the allocator's range,
	// delegate to the forward allocator, then mirror the result back; this
	// makes anonymous allocations grow downwards from the top of the range.
	if(aRequestedAddr)
		aRequestedAddr = (iBase+iSize)-(aRequestedAddr+aRequestedSize-iBase);
	TInt r = RVirtualAllocator::Alloc(aAddr,aSize,aRequestedAddr,aRequestedSize,aSlabType);
	if(r==KErrNone)
		aAddr = (iBase+iSize)-(aAddr+aSize-iBase);
	return r;
	}
sl@0
   993
sl@0
   994
sl@0
   995
void RBackwardsVirtualAllocator::Free(TLinAddr aAddr, TUint aSize)
	{
	// Mirror the region back to forward-allocator coordinates (the same
	// transform Alloc() applied) before freeing.
	RVirtualAllocator::Free((iBase+iSize)-(aAddr+aSize-iBase),aSize);
	}
sl@0
   999
sl@0
  1000