// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
#include <plat_priv.h>
#include "maddrcont.h"
/**
Log2 of the minimum granularity and alignment of virtual address allocation.
Must be greater than or equal to #KPageShift+#KPageColourShift.
*/
const TUint KVirtualAllocShift = KPageShift+KPageColourShift;
/**
Log2 of the size of the region covered by a single 'slab' of virtual addresses.
Must be greater than or equal to #KChunkShift.
*/
const TUint KVirtualAllocSlabShift = KChunkShift;
/**
Size, in bytes, of the region covered by a single 'slab' of virtual addresses.
*/
const TUint KVirtualAllocSlabSize = 1<<KVirtualAllocSlabShift;

const TUint KVirtualAllocSlabMask = KVirtualAllocSlabSize-1;
__ASSERT_COMPILE(KVirtualAllocShift>=KPageShift+KPageColourShift);
__ASSERT_COMPILE(KVirtualAllocSlabShift>=TUint(KChunkShift));
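// Illustrative note (not from the original source): with the ARMv6-style
// values KPageShift==12, KPageColourShift==2 and KChunkShift==20 (assumed
// here for concreteness), the constants above work out as:
//
//     KVirtualAllocShift     == 14          (allocation granularity of 16KB)
//     KVirtualAllocSlabShift == 20          (each slab covers 1MB)
//     KVirtualAllocSlabSize  == 0x00100000
//     KVirtualAllocSlabMask  == 0x000FFFFF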
#if defined(__GCCXML__)

FORCE_INLINE TUint CountLeadingZeroes(TUint32 /*aValue*/)
    {
    return 0; // dummy implementation for the GCC-XML parser build
    }
#elif defined(__MARM__)

#ifdef __ARMCC__
FORCE_INLINE TUint CountLeadingZeroes(TUint32 aValue)
    {
#if __ARMCC_VERSION < 310000
    TUint r;
    asm("clz r,aValue");
    return r;
#else
    // Inline assembler is deprecated in RVCT 3.1 so we use an intrinsic.
    return __clz(aValue);
#endif
    }
#endif // __ARMCC__

__declspec(naked) static TUint CountLeadingZeroes(TUint32)
#elif defined(__GNUC__)
FORCE_INLINE TUint CountLeadingZeroes(TUint32 aValue)
    {
    TUint r;
    asm("clz %0,%1" : "=r"(r) : "r"(aValue));
    return r;
    }
#else

inline TUint CountLeadingZeroes(TUint32 aValue)
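    {
    // The original body is not shown in this fragment; the following is an
    // illustrative portable fallback (binary search for the most significant
    // set bit).
    if(aValue==0)
        return 32;
    TUint count = 0;
    if(aValue<0x00010000u) { count += 16; aValue <<= 16; }
    if(aValue<0x01000000u) { count += 8; aValue <<= 8; }
    if(aValue<0x10000000u) { count += 4; aValue <<= 4; }
    if(aValue<0x40000000u) { count += 2; aValue <<= 2; }
    if(aValue<0x80000000u) { count += 1; }
    return count;
    }

#endif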
/**
Bitmap allocator for allocating regions whose size and alignment are a
power of two.
*/
class TLogAllocator
    {
public:
    TLogAllocator();
    /**
    Find and allocate a free region in the bitmap.

    @param aSizeShift Log2 of the number of bits to allocate.

    @return If successful, the index of the first bit allocated.
    */
    TInt Alloc(TUint aSizeShift);
    /**
    Allocate a specific region of bits.

    @param aIndex     The index of the first bit to allocate.
                      Must be an integer multiple of 2^aSizeShift.
    @param aSizeShift Log2 of the number of bits to allocate.

    @return KErrNone, if successful;
            KErrAlreadyExists, if any part of the region was already allocated.
    */
    TInt Alloc(TUint aIndex, TUint aSizeShift);
    /**
    Free a specific region of bits.

    @param aIndex     The index of the first bit to free.
                      Must be an integer multiple of 2^aSizeShift.
    @param aSizeShift Log2 of the number of bits to free.

    @return True, if the slab no longer has any bits allocated.
    */
    TBool Free(TUint aIndex, TUint aSizeShift);
private:
    enum
        {
        ENumBits = 1<<(KVirtualAllocSlabShift-KVirtualAllocShift),
        ENumWords = (ENumBits+31)/32
        };
    /**
    Number of bits which have been allocated.
    */
    TUint iAllocCount; // (member name assumed)
    /**
    Bitmap where a bit set to one indicates 'free' and a bit cleared to zero
    indicates 'allocated'. The most significant bit in each word has the lowest
    index:
    - Index 0 is bit 31 of iBits[0]
    - Index 31 is bit 0 of iBits[0]
    - Index 32 is bit 31 of iBits[1]
    */
    TUint32 iBits[ENumWords];
    };
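// Illustrative sketch (not part of the original file): under the MSB-first
// numbering described above, a bit index maps to a word and mask as shown by
// this hypothetical helper.
FORCE_INLINE void BitPositionForIndex(TUint aIndex, TUint& aWord, TUint32& aMask)
    {
    aWord = aIndex>>5;                // 32 bits per word
    aMask = 0x80000000u>>(aIndex&31); // index 0 is the most significant bit
    }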
TLogAllocator::TLogAllocator()
    : iAllocCount(0)
    {
    memset(iBits,~0u,sizeof(iBits)); // unallocated bits are set to one
    }
TInt TLogAllocator::Alloc(TUint aSizeShift)
    {
    TUint size = 1<<aSizeShift;
    __NK_ASSERT_DEBUG(size<=ENumBits); // check in range

    TUint32* bits = iBits;
    TUint32* bitsEnd = bits+ENumWords;
    TUint32 b;
    switch(aSizeShift)
        {
    // NOTE: the per-case search loops below are compact reconstructions; each
    // folds the word onto itself so that a surviving set bit marks a naturally
    // aligned run of 2^aSizeShift free bits.
    case 0: // find word with any unallocated bits...
        do { b = *bits; if(b) goto small_found; } while(++bits<bitsEnd);
        break;
    case 1: // find word with 2 adjacent unallocated bits...
        do { b = *bits; b &= b<<1; b &= 0xaaaaaaaau; if(b) goto small_found; } while(++bits<bitsEnd);
        break;
    case 2: // find word with 4 adjacent unallocated bits...
        do { b = *bits; b &= b<<1; b &= b<<2; b &= 0x88888888u; if(b) goto small_found; } while(++bits<bitsEnd);
        break;
    case 3: // find word with 8 adjacent unallocated bits...
        do { b = *bits; b &= b<<1; b &= b<<2; b &= b<<4; b &= 0x80808080u; if(b) goto small_found; } while(++bits<bitsEnd);
        break;
    case 4: // find word with 16 adjacent unallocated bits...
        do { b = *bits; b &= b<<1; b &= b<<2; b &= b<<4; b &= b<<8; b &= 0x80008000u; if(b) goto small_found; } while(++bits<bitsEnd);
        break;
    case 5: // find word which is totally unallocated (has 32 bits free)...
        do { b = *bits; if(b==0xffffffffu) goto small_found; } while(++bits<bitsEnd);
        break;
    default: // find relevant number of words which are unallocated...
        do
            {
            // AND words together...
            TUint32* end = (TUint32*)((TUint8*)bits+(size>>3));
            TUint32 b = 0xffffffffu;
            do b &= *bits++; while(bits<end);
            if(b==0xffffffffu)
                goto big_found; // all were free
            }
        while(bits<bitsEnd);
        break;
        }
    __NK_ASSERT_DEBUG(bits==bitsEnd);
    return KErrNotFound; // no suitable free region
small_found:
    {
    // find first position in word which has a free region (a bit set to one)...
    TUint offset = CountLeadingZeroes(b);

    // clear the allocated bits...
    TUint32 mask = 0xffffffffu;
    mask <<= 32-size;
    mask >>= offset;
    *bits &= ~mask;
    iAllocCount += size;

    // calculate index for allocated region...
    TUint index = (bits-iBits)*32+offset;
    return (TInt)index;
    }
big_found:
    {
    // mark all whole words in the region as allocated...
    TUint32* start = (TUint32*)((TUint8*)bits-(size>>3));
    do *--bits = 0; while(bits>start);
    iAllocCount += size;

    // calculate index for allocated region...
    TUint index = (bits-iBits)*32;
    return (TInt)index;
    }
    }
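// Illustrative worked example (not from the original source) of the folding
// search above, for aSizeShift==1 (a pair of adjacent bits). Suppose the word
// has indices 0, 2 and 5 allocated:
//
//     b                  == 0x5BFFFFFF
//     b &= b<<1;         // -> 0x13FFFFFE
//     b &= 0xaaaaaaaau;  // -> 0x02AAAAAA
//
// CountLeadingZeroes(0x02AAAAAA) is 6, so the first aligned free pair starts
// at index 6.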
TInt TLogAllocator::Alloc(TUint aIndex, TUint aSizeShift)
    {
    TUint size = 1<<aSizeShift;

    __NK_ASSERT_DEBUG(aIndex+size>aIndex); // check overflow
    __NK_ASSERT_DEBUG(aIndex+size<=ENumBits); // check in range
    __NK_ASSERT_DEBUG(((aIndex>>aSizeShift)<<aSizeShift)==aIndex); // check alignment
    TUint32* bits = iBits+(aIndex>>5);
    if(size<32)
        {
        // bits to allocate lie within a single word...
        TUint32 mask = 0xffffffffu;
        mask <<= 32-size;
        mask >>= aIndex&31;
        if((*bits&mask)!=mask)
            return KErrAlreadyExists;
        *bits &= ~mask;
        }
    else
        {
        // check all words in the region are free, then mark them allocated...
        TUint32* start = bits;
        TUint32* end = bits+(size>>5);
        do if(*bits++!=0xffffffffu) return KErrAlreadyExists;
        while(bits<end);
        bits = start;
        do *bits++ = 0; while(bits<end);
        }
    iAllocCount += size;
    return KErrNone;
    }
TBool TLogAllocator::Free(TUint aIndex, TUint aSizeShift)
    {
    TUint size = 1<<aSizeShift;

    __NK_ASSERT_DEBUG(aIndex+size>aIndex); // check overflow
    __NK_ASSERT_DEBUG(aIndex+size<=ENumBits); // check in range
    __NK_ASSERT_DEBUG(((aIndex>>aSizeShift)<<aSizeShift)==aIndex); // check alignment
    TUint32* bits = iBits+(aIndex>>5);
    if(size<32)
        {
        // bits to free lie within a single word...
        TUint32 mask = 0xffffffffu;
        mask <<= 32-size;
        mask >>= aIndex&31;
        TUint32 b = *bits;
        __NK_ASSERT_DEBUG((b&mask)==0); // check was allocated
        *bits = b|mask;
        }
    else
        {
        // free whole words...
        TUint wordCount = size>>5;
        do
            {
            __NK_ASSERT_DEBUG(bits[0]==0);
            *bits++ = 0xffffffffu;
            }
        while(--wordCount);
        }
    iAllocCount -= size;
    return !iAllocCount;
    }
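// Illustrative usage sketch (not part of the original file): sizes are passed
// as Log2 bit counts.
//
//     TLogAllocator allocator;
//     TInt index = allocator.Alloc(2); // any aligned run of 4 bits
//     if(index>=0)
//         allocator.Free(index,2);     // release the same run
//     TInt r = allocator.Alloc(8,3);   // bits 8..15 specifically
//     __NK_ASSERT_DEBUG(r==KErrNone);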
/**
Class for allocating virtual addresses contained in a single 'slab'.
@see RVirtualAllocSlabSet.
*/
class TVirtualSlab
    {
public:
    /**
    @param aHead     The head of a linked list of slabs to which this one should be added.
    @param aBase     The starting virtual address of the region covered by this slab.
    @param aSlabType The 'slab type'.
    */
    TVirtualSlab(SDblQue& aHead, TUint aBase, TUint aSlabType);

    ~TVirtualSlab();
    /**
    Find and allocate a free region of virtual addresses.

    @param aSizeShift Log2 of the size, in bytes, of the region.

    @return If successful, the allocated virtual address.
    */
    TLinAddr Alloc(TUint aSizeShift);
    /**
    Allocate a specific region of virtual addresses.

    @param aAddr      The start address of the region.
                      Must be an integer multiple of 2^aSizeShift.
    @param aSizeShift Log2 of the size, in bytes, of the region.

    @return KErrNone, if successful;
            KErrAlreadyExists, if any part of the region was already allocated.
    */
    TInt Alloc(TLinAddr aAddr, TUint aSizeShift);
    /**
    Free a specific region of virtual addresses.

    @param aAddr      The start address of the region.
                      Must be an integer multiple of 2^aSizeShift.
    @param aSizeShift Log2 of the size, in bytes, of the region.

    @return True, if the slab no longer has any addresses allocated.
    */
    TBool Free(TLinAddr aAddr, TUint aSizeShift);
    /**
    Return the starting virtual address of the region covered by this slab.
    */
    FORCE_INLINE TLinAddr Base() { return iBase; }
    /**
    Return this object's 'slab type'.
    */
    FORCE_INLINE TUint SlabType() { return iSlabType; }
private:
    /**
    Link object used to insert this slab into lists.
    */
    SDblQueLink iLink;

    /**
    The starting virtual address of the region covered by this slab.
    */
    TLinAddr iBase;

    /**
    This object's 'slab type'.
    */
    TUint iSlabType;

    /**
    Bitmap allocator used to allocate pages in this slab's virtual address region.
    */
    TLogAllocator iAllocator;

    friend class RVirtualAllocSlabSet;
    };
TVirtualSlab::TVirtualSlab(SDblQue& aHead, TUint aBase, TUint aSlabType)
    : iBase(aBase),iSlabType(aSlabType)
    {
    TRACE2(("TVirtualSlab::TVirtualSlab(?,0x%08x,%d)",aBase,aSlabType));
    aHead.Add(&iLink);
    }
TVirtualSlab::~TVirtualSlab()
    {
    TRACE2(("TVirtualSlab::~TVirtualSlab base=0x%08x",iBase));
    iLink.Deque();
    }
TLinAddr TVirtualSlab::Alloc(TUint aSizeShift)
    {
    TRACE2(("TVirtualSlab::Alloc(%d)",aSizeShift));
    __NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
    aSizeShift -= KVirtualAllocShift;
    TInt index = iAllocator.Alloc(aSizeShift);
    TLinAddr addr = 0;
    if(index>=0)
        addr = iBase+(index<<KVirtualAllocShift);
    TRACE2(("TVirtualSlab::Alloc returns 0x%08x",addr));
    return addr;
    }
TInt TVirtualSlab::Alloc(TLinAddr aAddr, TUint aSizeShift)
    {
    TRACE2(("TVirtualSlab::Alloc(0x%08x,%d)",aAddr,aSizeShift));
    __NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
    aSizeShift -= KVirtualAllocShift;
    TUint index = (aAddr-iBase)>>KVirtualAllocShift;
    __NK_ASSERT_DEBUG(iBase+(index<<KVirtualAllocShift)==aAddr);
    TInt r = iAllocator.Alloc(index,aSizeShift);
    if(r==KErrNone)
        TRACE2(("TVirtualSlab::Alloc returns 0x%08x",iBase+(index<<KVirtualAllocShift)));
    return r;
    }
TBool TVirtualSlab::Free(TLinAddr aAddr, TUint aSizeShift)
    {
    TRACE2(("TVirtualSlab::Free(0x%08x,%d)",aAddr,aSizeShift));
    __NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
    aSizeShift -= KVirtualAllocShift;
    TUint offset = aAddr-iBase;
    TUint index = offset>>KVirtualAllocShift;
    __NK_ASSERT_DEBUG((index<<KVirtualAllocShift)==offset);
    return iAllocator.Free(index,aSizeShift);
    }
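// Illustrative usage sketch (not part of the original file): a slab hands out
// naturally aligned power-of-two regions within its own range; the base
// address below is hypothetical.
//
//     SDblQue head;
//     TVirtualSlab slab(head,0x80000000u,0);
//     TLinAddr addr = slab.Alloc(KVirtualAllocShift); // smallest possible region
//     if(addr)
//         slab.Free(addr,KVirtualAllocShift);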
/**
Class used by #RVirtualAllocator for allocating virtual addresses which
have a size less than a 'chunk' (#KChunkSize).

This consists of a set of #TVirtualSlab objects.
*/
class RVirtualAllocSlabSet
    {
public:
    /**
    Create a new slab set for use with the specified allocator.

    @param aAllocator    The virtual address allocator which will use the slab set.
    @param aNumSlabTypes The number of slab types this allocator will support.
    @param aWriteLock    Reference to the mutex which is being used to protect allocations
                         with this object. This is only used for debug checks and may be
                         a mutex assigned by #DMutexPool. In practice, this will usually be
                         the address space lock DAddressSpace::iLock.

    @return The newly created #RVirtualAllocSlabSet or the null pointer if there was
            insufficient memory.
    */
    static RVirtualAllocSlabSet* New(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock);
    ~RVirtualAllocSlabSet();
    /**
    Allocate a region of virtual addresses.

    @param[in,out] aAddr On entry, if this is non-zero it represents
                         the start address of a specific region to allocate.
                         On exit, this is set to the start address of the region allocated.
    @param aSizeShift    Log2 of the size, in bytes, of the region.
    @param aSlabType     The 'slab type' of the address to be allocated.

    @return KErrNone, if successful;
            KErrAlreadyExists, if any part of the region was already allocated.

    @pre The write lock must be held. (The \a aWriteLock argument for the constructor
         #RVirtualAllocSlabSet::RVirtualAllocSlabSet.)
    */
    TInt Alloc(TLinAddr& aAddr, TUint aSizeShift, TUint aSlabType);
    /**
    Free a region of virtual addresses.

    @param aAddr      The start address of the region.
    @param aSizeShift Log2 of the size, in bytes, of the region.

    @pre The write lock must be held. (The \a aWriteLock argument for the constructor
         #RVirtualAllocSlabSet::RVirtualAllocSlabSet.)
    */
    void Free(TLinAddr aAddr, TUint aSizeShift);
    /**
    Return true if the address region specified by \a aAddr and \a aSizeShift was
    allocated by this allocator using the specified \a aSlabType.

    @pre The write lock must be held. (The \a aWriteLock argument for the constructor
         #RVirtualAllocSlabSet::RVirtualAllocSlabSet.)
    */
    TBool CheckSlabType(TLinAddr aAddr, TUint aSizeShift, TUint aSlabType);

private:
    /**
    Create a new slab (#TVirtualSlab) for use by this slab set.
    Newly allocated slabs are added to #iLists[\a aSlabType].

    The virtual address range used by the slab is obtained by
    allocating a slab-sized region from #iAllocator.

    @param aAddr     A virtual address which must be in the region to be covered by the slab.
    @param aSlabType The 'slab type'.
    */
    TVirtualSlab* NewSlab(TLinAddr aAddr, TUint aSlabType);
    /**
    Delete a slab created with #NewSlab.
    */
    void DeleteSlab(TVirtualSlab* aSlab);
    /**
    Constructor, for arguments see #New.
    */
    RVirtualAllocSlabSet(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock);
    /**
    The virtual allocator which is using this slab set.
    */
    RVirtualAllocator* iAllocator;
    /**
    Container for all slabs owned by this slab set. This is keyed on the starting
    virtual address of the region each slab covers.

    Each slab in this container is also linked into the #iLists member appropriate
    for its 'slab type'.
    */
    RAddressedContainer iSlabs;
    /**
    The number of different 'slab types' this object can allocate addresses for.
    */
    TUint iNumSlabTypes;
    /**
    An array of lists which each contain slabs of a single 'slab type'
    which this object has created. Slabs are linked by their TVirtualSlab::iLink
    member.

    This may extend into memory beyond the end of this object and contains
    #iNumSlabTypes entries.

    Each slab in these lists is also contained in #iSlabs.
    */
    SDblQue iLists[1]; // flexible array: actual length is iNumSlabTypes
    };
FORCE_INLINE RVirtualAllocSlabSet::RVirtualAllocSlabSet(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock)
    : iAllocator(aAllocator), iSlabs(0,aWriteLock), iNumSlabTypes(aNumSlabTypes)
    {
    while(aNumSlabTypes--)
        new (&iLists+aNumSlabTypes) SDblQue;
    }
RVirtualAllocSlabSet* RVirtualAllocSlabSet::New(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock)
    {
    TUint size = sizeof(RVirtualAllocSlabSet)+sizeof(((RVirtualAllocSlabSet*)0x100)->iLists)*(aNumSlabTypes-1);
    RVirtualAllocSlabSet* set = (RVirtualAllocSlabSet*)Kern::AllocZ(size);
    if(set)
        new (set) RVirtualAllocSlabSet(aAllocator,aNumSlabTypes,aWriteLock);
    return set;
    }
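// Illustrative sketch (not from the original source) of the sizing idiom used
// in New() above: the final one-element array member acts as a flexible
// array, so the object is over-allocated by one SDblQue per slab type beyond
// the first.
//
//     struct TExample            // hypothetical
//         {
//         TUint iCount;
//         SDblQue iLists[1];     // entries [1..iCount-1] live past the end
//         };
//     TUint bytes = sizeof(TExample)+sizeof(SDblQue)*(count-1);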
RVirtualAllocSlabSet::~RVirtualAllocSlabSet()
    {
    __NK_ASSERT_DEBUG(iSlabs.Count()==0);
    }
TVirtualSlab* RVirtualAllocSlabSet::NewSlab(TLinAddr aAddr, TUint aSlabType)
    {
    TRACE2(("RVirtualAllocSlabSet::NewSlab(0x%08x,%d)",aAddr,aSlabType));
    __NK_ASSERT_DEBUG(aSlabType<iNumSlabTypes);

    TVirtualSlab* slab = 0;
    TLinAddr base;
    TUint size;
    TInt r = iAllocator->Alloc(base,size,aAddr&~KVirtualAllocSlabMask,KVirtualAllocSlabSize,aSlabType);
    if(r==KErrNone)
        {
        slab = new TVirtualSlab(iLists[aSlabType],base,aSlabType);
        if(slab && iSlabs.Add(base,slab)!=KErrNone)
            {
            delete slab;
            slab = 0;
            }
        if(!slab)
            iAllocator->Free(base,KVirtualAllocSlabSize);
        }
    TRACE2(("RVirtualAllocSlabSet::NewSlab returns 0x%08x",slab));
    return slab;
    }
void RVirtualAllocSlabSet::DeleteSlab(TVirtualSlab* aSlab)
    {
    TLinAddr base = aSlab->Base();
#ifdef _DEBUG
    TAny* removedSlab =
#endif
        iSlabs.Remove(base); // (Remove is assumed RAddressedContainer API)
    __NK_ASSERT_DEBUG(removedSlab==aSlab);
    delete aSlab;
    iAllocator->Free(base,KVirtualAllocSlabSize);
    }
TInt RVirtualAllocSlabSet::Alloc(TLinAddr& aAddr, TUint aSizeShift, TUint aSlabType)
    {
    __NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);
    __NK_ASSERT_DEBUG(aSlabType<iNumSlabTypes);

    if(!aAddr)
        {
        // no specific address requested; try each existing slab of the right type...
        SDblQueLink* head = &iLists[aSlabType].iA;
        SDblQueLink* link = head;
        while((link=link->iNext)!=head)
            {
            TVirtualSlab* slab = _LOFF(link,TVirtualSlab,iLink);
            TLinAddr addr = slab->Alloc(aSizeShift);
            if(addr)
                {
                aAddr = addr;
                return KErrNone;
                }
            }
        // no space in any existing slab, so create a new one...
        TVirtualSlab* slab = NewSlab(0,aSlabType);
        if(!slab)
            return KErrNoMemory;
        TLinAddr addr = slab->Alloc(aSizeShift);
        __NK_ASSERT_DEBUG(addr); // a new slab can always satisfy the request
        aAddr = addr;
        return KErrNone;
        }
    // specific address requested...
    TVirtualSlab* slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
    if(!slab)
        {
        slab = NewSlab(aAddr,aSlabType);
        if(!slab)
            return KErrNoMemory;
        }
    else if(slab->SlabType()!=aSlabType)
        return KErrAlreadyExists; // slab is of incompatible type

    return slab->Alloc(aAddr,aSizeShift);
    }
void RVirtualAllocSlabSet::Free(TLinAddr aAddr, TUint aSizeShift)
    {
    __NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);

    TVirtualSlab* slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
    __NK_ASSERT_DEBUG(slab);
    if(slab->Free(aAddr,aSizeShift))
        DeleteSlab(slab); // slab is now completely empty, so destroy it
    }
TBool RVirtualAllocSlabSet::CheckSlabType(TLinAddr aAddr, TUint aSizeShift, TUint aSlabType)
    {
    __NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);

    TVirtualSlab* slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
    if(!slab)
        {
        TRACE2(("RVirtualAllocSlabSet::CheckSlabType returns No Slab"));
        return EFalse;
        }

    if(slab->iSlabType!=aSlabType)
        {
        TRACE2(("RVirtualAllocSlabSet::CheckSlabType returns Wrong Type"));
        return EFalse;
        }

    return ETrue;
    }
//
// RVirtualAllocator
//

RVirtualAllocator::RVirtualAllocator()
    : iBase(0), iSize(0), iAllocator(0), iSlabSet(0)
    {
    }
RVirtualAllocator::~RVirtualAllocator()
    {
    __NK_ASSERT_DEBUG(iAllocator==0 || iAllocator->iAvail==iAllocator->iSize); // should be empty
    Kern::Free(iAllocator);
    Kern::Free(iSlabSet);
    }
TInt RVirtualAllocator::Construct(TLinAddr aStart, TLinAddr aEnd, TUint aNumSlabTypes, DMutex*& aWriteLock)
    {
    if((aStart|aEnd)&KVirtualAllocSlabMask)
        return KErrArgument; // region not aligned to KVirtualAllocSlabSize
    TUint bitSize = (aEnd-aStart)>>KVirtualAllocSlabShift;
    iAllocator = TBitMapAllocator::New(bitSize, ETrue);
    if(!iAllocator)
        return KErrNoMemory;
    iSlabSet = RVirtualAllocSlabSet::New(this,aNumSlabTypes,aWriteLock);
    if(!iSlabSet)
        return KErrNoMemory;
    iBase = aStart;
    iSize = aEnd-aStart;
    return KErrNone;
    }
TUint RVirtualAllocator::AdjustRegion(TLinAddr& aAddr, TUint& aSize)
    {
    TLinAddr first = aAddr;
    TLinAddr last = (aAddr+aSize-1);
    TLinAddr dif = first^last;
    TUint granularity = KVirtualAllocShift;
    while(dif>>granularity && ++granularity<KVirtualAllocSlabShift)
        {}
    first >>= granularity;
    last >>= granularity;
    aAddr = first<<granularity;
    aSize = (last-first+1)<<granularity;
    return granularity;
    }
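// Illustrative worked example (not from the original source), assuming
// KVirtualAllocShift==14 and KVirtualAllocSlabShift==20: for aAddr=0x80005000,
// aSize=0x3000 we get first=0x80005000, last=0x80007FFF, dif=0x2FFF; dif>>14
// is zero, so granularity stays at 14 and the region is widened to
// aAddr=0x80004000, aSize=0x4000, i.e. one 16KB granule. A region whose first
// and last addresses differ in bit 19 or above is instead widened to whole
// slabs (granularity==KVirtualAllocSlabShift).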
TInt RVirtualAllocator::Alloc(TLinAddr& aAddr, TUint& aSize, TLinAddr aRequestedAddr, TUint aRequestedSize, TUint aSlabType)
    {
    TRACE2(("RVirtualAllocator::Alloc(?,?,0x%08x,0x%08x,%d)",aRequestedAddr,aRequestedSize,aSlabType));

    if(!aRequestedSize)
        {
        TRACE2(("RVirtualAllocator::Alloc zero size"));
        return KErrArgument;
        }

    aAddr = aRequestedAddr;
    aSize = aRequestedSize;
    TUint align = AdjustRegion(aAddr,aSize);
    TRACE2(("RVirtualAllocator::Alloc adjusted to 0x%08x+0x%08x, align=%d",aAddr,aSize,align));

    if(align<KVirtualAllocSlabShift)
        return iSlabSet->Alloc(aAddr,align,aSlabType); // smaller than a slab, use the slab set

    __NK_ASSERT_DEBUG(align==KVirtualAllocSlabShift);
    TUint size = aSize>>KVirtualAllocSlabShift;
    if(!aAddr)
        {
        // any address will do...
        TInt r = iAllocator->AllocConsecutive(size, EFalse);
        if(r<0)
            return KErrNoMemory;
        iAllocator->Alloc(r, size);
        aAddr = iBase+(r<<KVirtualAllocSlabShift);
        return KErrNone;
        }
    // specific address requested...
    if(!InRange(aAddr,aSize))
        {
        TRACE2(("RVirtualAllocator::Alloc not in range"));
        return KErrArgument;
        }
    TUint offset = TUint(aAddr-iBase)>>KVirtualAllocSlabShift;
    if(!iAllocator->NotFree(offset,size))
        {
        iAllocator->Alloc(offset,size);
        return KErrNone;
        }

    TRACE2(("RVirtualAllocator::Alloc already allocated!"));
    return KErrAlreadyExists;
    }
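// Illustrative usage sketch (not part of the original file): regions smaller
// than a slab are delegated to the slab set, slab-sized regions to the bitmap
// allocator; either way the caller receives the (possibly widened) region
// that was actually reserved.
//
//     RVirtualAllocator va;                     // assumed already Construct()ed
//     TLinAddr addr;
//     TUint size;
//     TInt r = va.Alloc(addr,size,0,0x1000,0);  // any address, 4KB, slab type 0
//     if(r==KErrNone)
//         va.Free(addr,size);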
void RVirtualAllocator::Free(TLinAddr aAddr, TUint aSize)
    {
    if(!aSize)
        return;

    TRACE2(("RVirtualAllocator::Free(0x%08x,0x%08x)",aAddr,aSize));

    TUint align = AdjustRegion(aAddr,aSize);
    TRACE2(("RVirtualAllocator::Free adjusted to 0x%08x+0x%08x, align=%d",aAddr,aSize,align));

    if(!InRange(aAddr,aSize))
        {
        TRACE2(("RVirtualAllocator::Free invalid region"));
        __NK_ASSERT_ALWAYS(0);
        return; // invalid region
        }

    if(align<KVirtualAllocSlabShift)
        {
        iSlabSet->Free(aAddr,align);
        return;
        }

    __NK_ASSERT_DEBUG(align==KVirtualAllocSlabShift);
    TUint offset = (aAddr-iBase)>>KVirtualAllocSlabShift;
    TUint size = aSize>>KVirtualAllocSlabShift;
    iAllocator->Free(offset,size);
    }
TBool RVirtualAllocator::CheckSlabType(TLinAddr aAddr, TUint aSize, TUint aSlabType)
    {
    TRACE2(("RVirtualAllocator::CheckSlabType(0x%08x,0x%08x,%d)",aAddr,aSize,aSlabType));

    TUint align = AdjustRegion(aAddr,aSize);

    if(!InRange(aAddr,aSize))
        {
        TRACE2(("RVirtualAllocator::CheckSlabType not in range"));
        return EFalse;
        }

    if(align<KVirtualAllocSlabShift)
        return iSlabSet->CheckSlabType(aAddr,align,aSlabType);

    return ETrue; // slab-sized regions have no 'slab type' to check
    }
//
// RBackwardsVirtualAllocator
//
TInt RBackwardsVirtualAllocator::Alloc(TLinAddr& aAddr, TUint& aSize, TLinAddr aRequestedAddr, TUint aRequestedSize, TUint aSlabType)
    {
    if(aRequestedAddr)
        {
        // mirror the requested region within the allocator's address range...
        aRequestedAddr = (iBase+iSize)-(aRequestedAddr+aRequestedSize-iBase);
        }
    TInt r = RVirtualAllocator::Alloc(aAddr,aSize,aRequestedAddr,aRequestedSize,aSlabType);
    if(r==KErrNone)
        {
        // mirror the result back again...
        aAddr = (iBase+iSize)-(aAddr+aSize-iBase);
        }
    return r;
    }
void RBackwardsVirtualAllocator::Free(TLinAddr aAddr, TUint aSize)
    {
    RVirtualAllocator::Free((iBase+iSize)-(aAddr+aSize-iBase),aSize);
    }
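// Illustrative worked example (not from the original source), using
// hypothetical values iBase=0x400000 and iSize=0x100000: a request for
// 0x10000 bytes at 0x410000 is mirrored to
// (0x400000+0x100000)-(0x410000+0x10000-0x400000) == 0x4E0000 before the
// normal forwards allocation, and the result is mirrored back the same way,
// so addresses are handed out from the top of the region downwards.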