// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\mmubase\ramalloc.cpp
//
//

/**
@file
@internalComponent
*/
//#define __VERIFY_LEASTMOVDIS

#include <plat_priv.h>
#include <ramalloc.h>
#include <e32btrace.h>

#ifndef __MEMMODEL_FLEXIBLE__
#include <mmubase.inl>
#else
#include "mdefrag.inl"
#endif //__MEMMODEL_FLEXIBLE__

DRamAllocator* DRamAllocator::New()
    {
    return new DRamAllocator;
    }

DRamAllocator* DRamAllocator::New(const SRamInfo& aInfo, const SRamZone* aZoneInfo, TRamZoneCallback aZoneCallback)
    {
    DRamAllocator* pA=New();
    if (!pA)
        Panic(ECreateNoMemory);
    // If Create() fails it panics rather than returning.
    pA->Create(aInfo,aZoneInfo, aZoneCallback);
    return pA;
    }

void DRamAllocator::Panic(TPanic aPanic)
    {
    Kern::Fault("RAM-ALLOC", aPanic);
    }

#ifdef KMMU
void HexDump32(const TAny* a, TInt n, const char* s)
    {
    const TUint32* p=(const TUint32*)a;
    Kern::Printf(s);
    TInt i=0;
    while(n)
        {
        TBuf8<80> b;
        b.AppendNumFixedWidth(i,EHex,4);
        b.Append(':');
        TInt m=Min(n,4);
        n-=m;
        i+=m;
        while(m--)
            {
            b.Append(' ');
            b.AppendNumFixedWidth(*p++,EHex,8);
            }
        Kern::Printf("%S",&b);
        }
    }

void HexDump8(const TAny* a, TInt n, const char* s)
    {
    const TUint8* p=(const TUint8*)a;
    Kern::Printf(s);
    TInt i=0;
    while(n)
        {
        TBuf8<80> b;
        b.AppendNumFixedWidth(i,EHex,4);
        b.Append(':');
        TInt m=Min(n,16);
        n-=m;
        i+=m;
        while(m--)
            {
            b.Append(' ');
            b.AppendNumFixedWidth(*p++,EHex,2);
            }
        Kern::Printf("%S",&b);
        }
    }

void DRamAllocator::DebugDump()
    {
    Kern::Printf("PageSize=%08x PageShift=%d",KPageSize,KPageShift);
    Kern::Printf("Total Pages=%x Total Free=%x",iTotalRamPages,iTotalFreeRamPages);
    Kern::Printf("Number of zones=%d, PowerState=%016lx",iNumZones,iZonePwrState);
    Kern::Printf("PhysAddrBase=%08x, PhysAddrTop=%08x",iPhysAddrBase,iPhysAddrTop);

    TUint i = 0;
    Kern::Printf("Zone Info:");
    for (; i<iNumZones; ++i)
        {
        SZone& z=iZones[i];
        TBitMapAllocator& b = *(z.iBma[KBmaAllPages]);
        Kern::Printf("%x: Avail %x Size %x Phys %08x PhysEnd %08x ID %08x FreePage %x Pref %02x",i,b.iAvail,b.iSize,
                     z.iPhysBase, z.iPhysEnd, z.iId,z.iFreePages, z.iPref);
        Kern::Printf("Allocated Unknown %x Fixed %x Movable %x Discardable %x",iZones[i].iAllocPages[EPageUnknown],iZones[i].iAllocPages[EPageFixed],
                     iZones[i].iAllocPages[EPageMovable],iZones[i].iAllocPages[EPageDiscard]);
        }

    Kern::Printf("Zone pref order:");
    SDblQueLink* link = iZonePrefList.First();
    for (; link != &iZonePrefList.iA; link = link->iNext)
        {
        SZone& zone = *_LOFF(link, SZone, iPrefLink);
        Kern::Printf("ID0x%x rank0x%x", zone.iId, zone.iPrefRank);
        }
    SZone& zone = *_LOFF(iZoneLeastMovDis, SZone, iPrefLink);
    Kern::Printf("iZoneLeastMovDis ID 0x%x rank 0x%x", zone.iId, iZoneLeastMovDisRank);
    }
#endif

TInt CountBanks(const SRamBank* aBankList)
    {
    TInt banks=0;
    for (; aBankList->iSize; ++banks, ++aBankList);
    return banks;
    }

TUint32 TotalBankSize(const SRamBank* aBankList)
    {
    TUint32 size=0;
    for (; aBankList->iSize; ++aBankList)
        size+=aBankList->iSize;
    return size;
    }

/**
Count how many zones have been specified and do some basic checks on their layout:
 - zones must be distinct, i.e. must not overlap
 - zone IDs must be unique
 - zones must be page size aligned
 - zones must be big enough to cover all of the allocatable RAM
The end of the list is indicated by an SRamZone.iSize of 0.
@param aZones The list of RAM zones to be set up
*/
void DRamAllocator::CountZones(const SRamZone* aZones)
    {
    TUint32 totalSize = 0;
    TUint32 pageMask = KPageSize-1;
    // Check that zones don't overlap each other, and count how many there are
    // while running through them.
    const SRamZone* pCurZ = aZones;
    for (; pCurZ->iSize != 0; pCurZ++)
        {
        // Verify zone addresses and alignment
        TUint32 curEnd = pCurZ->iBase + pCurZ->iSize - 1;
        __KTRACE_OPT(KMMU,Kern::Printf("curBase %x curEnd %x pageMask %x",pCurZ->iBase,curEnd,pageMask));
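        // This test rejects zero-sized or wrapping zones (curEnd <= iBase
        // after 32-bit arithmetic) and any zone whose base or size is not page
        // aligned: ORing the exclusive end (curEnd + 1) with the base and
        // masking with KPageSize-1 leaves non-zero bits if either is
        // misaligned. For example (illustrative values, assuming 4KB pages),
        // iBase=0x10001000 with iSize=0x100000 passes, while iBase=0x10000800
        // fails the alignment part of the check.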
        if (curEnd <= pCurZ->iBase || (((curEnd + 1) | pCurZ->iBase) & pageMask))
            {
            Panic(EZonesAlignment);
            }

        if (pCurZ->iId == KRamZoneInvalidId)
            {
            Panic(EZonesIDInvalid);
            }
        // Check the flags are not set to invalid values
        if (pCurZ->iFlags & KRamZoneFlagInvalid)
            {
            Panic(EZonesFlagsInvalid);
            }

        iNumZones++;
        if (iNumZones > KMaxRamZones)
            {// Too many zones specified
            Panic(EZonesTooNumerousOrFew);
            }
        totalSize += pCurZ->iSize;

        // Verify this zone doesn't overlap any of the previous zones' address space
        const SRamZone* pTmpZ = aZones;
        for (; pTmpZ < pCurZ; pTmpZ++)
            {
            TUint32 tmpEnd = pTmpZ->iBase + pTmpZ->iSize - 1;
            if (tmpEnd >= pCurZ->iBase && pTmpZ->iBase <= curEnd)
                {
                Panic(EZonesNotDistinct);
                }
            if(pTmpZ->iId == pCurZ->iId)
                {
                Panic(EZonesIDNotUnique);
                }
            }
        }
    __KTRACE_OPT(KMMU,Kern::Printf("iNumZones=%d, totalSize=%x",iNumZones,totalSize));
    if (!iNumZones)
        {// no zones specified
        Panic(EZonesTooNumerousOrFew);
        }

    // Together all of the zones should cover the whole of the RAM
    if (totalSize>>KPageShift < iTotalRamPages)
        {
        Panic(EZonesIncomplete);
        }
    }

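// Note: ZoneFromId() below is a linear scan of iZones; the zone count is small
// (bounded by KMaxRamZones, as enforced in CountZones()), so a fancier lookup
// structure isn't warranted.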
/**
Get the zone from the ID
@param aId ID of the zone to find
@return Pointer to the zone if a zone of matching ID is found, NULL otherwise
*/
SZone* DRamAllocator::ZoneFromId(TUint aId) const
    {
    SZone* pZ = iZones;
    const SZone* const pEndZone = iZones + iNumZones;
    for (; pZ < pEndZone; pZ++)
        {
        if (aId == pZ->iId)
            {
            return pZ;
            }
        }
    return NULL;
    }

/** Retrieve the physical base address and number of pages in the specified zone.

@param aZoneId The ID of the zone
@param aPhysBase Receives the base address of the zone
@param aNumPages Receives the number of pages in the zone

@return KErrNone if the zone was found, KErrArgument if it couldn't be found
*/
TInt DRamAllocator::GetZoneAddress(TUint aZoneId, TPhysAddr& aPhysBase, TUint& aNumPages)
    {
    SZone* zone = ZoneFromId(aZoneId);
    if (zone == NULL)
        {
        return KErrArgument;
        }
    aPhysBase = zone->iPhysBase;
    aNumPages = zone->iPhysPages;
    return KErrNone;
    }

#ifdef __MEMMODEL_FLEXIBLE__
/**
@param aAddr The address of the page to find the zone of
@param aOffset The page offset from the start of the zone that the page is in
*/
SZone* DRamAllocator::GetZoneAndOffset(TPhysAddr aAddr, TInt& aOffset)
    {
    // Get the zone from the SPageInfo of the page at aAddr
    SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr);
    if (pageInfo == NULL)
        {
        return NULL;
        }

    // Perform a binary search for the RAM zone; we know aAddr is within a RAM
    // zone as pageInfo != NULL.
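    // Invariant: the containing zone always lies within [left, top], because
    // iZones is held in ascending physical address order; each step halves
    // that range, and since pageInfo != NULL guarantees some zone contains
    // aAddr, the loop terminates at that zone.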
    SZone* left = iZones;
    SZone* mid = iZones + (iNumZones>>1);
    SZone* top = iZones + iNumZones - 1;

    while (mid->iPhysEnd < aAddr || mid->iPhysBase > aAddr)
        {
        if (mid->iPhysEnd < aAddr)
            left = mid + 1;
        else
            top = mid - 1;
        mid = left + ((top - left) >> 1);
        __ASSERT_DEBUG(left <= top && mid <= top && mid >= left, Panic(EAllocRamPagesInconsistent));
        }
    __ASSERT_DEBUG(mid->iPhysBase <= aAddr && mid->iPhysEnd >= aAddr, Panic(EAllocRamPagesInconsistent));
    aOffset = (aAddr - mid->iPhysBase) >> KPageShift;
    __ASSERT_DEBUG((TUint)aOffset < mid->iPhysPages, Panic(EAllocRamPagesInconsistent));
    return mid;
    }
#else
/**
@param aAddr The address of the page to find the zone of
@param aOffset The page offset from the start of the zone that the page is in
*/
SZone* DRamAllocator::GetZoneAndOffset(TPhysAddr aAddr, TInt& aOffset)
    {
    // Get the zone from the SPageInfo of the page at aAddr
    SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr);
    if (pageInfo == NULL)
        {
        return NULL;
        }
    SZone* z = iZones + pageInfo->Zone();
    aOffset = (aAddr - z->iPhysBase) >> KPageShift;
    __ASSERT_DEBUG((TUint)aOffset < z->iPhysPages, Panic(EAllocRamPagesInconsistent));
    return z;
    }
#endif
/**
@param aId ID of the zone to get page counts for
@param aPageData Store for the page counts
@return KErrNone if the zone was found, KErrArgument otherwise
*/
TInt DRamAllocator::GetZonePageCount(TUint aId, SRamZonePageCount& aPageData)
    {
    // Search for the zone of ID aId
    const SZone* zone = ZoneFromId(aId);
    if (zone == NULL)
        {
        return KErrArgument;
        }
    aPageData.iFreePages = zone->iFreePages;
    aPageData.iUnknownPages = zone->iAllocPages[EPageUnknown];
    aPageData.iFixedPages = zone->iAllocPages[EPageFixed];
    aPageData.iMovablePages = zone->iAllocPages[EPageMovable];
    aPageData.iDiscardablePages = zone->iAllocPages[EPageDiscard];

    return KErrNone;
    }


/** Update the count of free and allocated pages for the zone.
@param aZone The zone whose counts are being updated
@param aCount The number of pages being allocated
@param aType The type of the pages being allocated
*/
void DRamAllocator::ZoneAllocPages(SZone* aZone, TUint32 aCount, TZonePageType aType)
    {
#ifdef _DEBUG
    TUint32 free = aZone->iFreePages - aCount;
    TUint32 alloc = aZone->iAllocPages[aType] + aCount;
    TUint32 total_alloc = aZone->iAllocPages[EPageUnknown] +
                          aZone->iAllocPages[EPageDiscard] +
                          aZone->iAllocPages[EPageMovable] +
                          aZone->iAllocPages[EPageFixed] + aCount;
    if (free > aZone->iFreePages ||
        alloc < aZone->iAllocPages[aType] ||
        free + total_alloc != aZone->iPhysPages ||
        iTotalFreeRamPages > iTotalRamPages)
        {
        __KTRACE_OPT(KMMU,Kern::Printf("TotalFree %x TotalPages %x",iTotalFreeRamPages, iTotalRamPages));
        __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocPages - aCount %x free %x, alloc %x",aCount,free,alloc)); // counts rolled over
        __KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
                     aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
        Panic(EZonesCountErr);
        }
    __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
    __KTRACE_OPT(KMMU2,Kern::Printf("ZoneAllocPages - aCount %x free %x, alloc %x",aCount,free,alloc));
    __KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
                 aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));

    if (iAllowBmaVerify)
        {
        TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
        TUint allocPages;
        if (aType == EPageFixed || aType == EPageUnknown)
            allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed];
        else
            allocPages = aZone->iAllocPages[aType];
        allocPages += aCount;
        __NK_ASSERT_DEBUG(aZone->iPhysPages - bmaType.iAvail == allocPages);
        __NK_ASSERT_DEBUG((TUint)bmaType.iAvail >= aZone->iFreePages - aCount);

//#define _FULL_VERIFY_TYPE_BMAS
#ifdef _FULL_VERIFY_TYPE_BMAS
        TUint offset = 0;
        TUint matchedPages = 0;
        TInt r = KErrNone;
        while (offset < aZone->iPhysPages && r == KErrNone)
            {
            r = NextAllocatedPage(aZone, offset, EPageTypes);
            if (bmaType.NotFree(offset, 1))
                {
                matchedPages++;
                }
            offset++;
            }
        __NK_ASSERT_DEBUG(matchedPages == allocPages);
#endif
        }
#endif

    // Update counts
    aZone->iAllocPages[aType] += aCount;
    aZone->iFreePages -= aCount;
    aZone->iFlags &= ~KRamZoneFlagMark; // clear the mark as this zone is active

    // Check if the power state of the zone needs to be changed
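    // iZonePwrState holds one bit per RAM zone, indexed by the zone's position
    // in the iZones array; note that shift binds looser than subtraction, so
    // "1 << aZone - iZones" parses as "1 << (aZone - iZones)". A clear bit
    // means the zone is treated as empty/powered down, so the first allocation
    // into such a zone must ask the variant to power it up.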
    if (iZonePowerFunc && !(iZonePwrState & (((TUint64)1) << aZone - iZones)))
        {// zone no longer empty so call the variant to power the RAM zone up if necessary
        iZonePwrState |= (((TUint64)1) << aZone - iZones);

        if (iZoneCallbackInitSent)
            {
            TInt ret = (*iZonePowerFunc)(ERamZoneOp_PowerUp, (TAny*)aZone->iId, (TUint*)&iZonePwrState);
            if (ret != KErrNone && ret != KErrNotSupported)
                {
                Panic(EZonesCallbackErr);
                }
            CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::ZoneAllocPages");
            }
        }

    // Re-order the zone preference list so that a RAM zone with more immovable
    // pages is more preferable and, secondary to that, a RAM zone that is not
    // empty is more preferable than one that is empty.
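    // This is a single bubble-up pass: aZone keeps swapping with its
    // more-preferred neighbour while they tie on iPref and aZone either holds
    // more fixed/unknown (immovable) pages than the neighbour or the neighbour
    // is completely empty. The iPrefRank values are adjusted with each swap so
    // that a zone's rank always matches its position in the list.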
    while (&aZone->iPrefLink != iZonePrefList.First())
        {
        SZone* prevZ = _LOFF(aZone->iPrefLink.iPrev, SZone, iPrefLink);
        __NK_ASSERT_DEBUG(K::Initialising || prevZ->iPrefRank == aZone->iPrefRank - 1);
        if (prevZ->iPref == aZone->iPref &&
            (prevZ->iAllocPages[EPageFixed] + prevZ->iAllocPages[EPageUnknown] <
             aZone->iAllocPages[EPageFixed] + aZone->iAllocPages[EPageUnknown] ||
             prevZ->iFreePages == prevZ->iPhysPages))
            {
            __KTRACE_OPT(KMMU, Kern::Printf("a - Reorder aZone 0x%x free 0x%x before prevZ 0x%x free 0x%x", aZone->iId, aZone->iFreePages, prevZ->iId, prevZ->iFreePages));
            // Make this RAM zone more preferable.
            aZone->iPrefLink.Deque();
            aZone->iPrefLink.InsertBefore(&prevZ->iPrefLink);
            aZone->iPrefRank--;
            prevZ->iPrefRank++;

            if (iZoneLeastMovDis == &prevZ->iPrefLink)
                {// Ensure iZoneLeastMovDisRank is kept up to date.
                iZoneLeastMovDisRank = prevZ->iPrefRank;
                }
            if (iZoneLeastMovDis == &aZone->iPrefLink)
                {// Ensure iZoneLeastMovDisRank is kept up to date.
                iZoneLeastMovDisRank = aZone->iPrefRank;
                // aZone was the least preferable with movable and/or discardable, so is it still?
                if (prevZ->iAllocPages[EPageMovable] || prevZ->iAllocPages[EPageDiscard])
                    {// prevZ is now the least preferable RAM zone with movable and/or discardable.
                    iZoneLeastMovDis = &prevZ->iPrefLink;
                    iZoneLeastMovDisRank = prevZ->iPrefRank;
                    __KTRACE_OPT(KMMU, Kern::Printf("aa - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId));
                    }
                __KTRACE_OPT(KMMU, Kern::Printf("iZoneLeastMovDisRank 0x%x", iZoneLeastMovDisRank));
                }
            }
        else
            {
            break;
            }
        }

    // Now that the preference list has been re-ordered, check whether
    // iZoneLeastMovDis needs updating.
    if (aType >= EPageMovable && iZoneLeastMovDisRank < aZone->iPrefRank)
        {
        iZoneLeastMovDis = &aZone->iPrefLink;
        iZoneLeastMovDisRank = aZone->iPrefRank;
        __KTRACE_OPT(KMMU, Kern::Printf("a - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId));
        }
    __NK_ASSERT_DEBUG(K::Initialising ||
                      iZoneLeastMovDisRank == _LOFF(iZoneLeastMovDis, SZone, iPrefLink)->iPrefRank);
#ifdef __VERIFY_LEASTMOVDIS
    if (!K::Initialising)
        VerifyLeastPrefMovDis();
#endif
    }


/** Update the count of free and allocated pages for the zone.
@param aZone The zone whose counts are being updated
@param aCount The number of pages being freed
@param aType The type of the pages being freed
*/
void DRamAllocator::ZoneFreePages(SZone* aZone, TUint32 aCount, TZonePageType aType)
    {
#ifdef _DEBUG
    TUint32 alloc = aZone->iAllocPages[aType] - aCount;
    TUint32 free = aZone->iFreePages + aCount;
    TUint32 total_alloc = aZone->iAllocPages[EPageUnknown] +
                          aZone->iAllocPages[EPageDiscard] +
                          aZone->iAllocPages[EPageMovable] +
                          aZone->iAllocPages[EPageFixed] - aCount;
    if (free < aZone->iFreePages ||
        alloc > aZone->iAllocPages[aType] ||
        free + total_alloc != aZone->iPhysPages ||
        iTotalFreeRamPages > iTotalRamPages)
        {
        __KTRACE_OPT(KMMU,Kern::Printf("TotalFree %x TotalPages %x",iTotalFreeRamPages, iTotalRamPages));
        __KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); // counts rolled over
        __KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
                     aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
        Panic(EZonesCountErr);
        }
    __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
    __KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
    __KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
                 aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));

    if (iAllowBmaVerify)
        {
        TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
        TUint allocPages;
        if (aType == EPageFixed || aType == EPageUnknown)
            allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed];
        else
            allocPages = aZone->iAllocPages[aType];
        allocPages -= aCount;
        __NK_ASSERT_DEBUG(aZone->iPhysPages - bmaType.iAvail == allocPages);
        __NK_ASSERT_DEBUG((TUint)bmaType.iAvail >= aZone->iFreePages + aCount);

#ifdef _FULL_VERIFY_TYPE_BMAS
        TUint offset = 0;
        TUint matchedPages = 0;
        TInt r = KErrNone;
        while(offset < aZone->iPhysPages && r == KErrNone)
            {
            r = NextAllocatedPage(aZone, offset, EPageTypes);
            if (bmaType.NotFree(offset, 1))
                {
                matchedPages++;
                }
            offset++;
            }
        __NK_ASSERT_DEBUG(matchedPages == allocPages);
#endif
        }
#endif

    // Update counts
    aZone->iAllocPages[aType] -= aCount;
    aZone->iFreePages += aCount;
    aZone->iFlags &= ~KRamZoneFlagMark; // clear the mark as this zone is active

    // Check if the power state of the zone needs to be changed.
    // Don't update iZonePwrState when a zone is being cleared to then be
    // claimed, as it shouldn't be powered off when it's about to be used.
    if (iZonePowerFunc && !(aZone->iFlags & KRamZoneFlagClaiming) &&
        aZone->iFreePages == aZone->iPhysPages)
        {// Zone is empty so call the variant to power down the RAM zone if desirable.
        TUint64 pwrMask = ~(((TUint64)1) << aZone - iZones);
        iZonePwrState &= pwrMask;

        // Don't invoke the callback until the Init callback has been sent.
        if (iZoneCallbackInitSent)
            {
            TInt ret = (*iZonePowerFunc)(ERamZoneOp_PowerDown, (TAny*)aZone->iId, (TUint*)&iZonePwrState);
            if (ret != KErrNone && ret != KErrNotSupported)
                {
                Panic(EZonesCallbackErr);
                }
            CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::ZoneFreePages");
            }
        }

    // Re-order the zone preference list so that a RAM zone with more immovable
    // pages is more preferable and, secondary to that, a RAM zone that is not
    // empty is more preferable than one that is empty.
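    // Mirror of the bubble-up pass in ZoneAllocPages(): after a free, aZone
    // may hold fewer immovable pages (or have become empty), so bubble it down
    // past any equal-preference neighbour that should now rank above it,
    // keeping iPrefRank in step with list position throughout.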
    while (&aZone->iPrefLink != iZonePrefList.Last())
        {
        SZone* nextZ = _LOFF(aZone->iPrefLink.iNext, SZone, iPrefLink);
        __NK_ASSERT_DEBUG(K::Initialising || nextZ->iPrefRank == aZone->iPrefRank + 1);
        if (nextZ->iPref == aZone->iPref &&
            (nextZ->iAllocPages[EPageFixed] + nextZ->iAllocPages[EPageUnknown] >
             aZone->iAllocPages[EPageFixed] + aZone->iAllocPages[EPageUnknown] ||
             (nextZ->iFreePages != nextZ->iPhysPages &&
              aZone->iFreePages == aZone->iPhysPages)))
            {
            __KTRACE_OPT(KMMU, Kern::Printf("f - Reorder aZone 0x%x free 0x%x after nextZ 0x%x free 0x%x", aZone->iId, aZone->iFreePages, nextZ->iId, nextZ->iFreePages));
            // Make this RAM zone less preferable.
            aZone->iPrefLink.Deque();
            aZone->iPrefLink.InsertAfter(&nextZ->iPrefLink);
            aZone->iPrefRank++;
            nextZ->iPrefRank--;

            if (iZoneLeastMovDis == &aZone->iPrefLink)
                {// Ensure iZoneLeastMovDisRank is kept up to date.
                iZoneLeastMovDisRank = aZone->iPrefRank;
                }
            if (iZoneLeastMovDis == &nextZ->iPrefLink)
                {// Ensure iZoneLeastMovDisRank is kept up to date.
                iZoneLeastMovDisRank = nextZ->iPrefRank;
                if (aZone->iAllocPages[EPageMovable] || aZone->iAllocPages[EPageDiscard])
                    {// aZone is now the least preferable RAM zone with movable and/or discardable.
                    iZoneLeastMovDis = &aZone->iPrefLink;
                    iZoneLeastMovDisRank = aZone->iPrefRank;
                    __KTRACE_OPT(KMMU, Kern::Printf("aa - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId));
                    }
                __KTRACE_OPT(KMMU, Kern::Printf("iZoneLeastMovDis Rank 0x%x", iZoneLeastMovDisRank));
                }
            }
        else
            {
            break;
            }
        }
    if (&aZone->iPrefLink == iZoneLeastMovDis &&
        !aZone->iAllocPages[EPageMovable] && !aZone->iAllocPages[EPageDiscard])
        {// This RAM zone no longer has movable or discardable pages, so it is
        // also no longer the least preferable RAM zone with movable and/or
        // discardable.
        SZone* zonePrev;
        do
            {
            iZoneLeastMovDis = iZoneLeastMovDis->iPrev;
            iZoneLeastMovDisRank--;
            if (iZoneLeastMovDis == iZonePrefList.First())
                {// This is the most preferable RAM zone so we can't go any further.
                break;
                }
            zonePrev = _LOFF(iZoneLeastMovDis, SZone, iPrefLink);
            __KTRACE_OPT(KMMU, Kern::Printf("f - iZoneLeastMovDis 0x%x", zonePrev->iId));
            }
        while (!zonePrev->iAllocPages[EPageMovable] && !zonePrev->iAllocPages[EPageDiscard]);

        __NK_ASSERT_DEBUG(K::Initialising ||
                          iZoneLeastMovDisRank == _LOFF(iZoneLeastMovDis, SZone, iPrefLink)->iPrefRank);

#ifdef __VERIFY_LEASTMOVDIS
        if (!K::Initialising)
            VerifyLeastPrefMovDis();
#endif
        }
    }

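// Each zone's slot in aZoneAddrOrder is the number of zones with a lower base
// address, i.e. its rank in address order. For example (illustrative bases),
// zones at 0x20000000, 0x00000000 and 0x10000000 yield aZoneAddrOrder =
// {1, 2, 0}: index i holds the aZones index of the i-th lowest zone. This is
// O(n^2), but the zone count is small so that is acceptable here.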
/** Calculate the physical address order of the zones and temporarily store
the order in aZoneAddrOrder.
*/
inline void DRamAllocator::SortRamZones(const SRamZone* aZones, TUint8* aZoneAddrOrder)
    {
    const SRamZone* const endZone = aZones + iNumZones;
    const SRamZone* zone = aZones;
    for (; zone < endZone; zone++)
        {
        // zoneIdx is the number of zones that have a lower base address than the
        // current zone and therefore it is the address index of the current zone
        TInt zoneIdx = 0;
        // search for any zones of lower base address
        const SRamZone* zone2 = aZones;
        for (; zone2 < endZone; zone2++)
            {
            if (zone2->iBase < zone->iBase)
                {
                zoneIdx++; // have another zone of lower base address
                }
            }
        aZoneAddrOrder[zoneIdx] = zone - aZones;
        }
    }


/** Initialise the SPageInfos for all pages in this zone with the
index of the zone.
@param aZone The zone the pages to be initialised are in
*/
inline TUint DRamAllocator::InitSPageInfos(const SZone* aZone)
    {
    TUint pagesUpdated = 0;
    if (aZone->iPhysBase > iPhysAddrTop || aZone->iPhysEnd < iPhysAddrBase)
        {// None of the zone is in allocatable RAM
        return pagesUpdated;
        }

    // Mark each allocatable page in this zone with the index of the zone
#ifndef __MEMMODEL_FLEXIBLE__
    TUint8 zoneIndex = aZone - iZones;
#endif
    TPhysAddr addr = aZone->iPhysBase;
    for (; addr <= aZone->iPhysEnd; addr += KPageSize)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(addr);
        if (pi)
            {
#ifndef __MEMMODEL_FLEXIBLE__ // The FMM doesn't store zone indices in SPageInfos.
            pi->SetZone(zoneIndex);
#endif
            pagesUpdated++;
            }
        }
    return pagesUpdated;
    }

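// These handlers service the RAM HAL group; user-side code presumably reaches
// them via the HAL/UserSvr::HalFunction() dispatch path (an assumption based
// on the usual Symbian HAL routing, not on anything in this file). Each case
// copies a consistent snapshot out under the RAM alloc mutex and then writes
// it to user memory with kumemput32().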
/** HAL Function for the RAM allocator.
*/
TInt DRamAllocator::HalFunction(TInt aFunction, TAny* a1, TAny* a2)
    {
    switch(aFunction)
        {
        case ERamHalGetZoneCount:
            {
            kumemput32(a1, &iNumZones, sizeof(iNumZones));
            return KErrNone;
            }

        case ERamHalGetZoneConfig:
            {
            TUint zoneIndex = (TUint)a1;
            if (zoneIndex < iNumZones)
                {
                SZone* pZone = iZones + zoneIndex;
                struct SRamZoneConfig config;
                NKern::ThreadEnterCS();
                M::RamAllocLock(); // take the mutex to ensure a consistent set of values is read
                config.iZoneId = pZone->iId;
                config.iZoneIndex = zoneIndex;
                config.iPhysBase = pZone->iPhysBase;
                config.iPhysEnd = pZone->iPhysEnd;
                config.iPhysPages = pZone->iPhysPages;
                config.iPref = pZone->iPref;
                config.iFlags = pZone->iFlags;
                M::RamAllocUnlock();
                NKern::ThreadLeaveCS();
                kumemput32(a2,&config,sizeof(config));
                return KErrNone;
                }
            return KErrNotFound;
            }

        case ERamHalGetZoneUtilisation:
            {
            TUint zoneIndex = (TUint)a1;
            if (zoneIndex < iNumZones)
                {
                SZone* pZone = iZones + zoneIndex;
                struct SRamZoneUtilisation config;
                NKern::ThreadEnterCS();
                M::RamAllocLock(); // take the mutex to ensure a consistent set of values is read
                config.iZoneId = pZone->iId;
                config.iZoneIndex = zoneIndex;
                config.iPhysPages = pZone->iPhysPages;
                config.iFreePages = pZone->iFreePages;
                config.iAllocUnknown = pZone->iAllocPages[EPageUnknown];
                config.iAllocFixed = pZone->iAllocPages[EPageFixed];
                config.iAllocMovable = pZone->iAllocPages[EPageMovable];
                config.iAllocDiscardable = pZone->iAllocPages[EPageDiscard];
                config.iAllocOther = 0;
                M::RamAllocUnlock();
                NKern::ThreadLeaveCS();
                kumemput32(a2,&config,sizeof(config));
                return KErrNone;
                }
            return KErrNotFound;
            }

        default:
            {
            return KErrNotSupported;
            }
        }
    }

/**
Set up the RAM allocator with information about the RAM available in the system,
which comes from the bootstrap/superpage. This is intended to be called from
DRamAllocator::New().
@internalComponent
@see DRamAllocator::New()
@param aInfo Two lists of SRamBanks for available and reserved banks in RAM, respectively
@param aZones A list of the RAM zones in the system and their configuration/preferences
@param aZoneCallback Pointer to a base port callback function that will be invoked by this class
*/
void DRamAllocator::Create(const SRamInfo& aInfo, const SRamZone* aZones, TRamZoneCallback aZoneCallback)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::Create"));

    // The SZone::iBma array assumes this, and KBmaAllPages can't be the same as
    // any allocatable page type.
    __ASSERT_COMPILE(EPageFixed < KPageImmovable && EPageUnknown < KPageImmovable &&
                     EPageDiscard >= KPageImmovable && EPageMovable >= KPageImmovable &&
                     KBmaAllPages != EPageFixed && KBmaAllPages != EPageMovable &&
                     KBmaAllPages != EPageDiscard);
    // NoAllocOfPageType() requires this
    __ASSERT_COMPILE( KRamZoneFlagNoFixed == 1 << (EPageFixed - KPageTypeAllocBase) &&
                      KRamZoneFlagNoMovable == 1 << (EPageMovable - KPageTypeAllocBase) &&
                      KRamZoneFlagNoDiscard == 1 << (EPageDiscard - KPageTypeAllocBase));

    // SZone::iPhysEnd and iPhysAddrTop rely on this when checking contiguous zones etc.
    __ASSERT_COMPILE(KPageShift != 0);

    ///////////////////////////////////////////////////////////////////////////
    // Determine where all the allocatable RAM pages are, using the SRamBank
    // data passed to the kernel by the bootstrap
    ///////////////////////////////////////////////////////////////////////////
    TUint num_boot_banks=CountBanks(aInfo.iBanks);
    TUint32 total_ram_size=TotalBankSize(aInfo.iBanks);
    __KTRACE_OPT(KMMU,Kern::Printf("#banks from bootstrap=%d",num_boot_banks));
    __KTRACE_OPT(KMMU,Kern::Printf("Total size=%08x",total_ram_size));
    iTotalRamPages=total_ram_size>>KPageShift;
    // Assume all pages are allocated as unknown for now
    iTotalFreeRamPages = 0;
    __KTRACE_OPT(KMMU,Kern::Printf("Total size=%08x, total pages=%08x",total_ram_size,iTotalRamPages));

    iPhysAddrBase=aInfo.iBanks[0].iBase;
    const SRamBank& last_boot_bank=aInfo.iBanks[num_boot_banks-1];
    iPhysAddrTop = last_boot_bank.iBase + last_boot_bank.iSize - 1;
    __KTRACE_OPT(KMMU,Kern::Printf("PA base=%08x, PA top=%08x",iPhysAddrBase,iPhysAddrTop));

    __ASSERT_DEBUG(iPhysAddrTop > iPhysAddrBase, Panic(ECreateInvalidRamBanks));


    ///////////////////////////////////////////////////////////////////////////
    // Determine how many zones are required and allocate all the
    // data structures that will be required, permanent ones first then
    // temporary ones to avoid kernel heap fragmentation.
    ///////////////////////////////////////////////////////////////////////////
    // Stop any RAM zone callback operations until the initial one has been sent
    iZoneCallbackInitSent = EFalse;
    if (aZones)
        {
        CountZones(aZones);
        iZonePowerFunc = aZoneCallback;
        }
    else
        {// the maximum number of zones is the number of non-coalesced boot banks
        iNumZones = num_boot_banks;
        // No zones specified so don't worry about invoking the callback function
        iZonePowerFunc = NULL;
        }

    // Permanent heap allocation #1 - may be resized if no zones specified
    __KTRACE_OPT(KMMU,Kern::Printf("iNumZones=%d", iNumZones));
    iZones = (SZone*)Kern::AllocZ(iNumZones*sizeof(SZone));
    if (!iZones)
        {
        Panic(ECreateNoMemory);
        }

    ///////////////////////////////////////////////////////////////////////////
    // Coalesce contiguous boot banks
    ///////////////////////////////////////////////////////////////////////////
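    // Banks whose address ranges touch are merged before zones are built from
    // them. For example (illustrative values), a bank at 0x80000000+0x800000
    // followed by one at 0x80800000+0x800000 coalesces into a single
    // 0x80000000+0x1000000 bank.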
    SRamBank* physBanks = (SRamBank*)Kern::Alloc(num_boot_banks*sizeof(SRamBank));
    if (!physBanks)
        {
        Panic(ECreateNoMemory);
        }
    SRamBank* coalescedBank = physBanks;
    const SRamBank* const lastBank = aInfo.iBanks + num_boot_banks;
    TPhysAddr currentBase = aInfo.iBanks->iBase;
    TPhysAddr currentEnd = aInfo.iBanks->iBase + aInfo.iBanks->iSize;
    const SRamBank* nextBank = aInfo.iBanks + 1;
    for (; nextBank <= lastBank; ++nextBank)
        {
        // Create a new bank if the next bank isn't contiguous or if
        // it is the last bank
        if (nextBank == lastBank || nextBank->iBase != currentEnd)
            {
            coalescedBank->iBase = currentBase;
            coalescedBank->iSize = currentEnd - currentBase;
            // Mark all the SPageInfos for the pages in this bank as unused.
            // Needs to be done here to allow SPageInfo::SafeFromPhysAddr to work,
            // which is used by InitSPageInfos()
            SPageInfo* pi = SPageInfo::FromPhysAddr(coalescedBank->iBase);
            SPageInfo* piBankEnd = pi + (coalescedBank->iSize >> KPageShift);
            for (; pi < piBankEnd; pi++)
                {
                pi->SetUnused();
                }
            ++coalescedBank;
            __KTRACE_OPT(KMMU, Kern::Printf("Coalesced bank: %08x-%08x", currentBase, currentEnd));
            currentBase = nextBank->iBase;
            currentEnd = currentBase + nextBank->iSize;
            }
        else
            {
            currentEnd += nextBank->iSize;
            }
        }
    TUint num_coalesced_banks = coalescedBank - physBanks;
    __KTRACE_OPT(KMMU, Kern::Printf("#Coalesced banks: %d", num_coalesced_banks));

    ///////////////////////////////////////////////////////////////////////////
    // Initialise the SZone objects and mark all the SPageInfos with the index
    // of the zone they are in.
    ///////////////////////////////////////////////////////////////////////////
    // Assume everything is off so the base port will get a notification every
    // time a new zone is required during the rest of the boot process.
    if (aZones != NULL)
        {
        SZone* newZone = iZones; // pointer to the zone being created

        // Create and fill zoneAddrOrder with address-ordered indices into aZones
        TUint8* zoneAddrOrder = (TUint8*)Kern::Alloc(iNumZones);
        if (!zoneAddrOrder)
            {
            Panic(ECreateNoMemory);
            }
        SortRamZones(aZones, zoneAddrOrder);

        // Now go through each SRamZone in address order initialising the SZone
        // objects.
        TUint i = 0;
        TUint totalZonePages = 0;
        for (; i < iNumZones; i++)
            {
            const SRamZone& ramZone = *(aZones + zoneAddrOrder[i]);
            newZone->iPhysBase = ramZone.iBase;
            newZone->iPhysEnd = ramZone.iBase + ramZone.iSize - 1;
            newZone->iPhysPages = ramZone.iSize >> KPageShift;
            newZone->iAllocPages[EPageUnknown] = newZone->iPhysPages;
            newZone->iId = ramZone.iId;
            newZone->iPref = ramZone.iPref;
            newZone->iFlags = ramZone.iFlags;
            totalZonePages += InitSPageInfos(newZone);
            newZone++;
            }

        // iZones now points to all the SZone objects stored in address order
        Kern::Free(zoneAddrOrder);
        if (totalZonePages != iTotalRamPages)
            {// The zones don't cover all of the allocatable RAM.
            Panic(EZonesIncomplete);
            }
        }
    else
        {
        iNumZones = num_coalesced_banks;
        iZones = (SZone*)Kern::ReAlloc((TAny*)iZones, iNumZones*sizeof(SZone));
        if (iZones == NULL)
            {
            Panic(ECreateNoMemory);
            }
        // Create a zone for each coalesced boot bank
        SRamBank* bank = physBanks;
        SRamBank* bankEnd = physBanks + num_coalesced_banks;
        SZone* zone = iZones;
        for (; bank < bankEnd; bank++, zone++)
            {
            zone->iPhysBase = bank->iBase;
            zone->iPhysEnd = bank->iBase + bank->iSize - 1;
            zone->iPhysPages = bank->iSize >> KPageShift;
            zone->iAllocPages[EPageUnknown] = zone->iPhysPages;
            zone->iId = (TUint)bank; // doesn't matter what it is as long as it is unique
            InitSPageInfos(zone);
            }
        }
    // Delete the coalesced banks as they are no longer required
    Kern::Free(physBanks);

    ///////////////////////////////////////////////////////////////////////////
    // Create each zone's bit map allocators now, as no temporary heap
    // cells are still allocated at this point.
    ///////////////////////////////////////////////////////////////////////////
    const SZone* const endZone = iZones + iNumZones;
    SZone* zone = iZones;
    for (; zone < endZone; zone++)
        {// Create each BMA with all pages allocated as unknown.
        for (TUint i = 0; i < EPageTypes; i++)
            {
            // Only mark the all-pages bma and the fixed/unknown bma as allocated.
            TBool notAllocated = (i >= (TUint)EPageMovable);
            zone->iBma[i] = TBitMapAllocator::New(zone->iPhysPages, notAllocated);
            if (!zone->iBma[i])
                {
                Panic(ECreateNoMemory);
                }
            }
        }

    ///////////////////////////////////////////////////////////////////////////
    // Un-allocate each page in each bank so that it can be allocated when
    // required. Any page that exists outside a bank will remain allocated as
    // EPageUnknown and will therefore not be touched by the allocator.
    ///////////////////////////////////////////////////////////////////////////
    // Temporarily fill the preference list so SetPhysicalRamState can succeed
#ifdef _DEBUG
    // Block bma verifications as the bma and alloc counts aren't consistent yet.
    iAllowBmaVerify = EFalse;
#endif
    const SZone* const lastZone = iZones + iNumZones;
    zone = iZones;
    for (; zone < lastZone; zone++)
        {
        iZonePrefList.Add(&zone->iPrefLink);
        }
    const SRamBank* const lastPhysBank = aInfo.iBanks + num_boot_banks;
    const SRamBank* bank = aInfo.iBanks;
    for (; bank < lastPhysBank; bank++)
        {// Free all the pages in this bank.
        SetPhysicalRamState(bank->iBase, bank->iSize, ETrue, EPageUnknown);
        }
#ifdef _DEBUG
    // Only now is it safe to enable bma verifications
    iAllowBmaVerify = ETrue;
#endif

    ///////////////////////////////////////////////////////////////////////////
    // Sort the zones by preference and create a preference-ordered linked list
    ///////////////////////////////////////////////////////////////////////////
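    // Placement works like a counting sort on (iPref, iFreePages): each zone's
    // slot index is the number of zones ranked ahead of it, i.e. zones with a
    // smaller iPref value, or the same iPref and fewer free pages. Exact ties
    // (same preference and size) fall through to the next free adjacent slot,
    // so no two zones ever land on the same index.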
    zone = iZones;
    for (; zone < lastZone; zone++)
        {// remove all the zones from the preference list as they are not in preference order
        zone->iPrefLink.Deque();
        }
    SZone** prefOrder = (SZone**)Kern::AllocZ(iNumZones * sizeof(SZone*));
    if (!prefOrder)
        {
        Panic(ECreateNoMemory);
        }
    zone = iZones;
    for(; zone < lastZone; zone++)
        {
        TInt lowerZones = 0;
        // Find how many zones have a lower preference than this one
        const SZone* zone2 = iZones;
        for (; zone2 < lastZone; zone2++)
            {
            if (zone->iPref > zone2->iPref ||
                zone->iPref == zone2->iPref && zone->iFreePages > zone2->iFreePages)
                {
                lowerZones++;
                }
            }
        while (prefOrder[lowerZones] != 0)
            {// Zone(s) of this preference and size already exist so
            // place this one after it/them
            lowerZones++;
            }
        prefOrder[lowerZones] = zone;
        }
    // Fill the preference-ordered linked list
    SZone** const lastPref = prefOrder + iNumZones;
    SZone** prefZone = prefOrder;
    TUint prefRank = 0;
    for (; prefZone < lastPref; prefZone++, prefRank++)
        {
        SZone& zone = **prefZone;
        iZonePrefList.Add(&zone.iPrefLink);
        zone.iPrefRank = prefRank;
        }
    Kern::Free(prefOrder); // Remove the temporary allocation

    ///////////////////////////////////////////////////////////////////////////
    // Now mark any regions reserved by the base port as allocated and not
    // for use by the RAM allocator.
    ///////////////////////////////////////////////////////////////////////////
    const SRamBank* pB = lastBank + 1; // first reserved block specifier
    for (; pB->iSize; ++pB)
        {
        __KTRACE_OPT(KMMU, Kern::Printf("Reserve physical block %08x+%x", pB->iBase, pB->iSize));
        TInt r = SetPhysicalRamState(pB->iBase, pB->iSize, EFalse, EPageFixed);
        __KTRACE_OPT(KMMU, Kern::Printf("Reserve returns %d", r));
        if (r!=KErrNone)
            {
            Panic(ECreateInvalidReserveBank);
            }
#ifdef BTRACE_KERNEL_MEMORY
        BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, pB->iSize, pB->iBase);
        Epoc::DriverAllocdPhysRam += pB->iSize;
#endif
#ifndef __MEMMODEL_FLEXIBLE__ // Mmu::Init2Common() handles this in the FMM.
        // Synchronise the SPageInfos with any blocks that were reserved by
        // marking any reserved regions as locked
        TPhysAddr physAddrEnd = pB->iBase + pB->iSize;
        TPhysAddr physAddr = pB->iBase;
        for(; physAddr < physAddrEnd; physAddr += KPageSize)
            {
            SPageInfo* pi = SPageInfo::FromPhysAddr(physAddr);
            pi->Lock();
            }
#endif
        }

    ///////////////////////////////////////////////////////////////////////////
    // Now that we have the RAM zone preference list and know how many
    // allocatable pages there are, set iZoneLeastMovDis to be the RAM zone
    // that will be in use when half of the RAM is in use. This is a boot-up
    // optimisation to reduce the amount of moving and/or discarding that
    // fixed page allocations will have to do during boot.
    ///////////////////////////////////////////////////////////////////////////
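    // Walk the preference list accumulating free page counts until at least
    // half of the allocatable RAM is covered. For example (illustrative
    // counts), with zones holding 0x400, 0x300 and 0x300 free pages (total
    // 0xA00, half 0x500), the walk stops at the second zone: 0x400 < 0x500
    // but 0x400 + 0x300 >= 0x500, so iZoneLeastMovDis refers to that zone.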
    TUint halfAllocatablePages = iTotalFreeRamPages >> 1;
    TUint pages = 0;
    SDblQueLink* link = &iZonePrefList.iA;
    do
        {
        link = link->iNext;
        __NK_ASSERT_DEBUG(link != &iZonePrefList.iA);
        SZone& zonePages = *_LOFF(link, SZone, iPrefLink);
        pages += zonePages.iFreePages;
        }
    while(pages < halfAllocatablePages);
    iZoneLeastMovDis = link;
    iZoneLeastMovDisRank = _LOFF(link, SZone, iPrefLink)->iPrefRank;

    // Reset general defrag links.
    iZoneGeneralPrefLink = NULL;
    iZoneGeneralTmpLink = NULL;

    __KTRACE_OPT(KMMU,DebugDump());
    }


void DRamAllocator::MarkPagesAllocated(TPhysAddr aAddr, TInt aCount, TZonePageType aType)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPagesAllocated(%x+%x)",aAddr,aCount));

    M::RamAllocIsLocked();

    // Don't allow unknown pages to be allocated, saves extra 'if' when
    // creating bmaType.
    __NK_ASSERT_DEBUG(aType != EPageUnknown);

    __ASSERT_DEBUG( !(TUint32(aAddr) & (KPageSize - 1)) &&
                    (TUint32(aAddr) < TUint32(iPhysAddrTop)) &&
                    (TUint32(aAddr) >= TUint32(iPhysAddrBase))&&
                    (TUint32((aCount << KPageShift) -1 + aAddr) <= TUint32(iPhysAddrTop)),
                    Panic(EDoMarkPagesAllocated1));

    iTotalFreeRamPages-=aCount;
    // Find the 1st zone the 1st set of allocations belongs to
    TInt offset = 0;
    SZone* pZ = GetZoneAndOffset(aAddr,offset);
    if (pZ == NULL)
        {//aAddr not in RAM
        Panic(EDoMarkPagesAllocated1);
        }
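    // A request may span several zones: consume as many pages as fit in the
    // current zone's bitmap, then step to the next zone, which must be
    // physically contiguous with the one just filled (asserted below).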
while(aCount)
|
sl@0
|
1121 |
{
|
sl@0
|
1122 |
TBitMapAllocator& bmaAll = *(pZ->iBma[KBmaAllPages]);
|
sl@0
|
1123 |
TBitMapAllocator& bmaType = *(pZ->iBma[aType]);
|
sl@0
|
1124 |
TInt count = Min(bmaAll.iSize - offset, aCount);
|
sl@0
|
1125 |
bmaAll.Alloc(offset, count);
|
sl@0
|
1126 |
bmaType.Alloc(offset, count);
|
sl@0
|
1127 |
ZoneAllocPages(pZ, count, aType);
|
sl@0
|
1128 |
aCount -= count;
|
sl@0
|
1129 |
|
sl@0
|
1130 |
// If spanning zones then ensure the next zone is contiguous.
|
sl@0
|
1131 |
__ASSERT_DEBUG(!aCount || ((pZ + 1)->iPhysBase != 0 && ((pZ + 1)->iPhysBase - 1) == pZ->iPhysEnd), Panic(EDoMarkPagesAllocated1));
|
sl@0
|
1132 |
|
sl@0
|
1133 |
pZ++; // zones in physical address order so move to next one
|
sl@0
|
1134 |
offset = 0; // and reset offset to start of the zone
|
sl@0
|
1135 |
}
|
sl@0
|
1136 |
}
|
sl@0
|
1137 |
|
sl@0
|
1138 |
TInt DRamAllocator::MarkPageAllocated(TPhysAddr aAddr, TZonePageType aType)
|
sl@0
|
1139 |
{
|
sl@0
|
1140 |
__KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPageAllocated %08x",aAddr));
|
sl@0
|
1141 |
|
sl@0
|
1142 |
M::RamAllocIsLocked();
|
sl@0
|
1143 |
|
sl@0
|
1144 |
// Don't allow unknown pages to be allocated, saves extra 'if' when
|
sl@0
|
1145 |
// creating bmaType.
|
sl@0
|
1146 |
__NK_ASSERT_DEBUG(aType != EPageUnknown);
|
sl@0
|
1147 |
|
sl@0
|
1148 |
TInt n;
|
sl@0
|
1149 |
SZone* z=GetZoneAndOffset(aAddr,n);
|
sl@0
|
1150 |
if (!z)
|
sl@0
|
1151 |
{
|
sl@0
|
1152 |
return KErrArgument;
|
sl@0
|
1153 |
}
|
sl@0
|
1154 |
__KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n));
|
sl@0
|
1155 |
TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
|
sl@0
|
1156 |
TBitMapAllocator& bmaType = *(z->iBma[aType]);
|
sl@0
|
1157 |
if (bmaAll.NotFree(n,1))
|
sl@0
|
1158 |
{
|
sl@0
|
1159 |
__KTRACE_OPT(KMMU,Kern::Printf("Page already allocated"));
|
sl@0
|
1160 |
return KErrAlreadyExists; // page is already allocated
|
sl@0
|
1161 |
}
|
sl@0
|
1162 |
bmaAll.Alloc(n,1);
|
sl@0
|
1163 |
bmaType.Alloc(n,1);
|
sl@0
|
1164 |
--iTotalFreeRamPages;
|
sl@0
|
1165 |
ZoneAllocPages(z, 1, aType);
|
sl@0
|
1166 |
__KTRACE_OPT(KMMU,Kern::Printf("Total free RAM pages now = %d",iTotalFreeRamPages));
|
sl@0
|
1167 |
|
sl@0
|
1168 |
#ifdef BTRACE_RAM_ALLOCATOR
|
sl@0
|
1169 |
BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocMarkAllocated, aType, aAddr);
|
sl@0
|
1170 |
#endif
|
sl@0
|
1171 |
return KErrNone;
|
sl@0
|
1172 |
}
|
sl@0
|
1173 |
|
sl@0
|
1174 |
TInt DRamAllocator::FreeRamPage(TPhysAddr aAddr, TZonePageType aType)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("FreeRamPage %08x",aAddr));

    M::RamAllocIsLocked();

#ifdef _DEBUG
#ifndef __MEMMODEL_FLEXIBLE__
    // Check lock counter of the page
    if (aAddr != KPhysAddrInvalid)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aAddr);
        if (pi && pi->LockCount())
            Panic(EFreeingLockedPage);
        }
#endif
    // Don't allow unknown pages to be freed, saves extra 'if' when
    // creating bmaType.
    __NK_ASSERT_DEBUG(aType != EPageUnknown);
#endif

    TInt n;
    SZone* z=GetZoneAndOffset(aAddr,n);
    if (!z)
        {
        return KErrArgument;
        }
    __KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n));
    TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
    TBitMapAllocator& bmaType = *(z->iBma[aType]);
    bmaAll.Free(n);
    bmaType.Free(n);
    ++iTotalFreeRamPages;
    ZoneFreePages(z, 1, aType);

#ifdef BTRACE_RAM_ALLOCATOR
    BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePage, aType, aAddr);
#endif
    return KErrNone;
    }

void DRamAllocator::FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("FreeRamPages count=%08x",aNumPages));

    M::RamAllocIsLocked();

#if defined(_DEBUG) && !defined(__MEMMODEL_FLEXIBLE__)
    // Check lock counter for each page that is about to be freed.
    TInt pageNum = aNumPages;
    TPhysAddr* pageList = aPageList;
    while (pageNum--)
        {
        TPhysAddr pa = *pageList++;
        if (pa == KPhysAddrInvalid)
            continue;
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
        if (pi && pi->LockCount())
            Panic(EFreeingLockedPage);
        }
#endif

    while (aNumPages--)
        {
        TPhysAddr first_pa = *aPageList++;
        if (first_pa == KPhysAddrInvalid)
            {
            continue;
            }
        TInt ix;
        SZone* z = GetZoneAndOffset(first_pa,ix);
        if (!z)
            {
            continue;
            }
        TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
        TInt zp_rem = bmaAll.iSize - ix;
        __KTRACE_OPT(KMMU,Kern::Printf("1st PA=%08x Zone %d index %04x",first_pa,z-iZones,ix));
        TInt n = 1;
        TPhysAddr pa = first_pa + KPageSize;
        while (--zp_rem && aNumPages && *aPageList==pa)
            {
            ++n;
            --aNumPages;
            ++aPageList;
            pa += KPageSize;
            }
        __KTRACE_OPT(KMMU2,Kern::Printf("%d consecutive pages, zp_rem=%x, %d remaining pages",n,zp_rem,aNumPages));
        bmaAll.Free(ix,n);
        TBitMapAllocator& bmaType = *(z->iBma[aType]);
        bmaType.Free(ix,n);
        iTotalFreeRamPages += n;
        ZoneFreePages(z, n, aType);
#ifdef BTRACE_RAM_ALLOCATOR
        BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocFreePages, aType, n, first_pa);
#endif
        }
#ifdef BTRACE_RAM_ALLOCATOR
    BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocFreePagesEnd);
#endif
    }

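/*
Illustrative note (not part of the original source): the loop above coalesces
physically consecutive entries of aPageList that fall within one zone, so each
run costs a single Free(ix,n) on the bitmaps rather than one call per page.
A hypothetical sketch ('allocator', 'p' and 'q' are assumed names):

@code
    // Three consecutive pages plus one unrelated page: freed as runs of 3 + 1.
    TPhysAddr pages[4] = { p, p + KPageSize, p + 2 * KPageSize, q };
    allocator->FreeRamPages(pages, 4, EPageFixed);
@endcode
*/
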
/**
Attempt to clear up to the required number of discardable or movable pages
from the RAM zone.

@param aZone The RAM zone to clear.
@param aRequiredPages The maximum number of pages to clear.
*/
void DRamAllocator::ZoneClearPages(SZone& aZone, TUint aRequiredPages)
    {
    __KTRACE_OPT(KMMU, 
                Kern::Printf("ZoneClearPages: ID 0x%x, req 0x%x", aZone.iId, aRequiredPages));
    // Discard the required number of discardable pages.
    TUint offset = 0;
    TInt r = NextAllocatedPage(&aZone, offset, EPageDiscard);
    while (r == KErrNone && aRequiredPages)
        {
        TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
        TInt discarded = M::DiscardPage(physAddr, aZone.iId, EFalse);
        if (discarded == KErrNone)
            {// The page was successfully discarded.
            aRequiredPages--;
            }
        offset++;
        r = NextAllocatedPage(&aZone, offset, EPageDiscard);
        }
    // Move the required number of movable pages.
    offset = 0;
    r = NextAllocatedPage(&aZone, offset, EPageMovable);
    while (r == KErrNone && aRequiredPages)
        {
        TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
        TPhysAddr newAddr = KPhysAddrInvalid;
        if (M::MovePage(physAddr, newAddr, aZone.iId, EFalse) == KErrNone)
            {// The page was successfully moved.
#ifdef _DEBUG
            TInt newOffset = 0;
            SZone* newZone = GetZoneAndOffset(newAddr, newOffset);
            __NK_ASSERT_DEBUG(newZone != &aZone);
#endif
            aRequiredPages--;
            }
        offset++;
        r = NextAllocatedPage(&aZone, offset, EPageMovable);
        }
    }

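/*
Illustrative note (not part of the original source): ZoneClearPages() walks a
zone by page offset, and the physical address of offset n is always
(n << KPageShift) + iPhysBase. For example, with KPageShift == 12, offset
0x10 in a zone based at 0x80000000 is the page at 0x80010000.
*/
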
/** Attempt to allocate pages into a particular zone. Pages will not
    always be contiguous.

@param aPageList On return it will contain the addresses of any allocated pages
@param aZone The zone to allocate from
@param aNumPages The number of pages to allocate
@param aType The type of pages to allocate
@return The number of pages that were allocated
*/
TUint32 DRamAllocator::ZoneFindPages(TPhysAddr*& aPageList, SZone& aZone, TUint32 aNumPages, TZonePageType aType)
    {
    // Don't allow unknown pages to be allocated, saves extra 'if' when
    // creating bmaType.
    __NK_ASSERT_DEBUG(aType != EPageUnknown);

    TBitMapAllocator& bmaAll = *aZone.iBma[KBmaAllPages];
    TBitMapAllocator& bmaType = *(aZone.iBma[aType]);
    TPhysAddr zpb = aZone.iPhysBase;
    TInt got = bmaAll.AllocList(aNumPages, (TInt*)aPageList);
    if (got)
        {
        TPhysAddr* pE = aPageList + got;
        while (aPageList < pE)
            {
            TInt ix = *aPageList;
            *aPageList++ = zpb + (ix << KPageShift);
            __KTRACE_OPT(KMMU,Kern::Printf("Got page @%08x",zpb + (ix << KPageShift)));

            // Mark the page allocated on the page type bit map.
            bmaType.Alloc(ix, 1);
            }
        ZoneAllocPages(&aZone, got, aType);
#ifdef BTRACE_RAM_ALLOCATOR
        BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocRamPages, aType, got, *(pE-got));
#endif
        }
    return got;
    }

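/*
Illustrative note (not part of the original source): ZoneFindPages() reuses
the caller's aPageList storage. TBitMapAllocator::AllocList() first fills it
with zone-relative page indices, and the loop above then rewrites each entry
in place as iPhysBase + (index << KPageShift). aPageList is passed by
reference because it is left pointing just past the entries written, so
successive calls against different zones append to the same list.
*/
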
/**
Allocate discontiguous pages.

Fixed pages are always allocated into the most preferable RAM zone that has free,
movable or discardable pages in it. This is to avoid fixed pages being placed
in the less preferred RAM zones.

Movable and discardable pages are allocated into the RAM zones currently in use.
An empty RAM zone will only be used (switched on) if there are not enough free
pages in the in-use RAM zones. The pages will be allocated from the least
preferable RAM zone to be in use after the allocation through to the more
preferred RAM zones.

If a valid zone is specified in aBlockedZoneId then that RAM zone will not be
allocated into. Also, if aBlockedZoneId is valid and aBlockRest is set then the
allocation will stop as soon as aBlockedZoneId is reached in preference ordering.

@param aPageList On success, will contain the address of each allocated page
@param aNumPages The number of the pages to allocate
@param aType The type of the pages to allocate
@param aBlockedZoneId The ID of the RAM zone that shouldn't be allocated into.
The default value has no effect.
@param aBlockRest Set to ETrue to stop this allocation using any currently empty
RAM zones, EFalse to allow empty RAM zones to be used. Only
affects movable and discardable allocations.

@return 0 on success, the number of extra pages required to fulfill the request on failure.
*/
TInt DRamAllocator::AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType, TUint aBlockedZoneId, TBool aBlockRest)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("AllocRamPages 0x%x type%d",aNumPages, aType));

    M::RamAllocIsLocked();

    // Should never allocate unknown pages.
    __NK_ASSERT_DEBUG(aType != EPageUnknown);

    TPhysAddr* pageListBase = aPageList;
    TUint32 numMissing = aNumPages;

    if (aType == EPageFixed)
        {// Currently only a general defrag operation should set this and it won't
        // allocate fixed pages.
        __NK_ASSERT_DEBUG(!aBlockRest);
        if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
            {// Not enough free space and not enough freeable pages.
            goto exit;
            }

        // Search through each zone in preference order until all pages allocated or
        // have reached the end of the preference list
        SDblQueLink* link = iZonePrefList.First();
        while (numMissing && link != &iZonePrefList.iA)
            {
            SZone& zone = *_LOFF(link, SZone, iPrefLink);
            // Get the link to the next zone before any potential reordering,
            // which would occur if the previous zone has the same preference and
            // more free space after this allocation.
            link = link->iNext;

            if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType))
                {// The flags disallow aType pages or all pages.
                __KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
                continue;
                }

            numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType);
            __KTRACE_OPT(KMMU, Kern::Printf("zone.iId 0x%x", zone.iId));

            if (numMissing && 
                (zone.iAllocPages[EPageMovable] || zone.iAllocPages[EPageDiscard]))
                {// Not all the required pages were allocated and there are still some
                // movable and discardable pages in this RAM zone.
                ZoneClearPages(zone, numMissing);

                // Have discarded and moved everything required or possible so
                // now allocate into the pages just freed.
                numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType);
                }
            }
        }
    else
        {
        if ((TUint)aNumPages > iTotalFreeRamPages)
            {// Not enough free pages to fulfill this request so return the amount required
            return aNumPages - iTotalFreeRamPages;
            }

        // Determine if there are enough free pages in the RAM zones in use.
        TUint totalFreeInUse = 0;
        SDblQueLink* link = iZoneLeastMovDis;
        for (; link != &iZonePrefList.iA; link = link->iPrev)
            {
            SZone& zone = *_LOFF(link, SZone, iPrefLink);
            if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType) ||
                (aBlockRest && (zone.iFlags & KRamZoneFlagGenDefragBlock)))
                {// The blocked RAM zone or flags disallow aType pages or all pages
                __KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
                continue;
                }
            totalFreeInUse += zone.iFreePages;
            }

        if (aBlockRest && totalFreeInUse < (TUint)aNumPages)
            {// Allocating as part of a general defragmentation and
            // can't allocate without using a RAM zone less preferable than
            // the current least preferable RAM zone with movable and/or
            // discardable pages.
            __NK_ASSERT_DEBUG(numMissing);
            goto exit;
            }

        SDblQueLink* leastClearable = iZoneLeastMovDis;
        while (totalFreeInUse < (TUint)aNumPages)
            {// The amount of free pages in the RAM zones with movable
            // and/or discardable isn't enough.
            leastClearable = leastClearable->iNext;
            if (leastClearable == &iZonePrefList.iA)
                {// There are no more RAM zones to allocate into.
                __NK_ASSERT_DEBUG(numMissing);
                goto exit;
                }
            SZone& zone = *_LOFF(leastClearable, SZone, iPrefLink);
            if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType))
                {// The flags disallow aType pages or all pages
                __KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
                continue;
                }
            totalFreeInUse += zone.iFreePages;
            }
        // Now that we know exactly how many RAM zones will be required do
        // the allocation. To reduce fixed allocations having to clear RAM
        // zones, allocate from the least preferable RAM zone to be used
        // through to the most preferable RAM zone.
        link = leastClearable;
        while (numMissing)
            {
            __NK_ASSERT_DEBUG(link != &iZonePrefList.iA);
            SZone& zone = *_LOFF(link, SZone, iPrefLink);
            // Update the link before any reordering so we don't miss a RAM zone.
            link = link->iPrev;

            if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType) ||
                (aBlockRest && (zone.iFlags & KRamZoneFlagGenDefragBlock)))
                {// The blocked RAM zone or flags disallow aType pages or all pages
                __KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
                continue;
                }

            numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType);
            __KTRACE_OPT(KMMU, Kern::Printf("zone.iId 0x%x", zone.iId));
            }
        __NK_ASSERT_DEBUG(!numMissing);
        }

exit:
    // Update here so any call to FreeRamPages doesn't upset the count
    aNumPages -= numMissing;    // set to the number of pages that are allocated
    iTotalFreeRamPages -= aNumPages;

    if (numMissing)
        {// Couldn't allocate all required pages so free those that were allocated
        FreeRamPages(pageListBase, aNumPages, aType);
        }
#ifdef BTRACE_RAM_ALLOCATOR
    else
        {
        BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocRamPagesEnd);
        }
#endif
    return numMissing;
    }

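/*
Illustrative usage sketch (not part of the original source; 'allocator' is an
assumed name and aBlockedZoneId/aBlockRest are left at their defaults). On
failure the return value is the shortfall and nothing is left allocated, so a
caller can retry after releasing memory:

@code
    TPhysAddr pages[16];
    TInt missing = allocator->AllocRamPages(pages, 16, EPageMovable);
    if (missing)
        {// 'missing' more pages were needed; pages[] holds nothing to free.
        }
@endcode
*/
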
/**
Attempt to allocate discontiguous pages from the specified RAM zones.

NOTE - This method only obeys the KRamZoneFlagNoAlloc and KRamZoneFlagClaiming
flags and not the others.
But as currently only EFixed pages will be allocated using this method that is
the desired behaviour.

@param aZoneIdList An array of the IDs of the RAM zones to allocate from.
@param aZoneIdCount The number of IDs in aZoneIdList.
@param aPageList On success, will contain the address of each allocated page.
@param aNumPages The number of the pages to allocate.
@param aType The type of the pages to allocate.

@return KErrNone on success, KErrNoMemory if allocation couldn't succeed or
the RAM zone has the KRamZoneFlagNoAlloc flag set, KErrArgument if a zone of
aZoneIdList doesn't exist or aNumPages is greater than the total pages in the zones.
*/
TInt DRamAllocator::ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType)
    {
    M::RamAllocIsLocked();
    __NK_ASSERT_DEBUG(aType == EPageFixed);

    __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocRamPages 0x%x zones 0x%x",aNumPages, aZoneIdCount));

    TInt r = KErrNone;
    TUint* zoneIdPtr = aZoneIdList;
    TUint* zoneIdEnd = zoneIdPtr + aZoneIdCount;
    TUint numMissing = aNumPages;
    TUint physicalPages = 0;
    TPhysAddr* pageListBase = aPageList;

    // Always loop through all the RAM zones so that an invalid ID is always
    // detected, whether or not all the specified RAM zones were required
    // for the allocation.
    for (; zoneIdPtr < zoneIdEnd; zoneIdPtr++)
        {
        SZone* zone = ZoneFromId(*zoneIdPtr);

        if (zone == NULL)
            {// Invalid zone ID.
            r = KErrArgument;
            break;
            }

        physicalPages += zone->iPhysPages;

        if (zone->iFlags & (KRamZoneFlagNoAlloc|KRamZoneFlagClaiming))
            {// If this RAM zone can't be allocated into then skip it.
            continue;
            }

        numMissing -= ZoneFindPages(aPageList, *zone, numMissing, aType);

        if (numMissing && aType == EPageFixed)
            {// Remove up to the required number of pages from the RAM zone
            // and reattempt the allocation.
            ZoneClearPages(*zone, numMissing);
            numMissing -= ZoneFindPages(aPageList, *zone, numMissing, aType);
            }
        }

    // Update iTotalFreeRamPages here so that if the allocation doesn't succeed
    // then FreeRamPages() will keep it consistent.
    TUint numAllocated = aNumPages - numMissing;
    iTotalFreeRamPages -= numAllocated;

    if (r == KErrArgument || physicalPages < (TUint)aNumPages)
        {// Invalid zone ID or the number of pages requested is too large.
        // This should fail regardless of whether the allocation failed or not.
        FreeRamPages(pageListBase, numAllocated, aType);
        return KErrArgument;
        }

    if (numMissing)
        {// Couldn't allocate all required pages so free those that were allocated
        FreeRamPages(pageListBase, numAllocated, aType);
        return KErrNoMemory;
        }

    // Have allocated all the required pages.
#ifdef BTRACE_RAM_ALLOCATOR
    BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocZoneRamPagesEnd);
#endif
    return KErrNone;
    }

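/*
Illustrative usage sketch (not part of the original source; 'allocator' and
the zone ID constants are assumed names). Restricting an allocation to
specific zones:

@code
    TUint zoneIds[2] = { KExampleZoneId0, KExampleZoneId1 };    // hypothetical IDs
    TPhysAddr pages[8];
    TInt r = allocator->ZoneAllocRamPages(zoneIds, 2, pages, 8, EPageFixed);
    // r: KErrNone, KErrNoMemory, or KErrArgument (bad ID / request too large).
@endcode
*/
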
/**
Will return zones one at a time in the following search patterns until a suitable
zone has been found or it is determined that there is no suitable zone:
- preference order
- address order
Before the first call of a new search sequence the caller must set:
iZoneTmpAddrIndex = -1;
iZoneTmpPrefLink = iZonePrefList.First();

@param aZone On return this will be a pointer to the next zone to search.
@param aState The current search state, i.e. which of the zone orderings to follow.
It will be updated if necessary by this function.
@param aType The type of page to be allocated.
@param aBlockedZoneId The ID of a RAM zone to not allocate into.
@param aBlockRest ETrue if allocation should fail as soon as a blocked zone is reached,
EFalse otherwise. (Currently not used)
@return ETrue if a suitable zone is found, EFalse when the allocation is not possible.
*/
TBool DRamAllocator::NextAllocZone(SZone*& aZone, TZoneSearchState& aState, TZonePageType aType, TUint aBlockedZoneId, TBool aBlockRest)
    {
    TUint currentState = aState;
    TBool r = EFalse;

    for (; currentState < EZoneSearchEnd; currentState++)
        {
        if (currentState == EZoneSearchAddr)
            {
            iZoneTmpAddrIndex++;
            for (; iZoneTmpAddrIndex < (TInt)iNumZones; iZoneTmpAddrIndex++)
                {
                aZone = iZones + iZoneTmpAddrIndex;
                if (aBlockedZoneId != aZone->iId && !NoAllocOfPageType(*aZone, aType))
                    {
                    r = ETrue;
                    goto exit;
                    }
                }
            }
        else
            {
            while (iZoneTmpPrefLink != &iZonePrefList.iA)
                {
                aZone = _LOFF(iZoneTmpPrefLink, SZone, iPrefLink);
                iZoneTmpPrefLink = iZoneTmpPrefLink->iNext; // Update before any re-ordering
                if (aBlockedZoneId != aZone->iId && !NoAllocOfPageType(*aZone, aType))
                    {
                    r = ETrue;
                    goto exit;
                    }
                }
            }
        }
exit:
    __NK_ASSERT_DEBUG((r && currentState < EZoneSearchEnd) || (!r && currentState == EZoneSearchEnd));

    aState = (TZoneSearchState)currentState;
    return r;
    }

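/*
Illustrative note (not part of the original source): callers drive
NextAllocZone() as an iterator, resetting the cursor members before a new
search sequence, as AllocContiguousRam() does below:

@code
    iZoneTmpAddrIndex = -1;
    iZoneTmpPrefLink = iZonePrefList.First();
    TZoneSearchState searchState = EZoneSearchPref;
    SZone* zone;
    while (NextAllocZone(zone, searchState, aType, aBlockedZoneId, aBlockRest))
        {// Attempt to satisfy the request from 'zone'...
        }
@endcode
*/
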
/**
Search through the zones for the requested contiguous RAM, first in preference
order then, if that fails, in address order.

@param aNumPages The number of contiguous pages to find
@param aPhysAddr Will contain the base address of any contiguous run if found
@param aType The page type of the memory to be allocated
@param aAlign Alignment specified as the alignment shift
@param aBlockedZoneId The ID of a zone that can't be allocated into, by default this has no effect
@param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached
in preference ordering. EFalse otherwise.

@return KErrNone on success, KErrNoMemory otherwise
*/
TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));

    M::RamAllocIsLocked();

    // No support for non-fixed pages as this will discard and move
    // pages if required.
    __NK_ASSERT_DEBUG(aType == EPageFixed);
    TInt alignWrtPage = Max(aAlign - KPageShift, 0);
    TUint32 alignmask = (1u << alignWrtPage) - 1;

    // Attempt to find enough pages searching in preference order first then
    // in address order
    TZoneSearchState searchState = EZoneSearchPref;
    SZone* zone;
    SZone* prevZone = NULL;
    TInt carryAll = 0;      // Carry for all pages bma, clear to start new run.
    TInt carryImmov = 0;    // Carry for immovable pages bma, clear to start new run.
    TInt base = 0;
    TInt offset = 0;
    iZoneTmpAddrIndex = -1;
    iZoneTmpPrefLink = iZonePrefList.First();
    while (NextAllocZone(zone, searchState, aType, aBlockedZoneId, aBlockRest))
        {
        // Be sure to start from scratch if zone not contiguous with previous zone
        if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
            {
            carryAll = 0;
            carryImmov = 0;
            }
        prevZone = zone;
        TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
        base = TInt(zone->iPhysBase >> KPageShift);
        TInt runLength;
        __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset));
        offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength);
        __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));

        if (offset >= 0)
            {// Have found enough contiguous pages so return the address of the
            // physical page at the start of the region
            aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
            MarkPagesAllocated(aPhysAddr, aNumPages, aType);

            __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
#ifdef BTRACE_RAM_ALLOCATOR
            BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
#endif
            return KErrNone;
            }
        else
            {// No run found when looking in just the free pages so see if this
            // RAM zone could be used if pages were moved or discarded.
            if (aNumPages > KMaxFreeableContiguousPages)
                {// Can't move or discard any pages so move on to the next RAM zone,
                // taking any run at the end of this RAM zone into account.
                carryImmov = 0;
                continue;
                }
            TBitMapAllocator& bmaImmov = *(zone->iBma[EPageFixed]);
            offset = 0;     // Clear so the whole of the fixed BMA is searched on the first pass.
            do
                {
                __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryImmov=%08x offset=%08x", base, carryImmov, offset));
                offset = bmaImmov.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryImmov, runLength, offset);
                __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
                if (offset >= 0)
                    {// Have found a run in the immovable page bma so attempt to clear
                    // it for the allocation.
                    TPhysAddr addrBase = TPhysAddr((base + offset - carryImmov + alignmask) & ~alignmask) << KPageShift;
                    TPhysAddr addrEnd = addrBase + (aNumPages << KPageShift);

                    // Block the RAM zones containing the contiguous region
                    // from being allocated into when pages are moved or replaced.
                    TPhysAddr addr = addrBase;
                    TInt tmpOffset;
                    SZone* tmpZone = GetZoneAndOffset(addr, tmpOffset);
                    while (addr < addrEnd-1)
                        {
                        tmpZone->iFlags |= KRamZoneFlagTmpBlockAlloc;
                        addr = tmpZone->iPhysEnd;
                        tmpZone++;
                        }

                    addr = addrBase;
                    TInt contigOffset = 0;
                    SZone* contigZone = GetZoneAndOffset(addr, contigOffset);
                    for (; addr != addrEnd; addr += KPageSize, contigOffset++)
                        {
                        if (contigZone->iPhysEnd < addr)
                            {
                            contigZone = GetZoneAndOffset(addr, contigOffset);
                            __NK_ASSERT_DEBUG(contigZone != NULL);
                            }
#ifdef _DEBUG   // This page shouldn't be allocated as fixed, only movable or discardable.
                        __NK_ASSERT_DEBUG(contigZone != NULL);
                        __NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotAllocated(contigOffset, 1));
                        SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(addr);
                        __NK_ASSERT_DEBUG(pageInfo != NULL);
#endif
                        TPhysAddr newAddr;
                        TInt moveRet = M::MovePage(addr, newAddr, contigZone->iId, EFalse);
                        if (moveRet != KErrNone && moveRet != KErrNotFound)
                            {// This page couldn't be moved or discarded so
                            // restart the search at the page after this one.
                            __KTRACE_OPT(KMMU2, 
                                        Kern::Printf("ContigMov fail offset %x moveRet %d addr %x carryImmov %x",
                                        offset, moveRet, addr, carryImmov));
                            // Can't rely on the RAM zone preference ordering being
                            // the same so clear the carries and restart the search
                            // within the current RAM zone, or skip onto the next
                            // one if at the end of this one.
                            carryImmov = 0;
                            carryAll = 0;
                            offset = (addr < zone->iPhysBase)? 0 : contigOffset + 1;
                            __KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail offset %x", offset));
                            break;
                            }
                        }
                    // Unblock the RAM zones containing the contiguous region.
                    TPhysAddr flagAddr = addrBase;
                    tmpZone = GetZoneAndOffset(flagAddr, tmpOffset);
                    while (flagAddr < addrEnd-1)
                        {
                        tmpZone->iFlags &= ~KRamZoneFlagTmpBlockAlloc;
                        flagAddr = tmpZone->iPhysEnd;
                        tmpZone++;
                        }

                    if (addr == addrEnd)
                        {// Cleared all the required pages so allocate them.
                        // Return the address of the physical page at the start of the region.
                        aPhysAddr = addrBase;
                        MarkPagesAllocated(aPhysAddr, aNumPages, aType);

                        __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
#ifdef BTRACE_RAM_ALLOCATOR
                        BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
#endif
                        return KErrNone;
                        }
                    }
                }
            // Keep searching the immovable page bma of the current RAM zone until
            // we have gone past the end of the RAM zone or no run can be found.
            while (offset >= 0 && (TUint)offset < zone->iPhysPages);
            }
        }
    return KErrNoMemory;
    }

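/*
Worked example (not part of the original source) of the alignment arithmetic
used above: for a 1MB-aligned request (aAlign == 20) with KPageShift == 12,
alignWrtPage is 8 and alignmask is 0xFF, so the page index returned by
AllocAligned() is rounded up to a multiple of 256 pages before being shifted
back into a physical address:

@code
    TInt alignWrtPage = Max(20 - KPageShift, 0);        // == 8
    TUint32 alignmask = (1u << alignWrtPage) - 1;       // == 0xFF
    // page index 0x305 rounds up to 0x400, i.e. physical address 0x00400000
    TUint32 page = (0x305 + alignmask) & ~alignmask;    // == 0x400
@endcode
*/
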
/**
Attempt to allocate the contiguous RAM from the specified RAM zones.

NOTE - This method only obeys the KRamZoneFlagNoAlloc and KRamZoneFlagClaiming
flags and not the others.
But as currently only EFixed pages will be allocated using this method that is
the desired behaviour.

@param aZoneIdList An array of the IDs of the RAM zones to allocate from.
@param aZoneIdCount The number of the IDs listed by aZoneIdList.
@param aSize The number of contiguous bytes to find
@param aPhysAddr Will contain the base address of the contiguous run if found
@param aType The page type of the memory to be allocated
@param aAlign Alignment specified as the alignment shift

@return KErrNone on success, KErrNoMemory if allocation couldn't succeed or
the RAM zone has the KRamZoneFlagNoAlloc flag set. KErrArgument if a zone of
aZoneIdList doesn't exist or if aSize is larger than the total size of the zones.
*/
TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam zones 0x%x size 0x%08x align %d",aZoneIdCount, aSize, aAlign));

    M::RamAllocIsLocked();
    __NK_ASSERT_DEBUG(aType == EPageFixed);

    TUint numPages = (aSize + KPageSize - 1) >> KPageShift;
    TInt carry = 0; // must be zero as this is always the start of a new run
    TInt alignWrtPage = Max(aAlign - KPageShift, 0);
    TUint32 alignmask = (1u << alignWrtPage) - 1;
    TInt offset = -1;
    TInt base = 0;

    TUint physPages = 0;
    TUint* zoneIdPtr = aZoneIdList;
    TUint* zoneIdEnd = aZoneIdList + aZoneIdCount;
    SZone* prevZone = NULL;
    for (; zoneIdPtr < zoneIdEnd; zoneIdPtr++)
        {
        SZone* zone = ZoneFromId(*zoneIdPtr);
        if (zone == NULL)
            {// Couldn't find a zone with this ID.
            return KErrArgument;
            }
        physPages += zone->iPhysPages;

        if (offset >= 0 ||
            (zone->iFlags & (KRamZoneFlagNoAlloc|KRamZoneFlagClaiming)))
            {// Keep searching through the RAM zones if the allocation
            // has succeeded, to ensure the ID list is always fully verified, or
            // if this zone is currently blocked for further allocations.
            continue;
            }

        // Be sure to start from scratch if zone not contiguous with previous zone
        if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
            {
            carry = 0;
            }
        prevZone = zone;

        TInt len;
        TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
        base = TInt(zone->iPhysBase >> KPageShift);

        __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: aBase=%08x aCarry=%08x", base, carry));
        offset = bmaAll.AllocAligned(numPages, alignWrtPage, base, EFalse, carry, len);
        __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
        }

    if (physPages < numPages)
        {// The allocation requested is too large for the specified RAM zones.
        return KErrArgument;
        }

    if (offset < 0)
        {// The allocation failed.
        return KErrNoMemory;
        }

    // Have found enough contiguous pages so mark the pages allocated and
    // return the address of the physical page at the start of the region.
    aPhysAddr = TPhysAddr((base + offset - carry + alignmask) & ~alignmask) << KPageShift;
    MarkPagesAllocated(aPhysAddr, numPages, aType);

    __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam returns %08x",aPhysAddr));
#ifdef BTRACE_RAM_ALLOCATOR
    BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocZoneContiguousRam, aType, numPages, aPhysAddr);
#endif
    return KErrNone;
    }

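/*
Illustrative note (not part of the original source): the 'carry' passed
between successive AllocAligned() calls lets a run spill across physically
adjacent zones in the ID list; it is reset to zero whenever the next zone is
not contiguous with the previous one, so a run can never span a physical gap.
*/
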
/**
Attempt to set the specified contiguous block of RAM pages to be either
allocated or free.

@param aBase The base address of the RAM to update.
@param aSize The number of contiguous bytes of RAM to update.
@param aState Set to ETrue to free the RAM, EFalse to allocate the RAM.
@param aType The type of the pages being updated.

@return KErrNone on success, KErrArgument if aBase is an invalid address,
KErrGeneral if a page being marked free is already free,
KErrInUse if a page being marked allocated is already allocated.
*/
TInt DRamAllocator::SetPhysicalRamState(TPhysAddr aBase, TInt aSize, TBool aState, TZonePageType aType)
    {
    M::RamAllocIsLocked();

    __KTRACE_OPT(KMMU,Kern::Printf("SetPhysicalRamState(%08x,%x,%d)",aBase,aSize,aState?1:0));
    TUint32 pageMask = KPageSize-1;
    aSize += (aBase & pageMask);
    aBase &= ~pageMask;
    TInt npages = (aSize + pageMask) >> KPageShift;
    __KTRACE_OPT(KMMU,Kern::Printf("Rounded base %08x npages=%x",aBase,npages));
    TInt baseOffset;
    SZone* baseZone = GetZoneAndOffset(aBase, baseOffset);
    if (!baseZone || (TUint32)aSize > (iPhysAddrTop - aBase + 1))
        {
        return KErrArgument;
        }
    SZone* zone = baseZone;
    SZone* zoneEnd = iZones + iNumZones;
    TPhysAddr base = aBase;
    TInt pagesLeft = npages;
    TInt offset = baseOffset;
    TInt pageCount = -1;
    __KTRACE_OPT(KMMU2,Kern::Printf("Zone %x page index %x z=%08x zE=%08x n=%x base=%08x",zone->iId, offset, zone, zoneEnd, pagesLeft, base));
    for (; pagesLeft && zone < zoneEnd; ++zone)
        {
        if (zone->iPhysBase + (offset << KPageShift) != base)
            {// Zone not contiguous with the current run of pages, so we have been
            // asked to set the state of non-existent pages.
            return KErrArgument;
            }

        TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
        TInt zp_rem = bmaAll.iSize - offset;
        pageCount = Min(pagesLeft, zp_rem);
        __KTRACE_OPT(KMMU2,Kern::Printf("Zone %x pages %x+%x base %08x", zone->iId, offset, pageCount, base));
        if (aState)
            {
            if (bmaAll.NotAllocated(offset, pageCount))
                {
                return KErrGeneral;
                }
            }
        else
            {
            if (bmaAll.NotFree(offset, pageCount))
                {
                return KErrInUse;
                }
            }
        pagesLeft -= pageCount;
        offset = 0;
        base += (TPhysAddr(pageCount) << KPageShift);
        }
    if (pagesLeft)
        {
        return KErrArgument;    // not all of the specified range exists
        }

    iTotalFreeRamPages += (aState ? npages : -npages);
    zone = baseZone;
    offset = baseOffset;
    for (pagesLeft = npages; pagesLeft; pagesLeft -= pageCount)
        {
        TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
        // Unknown and fixed pages share a bit map.
        TBitMapAllocator& bmaType = *(zone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
        TInt zp_rem = bmaAll.iSize - offset;
        pageCount = Min(pagesLeft, zp_rem);
        if (aState)
            {
            bmaAll.Free(offset, pageCount);
            bmaType.Free(offset, pageCount);
            ZoneFreePages(zone, pageCount, aType);
            }
        else
            {
            bmaAll.Alloc(offset, pageCount);
            bmaType.Alloc(offset, pageCount);
            ZoneAllocPages(zone, pageCount, aType);
            }
        __KTRACE_OPT(KMMU2,Kern::Printf("Zone %d pages %x+%x base %08x",zone-iZones, offset, pageCount, base));
        ++zone;
        offset = 0;
        }
    return KErrNone;
    }

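/*
Worked example (not part of the original source) of the rounding above: with
KPageSize == 0x1000, a request covering 0x1800 bytes at 0x80000800 becomes
aSize = 0x1800 + 0x800 = 0x2000 and aBase = 0x80000000, so
npages = (0x2000 + 0xFFF) >> 12 == 2 whole pages.
*/
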
/** Update the allocated page counts for the zone that the page is allocated into.

@param aPageInfo The page information structure of the page whose type is changing
@param aOldType The type the page was allocated as
@param aNewType The type the page is changing to
*/
void DRamAllocator::ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldType, TZonePageType aNewType)
    {
    TInt offset;
    SZone* zone = GetZoneAndOffset(aPageInfo->PhysAddr(), offset);
#ifdef _DEBUG
    // *********** The system lock may be held while this is invoked so don't do ********
    // *********** anything too slow and definitely don't call the zone callback ********
    M::RamAllocIsLocked();
    CHECK_PRECONDITIONS((MASK_THREAD_CRITICAL) & ~MASK_NO_FAST_MUTEX, "DRamAllocator::ChangePageType");

    // Get the zone the page is in and, on debug builds, check that it is allocated.
    if (zone == NULL || zone->iBma[KBmaAllPages]->NotAllocated(offset, 1))
        {
        Panic(EAllocRamPagesInconsistent);
        }

    // Check if adjusting the counts is valid, i.e. won't cause a roll over.
    if (zone->iAllocPages[aOldType] - 1 > zone->iAllocPages[aOldType] ||
        zone->iAllocPages[aNewType] + 1 < zone->iAllocPages[aNewType])
        {
        __KTRACE_OPT(KMMU, Kern::Printf("ChangePageType Alloc Unk %x Fx %x Mv %x Dis %x",zone->iAllocPages[EPageUnknown],
                    zone->iAllocPages[EPageFixed], zone->iAllocPages[EPageMovable],zone->iAllocPages[EPageDiscard]));
        Panic(EZonesCountErr);
        }
#endif

    // Update the counts and bmas
    zone->iAllocPages[aOldType]--;
    zone->iBma[aOldType]->Free(offset);
    zone->iAllocPages[aNewType]++;
    zone->iBma[aNewType]->Alloc(offset, 1);

    __KTRACE_OPT(KMMU2, Kern::Printf("ChangePageType Alloc Unk %x Fx %x Mv %x Dis %x",zone->iAllocPages[EPageUnknown],
                zone->iAllocPages[EPageFixed], zone->iAllocPages[EPageMovable],zone->iAllocPages[EPageDiscard]));
#ifdef BTRACE_RAM_ALLOCATOR
    BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocChangePageType, aNewType, aPageInfo->PhysAddr());
#endif
    }

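/*
Illustrative note (not part of the original source): the debug "roll over"
test above relies on unsigned wrap-around (assuming, as the test implies,
that iAllocPages[] holds unsigned counts): if iAllocPages[aOldType] is 0 then
0u - 1 wraps to 0xFFFFFFFF, which is greater than 0, so decrementing an empty
count is caught before the bookkeeping is corrupted.
*/
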
/**
Get the next page in this zone that is allocated after this one.

@param aZone The zone to find the next allocated page in.
@param aOffset On entry this is the offset from which the next allocated
page in the zone should be found, on return it will be the offset
of the next allocated page.
@return KErrNone if a next allocated page could be found, KErrNotFound if no more pages in
the zone after aOffset are allocated, KErrArgument if aOffset is outside the zone.
*/
TInt DRamAllocator::NextAllocatedPage(SZone* aZone, TUint& aOffset, TZonePageType aType) const
    {
    const TUint KWordAlignMask = KMaxTUint32 << 5;

    M::RamAllocIsLocked();

    __NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
    // Makes things simpler for bma selection.
    __NK_ASSERT_DEBUG(aType != EPageUnknown);

    if (aOffset >= aZone->iPhysPages)
        {// Starting point is outside the zone
        return KErrArgument;
        }

    TUint offset = aOffset;
    TUint endOffset = aZone->iPhysPages;
    TUint endOffsetAligned = endOffset & KWordAlignMask;

    // Select the BMA to search.
    TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
    TUint32* map = &(aZone->iBma[bmaIndex]->iMap[offset >> 5]);
    TUint32 bits = *map++;

    // Set bits for pages before 'offset' (i.e. ones we want to ignore)...
    bits |= ~(KMaxTUint32 >> (offset & ~KWordAlignMask));

    // Find the first bit map word from aOffset in aZone with allocated pages
    while (bits == KMaxTUint32 && offset < endOffsetAligned)
        {
        bits = *map++;
        offset = (offset + 32) & KWordAlignMask;
        }

    if (offset >= endOffsetAligned && endOffset != endOffsetAligned)
        {// Have reached the last bit mask word so set the bits that are
        // outside of the zone so that they are ignored.
        bits |= KMaxTUint32 >> (endOffset - endOffsetAligned);
        }

    if (bits == KMaxTUint32)
        {// No allocated pages found after aOffset in aZone.
        return KErrNotFound;
        }

    // Now we have a word with allocated pages in it so determine the exact
    // offset of the next allocated page
    TUint32 mask = 0x80000000 >> (offset & ~KWordAlignMask);
    while (bits & mask)
        {
        mask >>= 1;
        offset++;
        }

    if (offset >= endOffset)
        {// Reached the end of the zone without finding an allocated page after aOffset
        return KErrNotFound;
        }

    // Should definitely have found an allocated page within aZone's pages
    __NK_ASSERT_DEBUG(mask != 0 && !(bits & mask) && offset < aZone->iPhysPages);

    aOffset = offset;
    return KErrNone;
    }

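/*
Worked example (not part of the original source): the bitmap packs one bit
per page, most significant bit first within each 32-bit word, with a set bit
meaning free (as the scan above implies: a word equal to KMaxTUint32 holds no
allocated pages):

@code
    // mask for offset 5: 0x80000000 >> 5 == 0x04000000
    // a word of 0xFBFFFFFF has only that bit clear, so
    //     while (bits & mask) { mask >>= 1; offset++; }
    // stops with offset == 5, the next allocated page.
@endcode
*/
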
/**
See if any of the least preferable RAM zones can be emptied. If they can then
initialise the allocator for a general defragmentation operation.

Stage 0 of the general defrag is to ensure that there are enough free
pages in the more preferable RAM zones to be in use after the general defrag
for the movable page allocations. This is achieved by discarding the
required amount of discardable pages from the more preferable RAM zones
to be in use after the general defrag.

@param aStage On return this will contain the stage the general
defragmentation should begin at, i.e. if no RAM
zones can be cleared then just perform the final
tidying stage.
@param aRequiredToBeDiscarded On return this will contain the number of
discardable pages that need to be discarded
from the RAM zones to be in use after the
general defrag.
@return Pointer to the RAM zone object that may potentially have pages
discarded by the general defrag. This will be NULL if no suitable
RAM zone could be found.
*/
SZone* DRamAllocator::GeneralDefragStart0(TGenDefragStage& aStage, TUint& aRequiredToBeDiscarded)
    {
#ifdef _DEBUG
    if (!K::Initialising)
        {
        M::RamAllocIsLocked();
#ifdef __VERIFY_LEASTMOVDIS
        VerifyLeastPrefMovDis();
#endif
        }
    // Any previous general defrag operation must have ended.
    __NK_ASSERT_DEBUG(iZoneGeneralPrefLink == NULL);
    __NK_ASSERT_DEBUG(iZoneGeneralTmpLink == NULL);
#endif

    if (iNumZones == 1)
        {
        // Only have one RAM zone so a defrag can't do anything.
        return NULL;
        }

    // Determine how many movable or discardable pages are required to be allocated.
    TUint requiredPagesDis = 0;
    TUint requiredPagesMov = 0;
    TUint firstClearableInUseRank = 0;
    SDblQueLink* link = iZoneLeastMovDis;
    do
        {
        SZone& zone = *_LOFF(link, SZone, iPrefLink);
        requiredPagesDis += zone.iAllocPages[EPageDiscard];
        requiredPagesMov += zone.iAllocPages[EPageMovable];

        if (!firstClearableInUseRank &&
            (zone.iAllocPages[EPageMovable] || zone.iAllocPages[EPageDiscard]) &&
            !zone.iAllocPages[EPageFixed] && !zone.iAllocPages[EPageUnknown])
            {// This is the least preferable RAM zone that has movable or
            // discardable pages but may be clearable as it has no immovable pages.
            firstClearableInUseRank = zone.iPrefRank;
            }

        // Reset the KRamZoneFlagGenDefrag flag bit for each RAM zone to be defragged.
        zone.iFlags &= ~(KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock);

        link = link->iPrev;
        }
    while (link != &iZonePrefList.iA);

    // Adjust the number of discardable pages for those that are freeable.
    // Dirty pages will be moved rather than discarded so they are not freeable
    // and we must make sure that we have enough space in zones for these dirty
    // paged pages.
    __NK_ASSERT_DEBUG(requiredPagesDis >= (TUint)M::NumberOfFreeDpPages());
    requiredPagesDis -= M::NumberOfFreeDpPages();
    TUint totalDirtyPagesDis = M::NumberOfDirtyDpPages();
    if (requiredPagesDis < totalDirtyPagesDis)
        requiredPagesDis = totalDirtyPagesDis;

    // Determine which is the least preferable RAM zone that needs to be
    // in use for the required number of movable and discardable pages.
    TUint onlyPagesDis = 0;     // Number of pages in RAM zones for discard only.
    TUint onlyPagesMov = 0;     // Number of pages in RAM zones for movable only.
    TUint totalPagesDis = 0;    // Total pages found so far for discardable pages.
    TUint totalPagesMov = 0;    // Total pages found so far for movable pages.
    TUint totalCurrentDis = 0;  // Number of allocated discardable pages found in
                                // RAM zones to be in use after the general defrag.
    TUint totalCurrentMov = 0;  // Number of allocated movable pages found in
                                // RAM zones to be in use after the general defrag.
    TUint totalCurrentFree = 0; // The current number of free pages in the RAM zones
                                // to be in use after the general defrag.
    iZoneGeneralPrefLink = &iZonePrefList.iA;
    while (iZoneGeneralPrefLink != iZoneLeastMovDis &&
            (requiredPagesMov > totalPagesMov ||
            requiredPagesDis > totalPagesDis))
        {
        iZoneGeneralPrefLink = iZoneGeneralPrefLink->iNext;
        SZone& zone = *_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink);
        // Update the current totals.
        totalCurrentDis += zone.iAllocPages[EPageDiscard];
        totalCurrentMov += zone.iAllocPages[EPageMovable];
        totalCurrentFree += zone.iFreePages;

        TBool onlyAllocDis = NoAllocOfPageType(zone, EPageMovable);
        TBool onlyAllocMov = NoAllocOfPageType(zone, EPageDiscard);
        if (!onlyAllocMov || !onlyAllocDis)
            {// Either movable, discardable or both can be allocated in this zone.
            TUint zonePagesFree = zone.iFreePages;
            TUint zonePagesDis = zone.iAllocPages[EPageDiscard];
            TUint zonePagesMov = zone.iAllocPages[EPageMovable];
            // Total pages in this RAM zone that can be used for either
            // discardable or movable pages.
            TUint zonePagesGen = zonePagesDis + zonePagesMov + zonePagesFree;
            if (onlyAllocMov)
                {
                if (requiredPagesDis > totalPagesDis)
                    {// No further discardable pages can be allocated into
                    // this RAM zone but consider any that already are.
                    TUint usedPages = Min(  (TInt)zonePagesDis,
                                            requiredPagesDis - totalPagesDis);
                    totalPagesDis += usedPages;
                    zonePagesDis -= usedPages;
                    }
                TUint zoneOnlyMov = zonePagesDis + zonePagesMov + zonePagesFree;
                onlyPagesMov += zoneOnlyMov;
                totalPagesMov += zoneOnlyMov;
                __KTRACE_OPT(KMMU2, Kern::Printf("onlyMov ID%x tot %x",
                            zone.iId, zoneOnlyMov));
                zonePagesGen = 0;   // These pages aren't general purpose.
                }
            if (onlyAllocDis)
                {
                if (requiredPagesMov > totalPagesMov)
                    {// No further movable pages can be allocated into
                    // this RAM zone but consider any that already are.
                    TUint usedPages = Min(  (TInt)zonePagesMov,
                                            requiredPagesMov - totalPagesMov);
                    totalPagesMov += usedPages;
                    zonePagesMov -= usedPages;
                    }
                TUint zoneOnlyDis = zonePagesDis + zonePagesMov + zonePagesFree;
                onlyPagesDis += zoneOnlyDis;
                totalPagesDis += zoneOnlyDis;
                __KTRACE_OPT(KMMU2, Kern::Printf("onlyDis ID%x tot %x",
                            zone.iId, zoneOnlyDis));
                zonePagesGen = 0;   // These pages aren't general purpose.
                }

            if (requiredPagesDis > totalPagesDis)
                {// Need some discardable pages so first steal any spare
                // movable pages for discardable allocations.
                if (totalPagesMov > requiredPagesMov)
                    {// Use any spare movable pages that can also be
                    // used for discardable allocations for discardable.
                    __NK_ASSERT_DEBUG(onlyPagesMov);
                    TUint spareMovPages = Min((TInt)(totalPagesMov - onlyPagesMov),
                                                totalPagesMov - requiredPagesMov);
                    totalPagesMov -= spareMovPages;
                    totalPagesDis += spareMovPages;
                    __KTRACE_OPT(KMMU2, Kern::Printf("genDis Mov ID%x used%x",
                                zone.iId, spareMovPages));
                    }
                if (requiredPagesDis > totalPagesDis)
                    {
                    // Need more discardable pages but only grab those required.
                    TUint usedPages = Min(  (TInt)zonePagesGen,
                                            requiredPagesDis - totalPagesDis);
                    totalPagesDis += usedPages;
                    zonePagesGen -= usedPages;
                    __KTRACE_OPT(KMMU2, Kern::Printf("genDis ID%x used%x",
                                zone.iId, usedPages));
                    }
                }
            if (requiredPagesMov > totalPagesMov)
                {// Need some movable pages so first steal any spare
                // discardable pages for movable allocations.
                if (totalPagesDis > requiredPagesDis)
                    {// Use any spare discardable pages that can also be
                    // used for movable allocations for movable.
                    __NK_ASSERT_DEBUG(onlyPagesDis);
                    TUint spareDisPages = Min((TInt)(totalPagesDis - onlyPagesDis),
                                                totalPagesDis - requiredPagesDis);
                    totalPagesDis -= spareDisPages;
                    totalPagesMov += spareDisPages;
                    __KTRACE_OPT(KMMU2, Kern::Printf("genMov Dis ID%x used%x",
                                zone.iId, spareDisPages));
                    }
                if (requiredPagesMov > totalPagesMov)
                    {// Still need some movable pages so grab them from this zone.
                    // Just grab all of the general pages left as the discardable pages
                    // will have already grabbed some if they were needed.
                    totalPagesMov += zonePagesGen;
                    __KTRACE_OPT(KMMU2, Kern::Printf("genMov ID%x used%x",
                                zone.iId, zonePagesGen));
                    }
                }
            }
        }

    __KTRACE_OPT(KMMU, Kern::Printf("gen least in use ID 0x%x",
                (_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink))->iId));
    __NK_ASSERT_DEBUG(_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink)->iPrefRank <=
                        iZoneLeastMovDisRank);

    if (iZoneGeneralPrefLink != iZoneLeastMovDis &&
        firstClearableInUseRank > _LOFF(iZoneGeneralPrefLink, SZone, iPrefLink)->iPrefRank)
        {// We can reduce the number of RAM zones in use so block all the RAM
        // zones not to be in use after the defrag from being allocated into
        // by the general defrag.
        link = iZoneLeastMovDis;
        while (link != iZoneGeneralPrefLink)
            {
            SZone& zone = *_LOFF(link, SZone, iPrefLink);
            zone.iFlags |= KRamZoneFlagGenDefragBlock;
            link = link->iPrev;
            }

        // Determine how many pages will need to be discarded to allow the general
        // defrag to succeed in using the minimum RAM zones required.
        if (requiredPagesDis > totalCurrentDis)
            {// Need to replace some discardable pages in RAM zones to be
            // cleared with pages in the RAM zones to be in use after the
            // general defrag.
            __NK_ASSERT_DEBUG(totalCurrentFree >= requiredPagesDis - totalCurrentDis);
            totalCurrentFree -= requiredPagesDis - totalCurrentDis;
            }
        TUint totalForMov = totalCurrentFree + totalCurrentMov;
        if (requiredPagesMov > totalForMov)
            {// Need to discard some pages from the least preferable RAM zone to be
            // in use after the general defrag for the movable pages to be moved to.
            aRequiredToBeDiscarded = requiredPagesMov - totalForMov;
            __NK_ASSERT_DEBUG(aRequiredToBeDiscarded <= totalCurrentDis);
            __NK_ASSERT_DEBUG(totalCurrentDis - aRequiredToBeDiscarded >= requiredPagesDis);
            }

        // This stage should discard pages from the least preferable RAM zones
        // to be in use after the general defrag to save the pages having to
        // be moved again by the final stage.
        iZoneGeneralStage = EGenDefragStage0;
        aStage = EGenDefragStage1;  // Defrag::GeneralDefrag() requires this.
        iZoneGeneralTmpLink = iZoneGeneralPrefLink;
|
sl@0
|
2407 |
return GeneralDefragNextZone0();
|
sl@0
|
2408 |
}
|
sl@0
|
2409 |
|
sl@0
|
2410 |
// General defrag can't clear any RAM zones so jump to tidying stage.
|
sl@0
|
2411 |
aStage = EGenDefragStage2;
|
sl@0
|
2412 |
iZoneGeneralStage = EGenDefragStage2;
|
sl@0
|
2413 |
return NULL;
|
sl@0
|
2414 |
}
|
sl@0
|
2415 |
|
sl@0
|
2416 |
|
sl@0
|
2417 |
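/*
How the three stages fit together, as a minimal sketch. The caller shown here
is hypothetical (in the real system Defrag::GeneralDefrag() drives these
methods while holding the RamAlloc mutex), the parameter types are simplified,
and DiscardPagesFrom()/EmptyZone()/TidyZone() are stand-ins for the actual
page discarding and moving code:

	TUint stage;			// receives a stage value via aStage, as set above
	TUint toDiscard;		// receives aRequiredToBeDiscarded, as set above
	SZone* zone = alloc.GeneralDefragStart0(stage, toDiscard);
	while (zone)
		{// Stage 0: discard pages from the zones that will stay in use.
		DiscardPagesFrom(*zone);
		zone = alloc.GeneralDefragNextZone0();
		}
	for (zone = alloc.GeneralDefragStart1(); zone; zone = alloc.GeneralDefragNextZone1())
		EmptyZone(*zone);	// Stage 1: clear the zones that won't stay in use.
	for (zone = alloc.GeneralDefragStart2(); zone; zone = alloc.GeneralDefragNextZone2())
		TidyZone(*zone);	// Stage 2: make room for fixed pages in preferable zones.
	alloc.GeneralDefragEnd();
*/
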
/**
Find the next RAM zone that is suitable for stage 0 of a general defrag.
This should only be called after a preceding call to
DRamAllocator::GeneralDefragStart0().

This goes through the RAM zones from the least preferable to be in use
after the general defrag to the most preferable RAM zone. It will
return each time it finds a RAM zone with discardable pages allocated into it.

@return Pointer to the RAM zone object that may potentially have pages
discarded by the general defrag. This will be NULL if no suitable
RAM zone could be found.
*/
SZone* DRamAllocator::GeneralDefragNextZone0()
	{
	M::RamAllocIsLocked();
	// Any previous general defrag operation must have ended.
	__NK_ASSERT_DEBUG(iZoneGeneralPrefLink != NULL);
	__NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
	__NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage0);

	while (iZoneGeneralTmpLink != &iZonePrefList.iA)
		{
		SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);

		// Save the RAM zone that is currently more preferable than this one
		// before any reordering.
		iZoneGeneralTmpLink = iZoneGeneralTmpLink->iPrev;

		if (zone->iFlags & KRamZoneFlagGenDefrag)
			{// This zone has been selected for a general defrag already.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext0 zone ID 0x%x already defragged",
							zone->iId));
			return NULL;
			}
		zone->iFlags |= KRamZoneFlagGenDefrag;
		if (zone->iAllocPages[EPageDiscard])
			{
			// A RAM zone that may have pages discarded by a general defrag has been found.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext0 zone ID 0x%x", zone->iId));
			return zone;
			}
		}
	return NULL;
	}

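/*
Orientation of the zone preference list walked by these stage functions, as
reconstructed from the code (iZonePrefList.First() is the most preferable
zone, iPrev moves towards more preferable, iNext towards less preferable,
and iZonePrefList.iA is the list anchor):

	First()                                        anchor (iZonePrefList.iA)
	   |                                                      |
	most preferable <--iPrev-- ... --iNext--> least preferable

Stages 0 and 1 walk from the least preferable end towards First() via iPrev;
stage 2 walks from First() towards the least preferable end via iNext.
*/
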
/**
Initialise this stage of a general defrag operation which will attempt
to clear all the RAM zones not to be in use once the general defrag
has completed.

@return Pointer to the RAM zone object that may potentially be cleared
by the general defrag. This will be NULL if no suitable
RAM zone could be found.
*/
SZone* DRamAllocator::GeneralDefragStart1()
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(iNumZones == 1 || iZoneGeneralPrefLink != NULL);

	if (iNumZones == 1)
		{// On a device with only one RAM zone no defrag is possible, so return NULL.
		return NULL;
		}

	// Clear the general defrag flag of each RAM zone to be defragged.
	SDblQueLink* link = iZoneGeneralPrefLink;
	for (; link != &iZonePrefList.iA; link = link->iPrev)
		{
		SZone& zone = *_LOFF(link, SZone, iPrefLink);
		zone.iFlags &= ~KRamZoneFlagGenDefrag;
		}

	// Flags cleared so now start this stage from the least preferable RAM zone
	// currently in use.
	iZoneGeneralTmpLink = iZoneLeastMovDis;
	iZoneGeneralStage = EGenDefragStage1;
	return GeneralDefragNextZone1();
	}

/**
Find the next RAM zone that is suitable for stage 1 of a general defrag.
This should only be called after a preceding call to
DRamAllocator::GeneralDefragStart1().

This goes through the RAM zones, from the least preferable zone that currently
has movable or discardable pages allocated into it, up to the least preferable
RAM zone that is to be in use after the general defrag. It will return each
time it finds a RAM zone with movable and/or discardable pages allocated into it.

@return Pointer to the RAM zone object that may potentially be cleared by a
general defrag. This will be NULL if no suitable zone could be found.
*/
SZone* DRamAllocator::GeneralDefragNextZone1()
	{
	M::RamAllocIsLocked();
	// Any previous general defrag operation must have ended.
	__NK_ASSERT_DEBUG(iZoneGeneralPrefLink != NULL);
	__NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
	__NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage1);

	// If we hit the target least preferable RAM zone to be in use once
	// the defrag has completed then stop this stage of the general defrag.

	// Should never skip past iZoneGeneralPrefLink.
	__NK_ASSERT_DEBUG(iZoneGeneralTmpLink != &iZonePrefList.iA);

	while (iZoneGeneralTmpLink != iZoneGeneralPrefLink)
		{
		SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);

		// Save the RAM zone that is currently more preferable than this one
		// before any reordering.
		iZoneGeneralTmpLink = iZoneGeneralTmpLink->iPrev;

		if (zone->iFlags & KRamZoneFlagGenDefrag)
			{// This zone has been selected for a general defrag already.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 zone ID 0x%x already defragged",
							zone->iId));
			return NULL;
			}
		zone->iFlags |= KRamZoneFlagGenDefrag;
		if (zone->iAllocPages[EPageMovable] || zone->iAllocPages[EPageDiscard])
			{
			// A RAM zone that may be cleared by a general defrag has been found.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 zone ID 0x%x", zone->iId));
			return zone;
			}
		}
	__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 reached general target"));
	return NULL;
	}

/**
Initialise stage 2 of a general defrag operation.

Stage 2 creates room for fixed page allocations in the more preferable RAM
zones in use by moving pages into the least preferable RAM zones in use.

@return Pointer to the RAM zone object that may potentially be cleared of
movable and discardable pages by the general defrag. This will be
NULL if no suitable zone could be found.
*/
SZone* DRamAllocator::GeneralDefragStart2()
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(iNumZones == 1 || iZoneGeneralPrefLink != NULL);

	if (iNumZones == 1)
		{// On a device with only one RAM zone no defrag is possible, so return NULL.
		return NULL;
		}

	// Clear the general defrag flags of each RAM zone to be defragged.
	SDblQueLink* link = iZoneLeastMovDis;
	for (; link != &iZonePrefList.iA; link = link->iPrev)
		{
		SZone& zone = *_LOFF(link, SZone, iPrefLink);
		zone.iFlags &= ~(KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock);
		}

	// Flags cleared so now start the 2nd stage from the most preferable RAM zone.
	iZoneGeneralTmpLink = iZonePrefList.First();
	iZoneGeneralStage = EGenDefragStage2;
	return GeneralDefragNextZone2();
	}

/**
Find the next RAM zone that is suitable for this stage of general defrag.
This should only be called after a preceding call to
DRamAllocator::GeneralDefragStart2().

This goes through the RAM zones from the most preferable to the least
preferable RAM zone that has movable and/or discardable pages allocated
into it. It will return each time it finds a RAM zone with movable and/or
discardable pages allocated into it.

@return Pointer to the RAM zone object that may potentially be cleared of
movable and discardable pages by the general defrag. This will be
NULL if no suitable zone could be found.
*/
SZone* DRamAllocator::GeneralDefragNextZone2()
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
	__NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage2);

	while (iZoneGeneralTmpLink != iZoneLeastMovDis)
		{
		SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);

		// Save the RAM zone that is currently less preferable than this one
		// before any reordering.
		iZoneGeneralTmpLink = iZoneGeneralTmpLink->iNext;

		if (zone->iFlags & KRamZoneFlagGenDefrag)
			{// This zone has been selected for a general defrag already.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 zone ID 0x%x already defragged", zone->iId));
			return NULL;
			}
		zone->iFlags |= KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock;
		if (zone->iAllocPages[EPageMovable] || zone->iAllocPages[EPageDiscard])
			{// A RAM zone that may be cleared by a general defrag has been found.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 zone ID 0x%x", zone->iId));
			return zone;
			}
		}
	__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 reached general target"));
	return NULL;
	}

/**
Inform the allocator that a general defragmentation operation has completed.
*/
void DRamAllocator::GeneralDefragEnd()
	{
#ifdef _DEBUG
	if (!K::Initialising)
		{
		M::RamAllocIsLocked();
#ifdef __VERIFY_LEASTMOVDIS
		VerifyLeastPrefMovDis();
#endif
		}
#endif
	// Reset the general defrag preference link as it is no longer required.
	iZoneGeneralPrefLink = NULL;
	iZoneGeneralTmpLink = NULL;
	}

/**
Calculate the number of free pages in all the RAM zones to be in use
once the general defragmentation operation has completed.

@param aType The type of free pages to find in the higher priority zones.
@return The number of free pages in the RAM zones intended to be in use
after the general defrag operation has completed.
*/
TUint DRamAllocator::GenDefragFreePages(TZonePageType aType) const
	{
	M::RamAllocIsLocked();

	if (iZoneGeneralStage == EGenDefragStage2)
		{// Second stage of the general defrag, where the RAM zone doesn't have to be emptied.
		return KMaxTUint;
		}
	TUint totalFree = 0;
	SDblQueLink* link = iZoneGeneralPrefLink;
	for (; link != &iZonePrefList.iA; link = link->iPrev)
		{
		SZone& zone = *_LOFF(link, SZone, iPrefLink);
		if (NoAllocOfPageType(zone, aType) ||
			zone.iFlags & KRamZoneFlagGenDefragBlock)
			{
			continue;
			}
		// This zone has free space for this type of page.
		totalFree += zone.iFreePages;
		}
	return totalFree;
	}

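/*
Worked example with hypothetical figures: if the zones from iZoneGeneralPrefLink
towards the most preferable end have 0x10 free pages, 0x0 free pages (blocked
with KRamZoneFlagGenDefragBlock) and 0x20 free pages respectively, the loop
above reports 0x10 + 0x20 = 0x30 free pages for an allowed page type; the
blocked zone is skipped. During stage 2 the answer is always KMaxTUint as no
zone needs to be emptied.
*/
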
/** Mark the RAM zone as being claimed to stop any further allocations.
@param aZone The zone to stop allocations to.

@pre RamAlloc mutex held.
@post RamAlloc mutex held.
*/
void DRamAllocator::ZoneClaimStart(SZone& aZone)
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(!(aZone.iFlags & KRamZoneFlagClaiming));

	aZone.iFlags |= KRamZoneFlagClaiming;

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
#endif
	}


/** Mark the RAM zone as not being claimed to allow allocations.
@param aZone The zone to allow allocations into.

@pre RamAlloc mutex held.
@post RamAlloc mutex held.
*/
void DRamAllocator::ZoneClaimEnd(SZone& aZone)
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(aZone.iFlags & KRamZoneFlagClaiming);

	aZone.iFlags &= ~KRamZoneFlagClaiming;

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
#endif
	}

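/*
Typical bracketing of a zone claim, as a sketch; the step in the middle is
performed by other code and is only indicated here:

	ZoneClaimStart(zone);	// KRamZoneFlagClaiming set: no further allocations
	// ... remove or move the zone's remaining allocated pages ...
	ZoneClaimEnd(zone);		// flag cleared: allocations are allowed again
*/
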
/** Mark the RAM zone so that any allocation or frees from it can be detected.
Useful for defragging.
@param aZone The zone to mark.
@pre RamAlloc mutex held
@post RamAlloc mutex held
*/
void DRamAllocator::ZoneMark(SZone& aZone)
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(!(aZone.iFlags & KRamZoneFlagMark));

	aZone.iFlags |= KRamZoneFlagMark;

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
#endif
	}

/** Unmark the RAM zone.
Useful for defragging.
@param aZone The zone to unmark.
@return ETrue if the RAM zone is inactive (its mark is still set), EFalse otherwise.
@pre RamAlloc mutex held
@post RamAlloc mutex held
*/
TBool DRamAllocator::ZoneUnmark(SZone& aZone)
	{
	M::RamAllocIsLocked();

	TInt r = aZone.iFlags & KRamZoneFlagMark;
	aZone.iFlags &= ~KRamZoneFlagMark;

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
#endif
	return r;
	}

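/*
Detection sketch (DoWork() is a hypothetical operation that may or may not
touch the zone): mark, do the work, then unmark and test. A true result from
ZoneUnmark() means the mark survived, i.e. nothing was allocated or freed in
the zone in between:

	ZoneMark(zone);
	DoWork();
	if (ZoneUnmark(zone))
		{// the zone was untouched by DoWork()
		}
*/
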
/** Determine whether it is OK to allocate the specified page type
to the RAM zone.

This should be used by all functions that search through the zones when
attempting to allocate pages.

@return ETrue if this page type shouldn't be allocated into the RAM zone,
EFalse if it is OK to allocate that page type into the RAM zone.
*/
TBool DRamAllocator::NoAllocOfPageType(SZone& aZone, TZonePageType aType) const
	{
	TUint8 flagMask = 1 << (aType - KPageTypeAllocBase);
	return (aZone.iFlags & (KRamZoneFlagClaiming|KRamZoneFlagNoAlloc|KRamZoneFlagTmpBlockAlloc)) ||
			(aZone.iFlags & flagMask);
	}

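/*
Flag-mask arithmetic: each allocatable page type owns one bit of iFlags,
offset from KPageTypeAllocBase. For instance, the first allocatable type
(aType == KPageTypeAllocBase) gives flagMask = 1 << 0 == 0x01, the next type
gives 0x02, and so on (the concrete enumerator ordering lives in the
page-type enum, not in this file). The zone refuses the allocation if that
per-type bit is set, or if any of the Claiming/NoAlloc/TmpBlockAlloc flags
are set.
*/
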
/** Updates the flags of the specified RAM zone.

@param aId			The ID of the RAM zone to modify.
@param aClearMask	The bit flags to clear.
@param aSetMask		The bit flags to set.

@return KErrNone on success, KErrArgument if no RAM zone with ID aId was found
or aSetMask contains invalid flags.

@pre RamAlloc mutex held
@post RamAlloc mutex held
*/
TInt DRamAllocator::ModifyZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
	{
	M::RamAllocIsLocked();

	SZone* zone = ZoneFromId(aId);
	if (zone == NULL || (aSetMask & KRamZoneFlagInvalid))
		{// aId invalid or an invalid flag bit was requested to be set.
		return KErrArgument;
		}
	zone->iFlags &= ~aClearMask;
	zone->iFlags |= aSetMask;

	__KTRACE_OPT(KMMU, Kern::Printf("Zone %x Flags %x", zone->iId, zone->iFlags));

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, zone->iId, zone->iFlags);
#endif
	return KErrNone;
	}

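/*
Usage sketch (the zone ID 0x01 is a hypothetical value): block all further
allocations into a RAM zone and later allow them again, with the RamAlloc
mutex held across each call:

	TInt r = ModifyZoneFlags(0x01, 0, KRamZoneFlagNoAlloc);	// block allocations
	// ...
	r = ModifyZoneFlags(0x01, KRamZoneFlagNoAlloc, 0);		// allow them again
*/
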
/** Invoke the RAM zone callback function to inform the variant of the RAM zones
in use so far by the system.
This is designed to be invoked only once, during boot, in MmuBase::Init2().
*/
void DRamAllocator::InitialCallback()
	{
	__NK_ASSERT_DEBUG(iZoneCallbackInitSent == EFalse);
	if (iZonePowerFunc)
		{
		TInt ret = (*iZonePowerFunc)(ERamZoneOp_Init, NULL, (TUint*)&iZonePwrState);
		if (ret != KErrNone && ret != KErrNotSupported)
			{
			Panic(EZonesCallbackErr);
			}
		CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::InitialCallback");
		}
	iZoneCallbackInitSent = ETrue;
	}

#ifdef BTRACE_RAM_ALLOCATOR
/**
Structure for outputting to BTrace the zone information that couldn't fit into
the first 2 words of the BTraceN call.
*/
struct TRamAllocBtraceZone
	{
	TUint32 iId;
	TUint8 iPref;
	TUint8 iFlags;
	TUint16 iReserved;
	};

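// Note: the struct packs into 8 bytes (4 + 1 + 1 + 2), so together with the
// two word-sized arguments of the BTraceN call a zone's configuration travels
// in a single trace record.
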
/**
This will be invoked when BTrace starts logging BTrace::ERamAllocator category
traces.
It outputs the zone configuration and the base addresses of any contiguous
blocks of allocated pages.
*/
void DRamAllocator::SendInitialBtraceLogs(void)
	{
	M::RamAllocIsLocked();
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::SendInitialBtraceLogs");

	// Output the zone information.
	TRamAllocBtraceZone bZone;
	BTrace4(BTrace::ERamAllocator, BTrace::ERamAllocZoneCount, iNumZones);
	const SZone* zone = iZones;
	const SZone* const endZone = iZones + iNumZones;
	for (; zone < endZone; zone++)
		{
		bZone.iId = zone->iId;
		bZone.iPref = zone->iPref;
		bZone.iFlags = zone->iFlags;
		BTraceN(BTrace::ERamAllocator, BTrace::ERamAllocZoneConfig, zone->iPhysPages,
				zone->iPhysBase, &bZone, sizeof(TRamAllocBtraceZone));
		}

	// Search through the zones and output each contiguous region of allocated pages.
	for (zone = iZones; zone < endZone; zone++)
		{
		if (zone->iFreePages != zone->iPhysPages)
			{
			TInt pageCount = 0;
			TInt totalPages = 0;
			TUint32 runStart = 0;
			while ((TUint)totalPages != zone->iPhysPages - zone->iFreePages)
				{
				// Find a set of contiguous pages that have been allocated;
				// runStart will be set to the first page of the run if one is found.
				for (; runStart < zone->iPhysPages && zone->iBma[KBmaAllPages]->NotAllocated(runStart,1); runStart++);

				// Find the last allocated page of this run.
				TUint32 runEnd = runStart + 1;
				for (; runEnd < zone->iPhysPages && zone->iBma[KBmaAllPages]->NotFree(runEnd,1); runEnd++);

				pageCount = runEnd - runStart;
				if (pageCount > 0)
					{// Have a run of allocated pages so output a BTrace.
					TPhysAddr baseAddr = (runStart << KPageShift) + zone->iPhysBase;
					__KTRACE_OPT(KMMU2, Kern::Printf("offset %x physBase %x pages %x baseAddr %08x",
									runStart, zone->iPhysBase, pageCount, baseAddr));
					BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocBootAllocation, pageCount, baseAddr);
					runStart += pageCount;
					totalPages += pageCount;
					}
				}
			}
		}
	BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocBootAllocationEnd);
	}
#endif // BTRACE_RAM_ALLOCATOR

TInt DRamAllocator::ClaimPhysicalRam(TPhysAddr aBase, TInt aSize)
	{
	TInt ret = SetPhysicalRamState(aBase, aSize, EFalse, EPageFixed);
#ifdef BTRACE_RAM_ALLOCATOR
	if (ret == KErrNone)
		{
		BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocClaimRam, aSize, aBase);
		}
#endif
	return ret;
	}

TInt DRamAllocator::FreePhysicalRam(TPhysAddr aBase, TInt aSize)
	{
	TInt ret = SetPhysicalRamState(aBase, aSize, ETrue, EPageFixed);
#ifdef BTRACE_RAM_ALLOCATOR
	if (ret == KErrNone)
		{
		BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePhysical, aSize, aBase);
		}
#endif
	return ret;
	}

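/*
Pairing sketch (the address and size are hypothetical values): a caller that
needs a specific physical region claims it as fixed pages and must hand it
back with the matching free call when finished:

	if (ClaimPhysicalRam(0x80000000, 4 * KPageSize) == KErrNone)
		{
		// ... the region is now allocated as EPageFixed and safe to use ...
		FreePhysicalRam(0x80000000, 4 * KPageSize);
		}
*/
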
TInt DRamAllocator::FreeRamInBytes()
	{
	return iTotalFreeRamPages << KPageShift;
	}

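// Example: with 4KB pages (KPageShift == 12), 0x100 free pages is reported as
// 0x100 << 12 == 0x100000 bytes. Note that the TInt return value caps the
// amount of free RAM this can report below 2GB.
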
TUint DRamAllocator::FreeRamInPages()
	{
	return iTotalFreeRamPages;
	}

TUint DRamAllocator::TotalPhysicalRamPages()
	{
	return iTotalRamPages;
	}

#ifdef __VERIFY_LEASTMOVDIS
void DRamAllocator::VerifyLeastPrefMovDis()
	{
	// Shouldn't have any movable or discardable pages in any RAM
	// zone less preferable than iZoneLeastMovDis.
	SDblQueLink* tmpLink = iZoneLeastMovDis->iNext;
	while (tmpLink != &iZonePrefList.iA)
		{
		SZone& zone = *_LOFF(tmpLink, SZone, iPrefLink);
		if (zone.iAllocPages[EPageMovable] != 0 ||
			zone.iAllocPages[EPageDiscard] != 0)
			{
			DebugDump();
			__NK_ASSERT_DEBUG(0);
			}
		tmpLink = tmpLink->iNext;
		}
	}
#endif