// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\x86\xmmu.cpp
//
//

#include <x86_mem.h>
#include <mmubase.inl>
#include <ramcache.h>
#include "execs.h"
#include <defrag.h>

extern "C" void DoTotalInvalidateTLB();

// Constants for X86 MMU
const TUint32 KPdePtePresent=0x01;
const TUint32 KPdePteWrite=0x02;
const TUint32 KPdePteUser=0x04;
const TUint32 KPdePteWriteThrough=0x08;
const TUint32 KPdePteUncached=0x10;
const TUint32 KPdePteAccessed=0x20;
const TUint32 KPdePteDirty=0x40;
const TUint32 KPdeLargePage=0x80;						// Pentium and above, not 486
const TUint32 KPdePteGlobal=0x100;						// P6 and above, not 486 or Pentium
const TUint32 KPdePtePhysAddrMask=0xfffff000u;
const TUint32 KPdeLargePagePhysAddrMask=0xffc00000u;	// Pentium and above, not 486
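
// These values are the standard IA-32 4K-paging entry bits: bit 0 'present',
// bit 1 'writable', bit 2 'user-accessible', bits 3-4 write-through/cache-disable,
// bits 5-6 accessed/dirty (set by hardware), bit 7 'page size' (4Mb page, PDEs
// only) and bit 8 'global' (the entry survives a CR3 reload). Bits 31:12 of a
// PDE/PTE hold the physical frame address, hence KPdePtePhysAddrMask.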

const TPde KPdPdePerm=KPdePtePresent|KPdePteWrite;
const TPte KPdPtePerm=KPdePtePresent|KPdePteWrite;
const TPde KPtPdePerm=KPdePtePresent|KPdePteWrite;
const TPte KPtPtePerm=KPdePtePresent|KPdePteWrite;
const TPde KPtInfoPdePerm=KPdePtePresent|KPdePteWrite;
const TPte KPtInfoPtePerm=KPdePtePresent|KPdePteWrite;
const TPde KRomPdePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KRomPtePerm=KPdePtePresent|KPdePteUser;
const TPde KShadowPdePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KShadowPtePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;	// unfortunately there's no RWRO

// Permissions for each chunk type

const TPde KStandardPtePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KPdePermNONO=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KPdePermRONO=KPdePtePresent;
const TPte KPdePermRORO=KPdePtePresent|KPdePteUser;
const TPte KPdePermRWNO=KPdePtePresent|KPdePteWrite;
const TPte KPdePermRWRW=KPdePtePresent|KPdePteWrite|KPdePteUser;

LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
	{
	KStandardPtePerm|KPdePteGlobal,		// EKernelData
	KStandardPtePerm|KPdePteGlobal,		// EKernelStack
	KPdePermRWNO|KPdePteGlobal,			// EKernelCode - loading
	KPdePermRWNO,						// EDll (used for global code) - loading
	KPdePermRORO,						// EUserCode
	KStandardPtePerm,					// ERamDrive
	KStandardPtePerm,					// EUserData
	KStandardPtePerm,					// EDllData
	KStandardPtePerm,					// EUserSelfModCode
	KStandardPtePerm,					// ESharedKernelSingle
	KStandardPtePerm,					// ESharedKernelMultiple
	KStandardPtePerm,					// ESharedIo
	KStandardPtePerm|KPdePteGlobal,		// ESharedKernelMirror
	KStandardPtePerm|KPdePteGlobal,		// EKernelMessage
	};

LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
	{
	KPdePermRWNO,		// EKernelData
	KPdePermRWNO,		// EKernelStack
	KPdePermRWNO,		// EKernelCode
	KPdePermRWRW,		// EDll
	KPdePermRWRW,		// EUserCode
	KPdePermRWRW,		// ERamDrive
	KPdePermRWRW,		// EUserData
	KPdePermRWRW,		// EDllData
	KPdePermRWRW,		// EUserSelfModCode
	KPdePermRWRW,		// ESharedKernelSingle
	KPdePermRWRW,		// ESharedKernelMultiple
	KPdePermRWRW,		// ESharedIo
	KPdePermRWNO,		// ESharedKernelMirror
	KPdePermRWNO,		// EKernelMessage
	};

#if defined(KMMU)
extern "C" void __DebugMsgFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("FlushTLB"));
	}

extern "C" void __DebugMsgLocalFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("LocalFlushTLB"));
	}

extern "C" void __DebugMsgTotalFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("TotalFlushTLB"));
	}

extern "C" void __DebugMsgINVLPG(int a)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("INVLPG(%08x)",a));
	}
#endif

// Inline functions for simple transformations
inline TLinAddr PageTableLinAddr(TInt aId)
	{
	return (KPageTableBase+(aId<<KPageTableShift));
	}

inline TPte* PageTable(TInt aId)
	{
	return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
	}

inline TLinAddr PageDirectoryLinAddr(TInt aOsAsid)
	{
	return (KPageDirectoryBase+(aOsAsid<<KPageTableShift));
	}
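
// With 4K pages and 4Mb page directory entries (KPageShift=12, KChunkShift=22),
// a 32-bit linear address decomposes as:
//   addr>>KChunkShift              - index into the page directory (top 10 bits)
//   (addr&KChunkMask)>>KPageShift  - index into the page table (middle 10 bits)
//   addr&KPageMask                 - byte offset within the 4K page
// All the walking code in this file uses this decomposition.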

extern "C" {

void __fastcall DoInvalidateTLBForPage(TLinAddr /*aLinAddr*/);
void DoInvalidateTLB();
void DoLocalInvalidateTLB();

}


#ifdef __SMP__

TSpinLock ShadowSpinLock(TSpinLock::EOrderGenericPreHigh0);	// Used when stopping other CPUs

class TTLBIPI : public TGenericIPI
	{
public:
	TTLBIPI();

	static void InvalidateForPagesIsr(TGenericIPI*);
	static void LocalInvalidateIsr(TGenericIPI*);
	static void TotalInvalidateIsr(TGenericIPI*);
	static void InvalidateIsr(TGenericIPI*);
	static void WaitAndInvalidateIsr(TGenericIPI*);
	void AddAddress(TLinAddr aAddr);
	void InvalidateList();
public:
	volatile TInt iFlag;
	TInt iCount;
	TLinAddr iAddr[KMaxPages];
	};

TTLBIPI::TTLBIPI()
	:	iFlag(0), iCount(0)
	{
	}

void TTLBIPI::LocalInvalidateIsr(TGenericIPI*)
	{
	__KTRACE_OPT(KMMU2,Kern::Printf("TLBLocInv"));
	DoLocalInvalidateTLB();
	}

void TTLBIPI::TotalInvalidateIsr(TGenericIPI*)
	{
	__KTRACE_OPT(KMMU2,Kern::Printf("TLBTotInv"));
	DoTotalInvalidateTLB();
	}

void TTLBIPI::InvalidateIsr(TGenericIPI*)
	{
	__KTRACE_OPT(KMMU2,Kern::Printf("TLBInv"));
	DoInvalidateTLB();
	}

void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aTLBIPI)
	{
	__KTRACE_OPT(KMMU2,Kern::Printf("TLBWtInv"));
	TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
	while (!a.iFlag)
		{}
	if (a.iCount == 1)
		DoInvalidateTLBForPage(a.iAddr[0]);
	else
		DoInvalidateTLB();
	}

void TTLBIPI::InvalidateForPagesIsr(TGenericIPI* aTLBIPI)
	{
	TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
	TInt i;
	for (i=0; i<a.iCount; ++i)
		{
		__KTRACE_OPT(KMMU2,Kern::Printf("TLBInv %08x", a.iAddr[i]));
		DoInvalidateTLBForPage(a.iAddr[i]);
		}
	}

void TTLBIPI::AddAddress(TLinAddr aAddr)
	{
	iAddr[iCount] = aAddr;
	if (++iCount == KMaxPages)
		InvalidateList();
	}

void TTLBIPI::InvalidateList()
	{
	NKern::Lock();
	InvalidateForPagesIsr(this);
	QueueAllOther(&InvalidateForPagesIsr);
	NKern::Unlock();
	WaitCompletion();
	iCount = 0;
	}
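
// The wrappers below implement a TLB shootdown: the calling CPU performs the
// invalidation locally, queues the matching ISR on every other CPU via
// TGenericIPI, and then waits for them all to complete, so that no CPU is left
// holding a stale translation.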

void LocalInvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoLocalInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::LocalInvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}

void TotalInvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoTotalInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::TotalInvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}

void InvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::InvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}

void InvalidateTLBForPage(TLinAddr aAddr)
	{
	TTLBIPI ipi;
	ipi.AddAddress(aAddr);
	ipi.InvalidateList();
	}

#else
#define	InvalidateTLBForPage(a)	DoInvalidateTLBForPage(a)
#define	LocalInvalidateTLB()	DoLocalInvalidateTLB()
#define	TotalInvalidateTLB()	DoTotalInvalidateTLB()
#define	InvalidateTLB()			DoInvalidateTLB()
#endif
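

// The helpers below walk the two-level page structures in software.
// SafePtePtrFromLinAddr tolerates an absent or non-RAM page table (it returns
// NULL); PtePtrFromLinAddr assumes the PDE is valid and always returns a
// pointer to the PTE that maps aAddress.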
TPte* SafePageTableFromPde(TPde aPde)
	{
	if (aPde&KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
		if (pi)
			{
			TInt id=pi->Offset();	// assumes page table size = page size
			return PageTable(id);
			}
		}
	return 0;
	}

TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	TPte* pt = SafePageTableFromPde(pde);
	if(pt)
		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}

TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
	TPte* pt = PageTable(id);
	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}
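
// The overload below resolves a region rather than a single address: it fills
// aPhysicalPageList (if supplied) with the physical address of every page in
// [aAddr,aAddr+aSize), and reports via the return value whether the region is
// physically contiguous.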
TInt X86Mmu::LinearToPhysical(TLinAddr aAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList, TInt aOsAsid)
	{
	TPhysAddr physStart = LinearToPhysical(aAddr,aOsAsid);

	TInt pageShift = iPageShift;
	TUint32 page = aAddr>>pageShift<<pageShift;
	TUint32 lastPage = (aAddr+aSize-1)>>pageShift<<pageShift;
	TUint32* pageList = aPhysicalPageList;
	TUint32 nextPhys = LinearToPhysical(page,aOsAsid);
	TUint32 pageSize = 1<<pageShift;
	while(page<=lastPage)
		{
		TPhysAddr phys = LinearToPhysical(page,aOsAsid);
		if(pageList)
			*pageList++ = phys;
		if(phys!=nextPhys)
			nextPhys = KPhysAddrInvalid;
		else
			nextPhys += pageSize;
		page += pageSize;
		}
	if(nextPhys==KPhysAddrInvalid)
		{
		// Memory is discontiguous...
		aPhysicalAddress = KPhysAddrInvalid;
		return 1;
		}
	else
		{
		// Memory is contiguous...
		aPhysicalAddress = physStart;
		return KErrNone;
		}
	}

TPhysAddr X86Mmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
//
// Find the physical address corresponding to a given linear address in a specified OS
// address space. Call with system locked.
//
	{
	__KTRACE_OPT(KMMU2,Kern::Printf("X86Mmu::LinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
	TInt pdeIndex=aLinAddr>>KChunkShift;
	TPde pde=PageDirectory(aOsAsid)[pdeIndex];
	TPhysAddr pa=KPhysAddrInvalid;
	if (pde & KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
		if (pi)
			{
			TInt id=pi->Offset();	// assumes page table size = page size
			TPte* pPte=PageTable(id);
			TPte pte=pPte[(aLinAddr&KChunkMask)>>KPageShift];
			if (pte & KPdePtePresent)
				{
				pa=(pte&KPdePtePhysAddrMask)+(aLinAddr&KPageMask);
				__KTRACE_OPT(KMMU2,Kern::Printf("Mapped with page table - returning %08x",pa));
				}
			}
		}
	return pa;
	}


TInt X86Mmu::PreparePagesForDMA(TLinAddr /*aLinAddr*/, TInt /*aSize*/, TInt /*aOsAsid*/, TPhysAddr* /*aPhysicalPageList*/)
	{
	return KErrNotSupported;
	}

TInt X86Mmu::ReleasePagesFromDMA(TPhysAddr* /*aPhysicalPageList*/, TInt /*aPageCount*/)
	{
	return KErrNotSupported;
	}
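
// Index into this table with the low three PTE bits (present=1, writable=2,
// user=4); entries at even indices are zero because a non-present entry
// confers no access. On IA-32 without NX, readable pages are also executable,
// hence the paired EMapAttrExec values.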
static const TInt PermissionLookup[8]=
	{
	0,
	EMapAttrReadSup|EMapAttrExecSup,
	0,
	EMapAttrWriteSup|EMapAttrReadSup|EMapAttrExecSup,
	0,
	EMapAttrReadUser|EMapAttrExecUser,
	0,
	EMapAttrWriteUser|EMapAttrReadUser|EMapAttrExecUser
	};

TInt X86Mmu::PageTableId(TLinAddr aAddr, TInt aOsAsid)
	{
	TInt id=-1;
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::PageTableId(%08x,%d)",aAddr,aOsAsid));
	TInt pdeIndex=aAddr>>KChunkShift;
	TPde pde=PageDirectory(aOsAsid)[pdeIndex];
	if (pde & KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
		if (pi)
			id=pi->Offset();	// assumes page table size = page size
		}
	__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
	return id;
	}

// Used only during boot for recovery of RAM drive
TInt X86Mmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
	{
	TInt id=KErrNotFound;
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:BootPageTableId(%08x,&)",aAddr));
	TPde* kpd=(TPde*)KPageDirectoryBase;	// kernel page directory
	TInt pdeIndex=aAddr>>KChunkShift;
	TPde pde = kpd[pdeIndex];
	if (pde & KPdePtePresent)
		{
		aPtPhys = pde & KPdePtePhysAddrMask;
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
		if (pi)
			{
			SPageInfo::TType type = pi->Type();
			if (type == SPageInfo::EPageTable)
				id=pi->Offset();	// assumes page table size = page size
			else if (type == SPageInfo::EUnused)
				id = KErrUnknown;
			}
		}
	__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
	return id;
	}

TBool X86Mmu::PteIsPresent(TPte aPte)
	{
	return aPte & KPdePtePresent;
	}

TPhysAddr X86Mmu::PtePhysAddr(TPte aPte, TInt /*aPteIndex*/)
	{
	return aPte & KPdePtePhysAddrMask;
	}

TPhysAddr X86Mmu::PdePhysAddr(TLinAddr aAddr)
	{
	TPde* kpd = (TPde*)KPageDirectoryBase;	// kernel page directory
	TPde pde = kpd[aAddr>>KChunkShift];
	if ((pde & (KPdePtePresent|KPdeLargePage)) == (KPdePtePresent|KPdeLargePage))
		return pde & KPdeLargePagePhysAddrMask;
	return KPhysAddrInvalid;
	}

void X86Mmu::Init1()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("X86Mmu::Init1"));

	TUint pge = TheSuperPage().iCpuId & EX86Feat_PGE;
	iPteGlobal = pge ? KPdePteGlobal : 0;
	X86_UseGlobalPTEs = pge!=0;

	// MmuBase data
	iPageSize=KPageSize;
	iPageMask=KPageMask;
	iPageShift=KPageShift;
	iChunkSize=KChunkSize;
	iChunkMask=KChunkMask;
	iChunkShift=KChunkShift;
	iPageTableSize=KPageTableSize;
	iPageTableMask=KPageTableMask;
	iPageTableShift=KPageTableShift;
	iPtClusterSize=KPtClusterSize;
	iPtClusterMask=KPtClusterMask;
	iPtClusterShift=KPtClusterShift;
	iPtBlockSize=KPtBlockSize;
	iPtBlockMask=KPtBlockMask;
	iPtBlockShift=KPtBlockShift;
	iPtGroupSize=KChunkSize/KPageTableSize;
	iPtGroupMask=iPtGroupSize-1;
	iPtGroupShift=iChunkShift-iPageTableShift;
	//TInt* iPtBlockCount;		// dynamically allocated - Init2
	//TInt* iPtGroupCount;		// dynamically allocated - Init2
	iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
	iPageTableLinBase=KPageTableBase;
	//iRamPageAllocator;		// dynamically allocated - Init2
	//iAsyncFreeList;			// dynamically allocated - Init2
	//iPageTableAllocator;		// dynamically allocated - Init2
	//iPageTableLinearAllocator;// dynamically allocated - Init2
	iPtInfoPtePerm=KPtInfoPtePerm|iPteGlobal;
	iPtPtePerm=KPtPtePerm|iPteGlobal;
	iPtPdePerm=KPtPdePerm;
	iUserCodeLoadPtePerm=KPdePermRWNO;
	iKernelCodePtePerm=KPdePermRONO|iPteGlobal;
	iTempAddr=KTempAddr;
	iSecondTempAddr=KSecondTempAddr;

	TUint pse = TheSuperPage().iCpuId & EX86Feat_PSE;
	iMapSizes = pse ? KPageSize|KChunkSize : KPageSize;

	iDecommitThreshold=0;		// no cache consistency issues on decommit
	iRomLinearBase = ::RomHeaderAddress;
	iRomLinearEnd = KRomLinearEnd;
	iShadowPtePerm = KShadowPtePerm;
	iShadowPdePerm = KShadowPdePerm;

	// Mmu data
	TInt total_ram=TheSuperPage().iTotalRamSize;

	iNumOsAsids=1024;
	iNumGlobalPageDirs=1;
	//iOsAsidAllocator;			// dynamically allocated - Init2
	iGlobalPdSize=KPageTableSize;
	iGlobalPdShift=KPageTableShift;
	iLocalPdSize=0;
	iLocalPdShift=0;
	iAsidGroupSize=KChunkSize/KPageTableSize;
	iAsidGroupMask=iAsidGroupSize-1;
	iAsidGroupShift=iChunkShift-iGlobalPdShift;
	iAliasSize=KPageSize;
	iAliasMask=KPageMask;
	iAliasShift=KPageShift;
	iUserLocalBase=KUserLocalDataBase;
	iUserSharedBase=KUserSharedDataBase;
	iAsidInfo=(TUint32*)KAsidInfoBase;
	iPdeBase=KPageDirectoryBase;
	iPdPtePerm=KPdPtePerm|iPteGlobal;
	iPdPdePerm=KPdPdePerm;
	iRamDriveMask=0x00f00000;
	iGlobalCodePtePerm=KPdePermRORO|iPteGlobal;

	iMaxDllDataSize=Min(total_ram/2, 0x08000000);				// phys RAM/2 up to 128Mb
	iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask;	// round up to chunk size
	iMaxUserCodeSize=Min(total_ram, 0x10000000);				// phys RAM up to 256Mb
	iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask;	// round up to chunk size
	iUserLocalEnd=iUserSharedBase-iMaxDllDataSize;
	iUserSharedEnd=KUserSharedDataEnd-iMaxUserCodeSize;
	iDllDataBase=iUserLocalEnd;
	iUserCodeBase=iUserSharedEnd;
	__KTRACE_OPT(KMMU,Kern::Printf("ULB %08x ULE %08x USB %08x USE %08x",iUserLocalBase,iUserLocalEnd,
											iUserSharedBase,iUserSharedEnd));
	__KTRACE_OPT(KMMU,Kern::Printf("DDB %08x UCB %08x",iDllDataBase,iUserCodeBase));

	// X86Mmu data

	// other
	PP::MaxUserThreadStack=0x14000;			// 80K - STDLIB asks for 64K for PosixServer!!!!
	PP::UserThreadStackGuard=0x2000;		// 8K
	PP::MaxStackSpacePerProcess=0x200000;	// 2Mb
	K::SupervisorThreadStackSize=0x1000;	// 4K
	PP::SupervisorThreadStackGuard=0x1000;	// 4K
	K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
	PP::RamDriveStartAddress=KRamDriveStartAddress;
	PP::RamDriveRange=KRamDriveMaxSize;
	PP::RamDriveMaxSize=KRamDriveMaxSize;	// may be reduced later
	K::MemModelAttributes=EMemModelTypeMultiple|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
						EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
						EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;

#ifdef __SMP__
	ApTrampolinePage = KApTrampolinePageLin;

	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler& ss = TheSubSchedulers[i];
		TLinAddr a = KIPCAlias + (i<<KChunkShift);
		ss.i_AliasLinAddr = (TAny*)a;
		ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase + (a>>KChunkShift)*sizeof(TPde));
		}
#endif

	Mmu::Init1();
	}

void X86Mmu::DoInit2()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("X86Mmu::DoInit2"));
	iTempPte=PageTable(PageTableId(iTempAddr,0))+((iTempAddr&KChunkMask)>>KPageShift);
	iSecondTempPte=PageTable(PageTableId(iSecondTempAddr,0))+((iSecondTempAddr&KChunkMask)>>KPageShift);
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x",
			iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
	CreateKernelSection(KKernelSectionEnd, iAliasShift);
	CreateUserGlobalSection(KUserGlobalDataBase, KUserGlobalDataEnd);
	iUserHwChunkAllocator=THwChunkAddressAllocator::New(0, iUserGlobalSection);
	__ASSERT_ALWAYS(iUserHwChunkAllocator, Panic(ECreateUserGlobalSectionFailed));
	Mmu::DoInit2();
	}

#ifndef __MMU_MACHINE_CODED__
void X86Mmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
//
// Map a list of physical RAM pages into a specified page table with specified PTE permissions.
// Update the page information array.
// Call this with the system locked.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
			aId, aType, aPtr, aOffset, aNumPages, aPtePerm));

	SPageTableInfo& ptinfo=iPtInfo[aId];
	ptinfo.iCount+=aNumPages;
	aOffset>>=KPageShift;
	TInt ptOffset=aOffset & KPagesInPDEMask;	// entry number in page table
	TPte* pPte=PageTable(aId)+ptOffset;			// address of first PTE
	while(aNumPages--)
		{
		TPhysAddr pa = *aPageList++;
		*pPte++ = pa | aPtePerm;				// insert PTE
		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
		if (aType!=SPageInfo::EInvalid)
			{
			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
			if(pi)
				{
				pi->Set(aType,aPtr,aOffset);
				__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
				++aOffset;	// increment offset for next page
				}
			}
		}
	__DRAIN_WRITE_BUFFER;
	}

void X86Mmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
//
// Map consecutive physical pages into a specified page table with specified PTE permissions.
// Update the page information array if RAM pages are being mapped.
// Call this with the system locked.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
			aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
	SPageTableInfo& ptinfo=iPtInfo[aId];
	ptinfo.iCount+=aNumPages;
	aOffset>>=KPageShift;
	TInt ptOffset=aOffset & KPagesInPDEMask;			// entry number in page table
	TPte* pPte=(TPte*)(PageTableLinAddr(aId))+ptOffset;	// address of first PTE
	SPageInfo* pi;
	if(aType==SPageInfo::EInvalid)
		pi = NULL;
	else
		pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
	while(aNumPages--)
		{
		*pPte++ = aPhysAddr|aPtePerm;					// insert PTE
		aPhysAddr+=KPageSize;
		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
		if (pi)
			{
			pi->Set(aType,aPtr,aOffset);
			++aOffset;	// increment offset for next page
			__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
			++pi;
			}
		}
	__DRAIN_WRITE_BUFFER;
	}

void X86Mmu::MapVirtual(TInt /*aId*/, TInt /*aNumPages*/)
//
// Used in the implementation of demand paging - not supported on x86
//
	{
	MM::Panic(MM::EOperationNotSupported);
	}

void X86Mmu::RemapPage(TInt /*aId*/, TUint32 /*aAddr*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/, TPte /*aPtePerm*/, DProcess* /*aProcess*/)
	{
	MM::Panic(MM::EOperationNotSupported);
	}

void X86Mmu::RemapPageByAsid(TBitMapAllocator* /*aOsAsids*/, TLinAddr /*aLinAddr*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/, TPte /*aPtePerm*/)
	{
	MM::Panic(MM::EOperationNotSupported);
	}

TInt X86Mmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess*)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::UnmapPages() id=%d off=%08x n=%d pl=%08x set-free=%08x",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;	// entry number in page table
	TPte* pPte=PageTable(aId)+ptOffset;				// address of first PTE
	TInt np=0;
	TInt nf=0;
#ifdef __SMP__
	TTLBIPI ipi;
#endif
	while(aNumPages--)
		{
		TPte pte=*pPte;						// get original PTE
		*pPte++=0;							// clear PTE
		if (pte & KPdePtePresent)
			{
#ifdef __SMP__
			ipi.AddAddress(aAddr);
#else
			InvalidateTLBForPage(aAddr);	// flush any corresponding TLB entry
#endif
			++np;							// count unmapped pages
			TPhysAddr pa=pte & KPdePtePhysAddrMask;	// physical address of unmapped page
			if (aSetPagesFree)
				{
				SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
				if(iRamCache->PageUnmapped(pi))
					{
					pi->SetUnused();		// mark page as unused
					if (pi->LockCount()==0)
						{
						*aPageList++=pa;	// store in page list
						++nf;				// count free pages
						}
					}
				}
			else
				*aPageList++=pa;			// store in page list
			}
		aAddr+=KPageSize;
		}
#ifdef __SMP__
	ipi.InvalidateList();
#endif
	aNumPtes=np;
	aNumFree=nf;
	SPageTableInfo& ptinfo=iPtInfo[aId];
	TInt r=(ptinfo.iCount-=np);
	__DRAIN_WRITE_BUFFER;
	__KTRACE_OPT(KMMU,Kern::Printf("Pages recovered %d Pages remaining %d NF=%d",np,r,nf));
	return r;	// return number of pages remaining in this page table
	}

TInt X86Mmu::UnmapUnownedPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TLinAddr* aLAPageList, TInt& aNumPtes, TInt& aNumFree, DProcess*)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::UnmapUnownedPages() id=%d off=%08x n=%d pl=%08x",aId,aAddr,aNumPages,aPageList));
	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;	// entry number in page table
	TPte* pPte=PageTable(aId)+ptOffset;				// address of first PTE
	TInt np=0;
	TInt nf=0;
#ifdef __SMP__
	TTLBIPI ipi;
#endif
	while(aNumPages--)
		{
		TPte pte=*pPte;						// get original PTE
		*pPte++=0;							// clear PTE
		if (pte & KPdePtePresent)
			{
#ifdef __SMP__
			ipi.AddAddress(aAddr);
#else
			InvalidateTLBForPage(aAddr);	// flush any corresponding TLB entry
#endif
			++np;							// count unmapped pages
			TPhysAddr pa=pte & KPdePtePhysAddrMask;	// physical address of unmapped page

			nf++;
			*aPageList++=pa;				// store in page list
			*aLAPageList++ = aAddr;
			}
		aAddr+=KPageSize;
		}
#ifdef __SMP__
	ipi.InvalidateList();
#endif
	aNumPtes=np;
	aNumFree=nf;
	SPageTableInfo& ptinfo=iPtInfo[aId];
	TInt r=(ptinfo.iCount-=np);
	__DRAIN_WRITE_BUFFER;
	__KTRACE_OPT(KMMU,Kern::Printf("Pages recovered %d Pages remaining %d NF=%d",np,r,nf));
	return r;	// return number of pages remaining in this page table
	}

TInt X86Mmu::UnmapVirtual(TInt /*aId*/, TUint32 /*aAddr*/, TInt /*aNumPages*/, TPhysAddr* /*aPageList*/, TBool /*aSetPagesFree*/, TInt& /*aNumPtes*/, TInt& /*aNumFree*/, DProcess* /*aProcess*/)
//
// Used in the implementation of demand paging - not supported on x86
//
	{
	MM::Panic(MM::EOperationNotSupported);
	return 0;	// keep compiler happy
	}

TInt X86Mmu::UnmapUnownedVirtual(TInt /*aId*/, TUint32 /*aAddr*/, TInt /*aNumPages*/, TPhysAddr* /*aPageList*/, TLinAddr* /*aLALinAddr*/, TInt& /*aNumPtes*/, TInt& /*aNumFree*/, DProcess* /*aProcess*/)
//
// Used in the implementation of demand paging - not supported on x86
//
	{
	MM::Panic(MM::EOperationNotSupported);
	return 0;	// keep compiler happy
	}

void X86Mmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm, const TAny* aOsAsids)
//
// Assign an allocated page table to map a given linear address with specified permissions.
// This should be called with the system unlocked and the MMU mutex held.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::DoAssignPageTable %d to %08x perm %08x asid %08x",aId,aAddr,aPdePerm,aOsAsids));
	TLinAddr ptLin=PageTableLinAddr(aId);
	TPhysAddr ptPhys=LinearToPhysical(ptLin,0);
	TInt pdeIndex=TInt(aAddr>>KChunkShift);
	TInt os_asid=(TInt)aOsAsids;
	if (TUint32(os_asid)<TUint32(iNumOsAsids))
		{
		// single OS ASID
		TPde* pageDir=PageDirectory(os_asid);
		NKern::LockSystem();
		pageDir[pdeIndex]=ptPhys|aPdePerm;
		NKern::UnlockSystem();
		__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
		}
	else
		{
		// selection of OS ASIDs or all OS ASIDs
		const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
		if (os_asid==-1)
			pB=iOsAsidAllocator;	// 0's in positions which exist
		TInt num_os_asids=pB->iSize-pB->iAvail;
		for (os_asid=0; num_os_asids; ++os_asid)
			{
			if (pB->NotAllocated(os_asid,1))
				continue;			// os_asid is not needed
			TPde* pageDir=PageDirectory(os_asid);
			NKern::LockSystem();
			pageDir[pdeIndex]=ptPhys|aPdePerm;
			NKern::UnlockSystem();
			__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
			--num_os_asids;
			}
		}
	__DRAIN_WRITE_BUFFER;
	}

void X86Mmu::RemapPageTableSingle(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, TInt aOsAsid)
	{
	MM::Panic(MM::EOperationNotSupported);
	}

void X86Mmu::RemapPageTableGlobal(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr)
	{
	MM::Panic(MM::EOperationNotSupported);
	}

void X86Mmu::RemapPageTableMultiple(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, const TAny* aOsAsids)
	{
	MM::Panic(MM::EOperationNotSupported);
	}

void X86Mmu::RemapPageTableAliases(TPhysAddr aOld, TPhysAddr aNew)
	{
	MM::Panic(MM::EOperationNotSupported);
	}

void X86Mmu::DoUnassignPageTable(TLinAddr aAddr, const TAny* aOsAsids)
//
// Unassign a now-empty page table currently mapping the specified linear address.
// We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
// This should be called with the system unlocked and the MMU mutex held.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::DoUnassignPageTable at %08x a=%08x",aAddr,aOsAsids));
	TInt pdeIndex=TInt(aAddr>>KChunkShift);
	TInt os_asid=(TInt)aOsAsids;
	TUint pde=0;

	SDblQue checkedList;
	SDblQueLink* next;

	if (TUint32(os_asid)<TUint32(iNumOsAsids))
		{
		// single OS ASID
		TPde* pageDir=PageDirectory(os_asid);
		NKern::LockSystem();
		pde = pageDir[pdeIndex];
		pageDir[pdeIndex]=0;
		__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));

		// remove any aliases of the page table...
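		// Each alias entry is dequeued onto a temporary list and the system
		// lock is flashed once per entry, keeping the scan preemption-friendly;
		// the entries are moved back onto iAliasList once the scan completes.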
		TUint ptId = pde>>KPageTableShift;
		while(!iAliasList.IsEmpty())
			{
			next = iAliasList.First()->Deque();
			checkedList.Add(next);
			DMemModelThread* thread = _LOFF(next, DMemModelThread, iAliasLink);
			if(thread->iAliasOsAsid==os_asid && (thread->iAliasPde>>KPageTableShift)==ptId)
				{
				// the page table is being aliased by the thread, so remove it...
				thread->iAliasPde = 0;
				}
			NKern::FlashSystem();
			}
		}
	else
		{
		// selection of OS ASIDs or all OS ASIDs
		const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
		if (os_asid==-1)
			pB=iOsAsidAllocator;	// 0's in positions which exist
		TInt num_os_asids=pB->iSize-pB->iAvail;
		for (os_asid=0; num_os_asids; ++os_asid)
			{
			if (pB->NotAllocated(os_asid,1))
				continue;			// os_asid is not needed
			TPde* pageDir=PageDirectory(os_asid);
			NKern::LockSystem();
			pde = pageDir[pdeIndex];
			pageDir[pdeIndex]=0;
			NKern::UnlockSystem();
			__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
			--num_os_asids;
			}

		// remove any aliases of the page table...
		TUint ptId = pde>>KPageTableShift;
		NKern::LockSystem();
		while(!iAliasList.IsEmpty())
			{
			next = iAliasList.First()->Deque();
			checkedList.Add(next);
			DMemModelThread* thread = _LOFF(next, DMemModelThread, iAliasLink);
			if((thread->iAliasPde>>KPageTableShift)==ptId && !pB->NotAllocated(thread->iAliasOsAsid,1))
				{
				// the page table is being aliased by the thread, so remove it...
				thread->iAliasPde = 0;
				}
			NKern::FlashSystem();
			}
		}

	// copy checkedList back to iAliasList
	iAliasList.MoveFrom(&checkedList);

	NKern::UnlockSystem();

	__DRAIN_WRITE_BUFFER;	// because page tables have been updated
	}
#endif

// Initialise page table at physical address aXptPhys to be used as page table aXptId
// to expand the virtual address range used for mapping page tables. Map the page table
// at aPhysAddr as page table aId using the expanded range.
// Assign aXptPhys to kernel's Page Directory.
// Called with system unlocked and MMU mutex held.
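// Note the bootstrapping step below: the new page table (XPT) cannot be reached
// through the page-table linear region until it maps itself, so it is first
// written through the temporary mapping at iTempAddr, and one of the entries
// written there is the XPT's own self-mapping.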
void X86Mmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x",
			aXptId, aXptPhys, aId, aPhysAddr));

	// put in a temporary mapping for aXptPhys
	*iTempPte = aXptPhys | KPtPtePerm | iPteGlobal;
	__DRAIN_WRITE_BUFFER;

	// clear XPT
	TPte* xpt=(TPte*)iTempAddr;
	memclr(xpt, KPageSize);

	// map XPT
	xpt[aXptId & KPagesInPDEMask] = aXptPhys | KPtPtePerm | iPteGlobal;

	// map other page table
	xpt[aId & KPagesInPDEMask] = aPhysAddr | KPtPtePerm | iPteGlobal;

	// remove temporary mapping
	*iTempPte=0;
	__DRAIN_WRITE_BUFFER;
	InvalidateTLBForPage(iTempAddr);

	// initialise PtInfo...
	TLinAddr xptAddr = PageTableLinAddr(aXptId);
	iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift);

	// map xpt...
	TInt pdeIndex=TInt(xptAddr>>KChunkShift);
	TPde* pageDir=PageDirectory(0);
	NKern::LockSystem();
	pageDir[pdeIndex]=aXptPhys|KPtPdePerm;
	__DRAIN_WRITE_BUFFER;
	NKern::UnlockSystem();
	}

void X86Mmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew)
	{
	MM::Panic(MM::EOperationNotSupported);
	}

TInt X86Mmu::NewPageDirectory(TInt aOsAsid, TBool aSeparateGlobal, TPhysAddr& aPhysAddr, TInt& aNumPages)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::NewPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
	TInt r=AllocRamPages(&aPhysAddr,1, EPageFixed);
	if (r!=KErrNone)
		return r;
#ifdef BTRACE_KERNEL_MEMORY
	BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
	Epoc::KernelMiscPages += 1;
#endif
	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
	NKern::LockSystem();
	pi->SetPageDir(aOsAsid,0);
	NKern::UnlockSystem();
	aNumPages=1;
	return KErrNone;
	}

inline void CopyPdes(TPde* aDest, const TPde* aSrc, TLinAddr aBase, TLinAddr aEnd)
	{
	memcpy(aDest+(aBase>>KChunkShift), aSrc+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
	}

inline void ZeroPdes(TPde* aDest, TLinAddr aBase, TLinAddr aEnd)
	{
	memclr(aDest+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
	}
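
// Note that aEnd may legitimately be 0x00000000, meaning "the top of the
// 32-bit address space": (aEnd-aBase) is unsigned arithmetic, so it wraps
// modulo 2^32 and still yields the correct PDE count. InitPageDirectory
// relies on this when copying the kernel mappings.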
void X86Mmu::InitPageDirectory(TInt aOsAsid, TBool)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::InitPageDirectory(%d)",aOsAsid));
	TPde* newpd=PageDirectory(aOsAsid);					// new page directory
	const TPde* kpd=(const TPde*)KPageDirectoryBase;	// kernel page directory
	ZeroPdes(newpd, 0x00000000, KUserSharedDataEnd);	// clear user mapping area
	ZeroPdes(newpd, KRamDriveStartAddress, KRamDriveEndAddress);	// don't copy RAM drive
	CopyPdes(newpd, kpd, KRomLinearBase, KUserGlobalDataEnd);		// copy ROM + user global
	CopyPdes(newpd, kpd, KRamDriveEndAddress, 0x00000000);			// copy kernel mappings
	__DRAIN_WRITE_BUFFER;
	}

void X86Mmu::ClearPageTable(TInt aId, TInt aFirstIndex)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:ClearPageTable(%d,%d)",aId,aFirstIndex));
	TPte* pte=PageTable(aId);
	memclr(pte+aFirstIndex, KPageSize-aFirstIndex*sizeof(TPte));
	__DRAIN_WRITE_BUFFER;
	}

void X86Mmu::ApplyTopLevelPermissions(TLinAddr aAddr, TInt aOsAsid, TInt aNumPdes, TPde aPdePerm)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::ApplyTopLevelPermissions %04x:%08x->%08x count %d",
			aOsAsid, aAddr, aPdePerm, aNumPdes));
	TInt ix=aAddr>>KChunkShift;
	TPde* pPde=PageDirectory(aOsAsid)+ix;
	TPde* pPdeEnd=pPde+aNumPdes;
	NKern::LockSystem();
	for (; pPde<pPdeEnd; ++pPde)
		{
		TPde pde=*pPde;
		if (pde)
			*pPde = (pde&KPdePtePhysAddrMask)|aPdePerm;
		}
	NKern::UnlockSystem();
	(aAddr>=KUserSharedDataEnd) ? InvalidateTLB() : LocalInvalidateTLB();
	__DRAIN_WRITE_BUFFER;
	}

void X86Mmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::ApplyPagePermissions %04x:%03x+%03x perm %08x",
			aId, aPageOffset, aNumPages, aPtePerm));
	TPte* pPte=PageTable(aId)+aPageOffset;
	TPte* pPteEnd=pPte+aNumPages;
	TPte g=0;
	NKern::LockSystem();
	for (; pPte<pPteEnd; ++pPte)
		{
		TPte pte=*pPte;
		g |= pte;
		if (pte)
			*pPte = (pte&KPdePtePhysAddrMask)|aPtePerm;
		}
	NKern::UnlockSystem();
	(g & KPdePteGlobal) ? InvalidateTLB() : LocalInvalidateTLB();
	__DRAIN_WRITE_BUFFER;
	}


// Set up a page table (specified by aId) to map a 4Mb section of ROM containing aRomAddr
// using ROM at aOrigPhys.
void X86Mmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
	{
	(void)aId, (void)aRomAddr, (void)aOrigPhys;
	FAULT();	// Never used
/*
	__KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x",
			aId, aRomAddr, aOrigPhys));
	TPte* ppte = PageTable(aId);
	TPte* ppte_End = ppte + KChunkSize/KPageSize;
	TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask);
	for (; ppte<ppte_End; ++ppte, phys+=KPageSize)
		*ppte = phys | KRomPtePerm;
	__DRAIN_WRITE_BUFFER;
*/
	}

// Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys
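// The shadow page is not yet mapped anywhere, so the copy is done through the
// kernel's temporary mapping (iTempAddr/iTempPte), which is torn down and
// TLB-flushed again before returning.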
void X86Mmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x",
			aShadowPhys, aRomAddr));

	// put in a temporary mapping for aShadowPhys
	// make it noncacheable
	*iTempPte = aShadowPhys | KPtPtePerm | iPteGlobal;
	__DRAIN_WRITE_BUFFER;

	// copy contents of ROM
	wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize );
	__DRAIN_WRITE_BUFFER;	// make sure contents are written to memory

	// remove temporary mapping
	*iTempPte=0;
	__DRAIN_WRITE_BUFFER;
	InvalidateTLBForPage(iTempAddr);
	}

// Assign a shadow page table to replace a ROM section mapping
// Enter and return with system locked
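// On SMP the PDE swap must be atomic with respect to every CPU: all other
// processors are first parked in WaitAndInvalidateIsr, the PDE is rewritten
// and the local TLB flushed with interrupts disabled, and only then are the
// other processors released (via iFlag) to flush their own TLBs.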
void X86Mmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:AssignShadowPageTable aId=%04x aRomAddr=%08x",
			aId, aRomAddr));
	TLinAddr ptLin=PageTableLinAddr(aId);
	TPhysAddr ptPhys=LinearToPhysical(ptLin, 0);
	TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
	TPde newpde = ptPhys | KShadowPdePerm;
	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
#ifdef __SMP__
	TTLBIPI ipi;
	NKern::Lock();			// stop other processors passing this point
	ShadowSpinLock.LockOnly();
	ipi.QueueAllOther(&TTLBIPI::WaitAndInvalidateIsr);
	ipi.WaitEntry();		// wait for other processors to stop in the ISR
#endif
	TInt irq=NKern::DisableAllInterrupts();
	*ppde = newpde;			// map in the page table
	__DRAIN_WRITE_BUFFER;	// make sure new PDE written to main memory
	DoInvalidateTLB();		// completely flush TLB
	NKern::RestoreInterrupts(irq);
#ifdef __SMP__
	ipi.iFlag = 1;			// release other processors so they can flush their TLBs
	ipi.WaitCompletion();	// wait for other processors to flush their TLBs
	ShadowSpinLock.UnlockOnly();
	NKern::Unlock();
#endif
	}
sl@0
|
1149 |
|
sl@0
|
1150 |
void X86Mmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
|
sl@0
|
1151 |
{
|
sl@0
|
1152 |
__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys));
|
sl@0
|
1153 |
TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
|
sl@0
|
1154 |
TPte newpte = aOrigPhys | KRomPtePerm;
|
sl@0
|
1155 |
__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
|
sl@0
|
1156 |
#ifdef __SMP__
|
sl@0
|
1157 |
TTLBIPI ipi;
|
sl@0
|
1158 |
ipi.AddAddress(aRomAddr);
|
sl@0
|
1159 |
NKern::Lock(); // stop other processors passing this point
|
sl@0
|
1160 |
ShadowSpinLock.LockOnly();
|
sl@0
|
1161 |
ipi.QueueAllOther(&TTLBIPI::WaitAndInvalidateIsr);
|
sl@0
|
1162 |
ipi.WaitEntry(); // wait for other processors to stop
|
sl@0
|
1163 |
#endif
|
sl@0
|
1164 |
TInt irq=NKern::DisableAllInterrupts();
|
sl@0
|
1165 |
*ppte = newpte;
|
sl@0
|
1166 |
__DRAIN_WRITE_BUFFER;
|
sl@0
|
1167 |
DoInvalidateTLBForPage(aRomAddr);
|
sl@0
|
1168 |
NKern::RestoreInterrupts(irq);
|
sl@0
|
1169 |
#ifdef __SMP__
|
sl@0
|
1170 |
ipi.iFlag = 1; // release other processors so they can flush their TLBs
|
sl@0
|
1171 |
ipi.WaitCompletion(); // wait for other processors to flush their TLBs
|
sl@0
|
1172 |
ShadowSpinLock.UnlockOnly();
|
sl@0
|
1173 |
NKern::Unlock();
|
sl@0
|
1174 |
#endif
|
sl@0
|
1175 |
}
|
sl@0
|
1176 |
|
sl@0
|
1177 |
TInt X86Mmu::UnassignShadowPageTable(TLinAddr /*aRomAddr*/, TPhysAddr /*aOrigPhys*/)
|
sl@0
|
1178 |
{
|
sl@0
|
1179 |
// not used since we use page mappings for the ROM
|
sl@0
|
1180 |
return KErrGeneral;
|
sl@0
|
1181 |
}
|
sl@0
|
1182 |
|
sl@0
|
1183 |
TInt X86Mmu::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
|
sl@0
|
1184 |
{
|
sl@0
|
1185 |
__KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:CopyToShadowMemory aDest=%08x aSrc=%08x aLength=%08x", aDest, aSrc, aLength));
|
sl@0
|
1186 |
|
sl@0
|
1187 |
// Check that destination is ROM
|
sl@0
|
1188 |
if (aDest<iRomLinearBase || (aDest+aLength) > iRomLinearEnd)
|
sl@0
|
1189 |
{
|
sl@0
|
1190 |
__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:CopyToShadowMemory: Destination not entirely in ROM"));
|
sl@0
|
1191 |
return KErrArgument;
|
sl@0
|
1192 |
}
|
sl@0
|
1193 |
|
sl@0
|
1194 |
// do operation with RamAlloc mutex held (to prevent shadow pages from being released from under us)
|
sl@0
|
1195 |
Kern::MutexWait(*RamAllocatorMutex);
|
sl@0
|
1196 |
|
sl@0
|
1197 |
TInt r = KErrNone;
|
sl@0
|
1198 |
while (aLength)
|
sl@0
|
1199 |
{
|
sl@0
|
1200 |
// Calculate memory size to copy in this loop. A single page region will be copied per loop
|
sl@0
|
1201 |
TInt copySize = Min(aLength, iPageSize - (aDest&iPageMask));
|
sl@0
|
1202 |
|
sl@0
|
1203 |
// Get physical address
|
sl@0
|
1204 |
TPhysAddr physAddr = LinearToPhysical(aDest&~iPageMask, 0);
|
sl@0
|
1205 |
if (KPhysAddrInvalid==physAddr)
|
sl@0
|
1206 |
{
|
sl@0
|
1207 |
r = KErrArgument;
|
sl@0
|
1208 |
break;
|
sl@0
|
1209 |
}
|
sl@0
|
1210 |
|
sl@0
|
1211 |
//check whether it is shadowed rom
|
sl@0
|
1212 |
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
|
sl@0
|
1213 |
if (pi==0 || pi->Type()!=SPageInfo::EShadow)
|
sl@0
|
1214 |
{
|
sl@0
|
1215 |
__KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:CopyToShadowMemory: No shadow page at this address"));
|
sl@0
|
1216 |
r = KErrArgument;
|
sl@0
|
1217 |
break;
|
sl@0
|
1218 |
}
|
sl@0
|
1219 |
|
sl@0
|
1220 |
//Temporarily map into writable memory and copy data. RamAllocator DMutex is required
|
sl@0
|
1221 |
TLinAddr tempAddr = MapTemp (physAddr, aDest&~iPageMask);
|
sl@0
|
1222 |
__KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:CopyToShadowMemory Copy aDest=%08x aSrc=%08x aSize=%08x", tempAddr+(aDest&iPageMask), aSrc, copySize));
|
sl@0
|
1223 |
memcpy ((TAny*)(tempAddr+(aDest&iPageMask)), (const TAny*)aSrc, copySize); //Kernel-to-Kernel copy is presumed
|
sl@0
|
1224 |
UnmapTemp();
|
sl@0
|
1225 |
|
sl@0
|
1226 |
//Update variables for the next loop/page
|
sl@0
|
1227 |
aDest+=copySize;
|
sl@0
|
1228 |
aSrc+=copySize;
|
sl@0
|
1229 |
aLength-=copySize;
|
sl@0
|
1230 |
}
|
sl@0
|
1231 |
|
sl@0
|
1232 |
Kern::MutexSignal(*RamAllocatorMutex);
|
sl@0
|
1233 |
return r;
|
sl@0
|
1234 |
}
|
sl@0
|
1235 |
|
sl@0
|
1236 |
void X86Mmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)
|
sl@0
|
1237 |
{
|
sl@0
|
1238 |
__KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x",
|
sl@0
|
1239 |
aId, aRomAddr));
|
sl@0
|
1240 |
TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
|
sl@0
|
1241 |
TPte newpte = (*ppte & KPdePtePhysAddrMask) | KRomPtePerm;
|
sl@0
|
1242 |
__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
|
sl@0
|
1243 |
*ppte = newpte;
|
sl@0
|
1244 |
__DRAIN_WRITE_BUFFER;
|
sl@0
|
1245 |
InvalidateTLBForPage(aRomAddr);
|
sl@0
|
1246 |
}

void X86Mmu::FlushShadow(TLinAddr aRomAddr)
	{
#ifdef __SMP__
	TTLBIPI ipi;
	ipi.AddAddress(aRomAddr);
	NKern::Lock();					// stop other processors passing this point
	ShadowSpinLock.LockOnly();
	ipi.QueueAllOther(&TTLBIPI::WaitAndInvalidateIsr);
	ipi.WaitEntry();				// wait for other processors to stop
	DoInvalidateTLBForPage(aRomAddr);
	ipi.iFlag = 1;					// release other processors so they can flush their TLBs
	ipi.WaitCompletion();			// wait for other processors to flush their TLBs
	ShadowSpinLock.UnlockOnly();
	NKern::Unlock();
#else
	InvalidateTLBForPage(aRomAddr);	// remove all TLB references to the original ROM page
#endif
	}
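
// The SMP path above is a rendezvous protocol: the local CPU queues a TTLBIPI at
// all other processors, which spin in WaitAndInvalidateIsr; once they have all
// checked in (WaitEntry), the local TLB entry is invalidated, iFlag releases the
// spinners to flush their own TLBs, and WaitCompletion guarantees that no CPU can
// still translate through a stale mapping for the shadowed page before we return.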

void X86Mmu::Pagify(TInt aId, TLinAddr aLinAddr)
	{
	// Nothing to do on x86
	}

void X86Mmu::ClearRamDrive(TLinAddr aStart)
	{
	// clear the page directory entries corresponding to the RAM drive
	TPde* kpd=(TPde*)KPageDirectoryBase;	// kernel page directory
	ZeroPdes(kpd, aStart, KRamDriveEndAddress);
	__DRAIN_WRITE_BUFFER;
	}

// Generic cache/TLB flush function.
// Which things are flushed is determined by aMask.
void X86Mmu::GenericFlush(TUint32 aMask)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("GenericFlush %x",aMask));
	if (aMask&(EFlushDPermChg|EFlushIPermChg))
		InvalidateTLB();
	}

TPde X86Mmu::PdePermissions(TChunkType aChunkType, TBool aRO)
	{
	if (aChunkType==EUserData && aRO)
		return KPdePtePresent|KPdePteUser;
	return ChunkPdePermissions[aChunkType];
	}

TPte X86Mmu::PtePermissions(TChunkType aChunkType)
	{
	TPte pte=ChunkPtePermissions[aChunkType];
	return (pte&~KPdePteGlobal)|(pte&iPteGlobal);
	}

const TUint FBLK=(EMapAttrFullyBlocking>>12);
const TUint BFNC=(EMapAttrBufferedNC>>12);
const TUint BUFC=(EMapAttrBufferedC>>12);
const TUint L1UN=(EMapAttrL1Uncached>>12);
const TUint WTRA=(EMapAttrCachedWTRA>>12);
const TUint WTWA=(EMapAttrCachedWTWA>>12);
const TUint WBRA=(EMapAttrCachedWBRA>>12);
const TUint WBWA=(EMapAttrCachedWBWA>>12);
const TUint AWTR=(EMapAttrAltCacheWTRA>>12);
const TUint AWTW=(EMapAttrAltCacheWTWA>>12);
const TUint AWBR=(EMapAttrAltCacheWBRA>>12);
const TUint AWBW=(EMapAttrAltCacheWBWA>>12);

const TUint16 UNS=0xffffu;	// Unsupported attribute
const TUint16 SPE=0xfffeu;	// Special processing required

static const TUint16 CacheBuffAttributes[16]=
	{0x10,0x10,0x10,0x10,0x08,0x08,0x00,0x00, UNS, UNS, UNS, UNS, UNS, UNS, UNS,0x00};
static const TUint8 CacheBuffActual[16]=
	{FBLK,FBLK,FBLK,FBLK,WTRA,WTRA,WBWA,WBWA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBWA};

static const TUint8 ActualReadPrivilegeLevel[4]={1,1,4,4};	// RONO,RWNO,RORO,RWRW
static const TUint8 ActualWritePrivilegeLevel[4]={0,1,0,4};	// RONO,RWNO,RORO,RWRW
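
// Worked example (added for illustration): for a request of EMapAttrCachedWBWA the
// cache index computed in PdePtePermissions() below is WBWA, and
// CacheBuffAttributes[WBWA]==0x00, i.e. neither KPdePteUncached (0x10) nor
// KPdePteWriteThrough (0x08) is set, so the page is fully cached write-back;
// CacheBuffActual[WBWA]==WBWA records that WBWA was actually granted. A request
// of EMapAttrFullyBlocking indexes entry FBLK and yields 0x10, an uncached mapping.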

TInt X86Mmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
	{
	__KTRACE_OPT(KMMU,Kern::Printf(">X86Mmu::PdePtePermissions, mapattr=%08x",aMapAttr));
	TUint read=aMapAttr & EMapAttrReadMask;
	TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
	TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
	TUint cache=(aMapAttr & EMapAttrL1CacheMask)>>12;
	TPte pte;
	// ignore L2 cache attributes for now - downgrade to L2 uncached

	// if execute access is greater than read, adjust read (since there are no separate execute permissions on X86)
	if (exec>read)
		read=exec;
	pte=0;
	if (write==0)
		{
		// read-only
		if (read>=4)
			pte=KPdePermRORO;		// user and supervisor read-only
		else
			pte=KPdePermRONO;		// supervisor r/o, user no access
		}
	else if (write<4)
		{
		// only supervisor can write
		if (read>=4)
			pte=KPdePermRWRW;		// full access since no RWRO
		else
			pte=KPdePermRWNO;		// supervisor r/w, user no access
		}
	else
		pte=KPdePermRWRW;			// supervisor r/w, user r/w
	read=ActualReadPrivilegeLevel[pte>>1];
	write=ActualWritePrivilegeLevel[pte>>1];
	TUint cbatt=CacheBuffAttributes[cache];
	TInt r=KErrNone;
	if (cbatt==UNS)
		r=KErrNotSupported;
	if (r==KErrNone)
		{
		cache=CacheBuffActual[cache];
		aPde=KPdePtePresent|KPdePteWrite|KPdePteUser;
		aPte=pte|cbatt|iPteGlobal;	// HW chunks can always be global
		aMapAttr=read|(write<<4)|(read<<8)|(cache<<12);
		}
	__KTRACE_OPT(KMMU,Kern::Printf("<X86Mmu::PdePtePermissions, r=%d, mapattr=%08x, pde=%08x, pte=%08x",
		r,aMapAttr,aPde,aPte));
	return r;
	}
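
// Usage sketch (hypothetical caller, added for illustration). The attribute word
// is rewritten in place with what was actually granted:
//
//	TUint attr = EMapAttrReadUser|EMapAttrWriteUser|EMapAttrCachedWBWA;
//	TPde pde;
//	TPte pte;
//	TInt r = ::TheMmu.PdePtePermissions(attr, pde, pte);
//	// On success: pte carries user r/w permission plus the cache bits, pde is the
//	// standard present|write|user directory permission, and attr reports the
//	// actual read/write/exec levels and cache policy granted.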

THwChunkAddressAllocator* X86Mmu::MappingRegion(TUint aMapAttr)
	{
	TUint read=aMapAttr & EMapAttrReadMask;
	TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
	TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
	if (read>=4 || write>=4 || exec>=4)
		return iUserHwChunkAllocator;	// if any access in user mode, must put it in user global section
	return iHwChunkAllocator;
	}

void X86Mmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)
//
// Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr.
// Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift).
// Assume any page tables required are already assigned.
// aLinAddr, aPhysAddr, aSize must be page-aligned.
//
	{
	__KTRACE_OPT(KMMU, Kern::Printf("X86Mmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize));
	__KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift));
	TPde lp_pde=aPtePerm|KPdeLargePage;
	TLinAddr la=aLinAddr;
	TPhysAddr pa=aPhysAddr;
	TInt remain=aSize;
	while (remain)
		{
		if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize)
			{
			// use large pages
			TInt npdes=remain>>KChunkShift;
			const TBitMapAllocator& b=*iOsAsidAllocator;
			TInt num_os_asids=b.iSize-b.iAvail;
			TInt os_asid=0;
			for (; num_os_asids; ++os_asid)
				{
				if (b.NotAllocated(os_asid,1))
					continue;			// os_asid is not needed
				TPde* p_pde=PageDirectory(os_asid)+(la>>KChunkShift);
				TPde* p_pde_E=p_pde+npdes;
				TPde pde=pa|lp_pde;
				NKern::LockSystem();
				for (; p_pde < p_pde_E; pde+=KChunkSize)
					{
					__ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse));
					__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde));
					*p_pde++=pde;
					}
				NKern::UnlockSystem();
				--num_os_asids;
				}
			npdes<<=KChunkShift;
			la+=npdes, pa+=npdes, remain-=npdes;
			continue;
			}
		// use normal pages
		TInt block_size = Min(remain, KChunkSize-(la&KChunkMask));
		TInt id=PageTableId(la, 0);
		__ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable));
		TPte* p_pte=PageTable(id)+((la&KChunkMask)>>KPageShift);
		TPte* p_pte_E = p_pte + (block_size>>KPageShift);
		TPte pte=pa|aPtePerm;
		SPageTableInfo& ptinfo=iPtInfo[id];
		NKern::LockSystem();
		for (; p_pte < p_pte_E; pte+=KPageSize)
			{
			__ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse));
			__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte));
			*p_pte++=pte;
			++ptinfo.iCount;
			NKern::FlashSystem();
			}
		NKern::UnlockSystem();
		la+=block_size, pa+=block_size, remain-=block_size;
		}
	}
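
// Usage sketch (added for illustration; 'linBase' and 'physBase' are hypothetical,
// chunk-aligned addresses). Mapping 8MB of physically contiguous RAM with
// aMapShift>=KChunkShift lets the loop above write two 4MB large-page PDEs
// instead of 2048 individual PTEs:
//
//	::TheMmu.Map(linBase, physBase, 0x00800000,
//	             ::TheMmu.PdePermissions(EKernelData, EFalse),
//	             ::TheMmu.PtePermissions(EKernelData), KChunkShift);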

void X86Mmu::Unmap(TLinAddr aLinAddr, TInt aSize)
//
// Remove all mappings in the specified range of addresses.
// Don't free page tables.
// aLinAddr, aSize must be page-aligned.
//
	{
	__KTRACE_OPT(KMMU, Kern::Printf("X86Mmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
#ifdef __SMP__
	TTLBIPI ipi;
#endif
	TLinAddr a=aLinAddr;
	TLinAddr end=a+aSize;
	__KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
	NKern::LockSystem();
	while(a!=end)
		{
		TInt pdeIndex=a>>KChunkShift;
		TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize;
		TInt to_do=Min(TInt(end-a), TInt(next-a))>>KPageShift;
		__KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
		TPde pde=::InitPageDirectory[pdeIndex];
		if ( (pde&(KPdePtePresent|KPdeLargePage))==(KPdePtePresent|KPdeLargePage) )
			{
			__ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
			::InitPageDirectory[pdeIndex]=0;
#ifdef __SMP__
			ipi.AddAddress(a);
#else
			InvalidateTLBForPage(a);	// flush any corresponding TLB entry
#endif
			a=next;
			NKern::FlashSystem();
			continue;
			}
		TInt ptid=PageTableId(a,0);
		if (ptid>=0)
			{
			SPageTableInfo& ptinfo=iPtInfo[ptid];	// only index iPtInfo once ptid is known to be valid
			TPte* ppte=PageTable(ptid)+((a&KChunkMask)>>KPageShift);
			TPte* ppte_End=ppte+to_do;
			for (; ppte<ppte_End; ++ppte, a+=KPageSize)
				{
				if (*ppte & KPdePtePresent)
					--ptinfo.iCount;
				*ppte=0;
#ifdef __SMP__
				ipi.AddAddress(a);
#else
				InvalidateTLBForPage(a);	// flush any corresponding TLB entry
#endif
				NKern::FlashSystem();
				}
			}
		else
			a += (to_do<<KPageShift);
		}
#ifdef __SMP__
	ipi.InvalidateList();
#endif
	NKern::UnlockSystem();
	}


void X86Mmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte)
	{
	// map the pages at a temporary address, clear them and unmap
	__ASSERT_MUTEX(RamAllocatorMutex);
	while (--aNumPages >= 0)
		{
		TPhysAddr pa;
		if((TInt)aPageList&1)
			{
			pa = (TPhysAddr)aPageList&~1;
			*(TPhysAddr*)&aPageList += iPageSize;
			}
		else
			pa = *aPageList++;
		*iTempPte = pa | KPdePtePresent | KPdePteWrite | iPteGlobal;
		__DRAIN_WRITE_BUFFER;
		InvalidateTLBForPage(iTempAddr);
		memset((TAny*)iTempAddr, aClearByte, iPageSize);
		}
	*iTempPte=0;
	__DRAIN_WRITE_BUFFER;
	InvalidateTLBForPage(iTempAddr);
	}

TLinAddr X86Mmu::MapTemp(TPhysAddr aPage,TLinAddr /*aLinAddr*/,TInt aPages)
	{
	__ASSERT_MUTEX(RamAllocatorMutex);
	__ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
	__ASSERT_DEBUG(aPages<=4,MM::Panic(MM::ETempMappingNoRoom));
	iTempMapCount = aPages;
	for (TInt i=0; i<aPages; i++)
		{
		iTempPte[i] = ((aPage&~KPageMask)+(i<<KPageShift)) | KPdePtePresent | KPdePteWrite | iPteGlobal;
		__DRAIN_WRITE_BUFFER;
		InvalidateTLBForPage(iTempAddr+(i<<KPageShift));
		}
	return iTempAddr;
	}

TLinAddr X86Mmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages, TMemoryType)
	{
	return MapTemp(aPage, aLinAddr, aPages);
	}

TLinAddr X86Mmu::MapSecondTemp(TPhysAddr aPage,TLinAddr /*aLinAddr*/,TInt aPages)
	{
	__ASSERT_MUTEX(RamAllocatorMutex);
	__ASSERT_DEBUG(!*iSecondTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
	__ASSERT_DEBUG(aPages<=4,MM::Panic(MM::ETempMappingNoRoom));
	iSecondTempMapCount = aPages;
	for (TInt i=0; i<aPages; i++)
		{
		iSecondTempPte[i] = ((aPage&~KPageMask)+(i<<KPageShift)) | KPdePtePresent | KPdePteWrite | iPteGlobal;
		__DRAIN_WRITE_BUFFER;
		InvalidateTLBForPage(iSecondTempAddr+(i<<KPageShift));
		}
	return iSecondTempAddr;
	}

void X86Mmu::UnmapTemp()
	{
	__ASSERT_MUTEX(RamAllocatorMutex);
	for (TInt i=0; i<iTempMapCount; i++)
		{
		iTempPte[i] = 0;
		__DRAIN_WRITE_BUFFER;
		InvalidateTLBForPage(iTempAddr+(i<<KPageShift));
		}
	}

void X86Mmu::UnmapSecondTemp()
	{
	__ASSERT_MUTEX(RamAllocatorMutex);
	for (TInt i=0; i<iSecondTempMapCount; i++)
		{
		iSecondTempPte[i] = 0;
		__DRAIN_WRITE_BUFFER;
		InvalidateTLBForPage(iSecondTempAddr+(i<<KPageShift));
		}
	}
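
// Usage sketch (added for illustration; 'physAddr' and 'src' are hypothetical).
// The temp window pairs a MapTemp() with an UnmapTemp() under the RamAllocator
// mutex, e.g. to write to a page that has no writable linear mapping:
//
//	Kern::MutexWait(*RamAllocatorMutex);
//	TLinAddr w = ::TheMmu.MapTemp(physAddr, 0, 1);	// map one page at iTempAddr
//	memcpy((TAny*)w, src, KPageSize);
//	::TheMmu.UnmapTemp();
//	Kern::MutexSignal(*RamAllocatorMutex);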

void ExecHandler::UnlockRamDrive()
	{
	}

EXPORT_C void TInternalRamDrive::Unlock()
	{
	}

EXPORT_C void TInternalRamDrive::Lock()
	{
	}

TBool X86Mmu::ValidateLocalIpcAddress(TLinAddr aAddr,TInt aSize,TBool aWrite)
	{
	__NK_ASSERT_DEBUG(aSize<=KChunkSize);
	TLinAddr end = aAddr+aSize-1;
	if(end<aAddr)
		end = ~0u;

	if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize) || TUint(end^KIPCAlias)<TUint(KChunkSize))
		{
		// local address is in the alias region.
		// remove the alias...
		NKern::LockSystem();
		((DMemModelThread*)TheCurrentThread)->RemoveAlias();
		NKern::UnlockSystem();
		// access the memory, which will cause an exception...
		if(!(TUint(aAddr^KIPCAlias)<TUint(KChunkSize)))
			aAddr = end;
		DoInvalidateTLBForPage(aAddr);	// only need to do this processor since the alias range is owned by the thread
		if(aWrite)
			*(volatile TUint8*)aAddr = 0;
		else
			aWrite = *(volatile TUint8*)aAddr;
		// can't get here
		__NK_ASSERT_DEBUG(0);
		}

	TUint32 local_mask;
	DMemModelProcess* process=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	if(aWrite)
		local_mask = process->iAddressCheckMaskW;
	else
		local_mask = process->iAddressCheckMaskR;
	TInt mask = 2<<(end>>27);
	mask -= 1<<(aAddr>>27);
	if((local_mask&mask)!=mask)
		return EFalse;

	return ETrue;
	}
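
// Worked example of the mask arithmetic above (added for illustration): each bit
// of iAddressCheckMaskR/W covers one 128MB (1<<27) slice of the address space.
// For aAddr=0x10000000 and end=0x17ffffff, aAddr>>27==2 and end>>27==2, so
// mask = (2<<2)-(1<<2) = 0x04: only bit 2 must be set in the process's mask.
// A range spanning slices 2 and 3 gives mask = (2<<3)-(1<<2) = 0x0c instead,
// requiring both bits to be set.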

TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TInt aPerm, TLinAddr& aAliasAddr, TInt& aAliasSize)
//
// Set up an alias mapping starting at address aAddr in the specified process.
// Check permissions aPerm.
// Enter and return with the system locked.
// Note: the alias is removed if an exception is trapped by DThread::IpcExcHandler.
//
	{
	__KTRACE_OPT(KMMU2,Kern::Printf("Thread %O Alias %08x+%x Process %O perm %x",this,aAddr,aSize,aProcess,aPerm));
	__ASSERT_SYSTEM_LOCK;

	if(TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
		return KErrBadDescriptor;	// prevent access to the alias region

	// check if memory is in a region which is safe to access with supervisor permissions...
	TBool okForSupervisorAccess = aPerm&(EMapAttrReadSup|EMapAttrWriteSup) ? 1 : 0;
	if(!okForSupervisorAccess)
		{
		if(aAddr>=0xc0000000)			// address in kernel area (top 1GB)?
			return KErrBadDescriptor;	// don't have permission
		TUint32 local_mask;
		if(aPerm&EMapAttrWriteUser)
			local_mask = aProcess->iAddressCheckMaskW;
		else
			local_mask = aProcess->iAddressCheckMaskR;
		okForSupervisorAccess = (local_mask>>(aAddr>>27))&1;
		}

	if(aAddr>=KUserSharedDataEnd)	// if address is in the global section, don't bother aliasing it...
		{
		if(iAliasLinAddr)
			RemoveAlias();
		aAliasAddr = aAddr;
		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
		aAliasSize = aSize<maxSize ? aSize : maxSize;
		return okForSupervisorAccess;
		}

	TInt asid = aProcess->iOsAsid;
	TPde* pd = PageDirectory(asid);
	TPde pde = pd[aAddr>>KChunkShift];
#ifdef __SMP__
	TLinAddr aliasAddr;
#else
	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
#endif
	if(pde==iAliasPde && iAliasLinAddr)
		{
		// pde already aliased, so just update the linear address...
#ifdef __SMP__
		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
		aliasAddr = iAliasLinAddr & ~KChunkMask;
		aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
#endif
		iAliasLinAddr = aliasAddr;
		}
	else
		{
		// alias PDE changed...
		if(!iAliasLinAddr)
			{
			::TheMmu.iAliasList.Add(&iAliasLink);	// add to list if not already aliased
#ifdef __SMP__
			__NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
			iCpuRestoreCookie = NKern::FreezeCpu();	// temporarily lock the current thread to this processor
#endif
			}
		iAliasPde = pde;
		iAliasOsAsid = asid;
#ifdef __SMP__
		TSubScheduler& ss = SubScheduler();	// OK since we are locked to this CPU
		aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
		iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (((DMemModelProcess*)iOwningProcess)->iOsAsid << KPageTableShift));
#endif
		iAliasLinAddr = aliasAddr;
		}
	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, iAliasPdePtr));
	*iAliasPdePtr = pde;
	__DRAIN_WRITE_BUFFER;
	DoInvalidateTLBForPage(aliasAddr);	// only need to do this processor
	TInt offset = aAddr&KPageMask;
	aAliasAddr = aliasAddr | offset;
	TInt maxSize = KPageSize - offset;
	aAliasSize = aSize<maxSize ? aSize : maxSize;
	return okForSupervisorAccess;
	}

void DMemModelThread::RemoveAlias()
//
// Remove the alias mapping (if present).
// Enter and return with the system locked.
//
	{
	__KTRACE_OPT(KMMU2,Kern::Printf("Thread %O RemoveAlias", this));
	__ASSERT_SYSTEM_LOCK;
	TLinAddr addr = iAliasLinAddr;
	if(addr)
		{
		iAliasLinAddr = 0;
		iAliasPde = 0;
		__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x", iAliasPdePtr));
		*iAliasPdePtr = 0;
		__DRAIN_WRITE_BUFFER;
		DoInvalidateTLBForPage(addr);	// only need to do it for this processor
		iAliasLink.Deque();
#ifdef __SMP__
		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
		NKern::EndFreezeCpu(iCpuRestoreCookie);
		iCpuRestoreCookie = -1;
#endif
		}
	}
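
// Alias lifecycle (summary comment added for illustration): during IPC the kernel
// calls Alias() to window up to one page of the client process into the KIPCAlias
// region, copies through the returned address, then calls RemoveAlias(). If the
// copy takes an exception, DThread::IpcExcHandler removes the alias instead; this
// is why ValidateLocalIpcAddress() above rejects alias-range addresses and strips
// any leftover alias before probing the address.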

void X86Mmu::CacheMaintenanceOnDecommit(TPhysAddr)
	{
	// no cache operations required on freeing memory
	}

void X86Mmu::CacheMaintenanceOnDecommit(const TPhysAddr*, TInt)
	{
	// no cache operations required on freeing memory
	}

void X86Mmu::CacheMaintenanceOnPreserve(TPhysAddr, TUint)
	{
	// no cache operations required on preserving memory
	}

void X86Mmu::CacheMaintenanceOnPreserve(const TPhysAddr*, TInt, TUint)
	{
	// no cache operations required on preserving memory
	}

void X86Mmu::CacheMaintenanceOnPreserve(TPhysAddr, TInt, TLinAddr, TUint)
	{
	// no cache operations required on preserving memory
	}


TInt X86Mmu::UnlockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
	{
	TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
	TInt page = aLinAddr>>KPageShift;
	NKern::LockSystem();
	for(;;)
		{
		TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
		TPte* pt = SafePageTableFromPde(*pd++);
		__NK_ASSERT_DEBUG(pt);
		TInt pteIndex = page&(KChunkMask>>KPageShift);
		pt += pteIndex;
		do
			{
			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
			if(pagesInPt>aNumPages)
				pagesInPt = aNumPages;
			if(pagesInPt>KMaxPages)
				pagesInPt = KMaxPages;

			aNumPages -= pagesInPt;
			page += pagesInPt;

			do
				{
				TPte pte = *pt++;
				if(pte)	// pte may be null if the page has already been unlocked and reclaimed by the system
					iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
				}
			while(--pagesInPt);

			if(!aNumPages)
				{
				NKern::UnlockSystem();
				return KErrNone;
				}

			pteIndex = page&(KChunkMask>>KPageShift);
			}
		while(!NKern::FlashSystem() && pteIndex);
		}
	}


TInt X86Mmu::LockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
	{
	TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
	TInt page = aLinAddr>>KPageShift;
	NKern::LockSystem();
	for(;;)
		{
		TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
		TPte* pt = SafePageTableFromPde(*pd++);
		__NK_ASSERT_DEBUG(pt);
		TInt pteIndex = page&(KChunkMask>>KPageShift);
		pt += pteIndex;
		do
			{
			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
			if(pagesInPt>aNumPages)
				pagesInPt = aNumPages;
			if(pagesInPt>KMaxPages)
				pagesInPt = KMaxPages;

			aNumPages -= pagesInPt;
			page += pagesInPt;

			do
				{
				TPte pte = *pt++;
				if(pte==0)
					goto not_found;
				if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
					goto not_found;
				}
			while(--pagesInPt);

			if(!aNumPages)
				{
				NKern::UnlockSystem();
				return KErrNone;
				}

			pteIndex = page&(KChunkMask>>KPageShift);
			}
		while(!NKern::FlashSystem() && pteIndex);
		}
not_found:
	NKern::UnlockSystem();
	return KErrNotFound;
	}
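
// Note (added for illustration): LockRamCachePages() is the inverse of
// UnlockRamCachePages() above, reclaiming every previously donated page back from
// the RAM cache. It fails with KErrNotFound as soon as any page in the range has
// a zero PTE or cannot be reclaimed, since the buffer contents are then no longer
// intact.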


void RamCache::SetFree(SPageInfo* aPageInfo)
	{
	// Make a page free
	TInt type = aPageInfo->Type();
	if(type==SPageInfo::EPagedCache)
		{
		TInt offset = aPageInfo->Offset()<<KPageShift;
		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
		__NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iSize));
		TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
		TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
		TPte* pt = PtePtrFromLinAddr(lin,asid);
		*pt = 0;
		InvalidateTLBForPage(lin);

		// actually decommit it from the chunk...
		TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
		SPageTableInfo& ptinfo=((X86Mmu*)iMmu)->iPtInfo[ptid];
		if(!--ptinfo.iCount)
			{
			chunk->iPageTables[offset>>KChunkShift] = 0xffff;
			NKern::UnlockSystem();
			((X86Mmu*)iMmu)->DoUnassignPageTable(lin, (TAny*)asid);
			((X86Mmu*)iMmu)->FreePageTable(ptid);
			NKern::LockSystem();
			}
		}
	else
		{
		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
		Panic(EUnexpectedPageType);
		}
	}

// Not supported on x86 - no defrag yet
void X86Mmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)
	{
	MM::Panic(MM::EOperationNotSupported);
	}

TInt X86Mmu::RamDefragFault(TAny* aExceptionInfo)
	{
	MM::Panic(MM::EOperationNotSupported);
	return KErrAbort;
	}