// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\mprocess.cpp
//
//

#include "memmodel.h"
#include "mmboot.h"
#include "cache_maintenance.h"
#include <demand_paging.h>

#define iMState iWaitLink.iSpare1

// just for convenience...
#define KAmSelfMod (DMemModelChunk::ECode | DMemModelChunk::EAddressLocal)

_LIT(KDollarDat,"$DAT");
_LIT(KLitDollarCode,"$CODE");
_LIT(KLitDllDollarData,"DLL$DATA");

#ifdef __CPU_HAS_BTB
extern void __FlushBtb();
#endif

const TInt KChunkGranularity=4;

/********************************************
 * Process
 ********************************************/
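
// Tear down the memory-model state of a dying process. All chunks, the code
// chunk and the DLL data chunk must already be gone; the chunk info array and
// local section are freed, the OS ASID (address space identifier) is handed
// back to the MMU and, on CPUs with a branch target buffer, the BTB is flushed.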
void DMemModelProcess::Destruct()
	{
	__ASSERT_ALWAYS(!iChunkCount && !iCodeChunk && !iDllDataChunk, MM::Panic(MM::EProcessDestructChunksRemaining));
	Kern::Free(iChunks);
	Kern::Free(iLocalSection);
	if (iOsAsid)
		{
		Mmu& m=Mmu::Get();
		MmuBase::Wait();
		m.FreeOsAsid(iOsAsid);
		iOsAsid=0;
		MmuBase::Signal();
#ifndef __SMP__
		LastUserSelfMod=0;	// must force a BTB flush when next selfmod chunk switched in
#endif
		}
#ifdef __CPU_HAS_BTB
	__FlushBtb();
#endif
	DProcess::Destruct();
	}
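
// Create a new chunk for this process. GetNewChunk() allocates the
// DMemModelChunk object; the initial commit/adjust requested in aInfo is then
// applied, and finally (unless the chunk is global) the chunk is linked into
// this process's address space under the process lock.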
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
	{
	aChunk=NULL;
	DMemModelChunk* pC=NULL;
	TInt r=GetNewChunk(pC,aInfo);
	if (r!=KErrNone)
		{
		if (pC)
			pC->Close(NULL);
		return r;
		}
	TInt mapType=pC->iAttributes & DMemModelChunk::EMapTypeMask;
	pC->iOwningProcess=(mapType==DMemModelChunk::EMapTypeLocal)?this:NULL;
#ifdef __CPU_HAS_BTB
	if ((pC->iAttributes & KAmSelfMod) == KAmSelfMod)	// it's a potentially overlapping self-mod
		{
		iSelfModChunks++;
#ifndef __SMP__
		LastUserSelfMod = this;	// we become the last selfmodding process
#endif
		__FlushBtb();	// we need to do this, as there may be bad branches already in the btb
		}
#endif
	r=pC->Create(aInfo);
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
		{
		if (aInfo.iRunAddress!=0)
			pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
		if (aInfo.iPreallocated==0 && aInfo.iInitialTop!=0)
			{
			if (pC->iAttributes & DChunk::EDisconnected)
				{
				r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
				}
			else if (pC->iAttributes & DChunk::EDoubleEnded)
				{
				r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
				}
			else
				{
				r=pC->Adjust(aInfo.iInitialTop);
				}
			}
		}
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
		{
//		if (pC->iAttributes & DMemModelChunk::ECode)
//			MM::TheMmu->SyncCodeMappings();
		if (mapType!=DMemModelChunk::EMapTypeGlobal)
			{
			r=WaitProcessLock();
			if (r==KErrNone)
				{
				r=AddChunk(pC,aRunAddr,EFalse);
				SignalProcessLock();
				}
			}
		else
			aRunAddr=(TLinAddr)pC->Base();
		}
	if (r==KErrNone)
		{
		if (pC->iKernelMirror)
			aRunAddr = (TLinAddr)pC->iKernelMirror->Base();
		pC->iDestroyedDfc = aInfo.iDestroyedDfc;
		aChunk=(DChunk*)pC;
		}
	else
		pC->Close(NULL);	// NULL since chunk can't have been added to process
	return r;
	}
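
// First-phase construction of the process's address space. The kernel process
// keeps OS ASID 0, whose page directory entries are set up later in
// Mmu::Init2; every user process allocates a fresh OS ASID and records the
// physical address of its local (or global) page directory, plus the linear
// section from which its local chunks are allocated.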
TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPROC,Kern::Printf(">DMemModelProcess::DoCreate %O",this));

	Mmu& m=Mmu::Get();
	TInt r=KErrNone;

	iSelfModChunks=0;	// we don't have any yet.

	if (aKernelProcess)
		{
		iAttributes |= ESupervisor;
		//iOsAsid=0;
		// Leave these till Mmu::Init2
		//	if (m.iLocalPdSize)
		//		iLocalPageDir=m.LinearToPhysical(TLinAddr(m.LocalPageDir(0)));
		//	iGlobalPageDir=m.LinearToPhysical(TLinAddr(m.GlobalPageDir(0)));
		m.iAsidInfo[0]=((TUint32)this)|1;
		iAddressCheckMaskR=0xffffffff;
		iAddressCheckMaskW=0xffffffff;
		}
	else
		{
		MmuBase::Wait();
		r=m.NewOsAsid(EFalse);
		if (r>=0)
			{
			iOsAsid=r;
			if (m.iLocalPdSize)
				iLocalPageDir=m.LinearToPhysical(TLinAddr(m.LocalPageDir(r)));
			else
				iGlobalPageDir=m.LinearToPhysical(TLinAddr(m.GlobalPageDir(r)));
			m.iAsidInfo[r] |= (TUint32)this;
			r=KErrNone;
			}
		MmuBase::Signal();
		if (r==KErrNone && 0==(iLocalSection=TLinearSection::New(m.iUserLocalBase, m.iUserLocalEnd)) )
			r=KErrNoMemory;
		}

	__KTRACE_OPT(KPROC,Kern::Printf("OS ASID=%d, LPD=%08x, GPD=%08x, ASID info=%08x",iOsAsid,iLocalPageDir,
									iGlobalPageDir,m.iAsidInfo[iOsAsid]));
	__KTRACE_OPT(KPROC,Kern::Printf("<DMemModelProcess::DoCreate %d",r));
	return r;
	}
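
// Create the $DAT chunk holding the process's static data and BSS; the chunk
// is sized for the page-rounded data/bss plus the per-process stack space
// budget, so user-side thread stacks can be committed into it later.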
TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
	Mmu& m=Mmu::Get();
	TInt dataBssSize=Mmu::RoundToPageSize(aInfo.iTotalDataSize);
	TInt maxSize=dataBssSize+PP::MaxStackSpacePerProcess;
	TLinAddr dataRunAddress=m.iUserLocalBase;
	iDataBssRunAddress=dataRunAddress;

	__KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, chunk max size %x",dataBssSize,maxSize));

	SChunkCreateInfo cinfo;
	cinfo.iGlobal=EFalse;
	cinfo.iAtt=TChunkCreate::EDisconnected;
	cinfo.iForceFixed=EFalse;
	cinfo.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
	cinfo.iType=EUserData;
	cinfo.iMaxSize=maxSize;
	cinfo.iInitialBottom=0;
	cinfo.iInitialTop=dataBssSize;
	cinfo.iPreallocated=0;
	cinfo.iName.Set(KDollarDat);
	cinfo.iOwner=this;
	cinfo.iRunAddress=0;
	TLinAddr cb;
	TInt r=NewChunk((DChunk*&)iDataBssStackChunk,cinfo,cb);
	return r;
	}
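
// Attach an already-created chunk to this process. If the chunk is present
// already its access count is simply incremented; otherwise the work is done
// by the private AddChunk() overload below, under the process lock.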
TInt DMemModelProcess::AddChunk(DChunk* aChunk, TBool isReadOnly)
	{
	DMemModelChunk* pC=(DMemModelChunk*)aChunk;
	if ((pC->iAttributes & DMemModelChunk::EPrivate) && this!=pC->iOwningProcess)
		return KErrAccessDenied;
	TInt r=WaitProcessLock();
	if (r==KErrNone)
		{
		TInt pos=0;
		r=ChunkIndex(pC,pos);
		TLinAddr dataSectionBase=0;
		if (r==0)	// Found the chunk in this process, just up its count
			{
			iChunks[pos].iAccessCount++;
			__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[pos].iAccessCount));
			SignalProcessLock();
			return KErrNone;
			}
		r=AddChunk(pC,dataSectionBase,isReadOnly);
		SignalProcessLock();
		}
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
	return r;
	}
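
// Give the file server access to the RAM drive: unless the RAM drive chunk is
// mapped locally anyway, attach it to the calling thread's process.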
void M::FsRegisterThread()
	{
	DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
	TInt mapType=pC->iAttributes & DMemModelChunk::EMapTypeMask;
	if (mapType!=DMemModelChunk::EMapTypeLocal)
		{
		DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
		TLinAddr dataSectionBase;
		TInt r=pP->WaitProcessLock();
		if (r==KErrNone)
			r=pP->AddChunk(pC,dataSectionBase,EFalse);
		__ASSERT_ALWAYS(r==KErrNone, MM::Panic(MM::EFsRegisterThread));
		pP->SignalProcessLock();
		}
	}
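
// Insert aChunk into iChunks, which is kept sorted by chunk base address so
// that neighbours can be checked for overlap and OpenSharedChunk() can search
// it from the top. The array grows in KChunkGranularity steps, and for shared
// chunks the chunk's page tables are assigned into this address space.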
TInt DMemModelProcess::AddChunk(DMemModelChunk* aChunk, TLinAddr& aDataSectionBase, TBool isReadOnly)
	{
	//
	// Must hold the process $LOCK mutex before calling this
	//
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %O to %O",aChunk,this));
	SChunkInfo *pC=iChunks;
	SChunkInfo *pE=pC+iChunkCount-1;
	TLinAddr base=TLinAddr(aChunk->iBase);
	TInt i=0;

#ifdef __CPU_HAS_BTB
	if ((aChunk->iAttributes & KAmSelfMod)==KAmSelfMod)	// it's a potentially overlapping self-mod
		{
		iSelfModChunks++;
#ifndef __SMP__
		LastUserSelfMod = this;	// we become the last selfmodding process
#endif
		__FlushBtb();	// we need to do this, as there may be bad branches already in the btb
		}
#endif
	if (iChunkCount)
		{
		for (; pE>=pC && TLinAddr(pE->iChunk->iBase)>base; --pE);
		if (pE>=pC && TLinAddr(pE->iChunk->iBase)+pE->iChunk->iMaxSize>base)
			return KErrInUse;
		pC=pE+1;
		if (pC<iChunks+iChunkCount && base+aChunk->iMaxSize>TLinAddr(pC->iChunk->iBase))
			return KErrInUse;
		i=pC-iChunks;
		}
	if (iChunkCount==iChunkAlloc)
		{
		TInt newAlloc=iChunkAlloc+KChunkGranularity;
		TInt r=Kern::SafeReAlloc((TAny*&)iChunks,iChunkAlloc*sizeof(SChunkInfo),newAlloc*sizeof(SChunkInfo));
		if (r!=KErrNone)
			return r;
		pC=iChunks+i;
		iChunkAlloc=newAlloc;
		}
	memmove(pC+1,pC,(iChunkCount-i)*sizeof(SChunkInfo));
	++iChunkCount;
	pC->isReadOnly=isReadOnly;
	pC->iAccessCount=1;
	pC->iChunk=aChunk;
	aDataSectionBase=base;
	Mmu& m=Mmu::Get();
	if (aChunk->iOsAsids)
		{
		// only need to do address space manipulation for shared chunks
		MmuBase::Wait();
		aChunk->iOsAsids->Alloc(iOsAsid,1);
		TLinAddr a;
		TInt i=0;
		for (a=TLinAddr(aChunk->iBase); a<TLinAddr(aChunk->iBase)+aChunk->iMaxSize; a+=m.iChunkSize, ++i)
			{
			TInt ptid=aChunk->iPageTables[i];
			if (ptid!=0xffff)
				m.DoAssignPageTable(ptid,a,aChunk->iPdePermissions,(const TAny*)iOsAsid);
			}
		MmuBase::Signal();
		}
	if (aChunk->iChunkType==ERamDrive)
		{
		NKern::LockSystem();
		iAddressCheckMaskR |= m.iRamDriveMask;
		iAddressCheckMaskW |= m.iRamDriveMask;
		NKern::UnlockSystem();
		}
	__DEBUG_EVENT(EEventUpdateProcess, this);
	return KErrNone;
	}
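
// Remove the chunk at aIndex from iChunks and undo the address space work done
// in AddChunk(): free this ASID from the chunk's allocator, unassign its page
// tables and flush the data (and, for code chunks, instruction) TLB.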
void DMemModelProcess::DoRemoveChunk(TInt aIndex)
	{
	__DEBUG_EVENT(EEventUpdateProcess, this);
	DMemModelChunk* chunk = iChunks[aIndex].iChunk;
	memmove(iChunks+aIndex, iChunks+aIndex+1, (iChunkCount-aIndex-1)*sizeof(SChunkInfo));
	--iChunkCount;
	Mmu& m=Mmu::Get();
	if (chunk->iOsAsids)
		{
		// only need to do address space manipulation for shared chunks
		MmuBase::Wait();
		chunk->iOsAsids->Free(iOsAsid);
		TLinAddr a;
		for (a=TLinAddr(chunk->iBase); a<TLinAddr(chunk->iBase)+chunk->iMaxSize; a+=m.iChunkSize)
			m.DoUnassignPageTable(a,(const TAny*)iOsAsid);
		TUint32 mask=(chunk->iAttributes&DMemModelChunk::ECode)?Mmu::EFlushITLB:0;
		m.GenericFlush(mask|Mmu::EFlushDTLB);

		MmuBase::Signal();
		}
	if (chunk->iChunkType==ERamDrive)
		{
		NKern::LockSystem();
		iAddressCheckMaskR &= ~m.iRamDriveMask;
		iAddressCheckMaskW &= ~m.iRamDriveMask;
		NKern::UnlockSystem();
		}
	}

/**
Final chance for process to release resources during its death.

Called with process $LOCK mutex held (if it exists).
This mutex will not be released before it is deleted.
I.e. no other thread will ever hold the mutex again.
*/
void DMemModelProcess::FinalRelease()
	{
	// Clean up any left over chunks (such as SharedIo buffers)
	if(iProcessLock)
		while(iChunkCount)
			DoRemoveChunk(0);
	}

void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
	{
	// note that this can't be called after the process $LOCK mutex has been deleted
	// since it can only be called by a thread in this process doing a handle close or
	// dying, or by the process handles array being deleted due to the process dying,
	// all of which happen before $LOCK is deleted.
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O RemoveChunk %O",this,aChunk));
	Kern::MutexWait(*iProcessLock);
	TInt pos=0;
	TInt r=ChunkIndex(aChunk,pos);

	if (r==KErrNone)	// Found the chunk
		{
		__KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[pos].iAccessCount));
		if (--iChunks[pos].iAccessCount==0)
			{
			DoRemoveChunk(pos);
#ifdef __CPU_HAS_BTB
			if ((aChunk->iAttributes & KAmSelfMod)==KAmSelfMod)	// was a self-mod code chunk
				if (iSelfModChunks)
					iSelfModChunks--;
#endif
			}
		}
	Kern::MutexSignal(*iProcessLock);
	}
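
// Linear search of iChunks for aChunk; on success aPos is set to its index.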
TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk,TInt& aPos)
	{
	if (!aChunk)
		return KErrNotFound;
	SChunkInfo *pC=iChunks;
	SChunkInfo *pE=pC+iChunkCount;
	for (; pC<pE && pC->iChunk!=aChunk; ++pC);
	if (pC==pE)
		return KErrNotFound;
	aPos=pC-iChunks;
	return KErrNone;
	}
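
// Map a code segment into this process: user RAM code gets its pages committed
// into the process's $CODE chunk, and a DLL with writable static data has the
// data committed into the DLL$DATA chunk. Kernel code needs no per-process
// mapping.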
TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
	{
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
	TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	if (kernel_only && !(iAttributes&ESupervisor))
		return KErrNotSupported;
	if (seg.iAttr&ECodeSegAttKernel)
		return KErrNone;	// no extra mappings needed for kernel code
	TInt r=KErrNone;
	if (seg.Pages())
		r=MapUserRamCode(seg.Memory(),EFalse);
	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		if (r==KErrNone && total_data_size)
			{
			TInt size=Mmu::RoundToPageSize(total_data_size);
			r=CommitDllData(data_base, size);
			if (r!=KErrNone && seg.Pages())
				UnmapUserRamCode(seg.Memory(), EFalse);
			}
		}
	return r;
	}
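
// Undo MapCodeSeg(): decommit any DLL static data first, then unmap the code
// pages themselves.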
void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
	{
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
	if (seg.iAttr&ECodeSegAttKernel)
		return;	// no extra mappings needed for kernel code
	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		if (total_data_size)
			DecommitDllData(data_base, Mmu::RoundToPageSize(total_data_size));
		}
	if (seg.Pages())
		UnmapUserRamCode(seg.Memory(), EFalse);
	}

void DMemModelProcess::RemoveDllData()
//
// Call with CodeSegLock held
//
	{
	}
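
// Lazily create the process's $CODE chunk covering the user code area (or the
// kernel code area for the kernel process); code pages are committed into it
// as code segments are mapped.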
TInt DMemModelProcess::CreateCodeChunk()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateCodeChunk",this));
	TBool kernel=iAttributes&ESupervisor;
	Mmu& m=Mmu::Get();
	SChunkCreateInfo c;
	c.iGlobal=kernel;
	c.iAtt = TChunkCreate::EDisconnected | (kernel? 0 : TChunkCreate::EMemoryNotOwned);
	c.iForceFixed=EFalse;
	c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
	c.iRunAddress=kernel ? 0 : m.iUserCodeBase;
	c.iPreallocated=0;
	c.iType=kernel ? EKernelCode : EUserCode;
	c.iMaxSize=m.iMaxUserCodeSize;
	c.iName.Set(KLitDollarCode);
	c.iOwner=this;
	c.iInitialTop=0;
	TLinAddr runAddr;
	TInt r = NewChunk((DChunk*&)iCodeChunk,c,runAddr);
	return r;
	}

void DMemModelProcess::FreeCodeChunk()
	{
	iCodeChunk->Close(this);
	iCodeChunk=NULL;
	}
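
// Commit the pages of a RAM-loaded code segment into this process's $CODE
// chunk. Demand-paged code is committed virtually (its pages arrive later via
// the paging system); otherwise the segment's physical pages are mapped
// directly. While the loader is still filling the segment (aLoading), the
// mapping gets the load-time (writable) permissions and any slack beyond the
// code+data image is filled with 0x03 padding.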
TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory, TBool aLoading)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d %d",
									this, aMemory->iCodeSeg, aLoading, iOsAsid, aMemory->iIsDemandPaged));
	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);

	TInt r;

	if (!iCodeChunk)
		{
		r=CreateCodeChunk();
		__KTRACE_OPT(KPROC,Kern::Printf("CreateCodeChunk returns %d", r));
		if (r!=KErrNone)
			return r;
		}

	MmuBase::Wait();

	Mmu& m=Mmu::Get();
	TInt offset=aMemory->iRamInfo.iCodeRunAddr-TLinAddr(iCodeChunk->iBase);
	TInt codeSize = aMemory->iPageCount<<m.iPageShift;
	TBool paged = aMemory->iIsDemandPaged;
	DChunk::TCommitType commitType = paged ? DChunk::ECommitVirtual : DChunk::ECommitDiscontiguousPhysical;
	r=iCodeChunk->Commit(offset, codeSize, commitType, aMemory->iPages);
	__KTRACE_OPT(KPROC,Kern::Printf("Commit Pages returns %d", r));
	if(r==KErrNone)
		{
		if (aLoading && !paged)
			{
			iCodeChunk->ApplyPermissions(offset, codeSize, m.iUserCodeLoadPtePerm);
			UNLOCK_USER_MEMORY();
			memset((TAny*)(aMemory->iRamInfo.iCodeLoadAddr+aMemory->iRamInfo.iCodeSize+aMemory->iRamInfo.iDataSize), 0x03, codeSize-(aMemory->iRamInfo.iCodeSize+aMemory->iRamInfo.iDataSize));
			LOCK_USER_MEMORY();
			}
		if(aLoading && aMemory->iDataPageCount)
			{
			TInt dataSize = aMemory->iDataPageCount<<m.iPageShift;
			r=iCodeChunk->Commit(offset+codeSize, dataSize, DChunk::ECommitDiscontiguousPhysical, aMemory->iPages+aMemory->iPageCount);
			if(r==KErrNone)
				{
				iCodeChunk->ApplyPermissions(offset+codeSize, dataSize, m.iUserCodeLoadPtePerm);
				UNLOCK_USER_MEMORY();
				memset((TAny*)(aMemory->iRamInfo.iDataLoadAddr+aMemory->iRamInfo.iDataSize), 0x03, dataSize-aMemory->iRamInfo.iDataSize);
				LOCK_USER_MEMORY();
				}
			}
		if(r!=KErrNone)
			{
			// error, so decommit any code pages we had already committed...
			DChunk::TDecommitType decommitType = paged ? DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
			iCodeChunk->Decommit(offset, codeSize, decommitType);
			}
		else
			{
			// indicate codeseg is now successfully mapped into the process...
			NKern::LockSystem();
			aMemory->iOsAsids->Free(iOsAsid);
			NKern::UnlockSystem();
			}
		}

	MmuBase::Signal();

	if(r!=KErrNone && iCodeChunk->iSize==0)
		FreeCodeChunk();	// cleanup any unused code chunk we would otherwise leave lying around

	return r;
	}
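
// Reverse of MapUserRamCode(): mark the code segment as no longer mapped into
// this address space, then decommit its code pages (and, if an in-progress
// load is being abandoned, the pages holding the data section).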
void DMemModelProcess::UnmapUserRamCode(DMemModelCodeSegMemory* aMemory, TBool aLoading)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O UnmapUserRamCode %C %d %d",
									this, aMemory->iCodeSeg, iOsAsid, aMemory->iIsDemandPaged != 0));

	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);

	MmuBase::Wait();

	NKern::LockSystem();
	aMemory->iOsAsids->Alloc(iOsAsid, 1);
	NKern::UnlockSystem();

	Mmu& m=Mmu::Get();
	__NK_ASSERT_DEBUG(iCodeChunk);
	TInt offset=aMemory->iRamInfo.iCodeRunAddr-TLinAddr(iCodeChunk->iBase);
	TInt codeSize = aMemory->iPageCount<<m.iPageShift;
	TBool paged = aMemory->iIsDemandPaged;
	DChunk::TDecommitType decommitType = paged ? DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
	TInt r=iCodeChunk->Decommit(offset, codeSize, decommitType);
	__ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
	(void)r;	// Suppress the warning in urel build

	if(aLoading && aMemory->iDataPageCount)
		{
		// decommit pages used to store data section...
		TInt dataSize = aMemory->iDataPageCount<<m.iPageShift;
		r=iCodeChunk->Decommit(offset+codeSize, dataSize);
		__ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
		(void)r;	// Suppress the warning in urel build
		}
	__NK_ASSERT_DEBUG(iCodeChunk->iSize >= 0);

	MmuBase::Signal();

	if (iCodeChunk->iSize==0)
		FreeCodeChunk();
	}
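
// DLL static data lives in a per-process DLL$DATA chunk covering the DLL data
// area; it is created on first use and freed again once the last committed
// region has been decommitted.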
TInt DMemModelProcess::CreateDllDataChunk()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateDllDataChunk",this));
	Mmu& m=Mmu::Get();
	SChunkCreateInfo c;
	c.iGlobal=EFalse;
	c.iAtt=TChunkCreate::EDisconnected;
	c.iForceFixed=EFalse;
	c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
	c.iRunAddress=m.iDllDataBase;
	c.iPreallocated=0;
	c.iType=EDllData;
	c.iMaxSize=m.iMaxDllDataSize;
	c.iName.Set(KLitDllDollarData);
	c.iOwner=this;
	c.iInitialTop=0;
	TLinAddr runAddr;
	return NewChunk((DChunk*&)iDllDataChunk,c,runAddr);
	}

void DMemModelProcess::FreeDllDataChunk()
	{
	iDllDataChunk->Close(this);
	iDllDataChunk=NULL;
	}

TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));
	TInt r=KErrNone;
	if (!iDllDataChunk)
		r=CreateDllDataChunk();
	if (r==KErrNone)
		{
		TInt offset=aBase-(TLinAddr)iDllDataChunk->iBase;
		__ASSERT_ALWAYS(TUint32(offset)<TUint32(iDllDataChunk->iMaxSize),MM::Panic(MM::ECommitInvalidDllDataAddress));
		r=iDllDataChunk->Commit(offset, aSize);
		if (r!=KErrNone && iDllDataChunk->iSize==0)
			FreeDllDataChunk();
		}
	__KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
	return r;
	}

void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
	TInt offset=aBase-(TLinAddr)iDllDataChunk->iBase;
	TInt r=iDllDataChunk->Decommit(offset, aSize);
	__ASSERT_ALWAYS(r==KErrNone,MM::Panic(MM::EDecommitInvalidDllDataAddress));
	if (iDllDataChunk->iSize==0)
		FreeDllDataChunk();
	}

TInt DMemModelProcess::NewShPool(DShPool*& /* aPool */, TShPoolCreateInfo& /* aInfo */)
	{
	return KErrNotSupported;
	}
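
// Inter-process copy primitives. Because source and destination live in
// different address spaces, the remote range is made visible a piece at a
// time through a temporary alias mapping in the current thread
// (DMemModelThread::Alias), and each aliased piece is copied with the variant
// of memcpy/umemget/umemput/uumemcpy that matches whether the local and
// remote sides are safe for direct (supervisor-mode) access.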
TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* /*aExcTrap*/)
//
// Read from the thread's process.
// Enter and return with system locked
// aSrc Run address of memory to read
// aDest Current address of destination
// aExcTrap Exception trap object to be updated if the actual memory access is performed on a memory area other than the one specified.
// This happens when reading is performed on an unaligned memory area.
//
	{
	Mmu& m=Mmu::Get();
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	TLinAddr dest=(TLinAddr)aDest;
	TBool localIsSafe=ETrue;
	TInt result = KErrNone;

	while (aLength)
		{
		if (iMState==EDead)
			{
			result = KErrDied;
			break;
			}
		TLinAddr alias_src;
		TInt alias_size;
		TInt alias_result=t.Alias(src, pP, aLength, EMapAttrReadUser, alias_src, alias_size);
		if (alias_result<0)
			{
			result = KErrBadDescriptor;	// bad permissions
			break;
			}
		NKern::UnlockSystem();

		__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-%08x+%x",dest,alias_src,alias_size));
		if(aFlags&KCheckLocalAddress)
			localIsSafe = m.ValidateLocalIpcAddress(dest,alias_size,ETrue);

		CHECK_PAGING_SAFE;

		COND_UNLOCK_USER_MEMORY(localIsSafe);

		if(alias_result)
			{
			// remote address is safe for direct access...
			if (localIsSafe)
				memcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
			else
				umemput( (TAny*)dest, (const TAny*)alias_src, alias_size);
			}
		else
			{
			// remote address is NOT safe for direct access, so use user permission checks when reading...
			if (localIsSafe)
				umemget( (TAny*)dest, (const TAny*)alias_src, alias_size);
			else
				uumemcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
			}

		LOCK_USER_MEMORY();

		src+=alias_size;
		dest+=alias_size;
		aLength-=alias_size;
		NKern::LockSystem();
		}
	t.RemoveAlias();
	return result;
	}
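
// As RawRead, but copying into the thread's process. When both the target and
// the originating thread belong to the file server, the alias is created with
// supervisor write permissions; everything else must pass the user-write
// permission check on the remote side.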
TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* anOriginatingThread, TIpcExcTrap* /*aExcTrap*/)
//
// Write to the thread's process.
// Enter and return with system locked
// aDest Run address of memory to write
// aSrc Current address of the source data
// anOriginatingThread The thread on behalf of which this operation is performed (eg client of device driver).
// aExcTrap Exception trap object to be updated if the actual memory access is performed on a memory area other than the one specified.
// This happens when writing is performed on an unaligned memory area.
//
	{
	Mmu& m=Mmu::Get();
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	TLinAddr dest=(TLinAddr)aDest;
	TBool localIsSafe=ETrue;
	DThread* pO=anOriginatingThread?anOriginatingThread:&t;
	DProcess* pF=K::TheFileServerProcess;
	TBool special=(iOwningProcess==pF && pO->iOwningProcess==pF);
	TUint32 perm=special ? EMapAttrWriteSup : EMapAttrWriteUser;
	TInt result = KErrNone;

	while (aLength)
		{
		if (iMState==EDead)
			{
			result = KErrDied;
			break;
			}
		TLinAddr alias_dest;
		TInt alias_size;
		TInt alias_result=t.Alias(dest, pP, aLength, perm, alias_dest, alias_size);
		if (alias_result<0)
			{
			result = KErrBadDescriptor;	// bad permissions
			break;
			}
		NKern::UnlockSystem();

		__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite %08x+%x->%08x",src,alias_size,alias_dest));
		if(aFlags&KCheckLocalAddress)
			localIsSafe = m.ValidateLocalIpcAddress(src,alias_size,EFalse);

		// Must check that it is safe to page, unless we are reading from unpaged ROM in which case
		// we allow it. umemget and uumemcpy do this anyway, so we just need to check if
		// localIsSafe is set.
		if (localIsSafe)
			{
			CHECK_PAGING_SAFE_RANGE(src, aLength);
			CHECK_DATA_PAGING_SAFE_RANGE(dest, aLength);
			}

		COND_UNLOCK_USER_MEMORY(localIsSafe);

		if(alias_result)
			{
			// remote address is safe for direct access...
			if (localIsSafe)
				memcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
			else
				umemget( (TAny*)alias_dest, (const TAny*)src, alias_size);
			}
		else
			{
			// remote address is NOT safe for direct access, so use user permission checks when writing...
			if (localIsSafe)
				umemput( (TAny*)alias_dest, (const TAny*)src, alias_size);
			else
				uumemcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
			}

		LOCK_USER_MEMORY();

		src+=alias_size;
		dest+=alias_size;
		aLength-=alias_size;
		NKern::LockSystem();
		}
	t.RemoveAlias();
	return result;
	}
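
// Run-mode debugger support: patch code (e.g. plant a breakpoint) in another
// process by writing through a temporary kernel mapping of the target page.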
#ifdef __DEBUGGER_SUPPORT__

/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
	{
	Mmu& m=Mmu::Get();
	MmuBase::Wait();

	NKern::LockSystem();

	// Find the physical address of the page the breakpoint belongs to
	TPhysAddr physAddr = m.LinearToPhysical(aAddress,((DMemModelProcess*)aProcess)->iOsAsid);
	__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - PA:%x", physAddr));
	if (physAddr==KPhysAddrInvalid)
		{
		__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - invalid VA"));
		NKern::UnlockSystem();
		MmuBase::Signal();
		return KErrBadDescriptor;
		}

	// Temporarily map the physical page
	TLinAddr tempAddr = m.MapTemp (physAddr&~m.iPageMask, aAddress);
	tempAddr |= aAddress & m.iPageMask;
	__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - tempAddr:%x",tempAddr));

	// Set an exception handler. Make sure the boundaries cover the worst case (aSize = 4)
	TIpcExcTrap xt;
	xt.iLocalBase=0;
	xt.iRemoteBase=(TLinAddr)tempAddr&~3;	// word aligned.
	xt.iSize=sizeof(TInt);
	xt.iDir=1;

	TInt r=xt.Trap(NULL);
	if (r==0)
		{
		r = WriteCode(tempAddr, aSize, aValue, aOldValue);
		xt.UnTrap();
		}

	m.UnmapTemp();
	NKern::UnlockSystem();
	MmuBase::Signal();
	return r;
	}

/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
TInt CodeModifier::WriteCode(TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
	{
	// We do not want to be interrupted by e.g. an ISR that would run the altered code before IMB-Range.
	// Therefore, copy data and clean/invalidate caches with interrupts disabled.
	TInt irq=NKern::DisableAllInterrupts();
	switch(aSize)
		{
		case 1:
			*(TUint8*) aOldValue = *(TUint8*)aAddress;
			*(TUint8*) aAddress = (TUint8)aValue;
			break;
		case 2:
			*(TUint16*) aOldValue = *(TUint16*)aAddress;
			*(TUint16*) aAddress = (TUint16)aValue;
			break;
		default:	// it is 4 otherwise
			*(TUint32*) aOldValue = *(TUint32*)aAddress;
			*(TUint32*) aAddress = (TUint32)aValue;
			break;
		};
	CacheMaintenance::CodeChanged(aAddress, aSize, CacheMaintenance::ECodeModifier);
	NKern::RestoreInterrupts(irq);
	return KErrNone;
	}
#endif //__DEBUGGER_SUPPORT__

#ifdef __MARM__

// the body of ReadDesHeader is machine coded on ARM...
extern TInt ThreadDoReadAndParseDesHeader(DThread* aThread, const TAny* aSrc, TUint32* aDest);

TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read and parse the header of a remote descriptor.
// Enter and return with system locked
//
	{
	// todo: remove use of system lock from callers, when they have been un-exported from the kernel
	NKern::UnlockSystem();
	TInt r = ThreadDoReadAndParseDesHeader(this,aSrc,(TUint32*)&aDest);
	NKern::LockSystem();
	return r;
	}

#else // !__MARM__
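
// Generic C++ version. The first word of a descriptor encodes its type in the
// top bits; LengthLookup maps that type to the header length in bytes (0
// marks an invalid type). The header may straddle the end of the currently
// aliased region, in which case the alias window is stepped forward until the
// whole header has been read.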
TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read and parse the header of a remote descriptor.
// Enter and return with system locked
//
	{
	static const TUint8 LengthLookup[16] = {4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};

	DMemModelThread& t = *(DMemModelThread*)TheCurrentThread;
	TInt r = KErrBadDescriptor;

	CHECK_PAGING_SAFE;

	DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess;
	TLinAddr src = (TLinAddr)aSrc;
	const TUint32* pAlias;
	TInt alias_size;
	TInt alias_result = t.Alias(src, pP, 12, EMapAttrReadUser, (TLinAddr&)pAlias, alias_size);
	if (alias_result<0)
		return KErrBadDescriptor;	// bad permissions
	NKern::UnlockSystem();
	t.iIpcClient = this;
	TUint32* dest = (TUint32*)&aDest;
	if (Kern::SafeRead(pAlias, dest, sizeof(TUint32)))
		goto fail;

	{
	TInt type=*dest>>KShiftDesType8;

	src += sizeof(TUint32);
	alias_size -= sizeof(TUint32);
	++pAlias;
	++dest;

	TInt l=LengthLookup[type];
	if (l==0)
		goto fail;

	l -= sizeof(TUint32);	// we've already read one word
	if (l>0 && alias_size)
		{
get_more:
		// more to go - get rest or as much as is currently aliased
		TInt ll = alias_size>=l ? l : alias_size;
		if(Kern::SafeRead(pAlias, dest, ll))
			goto fail;
		l -= ll;
		src += TLinAddr(ll);
		dest = (TUint32*)(TLinAddr(dest) + TLinAddr(ll));
		}
	if (l>0)
		{
		// more to go - need to step alias on
		NKern::LockSystem();
		alias_result = t.Alias(src, pP, l, EMapAttrReadUser, (TLinAddr&)pAlias, alias_size);
		if (alias_result<0)
			goto fail_locked;
		NKern::UnlockSystem();
		goto get_more;
		}

	r = K::ParseDesHeader(aSrc, *(TRawDesHeader*)&aDest, aDest);
	}

fail:
	NKern::LockSystem();
fail_locked:
	t.RemoveAlias();
	t.iIpcClient = NULL;
	return r;
	}

#endif
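
// Find the shared-kernel chunk containing aAddress in this thread's process,
// walking the process's base-sorted chunk array from the top; on success the
// chunk is opened and the offset within it returned in aOffset.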
DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
	{
	NKern::LockSystem();

	DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess;
	DMemModelProcess::SChunkInfo* pS=pP->iChunks;
	DMemModelProcess::SChunkInfo* pC=pS+pP->iChunkCount;
	while(--pC>=pS && TUint(pC->iChunk->Base())>TUint(aAddress)) {};
	if(pC>=pS)
		{
		DMemModelChunk* chunk = pC->iChunk;
		if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
			{
			TInt offset = (TInt)aAddress-(TInt)chunk->Base();
			if(TUint(offset)<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
				{
				aOffset = offset;
				NKern::UnlockSystem();
				return chunk;
				}
			}
		}
	NKern::UnlockSystem();
	return 0;
	}
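
// Pin-down helpers for DMA: resolve a linear range in this thread's process to
// its physical pages before a transfer, and release those pages afterwards.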
TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	{
	TInt asid = ((DMemModelProcess*)iOwningProcess)->iOsAsid;
	Mmu& m=(Mmu&)*MmuBase::TheMmu;
	return m.PreparePagesForDMA((TLinAddr)aLinAddr, aSize, asid, aPhysicalPageList);
	}

TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	{
	TInt pageCount = (((TInt)aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
	Mmu& m=(Mmu&)*MmuBase::TheMmu;
	return m.ReleasePagesFromDMA(aPhysicalPageList, pageCount);
	}