1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/multiple/mcodeseg.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,537 @@
1.4 +// Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\memmodel\epoc\multiple\mcodeseg.cpp
1.18 +//
1.19 +//
1.20 +
1.21 +#include "memmodel.h"
1.22 +#include <mmubase.inl>
1.23 +#include "cache_maintenance.h"
1.24 +#include <demand_paging.h>
1.25 +
1.26 +DCodeSeg* M::NewCodeSeg(TCodeSegCreateInfo&)
1.27 +//
1.28 +// Factory function - allocate the code segment object used by the
1.29 +// multiple memory model.
1.30 +	{
1.31 +	__KTRACE_OPT(KDLL,Kern::Printf("M::NewCodeSeg"));
1.32 +	DMemModelCodeSeg* pS = new DMemModelCodeSeg;
1.33 +	return pS;
1.34 +	}
1.35 +
1.36 +//
1.37 +// DMemModelCodeSegMemory
1.38 +//
1.39 +// Factory for the concrete (multiple memory model) code segment memory object.
1.40 +DEpocCodeSegMemory* DEpocCodeSegMemory::New(DEpocCodeSeg* aCodeSeg)
1.41 +	{
1.42 +	return new DMemModelCodeSegMemory(aCodeSeg);
1.43 +	}
1.44 +
1.45 +// Construct code-segment memory for the multiple memory model; all real work is in Create().
1.46 +DMemModelCodeSegMemory::DMemModelCodeSegMemory(DEpocCodeSeg* aCodeSeg)
1.47 +	: DMmuCodeSegMemory(aCodeSeg)
1.48 +	{
1.49 +	}
1.50 +
1.51 +// Allocate physical RAM and user-code address space for this code segment.
1.52 +TInt DMemModelCodeSegMemory::Create(TCodeSegCreateInfo& aInfo)
1.53 +	{
1.54 +	TInt r = DMmuCodeSegMemory::Create(aInfo);	// memory-model independent creation first
1.55 +	if(r!=KErrNone)
1.56 +		return r;
1.57 +
1.58 +	Mmu& m=Mmu::Get();
1.59 +	// Bitmap recording which address spaces (OS ASIDs) this segment is mapped into.
1.60 +	iOsAsids = TBitMapAllocator::New(m.iNumOsAsids, EFalse);
1.61 +	if(!iOsAsids)
1.62 +		return KErrNoMemory;
1.63 +	// One physical-address slot per code page and per (temporary) data page.
1.64 +	TInt totalPages = iPageCount+iDataPageCount;
1.65 +	iPages = (TPhysAddr*)Kern::Alloc(totalPages*sizeof(TPhysAddr));
1.66 +	if(!iPages)
1.67 +		return KErrNoMemory;
1.68 +	TInt i;
1.69 +	for (i=0; i<totalPages; ++i)
1.70 +		iPages[i] = KPhysAddrInvalid;	// no page allocated for this slot yet
1.71 +
1.72 +	MmuBase::Wait();	// MMU mutex guards RAM page allocation
1.73 +
1.74 +	// allocate RAM pages... (for demand-paged code only the data pages are allocated up front)
1.75 +	__KTRACE_OPT(KDLL,Kern::Printf("Alloc DLL pages %x,%x", iPageCount,iDataPageCount));
1.76 +	TInt startPage = iIsDemandPaged ? iPageCount : 0;	// if demand paged, skip pages for code
1.77 +	TInt endPage = iPageCount+iDataPageCount;
1.78 +	r=m.AllocRamPages(iPages+startPage, endPage-startPage, EPageMovable);
1.79 +
1.80 +	// initialise SPageInfo objects for allocated pages...
1.81 +	if (r==KErrNone)
1.82 +		{
1.83 +		NKern::LockSystem();
1.84 +		for (i=startPage; i<endPage; ++i)
1.85 +			{
1.86 +			SPageInfo* info = SPageInfo::FromPhysAddr(iPages[i]);
1.87 +			info->SetCodeSegMemory(this,i);
1.88 +			if((i&15)==15)
1.89 +				NKern::FlashSystem();	// briefly release the system lock every 16 pages to bound latency
1.90 +			}
1.91 +		NKern::UnlockSystem();
1.92 +		}
1.93 +
1.94 +	MmuBase::Signal();
1.95 +
1.96 +	if (r!=KErrNone)
1.97 +		return r;
1.98 +
1.99 +#ifdef BTRACE_CODESEGS
1.100 +	BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryAllocated,iCodeSeg,iPageCount<<m.iPageShift);
1.101 +#endif
1.102 +
1.103 +	DCodeSeg::Wait();	// code segment mutex guards the user code allocator
1.104 +	// Reserve a contiguous range of user code address space, in alias-sized units.
1.105 +	TInt code_alloc=((totalPages<<m.iPageShift)+m.iAliasMask)>>m.iAliasShift;
1.106 +	r=MM::UserCodeAllocator->AllocConsecutive(code_alloc, ETrue);
1.107 +	if (r<0)
1.108 +		r = KErrNoMemory;
1.109 +	else
1.110 +		{
1.111 +		MM::UserCodeAllocator->Alloc(r, code_alloc);
1.112 +		iCodeAllocBase=r;	// remembered so the destructor can free the range
1.113 +		iRamInfo.iCodeRunAddr=m.iUserCodeBase+(r<<m.iAliasShift);
1.114 +		iRamInfo.iCodeLoadAddr=iRamInfo.iCodeRunAddr;	// code is loaded at its run address
1.115 +		if (iRamInfo.iDataSize)
1.116 +			{
1.117 +			if(iDataPageCount)	// demand paged: data loads after the page-rounded code
1.118 +				iRamInfo.iDataLoadAddr=iRamInfo.iCodeLoadAddr+Mmu::RoundToPageSize(iRamInfo.iCodeSize);
1.119 +			else
1.120 +				iRamInfo.iDataLoadAddr=iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize;
1.121 +			}
1.122 +		// Map the code into the loader's process so it can write the code image.
1.123 +		DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
1.124 +		r=pP->MapUserRamCode(this, ETrue);
1.125 +		if (r==KErrNone)
1.126 +			iCreator=pP;	// Loaded()/Destroy() use this to unmap from the creator
1.127 +		}
1.128 +
1.129 +	DCodeSeg::Signal();
1.130 +	return r;
1.131 +	}
1.132 +
1.133 +// Replace the physical page at aOffset (aOld) with aNew, updating every mapping.
1.134 +void DMemModelCodeSegMemory::Substitute(TInt aOffset, TPhysAddr aOld, TPhysAddr aNew)
1.135 +	{
1.136 +	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelCodeSegMemory::Substitute %x %08x %08x",aOffset,aOld,aNew));
1.137 +	Mmu& m=Mmu::Get();
1.138 +	const TInt pageIndex = aOffset>>KPageShift;
1.139 +	if (iPages[pageIndex] != aOld)
1.140 +		MM::Panic(MM::ECodeSegRemapWrongPage);	// caller's view of this page is stale
1.141 +	iPages[pageIndex] = aNew;
1.142 +	// Re-point the PTE in every address space this segment is mapped into.
1.143 +	m.RemapPageByAsid(iOsAsids, iRamInfo.iCodeRunAddr+aOffset, aOld, aNew, m.PtePermissions(EUserCode));
1.144 +	}
1.145 +
1.146 +// Called when the loader has finished writing this code segment's image.
1.147 +TInt DMemModelCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
1.148 +	{
1.149 +	__NK_ASSERT_DEBUG(iPages);	// Create() must have completed successfully
1.150 +
1.151 +	TInt r = DMmuCodeSegMemory::Loaded(aInfo);	// memory-model independent processing first
1.152 +	if(r!=KErrNone)
1.153 +		return r;
1.154 +
1.155 +	Mmu& m=Mmu::Get();
1.156 +	// Non-demand-paged code was written directly at its run address; just sync caches.
1.157 +	if(!iIsDemandPaged)
1.158 +		{
1.159 +		UNLOCK_USER_MEMORY();
1.160 +		CacheMaintenance::CodeChanged(iRamInfo.iCodeLoadAddr, iRamInfo.iCodeSize);
1.161 +		LOCK_USER_MEMORY();
1.162 +		}
1.163 +	else
1.164 +		{
1.165 +		// apply code fixups to pages which have already been loaded...
1.166 +		TInt pageShift = m.iPageShift;
1.167 +		for (TInt i = 0 ; i < iPageCount ; ++i)
1.168 +			{
1.169 +			if (iPages[i] != KPhysAddrInvalid)	// only pages currently present need fixing up now
1.170 +				{
1.171 +				r = ApplyCodeFixupsOnLoad((TUint32*)(iRamInfo.iCodeLoadAddr+(i<<pageShift)),iRamInfo.iCodeRunAddr+(i<<pageShift));
1.172 +				if(r!=KErrNone)
1.173 +					return r;
1.174 +				}
1.175 +			}
1.176 +
1.177 +		// copy export directory (this will now have fixups applied)...
1.178 +		TInt exportDirSize = iRamInfo.iExportDirCount * sizeof(TLinAddr);
1.179 +		if (exportDirSize > 0 || (exportDirSize==0 && (iCodeSeg->iAttr&ECodeSegAttNmdExpData)) )
1.180 +			{
1.181 +			exportDirSize += sizeof(TLinAddr);	// include the count word stored just before the directory
1.182 +			TLinAddr* expDir = (TLinAddr*)Kern::Alloc(exportDirSize);
1.183 +			if (!expDir)
1.184 +				return KErrNoMemory;
1.185 +			iCopyOfExportDir = expDir;
1.186 +			UNLOCK_USER_MEMORY();
1.187 +			memcpy(expDir,(TAny*)(iRamInfo.iExportDir-sizeof(TLinAddr)),exportDirSize);
1.188 +			LOCK_USER_MEMORY();
1.189 +			}
1.190 +		}
1.191 +
1.192 +	// unmap code from loading process...
1.193 +	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
1.194 +	__ASSERT_ALWAYS(iCreator==pP, MM::Panic(MM::ECodeSegLoadedNotCreator));
1.195 +	pP->UnmapUserRamCode(this, ETrue);
1.196 +	iCreator=NULL;
1.197 +
1.198 +	// discard any temporary pages used to store loaded data section...
1.199 +	if(iDataPageCount)
1.200 +		{
1.201 +		MmuBase::Wait();
1.202 +		TPhysAddr* pages = iPages+iPageCount;	// data pages sit after the code pages
1.203 +		m.FreePages(pages,iDataPageCount, EPageMovable);
1.204 +		for (TInt i = 0 ; i < iDataPageCount ; ++i)
1.205 +			pages[i] = KPhysAddrInvalid;
1.206 +		MmuBase::Signal();
1.207 +
1.208 +		// see if we can free any virtual address space now we don't need any for loading data
1.209 +		TInt data_start = ((iPageCount << m.iPageShift) + m.iAliasMask) >> m.iAliasShift;
1.210 +		TInt data_end = (((iPageCount + iDataPageCount) << m.iPageShift) + m.iAliasMask) >> m.iAliasShift;
1.211 +		if (data_end != data_start)
1.212 +			{
1.213 +			DCodeSeg::Wait();
1.214 +			MM::UserCodeAllocator->Free(iCodeAllocBase + data_start, data_end - data_start);
1.215 +			DCodeSeg::Signal();
1.216 +			}
1.217 +
1.218 +		iDataPageCount = 0;
1.219 +		//Reduce the size of the DCodeSeg now the data section has been moved
1.220 +		iCodeSeg->iSize = iPageCount << m.iPageShift;
1.221 +		}
1.222 +
1.223 +	return KErrNone;
1.224 +	}
1.225 +
1.226 +// Cleanup hook: if the segment never finished loading, unmap it from its creator.
1.227 +void DMemModelCodeSegMemory::Destroy()
1.228 +	{
1.229 +	if(iCreator)
1.230 +		iCreator->UnmapUserRamCode(this, ETrue); // remove from creating process if not fully loaded
1.231 +	}
1.232 +
1.233 +// Free the RAM pages, address space and metadata owned by this object.
1.234 +DMemModelCodeSegMemory::~DMemModelCodeSegMemory()
1.235 +	{
1.236 +	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSegMemory::~DMemModelCodeSegMemory %x", this));
1.237 +	__NK_ASSERT_DEBUG(iAccessCount==0);
1.238 +	__NK_ASSERT_DEBUG(iOsAsids==0 || iOsAsids->Avail()==0); // check not mapped (inverted logic!)
1.239 +
1.240 +	Mmu& m=Mmu::Get();
1.241 +
1.242 +	if(iCodeAllocBase>=0)	// only >=0 once Create() reserved address space
1.243 +		{
1.244 +		// free allocated virtual memory space...
1.245 +		TInt size = (iPageCount+iDataPageCount)<<KPageShift;
1.246 +		TInt code_alloc=(size+m.iAliasMask)>>m.iAliasShift;	// same rounding as in Create()
1.247 +		DCodeSeg::Wait();
1.248 +		MM::UserCodeAllocator->Free(iCodeAllocBase, code_alloc);
1.249 +		DCodeSeg::Signal();
1.250 +		}
1.251 +
1.252 +	if(iPages)
1.253 +		{
1.254 +#ifdef __DEMAND_PAGING__
1.255 +		if (iIsDemandPaged)
1.256 +			{
1.257 +			// Return any paged memory to the paging system
1.258 +			MmuBase::Wait();
1.259 +			NKern::LockSystem();
1.260 +			DemandPaging& p = *DemandPaging::ThePager;
1.261 +			for (TInt i = 0 ; i < iPageCount ; ++i)
1.262 +				{
1.263 +				if (iPages[i] != KPhysAddrInvalid)	// only pages currently paged in
1.264 +					p.NotifyPageFree(iPages[i]);
1.265 +				}
1.266 +			NKern::UnlockSystem();
1.267 +			MmuBase::Signal();
1.268 +
1.269 +			Kern::Free(iCopyOfExportDir);	// free kernel-side copy of the export directory
1.270 +			iCopyOfExportDir = NULL;
1.271 +			}
1.272 +#endif
1.273 +		MmuBase::Wait();
1.274 +		m.FreePages(iPages,iPageCount+iDataPageCount, EPageMovable);
1.275 +		MmuBase::Signal();
1.276 +		Kern::Free(iPages);
1.277 +		iPages = NULL;
1.278 +#ifdef BTRACE_CODESEGS
1.279 +		BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryDeallocated,this,iPageCount<<m.iPageShift);
1.280 +#endif
1.281 +		}
1.282 +	delete iOsAsids;
1.283 +	}
1.284 +
1.285 +
1.286 +DMemModelCodeSeg::DMemModelCodeSeg()
1.287 +//
1.288 +// Constructor. Alloc bases start at -1, meaning "nothing allocated yet";
1.289 +// the destructor only frees an allocation whose base is >=0.
1.290 +	: iCodeAllocBase(-1),
1.291 +	  iDataAllocBase(-1)
1.292 +	{
1.293 +	}
1.294 +
1.295 +
1.296 +DMemModelCodeSeg::~DMemModelCodeSeg()
1.297 +//
1.298 +// Destructor - release code address space, the code segment memory object,
1.299 +// any reserved DLL data slot and kernel-side static data.
1.300 +	{
1.301 +	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::Destruct %C", this));
1.302 +	Mmu& m=Mmu::Get();
1.303 +	DCodeSeg::Wait();	// serialise with other code segment operations
1.304 +	if (iCodeAllocBase>=0)	// code space was committed by DoCreateRam()
1.305 +		{
1.306 +		TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
1.307 +		TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
1.308 +		TInt r=KErrNone;
1.309 +		if (kernel)
1.310 +			{
1.311 +			DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
1.312 +			r=kproc.iCodeChunk->Decommit(iCodeAllocBase, iSize);
1.313 +			}
1.314 +		else if (global)
1.315 +			{
1.316 +			r=m.iGlobalCode->Decommit(iCodeAllocBase, iSize);
1.317 +			}
1.318 +		__ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
1.319 +		r=r; // stop compiler warning
1.320 +		}
1.321 +	if(Memory())
1.322 +		Memory()->Destroy();	// unmaps from the creating process if still attached
1.323 +	if (iDataAllocBase>=0 && !iXIP)	// release the reserved DLL static data slot
1.324 +		{
1.325 +		SRamCodeInfo& ri=RamInfo();
1.326 +		TInt data_alloc=(ri.iDataSize+ri.iBssSize+m.iPageMask)>>m.iPageShift;	// same rounding as DoCreateRam()
1.327 +		MM::DllDataAllocator->Free(iDataAllocBase, data_alloc);
1.328 +		}
1.329 +	DCodeSeg::Signal();
1.330 +	Kern::Free(iKernelData);	// kernel-side static data buffer (if any)
1.331 +	DEpocCodeSeg::Destruct();
1.332 +	}
1.333 +
1.334 +// Allocate address space and backing resources for a RAM-loaded code segment.
1.335 +TInt DMemModelCodeSeg::DoCreateRam(TCodeSegCreateInfo& aInfo, DProcess*)
1.336 +	{
1.337 +	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateRam %C", this));
1.338 +	TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
1.339 +	TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
1.340 +	Mmu& m=Mmu::Get();
1.341 +	SRamCodeInfo& ri=RamInfo();
1.342 +	iSize = Mmu::RoundToPageSize(ri.iCodeSize+ri.iDataSize);	// committed size covers code plus initialised data image
1.343 +	if (iSize==0)
1.344 +		return KErrCorrupt;
1.345 +	TInt total_data_size=ri.iDataSize+ri.iBssSize;
1.346 +	TInt r=KErrNone;
1.347 +	if (kernel)
1.348 +		{
1.349 +		DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;	// kernel code lives in the kernel process' code chunk
1.350 +		if (!kproc.iCodeChunk)
1.351 +			r=kproc.CreateCodeChunk();	// created lazily for the first kernel code segment
1.352 +		if (r!=KErrNone)
1.353 +			return r;
1.354 +		r=kproc.iCodeChunk->Allocate(iSize, 0, m.iAliasShift);	// returns chunk offset, or an error code
1.355 +		if (r<0)
1.356 +			return r;
1.357 +		iCodeAllocBase=r;
1.358 +		ri.iCodeRunAddr=(TUint32)kproc.iCodeChunk->Base();
1.359 +		ri.iCodeRunAddr+=r;
1.360 +		ri.iCodeLoadAddr=ri.iCodeRunAddr;	// kernel code is loaded at its run address
1.361 +		if (ri.iDataSize)
1.362 +			ri.iDataLoadAddr=ri.iCodeLoadAddr+ri.iCodeSize;
1.363 +		if (total_data_size)
1.364 +			{
1.365 +			iKernelData=Kern::Alloc(total_data_size);	// kernel-heap buffer for static data + bss
1.366 +			if (!iKernelData)
1.367 +				return KErrNoMemory;
1.368 +			ri.iDataRunAddr=(TLinAddr)iKernelData;
1.369 +			}
1.370 +		return KErrNone;
1.371 +		}
1.372 +	if (global)
1.373 +		{
1.374 +		if (!m.iGlobalCode)
1.375 +			r=m.CreateGlobalCodeChunk();	// created lazily for the first global code segment
1.376 +		if (r==KErrNone)
1.377 +			r=m.iGlobalCode->Allocate(iSize, 0, m.iAliasShift);	// returns chunk offset, or an error code
1.378 +		if (r<0)
1.379 +			return r;
1.380 +		iCodeAllocBase=r;
1.381 +		ri.iCodeRunAddr=(TUint32)m.iGlobalCode->Base();
1.382 +		ri.iCodeRunAddr+=r;
1.383 +		ri.iCodeLoadAddr=ri.iCodeRunAddr;
1.384 +		ri.iDataLoadAddr=0; // we don't allow static data in global code
1.385 +		ri.iDataRunAddr=0;
1.386 +		TInt loadSize = ri.iCodeSize+ri.iDataSize;
1.387 +		memset((TAny*)(ri.iCodeRunAddr+loadSize), 0x03, iSize-loadSize);	// fill the unused tail of the committed area with a distinctive pattern
1.388 +		return KErrNone;
1.389 +		}
1.390 +	// User-side: a non-EXE (DLL) segment needs a slot reserved in the DLL data region.
1.391 +	DCodeSeg::Wait();
1.392 +	if (total_data_size && !IsExe())
1.393 +		{
1.394 +		TInt data_alloc=(total_data_size+m.iPageMask)>>m.iPageShift;	// size in pages, rounded up
1.395 +		__KTRACE_OPT(KDLL,Kern::Printf("Alloc DLL data %x", data_alloc));
1.396 +		r=MM::DllDataAllocator->AllocConsecutive(data_alloc, ETrue);
1.397 +		if (r<0)
1.398 +			r = KErrNoMemory;
1.399 +		else
1.400 +			{
1.401 +			MM::DllDataAllocator->Alloc(r, data_alloc);
1.402 +			iDataAllocBase=r;
1.403 +			ri.iDataRunAddr=m.iDllDataBase+m.iMaxDllDataSize-((r+data_alloc)<<m.iPageShift);	// slots are carved downwards from the top of the DLL data region
1.404 +			r = KErrNone;
1.405 +			}
1.406 +		}
1.407 +	DCodeSeg::Signal();
1.408 +
1.409 +	if(r==KErrNone)
1.410 +		r = Memory()->Create(aInfo);	// allocate RAM pages and user code address space
1.411 +
1.412 +	return r;
1.413 +	}
1.414 +
1.415 +
1.416 +TInt DMemModelCodeSeg::DoCreateXIP(DProcess* aProcess)
1.417 +	{
1.418 +	(void)aProcess;	// nothing to construct - XIP code executes in place from ROM
1.419 +	return KErrNone;
1.420 +	}
1.421 +
1.422 +// Second phase of loading: synchronise caches and apply final code permissions.
1.423 +TInt DMemModelCodeSeg::Loaded(TCodeSegCreateInfo& aInfo)
1.424 +	{
1.425 +	if(iXIP)
1.426 +		return DEpocCodeSeg::Loaded(aInfo);	// XIP code needs no post-load processing here
1.427 +
1.428 +	TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
1.429 +	TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
1.430 +	if (Pages())
1.431 +		{
1.432 +		TInt r = Memory()->Loaded(aInfo);	// user RAM code: fix up, cache-sync and unmap from loader
1.433 +		if(r!=KErrNone)
1.434 +			return r;
1.435 +		}
1.436 +	else if (kernel && iExeCodeSeg!=this)
1.437 +		{
1.438 +		Mmu& m=Mmu::Get();
1.439 +		DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
1.440 +		SRamCodeInfo& ri=RamInfo();
1.441 +
1.442 +		// NOTE: Must do IMB before changing permissions since ARMv6 is very pedantic and
1.443 +		// doesn't let you clean a cache line which is marked as read only.
1.444 +		CacheMaintenance::CodeChanged(ri.iCodeRunAddr, ri.iCodeSize);
1.445 +
1.446 +		TInt offset=ri.iCodeRunAddr-TLinAddr(kproc.iCodeChunk->iBase);
1.447 +		kproc.iCodeChunk->ApplyPermissions(offset, iSize, m.iKernelCodePtePerm);	// apply final kernel code permissions
1.448 +		}
1.449 +	else if (global)
1.450 +		{
1.451 +		Mmu& m=Mmu::Get();
1.452 +		SRamCodeInfo& ri=RamInfo();
1.453 +		CacheMaintenance::CodeChanged(ri.iCodeRunAddr, ri.iCodeSize);	// IMB before permission change (see note above)
1.454 +		TInt offset=ri.iCodeRunAddr-TLinAddr(m.iGlobalCode->iBase);
1.455 +		m.iGlobalCode->ApplyPermissions(offset, iSize, m.iGlobalCodePtePerm);	// apply final global code permissions
1.456 +		}
1.457 +	return DEpocCodeSeg::Loaded(aInfo);
1.458 +	}
1.459 +// Copy the export directory, preceded by its count word, out to aDest.
1.460 +void DMemModelCodeSeg::ReadExportDir(TUint32* aDest)
1.461 +	{
1.462 +	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::ReadExportDir %C %08x",this, aDest));
1.463 +
1.464 +	if (!iXIP)	// for XIP segments there is nothing to copy here
1.465 +		{
1.466 +		SRamCodeInfo& ri=RamInfo();
1.467 +		TInt size=(ri.iExportDirCount+1)*sizeof(TLinAddr);	// +1 for the count word stored before the directory
1.468 +
1.469 +		if (Memory()->iCopyOfExportDir)	// kernel-side copy exists (made in Loaded() for demand-paged code)
1.470 +			{
1.471 +			kumemput(aDest, Memory()->iCopyOfExportDir, size);
1.472 +			return;
1.473 +			}
1.474 +
1.475 +		NKern::ThreadEnterCS();
1.476 +		Mmu& m=Mmu::Get();
1.477 +		TLinAddr src=ri.iExportDir-sizeof(TLinAddr);	// start at the count word
1.478 +
1.479 +		MmuBase::Wait();	// MMU mutex guards the temporary mapping used below
1.480 +		TInt offset=src-ri.iCodeRunAddr;
1.481 +		TPhysAddr* physArray = Pages();
1.482 +		TPhysAddr* ppa=physArray+(offset>>m.iPageShift);	// first physical page holding the directory
1.483 +		while(size)
1.484 +			{
1.485 +			TInt pageOffset = src&m.iPageMask;
1.486 +			TInt l=Min(size, m.iPageSize-pageOffset);	// copy at most up to the end of this page
1.487 +			TLinAddr alias_src = m.MapTemp(*ppa++,src-pageOffset)+pageOffset;
1.488 +			// Note, the following memory access isn't XTRAP'ed, because...
1.489 +			// a) This function is only called by the loader thread, so even if
1.490 +			//    exceptions were trapped the system is doomed anyway
1.491 +			// b) Any exception will cause the crash debugger/logger to be called
1.492 +			//    which will provide more information than if trapped exceptions
1.493 +			//    and returned an error code.
1.494 +			kumemput32(aDest, (const TAny*)alias_src, l);
1.495 +			m.UnmapTemp();
1.496 +			size-=l;
1.497 +			src+=l;
1.498 +			aDest+=l/sizeof(TUint32);
1.499 +			}
1.500 +		MmuBase::Signal();
1.501 +
1.502 +		NKern::ThreadLeaveCS();
1.503 +		}
1.504 +	}
1.505 +// An open is subject to the same process-compatibility rules as a find.
1.506 +TBool DMemModelCodeSeg::OpenCheck(DProcess* aProcess)
1.507 +	{
1.508 +	return FindCheck(aProcess);
1.509 +	}
1.510 +// Decide whether aProcess may use this code segment.
1.511 +TBool DMemModelCodeSeg::FindCheck(DProcess* aProcess)
1.512 +	{
1.513 +	__KTRACE_OPT(KDLL,Kern::Printf("CSEG:%08x Compat? proc=%O",this,aProcess));
1.514 +	if (!aProcess)
1.515 +		return ETrue;	// no process specified - always compatible
1.516 +	DMemModelProcess& proc=*(DMemModelProcess*)aProcess;
1.517 +	// Incompatible if bound to a different process...
1.518 +	if (iAttachProcess && iAttachProcess!=aProcess)
1.519 +		return EFalse;
1.520 +	// ...or to a different EXE code segment.
1.521 +	if (iExeCodeSeg && iExeCodeSeg!=proc.CodeSeg())
1.522 +		return EFalse;
1.523 +	return ETrue;
1.524 +	}
1.525 +
1.526 +// Replay BTrace events describing this code segment and its allocated RAM.
1.527 +void DMemModelCodeSeg::BTracePrime(TInt aCategory)
1.528 +	{
1.529 +#ifdef BTRACE_CODESEGS
1.530 +	if (aCategory == BTrace::ECodeSegs || aCategory == -1)
1.531 +		{
1.532 +		DCodeSeg::BTracePrime(aCategory);	// generic code-segment events first
1.533 +		DMemModelCodeSegMemory* mem = Memory();
1.534 +		if (mem && mem->iPages && mem->iPageCount)
1.535 +			{
1.536 +			BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryAllocated,this,mem->iPageCount<<Mmu::Get().iPageShift);
1.537 +			}
1.538 +		}
1.539 +#endif
1.540 +	}