// First public contribution.
1 // Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\drivers\locmedia\dmasupport.cpp
18 #include <kernel/kernel.h>
19 #include <kernel/cache.h>
21 #include "dmasupport.h"
22 #include "dmasupport.inl"
24 #include "OstTraceDefinitions.h"
25 #ifdef OST_TRACE_COMPILER_IN_USE
26 #include "locmedia_ost.h"
28 #pragma warning(disable: 4127) // disabling warning "conditional expression is constant"
30 #include "dmasupportTraces.h"
33 #define PHYSADDR_FAULT() Kern::Fault("TLOCDRV-PHYS-ADDR",__LINE__)
35 //#define __DEBUG_DMASUP__
36 #ifdef __DEBUG_DMASUP__
37 #define __KTRACE_DMA(p) {p;}
39 #define __KTRACE_DMA(p)
// Static page-size geometry shared by all DDmaHelper instances.
// They are initialised in the DDmaHelper constructor from
// Kern::RoundToPageSize(1): iPageSizeLog2 = Log2(iPageSize) and
// iPageSizeMsk = iPageSize - 1 (valid because iPageSize is asserted
// to be a power of two).
42 TInt DDmaHelper::iPageSize;
43 TInt DDmaHelper::iPageSizeLog2;
44 TInt DDmaHelper::iPageSizeMsk;
46 /******************************************************************************
48 ******************************************************************************/
49 const TPhysAddr KPhysMemFragmented = KPhysAddrInvalid;
// Returns the position of the most significant set bit of aVal, i.e.
// floor(log2(aVal)), via a 5-step binary search over the 32-bit word.
// Used by the DDmaHelper constructor to derive iPageSizeLog2.
// NOTE(review): the declaration/initialisation of bitPos (presumably
// TUint32 bitPos = 31) and the final return statement are not visible
// in this extract - confirm against the full source.
51 TUint32 Log2(TUint32 aVal)
53 __ASSERT_COMPILE(sizeof(TUint32) == 4);
// Each step: if the upper window is empty, the MSB must be lower, so
// reduce bitPos and shift the value up to re-examine the next window.
57 if(!(aVal >> 16)) {bitPos-=16; aVal<<=16;}
58 if(!(aVal >> 24)) {bitPos-=8; aVal<<=8 ;}
59 if(!(aVal >> 28)) {bitPos-=4; aVal<<=4 ;}
60 if(!(aVal >> 30)) {bitPos-=2; aVal<<=2 ;}
61 if(!(aVal >> 31)) {bitPos-=1;}
66 TBool IsPowerOfTwo(TInt aNum)
68 // Returns ETrue if aNum is a power of two
71 return (aNum != 0 && (aNum & -aNum) == aNum);
// Resets per-fragment bookkeeping before starting a new transfer.
// NOTE(review): only the iFragLenRemaining reset is visible in this
// extract; further reset statements appear to be truncated - confirm
// against the full source before relying on this list being complete.
74 void DDmaHelper::ResetPageLists()
77 iFragLenRemaining = 0;
// Constructor: caches the MMU page size and derives its log2 and mask,
// which all subsequent page-alignment arithmetic in this class relies on.
80 DDmaHelper::DDmaHelper()
82 OstTraceFunctionEntry0( DDMAHELPER_DDMAHELPER_ENTRY );
83 iPageSize = Kern::RoundToPageSize(1);
// A non-power-of-two page size would break the shift/mask arithmetic below.
84 __ASSERT_ALWAYS(IsPowerOfTwo(iPageSize), PHYSADDR_FAULT());
85 iPageSizeLog2 = Log2(iPageSize);
86 iPageSizeMsk = iPageSize-1;
87 OstTraceFunctionExit0( DDMAHELPER_DDMAHELPER_EXIT );
// Destructor: tears down the physical pin object if Construct() created
// one (iPhysicalPinObject is NULL on memory models without pinning).
90 DDmaHelper::~DDmaHelper()
92 OstTraceFunctionEntry0( DESTRUCTOR_DDMAHELPER_ENTRY );
95 if (iPhysicalPinObject)
// Destroy inside a critical section so the thread cannot be killed
// part-way through releasing the kernel-side pin object.
97 NKern::ThreadEnterCS();
98 Kern::DestroyPhysicalPinObject(iPhysicalPinObject);
99 NKern::ThreadLeaveCS();
101 OstTraceFunctionExit0( DESTRUCTOR_DDMAHELPER_EXIT );
105 Constructs the DDmaHelper object
107 @param aLength The maximum length of data mapped by this object.
108 Should be a multiple of the page size
109 @param aMediaBlockSize The minimum amount of data that the media can transfer in read / write operations
110 @param aDmaAlignment The memory alignment required by the media device's DMA controller. (i.e. word aligned = 2)
112 @return KErrNone, if successful;
113 KErrNoMemory, if unable to create the page arrays.
115 TInt DDmaHelper::Construct(TInt aLength, TInt aMediaBlockSize, TInt aDmaAlignment)
117 OstTraceFunctionEntry1( DDMAHELPER_CONSTRUCT_ENTRY, this );
// Validate the media block size and overall mapping length up-front;
// a bad configuration is a programming error, hence a kernel fault.
118 __ASSERT_ALWAYS(aMediaBlockSize > 0, PHYSADDR_FAULT());
119 __ASSERT_ALWAYS(IsPowerOfTwo(aMediaBlockSize), PHYSADDR_FAULT());
120 __ASSERT_ALWAYS(aLength > 0, PHYSADDR_FAULT());
121 __ASSERT_ALWAYS(aLength > iPageSize, PHYSADDR_FAULT());
123 // This code assumes that the media block size (normally 512) is >= the processor's
124 // cache-line size (typically 32 bytes). This may not be true for future processors.
125 // If the cache-line size was 1024, for example, reading 512 bytes into a client's
126 // buffer & then calling Cache::SyncMemoryAfterDmaRead would invalidate an entire 1024
127 // bytes in the user's address space.
128 TUint cacheLineSize = Cache::DmaBufferAlignment();
129 __ASSERT_ALWAYS(IsPowerOfTwo(cacheLineSize), PHYSADDR_FAULT());
130 if (cacheLineSize > (TUint) aMediaBlockSize)
132 OstTraceFunctionExitExt( DDMAHELPER_CONSTRUCT_EXIT1, this, KErrNotSupported );
133 return KErrNotSupported;
136 //Check whether Kernel supports physical memory pinning:
137 TInt mm = Kern::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, 0, 0) & EMemModelTypeMask;
138 if (mm >= EMemModelTypeFlexible)
140 // Flexible memory model supports physical pinning for user (and Kernel) memory that
141 // is the subject of DMA transfer.
142 // Physical memory pinning ensures that:
143 // - physical memory is not moved by RAM defragmentation.
144 // - it is safe to do DMA against it or do sync cache (using new interface) even if/when
145 // the owner of the memory (e.g. untrusted user application) decommits memory or panics.
146 // For details @see Kern::PinPhysicalMemory.
147 // Cache Sync of physically pinned memory on flexible memory model is done by:
148 // - Cache::SyncPhysicalMemoryBeforeDmaWrite
149 // - Cache::SyncPhysicalMemoryBeforeDmaRead
150 // - Cache::SyncPhysicalMemoryAfterDmaRead
151 iPhysPinningAvailable = ETrue;
152 __KTRACE_DMA(Kern::Printf("Memory model (%d) supports physical pinning\n",mm));
153 NKern::ThreadEnterCS();
154 TInt r=Kern::CreatePhysicalPinObject(iPhysicalPinObject);
155 OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_CONSTRUCT1, "Memory model=%d supports physical pinning; created Physical Pin Object with return value=%d",mm, r);
156 NKern::ThreadLeaveCS();
161 // Memory models before flexible do not support memory pinning.
162 // The driver has to use PrepareMemoryForDMA/ReleaseMemoryFromDMA Kernel interface
163 // that ensures that physical memory won't be moved by RAM defragmentation module.
164 // However, Kernel relies on assumption that the user memory won't disappear (e.g. by
165 // user client closing the chunk or panics), as it would lead to Kernel crash.
166 // For that reason, the only use case for DMA transfer into user memory is File System's
167 // read/write buffer - as it is assumed that File System is trusted component.
168 // To mark its buffer(s) for DMA transfer, File System must call UserSvr::RegisterTrustedChunk
169 // before DMA transfer starts.
170 // Cache sync. operations before/after DMA transfer must be done by using the old Cache interface:
171 // - Cache::SyncMemoryBeforeDmaWrite
172 // - Cache::SyncMemoryBeforeDmaRead
173 // - Cache::SyncMemoryAfterDmaRead
174 // As they all require linear address as input, these methods also rely on File System buffers
175 // to be in valid state during sync calls.
176 iPhysPinningAvailable = EFalse;
177 __KTRACE_DMA(Kern::Printf("Memory model (%d) doesn't support physical pining",mm));
178 OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_CONSTRUCT2, "Memory model=%d doesn't support physical pinning",mm);
179 iPhysicalPinObject = NULL;
// Number of whole pages coverable by aLength, less one (see next comment:
// two extra entries are still allocated for requests straddling page ends).
182 iMaxPages = (aLength >> iPageSizeLog2)-1;
184 // 2 Additional pages for page straddling
185 iPageArray = new TPhysAddr[iMaxPages+2];
186 if (iPageArray != NULL)
188 iPageList = new TPageList[iMaxPages];
189 if (iPageList != NULL)
191 iMediaBlockSize = aMediaBlockSize;
192 iMediaBlockSizeMask = TInt64(iMediaBlockSize - 1);
194 iDmaAlignment = aDmaAlignment;
195 __KTRACE_DMA(Kern::Printf("-PHYSADDR: Construct iMaxPages(%d), MediaBlocks(%d), DMAalign(%d)",iMaxPages,iMediaBlockSize,iDmaAlignment));
196 OstTraceExt3(TRACE_FLOW, DDMAHELPER_CONSTRUCT_EXIT2, "< KErrNone PHYSADDR: Construct iMaxPages %d MediaBlocks %d DMAalign %d", iMaxPages,iMediaBlockSize,iDmaAlignment );
// Failure path: iPageList allocation failed, so roll back iPageArray
// before reporting KErrNoMemory.
199 delete [] iPageArray; iPageArray = NULL;
203 OstTraceFunctionExitExt( DDMAHELPER_CONSTRUCT_EXIT3, this, KErrNoMemory );
208 * Each Read/Write request is examined to determine if the descriptor that
209 * is referenced is mapped to a physical memory object;
210 * if so it prepares the memory, updates the request with physical memory information
211 * and issues the request.
212 * If a request does not make use of physical memory or is not configured correctly the
213 * request is passed through without modification.
215 TInt DDmaHelper::SendReceive(TLocDrvRequest& aReq, TLinAddr aLinAddress)
217 OstTraceFunctionEntry0( DDMAHELPER_SENDRECEIVE_ENTRY );
218 DPrimaryMediaBase& primaryMedia = *aReq.Drive()->iPrimaryMedia;
// Only reads and writes can use DMA; every other request id is forwarded
// to the media thread's message queue untouched.
220 TInt reqId = aReq.Id();
221 if (reqId != DLocalDrive::ERead && reqId != DLocalDrive::EWrite)
223 OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT1, "< Request is not ERead or EWrite, cannot perform Direct Memory Access");
224 return aReq.SendReceive(&primaryMedia.iMsgQ);
// Pass through requests whose length exceeds 32 bits or is smaller than
// one media block - neither can be handled by the DMA path.
227 if ((I64HIGH(aReq.Length()) > 0) || (aReq.Length() < iMediaBlockSize))
229 OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT2, "< Invalid request length, cannot perform Direct Memory Access");
230 return aReq.SendReceive(&primaryMedia.iMsgQ);
233 // If more than one user thread tries to access the drive, then bail out as there is
234 // only one DDmaHelper object per TLocDrv. Normally this shouldn't ever happen unless
235 // a client app accesses the drive directly using TBusLOcalDrive or the file system is
236 // asynchronous (i.e. there is a separate drive thread) but the file server message is
237 // flagged as synchronous - e.g. EFsDrive
238 if (TInt(__e32_atomic_add_ord32(&iLockCount, 1)) > 0) // busy ?
240 __KTRACE_DMA(Kern::Printf("-PHYSADDR: BUSY"));
// Undo our increment before forwarding the request the non-DMA way.
241 __e32_atomic_add_ord32(&iLockCount, TUint32(-1));
242 OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT3, "< DMA Busy");
243 return aReq.SendReceive(&primaryMedia.iMsgQ);
246 // make a copy of the request
247 iMemoryType = EUnknown;
251 iReqPosClient = iReq->Pos();
253 iReqLenClient = I64LOW(iReq->Length());
255 iReqRemoteDesOffset = iReq->RemoteDesOffset();
256 iReqFlags = iReq->Flags();
258 iRemoteThread = iReq->RemoteThread();
259 iCurrentThread = &Kern::CurrentThread();
// The descriptor is owned by the remote (file server client) thread if
// one exists, otherwise by the calling thread.
260 iOwningThread = iRemoteThread ? iRemoteThread : iCurrentThread;
264 iLinAddressUser = NULL;
267 // point to the start of the descriptor
268 iLinAddressUser = aLinAddress - iReqRemoteDesOffset;
270 // Need to check descriptors from both direct Clients (i.e. file cache, RemoteThread == NULL )
271 // and Remote Server Clients (file server clients, RemoteThread != NULL)
272 // Shared Memory can potentially be used by both remote server and direct clients
273 NKern::ThreadEnterCS();
274 iChunk = Kern::OpenSharedChunk(iOwningThread, (const TAny*) iLinAddressUser, ETrue, iChunkOffset);
275 NKern::ThreadLeaveCS();
// NOTE(review): the opening of the per-fragment do { } loop (closed by
// the while at original line 329 below) and the declarations of r,
// iReqLen and fragments are not visible in this extract.
281 __KTRACE_DMA(Kern::Printf(">PHYSADDR:SendReceive() iReqLen %d; iLenConsumed %d; fragments %d",iReqLen, iLenConsumed, fragments));
282 OstTraceExt2( TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE1, "PHYSADDR:SendReceive() iLenConsumed=%d; fragments=%d", iLenConsumed, fragments);
// Early-exit path: close the shared chunk (if opened), restore state and
// release the lock count before returning.
288 NKern::ThreadEnterCS();
289 Kern::ChunkClose(iChunk);
291 NKern::ThreadLeaveCS();
293 __KTRACE_DMA(Kern::Printf("<PHYSADDR:SendReceive()- r:%d",r));
294 OstTrace1( TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT4, "< PHYSADDR:SendReceive() Return code %d",r);
295 iMemoryType = EUnknown;
296 __e32_atomic_add_ord32(&iLockCount, TUint32(-1));
// If nothing has been transferred yet (no fragments issued), fall back to
// sending the whole request the conventional (non-DMA) way.
297 return fragments ? r : iReq->SendReceive(&primaryMedia.iMsgQ);
// Mark the request so the media driver knows it carries physical addresses.
301 iReq->Flags() |= TLocDrvRequest::EPhysAddr;
304 __KTRACE_DMA(Kern::Printf("-PHYSADDR:SendReceive() rThread %08X pos %08lX, len %d addr %08X off %08X",
305 iRemoteThread, iReq->Pos(), I64LOW(iReq->Length()), iLinAddressUser, iReqRemoteDesOffset));
306 OstTraceExt4(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE2, "PHYSADDR:SendReceive() position=%Ld; length=%d; address=0x%x; offset=0x%x", iReq->Pos(), (TInt) I64LOW(iReq->Length()), (TUint) iLinAddressUser, (TUint) iReqRemoteDesOffset );
308 __ASSERT_DEBUG(iReq->Length() == FragLength(), PHYSADDR_FAULT());
309 __ASSERT_DEBUG(iReq->Length() != 0, PHYSADDR_FAULT());
311 // reinstate iValue in case overwritten by DMediaPagingDevice::CompleteRequest()
312 iReq->iValue = iReqId;
314 OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE3, "Dma SendReceive Start iReq=%d", iReq);
// Issue this fragment to the media thread and wait for completion.
315 r = iReq->SendReceive(&primaryMedia.iMsgQ);
316 OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE4, "Dma SendReceive Return iReq=%d", iReq);
318 // The media driver could potentially choose to deal with the request
319 // without accessing physical memory (e.g. if the data is already cached).
320 iLenConsumed += iFragLenRemaining;
// Loop until an error occurs or the client's full request length is consumed.
329 while(r == KErrNone && LengthRemaining() > 0);
333 NKern::ThreadEnterCS();
334 Kern::ChunkClose(iChunk);
336 NKern::ThreadLeaveCS();
339 // Set remote descriptor length to iReqLenClient
340 if (iReqId == DLocalDrive::ERead && r == KErrNone)
341 r = UpdateRemoteDescriptorLength(iReqLenClient);
343 __KTRACE_DMA(Kern::Printf("<PHYSADDR:SendReceive()"));
345 iMemoryType = EUnknown;
// Release the single-user lock taken at the top of this function.
347 __e32_atomic_add_ord32(&iLockCount, TUint32(-1));
348 OstTraceFunctionExit0( DDMAHELPER_SENDRECEIVE_EXIT5 );
354 * Each read/write request is split into one or more DMA "fragments".
355 * The maximum size of each fragment depends on the size of iPageArray[].
356 * Subsequent calls to RequestStart may be required to complete a request.
358 * The physical address is checked for DMA alignment or the possibility of
359 * eventual alignment due to mis-aligned start/end media blocks.
361 * A DMA "fragment" can be split over a number of pages as follows :
362 * ----------------------------------------------------------
363 * | 4K | 4K | 4K | 4K |
364 * ----------------------------------------------------------
365 * ******************************** : region to be read
366 * <----------- iFragLen ----------->
368 * The pages may not be physically contiguous; if they are not,
369 * then they are supplied to the media driver one contiguous
370 * sequence at a time by GetPhysicalAddress()
372 TInt DDmaHelper::RequestStart()
374 OstTraceFunctionEntry1( DDMAHELPER_REQUESTSTART_ENTRY, this );
375 __KTRACE_DMA(Kern::Printf(">PHYSADDR:RequestStart()"));
// Fragment geometry: where this fragment starts (media position and
// linear address) and how much can be transferred in one go.
379 TLinAddr startAddr = LinAddress();
380 TInt64 startPos = iReqPosClient + iLenConsumed;
381 TInt mediaBlockOffset = BlockOffset(startPos);
382 TInt addrBlockOffset = BlockOffset(startAddr);
383 TInt length = Min(LengthRemaining(), MaxFragLength());
385 iPageArrayCount = iPageListCount = 0;
// Count the pages touched by [startAddr, startAddr+length).
387 TLinAddr firstPageStart = PageAlign(startAddr);
388 TLinAddr lastPageStart = PageAlign(startAddr + length + iPageSize - 1);
389 iPageArrayCount = (lastPageStart - firstPageStart + 1) >> iPageSizeLog2;
391 iMemoryType = EUnknown;
392 iPhysAddr = KPhysMemFragmented; // Default - Mark memory as fragmented
394 //*************************************
395 // Check Physical Page Alignment!!
396 //*************************************
397 if (!IsBlockAligned(startPos))
399 // Will DMA align at next block alignment? such that DMA can be used
400 TInt ofset = I64LOW((startPos + iMediaBlockSize) & (iMediaBlockSize-1));
401 ofset = iMediaBlockSize - ofset;
403 if (!IsDmaAligned(startAddr))
405 __KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - not DMA Aligned pos 0x%x addr 0x%x)",I64LOW(startPos), startAddr));
406 OstTraceExt2( TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT1, "< KErrNotSupported Not DMA Aligned startPos %x startAddr %x", I64LOW(startPos), startAddr );
407 return KErrNotSupported;
// Block-aligned media position: the buffer address itself must meet the
// DMA controller's alignment requirement.
412 if (!IsDmaAligned(startAddr))
414 __KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - not DMA Aligned (0x%x)",startAddr));
415 OstTrace1(TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT2, "< KErrNotSupported Not DMA Aligned startAddr %x", startAddr);
416 return KErrNotSupported;
420 //************************************************
421 // Check for possible striping of RAM pages vs Media blocks
422 // i.e. Media blocks which may straddle 2 non contiguous pages.
423 //************************************************
424 if (mediaBlockOffset != addrBlockOffset)
426 __KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - Frag / not block aligned: pos 0x%x addr 0x%x", I64LOW(startPos), startAddr));
427 OstTraceExt2(TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT3, "< KErrNotSupported Frag / not block aligned: startPos 0x%x startAddr 0x%x", I64LOW(startPos), startAddr );
428 return KErrNotSupported;
431 //************************************************
432 // Is it File Server Cache request ?
433 //************************************************
434 if (iChunk == NULL && // Not Shared memory
435 iRemoteThread == NULL && // Direct Client Request
436 IsPageAligned(startAddr) &&
437 IsBlockAligned(startPos) &&
438 (iPageArrayCount > 0) )
440 TLinAddr firstPageAddr = PageAlign(startAddr); //ensure that it is page aligned.
// Pin (flexible memory model) or prepare (older models) the client's
// pages so RAM defragmentation cannot move them during the transfer.
443 if (iPhysPinningAvailable)
445 TBool readOnlyMem = (iReqId == DLocalDrive::EWrite);
446 r = Kern::PinPhysicalMemory(iPhysicalPinObject, firstPageAddr, iPageArrayCount << iPageSizeLog2,
447 readOnlyMem, iPhysAddr, iPageArray, iMapAttr, iPageColour, iCurrentThread);
451 NKern::ThreadEnterCS();
452 r = Kern::PrepareMemoryForDMA(iCurrentThread, (void*)firstPageAddr, iPageArrayCount << iPageSizeLog2, iPageArray);
453 NKern::ThreadLeaveCS();
457 OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT4, this, r );
461 iMemoryType = EFileServerChunk;
463 __KTRACE_DMA(Kern::Printf("-PHYSADDR:RequestStart() - EFileServerChunk"));
464 OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART1, "EFileServerChunk");
466 //****************************
467 // Is it shared chunk ?
468 //****************************
471 // calculate chunk offset of start of first page
472 TInt offset = iChunkOffset + iReqRemoteDesOffset+ iLenConsumed;
473 TInt r = Kern::ChunkPhysicalAddress(iChunk, offset, length, iLinAddressKernel, iMapAttr, iPhysAddr, iPageArray);
478 OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT5, this, r );
479 return r; // 0 = Contiguous Memory, 1 = Fragmented/Dis-Contiguous Memory
482 iMemoryType = ESharedChunk;
484 __KTRACE_DMA(Kern::Printf("-PHYSADDR:RequestStart() - ESharedChunk"));
485 OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART2, "ESharedChunk");
// Neither a pinnable file-server buffer nor a shared chunk: DMA cannot
// be used for this request.
489 __KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - EUnknown"));
490 OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT6, this, KErrNotFound );
494 SetFragLength(length);
496 //************************************************
497 // Build Contiguous Page list
498 //************************************************
501 //************************************************
502 // Set up request parameters for this fragment
503 //************************************************
504 iReq->Length() = MAKE_TINT64(0, length);
505 iReq->Pos() = iReqPosClient + iLenConsumed;
506 iReq->RemoteDesOffset() = iReqRemoteDesOffset + iLenConsumed;
507 // restore EAdjusted flag to ensure iReq->Pos() is adjusted correctly
508 iReq->Flags()&= ~TLocDrvRequest::EAdjusted;
509 iReq->Flags()|= (iReqFlags & TLocDrvRequest::EAdjusted);
511 //************************************************
513 //************************************************
514 __KTRACE_DMA(Kern::Printf(">SYNC-PHYSADDR:addr 0x%x len %d", startAddr, length));
515 OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART3, "startAddr=0x%x length=%d", (TUint) startAddr, length );
517 // Only sync whole blocks: it is assumed that the media driver will transfer
518 // partial start and end blocks without DMA
520 TInt startBlockPartialLen = IsBlockAligned(startPos) ? 0 : iMediaBlockSize - BlockOffset(startPos);
521 TInt blockLen = (TInt) BlockAlign(length - startBlockPartialLen);
// Pre-DMA cache maintenance: the API used depends on the memory type
// (kernel linear address for shared chunks, physical page list when
// pinning is available, client linear address otherwise).
523 if (iReqId == DLocalDrive::EWrite)
525 if (iMemoryType == ESharedChunk)
527 Cache::SyncMemoryBeforeDmaWrite(iLinAddressKernel+startBlockPartialLen, blockLen, iMapAttr);
529 else // (iMemoryType == EFileServerChunk)
531 if (iPhysPinningAvailable)
532 Cache::SyncPhysicalMemoryBeforeDmaWrite(iPageArray, iPageColour, startBlockPartialLen, blockLen, iMapAttr);
534 Cache::SyncMemoryBeforeDmaWrite(startAddr+startBlockPartialLen, blockLen);
539 if (iMemoryType == ESharedChunk)
540 Cache::SyncMemoryBeforeDmaRead(iLinAddressKernel, length, iMapAttr);
541 else // (iMemoryType == EFileServerChunk)
543 if (iPhysPinningAvailable)
544 Cache::SyncPhysicalMemoryBeforeDmaRead(iPageArray, iPageColour, 0, length, iMapAttr);
546 Cache::SyncMemoryBeforeDmaRead(startAddr, length);
550 __KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart()"));
552 OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT7, this, KErrNone );
557 * After read requests this method synchronises the physical memory just used.
559 void DDmaHelper::RequestEnd()
561 OstTraceFunctionEntry0( DDMAHELPER_REQUESTEND_ENTRY );
562 __KTRACE_DMA(Kern::Printf(">PHYSADDR:RequestEnd()"));
565 __ASSERT_DEBUG(iReqId == DLocalDrive::ERead || iReqId == DLocalDrive::EWrite, PHYSADDR_FAULT());
566 __ASSERT_DEBUG(iMemoryType == ESharedChunk || iMemoryType == EFileServerChunk, PHYSADDR_FAULT());
568 TInt length = FragLength(); // len of data just transferred
// LinAddress() has advanced past the fragment; step back to its start.
569 TLinAddr startAddr = LinAddress() - length;
571 // Sync the memory : but not if the media driver has decided to transfer ALL the data using IPC rather than DMA.
572 // It is assumed that the media driver will transfer partial start & end blocks using IPC, but it may also choose
573 // to use IPC for the ENTIRE fragment when read/writing at the end of the media (see medmmc.cpp)
574 if (iFragLenRemaining < length && iReqId == DLocalDrive::ERead)
576 TInt64 startPos = iReq->Pos();
// As in RequestStart(): only whole media blocks were DMA-transferred,
// so only those need post-DMA cache invalidation.
577 TInt startBlockPartialLen = IsBlockAligned(startPos) ? 0 : iMediaBlockSize - BlockOffset(startPos);
578 TInt blockLen = (TInt) BlockAlign(length - startBlockPartialLen);
580 if (iMemoryType == ESharedChunk)
582 Cache::SyncMemoryAfterDmaRead(iLinAddressKernel + startBlockPartialLen, blockLen);
584 else // (iMemoryType == EFileServerChunk)
586 if (iPhysPinningAvailable)
587 Cache::SyncPhysicalMemoryAfterDmaRead(iPageArray, iPageColour, startBlockPartialLen, blockLen, iMapAttr);
589 Cache::SyncMemoryAfterDmaRead(startAddr + startBlockPartialLen, blockLen);
// Unpin / release the pages pinned in RequestStart().
593 ReleasePages(PageAlign(startAddr));
594 OstTraceFunctionExit0( DDMAHELPER_REQUESTEND_EXIT );
598 * For File Server chunks this method releases the current physical memory in use.
600 * @see Kern::ReleaseMemoryFromDMA()
602 void DDmaHelper::ReleasePages(TLinAddr aAddr)
604 OstTraceFunctionEntry1( DDMAHELPER_RELEASEPAGES_ENTRY, this );
// Only file-server-chunk memory was pinned/prepared by RequestStart();
// shared-chunk memory needs no per-fragment release here.
605 if (iMemoryType == EFileServerChunk)
607 __KTRACE_DMA(Kern::Printf(">PHYSADDR():ReleasePages thread (0x%x) aAddr(0x%08x) size(%d) iPageArray(0x%x)",iCurrentThread, aAddr, (iPageArrayCount << iPageSizeLog2), iPageArray));
608 OstTraceExt3( TRACE_DMASUPPORT, DDMAHELPER_RELEASEPAGES, "ReleasePages aAddr=0x%x; size=%d; iPageArray-0x%x", (TUint) aAddr, (iPageArrayCount << iPageSizeLog2), (TUint) iPageArray);
// Mirror of the two acquisition paths in RequestStart(): unpin on the
// flexible memory model, ReleaseMemoryFromDMA() on older models.
611 if (iPhysPinningAvailable)
613 r = Kern::UnpinPhysicalMemory(iPhysicalPinObject);
617 NKern::ThreadEnterCS();
618 r = Kern::ReleaseMemoryFromDMA(iCurrentThread, (void*) aAddr, iPageArrayCount << iPageSizeLog2, iPageArray);
619 NKern::ThreadLeaveCS();
// Failure to release pinned memory is unrecoverable - fault the kernel.
621 __ASSERT_ALWAYS(r == KErrNone, PHYSADDR_FAULT());
623 OstTraceFunctionExit1( DDMAHELPER_RELEASEPAGES_EXIT, this );
627 * Utility method which examines the page array, compiling adjacent pages into contiguous fragments
628 * and populating iPageList with said fragments.
630 void DDmaHelper::BuildPageList()
632 OstTraceFunctionEntry1( DDMAHELPER_BUILDPAGELIST_ENTRY, this );
// Fast path: RequestStart() reported one physically contiguous run.
635 if (iPhysAddr != KPhysMemFragmented)
637 __KTRACE_DMA(Kern::Printf(">PHYSADDR:BuildPageList() - Contiguous Memory"));
638 OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_BUILDPAGELIST1, "Contiguous Memory");
639 // Only one entry required.
640 iPageList[0].iAddress = iPhysAddr;
641 iPageList[0].iLength = FragLength();
646 __KTRACE_DMA(Kern::Printf(">PHYSADDR:BuildPageList() - Dis-Contiguous Memory"));
647 OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_BUILDPAGELIST2, "Dis-Contiguous Memory");
// First entry starts part-way into the first page at the current
// chunk/descriptor offset; its length is the remainder of that page.
650 offset = PageOffset(iChunkOffset + iReqRemoteDesOffset+ iLenConsumed);
651 iPageList[0].iAddress = iPageArray[0]+offset;
652 iPageList[0].iLength = iPageSize-offset;
654 TInt lengthRemaining = FragLength() - iPageList[0].iLength;
// Walk the remaining pages, coalescing physically adjacent ones into
// the current fragment and starting a new fragment on a discontinuity.
657 for( ; i < iPageArrayCount; i++)
659 //Check if RAM pages are physically adjacent
660 if ((iPageArray[i-1] + PageSize()) == iPageArray[i])
662 // Adjacent pages - just add length
663 iPageList[iPageListCount].iLength += PageSize();
667 // Not Adjacent, start new Memory fragment
669 iPageList[iPageListCount].iAddress = iPageArray[i];
670 iPageList[iPageListCount].iLength = iPageSize;
673 lengthRemaining -= PageSize();
674 if (lengthRemaining < 0)
676 // Last page, re-adjust length for odd remainder.
677 iPageList[iPageListCount].iLength += lengthRemaining;
685 //#ifdef __DEBUG_DMASUP__
686 // for (TInt m=0; m<iPageListCount; m++)
687 // __KTRACE_DMA(Kern::Printf("-PHYSADDR:BuildPageList() [%d]: %08X l:%d", m, iPageList[m].iAddress, iPageList[m].iLength));
689 OstTraceFunctionExit1( DDMAHELPER_BUILDPAGELIST_EXIT, this );
694 * Returns Address and Length of next contiguous Physical memory fragment
696 * @param aAddr On success, populated with the Physical Address of the next fragment.
697 * @param aLen On success, populated with the length in bytes of the next fragment.
699 * @return KErrNone, if successful;
700 * KErrNoMemory, if no more memory fragments left.
702 TInt DDmaHelper::GetPhysicalAddress(TPhysAddr& aAddr, TInt& aLen)
704 OstTraceFunctionEntry1( DUP1_DDMAHELPER_GETPHYSICALADDRESS_ENTRY, this );
// Past the end of the fragment list built by BuildPageList() - error path.
705 if (iIndex >= iPageListCount)
707 __KTRACE_DMA(Kern::Printf(">PHYSADDR:GetPhysD() [%d], PageListCount:%d", iIndex, iPageListCount));
708 OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_GETPHYSICALADDRESS1, "GetPhysD() [%d]; iPageCountList=%d", iIndex, iPageListCount );
711 OstTraceFunctionExitExt( DUP1_DDMAHELPER_GETPHYSICALADDRESS_EXIT1, this, KErrGeneral );
// Hand out the current fragment and account for the bytes consumed.
715 aAddr = iPageList[iIndex].iAddress;
716 aLen = iPageList[iIndex].iLength;
718 iFragLenRemaining-= aLen;
720 __KTRACE_DMA(Kern::Printf(">PHYSADDR:GetPhysD() [%d] addr:0x%08X, l:%d; Used:%d, Left:%d", iIndex, aAddr, aLen, iLenConsumed, iFragLenRemaining));
721 OstTraceExt5(TRACE_DMASUPPORT, DDMAHELPER_GETPHYSICALADDRESS2, "GetPhysD() [%d]; address=0x%x; length=%d; iLenConsumed=%d; iFragLenRemaining=%d", iIndex, (TUint) aAddr, aLen, iLenConsumed, iFragLenRemaining);
722 __ASSERT_DEBUG(aLen >= 0, PHYSADDR_FAULT());
724 iIndex++; //Move index to next page
726 OstTraceFunctionExitExt( DDMAHELPER_GETPHYSICALADDRESS_EXIT2, this, KErrNone );
#ifdef __DEMAND_PAGING__
733 * Returns Address and Length of next contiguous Physical memory.
734 * Static function specifically for Demand Paging support
736 * @param aReq TLocDrvRequest from which physical memory info is extracted
737 * @param aAddr Populated with the Physical Address of the Request aReq.
738 * @param aLen Populated with the length in bytes of the memory.
742 TInt DDmaHelper::GetPhysicalAddress(TLocDrvRequest& aReq, TPhysAddr& aAddr, TInt& aLen)
744 OstTraceFunctionEntry0( DDMAHELPER_GETPHYSICALADDRESS_ENTRY );
745 __ASSERT_DEBUG( (aReq.Flags() & TLocDrvRequest::ETClientBuffer) == 0, PHYSADDR_FAULT());
746 TLinAddr linAddr = (TLinAddr) aReq.RemoteDes();
// NB: offset is a reference - advancing it below records progress
// directly in the request for subsequent calls.
747 TInt& offset = aReq.RemoteDesOffset();
748 TLinAddr currLinAddr = linAddr + offset;
749 TInt reqLen = I64LOW(aReq.Length());
750 __ASSERT_DEBUG(I64HIGH(aReq.Length()) == 0, PHYSADDR_FAULT());
752 aAddr = Epoc::LinearToPhysical(currLinAddr);
754 // Set the initial length to be the length remaining in this page or the request length (whichever is shorter).
755 // If there are subsequent pages, we then need to determine whether they are contiguous
756 aLen = Min( (TInt) (PageAlign(currLinAddr+iPageSize) - currLinAddr), reqLen - offset);
758 __ASSERT_DEBUG(aLen > 0, PHYSADDR_FAULT());
760 TPhysAddr currPhysPageAddr = PageAlign((TLinAddr) aAddr);
// Extend the run page by page while the physical pages remain adjacent.
// NOTE(review): the statements that advance offset and accumulate aLen
// inside this loop are not visible in this extract.
765 while (offset < reqLen)
767 TPhysAddr nextPhysPageAddr = Epoc::LinearToPhysical(linAddr + offset);
768 __ASSERT_DEBUG(PageOffset((TLinAddr) nextPhysPageAddr) == 0, PHYSADDR_FAULT());
// Stop extending as soon as the next page is not physically adjacent.
770 if (nextPhysPageAddr != currPhysPageAddr + iPageSize)
773 currPhysPageAddr = nextPhysPageAddr;
775 TInt len = Min(iPageSize, reqLen - offset);
781 __KTRACE_DMA(Kern::Printf(">PHYSADDR:DP:GetPhysS(), linAddr %08X, physAddr %08X, len %x reqLen %x", linAddr + offset, aAddr, aLen, reqLen));
782 OstTraceExt4(TRACE_DEMANDPAGING, DDMAHELPER_GETPHYSICALADDRESS_DP, "linAddr=0x%x; physAddr=0x%x; length=0x%x; reqLen=0x%x", linAddr + offset, aAddr, aLen, reqLen);
783 OstTraceFunctionExit0( DDMAHELPER_GETPHYSICALADDRESS_EXIT );
#endif // (__DEMAND_PAGING__)
790 * Modifies the current request's remote descriptor length
792 * @param aLength Length in bytes to which the descriptor is to be set.
794 * @return KErrNone, if successful;
795 * KErrBadDescriptor, if descriptor is corrupted;
796 * otherwise one of the other system wide error codes.
799 TInt DDmaHelper::UpdateRemoteDescriptorLength(TInt aLength)
801 OstTraceFunctionEntryExt( DDMAHELPER_UPDATEREMOTEDESCRIPTORLENGTH_ENTRY, this );
802 __KTRACE_DMA(Kern::Printf(">PHYSADDR:UpDesLen(%d)",aLength));
804 // Restore request Id (overwritten by KErrNone return code) to stop ASSERT in WriteRemote
805 iReq->Id() = DLocalDrive::ERead;
807 // restore caller's descriptor offset
808 iReq->RemoteDesOffset() = iReqRemoteDesOffset;
810 // Write a zero length descriptor at the end such that the descriptor's length is correctly updated.
811 TPtrC8 zeroDes(NULL, 0);
812 TInt r = iReq->WriteRemote(&zeroDes, aLength);
814 // restore return code
815 iReq->iValue = KErrNone;
817 OstTraceFunctionExitExt( DDMAHELPER_UPDATEREMOTEDESCRIPTORLENGTH_EXIT, this, r );