os/kernelhwsrv/kernel/eka/drivers/locmedia/dmasupport.cpp
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
     1 // Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
     2 // All rights reserved.
     3 // This component and the accompanying materials are made available
     4 // under the terms of the License "Eclipse Public License v1.0"
     5 // which accompanies this distribution, and is available
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
     7 //
     8 // Initial Contributors:
     9 // Nokia Corporation - initial contribution.
    10 //
    11 // Contributors:
    12 //
    13 // Description:
    14 // e32\drivers\locmedia\dmasupport.cpp
    15 // 
    16 //
    17 
    18 #include <kernel/kernel.h>
    19 #include <kernel/cache.h>
    20 #include "locmedia.h"
    21 #include "dmasupport.h"
    22 #include "dmasupport.inl"
    23 
    24 #include "OstTraceDefinitions.h"
    25 #ifdef OST_TRACE_COMPILER_IN_USE
    26 #include "locmedia_ost.h"
    27 #ifdef __VC32__
    28 #pragma warning(disable: 4127) // disabling warning "conditional expression is constant"
    29 #endif
    30 #include "dmasupportTraces.h"
    31 #endif
    32 
    33 #define PHYSADDR_FAULT()	Kern::Fault("TLOCDRV-PHYS-ADDR",__LINE__)
    34 
    35 //#define __DEBUG_DMASUP__
    36 #ifdef __DEBUG_DMASUP__
    37 #define __KTRACE_DMA(p) {p;}
    38 #else
    39 #define __KTRACE_DMA(p)
    40 #endif
    41 
    42 TInt DDmaHelper::iPageSize;
    43 TInt DDmaHelper::iPageSizeLog2;
    44 TInt DDmaHelper::iPageSizeMsk;
    45 
    46 /******************************************************************************
    47  DDmaHelper
    48  ******************************************************************************/
    49 const TPhysAddr KPhysMemFragmented = KPhysAddrInvalid;
    50 
    51 TUint32 Log2(TUint32 aVal)
    52 	{	
    53     __ASSERT_COMPILE(sizeof(TUint32) == 4);
    54 
    55     TUint32 bitPos=31;
    56 
    57     if(!(aVal >> 16)) {bitPos-=16; aVal<<=16;}
    58     if(!(aVal >> 24)) {bitPos-=8;  aVal<<=8 ;}
    59     if(!(aVal >> 28)) {bitPos-=4;  aVal<<=4 ;}
    60     if(!(aVal >> 30)) {bitPos-=2;  aVal<<=2 ;}
    61     if(!(aVal >> 31)) {bitPos-=1;}
    62     
    63     return bitPos;
    64 	}
    65 
    66 TBool IsPowerOfTwo(TInt aNum)
    67 //
    68 // Returns ETrue if aNum is a power of two
    69 //
    70 	{
    71 	return (aNum != 0 && (aNum & -aNum) == aNum);
    72 	}
    73 
    74 void DDmaHelper::ResetPageLists()
    75 	{
    76 	iFragLen = 0;
    77 	iFragLenRemaining = 0;
    78 	}
    79 
    80 DDmaHelper::DDmaHelper()
    81 	{
    82 	OstTraceFunctionEntry0( DDMAHELPER_DDMAHELPER_ENTRY );
    83 	iPageSize = Kern::RoundToPageSize(1);
    84 	__ASSERT_ALWAYS(IsPowerOfTwo(iPageSize), PHYSADDR_FAULT());
    85 	iPageSizeLog2 = Log2(iPageSize);
    86 	iPageSizeMsk = iPageSize-1;
    87 	OstTraceFunctionExit0( DDMAHELPER_DDMAHELPER_EXIT );
    88 	}
    89 
    90 DDmaHelper::~DDmaHelper()
    91 	{
    92 	OstTraceFunctionEntry0( DESTRUCTOR_DDMAHELPER_ENTRY );
    93 	delete [] iPageArray;
    94 	delete [] iPageList;
    95 	if (iPhysicalPinObject)
    96 		{
    97 		NKern::ThreadEnterCS();
    98 		Kern::DestroyPhysicalPinObject(iPhysicalPinObject);
    99 		NKern::ThreadLeaveCS();
   100 		}
   101 	OstTraceFunctionExit0( DESTRUCTOR_DDMAHELPER_EXIT );
   102 	}
   103 
   104 /**
   105 Constructs the DDmaHelper object 
   106 
   107 @param aLength The maximum length of data mapped by this object.
   108 			   Should be a multiple of the page size 
   109 @param aMediaBlockSize The minimum amount data that the media can transfer in read / write operations
   110 @param aDmaAlignment The memory alignment required by the media devices DMA controller. (i.e. word aligned = 2)
   111 
   112 @return KErrNone,if successful;
   113 		KErrNoMemory, if unable to create Page Array's.
   114 */
TInt DDmaHelper::Construct(TInt aLength, TInt aMediaBlockSize, TInt aDmaAlignment)
	{
	OstTraceFunctionEntry1( DDMAHELPER_CONSTRUCT_ENTRY, this );
	// Argument validation: media block size must be a positive power of
	// two, and the mapped length must exceed one MMU page.
	__ASSERT_ALWAYS(aMediaBlockSize > 0, PHYSADDR_FAULT());
	__ASSERT_ALWAYS(IsPowerOfTwo(aMediaBlockSize), PHYSADDR_FAULT());
	__ASSERT_ALWAYS(aLength > 0, PHYSADDR_FAULT());
	__ASSERT_ALWAYS(aLength > iPageSize, PHYSADDR_FAULT());

	// This code assumes that the media block size (normally 512) is >= the processor's 
	// cache-line size (typically 32 bytes). This may not be true for future processors.
	// If the cache-line size was 1024, for example,  reading 512 bytes into a client's 
	// buffer & then calling Cache::SyncMemoryAfterDmaRead would invalidate an entire 1024 
	// bytes in the user's address space.
	TUint cacheLineSize = Cache::DmaBufferAlignment();
	__ASSERT_ALWAYS(IsPowerOfTwo(cacheLineSize), PHYSADDR_FAULT());
	if (cacheLineSize > (TUint) aMediaBlockSize)
	    {
		OstTraceFunctionExitExt( DDMAHELPER_CONSTRUCT_EXIT1, this, KErrNotSupported );
		return KErrNotSupported;
	    }
	
	// Check whether the Kernel supports physical memory pinning:
	TInt mm = Kern::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, 0, 0) & EMemModelTypeMask;
	if (mm >= EMemModelTypeFlexible)
		{
		// Flexible memory model supports physical pinning for user (and Kernel) memory that
		// is the subject of DMA transfer.
		// Physical memory pinning ensures that:
		// - physical memory is not moved by RAM defragmentation.
		// - it is safe to do DMA against it or do sync cache (using new interface) even if/when
		// the owner of the memory (e.g. untrusted user application) decommits memory or panics.
		// For details @see Kern::PinPhysicalMemory.
		// Cache Sync of physically pinned memory on flexible memory model is done by:
		//  - Cache::SyncPhysicalMemoryBeforeDmaWrite
		//  - Cache::SyncPhysicalMemoryBeforeDmaRead
		//  - Cache::SyncPhysicalMemoryAfterDmaRead
		iPhysPinningAvailable = ETrue;
		__KTRACE_DMA(Kern::Printf("Memory model (%d) supports physical pinning\n",mm));
		NKern::ThreadEnterCS();
		TInt r=Kern::CreatePhysicalPinObject(iPhysicalPinObject);
		OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_CONSTRUCT1, "Memory model=%d supports physical pinning; created Physical Pin Object with return value=%d",mm, r);
		NKern::ThreadLeaveCS();
		if (r) return r;
		}
	else
		{
		// Memory models before flexible do not support memory pinning.
		// The driver has to use  PrepareMemoryForDMA/ReleaseMemoryFromDMA Kernel interface 
		// that ensures that physical memory won't be moved by RAM defragmentation module.
		// However, Kernel relies on assumption that the user memory won't disappear (e.g. by
		// user client closing the chunk or panics), as it would lead to Kernel crash.
		// For that reason, the only use case for DMA transfer into user memory is File System's
		// read/write buffer - as it is assumed that File System is trusted component.
		// To mark its buffer(s) for DMA transfer, File System must call UserSvr::RegisterTrustedChunk
		// before DMA transfer starts.
		// Cache sync. operations before/after DMA transfer must be done by using the old Cache interface:
		//  - Cache::SyncMemoryBeforeDmaWrite
		//  - Cache::SyncMemoryBeforeDmaRead
		//  - Cache::SyncMemoryAfterDmaRead
		// As they all require linear address as input, these methods also rely on File System buffers
		// to be in valid state during sync calls.
		iPhysPinningAvailable = EFalse;
		__KTRACE_DMA(Kern::Printf("Memory model (%d) doesn't support physical pining",mm));
		OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_CONSTRUCT2, "Memory model=%d doesn't support physical pinning",mm);
		iPhysicalPinObject = NULL;
		}
	
	// NOTE(review): iMaxPages is one less than the number of whole pages in
	// aLength — presumably headroom matching the "+2 straddling pages" in
	// iPageArray below; confirm against MaxFragLength()/callers.
	iMaxPages = (aLength >> iPageSizeLog2)-1;
	
	// 2 Additional pages for page straddling
	iPageArray = new TPhysAddr[iMaxPages+2];
	if (iPageArray != NULL)
		{
		iPageList = new TPageList[iMaxPages];
		if (iPageList != NULL)
			{
			iMediaBlockSize = aMediaBlockSize;
			iMediaBlockSizeMask = TInt64(iMediaBlockSize - 1);

			iDmaAlignment = aDmaAlignment;
			__KTRACE_DMA(Kern::Printf("-PHYSADDR: Construct iMaxPages(%d), MediaBlocks(%d), DMAalign(%d)",iMaxPages,iMediaBlockSize,iDmaAlignment));
			OstTraceExt3(TRACE_FLOW, DDMAHELPER_CONSTRUCT_EXIT2, "< KErrNone PHYSADDR: Construct iMaxPages %d MediaBlocks %d DMAalign %d", iMaxPages,iMediaBlockSize,iDmaAlignment );
			return KErrNone;
			}
		// Second allocation failed - roll back the first.
		delete [] iPageArray; iPageArray = NULL;
		}
	
	iMaxPages = 0;
	OstTraceFunctionExitExt( DDMAHELPER_CONSTRUCT_EXIT3, this, KErrNoMemory );
	return KErrNoMemory;
	}
   206 
   207 /**
   208  * Each Read/Write request is examined to determine if the descriptor that 
   209  * is referenced is mapped to a physical memory object; 
   210  * if so it prepares the memory, updates the request with physical memory information
   211  * and issues the request.
   212  * If a request does not make use of physical memory or is not configured correctly the
   213  * request is passed through without modification.
   214  */
TInt DDmaHelper::SendReceive(TLocDrvRequest& aReq, TLinAddr aLinAddress)
	{
	OstTraceFunctionEntry0( DDMAHELPER_SENDRECEIVE_ENTRY );
	DPrimaryMediaBase& primaryMedia = *aReq.Drive()->iPrimaryMedia;
	
	// Only reads and writes are candidates for DMA; everything else is
	// forwarded to the media thread's message queue unchanged.
	TInt reqId = aReq.Id();
	if (reqId != DLocalDrive::ERead && reqId != DLocalDrive::EWrite)
	    {
	    OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT1, "< Request is not ERead or EWrite, cannot perform Direct Memory Access");
		return aReq.SendReceive(&primaryMedia.iMsgQ);
	    }
		
	// Pass through lengths that do not fit in 32 bits or are smaller than
	// a single media block.
	if ((I64HIGH(aReq.Length()) > 0) || (aReq.Length() < iMediaBlockSize))
	    {
	    OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT2, "< Invalid request length, cannot perform Direct Memory Access");
		return aReq.SendReceive(&primaryMedia.iMsgQ);
	    }
	
	// If more than one user thread tries to access the drive, then bail out as there is 
	// only one DDmaHelper object per TLocDrv. Normally this shouldn't ever happen unless
	// a client app accesses the drive directly using TBusLOcalDrive or the file system is 
	// asynchronous (i.e. there is a separate drive thread) but the file server message is 
	// flagged as synchronous - e.g. EFsDrive
	if (TInt(__e32_atomic_add_ord32(&iLockCount, 1)) > 0)	// busy ?
		{
		__KTRACE_DMA(Kern::Printf("-PHYSADDR: BUSY"));
		// Undo our increment before falling back to the non-DMA path.
		__e32_atomic_add_ord32(&iLockCount, TUint32(-1));
		OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT3, "< DMA Busy");
		return aReq.SendReceive(&primaryMedia.iMsgQ);
		}

	// make a copy of the request 
	iMemoryType = EUnknown;
	iReq = &aReq;
	iReqId = reqId;
		
	iReqPosClient = iReq->Pos();

	iReqLenClient = I64LOW(iReq->Length());

	iReqRemoteDesOffset = iReq->RemoteDesOffset();
	iReqFlags = iReq->Flags();

	iRemoteThread = iReq->RemoteThread();
	iCurrentThread = &Kern::CurrentThread();
	iOwningThread = iRemoteThread ? iRemoteThread : iCurrentThread;

	iChunk = NULL;
	iChunkOffset = 0;
	iLinAddressUser = NULL;
	iLenConsumed = 0;

	// point to the start of the descriptor
	iLinAddressUser = aLinAddress - iReqRemoteDesOffset;
	
	// Need to check descriptors from both direct Clients (i.e. file cache, RemoteThread == NULL )
	// and Remote Server Clients (file server clients, RemoteThread != NULL)
	// Shared Memory can potentially be used by both remote server and direct clients
	NKern::ThreadEnterCS();
	iChunk = Kern::OpenSharedChunk(iOwningThread, (const TAny*) iLinAddressUser, ETrue, iChunkOffset);
	NKern::ThreadLeaveCS();
	
	// Fragment loop: each iteration prepares one DMA fragment
	// (RequestStart), sends it to the media driver, then syncs and
	// releases the pages again (RequestEnd / ResetPageLists).
	TInt fragments = 0;
	TInt r;
	do
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:SendReceive() iReqLen %d; iLenConsumed %d; fragments %d",iReqLen, iLenConsumed, fragments));
		OstTraceExt2( TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE1, "PHYSADDR:SendReceive() iLenConsumed=%d; fragments=%d", iLenConsumed, fragments);
		r = RequestStart();
		if (r != KErrNone)
			{
			// Fragment setup failed: drop the shared chunk reference (if
			// any) and either report the error (mid-transfer) or fall back
			// to the ordinary non-DMA path (nothing sent yet).
			if (iChunk)
				{
				NKern::ThreadEnterCS();
				Kern::ChunkClose(iChunk);
				iChunk = NULL;
				NKern::ThreadLeaveCS();
				}
			__KTRACE_DMA(Kern::Printf("<PHYSADDR:SendReceive()- r:%d",r));
			OstTrace1( TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT4, "< PHYSADDR:SendReceive() Return code %d",r);
			iMemoryType = EUnknown;
			__e32_atomic_add_ord32(&iLockCount, TUint32(-1));
			return fragments ? r : iReq->SendReceive(&primaryMedia.iMsgQ);
			}
		else
			{
			// Tell the media driver physical address info is available.
			iReq->Flags() |= TLocDrvRequest::EPhysAddr;
			}

		__KTRACE_DMA(Kern::Printf("-PHYSADDR:SendReceive() rThread %08X pos %08lX, len %d addr %08X off %08X", 
				iRemoteThread, iReq->Pos(), I64LOW(iReq->Length()), iLinAddressUser, iReqRemoteDesOffset));
		OstTraceExt4(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE2, "PHYSADDR:SendReceive() position=%Ld; length=%d; address=0x%x; offset=0x%x", iReq->Pos(), (TInt) I64LOW(iReq->Length()), (TUint) iLinAddressUser, (TUint) iReqRemoteDesOffset );
		
		__ASSERT_DEBUG(iReq->Length() == FragLength(), PHYSADDR_FAULT());
		__ASSERT_DEBUG(iReq->Length() != 0, PHYSADDR_FAULT());

		// reinstate iValue in case overwritten by DMediaPagingDevice::CompleteRequest()
		iReq->iValue = iReqId;
		
		OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE3, "Dma SendReceive Start iReq=%d", iReq);
		r = iReq->SendReceive(&primaryMedia.iMsgQ);
		OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE4, "Dma SendReceive Return iReq=%d", iReq);
		
		// The media driver could potentially choose to deal with the request 
		// without accessing physical memory (e.g. if the data is already cached).
		iLenConsumed += iFragLenRemaining;
				
		RequestEnd();
		
		ResetPageLists();

		fragments++;
		
		}
	while(r == KErrNone && LengthRemaining() > 0);

	// All fragments done (or an error from the media driver): release the
	// shared chunk reference taken above.
	if (iChunk)		
		{
		NKern::ThreadEnterCS();
		Kern::ChunkClose(iChunk);
		iChunk = NULL;
		NKern::ThreadLeaveCS();
		}
	
	// Set remote descriptor length to iReqLenClient
	if (iReqId == DLocalDrive::ERead && r == KErrNone)
		r = UpdateRemoteDescriptorLength(iReqLenClient);

	__KTRACE_DMA(Kern::Printf("<PHYSADDR:SendReceive()"));

	iMemoryType = EUnknown;

	// Release the single-client lock taken at the top of this function.
	__e32_atomic_add_ord32(&iLockCount, TUint32(-1));
	OstTraceFunctionExit0( DDMAHELPER_SENDRECEIVE_EXIT5 );
	return r;
	}
   351 
   352 
   353 /**
   354  * Each read/write request is split into one or more DMA "fragments".
   355  * The maximum size of each fragment depends on the size of iPageArray[].
    356  * Subsequent calls to RequestStart may be required to complete a request.
   357  * 
   358  * The physical address is checked for DMA alignment or the possibility of 
   359  * eventually alignment due to mis-aligned start/end media blocks.
   360  * 
   361  * A DMA "fragment" can be split over a number of pages as follows :
   362  * ----------------------------------------------------------
   363  * |    4K    |    4K    |    4K    |    4K    |
   364  * ----------------------------------------------------------
   365  *      ********************************		: region to be read
   366  * <----------- iFragLen ----------->
   367  * 
   368  * The pages may not be physically contiguous; if they are not, 
   369  * then they are supplied to the media driver one contiguous 
    370  * segment at a time by GetPhysicalAddress()
   371  **/
TInt DDmaHelper::RequestStart()
	{
	OstTraceFunctionEntry1( DDMAHELPER_REQUESTSTART_ENTRY, this );
	__KTRACE_DMA(Kern::Printf(">PHYSADDR:RequestStart()"));

	// Restart the page-list iterator used by GetPhysicalAddress().
	iIndex = 0;

	TLinAddr startAddr = LinAddress();
	TInt64 startPos = iReqPosClient + iLenConsumed;
	TInt mediaBlockOffset = BlockOffset(startPos);
	TInt addrBlockOffset = BlockOffset(startAddr);
	// Fragment length: whatever is left, capped by the page-array capacity.
	TInt length = Min(LengthRemaining(), MaxFragLength());

	iPageArrayCount = iPageListCount = 0;

	// Number of pages touched by [startAddr, startAddr+length).
	TLinAddr firstPageStart = PageAlign(startAddr);
	TLinAddr lastPageStart = PageAlign(startAddr + length + iPageSize - 1);
	iPageArrayCount = (lastPageStart - firstPageStart + 1) >> iPageSizeLog2;

	iMemoryType = EUnknown;
	iPhysAddr = KPhysMemFragmented; // Default - Mark memory as fragmented

	//*************************************
	// Check Physical Page Alignment!!	
	//*************************************
	if (!IsBlockAligned(startPos))
		{
		// Will DMA align at next block alignment? such that DMA can be used
		// NOTE(review): 'ofset' is computed here but never used afterwards -
		// looks like dead code left from an earlier alignment check; confirm.
		TInt ofset = I64LOW((startPos + iMediaBlockSize) & (iMediaBlockSize-1));
		ofset = iMediaBlockSize - ofset;

		if (!IsDmaAligned(startAddr))
			{			
			__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - not DMA Aligned pos 0x%x addr 0x%x)",I64LOW(startPos), startAddr));
			OstTraceExt2( TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT1, "< KErrNotSupported Not DMA Aligned startPos %x startAddr %x", I64LOW(startPos), startAddr );
			return KErrNotSupported;
			}
		}
	else 
		{ //block aligned!
		if (!IsDmaAligned(startAddr))
			{
			__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - not DMA Aligned (0x%x)",startAddr));
			OstTrace1(TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT2, "< KErrNotSupported Not DMA Aligned startAddr %x", startAddr);
			return KErrNotSupported;
			}
		}

	//************************************************
	// Check for possible striping of RAM pages vs Media blocks
	// i.e. Media blocks which may straddle 2 non contiguous pages. 
	//************************************************
	if (mediaBlockOffset != addrBlockOffset)
		{
		__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - Frag / not block aligned: pos 0x%x addr 0x%x", I64LOW(startPos), startAddr));
		OstTraceExt2(TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT3, "< KErrNotSupported Frag / not block aligned: startPos 0x%x startAddr 0x%x", I64LOW(startPos), startAddr );
		return KErrNotSupported;
		}

	//************************************************
	// Is it File Server Cache request ?
	//************************************************
	if (iChunk == NULL &&				// Not Shared memory
		iRemoteThread == NULL &&		// Direct Client Request
		IsPageAligned(startAddr) &&
		IsBlockAligned(startPos) &&
		(iPageArrayCount > 0) )
		{
		TLinAddr firstPageAddr = PageAlign(startAddr); //ensure that it is page aligned.
		
		// Pin (flexible memory model) or prepare (older models) the client
		// pages so RAM defragmentation cannot move them during the DMA.
		TInt r = KErrNone;
		if (iPhysPinningAvailable)
			{
			TBool readOnlyMem = (iReqId == DLocalDrive::EWrite); 
			r =  Kern::PinPhysicalMemory(iPhysicalPinObject,  firstPageAddr, iPageArrayCount << iPageSizeLog2,
					readOnlyMem, iPhysAddr, iPageArray, iMapAttr, iPageColour, iCurrentThread);
			}
		else
			{
			NKern::ThreadEnterCS();
			r = Kern::PrepareMemoryForDMA(iCurrentThread, (void*)firstPageAddr, iPageArrayCount << iPageSizeLog2, iPageArray);
			NKern::ThreadLeaveCS();
			}
		if (r != KErrNone) 
		    {
			OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT4, this, r );
			return r;
		    }

		iMemoryType = EFileServerChunk;
		
		__KTRACE_DMA(Kern::Printf("-PHYSADDR:RequestStart() - EFileServerChunk"));
		OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART1, "EFileServerChunk");
		}
	//****************************
	// Is it shared chunk ?
	//****************************
	else if (iChunk)
		{
		// calculate chunk offset of start of first page
		TInt offset = iChunkOffset + iReqRemoteDesOffset+ iLenConsumed;
				
		TInt r = Kern::ChunkPhysicalAddress(iChunk, offset, length, iLinAddressKernel, iMapAttr, iPhysAddr, iPageArray);
		
		if (r < KErrNone)
		    {
			OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT5, this, r );
			return r;  // 0 = Contiguous Memory, 1 = Fragmented/Dis-Contiguous Memory
		    }
			
		iMemoryType = ESharedChunk;
		
		__KTRACE_DMA(Kern::Printf("-PHYSADDR:RequestStart() - ESharedChunk"));
		OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART2, "ESharedChunk");
		}
	else
		{
		// Neither a direct file-server-cache request nor a shared chunk:
		// DMA cannot be used for this memory.
		__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - EUnknown"));
		OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT6, this, KErrNotFound );
		return KErrNotFound;
		}

	SetFragLength(length);
	
	//************************************************
	// Build Contiguous Page list
	//************************************************
	BuildPageList();
	
	//************************************************
	// Set up request parameters for this fragment 
	//************************************************
	iReq->Length() = MAKE_TINT64(0, length);
	iReq->Pos() = iReqPosClient + iLenConsumed;
	iReq->RemoteDesOffset() = iReqRemoteDesOffset + iLenConsumed;
	// restore EAdjusted flag to ensure iReq->Pos() is adjusted correctly
	iReq->Flags()&= ~TLocDrvRequest::EAdjusted;
	iReq->Flags()|= (iReqFlags & TLocDrvRequest::EAdjusted);

	//************************************************
	// Sync memory
	//************************************************
	__KTRACE_DMA(Kern::Printf(">SYNC-PHYSADDR:addr 0x%x len %d", startAddr, length));
	OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART3, "startAddr=0x%x length=%d", (TUint) startAddr, length );

	// Only sync whole blocks: it is assumed that the media driver will transfer 
	// partial start and end blocks without DMA

	TInt startBlockPartialLen = IsBlockAligned(startPos) ? 0 : iMediaBlockSize - BlockOffset(startPos);
	TInt blockLen = (TInt) BlockAlign(length - startBlockPartialLen);

	if (iReqId == DLocalDrive::EWrite)
		{
		if (iMemoryType == ESharedChunk)
			{
			Cache::SyncMemoryBeforeDmaWrite(iLinAddressKernel+startBlockPartialLen, blockLen, iMapAttr);
			}
		else // (iMemoryType == EFileServerChunk)
			{
			if (iPhysPinningAvailable)
				Cache::SyncPhysicalMemoryBeforeDmaWrite(iPageArray, iPageColour, startBlockPartialLen, blockLen, iMapAttr);
			else
				Cache::SyncMemoryBeforeDmaWrite(startAddr+startBlockPartialLen, blockLen);
			}
		}
	else
		{
		// Read: sync the full fragment before the DMA engine writes to it.
		if (iMemoryType == ESharedChunk)
			Cache::SyncMemoryBeforeDmaRead(iLinAddressKernel, length, iMapAttr);
		else // (iMemoryType == EFileServerChunk)
			{
			if (iPhysPinningAvailable)
				Cache::SyncPhysicalMemoryBeforeDmaRead(iPageArray, iPageColour, 0, length, iMapAttr);
			else
				Cache::SyncMemoryBeforeDmaRead(startAddr, length);
			}
		}

	__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart()"));

	OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT7, this, KErrNone );
	return KErrNone;
	}
   555 
   556 /**
    557  * After a read request this method synchronises the physical memory that was in use.
   558  */
void DDmaHelper::RequestEnd()
	{
	OstTraceFunctionEntry0( DDMAHELPER_REQUESTEND_ENTRY );
	__KTRACE_DMA(Kern::Printf(">PHYSADDR:RequestEnd()"));

	// Must only be called while a read/write fragment is in flight, with
	// the memory already classified by RequestStart().
	__ASSERT_DEBUG(iReqId == DLocalDrive::ERead || iReqId == DLocalDrive::EWrite, PHYSADDR_FAULT());
	__ASSERT_DEBUG(iMemoryType == ESharedChunk || iMemoryType == EFileServerChunk, PHYSADDR_FAULT());

	TInt length = FragLength();	// len of data just transferred
	TLinAddr startAddr = LinAddress() - length;

	// Sync the memory : but not if the media driver has decided to transfer ALL the data using IPC rather than DMA.
	// It is assumed that the media driver will transfer partial start & end blocks using IPC, but it may also choose 
	// to use IPC for the ENTIRE fragment when read/writing at the end of the media (see medmmc.cpp)
	// (iFragLenRemaining < length means GetPhysicalAddress() was called at
	// least once, i.e. some of the fragment really went over DMA.)
	if (iFragLenRemaining < length && iReqId == DLocalDrive::ERead)
		{
		// Only whole media blocks were DMA'd; skip any partial start block.
		TInt64 startPos = iReq->Pos();
		TInt startBlockPartialLen = IsBlockAligned(startPos) ? 0 : iMediaBlockSize - BlockOffset(startPos);
		TInt blockLen = (TInt) BlockAlign(length - startBlockPartialLen);

		if (iMemoryType == ESharedChunk)
			{
			Cache::SyncMemoryAfterDmaRead(iLinAddressKernel + startBlockPartialLen, blockLen);
			}
		else // (iMemoryType == EFileServerChunk)
			{
			if (iPhysPinningAvailable)
				Cache::SyncPhysicalMemoryAfterDmaRead(iPageArray, iPageColour, startBlockPartialLen, blockLen, iMapAttr);
			else
				Cache::SyncMemoryAfterDmaRead(startAddr + startBlockPartialLen, blockLen);
			}

		}
	// Unpin / release the pages pinned by RequestStart().
	ReleasePages(PageAlign(startAddr));
	OstTraceFunctionExit0( DDMAHELPER_REQUESTEND_EXIT );
	}
   596 
   597 /**
   598  * For File Server chunks this method releases the current physical memory in use.
   599  * 
   600  * @see Kern::ReleaseMemoryFromDMA()
   601  */
   602 void DDmaHelper::ReleasePages(TLinAddr aAddr)
   603 	{
   604 	OstTraceFunctionEntry1( DDMAHELPER_RELEASEPAGES_ENTRY, this );
   605 	if (iMemoryType == EFileServerChunk)
   606 		{
   607 		__KTRACE_DMA(Kern::Printf(">PHYSADDR():ReleasePages thread (0x%x) aAddr(0x%08x) size(%d) iPageArray(0x%x)",iCurrentThread, aAddr, (iPageArrayCount << iPageSizeLog2), iPageArray));
   608 		OstTraceExt3( TRACE_DMASUPPORT, DDMAHELPER_RELEASEPAGES, "ReleasePages aAddr=0x%x; size=%d; iPageArray-0x%x", (TUint) aAddr, (iPageArrayCount << iPageSizeLog2), (TUint) iPageArray);
   609 
   610 		TInt r;
   611 		if (iPhysPinningAvailable)
   612 			{
   613 			r = Kern::UnpinPhysicalMemory(iPhysicalPinObject);
   614 			}
   615 		else
   616 			{
   617 			NKern::ThreadEnterCS();
   618 			r = Kern::ReleaseMemoryFromDMA(iCurrentThread, (void*) aAddr, iPageArrayCount << iPageSizeLog2, iPageArray);
   619 			NKern::ThreadLeaveCS();
   620 			}
   621 		__ASSERT_ALWAYS(r == KErrNone, PHYSADDR_FAULT());
   622 		}		
   623 	OstTraceFunctionExit1( DDMAHELPER_RELEASEPAGES_EXIT, this );
   624 	}
   625 
   626 /**
   627  * Utility method which examines the page array, compiling adjacent pages into contiguous fragments
   628  * and populating iPageList with said fragments.
   629  */
void DDmaHelper::BuildPageList()
	{
	OstTraceFunctionEntry1( DDMAHELPER_BUILDPAGELIST_ENTRY, this );
	iPageListCount = 0;
	
	// iPhysAddr != KPhysMemFragmented means RequestStart() found (or was
	// told) the whole fragment is physically contiguous.
	if (iPhysAddr != KPhysMemFragmented)
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:BuildPageList() - Contiguous Memory"));
		OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_BUILDPAGELIST1, "Contiguous Memory");
		// Only one entry required.
		iPageList[0].iAddress = iPhysAddr;
		iPageList[0].iLength = FragLength();
		iPageListCount = 1;
		}
	else
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:BuildPageList() - Dis-Contiguous Memory"));
		OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_BUILDPAGELIST2, "Dis-Contiguous Memory");
		TInt offset;
		
		// First entry: from the fragment's offset within its first page up
		// to the end of that page.
		// NOTE(review): if the whole fragment fits inside the first page
		// (iPageArrayCount == 1) the loop below never trims this length to
		// FragLength() - presumably callers never hit that case; confirm.
		offset = PageOffset(iChunkOffset + iReqRemoteDesOffset+ iLenConsumed);
		iPageList[0].iAddress = iPageArray[0]+offset;
		iPageList[0].iLength  = iPageSize-offset;
		
		TInt lengthRemaining = FragLength() - iPageList[0].iLength;
		
		TInt i =1;
        for( ; i < iPageArrayCount; i++)
            {
            //Check if RAM pages are physically adjacent
            if ((iPageArray[i-1] + PageSize()) == iPageArray[i])
                {
                // Adjacent pages - just add length
                iPageList[iPageListCount].iLength += PageSize();             
                }
            else     	
                {
                // Not Adjacent, start new Memory fragment
                iPageListCount++;
                iPageList[iPageListCount].iAddress = iPageArray[i];
                iPageList[iPageListCount].iLength  = iPageSize;
                }
            
            lengthRemaining -= PageSize();
            if (lengthRemaining < 0)
            	{
            	// Last page, re-adjust length for odd remainder.            	
            	iPageList[iPageListCount].iLength += lengthRemaining;
            	break;
            	}
            }
        
        // Convert last-used index into a count.
        iPageListCount++;
		}

//#ifdef __DEBUG_DMASUP__
//	for (TInt m=0; m<iPageListCount; m++)
//		__KTRACE_DMA(Kern::Printf("-PHYSADDR:BuildPageList() [%d]: %08X l:%d", m, iPageList[m].iAddress, iPageList[m].iLength));
//#endif
	OstTraceFunctionExit1( DDMAHELPER_BUILDPAGELIST_EXIT, this );
	}
   691 
   692 
   693 /**
   694  * Returns Address and Length of next contiguous Physical memory fragment
   695  * 
   696  * @param aAddr On success, populated with the Physical Address of the next fragment.
   697  * @param aLen  On success, populated with the length in bytes of the next fragment.
   698  * 
   699  * @return KErrNone, if successful;
   700  * 		   KErrNoMemory, if no more memory fragments left.
   701  */
TInt DDmaHelper::GetPhysicalAddress(TPhysAddr& aAddr, TInt& aLen)
	{
	OstTraceFunctionEntry1( DUP1_DDMAHELPER_GETPHYSICALADDRESS_ENTRY, this );
	// All fragments already handed out? Return zeros and an error so the
	// media driver stops iterating.
	if (iIndex >= iPageListCount)
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:GetPhysD() [%d], PageListCount:%d", iIndex, iPageListCount));
		OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_GETPHYSICALADDRESS1, "GetPhysD() [%d]; iPageCountList=%d", iIndex, iPageListCount );
		aAddr = 0;
		aLen = 0;
		OstTraceFunctionExitExt( DUP1_DDMAHELPER_GETPHYSICALADDRESS_EXIT1, this, KErrGeneral );
		return KErrGeneral;
		}
	
	// Hand out the next contiguous run and account for it: iLenConsumed /
	// iFragLenRemaining record how much of the fragment went over DMA,
	// which RequestEnd() uses to decide whether a cache sync is needed.
	aAddr = iPageList[iIndex].iAddress;
	aLen = iPageList[iIndex].iLength;
	iLenConsumed+= aLen;
	iFragLenRemaining-= aLen;
	
	__KTRACE_DMA(Kern::Printf(">PHYSADDR:GetPhysD() [%d] addr:0x%08X, l:%d; Used:%d, Left:%d", iIndex, aAddr, aLen, iLenConsumed, iFragLenRemaining));
	OstTraceExt5(TRACE_DMASUPPORT, DDMAHELPER_GETPHYSICALADDRESS2, "GetPhysD() [%d]; address=0x%x; length=%d; iLenConsumed=%d; iFragLenRemaining=%d", iIndex, (TUint) aAddr, aLen, iLenConsumed, iFragLenRemaining);
	__ASSERT_DEBUG(aLen >= 0, PHYSADDR_FAULT());

	iIndex++;  //Move index to next page

	OstTraceFunctionExitExt( DDMAHELPER_GETPHYSICALADDRESS_EXIT2, this, KErrNone );
	return KErrNone;
	}
   729 
   730 
   731 #ifdef __DEMAND_PAGING__
   732 /**
   733  * Returns Address and Length of next contiguous Physical memory. 
   734  * Static function specifically for Demand Paging support
   735  * 
   736  * @param aReq  TLocDrvRequest from which physical 
   737  * @param aAddr Populated with the Physical Address of the Request aReq.
   738  * @param aLen  Populated with the length in bytes of the memory.
   739  * 
   740  * @return KErrNone 
   741  */
   742 TInt DDmaHelper::GetPhysicalAddress(TLocDrvRequest& aReq, TPhysAddr& aAddr, TInt& aLen)
   743 	{
   744 	OstTraceFunctionEntry0( DDMAHELPER_GETPHYSICALADDRESS_ENTRY );
   745 	__ASSERT_DEBUG( (aReq.Flags() & TLocDrvRequest::ETClientBuffer) == 0,  PHYSADDR_FAULT());
   746 	TLinAddr linAddr = (TLinAddr) aReq.RemoteDes();
   747 	TInt& offset = aReq.RemoteDesOffset();
   748 	TLinAddr currLinAddr = linAddr + offset;
   749 	TInt reqLen = I64LOW(aReq.Length());
   750 	__ASSERT_DEBUG(I64HIGH(aReq.Length()) == 0,  PHYSADDR_FAULT());
   751 
   752 	aAddr = Epoc::LinearToPhysical(currLinAddr);
   753 
   754 	// Set the initial length to be the length remaining in this page or the request length (whichever is shorter).
   755 	// If there are subsequent pages, we then need to determine whether they are contiguous
   756 	aLen = Min( (TInt) (PageAlign(currLinAddr+iPageSize) - currLinAddr), reqLen - offset);
   757 
   758 	__ASSERT_DEBUG(aLen > 0,  PHYSADDR_FAULT());
   759 	
   760 	TPhysAddr currPhysPageAddr = PageAlign((TLinAddr) aAddr);
   761 
   762 	offset+= aLen;
   763 
   764 
   765 	while (offset < reqLen)
   766 		{
   767 		TPhysAddr nextPhysPageAddr = Epoc::LinearToPhysical(linAddr + offset);
   768 		__ASSERT_DEBUG(PageOffset((TLinAddr) nextPhysPageAddr) == 0,  PHYSADDR_FAULT());
   769 
   770 		if (nextPhysPageAddr != currPhysPageAddr + iPageSize)
   771 			break;
   772 		
   773 		currPhysPageAddr = nextPhysPageAddr;
   774 
   775 		TInt len = Min(iPageSize, reqLen - offset);
   776 		offset+= len;
   777 		aLen+= len;
   778 		}
   779 
   780 
   781 	__KTRACE_DMA(Kern::Printf(">PHYSADDR:DP:GetPhysS(), linAddr %08X, physAddr %08X, len %x reqLen %x", linAddr + offset, aAddr, aLen, reqLen));
   782 	OstTraceExt4(TRACE_DEMANDPAGING, DDMAHELPER_GETPHYSICALADDRESS_DP, "linAddr=0x%x; physAddr=0x%x; length=0x%x; reqLen=0x%x", linAddr + offset, aAddr, aLen, reqLen);
   783 	OstTraceFunctionExit0( DDMAHELPER_GETPHYSICALADDRESS_EXIT );
   784 	return KErrNone;
   785 	}
   786 #endif	// (__DEMAND_PAGING__)
   787 
   788 
   789 /**
   790  * Modifies the current requests remote descriptor length
   791  * 
   792  * @param aLength Length in bytes to which the descriptor is to be set.
   793  * 
   794  * @return KErrNone, if successful;
   795  * 		   KErrBadDescriptor, if descriptor is corrupted;
   796  * 		   otherwise one of the other system wide error codes.
   797  */
   798 
   799 TInt DDmaHelper::UpdateRemoteDescriptorLength(TInt aLength)
   800 	{
   801 	OstTraceFunctionEntryExt( DDMAHELPER_UPDATEREMOTEDESCRIPTORLENGTH_ENTRY, this );
   802 	__KTRACE_DMA(Kern::Printf(">PHYSADDR:UpDesLen(%d)",aLength));
   803 
   804 	// Restore request Id (overwritten by KErrNone return code) to stop ASSERT in WriteRemote
   805 	iReq->Id() = DLocalDrive::ERead;
   806 
   807 	// restore caller's descriptor offset
   808 	iReq->RemoteDesOffset() = iReqRemoteDesOffset;
   809 
   810 	// Write a zero length descriptor at the end such that the descriptors length is correctly updated.
   811 	TPtrC8 zeroDes(NULL, 0);
   812 	TInt r = iReq->WriteRemote(&zeroDes, aLength);
   813 
   814 	// restore return code	
   815 	iReq->iValue = KErrNone;
   816 
   817 	OstTraceFunctionExitExt( DDMAHELPER_UPDATEREMOTEDESCRIPTORLENGTH_EXIT, this, r );
   818 	return r;
   819 	}
   820