0
|
1 |
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
|
|
2 |
// All rights reserved.
|
|
3 |
// This component and the accompanying materials are made available
|
|
4 |
// under the terms of the License "Eclipse Public License v1.0"
|
|
5 |
// which accompanies this distribution, and is available
|
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
|
|
7 |
//
|
|
8 |
// Initial Contributors:
|
|
9 |
// Nokia Corporation - initial contribution.
|
|
10 |
//
|
|
11 |
// Contributors:
|
|
12 |
//
|
|
13 |
// Description:
|
|
14 |
// e32\drivers\locmedia\dmasupport.cpp
|
|
15 |
//
|
|
16 |
//
|
|
17 |
|
|
18 |
#include <kernel/kernel.h>
|
|
19 |
#include <kernel/cache.h>
|
|
20 |
#include "locmedia.h"
|
|
21 |
#include "dmasupport.h"
|
|
22 |
#include "dmasupport.inl"
|
|
23 |
|
|
24 |
#define PHYSADDR_FAULT() Kern::Fault("TLOCDRV-PHYS-ADDR",__LINE__)
|
|
25 |
|
|
26 |
//#define __DEBUG_DMASUP__
|
|
27 |
#ifdef __DEBUG_DMASUP__
|
|
28 |
#define __KTRACE_DMA(p) {p;}
|
|
29 |
#else
|
|
30 |
#define __KTRACE_DMA(p)
|
|
31 |
#endif
|
|
32 |
|
|
33 |
// Cached MMU page geometry. Static (shared by all DDmaHelper instances);
// initialised in the DDmaHelper constructor from Kern::RoundToPageSize(1).
TInt DDmaHelper::iPageSize;     // page size in bytes
TInt DDmaHelper::iPageSizeLog2; // log2 of the page size
TInt DDmaHelper::iPageSizeMsk;  // iPageSize-1: mask selecting the in-page offset
|
|
36 |
|
|
37 |
/******************************************************************************
|
|
38 |
DDmaHelper
|
|
39 |
******************************************************************************/
|
|
40 |
// Sentinel value for iPhysAddr: marks the mapped memory as physically
// dis-contiguous, so BuildPageList() must walk iPageArray rather than
// emitting a single contiguous entry.
const TPhysAddr KPhysMemFragmented = KPhysAddrInvalid;
|
|
41 |
|
|
42 |
/**
 * Returns the bit index of the most significant set bit of aVal,
 * i.e. floor(log2(aVal)) for non-zero input.
 */
TUint32 Log2(TUint32 aVal)
	{
	// The mask ladder below assumes a 32-bit word.
	__ASSERT_COMPILE(sizeof(TUint32) == 4);

	// Binary search for the most significant set bit: whenever the top
	// half of the remaining window is clear, the MSB must lie in the
	// lower half, so shift the value up and lower the candidate index.
	TUint32 msb = 31;
	if ((aVal & 0xFFFF0000u) == 0) { msb -= 16; aVal <<= 16; }
	if ((aVal & 0xFF000000u) == 0) { msb -= 8;  aVal <<= 8;  }
	if ((aVal & 0xF0000000u) == 0) { msb -= 4;  aVal <<= 4;  }
	if ((aVal & 0xC0000000u) == 0) { msb -= 2;  aVal <<= 2;  }
	if ((aVal & 0x80000000u) == 0) { msb -= 1; }

	return msb;
	}
|
|
56 |
|
|
57 |
TBool IsPowerOfTwo(TInt aNum)
//
// Returns ETrue if aNum is a power of two
//
	{
	if (aNum == 0)
		return EFalse;

	// A power of two has exactly one set bit; isolating the lowest set
	// bit with (aNum & -aNum) must then reproduce the original value.
	return (aNum & -aNum) == aNum;
	}
|
|
64 |
|
|
65 |
void DDmaHelper::ResetPageLists()
|
|
66 |
{
|
|
67 |
iFragLen = 0;
|
|
68 |
iFragLenRemaining = 0;
|
|
69 |
}
|
|
70 |
|
|
71 |
DDmaHelper::DDmaHelper()
	{
	// Capture the MMU page geometry once; these members are static and
	// therefore shared by every DDmaHelper instance.
	iPageSize = Kern::RoundToPageSize(1);
	__ASSERT_ALWAYS(IsPowerOfTwo(iPageSize), PHYSADDR_FAULT());
	iPageSizeMsk = iPageSize - 1;
	iPageSizeLog2 = Log2(iPageSize);
	}
|
|
78 |
|
|
79 |
DDmaHelper::~DDmaHelper()
	{
	// Release the page bookkeeping arrays (delete[] is safe on NULL).
	delete [] iPageList;
	delete [] iPageArray;

	// The pin object only exists when the memory model supports physical
	// pinning (see Construct()); destroy it inside a critical section.
	if (iPhysicalPinObject != NULL)
		{
		NKern::ThreadEnterCS();
		Kern::DestroyPhysicalPinObject(iPhysicalPinObject);
		NKern::ThreadLeaveCS();
		}
	}
|
|
90 |
|
|
91 |
/**
|
|
92 |
Constructs the DDmaHelper object
|
|
93 |
|
|
94 |
@param aLength The maximum length of data mapped by this object.
|
|
95 |
Should be a multiple of the page size
|
|
96 |
@param aMediaBlockSize The minimum amount data that the media can transfer in read / write operations
|
|
97 |
@param aDmaAlignment The memory alignment required by the media devices DMA controller. (i.e. word aligned = 2)
|
|
98 |
|
|
99 |
@return KErrNone,if successful;
|
|
100 |
        KErrNoMemory, if unable to create the page arrays.
|
|
101 |
*/
|
|
102 |
TInt DDmaHelper::Construct(TInt aLength, TInt aMediaBlockSize, TInt aDmaAlignment)
	{
	__ASSERT_ALWAYS(aMediaBlockSize > 0, PHYSADDR_FAULT());
	__ASSERT_ALWAYS(IsPowerOfTwo(aMediaBlockSize), PHYSADDR_FAULT());
	__ASSERT_ALWAYS(aLength > 0, PHYSADDR_FAULT());
	__ASSERT_ALWAYS(aLength > iPageSize, PHYSADDR_FAULT());

	// This code assumes the media block size (normally 512) is >= the
	// processor's cache-line size (typically 32 bytes). If the cache-line
	// size were larger (e.g. 1024), reading 512 bytes into a client's
	// buffer and then calling Cache::SyncMemoryAfterDmaRead would
	// invalidate a whole 1024 bytes of the user's address space.
	TUint cacheLineSize = Cache::DmaBufferAlignment();
	__ASSERT_ALWAYS(IsPowerOfTwo(cacheLineSize), PHYSADDR_FAULT());
	if (cacheLineSize > (TUint) aMediaBlockSize)
		return KErrNotSupported;

	// Determine whether this memory model supports physical memory pinning.
	TInt mm = Kern::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, 0, 0) & EMemModelTypeMask;
	if (mm >= EMemModelTypeFlexible)
		{
		// The flexible memory model supports physical pinning of user (and
		// kernel) memory involved in a DMA transfer. Pinning guarantees that
		// - the physical pages are not moved by RAM defragmentation, and
		// - DMA / cache sync (via the physical-address Cache interface) stays
		//   safe even if the memory's owner (e.g. an untrusted application)
		//   decommits the memory or panics. See Kern::PinPhysicalMemory.
		// Cache maintenance for pinned memory uses:
		// - Cache::SyncPhysicalMemoryBeforeDmaWrite
		// - Cache::SyncPhysicalMemoryBeforeDmaRead
		// - Cache::SyncPhysicalMemoryAfterDmaRead
		iPhysPinningAvailable = ETrue;
		__KTRACE_DMA(Kern::Printf("Memory model (%d) supports physical pining\n",mm));
		NKern::ThreadEnterCS();
		TInt r = Kern::CreatePhysicalPinObject(iPhysicalPinObject);
		NKern::ThreadLeaveCS();
		if (r != KErrNone)
			return r;
		}
	else
		{
		// Earlier memory models cannot pin. The driver must instead use the
		// PrepareMemoryForDMA/ReleaseMemoryFromDMA kernel interface, which
		// stops RAM defragmentation from moving the pages but relies on the
		// user memory staying valid (the owner must not close the chunk or
		// panic) - so the only supported user-memory DMA client is the file
		// server's read/write buffer, which is a trusted component and must
		// mark its buffer(s) via UserSvr::RegisterTrustedChunk before the
		// transfer starts. Cache maintenance then uses the linear-address
		// Cache interface:
		// - Cache::SyncMemoryBeforeDmaWrite
		// - Cache::SyncMemoryBeforeDmaRead
		// - Cache::SyncMemoryAfterDmaRead
		// which likewise requires the buffers to remain valid during sync.
		iPhysPinningAvailable = EFalse;
		__KTRACE_DMA(Kern::Printf("Memory model (%d) doesn't support physical pining\n",mm));
		iPhysicalPinObject = NULL;
		}

	iMaxPages = (aLength >> iPageSizeLog2) - 1;

	// Two additional entries to allow for transfers that straddle page
	// boundaries at each end.
	iPageArray = new TPhysAddr[iMaxPages+2];
	iPageList = (iPageArray != NULL) ? new TPageList[iMaxPages] : NULL;

	if (iPageList == NULL)
		{
		// Either allocation failed: leave the object in a safe empty state.
		delete [] iPageArray;
		iPageArray = NULL;
		iMaxPages = 0;
		return KErrNoMemory;
		}

	iMediaBlockSize = aMediaBlockSize;
	iMediaBlockSizeMask = TInt64(iMediaBlockSize - 1);
	iDmaAlignment = aDmaAlignment;

	__KTRACE_DMA(Kern::Printf("-PHYSADDR: Construct iMaxPages(%d), MediaBlocks(%d), DMAalign(%d)",iMaxPages,iMediaBlockSize,iDmaAlignment));
	return KErrNone;
	}
|
|
185 |
|
|
186 |
/**
|
|
187 |
* Each Read/Write request is examined to determine if the descriptor that
|
|
188 |
* is referenced is mapped to a physical memory object;
|
|
189 |
* if so it prepares the memory, updates the request with physical memory information
|
|
190 |
* and issues the request.
|
|
191 |
* If a request does not make use of physical memory or is not configured correctly the
|
|
192 |
* request is passed through without modification.
|
|
193 |
*/
|
|
194 |
TInt DDmaHelper::SendReceive(TLocDrvRequest& aReq, TLinAddr aLinAddress)
	{
	DPrimaryMediaBase& primaryMedia = *aReq.Drive()->iPrimaryMedia;

	// Only reads and writes can use the physical-address path; everything
	// else is forwarded unmodified to the media thread's message queue.
	TInt reqId = aReq.Id();
	if (reqId != DLocalDrive::ERead && reqId != DLocalDrive::EWrite)
		return aReq.SendReceive(&primaryMedia.iMsgQ);

	// Pass through requests that are too large to fit in 32 bits or too
	// small to contain even one whole media block.
	if ((I64HIGH(aReq.Length()) > 0) || (aReq.Length() < iMediaBlockSize))
		return aReq.SendReceive(&primaryMedia.iMsgQ);

	// If more than one user thread tries to access the drive, then bail out as there is
	// only one DDmaHelper object per TLocDrv. Normally this shouldn't ever happen unless
	// a client app accesses the drive directly using TBusLocalDrive or the file system is
	// asynchronous (i.e. there is a separate drive thread) but the file server message is
	// flagged as synchronous - e.g. EFsDrive
	if (TInt(__e32_atomic_add_ord32(&iLockCount, 1)) > 0)	// busy ?
		{
		__KTRACE_DMA(Kern::Printf("-PHYSADDR: BUSY"));
		// Undo the increment and fall back to the normal (IPC) path.
		__e32_atomic_add_ord32(&iLockCount, TUint32(-1));
		return aReq.SendReceive(&primaryMedia.iMsgQ);
		}

	// make a copy of the request parameters: the loop below rewrites
	// iReq's position/length/offset per fragment, so the client's original
	// values must be preserved here.
	iMemoryType = EUnknown;
	iReq = &aReq;
	iReqId = reqId;

	iReqPosClient = iReq->Pos();

	iReqLenClient = I64LOW(iReq->Length());

	iReqRemoteDesOffset = iReq->RemoteDesOffset();
	iReqFlags = iReq->Flags();

	iRemoteThread = iReq->RemoteThread();
	iCurrentThread = &Kern::CurrentThread();
	// The descriptor lives in the remote (file server client) thread if
	// there is one, otherwise in the calling thread.
	iOwningThread = iRemoteThread ? iRemoteThread : iCurrentThread;

	iChunk = NULL;
	iChunkOffset = 0;
	iLinAddressUser = NULL;
	iLenConsumed = 0;

	// point to the start of the descriptor
	iLinAddressUser = aLinAddress - iReqRemoteDesOffset;

	// Need to check descriptors from both direct Clients (i.e. file cache, RemoteThread == NULL )
	// and Remote Server Clients (file server clients, RemoteThread != NULL)
	// Shared Memory can potentially be used by both remote server and direct clients
	NKern::ThreadEnterCS();
	iChunk = Kern::OpenSharedChunk(iOwningThread, (const TAny*) iLinAddressUser, ETrue, iChunkOffset);
	NKern::ThreadLeaveCS();

	// Fragment loop: each iteration prepares (pins/maps + cache-syncs) one
	// fragment, dispatches it to the media driver, then cleans up.
	TInt fragments = 0;
	TInt r;
	do
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:SendReceive() iReqLen %d; iLenConsumed %d; fragments %d",iReqLen, iLenConsumed, fragments));
		r = RequestStart();
		if (r != KErrNone)
			{
			if (iChunk)
				{
				NKern::ThreadEnterCS();
				Kern::ChunkClose(iChunk);
				iChunk = NULL;
				NKern::ThreadLeaveCS();
				}
			__KTRACE_DMA(Kern::Printf("<PHYSADDR:SendReceive()- r:%d",r));
			iMemoryType = EUnknown;
			// Drop the busy lock before returning.
			__e32_atomic_add_ord32(&iLockCount, TUint32(-1));
			// If nothing has been transferred yet, fall back to the normal
			// IPC path; otherwise report the error for the partial transfer.
			return fragments ? r : iReq->SendReceive(&primaryMedia.iMsgQ);
			}
		else
			{
			// Tell the media driver the request carries physical addresses.
			iReq->Flags() |= TLocDrvRequest::EPhysAddr;
			}

		__KTRACE_DMA(Kern::Printf("-PHYSADDR:SendReceive() rThread %08X pos %08lX, len %d addr %08X off %08X",
				iRemoteThread, iReq->Pos(), I64LOW(iReq->Length()), iLinAddressUser, iReqRemoteDesOffset));

		__ASSERT_DEBUG(iReq->Length() == FragLength(), PHYSADDR_FAULT());
		__ASSERT_DEBUG(iReq->Length() != 0, PHYSADDR_FAULT());

		// reinstate iValue in case overwritten by DMediaPagingDevice::CompleteRequest()
		iReq->iValue = iReqId;

		r = iReq->SendReceive(&primaryMedia.iMsgQ);

		// The media driver could potentially choose to deal with the request
		// without accessing physical memory (e.g. if the data is already cached).
		iLenConsumed += iFragLenRemaining;

		RequestEnd();

		ResetPageLists();

		fragments++;

		}
	while(r == KErrNone && LengthRemaining() > 0);

	if (iChunk)
		{
		NKern::ThreadEnterCS();
		Kern::ChunkClose(iChunk);
		iChunk = NULL;
		NKern::ThreadLeaveCS();
		}

	// Set remote descriptor length to iReqLenClient
	if (iReqId == DLocalDrive::ERead && r == KErrNone)
		r = UpdateRemoteDescriptorLength(iReqLenClient);

	__KTRACE_DMA(Kern::Printf("<PHYSADDR:SendReceive()"));

	iMemoryType = EUnknown;

	// Release the single-client busy lock taken at the top of this function.
	__e32_atomic_add_ord32(&iLockCount, TUint32(-1));
	return r;
	}
|
|
316 |
|
|
317 |
|
|
318 |
/**
|
|
319 |
* Each read/write request is split into one or more DMA "fragments".
|
|
320 |
* The maximum size of each fragment depends on the size of iPageArray[].
|
|
321 |
 * Subsequent calls to RequestStart may be required to complete a request.
|
|
322 |
*
|
|
323 |
* The physical address is checked for DMA alignment or the possibility of
|
|
324 |
 * eventual alignment due to mis-aligned start/end media blocks.
|
|
325 |
*
|
|
326 |
* A DMA "fragment" can be split over a number of pages as follows :
|
|
327 |
* ----------------------------------------------------------
|
|
328 |
* | 4K | 4K | 4K | 4K |
|
|
329 |
* ----------------------------------------------------------
|
|
330 |
* ******************************** : region to be read
|
|
331 |
* <----------- iFragLen ----------->
|
|
332 |
*
|
|
333 |
* The pages may not be physically contiguous; if they are not,
|
|
334 |
* then they are supplied to the media driver one contiguous
|
|
335 |
 * segment at a time by GetPhysicalAddress()
|
|
336 |
**/
|
|
337 |
TInt DDmaHelper::RequestStart()
	{
	__KTRACE_DMA(Kern::Printf(">PHYSADDR:RequestStart()"));
	iIndex = 0;

	TLinAddr startAddr = LinAddress();
	// Media position of this fragment: client start plus data already done.
	TInt64 startPos = iReqPosClient + iLenConsumed;
	TInt mediaBlockOffset = BlockOffset(startPos);
	TInt addrBlockOffset = BlockOffset(startAddr);
	// Fragment length is capped by the size of iPageArray (MaxFragLength).
	TInt length = Min(LengthRemaining(), MaxFragLength());

	iPageArrayCount = iPageListCount = 0;

	// Count the pages spanned by [startAddr, startAddr+length):
	// lastPageStart is the page base one past the end; the "+1" before the
	// shift is absorbed by the shift since the difference is page-aligned.
	TLinAddr firstPageStart = PageAlign(startAddr);
	TLinAddr lastPageStart = PageAlign(startAddr + length + iPageSize - 1);
	iPageArrayCount = (lastPageStart - firstPageStart + 1) >> iPageSizeLog2;

	iMemoryType = EUnknown;
	iPhysAddr = KPhysMemFragmented; // Default - Mark memory as fragmented

	//*************************************
	// Check Physical Page Alignment!!
	//*************************************
	if (!IsBlockAligned(startPos))
		{
		// Will DMA align at next block alignment? such that DMA can be used
		// NOTE(review): 'ofset' is computed but never used below - looks
		// like dead code left from an earlier alignment check; confirm.
		TInt ofset = I64LOW((startPos + iMediaBlockSize) & (iMediaBlockSize-1));
		ofset = iMediaBlockSize - ofset;

		if (!IsDmaAligned(startAddr))
			{
			__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - not DMA Aligned pos 0x%x addr 0x%x)",I64LOW(startPos), startAddr));
			return KErrNotSupported;
			}
		}
	else
		{ //block aligned!
		if (!IsDmaAligned(startAddr))
			{
			__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - not DMA Aligned (0x%x)",startAddr));
			return KErrNotSupported;
			}
		}

	//************************************************
	// Check for possible striping of RAM pages vs Media blocks
	// i.e. Media blocks which may straddle 2 non contiguous pages.
	//************************************************
	if (mediaBlockOffset != addrBlockOffset)
		{
		__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - Frag / not block aligned: pos 0x%x addr 0x%x", I64LOW(startPos), startAddr));
		return KErrNotSupported;
		}

	//************************************************
	// Is it File Server Cache request ?
	//************************************************
	if (iChunk == NULL &&				// Not Shared memory
		iRemoteThread == NULL &&		// Direct Client Request
		IsPageAligned(startAddr) &&
		IsBlockAligned(startPos) &&
		(iPageArrayCount > 0) )
		{
		TLinAddr firstPageAddr = PageAlign(startAddr); //ensure that it is page aligned.

		// Pin (or prepare) the pages so defragmentation cannot move them
		// while DMA is in progress; fills iPageArray with physical pages.
		TInt r = KErrNone;
		if (iPhysPinningAvailable)
			{
			// Writes to the media only read the memory, so it may be pinned read-only.
			TBool readOnlyMem = (iReqId == DLocalDrive::EWrite);
			r =  Kern::PinPhysicalMemory(iPhysicalPinObject,  firstPageAddr, iPageArrayCount << iPageSizeLog2,
					readOnlyMem, iPhysAddr, iPageArray, iMapAttr, iPageColour, iCurrentThread);
			}
		else
			{
			NKern::ThreadEnterCS();
			r = Kern::PrepareMemoryForDMA(iCurrentThread, (void*)firstPageAddr, iPageArrayCount << iPageSizeLog2, iPageArray);
			NKern::ThreadLeaveCS();
			}
		if (r != KErrNone)
			return r;

		iMemoryType = EFileServerChunk;

		__KTRACE_DMA(Kern::Printf("-PHYSADDR:RequestStart() - EFileServerChunk"));
		}
	//****************************
	// Is it shared chunk ?
	//****************************
	else if (iChunk)
		{
		// calculate chunk offset of start of first page
		TInt offset = iChunkOffset + iReqRemoteDesOffset+ iLenConsumed;

		// On success iPhysAddr holds the base address if the region is
		// contiguous, otherwise it remains/becomes the fragmented marker
		// and iPageArray holds the individual page addresses.
		TInt r = Kern::ChunkPhysicalAddress(iChunk, offset, length, iLinAddressKernel, iMapAttr, iPhysAddr, iPageArray);

		if (r < KErrNone)
			return r;	// 0 = Contiguous Memory, 1 = Fragmented/Dis-Contiguous Memory

		iMemoryType = ESharedChunk;

		__KTRACE_DMA(Kern::Printf("-PHYSADDR:RequestStart() - ESharedChunk"));
		}
	else
		{
		// Neither a file server cache buffer nor a shared chunk - the
		// caller falls back to the normal (IPC) transfer path.
		__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - EUnknown"));
		return KErrNotFound;
		}

	SetFragLength(length);

	//************************************************
	// Build Contiguous Page list
	//************************************************
	BuildPageList();

	//************************************************
	// Set up request parameters for this fragment
	//************************************************
	iReq->Length() = MAKE_TINT64(0, length);
	iReq->Pos() = iReqPosClient + iLenConsumed;
	iReq->RemoteDesOffset() = iReqRemoteDesOffset + iLenConsumed;
	// restore EAdjusted flag to ensure iReq->Pos() is adjusted correctly
	iReq->Flags()&= ~TLocDrvRequest::EAdjusted;
	iReq->Flags()|= (iReqFlags & TLocDrvRequest::EAdjusted);

	//************************************************
	// Sync memory
	//************************************************
	__KTRACE_DMA(Kern::Printf(">SYNC-PHYSADDR:addr 0x%x len %d", startAddr, length));

	// Only sync whole blocks: it is assumed that the media driver will transfer
	// partial start and end blocks without DMA

	TInt startBlockPartialLen = IsBlockAligned(startPos) ? 0 : iMediaBlockSize - BlockOffset(startPos);
	TInt blockLen = (TInt) BlockAlign(length - startBlockPartialLen);

	if (iReqId == DLocalDrive::EWrite)
		{
		if (iMemoryType == ESharedChunk)
			{
			Cache::SyncMemoryBeforeDmaWrite(iLinAddressKernel+startBlockPartialLen, blockLen, iMapAttr);
			}
		else // (iMemoryType == EFileServerChunk)
			{
			if (iPhysPinningAvailable)
				Cache::SyncPhysicalMemoryBeforeDmaWrite(iPageArray, iPageColour, startBlockPartialLen, blockLen, iMapAttr);
			else
				Cache::SyncMemoryBeforeDmaWrite(startAddr+startBlockPartialLen, blockLen);
			}
		}
	else
		{
		// Read: invalidate/prepare the whole fragment before DMA fills it.
		if (iMemoryType == ESharedChunk)
			Cache::SyncMemoryBeforeDmaRead(iLinAddressKernel, length, iMapAttr);
		else // (iMemoryType == EFileServerChunk)
			{
			if (iPhysPinningAvailable)
				Cache::SyncPhysicalMemoryBeforeDmaRead(iPageArray, iPageColour, 0, length, iMapAttr);
			else
				Cache::SyncMemoryBeforeDmaRead(startAddr, length);
			}
		}

	__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart()"));
	return KErrNone;
	}
|
|
503 |
|
|
504 |
/**
|
|
505 |
 * After read requests this method synchronises the current physical memory in use.
|
|
506 |
*/
|
|
507 |
void DDmaHelper::RequestEnd()
	{
	__KTRACE_DMA(Kern::Printf(">PHYSADDR:RequestEnd()"));

	__ASSERT_DEBUG(iReqId == DLocalDrive::ERead || iReqId == DLocalDrive::EWrite, PHYSADDR_FAULT());
	__ASSERT_DEBUG(iMemoryType == ESharedChunk || iMemoryType == EFileServerChunk, PHYSADDR_FAULT());

	TInt length = FragLength();	// len of data just transferred
	// LinAddress() has advanced past the fragment; step back to its start.
	TLinAddr startAddr = LinAddress() - length;

	// Sync the memory : but not if the media driver has decided to transfer ALL the data using IPC rather than DMA.
	// It is assumed that the media driver will transfer partial start & end blocks using IPC, but it may also choose
	// to use IPC for the ENTIRE fragment when read/writing at the end of the media (see medmmc.cpp)
	// (iFragLenRemaining < length implies the driver consumed at least one
	// physical fragment via GetPhysicalAddress(), i.e. DMA was used.)
	if (iFragLenRemaining < length && iReqId == DLocalDrive::ERead)
		{
		TInt64 startPos = iReq->Pos();
		// Exclude the partial leading block (transferred by IPC, not DMA)
		// and truncate to whole blocks, mirroring RequestStart()'s sync.
		TInt startBlockPartialLen = IsBlockAligned(startPos) ? 0 : iMediaBlockSize - BlockOffset(startPos);
		TInt blockLen = (TInt) BlockAlign(length - startBlockPartialLen);

		if (iMemoryType == ESharedChunk)
			{
			Cache::SyncMemoryAfterDmaRead(iLinAddressKernel + startBlockPartialLen, blockLen);
			}
		else // (iMemoryType == EFileServerChunk)
			{
			if (iPhysPinningAvailable)
				Cache::SyncPhysicalMemoryAfterDmaRead(iPageArray, iPageColour, startBlockPartialLen, blockLen, iMapAttr);
			else
				Cache::SyncMemoryAfterDmaRead(startAddr + startBlockPartialLen, blockLen);
			}

		}
	// Unpin / release the pages pinned in RequestStart() (no-op for shared chunks).
	ReleasePages(PageAlign(startAddr));
	}
|
|
541 |
|
|
542 |
/**
|
|
543 |
* For File Server chunks this method releases the current physical memory in use.
|
|
544 |
*
|
|
545 |
* @see Kern::ReleaseMemoryFromDMA()
|
|
546 |
*/
|
|
547 |
/**
 * For File Server chunks this method releases the current physical memory in use.
 *
 * @param aAddr Page-aligned start address of the region to release
 *              (used only on memory models without physical pinning).
 *
 * @see Kern::ReleaseMemoryFromDMA()
 */
void DDmaHelper::ReleasePages(TLinAddr aAddr)
	{
	// Shared chunks are not pinned per-request, so nothing to undo here.
	if (iMemoryType != EFileServerChunk)
		return;

	__KTRACE_DMA(Kern::Printf(">PHYSADDR():ReleasePages thread (0x%x) aAddr(0x%08x) size(%d) iPageArray(0x%x)",iCurrentThread, aAddr, (iPageArrayCount << iPageSizeLog2), iPageArray));

	TInt r;
	if (iPhysPinningAvailable)
		{
		// Flexible memory model: simply unpin via the pin object.
		r = Kern::UnpinPhysicalMemory(iPhysicalPinObject);
		}
	else
		{
		// Legacy memory models: release the pages prepared for DMA.
		NKern::ThreadEnterCS();
		r = Kern::ReleaseMemoryFromDMA(iCurrentThread, (void*) aAddr, iPageArrayCount << iPageSizeLog2, iPageArray);
		NKern::ThreadLeaveCS();
		}

	__ASSERT_ALWAYS(r == KErrNone, PHYSADDR_FAULT());
	}
|
|
567 |
|
|
568 |
/**
|
|
569 |
* Utility method which examines the page array, compiling adjacent pages into contiguous fragments
|
|
570 |
* and populating iPageList with said fragments.
|
|
571 |
*/
|
|
572 |
void DDmaHelper::BuildPageList()
	{
	iPageListCount = 0;

	if (iPhysAddr != KPhysMemFragmented)
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:BuildPageList() - Contiguous Memory"));
		// Only one entry required.
		iPageList[0].iAddress = iPhysAddr;
		iPageList[0].iLength = FragLength();
		iPageListCount = 1;
		}
	else
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:BuildPageList() - Dis-Contiguous Memory"));
		TInt offset;

		// First entry may start part-way into the first physical page:
		// the in-page offset is derived from the chunk/descriptor offsets.
		// NOTE(review): for EFileServerChunk iChunkOffset is 0, so this
		// assumes the page offset of (RemoteDesOffset + LenConsumed)
		// matches that of the linear start address - confirm.
		offset = PageOffset(iChunkOffset + iReqRemoteDesOffset+ iLenConsumed);
		iPageList[0].iAddress = iPageArray[0]+offset;
		iPageList[0].iLength = iPageSize-offset;

		TInt lengthRemaining = FragLength() - iPageList[0].iLength;

		// Walk the remaining pages, merging physically adjacent ones into
		// the current iPageList entry and starting a new entry otherwise.
		TInt i =1;
		for( ; i < iPageArrayCount; i++)
			{
			//Check if RAM pages are physically adjacent
			if ((iPageArray[i-1] + PageSize()) == iPageArray[i])
				{
				// Adjacent pages - just add length
				iPageList[iPageListCount].iLength += PageSize();
				}
			else
				{
				// Not Adjacent, start new Memory fragment
				iPageListCount++;
				iPageList[iPageListCount].iAddress = iPageArray[i];
				iPageList[iPageListCount].iLength = iPageSize;
				}

			lengthRemaining -= PageSize();
			if (lengthRemaining < 0)
				{
				// Last page, re-adjust length for odd remainder.
				iPageList[iPageListCount].iLength += lengthRemaining;
				break;
				}
			}

		// iPageListCount held the index of the last entry; convert to a count.
		iPageListCount++;
		}

//#ifdef __DEBUG_DMASUP__
//	for (TInt m=0; m<iPageListCount; m++)
//		__KTRACE_DMA(Kern::Printf("-PHYSADDR:BuildPageList() [%d]: %08X l:%d", m, iPageList[m].iAddress, iPageList[m].iLength));
//#endif
	}
|
|
629 |
|
|
630 |
|
|
631 |
/**
|
|
632 |
* Returns Address and Length of next contiguous Physical memory fragment
|
|
633 |
*
|
|
634 |
* @param aAddr On success, populated with the Physical Address of the next fragment.
|
|
635 |
* @param aLen On success, populated with the length in bytes of the next fragment.
|
|
636 |
*
|
|
637 |
* @return KErrNone, if successful;
|
|
638 |
* KErrNoMemory, if no more memory fragments left.
|
|
639 |
*/
|
|
640 |
/**
 * Returns the address and length of the next contiguous physical
 * memory fragment of the current transfer, advancing the internal
 * fragment index and consumed-length counters.
 *
 * @param aAddr On success, set to the physical address of the next fragment;
 *              set to 0 when no fragments remain.
 * @param aLen  On success, set to the fragment length in bytes; 0 otherwise.
 *
 * @return KErrNone if a fragment was returned;
 *         KErrGeneral when the page list is exhausted.
 */
TInt DDmaHelper::GetPhysicalAddress(TPhysAddr& aAddr, TInt& aLen)
	{
	// Page list exhausted? Report an empty fragment and fail.
	if (iIndex >= iPageListCount)
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:GetPhysD() [%d], PageListCount:%d", iIndex, iPageListCount));
		aAddr = 0;
		aLen = 0;
		return KErrGeneral;
		}

	// Hand out the current fragment and account for it.
	const TPageList& frag = iPageList[iIndex];
	aAddr = frag.iAddress;
	aLen = frag.iLength;
	iLenConsumed += aLen;
	iFragLenRemaining -= aLen;

	__KTRACE_DMA(Kern::Printf(">PHYSADDR:GetPhysD() [%d] addr:0x%08X, l:%d; Used:%d, Left:%d", iIndex, aAddr, aLen, iLenConsumed, iFragLenRemaining));
	__ASSERT_DEBUG(aLen >= 0, PHYSADDR_FAULT());

	++iIndex;	//Move index to next page

	return KErrNone;
	}
|
|
662 |
|
|
663 |
|
|
664 |
#ifdef __DEMAND_PAGING__
|
|
665 |
/**
|
|
666 |
* Returns Address and Length of next contiguous Physical memory.
|
|
667 |
* Static function specifically for Demand Paging support
|
|
668 |
*
|
|
669 |
* @param aReq TLocDrvRequest from which physical
|
|
670 |
* @param aAddr Populated with the Physical Address of the Request aReq.
|
|
671 |
* @param aLen Populated with the length in bytes of the memory.
|
|
672 |
*
|
|
673 |
* @return KErrNone
|
|
674 |
*/
|
|
675 |
TInt DDmaHelper::GetPhysicalAddress(TLocDrvRequest& aReq, TPhysAddr& aAddr, TInt& aLen)
	{
	// Paging requests must carry a plain remote descriptor address,
	// not a TClientBuffer.
	__ASSERT_DEBUG( (aReq.Flags() & TLocDrvRequest::ETClientBuffer) == 0, PHYSADDR_FAULT());
	TLinAddr linAddr = (TLinAddr) aReq.RemoteDes();
	// 'offset' aliases the request's stored offset, so advancing it below
	// persists progress between successive calls for the same request.
	TInt& offset = aReq.RemoteDesOffset();
	TLinAddr currLinAddr = linAddr + offset;
	TInt reqLen = I64LOW(aReq.Length());
	__ASSERT_DEBUG(I64HIGH(aReq.Length()) == 0, PHYSADDR_FAULT());

	aAddr = Epoc::LinearToPhysical(currLinAddr);

	// Set the initial length to be the length remaining in this page or the request length (whichever is shorter).
	// If there are subsequent pages, we then need to determine whether they are contiguous
	aLen = Min( (TInt) (PageAlign(currLinAddr+iPageSize) - currLinAddr), reqLen - offset);

	__ASSERT_DEBUG(aLen > 0, PHYSADDR_FAULT());

	TPhysAddr currPhysPageAddr = PageAlign((TLinAddr) aAddr);

	offset+= aLen;


	// Extend the run while the next linear page maps to the physically
	// adjacent page; stop at the first discontinuity (it will be picked
	// up by the next call, since 'offset' has been advanced).
	while (offset < reqLen)
		{
		TPhysAddr nextPhysPageAddr = Epoc::LinearToPhysical(linAddr + offset);
		// offset is page-aligned relative to linAddr's page here, so the
		// translated physical address should be a page base.
		__ASSERT_DEBUG(PageOffset((TLinAddr) nextPhysPageAddr) == 0, PHYSADDR_FAULT());

		if (nextPhysPageAddr != currPhysPageAddr + iPageSize)
			break;

		currPhysPageAddr = nextPhysPageAddr;

		TInt len = Min(iPageSize, reqLen - offset);
		offset+= len;
		aLen+= len;
		}


	__KTRACE_DMA(Kern::Printf(">PHYSADDR:DP:GetPhysS(), linAddr %08X, physAddr %08X, len %x reqLen %x", linAddr + offset, aAddr, aLen, reqLen));

	return KErrNone;
	}
|
|
717 |
#endif // (__DEMAND_PAGING__)
|
|
718 |
|
|
719 |
|
|
720 |
/**
|
|
721 |
* Modifies the current requests remote descriptor length
|
|
722 |
*
|
|
723 |
* @param aLength Length in bytes to which the descriptor is to be set.
|
|
724 |
*
|
|
725 |
* @return KErrNone, if successful;
|
|
726 |
* KErrBadDescriptor, if descriptor is corrupted;
|
|
727 |
* otherwise one of the other system wide error codes.
|
|
728 |
*/
|
|
729 |
|
|
730 |
/**
 * Modifies the current request's remote descriptor length.
 *
 * @param aLength Length in bytes to which the descriptor is to be set.
 *
 * @return KErrNone, if successful;
 *         KErrBadDescriptor, if the descriptor is corrupted;
 *         otherwise one of the other system wide error codes.
 */
TInt DDmaHelper::UpdateRemoteDescriptorLength(TInt aLength)
	{
	__KTRACE_DMA(Kern::Printf(">PHYSADDR:UpDesLen(%d)",aLength));

	// The request id was overwritten by the KErrNone completion code;
	// put it back to stop the ASSERT in WriteRemote firing.
	iReq->Id() = DLocalDrive::ERead;

	// Put the caller's original descriptor offset back in place.
	iReq->RemoteDesOffset() = iReqRemoteDesOffset;

	// Writing a zero-length descriptor at position aLength forces the
	// remote descriptor's length to be updated to aLength.
	TPtrC8 zeroDes(NULL, 0);
	const TInt result = iReq->WriteRemote(&zeroDes, aLength);

	// Reinstate the completion code.
	iReq->iValue = KErrNone;

	return result;
	}
|
|
749 |
|