|
1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of the License "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // |
|
15 |
|
16 #include "memmodel.h" |
|
17 #include "mm.h" |
|
18 #include "mmu.h" |
|
19 #include "mobject.h" |
|
20 #include "mmapping.h" |
|
21 #include "mmanager.h" |
|
22 #include "mpdalloc.h" |
|
23 #include "mptalloc.h" |
|
24 #include "mpager.h" |
|
25 #include "maddressspace.h" |
|
26 |
|
27 |
|
28 |
|
29 |
|
30 // |
|
31 // DMutexPool |
|
32 // |
|
33 |
|
34 DMutexPool::~DMutexPool() |
|
35 { |
|
36 TUint i; |
|
37 for(i=0; i<iCount; ++i) |
|
38 { |
|
39 DMutex* mutex = iMembers[i].iMutex; |
|
40 if(mutex) |
|
41 mutex->Close(0); |
|
42 } |
|
43 Kern::Free(iMembers); |
|
44 } |
|
45 |
|
46 |
|
47 TInt DMutexPool::Create(TUint aCount, const TDesC* aName, TUint aOrder) |
|
48 { |
|
49 if(aCount>EMaxPoolSize) |
|
50 return KErrTooBig; |
|
51 |
|
52 iMembers = (SMember*)Kern::AllocZ(aCount*sizeof(SMember)); |
|
53 if(!iMembers) |
|
54 return KErrNoMemory; |
|
55 |
|
56 iCount = aCount; |
|
57 |
|
58 TInt r = KErrNone; |
|
59 TUint i; |
|
60 for(i=0; i<aCount; ++i) |
|
61 { |
|
62 TKName name; |
|
63 if(aName) |
|
64 { |
|
65 name = *aName; |
|
66 name.AppendNum(i); |
|
67 } |
|
		r = K::MutexCreate(iMembers[i].iMutex, name, NULL, EFalse, aOrder);
		if(r!=KErrNone)
			break;
|
71 } |
|
72 |
|
73 return r; |
|
74 } |
|
75 |
|
76 |
|
77 /** |
|
78 @class DMutexPool |
|
79 @details |
|
80 |
|
The cookie used for dynamically assigned mutexes is broken into three bit fields:
- Bit 0 is always set, to distinguish the cookie from a genuine DMutex*.
- Bits 1 through #KMutexPoolIndexBits contain the index of the assigned
  mutex within DMutexPool::iMembers.
- Bits (#KMutexPoolIndexBits+1) through 31 hold the number of threads waiting
  on this particular mutex assignment. When this count reaches zero, the mutex can
  be unassigned.
|
88 */ |
|
89 |
|
90 /** |
|
91 Number of bits used to contain the index value of a dynamically assigned pool mutex. |
|
92 */ |
|
93 const TUint KMutexPoolIndexBits = 7; |
|
94 |
|
95 const TUint KMutexPoolIndexMask = ((1<<KMutexPoolIndexBits)-1)<<1; |
|
96 const TUint KMutexPoolWaitCountIncrement = 1<<(KMutexPoolIndexBits+1); |
|
97 |
|
98 __ASSERT_COMPILE(DMutexPool::EMaxPoolSize<=TUint(KMutexPoolIndexMask/2+1)); // required for algorithm correctness |
|
99 |
|
100 __ASSERT_COMPILE(DMutexPool::EMaxPoolSize<=64); // required to avoid excessive system lock hold time |
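// The following non-compiled sketch shows how a cookie laid out as described above decodes
// into its member index and wait count. The helper names are illustrative only and are not
// part of DMutexPool.
#if 0 // illustrative sketch, not built
// Extract the index of the assigned mutex within DMutexPool::iMembers (bits 1..KMutexPoolIndexBits).
static inline TUint PoolMutexIndex(TUintPtr aCookie)
	{
	return (aCookie&KMutexPoolIndexMask)>>1;
	}

// Extract the number of threads waiting on this assignment (the bits above the index field).
static inline TUint PoolMutexWaitCount(TUintPtr aCookie)
	{
	return aCookie>>(KMutexPoolIndexBits+1);
	}

// Example: member 3 with two waiters is encoded as (2<<(KMutexPoolIndexBits+1)) | (3<<1) | 1.
#endif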
|
101 |
|
102 |
|
103 void DMutexPool::Wait(DMutex*& aMutexRef) |
|
104 { |
|
105 NKern::LockSystem(); |
|
106 |
|
107 TUintPtr poolMutex = (TUintPtr)aMutexRef; |
|
108 if(!poolMutex) |
|
109 { |
|
110 // try and find a free mutex, else use the next one... |
|
111 TUint next = iNext; |
|
112 do |
|
113 { |
|
114 if(iMembers[next].iUseCount==0) |
|
115 break; |
|
116 if(++next>=iCount) |
|
117 next = 0; |
|
118 } |
|
119 while(next!=iNext); |
|
120 // use found mutex... |
|
121 ++iMembers[next].iUseCount; |
|
122 poolMutex = (next*2)+1; // mutex index*2 | 1 |
|
123 // update next... |
|
124 if(++next>=iCount) |
|
125 next = 0; |
|
126 iNext = next; |
|
127 } |
|
128 |
|
129 DMutex* mutex = (DMutex*)poolMutex; |
|
130 if(poolMutex&1) |
|
131 { |
|
132 // mutex is a pool mutex, get pointer, and update wait count... |
|
133 SMember* member = &iMembers[(poolMutex&KMutexPoolIndexMask)>>1]; |
|
134 mutex = member->iMutex; |
|
135 poolMutex += KMutexPoolWaitCountIncrement; |
|
136 __NK_ASSERT_ALWAYS(poolMutex>=KMutexPoolWaitCountIncrement); |
|
137 aMutexRef = (DMutex*)poolMutex; |
|
138 } |
|
139 |
|
140 mutex->Wait(); |
|
141 |
|
142 NKern::UnlockSystem(); |
|
143 } |
|
144 |
|
145 |
|
146 void DMutexPool::Signal(DMutex*& aMutexRef) |
|
147 { |
|
148 NKern::LockSystem(); |
|
149 |
|
150 TUintPtr poolMutex = (TUintPtr)aMutexRef; |
|
151 __NK_ASSERT_ALWAYS(poolMutex); |
|
152 |
|
153 DMutex* mutex = (DMutex*)poolMutex; |
|
154 |
|
155 if(poolMutex&1) |
|
156 { |
|
157 // mutex is a pool mutex, get pointer, and update wait count... |
|
158 SMember* member = &iMembers[(poolMutex&KMutexPoolIndexMask)>>1]; |
|
159 mutex = member->iMutex; |
|
160 __NK_ASSERT_ALWAYS(poolMutex>=KMutexPoolWaitCountIncrement); |
|
161 poolMutex -= KMutexPoolWaitCountIncrement; |
|
162 if(poolMutex<KMutexPoolWaitCountIncrement) |
|
163 { |
|
164 --member->iUseCount; |
|
165 poolMutex = 0; |
|
166 } |
|
167 aMutexRef = (DMutex*)poolMutex; |
|
168 } |
|
169 |
|
170 mutex->Signal(); |
|
171 } |
|
172 |
|
173 |
|
174 TBool DMutexPool::IsHeld(DMutex*& aMutexRef) |
|
175 { |
|
176 TBool held = false; |
|
177 NKern::LockSystem(); |
|
178 TUintPtr poolMutex = (TUintPtr)aMutexRef; |
|
179 if(poolMutex) |
|
180 { |
|
181 DMutex* mutex = (DMutex*)poolMutex; |
|
182 if(poolMutex&1) |
|
183 { |
|
184 SMember* member = &iMembers[(poolMutex&KMutexPoolIndexMask)>>1]; |
|
185 mutex = member->iMutex; |
|
186 } |
|
187 held = mutex->iCleanup.iThread==&Kern::CurrentThread(); |
|
188 } |
|
189 NKern::UnlockSystem(); |
|
190 return held; |
|
191 } |
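// Minimal usage sketch (not built): a pool is created once during boot, then callers serialise
// on a per-object DMutex* reference via Wait()/Signal(). The pool instance, the lock reference
// and the chosen mutex order value below are placeholders for illustration.
#if 0 // illustrative sketch, not built
DMutexPool ExamplePool;        // hypothetical pool instance
DMutex* ExampleLock = NULL;    // per-object mutex reference, assigned dynamically by the pool

void ExampleInit()
	{
	_LIT(KExampleMutexName,"ExampleMutex");
	TInt r = ExamplePool.Create(4, &KExampleMutexName, KMutexOrdMemoryObject); // order value is a placeholder
	__NK_ASSERT_ALWAYS(r==KErrNone);
	}

void ExampleOperation()
	{
	ExamplePool.Wait(ExampleLock);                       // assigns a pool mutex on first use
	__NK_ASSERT_DEBUG(ExamplePool.IsHeld(ExampleLock));
	// ...perform work serialised by the assigned mutex...
	ExamplePool.Signal(ExampleLock);                     // unassigns the mutex once no waiters remain
	}
#endif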
|
192 |
|
193 |
|
194 |
|
195 // |
|
196 // DReferenceCountedObject |
|
197 // |
|
198 |
|
199 DReferenceCountedObject::~DReferenceCountedObject() |
|
200 { |
|
201 __NK_ASSERT_DEBUG(iReferenceCount==0); |
|
202 } |
|
203 |
|
204 |
|
205 void DReferenceCountedObject::Open() |
|
206 { |
|
207 __ASSERT_CRITICAL |
|
208 TBool ok = __e32_atomic_tas_ord32(&iReferenceCount, 1, 1, 0); |
|
209 __NK_ASSERT_ALWAYS(ok); |
|
210 } |
|
211 |
|
212 |
|
213 TBool DReferenceCountedObject::TryOpen() |
|
214 { |
|
215 __ASSERT_CRITICAL |
|
216 TBool ok = __e32_atomic_tas_ord32(&iReferenceCount, 1, 1, 0); |
|
217 return ok; |
|
218 } |
|
219 |
|
220 |
|
221 TBool DReferenceCountedObject::CheckCloseIsSafe() |
|
222 { |
|
223 __ASSERT_CRITICAL |
|
224 #ifdef _DEBUG |
|
225 NFastMutex* fm = NKern::HeldFastMutex(); |
|
226 if(fm) |
|
227 { |
|
228 Kern::Printf("DReferenceCountedObject[0x%08x]::Close() fast mutex violation %M",this,fm); |
|
229 return false; |
|
230 } |
|
231 SDblQue& ml = TheCurrentThread->iMutexList; |
|
232 if(!ml.IsEmpty()) |
|
233 { |
|
234 DMutex* m = _LOFF(ml.First(), DMutex, iOrderLink); |
|
235 if(m->iOrder<KMutexOrdKernelHeap) |
|
236 { |
|
237 Kern::Printf("DReferenceCountedObject[0x%08x]::Close() mutex order violation holding mutex %O",this,m); |
|
238 return false; |
|
239 } |
|
240 } |
|
241 #endif |
|
242 return true; |
|
243 } |
|
244 |
|
245 |
|
246 TBool DReferenceCountedObject::CheckAsyncCloseIsSafe() |
|
247 { |
|
248 __ASSERT_CRITICAL |
|
249 #ifdef _DEBUG |
|
250 NFastMutex* fm = NKern::HeldFastMutex(); |
|
251 if(fm) |
|
252 { |
|
253 Kern::Printf("DReferenceCountedObject[0x%08x]::AsyncClose() fast mutex violation %M",this,fm); |
|
254 return false; |
|
255 } |
|
256 #endif |
|
257 return true; |
|
258 } |
|
259 |
|
260 |
|
261 void DReferenceCountedObject::Close() |
|
262 { |
|
263 __ASSERT_CRITICAL |
|
264 __NK_ASSERT_DEBUG(CheckCloseIsSafe()); |
|
265 __NK_ASSERT_DEBUG(iReferenceCount>0); |
|
266 if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) == 1) |
|
267 delete this; |
|
268 } |
|
269 |
|
270 |
|
271 void DReferenceCountedObject::AsyncClose() |
|
272 { |
|
273 __ASSERT_CRITICAL |
|
274 __NK_ASSERT_DEBUG(CheckAsyncCloseIsSafe()); |
|
275 __NK_ASSERT_DEBUG(iReferenceCount>0); |
|
276 if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) == 1) |
|
277 AsyncDelete(); |
|
278 } |
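// Sketch of the reference counting contract (not built): code which shares an object takes
// references with Open() or TryOpen() and drops them with Close(); the object deletes itself
// when the last reference is released. DExampleObject is a hypothetical subclass.
#if 0 // illustrative sketch, not built
class DExampleObject : public DReferenceCountedObject
	{
	// ...object state...
	};

void ExampleUse(DExampleObject* aObject)
	{
	aObject->Open();    // take an extra reference; asserts the object is still alive
	// ...use the object...
	aObject->Close();   // drop the reference; deletes the object if this was the last one
	}
#endif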
|
279 |
|
280 |
|
281 // |
|
282 // Memory object functions |
|
283 // |
|
284 |
|
285 TInt MM::MemoryNew(DMemoryObject*& aMemory, TMemoryObjectType aType, TUint aPageCount, TMemoryCreateFlags aCreateFlags, TMemoryAttributes aAttributes) |
|
286 { |
|
287 TRACE(("MM::MemoryNew(?,0x%08x,0x%08x,0x%08x,0x%08x)",aType,aPageCount,aCreateFlags,*(TUint32*)&aAttributes)); |
|
288 |
|
289 DMemoryManager* manager; |
|
290 if(aCreateFlags&EMemoryCreateCustomManager) |
|
291 manager = (DMemoryManager*)aType; |
|
292 else |
|
293 { |
|
294 switch(aType) |
|
295 { |
|
296 case EMemoryObjectUnpaged: |
|
297 manager = TheUnpagedMemoryManager; |
|
298 break; |
|
299 case EMemoryObjectMovable: |
|
300 manager = TheMovableMemoryManager; |
|
301 break; |
|
302 case EMemoryObjectPaged: |
|
303 manager = TheDataPagedMemoryManager; |
|
304 break; |
|
305 case EMemoryObjectDiscardable: |
|
306 manager = TheDiscardableMemoryManager; |
|
307 break; |
|
308 case EMemoryObjectHardware: |
|
309 manager = TheHardwareMemoryManager; |
|
310 break; |
|
311 default: |
|
312 manager = 0; |
|
313 __NK_ASSERT_DEBUG(0); |
|
314 break; |
|
315 } |
|
316 } |
|
317 TMemoryCreateFlags flags = (TMemoryCreateFlags)(aCreateFlags&~(EMemoryCreateDemandPaged)); |
|
318 TInt r = manager->New(aMemory,aPageCount,aAttributes,flags); |
|
319 TRACE(("MM::MemoryNew returns %d, aMemory=0x%08x",r,aMemory)); |
|
320 #ifdef BTRACE_FLEXIBLE_MEM_MODEL |
|
321 if (r == KErrNone) |
|
322 aMemory->BTraceCreate(); |
|
323 #endif |
|
324 return r; |
|
325 } |
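// Minimal sketch (not built) of creating a movable memory object through this wrapper.
// The flag and attribute values passed here are placeholders, not a recommended configuration.
#if 0 // illustrative sketch, not built
DMemoryObject* memory = NULL;
TInt r = MM::MemoryNew(memory, EMemoryObjectMovable, MM::RoundToPageCount(0x3000),
					   (TMemoryCreateFlags)0, EMemoryAttributeStandard);
if(r==KErrNone)
	{
	// ...commit pages and map the object (see MemoryAlloc and MappingNew below)...
	MM::MemoryDestroy(memory);
	}
#endif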
|
326 |
|
327 |
|
328 TInt MM::MemoryClaimInitialPages(DMemoryObject* aMemory, TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages) |
|
329 { |
|
	TRACE(("MM::MemoryClaimInitialPages(0x%08x,0x%08x,0x%08x,0x%08x,%d,%d)",aMemory,aBase,aSize,aPermissions,aAllowGaps!=0,aAllowNonRamPages!=0));
|
331 TInt r = aMemory->ClaimInitialPages(aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages); |
|
332 TRACE(("MM::MemoryClaimInitialPages returns %d",r)); |
|
333 __NK_ASSERT_DEBUG(r==KErrNone); |
|
334 return r; |
|
335 } |
|
336 |
|
337 |
|
338 void MM::MemorySetLock(DMemoryObject* aMemory, DMutex* aLock) |
|
339 { |
|
340 aMemory->SetLock(aLock); |
|
341 } |
|
342 |
|
343 |
|
344 void MM::MemoryLock(DMemoryObject* aMemory) |
|
345 { |
|
346 MemoryObjectLock::Lock(aMemory); |
|
347 } |
|
348 |
|
349 |
|
350 void MM::MemoryUnlock(DMemoryObject* aMemory) |
|
351 { |
|
352 MemoryObjectLock::Unlock(aMemory); |
|
353 } |
|
354 |
|
355 |
|
356 void MM::MemoryDestroy(DMemoryObject*& aMemory) |
|
357 { |
|
358 DMemoryObject* memory = (DMemoryObject*)__e32_atomic_swp_ord_ptr(&aMemory, 0); |
|
359 if (!memory) |
|
360 return; |
|
361 TRACE(("MM::MemoryDestroy(0x%08x)",memory)); |
|
362 #ifdef BTRACE_FLEXIBLE_MEM_MODEL |
|
363 BTraceContext4(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectDestroy,memory); |
|
364 #endif |
|
365 memory->iManager->Destruct(memory); |
|
366 } |
|
367 |
|
368 |
|
369 TInt MM::MemoryAlloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
370 { |
|
371 TRACE(("MM::MemoryAlloc(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount)); |
|
372 MemoryObjectLock::Lock(aMemory); |
|
373 TInt r; |
|
374 if(!aMemory->CheckRegion(aIndex,aCount)) |
|
375 r = KErrArgument; |
|
376 else |
|
377 r = aMemory->iManager->Alloc(aMemory,aIndex,aCount); |
|
378 MemoryObjectLock::Unlock(aMemory); |
|
379 TRACE(("MM::MemoryAlloc returns %d",r)); |
|
380 return r; |
|
381 } |
|
382 |
|
383 |
|
384 TInt MM::MemoryAllocContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TUint aAlign, TPhysAddr& aPhysAddr) |
|
385 { |
|
386 TRACE(("MM::MemoryAllocContiguous(0x%08x,0x%08x,0x%08x,%d,?)",aMemory,aIndex,aCount,aAlign)); |
|
387 MemoryObjectLock::Lock(aMemory); |
|
388 TInt r; |
|
389 if(!aMemory->CheckRegion(aIndex,aCount)) |
|
390 r = KErrArgument; |
|
391 else |
|
392 r = aMemory->iManager->AllocContiguous(aMemory,aIndex,aCount,MM::RoundToPageShift(aAlign),aPhysAddr); |
|
393 MemoryObjectLock::Unlock(aMemory); |
|
	TRACE(("MM::MemoryAllocContiguous returns %d (aPhysAddr=0x%08x)",r,aPhysAddr));
|
395 return r; |
|
396 } |
|
397 |
|
398 |
|
399 void MM::MemoryFree(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
400 { |
|
401 TRACE(("MM::MemoryFree(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount)); |
|
402 MemoryObjectLock::Lock(aMemory); |
|
403 aMemory->ClipRegion(aIndex,aCount); |
|
404 aMemory->iManager->Free(aMemory,aIndex,aCount); |
|
405 MemoryObjectLock::Unlock(aMemory); |
|
406 } |
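// Sketch (not built) of committing RAM to a page region of an existing memory object and then
// releasing it again; 'memory' and the region used here are placeholders.
#if 0 // illustrative sketch, not built
TUint pageCount = MM::RoundToPageCount(0x3000);
TInt r = MM::MemoryAlloc(memory, 0, pageCount);     // commit pages [0..pageCount)
if(r==KErrNone)
	{
	// ...the region is now backed by RAM and can be mapped...
	MM::MemoryFree(memory, 0, pageCount);           // release the pages again
	}
#endif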
|
407 |
|
408 |
|
409 TInt MM::MemoryAddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages) |
|
410 { |
|
411 TRACE(("MM::MemoryAddPages(0x%08x,0x%08x,0x%08x,?)",aMemory,aIndex,aCount)); |
|
412 MemoryObjectLock::Lock(aMemory); |
|
413 TInt r; |
|
414 if(!aMemory->CheckRegion(aIndex,aCount)) |
|
415 r = KErrArgument; |
|
416 else |
|
417 r = aMemory->iManager->AddPages(aMemory,aIndex,aCount,aPages); |
|
418 MemoryObjectLock::Unlock(aMemory); |
|
419 TRACE(("MM::MemoryAddPages returns %d",r)); |
|
420 return r; |
|
421 } |
|
422 |
|
423 |
|
424 TInt MM::MemoryAddContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr aPhysAddr) |
|
425 { |
|
426 TRACE(("MM::MemoryAddContiguous(0x%08x,0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount,aPhysAddr)); |
|
427 MemoryObjectLock::Lock(aMemory); |
|
428 TInt r; |
|
429 if(!aMemory->CheckRegion(aIndex,aCount)) |
|
430 r = KErrArgument; |
|
431 else |
|
432 r = aMemory->iManager->AddContiguous(aMemory,aIndex,aCount,aPhysAddr); |
|
433 MemoryObjectLock::Unlock(aMemory); |
|
434 TRACE(("MM::MemoryAddContiguous returns %d",r)); |
|
435 return r; |
|
436 } |
|
437 |
|
438 |
|
439 TUint MM::MemoryRemovePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages) |
|
440 { |
|
441 TRACE(("MM::MemoryRemovePages(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount)); |
|
442 MemoryObjectLock::Lock(aMemory); |
|
443 aMemory->ClipRegion(aIndex,aCount); |
|
444 TInt r = aMemory->iManager->RemovePages(aMemory,aIndex,aCount,aPages); |
|
445 if(r<0) |
|
446 r = 0; |
|
447 MemoryObjectLock::Unlock(aMemory); |
|
448 TRACE(("MM::MemoryRemovePages returns %d",r)); |
|
449 return r; |
|
450 } |
|
451 |
|
452 |
|
453 TInt MM::MemoryAllowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
454 { |
|
455 TRACE(("MM::MemoryAllowDiscard(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount)); |
|
456 MemoryObjectLock::Lock(aMemory); |
|
457 TInt r; |
|
458 if(!aMemory->CheckRegion(aIndex,aCount)) |
|
459 r = KErrArgument; |
|
460 else |
|
461 r = aMemory->iManager->AllowDiscard(aMemory,aIndex,aCount); |
|
462 MemoryObjectLock::Unlock(aMemory); |
|
463 TRACE(("MM::MemoryAllowDiscard returns %d",r)); |
|
464 return r; |
|
465 } |
|
466 |
|
467 |
|
468 TInt MM::MemoryDisallowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
469 { |
|
470 TRACE(("MM::MemoryDisallowDiscard(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount)); |
|
471 MemoryObjectLock::Lock(aMemory); |
|
472 TInt r; |
|
473 if(!aMemory->CheckRegion(aIndex,aCount)) |
|
474 r = KErrArgument; |
|
475 else |
|
476 r = aMemory->iManager->DisallowDiscard(aMemory,aIndex,aCount); |
|
477 MemoryObjectLock::Unlock(aMemory); |
|
478 TRACE(("MM::MemoryDisallowDiscard returns %d",r)); |
|
479 return r; |
|
480 } |
|
481 |
|
482 |
|
483 TInt MM::MemoryPhysAddr(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList) |
|
484 { |
|
485 TRACE(("MM::MemoryPhysAddr(0x%08x,0x%08x,0x%08x,?,?)",aMemory,aIndex,aCount)); |
|
486 TInt r = aMemory->PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList); |
|
487 TRACE(("MM::MemoryPhysAddr returns %d aPhysicalAddress=0x%08x",r,aPhysicalAddress)); |
|
488 return r; |
|
489 } |
|
490 |
|
491 |
|
492 void MM::MemoryBTracePrime(DMemoryObject* aMemory) |
|
493 { |
|
494 aMemory->BTraceCreate(); |
|
495 aMemory->iMappings.Lock(); |
|
496 TMappingListIter iter; |
|
497 DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(aMemory->iMappings); |
|
498 while(mapping) |
|
499 { |
|
500 aMemory->iMappings.Unlock(); |
|
501 mapping->BTraceCreate(); |
|
502 aMemory->iMappings.Lock(); |
|
503 mapping = (DMemoryMapping*)iter.Next(); |
|
504 } |
|
505 iter.Finish(); |
|
506 aMemory->iMappings.Unlock(); |
|
507 } |
|
508 |
|
509 |
|
510 void MM::MemoryClose(DMemoryObject* aMemory) |
|
511 { |
|
512 aMemory->Close(); |
|
513 } |
|
514 |
|
515 |
|
516 TBool MM::MemoryIsNotMapped(DMemoryObject* aMemory) |
|
517 { |
|
518 TBool r = aMemory->iMappings.IsEmpty(); |
|
519 TRACE2(("MM::MemoryIsNotMapped(0x%08x) returns %d",aMemory,r)); |
|
520 return r; |
|
521 } |
|
522 |
|
523 // |
|
524 // Physical pinning |
|
525 // |
|
526 |
|
527 TInt MM::PinPhysicalMemory(DMemoryObject* aMemory, DPhysicalPinMapping* aPinObject, TUint aIndex, TUint aCount, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour) |
|
528 { |
|
529 |
|
530 if (!aMemory->CheckRegion(aIndex,aCount)) |
|
531 return KErrArgument; |
|
532 |
|
533 TMappingPermissions permissions = aReadOnly ? ESupervisorReadOnly : ESupervisorReadWrite; |
|
534 TInt r = aPinObject->Pin(aMemory, aIndex, aCount, permissions); |
|
535 if (r == KErrNone) |
|
536 { |
|
537 r = aPinObject->PhysAddr(aIndex, aCount, aAddress, aPages); |
|
538 if (r>=KErrNone) |
|
539 { |
|
			r = KErrNone; // Do not report discontiguous memory in the return value.
|
541 const TMappingAttributes2& mapAttr2 = |
|
542 MM::LegacyMappingAttributes(aMemory->Attributes(), permissions); |
|
543 *(TMappingAttributes2*)&aMapAttr = mapAttr2; |
|
544 } |
|
545 else |
|
546 { |
|
547 aPinObject->Unpin(); |
|
548 } |
|
549 } |
|
550 |
|
551 aColour = 0; |
|
552 return r; |
|
553 } |
|
554 |
|
555 |
|
556 TInt MM::MemoryWipe(DMemoryObject* aMemory) |
|
557 { |
|
558 __NK_ASSERT_ALWAYS(aMemory->iMappings.IsEmpty()); // can't be mapped otherwise confidentiality can't be guaranteed |
|
559 TRACE2(("MM::MemoryWipe(0x%08x)",aMemory)); |
|
560 MemoryObjectLock::Lock(aMemory); |
|
561 TInt r = aMemory->iManager->Wipe(aMemory); |
|
562 MemoryObjectLock::Unlock(aMemory); |
|
563 return r; |
|
564 } |
|
565 |
|
566 |
|
567 TInt MM::MemorySetReadOnly(DMemoryObject* aMemory) |
|
568 { |
|
569 TRACE2(("MM::MemorySetReadOnly(0x%08x)",aMemory)); |
|
570 MemoryObjectLock::Lock(aMemory); |
|
571 TInt r = aMemory->SetReadOnly(); |
|
572 MemoryObjectLock::Unlock(aMemory); |
|
573 return r; |
|
574 } |
|
575 |
|
576 // |
|
577 // Mapping functions |
|
578 // |
|
579 |
|
580 TInt MM::MappingNew(DMemoryMapping*& aMapping, DMemoryObject* aMemory, TMappingPermissions aPermissions, TInt aOsAsid, TMappingCreateFlags aFlags, TLinAddr aAddr, TUint aIndex, TUint aCount) |
|
581 { |
|
582 TRACE(("MM::MappingNew(?,0x%08x,0x%08x,%d,0x%08x,0x%08x,0x%08x,0x%08x)",aMemory, aPermissions, aOsAsid, aFlags, aAddr, aIndex, aCount)); |
|
583 |
|
	/**
	@todo Make mappings created with this function fail (panic?) if they are reused to map
	another object.
	*/
|
588 if(aCount==~0u) |
|
589 aCount = aMemory->iSizeInPages-aIndex; |
|
590 |
|
591 // if memory object reserves all resources, make mappings also do so... |
|
592 if(aMemory->iFlags&DMemoryObject::EReserveResources) |
|
593 FlagSet(aFlags,EMappingCreateReserveAllResources); |
|
594 |
|
595 // check if mapping is for global user data... |
|
596 if(aOsAsid==(TInt)KKernelOsAsid && aPermissions&EUser) |
|
597 FlagSet(aFlags,EMappingCreateUserGlobalVirtual); |
|
598 else |
|
599 FlagClear(aFlags,EMappingCreateUserGlobalVirtual); |
|
600 |
|
601 // set paged attribute for mapping... |
|
602 if(aMemory->IsDemandPaged()) |
|
603 FlagSet(aFlags,EMappingCreateDemandPaged); |
|
604 else |
|
605 FlagClear(aFlags,EMappingCreateDemandPaged); |
|
606 |
|
607 DMemoryMapping* mapping = 0; |
|
608 TInt r = KErrNone; |
|
609 if(!aMemory->CheckRegion(aIndex,aCount)) |
|
610 r = KErrArgument; |
|
611 else |
|
612 { |
|
613 mapping = aMemory->CreateMapping(aIndex, aCount); |
|
614 if(!mapping) |
|
615 r = KErrNoMemory; |
|
616 } |
|
617 |
|
618 if(!mapping) |
|
619 { |
|
620 // free any virtual address the mapping should have adopted... |
|
621 if(aFlags&EMappingCreateAdoptVirtual) |
|
622 MM::VirtualFree(aOsAsid, aAddr, aCount<<KPageShift); |
|
623 } |
|
624 else |
|
625 { |
|
626 r = mapping->Construct(aMemory->Attributes(), aFlags, aOsAsid, aAddr, aCount<<KPageShift, aIndex<<KPageShift); |
|
627 if(r==KErrNone) |
|
628 r = mapping->Map(aMemory, aIndex, aCount, aPermissions); |
|
629 if(r!=KErrNone) |
|
630 { |
|
631 mapping->Close(); |
|
632 mapping = 0; |
|
633 } |
|
634 } |
|
635 |
|
636 aMapping = mapping; |
|
	TRACE(("MM::MappingNew returns %d (aMapping=0x%08x)",r,aMapping));
|
638 #ifdef BTRACE_FLEXIBLE_MEM_MODEL |
|
639 if (r == KErrNone) |
|
640 aMapping->BTraceCreate(); |
|
641 #endif |
|
642 return r; |
|
643 } |
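// Sketch (not built) of mapping a whole memory object into the kernel address space and tearing
// the mapping down again. 'memory' is a placeholder DMemoryObject*; the flag value, and the
// assumption that aAddr==0 means "allocate a virtual address", are illustrative only, and
// aCount==~0u maps the whole object as handled above.
#if 0 // illustrative sketch, not built
DMemoryMapping* mapping = NULL;
TInt r = MM::MappingNew(mapping, memory, ESupervisorReadWrite, KKernelOsAsid,
						(TMappingCreateFlags)0, 0, 0, ~0u);
if(r==KErrNone)
	{
	TLinAddr base = MM::MappingBase(mapping);   // virtual address the object is now visible at
	// ...access the memory through 'base'...
	MM::MappingDestroy(mapping);                // unmaps (if attached) and closes the mapping
	}
#endif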
|
644 |
|
645 |
|
646 TInt MM::MappingNew(DMemoryMapping*& aMapping, TUint aCount, TInt aOsAsid, TMappingCreateFlags aFlags, TLinAddr aAddr, TLinAddr aColourOffset) |
|
647 { |
|
648 TRACE2(("MM::MappingNew(?,0x%08x,%d,0x%08x,0x%08x,0x%08x)",aCount, aOsAsid, aFlags, aAddr, aColourOffset)); |
|
649 |
|
650 FlagClear(aFlags,EMappingCreateDemandPaged); // mapping can't use demand paged page tables |
|
651 |
|
652 TInt r = KErrNone; |
|
653 DMemoryMapping* mapping = new DFineMapping(); |
|
654 if(!mapping) |
|
655 r = KErrNoMemory; |
|
656 |
|
657 if(!mapping) |
|
658 { |
|
659 // free any virtual address the mapping should have adopted... |
|
660 if(aFlags&EMappingCreateAdoptVirtual) |
|
661 MM::VirtualFree(aOsAsid, aAddr, aCount<<KPageShift); |
|
662 } |
|
663 else |
|
664 { |
|
665 r = mapping->Construct(EMemoryAttributeStandard, aFlags, aOsAsid, aAddr, aCount<<KPageShift, aColourOffset); |
|
666 if(r!=KErrNone) |
|
667 { |
|
668 mapping->Close(); |
|
669 mapping = 0; |
|
670 } |
|
671 } |
|
672 |
|
673 aMapping = mapping; |
|
	TRACE2(("MM::MappingNew returns %d (aMapping=0x%08x)",r,aMapping));
|
675 |
|
676 return r; |
|
677 } |
|
678 |
|
679 |
|
680 TInt MM::MappingMap(DMemoryMapping* aMapping, TMappingPermissions aPermissions, DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
681 { |
|
682 TRACE2(("MM::MappingMap(0x%08x,0x%08x,0x%08x,0x%x,0x%x)",aMapping,aPermissions,aMemory,aIndex,aCount)); |
|
683 if(aCount==~0u) |
|
684 aCount = aMemory->iSizeInPages-aIndex; |
|
685 TInt r = aMapping->Map(aMemory, aIndex, aCount, aPermissions); |
|
686 TRACE2(("MM::MappingMap returns %d",r)); |
|
687 return r; |
|
688 } |
|
689 |
|
690 |
|
691 void MM::MappingUnmap(DMemoryMapping* aMapping) |
|
692 { |
|
693 if(aMapping->IsAttached()) |
|
694 { |
|
695 TRACE2(("MM::MappingUnmap(0x%08x)",aMapping)); |
|
696 aMapping->Unmap(); |
|
697 } |
|
698 } |
|
699 |
|
700 |
|
701 void MM::MappingDestroy(DMemoryMapping*& aMapping) |
|
702 { |
|
703 DMemoryMapping* mapping = (DMemoryMapping*)__e32_atomic_swp_ord_ptr(&aMapping, 0); |
|
704 if (!mapping) |
|
705 return; |
|
706 TRACE(("MM::MappingDestroy(0x%08x)",mapping)); |
|
707 #ifdef BTRACE_FLEXIBLE_MEM_MODEL |
|
708 BTraceContext4(BTrace::EFlexibleMemModel,BTrace::EMemoryMappingDestroy,mapping); |
|
709 #endif |
|
710 if(mapping->IsAttached()) |
|
711 mapping->Unmap(); |
|
712 mapping->Close(); |
|
713 } |
|
714 |
|
715 |
|
716 void MM::MappingDestroy(TLinAddr aAddr, TInt aOsAsid) |
|
717 { |
|
718 DMemoryMapping* mapping = AddressSpace[aOsAsid]->GetMapping(aAddr); |
|
719 MM::MappingDestroy(mapping); |
|
720 } |
|
721 |
|
722 |
|
723 void MM::MappingAndMemoryDestroy(DMemoryMapping*& aMapping) |
|
724 { |
|
725 DMemoryMapping* mapping = (DMemoryMapping*)__e32_atomic_swp_ord_ptr(&aMapping, 0); |
|
726 TRACE(("MM::MappingAndMemoryDestroy(0x%08x)",mapping)); |
|
727 if (!mapping) |
|
728 return; |
|
729 DMemoryObject* memory = mapping->Memory(true); // safe because we assume owner hasn't unmapped mapping |
|
730 MM::MappingDestroy(mapping); |
|
731 MM::MemoryDestroy(memory); |
|
732 } |
|
733 |
|
734 |
|
735 void MM::MappingAndMemoryDestroy(TLinAddr aAddr, TInt aOsAsid) |
|
736 { |
|
737 DMemoryMapping* mapping = AddressSpace[aOsAsid]->GetMapping(aAddr); |
|
738 MM::MappingAndMemoryDestroy(mapping); |
|
739 } |
|
740 |
|
741 |
|
742 TLinAddr MM::MappingBase(DMemoryMapping* aMapping) |
|
743 { |
|
744 TLinAddr base = aMapping->Base(); |
|
745 TRACE2(("MM::MappingBase(0x%08x) returns 0x%08x",aMapping,base)); |
|
746 return base; |
|
747 } |
|
748 |
|
749 |
|
750 TInt MM::MappingOsAsid(DMemoryMapping* aMapping) |
|
751 { |
|
752 return aMapping->OsAsid(); |
|
753 } |
|
754 |
|
755 |
|
756 DMemoryObject* MM::MappingGetAndOpenMemory(DMemoryMapping* aMapping) |
|
757 { |
|
758 MmuLock::Lock(); |
|
759 DMemoryObject* memory = aMapping->Memory(); |
|
760 if (memory) |
|
761 memory->Open(); |
|
762 MmuLock::Unlock(); |
|
763 TRACE2(("MM::MappingGetAndOpenMemory(0x%08x) returns 0x%08x",aMapping,memory)); |
|
764 return memory; |
|
765 } |
|
766 |
|
767 |
|
768 void MM::MappingClose(DMemoryMapping* aMapping) |
|
769 { |
|
770 TRACE2(("MM::MappingClose(0x%08x)",aMapping)); |
|
771 aMapping->Close(); |
|
772 } |
|
773 |
|
774 |
|
775 DMemoryMapping* MM::FindMappingInThread(DMemModelThread* aThread, TLinAddr aAddr, TUint aSize, |
|
776 TUint& aOffsetInMapping, TUint& aInstanceCount) |
|
777 { |
|
778 if(aAddr>=KGlobalMemoryBase) |
|
779 { |
|
780 // Address in global region, so look it up in kernel's address space... |
|
781 return FindMappingInAddressSpace(KKernelOsAsid, aAddr, aSize, aOffsetInMapping, aInstanceCount); |
|
782 } |
|
783 |
|
	// The address is in the thread's process address space, so open a reference to its
	// OS ASID so that it remains valid for the FindMappingInAddressSpace() call.
|
786 DMemModelProcess* process = (DMemModelProcess*)aThread->iOwningProcess; |
|
787 TInt osAsid = process->TryOpenOsAsid(); |
|
788 if (osAsid < 0) |
|
789 {// The process no longer owns an address space so can't have any mappings. |
|
790 return NULL; |
|
791 } |
|
792 |
|
793 DMemoryMapping* r = FindMappingInAddressSpace(osAsid, aAddr, aSize, aOffsetInMapping, aInstanceCount); |
|
794 |
|
795 process->CloseOsAsid(); |
|
796 return r; |
|
797 } |
|
798 |
|
799 |
|
800 DMemoryMapping* MM::FindMappingInAddressSpace( TUint aOsAsid, TLinAddr aAddr, TUint aSize, |
|
801 TUint& aOffsetInMapping, TUint& aInstanceCount) |
|
802 { |
|
803 return AddressSpace[aOsAsid]->FindMapping(aAddr, aSize, aOffsetInMapping, aInstanceCount); |
|
804 } |
|
805 |
|
806 |
|
807 |
|
808 // |
|
809 // Address space |
|
810 // |
|
811 |
|
812 TInt MM::AddressSpaceAlloc(TPhysAddr& aPageDirectory) |
|
813 { |
|
814 return DAddressSpace::New(aPageDirectory); |
|
815 } |
|
816 |
|
817 |
|
818 void MM::AddressSpaceFree(TUint aOsAsid) |
|
819 { |
|
820 AddressSpace[aOsAsid]->Close(); |
|
821 } |
|
822 |
|
823 |
|
824 void MM::AsyncAddressSpaceFree(TUint aOsAsid) |
|
825 { |
|
826 AddressSpace[aOsAsid]->AsyncClose(); |
|
827 } |
|
828 |
|
829 |
|
830 TInt MM::VirtualAllocCommon(TLinAddr& aLinAddr, TUint aSize, TBool aDemandPaged) |
|
831 { |
|
832 TRACE(("MM::VirtualAllocCommon(?,0x%08x,%d)",aSize,aDemandPaged)); |
|
833 TUint pdeType = aDemandPaged ? EVirtualSlabTypeDemandPaged : 0; |
|
834 TInt r = DAddressSpace::AllocateUserCommonVirtualMemory(aLinAddr, aSize, 0, aSize, pdeType); |
|
835 TRACE(("MM::VirtualAllocCommon returns %d region=0x%08x+0x%08x",r,aLinAddr,aSize)); |
|
836 return r; |
|
837 } |
|
838 |
|
839 |
|
840 void MM::VirtualFreeCommon(TLinAddr aLinAddr, TUint aSize) |
|
841 { |
|
842 TRACE(("MM::VirtualFreeCommon(0x%08x,0x%08x)",aLinAddr,aSize)); |
|
843 DAddressSpace::FreeUserCommonVirtualMemory(aLinAddr, aSize); |
|
844 } |
|
845 |
|
846 |
|
847 TInt MM::VirtualAlloc(TInt aOsAsid, TLinAddr& aLinAddr, TUint aSize, TBool aDemandPaged) |
|
848 { |
|
849 TRACE(("MM::VirtualAlloc(?,%d,0x%08x,%d)",aOsAsid,aSize,aDemandPaged)); |
|
850 TUint pdeType = aDemandPaged ? EVirtualSlabTypeDemandPaged : 0; |
|
851 TInt r = AddressSpace[aOsAsid]->AllocateVirtualMemory(aLinAddr, aSize, 0, aSize, pdeType); |
|
852 TRACE(("MM::VirtualAlloc returns %d region=0x%08x+0x%08x",r,aLinAddr,aSize)); |
|
853 return r; |
|
854 } |
|
855 |
|
856 |
|
857 void MM::VirtualFree(TInt aOsAsid, TLinAddr aLinAddr, TUint aSize) |
|
858 { |
|
859 TRACE(("MM::VirtualFree(%d,0x%08x,0x%08x)",aOsAsid,aLinAddr,aSize)); |
|
860 AddressSpace[aOsAsid]->FreeVirtualMemory(aLinAddr, aSize); |
|
861 } |
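// Sketch (not built) of reserving and releasing raw virtual address space in a given address
// space; the choice of KKernelOsAsid and the size are placeholders.
#if 0 // illustrative sketch, not built
TLinAddr addr = 0;
TUint size = MM::RoundToPageSize(0x3000);
TInt r = MM::VirtualAlloc(KKernelOsAsid, addr, size, EFalse);
if(r==KErrNone)
	{
	// ...the reserved region could now be handed to a mapping; here it is simply released again...
	MM::VirtualFree(KKernelOsAsid, addr, size);
	}
#endif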
|
862 |
|
863 |
|
864 |
|
865 // |
|
866 // Init |
|
867 // |
|
868 |
|
869 void MM::Init1() |
|
870 { |
|
871 TheMmu.Init1(); |
|
872 } |
|
873 |
|
874 |
|
875 extern DMutexPool MemoryObjectMutexPool; |
|
876 extern DMutexPool AddressSpaceMutexPool; |
|
877 |
|
878 void MM::Init2() |
|
879 { |
|
880 TInt r; |
|
881 |
|
882 TheMmu.Init2(); |
|
883 |
|
884 // create mutex pools before calling any functions which require them... |
|
885 _LIT(KAddressSpaceMutexName,"AddressSpaceMutex"); |
|
886 r = AddressSpaceMutexPool.Create(4, &KAddressSpaceMutexName, KMutexOrdAddresSpace); |
|
887 __NK_ASSERT_ALWAYS(r==KErrNone); |
|
888 _LIT(KMemoryObjectMutexName,"MemoryObjectMutex"); |
|
889 r = MemoryObjectMutexPool.Create(8, &KMemoryObjectMutexName, KMutexOrdMemoryObject); |
|
890 __NK_ASSERT_ALWAYS(r==KErrNone); |
|
891 |
|
892 // use the Ram Allocator mutex for low-level memory functions... |
|
893 DMutex* mmuAllocMutex = TheMmu.iRamAllocatorMutex; |
|
894 |
|
895 // memory cleanup needs initialising before any memory is freed... |
|
896 TMemoryCleanup::Init2(); |
|
897 |
|
898 // initialise allocators used for MMU operations... |
|
899 RPageArray::Init2A(); |
|
900 PageTables.Init2(mmuAllocMutex); // must come before any other code which allocates memory objects |
|
901 RPageArray::Init2B(mmuAllocMutex); |
|
902 PageTables.Init2B(); |
|
903 PageDirectories.Init2(); |
|
904 |
|
905 // initialise address spaces... |
|
906 DAddressSpace::Init2(); |
|
907 |
|
908 // init pager... |
|
909 ThePager.Init2(); |
|
910 |
|
911 TheMmu.Init2Final(); |
|
912 } |
|
913 |
|
914 |
|
915 /** HAL Function wrapper for the RAM allocator. |
|
916 */ |
|
917 TInt RamHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2) |
|
918 { |
|
919 return TheMmu.RamHalFunction(aFunction, a1, a2); |
|
920 } |
|
921 |
|
922 |
|
923 void MM::Init3() |
|
924 { |
|
925 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MM::Init3")); |
|
926 ThePager.Init3(); |
|
927 |
|
928 // Register a HAL Function for the Ram allocator. |
|
929 TInt r = Kern::AddHalEntry(EHalGroupRam, RamHalFunction, 0); |
|
930 __NK_ASSERT_ALWAYS(r==KErrNone); |
|
931 |
|
932 TheMmu.Init3(); |
|
933 } |
|
934 |
|
935 |
|
936 TInt MM::InitFixedKernelMemory(DMemoryObject*& aMemory, |
|
937 TLinAddr aStart, |
|
938 TLinAddr aEnd, |
|
939 TUint aInitSize, |
|
940 TMemoryObjectType aType, |
|
941 TMemoryCreateFlags aMemoryCreateFlags, |
|
942 TMemoryAttributes aMemoryAttributes, |
|
943 TMappingCreateFlags aMappingCreateFlags |
|
944 ) |
|
945 { |
|
946 TUint maxSize = aEnd-aStart; |
|
947 TInt r = MM::MemoryNew(aMemory, aType, MM::BytesToPages(maxSize), aMemoryCreateFlags, aMemoryAttributes); |
|
948 if(r==KErrNone) |
|
949 { |
|
950 TBool allowGaps = aInitSize&1; // lower bit of size is set if region to be claimed contains gaps |
|
951 aInitSize &= ~1; |
|
952 r = MM::MemoryClaimInitialPages(aMemory,aStart,aInitSize,ESupervisorReadWrite,allowGaps); |
|
953 if(r==KErrNone) |
|
954 { |
|
955 DMemoryMapping* mapping; |
|
956 r = MM::MappingNew(mapping,aMemory,ESupervisorReadWrite,KKernelOsAsid,aMappingCreateFlags,aStart); |
|
957 // prevent any further mappings of this memory, |
|
958 // this is needed for realtime and OOM guarantees... |
|
959 aMemory->DenyMappings(); |
|
960 } |
|
961 } |
|
962 // Note, no cleanup is done if an error occurs because this function is only |
|
963 // used at boot time and the system can't recover from an error |
|
964 return r; |
|
965 } |
|
966 |
|
967 |
|
968 void MM::Panic(MM::TMemModelPanic aPanic) |
|
969 { |
|
970 Kern::Fault("MemModel", aPanic); |
|
971 } |
|
972 |
|
973 |
|
974 // |
|
975 // |
|
976 // |
|
977 |
|
978 TUint MM::BytesToPages(TUint aBytes) |
|
979 { |
|
980 if(aBytes&KPageMask) |
|
981 Panic(EBadBytesToPages); |
|
982 return aBytes>>KPageShift; |
|
983 } |
|
984 |
|
985 |
|
986 TUint MM::RoundToPageSize(TUint aSize) |
|
987 { |
|
988 return (aSize+KPageMask)&~KPageMask; |
|
989 } |
|
990 |
|
991 |
|
992 TUint MM::RoundToPageCount(TUint aSize) |
|
993 { |
|
994 return (aSize+KPageMask)>>KPageShift; |
|
995 } |
|
996 |
|
997 |
|
998 TUint MM::RoundToPageShift(TUint aShift) |
|
999 { |
|
1000 return aShift>(TUint)KPageShift ? aShift-KPageShift : 0; |
|
1001 } |
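// Worked example of the rounding helpers above, assuming the usual 4KB page size
// (KPageShift==12, KPageMask==0xfff):
//   MM::RoundToPageSize(0x1001)  == 0x2000  (byte size rounded up to whole pages)
//   MM::RoundToPageCount(0x1001) == 2       (number of pages needed to hold the size)
//   MM::BytesToPages(0x2000)     == 2       (size must already be page aligned, else it panics)
//   MM::RoundToPageShift(16)     == 4       (log2 byte alignment converted to log2 page alignment)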
|
1002 |
|
1003 |
|
1004 // |
|
1005 // |
|
1006 // |
|
1007 |
|
1008 void MM::ValidateLocalIpcAddress(TLinAddr aAddr, TUint aSize, TBool aWrite) |
|
1009 { |
|
1010 __NK_ASSERT_DEBUG(aSize); |
|
1011 |
|
1012 TLinAddr end = aAddr+aSize-1; |
|
1013 if(end<aAddr) |
|
1014 end = ~(TLinAddr)0; // clip to end of memory |
|
1015 |
|
1016 // if IPC region is in process local data area then it's OK... |
|
1017 if(end<KUserLocalDataEnd && aAddr>=KUserLocalDataBase) |
|
1018 return; |
|
1019 |
|
1020 // if region overlaps alias region... |
|
1021 if(end>=KIPCAlias && aAddr<KIPCAlias+KIPCAliasAreaSize) |
|
1022 { |
|
1023 // remove alias... |
|
1024 ((DMemModelThread*)TheCurrentThread)->RemoveAlias(); |
|
1025 // make sure start address is in alias region... |
|
1026 if(aAddr<KIPCAlias) |
|
1027 aAddr = KIPCAlias; |
|
1028 // then cause fault now... |
|
1029 MM::UserPermissionFault(aAddr,aWrite); |
|
1030 } |
|
1031 |
|
1032 if(end<(TLinAddr)KUserMemoryLimit) |
|
1033 return; // user memory is safe |
|
1034 |
|
	// Compare the current thread's process OS ASID against the kernel ASID; there is no need
	// to open a reference on the OS ASID because it belongs to the current thread.
|
1037 if(((DMemModelProcess*)TheCurrentThread->iOwningProcess)->OsAsid()==(TInt)KKernelOsAsid) |
|
1038 return; // kernel can access everything |
|
1039 |
|
1040 // make sure address is in supervisor only region... |
|
1041 if(aAddr<KUserMemoryLimit) |
|
1042 aAddr = KUserMemoryLimit; |
|
1043 // then cause fault now... |
|
1044 MM::UserPermissionFault(aAddr,aWrite); |
|
1045 } |
|
1046 |
|
1047 |
|
1048 void MM::UserPermissionFault(TLinAddr aAddr, TBool aWrite) |
|
1049 { |
|
1050 // Access aAddr with user permissions to generate an exception... |
|
1051 if(aWrite) |
|
1052 UserWriteFault(aAddr); |
|
1053 else |
|
1054 UserReadFault(aAddr); |
|
1055 __NK_ASSERT_ALWAYS(0); // shouldn't get here |
|
1056 } |
|
1057 |
|
1058 |
|
1059 #ifndef __SMP__ |
|
1060 void MM::IpcAliasPde(TPde*& aPdePtr, TUint aOsAsid) |
|
1061 { |
|
1062 aPdePtr = &Mmu::PageDirectory(aOsAsid)[KIPCAlias>>KChunkShift]; |
|
1063 } |
|
1064 #endif |
|
1065 |
|
1066 |
|
1067 TMappingPermissions MM::MappingPermissions(TBool aUser, TBool aWrite, TBool aExecute) |
|
1068 { |
|
1069 TUint perm = 0; |
|
1070 if(aUser) |
|
1071 perm |= EUser; |
|
1072 if(aWrite) |
|
1073 perm |= EReadWrite; |
|
1074 if(aExecute) |
|
1075 perm |= EExecute; |
|
1076 return (TMappingPermissions)perm; |
|
1077 } |
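// For example, a user-mode read/write, no-execute mapping combines as EUser|EReadWrite:
#if 0 // illustrative sketch, not built
TMappingPermissions perm = MM::MappingPermissions(ETrue/*user*/, ETrue/*write*/, EFalse/*execute*/);
// perm == (TMappingPermissions)(EUser|EReadWrite)
#endif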
|
1078 |
|
1079 |
|
1080 TInt MM::MappingPermissions(TMappingPermissions& aPermissions, TMappingAttributes2 aLegacyAttributes) |
|
1081 { |
|
1082 TUint attr2 = *(TUint32*)&aLegacyAttributes; |
|
1083 |
|
1084 TUint read = attr2&EMapAttrReadMask; |
|
1085 TUint write = (attr2&EMapAttrWriteMask)>>4; |
|
1086 TUint execute = (attr2&EMapAttrExecMask)>>8; |
|
1087 |
|
1088 read |= execute; // execute access requires read access |
|
1089 |
|
1090 if(write==0) // no write required |
|
1091 { |
|
1092 if((read&5)==0) |
|
1093 return KErrNotSupported; // neither supervisor nor user read specified |
|
1094 } |
|
1095 else if(write<4) // supervisor write required |
|
1096 { |
|
1097 if(read>=4) |
|
1098 return KErrNotSupported; // user read requested (but no user write) |
|
1099 } |
|
1100 |
|
1101 read |= write; // write access implies read access |
|
1102 |
|
1103 TUint user = read&4; |
|
1104 aPermissions = MappingPermissions(user,write,execute); |
|
1105 |
|
1106 return KErrNone; |
|
1107 } |
|
1108 |
|
1109 |
|
1110 TInt MM::MemoryAttributes(TMemoryAttributes& aAttributes, TMappingAttributes2 aLegacyAttributes) |
|
1111 { |
|
1112 TUint attr = aLegacyAttributes.Type(); |
|
1113 if (aLegacyAttributes.Shared()) |
|
1114 attr |= EMemoryAttributeShareable; |
|
1115 if (aLegacyAttributes.Parity()) |
|
1116 attr |= EMemoryAttributeUseECC; |
|
1117 aAttributes = Mmu::CanonicalMemoryAttributes((TMemoryAttributes)attr); |
|
1118 return KErrNone; |
|
1119 } |
|
1120 |
|
1121 |
|
1122 TMappingAttributes2 MM::LegacyMappingAttributes(TMemoryAttributes aAttributes, TMappingPermissions aPermissions) |
|
1123 { |
|
1124 TUint attr = Mmu::CanonicalMemoryAttributes(aAttributes); |
|
1125 return TMappingAttributes2 |
|
1126 ( |
|
1127 (TMemoryType)(attr&EMemoryAttributeTypeMask), |
|
1128 aPermissions&EUser, |
|
1129 aPermissions&EReadWrite, |
|
1130 aPermissions&EExecute, |
|
1131 attr&EMemoryAttributeShareable, |
|
1132 attr&EMemoryAttributeUseECC |
|
1133 ); |
|
1134 } |
|
1135 |
|
1136 |