|
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "cache_maintenance.h"
#include "decompress.h" // include for the generic BytePairDecompress().
#include "mm.h"
#include "mmu.h"
#include "mpager.h"
#include "mmanager.h"
#include "mmapping.h"
#include "mobject.h"
#include "mcleanup.h"
|

//
// DMemoryManager
//
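
// The size test below picks the memory object representation: a size which is
// not a whole number of MMU chunks needs fine (page-table) granularity, so it
// gets a DFineMemory object; whole-chunk sizes get the coarser DCoarseMemory
// representation, which (presumably) allows its page tables to be shared.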
|
TInt DMemoryManager::New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
    {
    DMemoryObject* memory;
    if(aSizeInPages&(KChunkMask>>KPageShift))
        memory = DFineMemory::New(this,aSizeInPages,aAttributes,aCreateFlags);
    else
        memory = DCoarseMemory::New(this,aSizeInPages,aAttributes,aCreateFlags);
    aMemory = memory;
    if(!memory)
        return KErrNoMemory;
    return KErrNone;
    }
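
// The remaining operations are implemented here as defaults which fail with
// KErrNotSupported (or do nothing); each concrete manager class overrides
// just the subset of operations which make sense for the memory it manages.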
|
TInt DMemoryManager::Alloc(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
    {
    return KErrNotSupported;
    }


TInt DMemoryManager::AllocContiguous(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TUint /*aAlign*/, TPhysAddr& /*aPhysAddr*/)
    {
    return KErrNotSupported;
    }


void DMemoryManager::Free(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
    {
    }


TInt DMemoryManager::Wipe(DMemoryObject* /*aMemory*/)
    {
    return KErrNotSupported;
    }


TInt DMemoryManager::AddPages(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TPhysAddr* /*aPages*/)
    {
    return KErrNotSupported;
    }


TInt DMemoryManager::AddContiguous(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TPhysAddr /*aPhysAddr*/)
    {
    return KErrNotSupported;
    }


TInt DMemoryManager::RemovePages(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TPhysAddr* /*aPages*/)
    {
    return KErrNotSupported;
    }


TInt DMemoryManager::AllowDiscard(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
    {
    return KErrNotSupported;
    }


TInt DMemoryManager::DisallowDiscard(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
    {
    return KErrNotSupported;
    }


TInt DMemoryManager::StealPage(DMemoryObject* /*aMemory*/, SPageInfo* /*aPageInfo*/)
    {
    return KErrNotSupported;
    }


TInt DMemoryManager::RestrictPage(DMemoryObject* /*aMemory*/, SPageInfo* /*aPageInfo*/, TRestrictPagesType /*aRestriction*/)
    {
    return KErrNotSupported;
    }


TInt DMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& /*aPageArrayEntry*/)
    {
    if(aPageInfo->IsDirty()==false)
        return KErrNone;
    __NK_ASSERT_DEBUG(0);
    return KErrNotSupported;
    }
|

TInt DMemoryManager::HandleFault( DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
                                  TUint aMapInstanceCount, TUint aAccessPermissions)
    {
    (void)aMemory;
    (void)aIndex;
    (void)aMapping;
    (void)aMapInstanceCount;
    (void)aAccessPermissions;
    // Kern::Printf("DMemoryManager::HandlePageFault(0x%08x,0x%x,0x%08x,%d)",aMemory,aIndex,aMapping,aAccessPermissions);
    return KErrAbort;
    }


TInt DMemoryManager::MovePage( DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
                               TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
    {
    return KErrNotSupported;
    }

TZonePageType DMemoryManager::PageType()
    {// This should not be invoked on memory managers that do not use the methods
    // AllocPages() and FreePages().
    __NK_ASSERT_DEBUG(0);
    return EPageFixed;
    }
|

static TMemoryCleanup Cleanup;

DMemoryObject* DMemoryManager::iCleanupHead = 0;
TSpinLock DMemoryManager::iCleanupLock(TSpinLock::EOrderGenericIrqHigh3);
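
// Memory objects with deferred work (e.g. freeing pages which were decommitted
// while pinned) are linked onto a single cleanup queue under iCleanupLock;
// queueing takes a reference on the object, which CleanupFunction() releases
// once all of the object's cleanup operations have been performed.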
|
void DMemoryManager::CleanupFunction(TAny*)
    {
    for(;;)
        {
        __SPIN_LOCK_IRQ(iCleanupLock);

        // get an object from queue...
        DMemoryObject* memory = iCleanupHead;
        if(!memory)
            {
            // none left, so end...
            __SPIN_UNLOCK_IRQ(iCleanupLock);
            return;
            }

        if(memory->iCleanupFlags&ECleanupDecommitted)
            {
            // object requires cleanup of decommitted pages...
            memory->iCleanupFlags &= ~ECleanupDecommitted;
            __SPIN_UNLOCK_IRQ(iCleanupLock);
            memory->iManager->DoCleanupDecommitted(memory);
            }
        else
            {
            // object has no more cleanup operations to perform,
            // so remove it from the cleanup queue...
            __NK_ASSERT_DEBUG(memory->iCleanupFlags==ECleanupIsQueued); // no operations left, just the flag to say it's in the cleanup queue
            memory->iCleanupFlags &= ~ECleanupIsQueued;
            iCleanupHead = memory->iCleanupNext;
            memory->iCleanupNext = NULL;
            __SPIN_UNLOCK_IRQ(iCleanupLock);

            // close reference which was added when object was queued...
            memory->Close();
            }
        }
    }
|

void DMemoryManager::QueueCleanup(DMemoryObject* aMemory, TCleanupOperationFlag aCleanupOp)
    {
    // add new cleanup operation...
    __SPIN_LOCK_IRQ(iCleanupLock);
    TUint32 oldFlags = aMemory->iCleanupFlags;
    aMemory->iCleanupFlags = oldFlags|aCleanupOp|ECleanupIsQueued;
    __SPIN_UNLOCK_IRQ(iCleanupLock);

    // if cleanup was already requested...
    if(oldFlags)
        return; // nothing more to do

    // increase reference count...
    aMemory->Open();

    // add object to cleanup queue...
    __SPIN_LOCK_IRQ(iCleanupLock);
    aMemory->iCleanupNext = iCleanupHead;
    iCleanupHead = aMemory;
    __SPIN_UNLOCK_IRQ(iCleanupLock);

    // queue cleanup function to run...
    Cleanup.Add((TMemoryCleanupCallback)CleanupFunction,0);
    }
|

void DMemoryManager::DoCleanupDecommitted(DMemoryObject* aMemory)
    {
    TRACE2(("DMemoryManager::DoCleanupDecommitted(0x%08x)",aMemory));
    __NK_ASSERT_DEBUG(0);
    }
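
// ReAllocDecommitted() re-commits any pages in the region which are still owned
// by the memory object but left in the EDecommitted state - i.e. pages whose
// earlier decommit couldn't complete, e.g. because they were pinned at the time.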
|
void DMemoryManager::ReAllocDecommitted(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    // make iterator for region...
    RPageArray::TIter pageIter;
    aMemory->iPages.FindStart(aIndex,aCount,pageIter);

    for(;;)
        {
        // find some pages...
        RPageArray::TIter pageList;
        TUint n = pageIter.Find(pageList);
        if(!n)
            break;

        // check each existing page...
        RamAllocLock::Lock();
        TPhysAddr* pages;
        while(pageList.Pages(pages))
            {
            TPhysAddr page = *pages;
            if(RPageArray::State(page)==RPageArray::EDecommitted)
                {
                // decommitted pages need re-initialising...
                TPhysAddr pagePhys = page&~KPageMask;
                *pages = pagePhys|RPageArray::ECommitted;
                TheMmu.PagesAllocated(&pagePhys,1,aMemory->RamAllocFlags(),true);
                }
            pageList.Skip(1);
            }
        RamAllocLock::Unlock();

        // move on...
        pageIter.FindRelease(n);
        }

    aMemory->iPages.FindEnd(aIndex,aCount);
    }
|

void DMemoryManager::FreeDecommitted(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    TRACE2(("DMemoryManager::FreeDecommitted(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    // make iterator for region...
    RPageArray::TIter pageIter;
    aMemory->iPages.FindStart(aIndex,aCount,pageIter);

    for(;;)
        {
        // find some pages...
        RPageArray::TIter pageList;
        TUint findCount = pageIter.Find(pageList);
        if(!findCount)
            break;

        // search for decommitted pages...
        RamAllocLock::Lock();
        TPhysAddr* pages;
        TUint numPages;
        while((numPages=pageList.Pages(pages))!=0)
            {
            TUint n=0;
            if(RPageArray::State(pages[n])!=RPageArray::EDecommitted)
                {
                // skip pages which aren't EDecommitted...
                while(++n<numPages && RPageArray::State(pages[n])!=RPageArray::EDecommitted)
                    {}
                }
            else
                {
                // find range of pages which are EDecommitted...
                while(++n<numPages && RPageArray::State(pages[n])==RPageArray::EDecommitted)
                    {}
                RPageArray::TIter decommittedList(pageList.Left(n));

                // free pages...
                TUint freedCount = FreePages(aMemory,decommittedList);
                (void)freedCount;
                TRACE2(("DMemoryManager::FreeDecommitted(0x%08x) freed %d in 0x%x..0x%x",aMemory,freedCount,decommittedList.Index(),decommittedList.IndexEnd()));
                }
            pageList.Skip(n);
            }
        RamAllocLock::Unlock();

        // move on...
        pageIter.FindRelease(findCount);
        }

    aMemory->iPages.FindEnd(aIndex,aCount);
    }
|

void DMemoryManager::DoFree(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    TRACE2(("DMemoryManager::DoFree(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    RPageArray::TIter pageIter;
    aMemory->iPages.FindStart(aIndex,aCount,pageIter);

    for(;;)
        {
        // find some pages...
        RPageArray::TIter pageList;
        TUint n = pageIter.RemoveFind(pageList);
        if(!n)
            break;

        // free pages...
        FreePages(aMemory,pageList);

        // move on...
        pageIter.FindRelease(n);
        }

    aMemory->iPages.FindEnd(aIndex,aCount);
    }
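
// FreePages() first unmaps the pages, then removes them from the page array
// and returns them to the system in batches of KMaxPagesInOneGo while holding
// the RamAllocLock.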
|
TInt DMemoryManager::FreePages(DMemoryObject* aMemory, RPageArray::TIter aPageList)
    {
    // unmap the pages...
    aMemory->UnmapPages(aPageList,true);

    RamAllocLock::Lock();

    // remove and free pages...
    Mmu& m = TheMmu;
    TUint count = 0;
    TPhysAddr pages[KMaxPagesInOneGo];
    TUint n;
    while((n=aPageList.Remove(KMaxPagesInOneGo,pages))!=0)
        {
        count += n;
        m.FreeRam(pages, n, aMemory->iManager->PageType());
        }

    RamAllocLock::Unlock();

    return count;
    }
|


/**
Manager for memory objects containing normal unpaged program memory (RAM) which
is allocated from a system wide pool. The physical pages allocated to this
memory are fixed until explicitly freed.

This is normally used for kernel memory and any other situation where it
is not permissible for memory accesses to generate page faults of any kind.
*/
class DUnpagedMemoryManager : public DMemoryManager
    {
public:
    // from DMemoryManager...
    virtual void Destruct(DMemoryObject* aMemory);
    virtual TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
    virtual TInt AllocContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TUint aAlign, TPhysAddr& aPhysAddr);
    virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
    virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
    virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
    virtual TInt Wipe(DMemoryObject* aMemory);
    virtual TZonePageType PageType();

private:
    // from DMemoryManager...
    virtual void DoCleanupDecommitted(DMemoryObject* aMemory);

    /**
    Factored-out implementation of #Alloc.
    */
    static TInt AllocPages(DMemoryObject* aMemory, RPageArray::TIter aPageList);

    /**
    Factored-out implementation of #AllocContiguous.
    */
    static TInt AllocContiguousPages(DMemoryObject* aMemory, RPageArray::TIter aPageList, TUint aAlign, TPhysAddr& aPhysAddr);

    /**
    Factored-out implementation of #Wipe.
    */
    static void WipePages(DMemoryObject* aMemory, RPageArray::TIter aPageList);

public:
    /**
    The single instance of this manager class.
    */
    static DUnpagedMemoryManager TheManager;
    };
|

DUnpagedMemoryManager DUnpagedMemoryManager::TheManager;
DMemoryManager* TheUnpagedMemoryManager = &DUnpagedMemoryManager::TheManager;


void DUnpagedMemoryManager::Destruct(DMemoryObject* aMemory)
    {
    MemoryObjectLock::Lock(aMemory);
    Free(aMemory,0,aMemory->iSizeInPages);
    MemoryObjectLock::Unlock(aMemory);
    aMemory->Close();
    }
|

TInt DUnpagedMemoryManager::Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    TRACE2(("DUnpagedMemoryManager::Alloc(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    // re-initialise any decommitted pages which we may still own because they were pinned...
    ReAllocDecommitted(aMemory,aIndex,aCount);

    // check and allocate page array entries...
    RPageArray::TIter pageList;
    TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageList,true);
    if(r!=KErrNone)
        return r;

    // allocate RAM and add it to page array...
    r = AllocPages(aMemory,pageList);

    // map pages...
    if(r==KErrNone)
        r = aMemory->MapPages(pageList);

    // release page array entries...
    aMemory->iPages.AddEnd(aIndex,aCount);

    // revert if error...
    if(r!=KErrNone)
        Free(aMemory,aIndex,aCount);

    return r;
    }
|

TInt DUnpagedMemoryManager::AllocContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TUint aAlign, TPhysAddr& aPhysAddr)
    {
    TRACE2(("DUnpagedMemoryManager::AllocContiguous(0x%08x,0x%x,0x%x,%d,?)",aMemory, aIndex, aCount, aAlign));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    // set invalid memory in case of error...
    aPhysAddr = KPhysAddrInvalid;

    // check and allocate page array entries...
    RPageArray::TIter pageList;
    TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageList);
    if(r!=KErrNone)
        return r;

    // allocate memory...
    TPhysAddr physAddr;
    r = AllocContiguousPages(aMemory, pageList, aAlign, physAddr);

    // map memory...
    if(r==KErrNone)
        {
        r = aMemory->MapPages(pageList);
        if(r==KErrNone)
            aPhysAddr = physAddr;
        }

    // release page array entries...
    aMemory->iPages.AddEnd(aIndex,aCount);

    // revert if error...
    if(r!=KErrNone)
        Free(aMemory,aIndex,aCount);

    return r;
    }
|

void DUnpagedMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    DoFree(aMemory,aIndex,aCount);
    }
|

TInt DUnpagedMemoryManager::AllocPages(DMemoryObject* aMemory, RPageArray::TIter aPageList)
    {
    TInt r = KErrNone;
    RamAllocLock::Lock();

    Mmu& m = TheMmu;
    for(;;)
        {
        // find entries in page array to allocate...
        RPageArray::TIter allocList;
        TUint n = aPageList.AddFind(allocList);
        if(!n)
            break;

        do
            {
            // allocate ram...
            TPhysAddr pages[KMaxPagesInOneGo];
            if(n>KMaxPagesInOneGo)
                n = KMaxPagesInOneGo;
            r = m.AllocRam(pages, n, aMemory->RamAllocFlags(), aMemory->iManager->PageType());
            if(r!=KErrNone)
                goto done;

            // assign pages to memory object...
                {
                TUint index = allocList.Index();
                TUint flags = aMemory->PageInfoFlags();
                TUint i=0;
                MmuLock::Lock();
                do
                    {
                    SPageInfo* pi = SPageInfo::FromPhysAddr(pages[i]);
                    pi->SetManaged(aMemory,index+i,flags);
                    }
                while(++i<n);
                MmuLock::Unlock();
                }

            // add pages to page array...
            allocList.Add(n,pages);
            }
        while((n=allocList.Count())!=0);
        }
    done:
    RamAllocLock::Unlock();
    return r;
    }
|

TInt DUnpagedMemoryManager::AllocContiguousPages(DMemoryObject* aMemory, RPageArray::TIter aPageList, TUint aAlign, TPhysAddr& aPhysAddr)
    {
    TUint size = aPageList.Count();
    RamAllocLock::Lock();

    // allocate memory...
    Mmu& m = TheMmu;
    TPhysAddr physAddr;
    TInt r = m.AllocContiguousRam(physAddr, size, aAlign, aMemory->RamAllocFlags());
    if(r==KErrNone)
        {
        // assign pages to memory object...
        TUint index = aPageList.Index();
        TUint flags = aMemory->PageInfoFlags();
        SPageInfo* pi = SPageInfo::FromPhysAddr(physAddr);
        SPageInfo* piEnd = pi+size;
        TUint flash = 0;
        MmuLock::Lock();
        while(pi<piEnd)
            {
            MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
            pi->SetManaged(aMemory,index++,flags);
            ++pi;
            }
        MmuLock::Unlock();

        // add pages to page array...
        aPageList.AddContiguous(size,physAddr);

        // set result...
        aPhysAddr = physAddr;
        }

    RamAllocLock::Unlock();
    return r;
    }
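
// Unpaged memory is always resident, so pinning doesn't need to page anything
// in; it only has to check that no page in the region is in the process of
// being decommitted. A non-empty remainder in pageList below means the scan
// stopped early on such a page, hence KErrNotFound.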
|
TInt DUnpagedMemoryManager::Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
    {
    RPageArray::TIter pageList;
    aMemory->iPages.FindStart(aMapping->iStartIndex,aMapping->iSizeInPages,pageList);

    MmuLock::Lock();

    TUint n;
    TPhysAddr* pages;
    TUint flash = 0;
    while((n=pageList.Pages(pages,KMaxPageInfoUpdatesInOneGo))!=0)
        {
        TPhysAddr* p = pages;
        TPhysAddr* pEnd = p+n;
        do
            {
            TPhysAddr page = *p++;
            if(RPageArray::TargetStateIsDecommitted(page))
                goto stop; // page is being decommitted, so can't pin it
            }
        while(p!=pEnd);
        pageList.Skip(n);
        flash += n;
        MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
        }
    stop:
    MmuLock::Unlock();

    aMemory->iPages.FindEnd(aMapping->iStartIndex,aMapping->iSizeInPages);

    return pageList.Count() ? KErrNotFound : KErrNone;
    }


void DUnpagedMemoryManager::Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
    {
    }
|

void DUnpagedMemoryManager::DoCleanupDecommitted(DMemoryObject* aMemory)
    {
    MemoryObjectLock::Lock(aMemory);
    FreeDecommitted(aMemory,0,aMemory->iSizeInPages);
    MemoryObjectLock::Unlock(aMemory);
    }


TInt DUnpagedMemoryManager::Wipe(DMemoryObject* aMemory)
    {
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    // make iterator for region...
    RPageArray::TIter pageIter;
    aMemory->iPages.FindStart(0,aMemory->iSizeInPages,pageIter);

    for(;;)
        {
        // find some pages...
        RPageArray::TIter pageList;
        TUint n = pageIter.Find(pageList);
        if(!n)
            break;

        // wipe some pages...
        WipePages(aMemory,pageList);

        // move on...
        pageIter.FindRelease(n);
        }

    aMemory->iPages.FindEnd(0,aMemory->iSizeInPages);

    return KErrNone;
    }
|

void DUnpagedMemoryManager::WipePages(DMemoryObject* aMemory, RPageArray::TIter aPageList)
    {
    TUint index = aPageList.Index();
    TUint count = aPageList.Count();
    TRACE(("DUnpagedMemoryManager::WipePages(0x%08x,0x%x,0x%x)",aMemory,index,count));

    __NK_ASSERT_ALWAYS(!aMemory->IsReadOnly()); // trap wiping read-only memory

    RamAllocLock::Lock();

    while(count)
        {
        // get some physical page addresses...
        TPhysAddr pages[KMaxPagesInOneGo];
        TPhysAddr physAddr;
        TUint n = count;
        if(n>KMaxPagesInOneGo)
            n = KMaxPagesInOneGo;
        TInt r = aMemory->iPages.PhysAddr(index,n,physAddr,pages);
        __NK_ASSERT_ALWAYS(r>=0); // caller should have ensured all pages are present
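        // A return of 0 appears to mean the pages are physically contiguous;
        // in that case a single base address with the bottom bit set is passed
        // to PagesAllocated() to denote a contiguous run rather than a list of
        // individual page addresses.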
|
        // wipe some pages...
        TPhysAddr* pagesToWipe = r!=0 ? pages : (TPhysAddr*)((TLinAddr)physAddr|1);
        TheMmu.PagesAllocated(pagesToWipe,n,aMemory->RamAllocFlags(),true);

        // move on...
        index += n;
        count -= n;
        }

    RamAllocLock::Unlock();
    }
|

TZonePageType DUnpagedMemoryManager::PageType()
    {// Unpaged memory cannot be moved or discarded; therefore it is fixed.
    return EPageFixed;
    }
|

/**
Manager for memory objects containing normal unpaged RAM, as
#DUnpagedMemoryManager, but which may be 'moved' by RAM
defragmentation, i.e. may have the physical pages used to store its content
substituted for others.

Such memory may cause transient page faults if it is accessed whilst its
contents are being moved, which makes it unsuitable for most kernel-side
usage. This is the memory management scheme normally used for unpaged user
memory.
*/
class DMovableMemoryManager : public DUnpagedMemoryManager
    {
public:
    // from DMemoryManager...
    virtual TInt MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo, TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
    virtual TInt HandleFault( DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
                              TUint aMapInstanceCount, TUint aAccessPermissions);
    virtual TZonePageType PageType();
public:
    /**
    The single instance of this manager class.
    */
    static DMovableMemoryManager TheManager;
    };
|

DMovableMemoryManager DMovableMemoryManager::TheManager;
DMemoryManager* TheMovableMemoryManager = &DMovableMemoryManager::TheManager;
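
// Moving a page: allocate a replacement page, restrict access to the old page
// so concurrent use can be detected, copy the contents, then switch the page
// array entry to the new physical page and remap. If a page fault, pinning or
// any other state change is detected at any point, the move is abandoned with
// KErrInUse and the mappings are restored to the old page.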
|
TInt DMovableMemoryManager::MovePage( DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
                                      TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
    {
    __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

    // Allocate the new page to move to, ensuring that we use the page type of the
    // manager assigned to this page.
    TPhysAddr newPage;
    Mmu& m = TheMmu;
    TInt r = m.AllocRam(&newPage, 1, aMemory->RamAllocFlags(), aMemory->iManager->PageType(),
                        aBlockZoneId, aBlockRest);
    if (r != KErrNone)
        {// Failed to allocate a new page to move the page to so can't continue.
        return r;
        }

    r = KErrInUse;
    MmuLock::Lock();

    TUint index = aOldPageInfo->Index();
    TRACE( ("DMovableMemoryManager::MovePage(0x%08x,0x%08x,?,0x%08x,%d) index=0x%x",
            aMemory,aOldPageInfo,aBlockZoneId,aBlockRest,index));
    __NK_ASSERT_DEBUG(aMemory==aOldPageInfo->Owner());

    // Mark the page as being moved and get a pointer to the page array entry.
    RPageArray::TIter pageIter;
    TPhysAddr* const movingPageArrayPtr = aMemory->iPages.MovePageStart(index, pageIter);
    if (!movingPageArrayPtr)
        {// Can't move the page; another operation is being performed on it.
        MmuLock::Unlock();
        TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
        return r;
        }
    __NK_ASSERT_DEBUG(RPageArray::IsPresent(*movingPageArrayPtr));
    TPhysAddr oldPageEntry = *movingPageArrayPtr;
    TPhysAddr oldPage = oldPageEntry & ~KPageMask;
#ifdef _DEBUG
    if (oldPage != aOldPageInfo->PhysAddr())
        {// The address of the page array entry and the page info should match except
        // when the page is being shadowed.
        __NK_ASSERT_DEBUG(SPageInfo::FromPhysAddr(oldPage)->Type() == SPageInfo::EShadow);
        }
#endif
    __NK_ASSERT_DEBUG((newPage & KPageMask) == 0);
    __NK_ASSERT_DEBUG(newPage != oldPage);

    // Set the modifier so we can detect if the page state is updated.
    aOldPageInfo->SetModifier(&pageIter);

    // Restrict the page ready for moving.
    // Read only memory objects don't need to be restricted but we still need
    // to discover any physically pinned mappings.
    TBool pageRestrictedNA = !aMemory->IsReadOnly();
    TRestrictPagesType restrictType = pageRestrictedNA ?
                                      ERestrictPagesNoAccessForMoving :
                                      ERestrictPagesForMovingFlag;

    // This page's contents may be changed so restrict the page to no access
    // so we can detect any access to it while we are moving it.
    MmuLock::Unlock();
    // This will clear the memory object's mapping-added flag so we can detect any new mappings.
    aMemory->RestrictPages(pageIter, restrictType);

    const TUint KOldMappingSlot = 0;
    const TUint KNewMappingSlot = 1;
    const TAny* tmpPtrOld = NULL;
    TAny* tmpPtrNew;
    // Verify that page restricting wasn't interrupted; if it was then the page
    // can't be moved so remap it.
    // If the page array entry (*movingPageArrayPtr) has been modified then a pinning
    // vetoed the preparation.
    MmuLock::Lock();
    if (aOldPageInfo->CheckModified(&pageIter) || oldPageEntry != *movingPageArrayPtr)
        {// Page is pinned or has been modified by another operation.
        MmuLock::Unlock();
        TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
        goto remap;
        }

    MmuLock::Unlock();
    // Copy the contents of the page using some temporary mappings.
    tmpPtrOld = (TAny*)TheMmu.MapTemp(oldPage, index, KOldMappingSlot);
    tmpPtrNew = (TAny*)TheMmu.MapTemp(newPage, index, KNewMappingSlot);
    pagecpy(tmpPtrNew, tmpPtrOld);

    // Unmap and perform cache maintenance if the memory object is executable.
    // Must do cache maintenance before we add any new mappings to the new page
    // to ensure that any old instruction cache entries for the new page aren't
    // picked up by any remapped executable mappings.
    if (aMemory->IsExecutable())
        CacheMaintenance::CodeChanged((TLinAddr)tmpPtrNew, KPageSize);
    TheMmu.UnmapTemp(KNewMappingSlot);
#ifndef _DEBUG
    TheMmu.UnmapTemp(KOldMappingSlot);
#endif

    MmuLock::Lock();
    if (!aOldPageInfo->CheckModified(&pageIter) && oldPageEntry == *movingPageArrayPtr &&
        !aMemory->MappingAddedFlag())
        {
        // The page has been copied without anyone modifying it so set the page
        // array entry to the new physical address and map the page.
        RPageArray::PageMoveNewAddr(*movingPageArrayPtr, newPage);

        // Copy across the page info data from the old page to the new.
        SPageInfo& newPageInfo = *SPageInfo::FromPhysAddr(newPage);
        newPageInfo = *aOldPageInfo;
        if (aMemory->IsDemandPaged())
            {// Let the pager deal with the live list links for this page if required.
            ThePager.ReplacePage(*aOldPageInfo, newPageInfo);
            }

        MmuLock::Unlock();
        r = KErrNone;
        aNewPage = newPage;
        }
    else
        {
        MmuLock::Unlock();
        TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
        }
    remap:
    // Remap all mappings to the new physical address if the move was successful or
    // back to the old page if the move failed.
    // Invalidate the TLB for the page if old mappings still exist or new
    // mappings were added but will be removed as the page can't be moved.
    TBool invalidateTLB = !pageRestrictedNA || r != KErrNone;
    aMemory->RemapPage(*movingPageArrayPtr, index, invalidateTLB);

    if (r == KErrNone)
        {// Must wait until here as read only memory objects' mappings aren't
        // all guaranteed to point to the new page until after RemapPage().
        TheMmu.FreeRam(&oldPage, 1, aMemory->iManager->PageType());
#ifdef _DEBUG
        // For testing purposes clear the old page to help detect any
        // erroneous mappings to the old page.
        memclr((TAny*)tmpPtrOld, KPageSize);
        }
    TheMmu.UnmapTemp(KOldMappingSlot); // Will invalidate the TLB entry for the mapping.
#else
        }
#endif
    // indicate we've stopped moving memory now...
    MmuLock::Lock();
    RPageArray::MovePageEnd(*movingPageArrayPtr);
    MmuLock::Unlock();

    return r;
    }
|

TInt DMovableMemoryManager::HandleFault(DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
                                        TUint aMapInstanceCount, TUint aAccessPermissions)
    {
    TInt r = KErrNotFound;
    SPageInfo* pageInfo;
    MmuLock::Lock();
    __UNLOCK_GUARD_START(MmuLock);
    TPhysAddr* const pageEntry = aMemory->iPages.PageEntry(aIndex);
    if (!pageEntry || !RPageArray::IsPresent(*pageEntry) ||
        aMapInstanceCount != aMapping->MapInstanceCount() || aMapping->BeingDetached())
        {// The page isn't present or has been unmapped, so this is an invalid access.
        goto exit;
        }

    if (aMapping->MovingPageIn(*pageEntry, aIndex))
        {// The page has been paged in as it was still mapped.
        pageInfo = SPageInfo::FromPhysAddr(*pageEntry & ~KPageMask);
        pageInfo->SetModifier(0); // Signal to MovePage() that the page has been paged in.
        r = KErrNone;
        }

    exit:
    __UNLOCK_GUARD_END(MmuLock);
    MmuLock::Unlock();
    return r;
    }
|

TZonePageType DMovableMemoryManager::PageType()
    {// Movable memory object pages are movable.
    return EPageMovable;
    }
|

/**
Manager for memory objects containing normal unpaged RAM, which
as well as being 'movable', like #DMovableMemoryManager,
may also have regions marked as 'discardable'. Discardable pages may be
reclaimed (removed) by the system at any time; this state is controlled using
the functions #AllowDiscard and #DisallowDiscard.

This is used for the memory containing file system caches. Discardable memory
is managed using mechanisms similar to those for demand paged memory:
discardable pages are donated to the pager, which may steal them again when
RAM is needed elsewhere.
*/
class DDiscardableMemoryManager : public DMovableMemoryManager
    {
public:
    // from DMemoryManager...
    virtual TInt AllowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
    virtual TInt DisallowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
    virtual TInt StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo);
    virtual TInt RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction);
    virtual TZonePageType PageType();
public:
    /**
    The single instance of this manager class.
    */
    static DDiscardableMemoryManager TheManager;
    };
|

DDiscardableMemoryManager DDiscardableMemoryManager::TheManager;
DMemoryManager* TheDiscardableMemoryManager = &DDiscardableMemoryManager::TheManager;
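
// AllowDiscard() donates the pages to the pager, placing them on its live list
// so they can be stolen like demand-paged pages; DisallowDiscard() reclaims
// them, failing with KErrNotFound if any page in the region is absent.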
|
TInt DDiscardableMemoryManager::AllowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    TRACE2(("DDiscardableMemoryManager::AllowDiscard(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    // make iterator for region...
    RPageArray::TIter pageIter;
    aMemory->iPages.FindStart(aIndex,aCount,pageIter);

    for(;;)
        {
        // find some pages...
        RPageArray::TIter pageList;
        TUint nFound = pageIter.Find(pageList);
        if(!nFound)
            break;

        // donate pages...
        TUint n;
        TPhysAddr* pages;
        while((n=pageList.Pages(pages,KMaxPagesInOneGo))!=0)
            {
            pageList.Skip(n);
            ThePager.DonatePages(n,pages);
            }

        // move on...
        pageIter.FindRelease(nFound);
        }

    // done...
    aMemory->iPages.FindEnd(aIndex,aCount);

    return KErrNone;
    }
|

TInt DDiscardableMemoryManager::DisallowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    TRACE2(("DDiscardableMemoryManager::DisallowDiscard(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    TInt r = KErrNone;

    // get pages...
    RPageArray::TIter pageIter;
    aMemory->iPages.FindStart(aIndex,aCount,pageIter);

    RPageArray::TIter pageList;
    TUint numPages = pageIter.Find(pageList);

    if(numPages!=aCount)
        {
        // not all pages are present...
        r = KErrNotFound;
        }
    else
        {
        TUint n;
        TPhysAddr* pages;
        while((n=pageList.Pages(pages,KMaxPagesInOneGo))!=0)
            {
            pageList.Skip(n);
            r = ThePager.ReclaimPages(n,pages);
            if(r!=KErrNone)
                break;
            }
        }

    // done with pages...
    if(numPages)
        pageIter.FindRelease(numPages);
    aMemory->iPages.FindEnd(aIndex,aCount);

    return r;
    }
|

TInt DDiscardableMemoryManager::StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
    {
    TRACE2(("DDiscardableMemoryManager::StealPage(0x%08x,0x%08x)",aMemory,aPageInfo));
    __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __UNLOCK_GUARD_START(MmuLock);

    TUint index = aPageInfo->Index();
    TInt r;

    RPageArray::TIter pageList;
    TPhysAddr* p = aMemory->iPages.StealPageStart(index,pageList);
    __NK_ASSERT_DEBUG((*p&~KPageMask)==aPageInfo->PhysAddr()); // object should have our page

    aPageInfo->SetModifier(&pageList);

    __UNLOCK_GUARD_END(MmuLock);
    MmuLock::Unlock();

    // unmap the page...
    aMemory->UnmapPages(pageList,false);

    MmuLock::Lock();

    __NK_ASSERT_DEBUG((*p&~KPageMask)==aPageInfo->PhysAddr()); // object should still have our page because freeing a page requires the RamAllocLock, which we hold

    if(aPageInfo->CheckModified(&pageList))
        {
        // page state was changed, this can only happen if a page fault put this page
        // back into the committed state or if the page was pinned.
        // From either of these states it's possible to subsequently change
        // to any other state or use (so we can't assert anything here).
        r = KErrInUse;
        }
    else
        {
        // nobody else has modified page state, so we can...
        TPhysAddr page = *p;
        __NK_ASSERT_DEBUG(RPageArray::TargetStateIsDecommitted(page));
        if(page&RPageArray::EUnmapVetoed)
            {
            // operation was vetoed, which means the page had a pinned mapping but the pin
            // operation hadn't got around to removing the page from the live list;
            // we need to restore the correct state...
            if(RPageArray::State(page)==RPageArray::EStealing)
                *p = (page&~(RPageArray::EStateMask|RPageArray::EUnmapVetoed))|RPageArray::ECommitted;
            // else
            //   leave page in the state it was before we attempted to steal it

            // put page back on live list so it doesn't get lost.
            // We put it at the start as if it were recently accessed because being pinned
            // counts as an access and we can't put it anywhere else otherwise when
            // page stealing retries it may get this same page again, potentially causing
            // deadlock.
            __NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged); // no one else has changed page since we removed it in DPager::StealPage
            ThePager.PagedIn(aPageInfo);

            r = KErrInUse;
            }
        else
            {
            // page successfully unmapped...
            aPageInfo->SetReadOnly(); // page not mapped, so must be read-only

            // if the page can be made clean...
            r = aMemory->iManager->CleanPage(aMemory,aPageInfo,p);

            if(r==KErrNone)
                {
                // page successfully stolen...
                __NK_ASSERT_DEBUG((*p^page)<(TUint)KPageSize); // sanity check, page should still be allocated to us
                __NK_ASSERT_DEBUG(aPageInfo->IsDirty()==false);
                __NK_ASSERT_DEBUG(aPageInfo->IsWritable()==false);

                TPhysAddr pagerInfo = aPageInfo->PagingManagerData();
                *p = pagerInfo;
                __NK_ASSERT_ALWAYS((pagerInfo&(RPageArray::EFlagsMask|RPageArray::EStateMask)) == RPageArray::ENotPresent);

                TheMmu.PageFreed(aPageInfo);
                }
            else
                {
                // only legitimate reason for failing the clean is if the page state was changed
                // by a page fault or by pinning; this should return KErrInUse...
                __NK_ASSERT_DEBUG(r==KErrInUse);
                }
            }
        }

    aMemory->iPages.StealPageEnd(index,r==KErrNone ? 1 : 0);

#ifdef _DEBUG
    if(r!=KErrNone)
        TRACE2(("DDiscardableMemoryManager::StealPage fail because preempted"));
#endif

    TRACE2(("DDiscardableMemoryManager::StealPage returns %d",r));
    return r;
    }
|

TInt DDiscardableMemoryManager::RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
    {
    if(aRestriction==ERestrictPagesNoAccessForOldPage)
        {
        // Lie to the pager when it sets an old page inaccessible, as we don't want to
        // rejuvenate the page if it is accessed; RChunk::Lock() should be used to remove
        // the page from the live list before accessing the page.
        return KErrNone;
        }
    return DMovableMemoryManager::RestrictPage(aMemory, aPageInfo, aRestriction);
    }
|

TZonePageType DDiscardableMemoryManager::PageType()
    {// Discardable memory object pages are movable unless they are donated to the pager.
    return EPageMovable;
    }
|


/**
Manager for memory objects containing memory mapped hardware devices or special
purpose memory for which the physical addresses are fixed.
*/
class DHardwareMemoryManager : public DMemoryManager
    {
public:
    // from DMemoryManager...
    virtual void Destruct(DMemoryObject* aMemory);
    virtual TInt AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages);
    virtual TInt AddContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr aPhysAddr);
    virtual TInt RemovePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages);
    virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
    virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);

private:
    /**
    Update the page information structure for RAM added with #AddPages and #AddContiguous.

    This performs debug checks to ensure that any physical memory which is added to more
    than one memory object meets the restrictions imposed by the MMU and cache hardware.
    It also verifies that the RAM pages are of type SPageInfo::EPhysAlloc,
    i.e. were allocated with Epoc::AllocPhysicalRam or similar.

    This is only used when the physical address of the page being added to a memory
    object corresponds to RAM being managed by the kernel, i.e. physical addresses
    with an associated #SPageInfo structure.

    @param aMemory   A memory object associated with this manager.
    @param aIndex    Page index, within the memory, for the page.
    @param aPageInfo The page information structure of the RAM page.

    @pre #MmuLock held.
    @post #MmuLock held.
    */
    static void AssignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo);

    /**
    Update the page information structure for RAM removed with #RemovePages.

    This is only used when the physical address of the page being removed from a memory
    object corresponds to RAM being managed by the kernel, i.e. physical addresses
    with an associated #SPageInfo structure.

    @param aMemory   A memory object associated with this manager.
    @param aIndex    Page index, within the memory, for the page.
    @param aPageInfo The page information structure of the RAM page.

    @pre #MmuLock held.
    @post #MmuLock held.
    */
    static void UnassignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo);

public:
    /**
    The single instance of this manager class.
    */
    static DHardwareMemoryManager TheManager;
    };
|

DHardwareMemoryManager DHardwareMemoryManager::TheManager;
DMemoryManager* TheHardwareMemoryManager = &DHardwareMemoryManager::TheManager;


void DHardwareMemoryManager::Destruct(DMemoryObject* aMemory)
    {
    MemoryObjectLock::Lock(aMemory);
    RemovePages(aMemory,0,aMemory->iSizeInPages,0);
    MemoryObjectLock::Unlock(aMemory);
    aMemory->Close();
    }
|

TInt DHardwareMemoryManager::AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
    {
    TRACE2(("DHardwareMemoryManager::AddPages(0x%08x,0x%x,0x%x,?)",aMemory, aIndex, aCount));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    // validate arguments...
    TPhysAddr* pages = aPages;
    TPhysAddr* pagesEnd = aPages+aCount;
    TPhysAddr checkMask = 0;
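    // OR all the addresses together; any page which is not page-aligned will
    // leave a bit set in the low-order part of the combined value.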
|
    do checkMask |= *pages++;
    while(pages<pagesEnd);
    if(checkMask&KPageMask)
        return KErrArgument;

    // check and allocate page array entries...
    RPageArray::TIter pageIter;
    TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageIter);
    if(r!=KErrNone)
        return r;

    // assign pages...
    pages = aPages;
    TUint index = aIndex;
    TUint flash = 0;
    MmuLock::Lock();
    do
        {
        MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); // flash twice as often because we're doing about twice the work as a simple page info update
        TPhysAddr pagePhys = *pages++;
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
        if(pi)
            AssignPage(aMemory,index,pi);
        ++index;
        }
    while(pages<pagesEnd);
    MmuLock::Unlock();

    // map the pages...
    RPageArray::TIter pageList = pageIter;
    pageIter.Add(aCount,aPages);
    r = aMemory->MapPages(pageList);

    // release page array entries...
    aMemory->iPages.AddEnd(aIndex,aCount);

    // revert if error...
    if(r!=KErrNone)
        RemovePages(aMemory,aIndex,aCount,0);

    return r;
    }
|

TInt DHardwareMemoryManager::AddContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr aPhysAddr)
    {
    TRACE2(("DHardwareMemoryManager::AddContiguous(0x%08x,0x%x,0x%x,0x%08x)",aMemory, aIndex, aCount, aPhysAddr));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    // validate arguments...
    if(aPhysAddr&KPageMask)
        return KErrArgument;

    // check and allocate page array entries...
    RPageArray::TIter pageIter;
    TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageIter);
    if(r!=KErrNone)
        return r;

    RPageArray::TIter pageList = pageIter;

    // assign pages...
    SPageInfo* piStart = SPageInfo::SafeFromPhysAddr(aPhysAddr);
    SPageInfo* piEnd = piStart+aCount;
    if(piStart)
        {
        SPageInfo* pi = piStart;
        TUint index = aIndex;
        TUint flash = 0;
        MmuLock::Lock();
        while(pi<piEnd)
            {
            MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); // flash twice as often because we're doing about twice the work as a simple page info update
            AssignPage(aMemory,index,pi);
            ++index;
            ++pi;
            }
        MmuLock::Unlock();
        }

    // map the pages...
    pageIter.AddContiguous(aCount,aPhysAddr);
    r = aMemory->MapPages(pageList);

    // release page array entries...
    aMemory->iPages.AddEnd(aIndex,aCount);

    // revert if error...
    if(r!=KErrNone)
        RemovePages(aMemory,aIndex,aCount,0);

    return r;
    }
|

TInt DHardwareMemoryManager::RemovePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
    {
    TRACE2(("DHardwareMemoryManager::RemovePages(0x%08x,0x%x,0x%x,?)",aMemory, aIndex, aCount));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

    RPageArray::TIter pageIter;
    aMemory->iPages.FindStart(aIndex,aCount,pageIter);

    TUint numPages = 0;
    for(;;)
        {
        // find some pages...
        RPageArray::TIter pageList;
        TUint n = pageIter.RemoveFind(pageList);
        if(!n)
            break;

        // unmap some pages...
        aMemory->UnmapPages(pageList,true);

        // free pages...
        TPhysAddr pagePhys;
        while(pageList.Remove(1,&pagePhys))
            {
            if(aPages)
                *aPages++ = pagePhys;
            ++numPages;

            __NK_ASSERT_DEBUG((pagePhys&KPageMask)==0);

            TUint index = pageList.Index()-1;
            SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
            if(!pi)
                TheMmu.CleanAndInvalidatePages(&pagePhys,1,aMemory->Attributes(),index);
            else
                {
                MmuLock::Lock();
                UnassignPage(aMemory,index,pi);
                MmuLock::Unlock();
                }
            }

        // move on...
        pageIter.FindRelease(n);
        }

    aMemory->iPages.FindEnd(aIndex,aCount);

    return numPages;
    }
|

void DHardwareMemoryManager::AssignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo)
    {
    TRACE2(("DHardwareMemoryManager::AssignPage(0x%08x,0x%x,phys=0x%08x)",aMemory, aIndex, aPageInfo->PhysAddr()));
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aPageInfo->Type()==SPageInfo::EPhysAlloc);
    TUint flags = aMemory->PageInfoFlags();
    if(aPageInfo->UseCount()==0)
        {
        // not mapped yet...
        aPageInfo->SetMapped(aIndex,flags);
        }
    else
        {
        // already mapped somewhere...
        TMemoryType type = (TMemoryType)(flags&KMemoryTypeMask);
        if(CacheMaintenance::IsCached(type))
            {
            // memory is cached at L1, check colour matches existing mapping...
            if( (aPageInfo->Index()^aIndex) & KPageColourMask )
                {
#ifdef _DEBUG
                Kern::Printf("DHardwareMemoryManager::AssignPage BAD COLOUR");
                aPageInfo->Dump();
#endif
                __NK_ASSERT_ALWAYS(0);
                }
            }
        // check memory type matches existing mapping...
        if( (aPageInfo->Flags()^flags) & EMemoryAttributeMask )
            {
#ifdef _DEBUG
            Kern::Printf("DHardwareMemoryManager::AssignPage BAD MEMORY TYPE");
            aPageInfo->Dump();
#endif
            __NK_ASSERT_ALWAYS(0);
            }
        }
    aPageInfo->IncUseCount();
    TRACE2(("DHardwareMemoryManager::AssignPage iUseCount=%d",aPageInfo->UseCount()));
    }
|

void DHardwareMemoryManager::UnassignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo)
    {
    TRACE2(("DHardwareMemoryManager::UnassignPage(0x%08x,0x%x,phys=0x%08x)",aMemory, aIndex, aPageInfo->PhysAddr()));
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    TRACE2(("DHardwareMemoryManager::UnassignPage iUseCount=%d",aPageInfo->UseCount()));
    __NK_ASSERT_DEBUG(aPageInfo->UseCount());
    if(!aPageInfo->DecUseCount())
        {
        // page no longer being used by any memory object, make sure its contents
        // are purged from the cache...
        TPhysAddr pagePhys = aPageInfo->PhysAddr();
        aPageInfo->SetModifier(&pagePhys);
        MmuLock::Unlock();
        TheMmu.CleanAndInvalidatePages(&pagePhys,1,aMemory->Attributes(),aIndex);
        MmuLock::Lock();
        if(!aPageInfo->CheckModified(&pagePhys)) // if page has not been reused...
            aPageInfo->SetUncached(); // we know the memory is not in the cache
        }
    }
|

TInt DHardwareMemoryManager::Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
    {
    return ((DUnpagedMemoryManager*)this)->DUnpagedMemoryManager::Pin(aMemory,aMapping,aPinArgs);
    }


void DHardwareMemoryManager::Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
    {
    ((DUnpagedMemoryManager*)this)->DUnpagedMemoryManager::Unpin(aMemory,aMapping,aPinArgs);
    }



//
// DPagedMemoryManager
//
|
TInt DPagedMemoryManager::New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
    {
    return DMemoryManager::New(aMemory, aSizeInPages, aAttributes, (TMemoryCreateFlags)(aCreateFlags | EMemoryCreateDemandPaged));
    }


void DPagedMemoryManager::Destruct(DMemoryObject* aMemory)
    {
    ((DUnpagedMemoryManager*)this)->DUnpagedMemoryManager::Destruct(aMemory);
    }


TInt DPagedMemoryManager::StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
    {
    return ((DDiscardableMemoryManager*)this)->DDiscardableMemoryManager::StealPage(aMemory,aPageInfo);
    }


TInt DPagedMemoryManager::MovePage( DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
                                    TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
    {
    return TheMovableMemoryManager->MovePage(aMemory, aOldPageInfo, aNewPage, aBlockZoneId, aBlockRest);
    }
|

TInt DPagedMemoryManager::RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
    {
    TRACE2(("DPagedMemoryManager::RestrictPage(0x%08x,0x%08x,%d)",aMemory,aPageInfo,aRestriction));
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());

    TUint index = aPageInfo->Index();
    TInt r = KErrNotFound;

    TPhysAddr page;
    TPhysAddr originalPage;
    RPageArray::TIter pageList;
    TPhysAddr* p = aMemory->iPages.RestrictPageNAStart(index,pageList);
    if(!p)
        goto fail;
    originalPage = *p;
    __NK_ASSERT_DEBUG((originalPage&~KPageMask)==aPageInfo->PhysAddr());

    aPageInfo->SetModifier(&pageList);

    MmuLock::Unlock();

    // restrict page...
    aMemory->RestrictPages(pageList,aRestriction);

    MmuLock::Lock();

    page = *p;
    if(aPageInfo->CheckModified(&pageList) || page!=originalPage/*page state changed*/)
        {
        // page state was changed by someone else...
        r = KErrInUse;
        }
    else
        {
        // nobody else has modified page state, so restrictions successfully applied...
        *p = (page&~RPageArray::EStateMask)|RPageArray::ECommitted; // restore state
        aPageInfo->SetReadOnly();
        r = KErrNone;
        }

    aMemory->iPages.RestrictPageNAEnd(index);

#ifdef _DEBUG
    if(r!=KErrNone)
        TRACE2(("DPagedMemoryManager::RestrictPage fail because preempted or vetoed"));
#endif

    fail:
    TRACE2(("DPagedMemoryManager::RestrictPage returns %d",r));
    return r;
    }
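
// Demand-paging fault handling: if the page is already present it is simply
// used; otherwise a paging request object is acquired, a RAM page is taken
// from the paging cache, the content is read from the backing store and the
// page committed. Throughout, r>0 is used to mean "not done yet".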
|
TInt DPagedMemoryManager::HandleFault( DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
                                       TUint aMapInstanceCount, TUint aAccessPermissions)
    {
    TPinArgs pinArgs;
    pinArgs.iReadOnly = !(aAccessPermissions&EReadWrite);

    TUint usedNew = 0;

    RPageArray::TIter pageList;
    TPhysAddr* p = aMemory->iPages.AddPageStart(aIndex,pageList);
    __NK_ASSERT_ALWAYS(p); // we should never run out of memory handling a paging fault

    TInt r = 1; // positive value to indicate nothing done

    // if memory object already has page, then we can use it...
    MmuLock::Lock();
    if(RPageArray::IsPresent(*p))
        {
        r = PageInDone(aMemory,aIndex,0,p);
        __NK_ASSERT_DEBUG(r<=0); // can't return >0 as we didn't supply a new page
        }
    MmuLock::Unlock();

    if(r>0)
        {
        // need to read page from backing store...

        // get paging request object...
        DPageReadRequest* req;
        do
            {
            r = AcquirePageReadRequest(req,aMemory,aIndex,1);
            __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
            if(r==KErrNone)
                {
                // if someone else has since read our page, then we can use it...
                MmuLock::Lock();
                r = 1;
                if(RPageArray::IsPresent(*p))
                    {
                    r = PageInDone(aMemory,aIndex,0,p);
                    __NK_ASSERT_DEBUG(r<=0); // can't return >0 as we didn't supply a new page
                    }
                MmuLock::Unlock();
                }
            }
        while(r>0 && !req); // while not paged in && don't have a request object

        if(r>0)
            {
            // still need to read page from backing store...

            // get RAM page...
            TPhysAddr pagePhys;
            r = ThePager.PageInAllocPages(&pagePhys,1,aMemory->RamAllocFlags());
            __NK_ASSERT_DEBUG(r!=KErrNoMemory);
            if(r==KErrNone)
                {
                // read data for page...
                r = ReadPages(aMemory,aIndex,1,&pagePhys,req);
                __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
                if(r!=KErrNone)
                    {
                    // error, so free unused pages...
                    ThePager.PageInFreePages(&pagePhys,1);
                    }
                else
                    {
                    // use new page...
                    MmuLock::Lock();
                    r = PageInDone(aMemory,aIndex,SPageInfo::FromPhysAddr(pagePhys),p);
                    MmuLock::Unlock();
                    if(r>0)
                        {
                        // new page actually used...
                        r = KErrNone;
                        usedNew = 1;
                        }
                    }
                }
            }

        // done with paging request object...
        if(req)
            req->Release();
        }

    // map page...
    if(r==KErrNone && aMapping)
        {
        r = aMapping->PageIn(pageList, pinArgs, aMapInstanceCount);
        __NK_ASSERT_ALWAYS(r!=KErrNoMemory); // we should never run out of memory handling a paging fault
#ifdef COARSE_GRAINED_TLB_MAINTENANCE
        InvalidateTLB();
#endif
        }

    // finished with this page...
    aMemory->iPages.AddPageEnd(aIndex,usedNew);

    __NK_ASSERT_ALWAYS(r!=KErrNoMemory); // we should never run out of memory handling a paging fault
    return r;
    }
|
1647 |
|
1648 |
|
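// Pin every page covered by aMapping; a thin wrapper which applies DoPin()
// to the mapping's whole range.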
TInt DPagedMemoryManager::Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	__ASSERT_CRITICAL;
	return DoPin(aMemory,aMapping->iStartIndex,aMapping->iSizeInPages,aMapping,aPinArgs);
	}


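// Pin aCount pages of aMemory starting at aIndex. Pages already committed are
// pinned in place; runs of absent pages are read from backing store in batches
// of up to DPageReadRequest::EMaxPages. The helpers follow the same return
// convention as in HandleFault() above: positive means "nothing done yet",
// KErrNone means progress was made, and negative values are real errors.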
TInt DPagedMemoryManager::DoPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	TRACE(("DPagedMemoryManager::DoPin(0x%08x,0x%08x,0x%08x,0x%08x)",aMemory, aIndex, aCount, aMapping));
	__ASSERT_CRITICAL;
	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(aCount));

	// check and allocate page array entries...
	RPageArray::TIter pageList;
	TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageList,true);
	if(r!=KErrNone)
		return r;

	RPageArray::TIter pageIter = pageList;
	TUint n;
	TPhysAddr* pages;
	while((n=pageIter.Pages(pages,DPageReadRequest::EMaxPages))!=0)
		{
		MmuLock::Lock();

		if(RPageArray::IsPresent(*pages))
			{
			// pin page which is already committed to memory object...
			r = PageInPinnedDone(aMemory,pageIter.Index(),0,pages,aPinArgs);
			__NK_ASSERT_DEBUG(r<=0); // can't return >0 as we didn't supply a new page
			}
		else
			{
			// count consecutive pages which need to be read...
			TUint i;
			for(i=1; i<n; ++i)
				if(RPageArray::IsPresent(pages[i]))
					break;
			n = i;
			r = 1; // positive value to indicate nothing done
			}

		MmuLock::Unlock();

		if(r==KErrNone)
			{
			// successfully pinned one page, so move on to next one...
			pageIter.Skip(1);
			continue;
			}
		else if(r<0)
			{
			// error, so end...
			break;
			}

		// need to read pages from backing store...

		// get paging request object...
		DPageReadRequest* req;
		TUint i;
		do
			{
			i = 0;
			r = AcquirePageReadRequest(req,aMemory,pageIter.Index(),n);
			if(r==KErrNone)
				{
				// see if someone else has since read any of our pages...
				MmuLock::Lock();
				for(; i<n; ++i)
					if(RPageArray::IsPresent(pages[i]))
						break;
				MmuLock::Unlock();
				}
			}
		while(i==n && !req); // while still need all pages && don't have a request object

		// if don't need all pages any more...
		if(i!=n)
			{
			// retry loop...
			if(req)
				req->Release();
			continue;
			}

		// keep count of number of pages actually added to memory object...
		TUint usedNew = 0;

		// get RAM pages...
		TPhysAddr newPages[DPageReadRequest::EMaxPages];
		__NK_ASSERT_DEBUG(n<=DPageReadRequest::EMaxPages);
		r = ThePager.PageInAllocPages(newPages,n,aMemory->RamAllocFlags());
		if(r==KErrNone)
			{
			// read data for pages...
			r = ReadPages(aMemory,pageIter.Index(),n,newPages,req);
			if(r!=KErrNone)
				{
				// error, so free unused pages...
				ThePager.PageInFreePages(newPages,n);
				}
			else
				{
				// use new pages...
				for(i=0; i<n; ++i)
					{
					MmuLock::Lock();
					r = PageInPinnedDone(aMemory,
										pageIter.Index()+i,
										SPageInfo::FromPhysAddr(newPages[i]),
										pages+i,
										aPinArgs
										);
					MmuLock::Unlock();
					if(r>0)
						{
						// new page actually used...
						r = KErrNone;
						++usedNew;
						}
					if(r!=KErrNone)
						{
						// error, so free remaining unused pages...
						ThePager.PageInFreePages(newPages+(i+1),n-(i+1));
						// and update array for any pages already added...
						if(i)
							pageIter.Added(i,usedNew);
						break;
						}
					}
				}
			}

		// done with paging request object...
		if(req)
			req->Release();

		if(r!=KErrNone)
			break; // error, so give up

		// move on to next set of pages...
		pageIter.Added(n,usedNew);
		}

	// map pages...
	if(r==KErrNone)
		{// Page in the page with the pinning mapping, OK to get the instance count here
		// without any locking as the pinned mapping can't be reused for another purpose
		// during this method.
		r = aMapping->PageIn(pageList, aPinArgs, aMapping->MapInstanceCount());
#ifdef COARSE_GRAINED_TLB_MAINTENANCE
		InvalidateTLB();
#endif
		}

	// release page array entries...
	aMemory->iPages.AddEnd(aIndex,aCount);

	if(r==KErrNone)
		{
		// set EPagesPinned flag to indicate success...
		__NK_ASSERT_DEBUG((aMapping->Flags()&DMemoryMapping::EPagesPinned)==0);
		__e32_atomic_ior_ord8(&aMapping->Flags(), (TUint8)DMemoryMapping::EPagesPinned);
		}
	else
		{
		// cleanup on error...
		TUint pinnedCount = pageIter.Index()-aIndex; // number of pages actually pinned
		DoUnpin(aMemory,aIndex,pinnedCount,aMapping,aPinArgs);
		}

	return r;
	}


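// Undo a successful Pin(). Checking EPagesPinned makes this safe to call even
// when the original pin attempt failed, since DoPin() only sets the flag on
// success and cleans up after itself on error.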
void DPagedMemoryManager::Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	__ASSERT_CRITICAL;
	// if mapping successfully pinned...
	if(aMapping->Flags()&DMemoryMapping::EPagesPinned)
		{
		// then undo pinning...
		DoUnpin(aMemory,aMapping->iStartIndex,aMapping->iSizeInPages,aMapping,aPinArgs);
		}
	}


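// Unpin aCount pages starting at aIndex. MmuLock::Flash() momentarily releases
// and re-acquires MmuLock on each iteration, so a long unpin run doesn't hold
// the lock for an unbounded time.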
void DPagedMemoryManager::DoUnpin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	TRACE(("DPagedMemoryManager::DoUnpin(0x%08x,0x%08x,0x%08x,0x%08x,?)",aMemory, aIndex, aCount, aMapping));
	__ASSERT_CRITICAL;

	MmuLock::Lock();
	TUint endIndex = aIndex+aCount;
	for(TUint i=aIndex; i<endIndex; ++i)
		{
		TPhysAddr page = aMemory->iPages.Page(i);
		__NK_ASSERT_DEBUG(RPageArray::IsPresent(page));
		__NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(page&~KPageMask));
		ThePager.Unpin(SPageInfo::FromPhysAddr(page),aPinArgs);
		MmuLock::Flash();
		}
	MmuLock::Unlock();

	// clear EPagesPinned flag...
	__e32_atomic_and_ord8(&aMapping->Flags(), TUint8(~DMemoryMapping::EPagesPinned));
	}


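// Return any decommitted pages still held by aMemory to the system, serialised
// by the memory object lock.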
void DPagedMemoryManager::DoCleanupDecommitted(DMemoryObject* aMemory)
	{
	MemoryObjectLock::Lock(aMemory);
	FreeDecommitted(aMemory,0,aMemory->iSizeInPages);
	MemoryObjectLock::Unlock(aMemory);
	}


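// PageInDone()/PageInPinnedDone() complete a page-in under MmuLock and share
// DoPageInDone()'s return convention:
//   1            - the caller's newly-read page was consumed
//   0            - an existing page was used instead
//   KErrNotFound - the memory was decommitted, or there was no page to use
// A minimal sketch of how callers consume this, mirroring HandleFault() above
// ('memory', 'index', 'pagePhys', 'p' and 'usedNew' stand for caller locals):
//
//   MmuLock::Lock();
//   TInt r = PageInDone(memory, index, SPageInfo::FromPhysAddr(pagePhys), p);
//   MmuLock::Unlock();
//   if(r > 0)
//       {
//       r = KErrNone;  // new page accepted...
//       usedNew = 1;   // ...so it must not be returned to the free pool
//       }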
TInt DPagedMemoryManager::PageInDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo, TPhysAddr* aPageArrayEntry)
	{
	TInt r = DoPageInDone(aMemory,aIndex,aPageInfo,aPageArrayEntry,false);

	if(r>=0)
		ThePager.PagedIn(aPageInfo);

	// check page assigned correctly...
#ifdef _DEBUG
	if(RPageArray::IsPresent(*aPageArrayEntry))
		{
		SPageInfo* pi = SPageInfo::FromPhysAddr(*aPageArrayEntry);
		__NK_ASSERT_DEBUG(pi->Owner()==aMemory);
		__NK_ASSERT_DEBUG(pi->Index()==aIndex);
		}
#endif

	return r;
	}


TInt DPagedMemoryManager::PageInPinnedDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo, TPhysAddr* aPageArrayEntry, TPinArgs& aPinArgs)
	{
	TInt r = DoPageInDone(aMemory,aIndex,aPageInfo,aPageArrayEntry,true);

	if(r>=0)
		ThePager.PagedInPinned(aPageInfo,aPinArgs);

	// check page assigned correctly...
#ifdef _DEBUG
	if(RPageArray::IsPresent(*aPageArrayEntry))
		{
		SPageInfo* pi = SPageInfo::FromPhysAddr(*aPageArrayEntry);
		__NK_ASSERT_DEBUG(pi->Owner()==aMemory);
		__NK_ASSERT_DEBUG(pi->Index()==aIndex);
		if(r>=0)
			__NK_ASSERT_DEBUG(pi->PagedState()==SPageInfo::EPagedPinned);
		}
#endif

	return r;
	}


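// Core of page-in completion: choose between the freshly read page (aPageInfo)
// and any page which has raced into the array since the read began, then mark
// the chosen page array entry ECommitted. MmuLock must be held throughout; the
// __UNLOCK_GUARD markers are debug-build checks that it is not released here.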
TInt DPagedMemoryManager::DoPageInDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo*& aPageInfo, TPhysAddr* aPageArrayEntry, TBool aPinning)
	{
	TRACE(("DPagedMemoryManager::DoPageInDone(0x%08x,0x%08x,0x%08x,?,%d)",aMemory,aIndex,aPageInfo,aPinning));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	__UNLOCK_GUARD_START(MmuLock);

	SPageInfo* pi = aPageInfo;

	if(!IsAllocated(aMemory,aIndex,1))
		{
		// memory has been decommitted from memory object...
		if(pi)
			ThePager.PagedInUnneeded(pi);
		__UNLOCK_GUARD_END(MmuLock);
		aPageInfo = 0;
		return KErrNotFound;
		}

	TPhysAddr oldPage = *aPageArrayEntry;
	TBool useNew = (bool)!RPageArray::IsPresent(oldPage);
	if(useNew)
		{
		if(!pi)
			{
			__UNLOCK_GUARD_END(MmuLock);
			// aPageInfo = 0; // this is already set to zero
			return KErrNotFound; // no new page to use
			}

		// assign page to memory object...
		pi->SetManaged(aMemory,aIndex,aMemory->PageInfoFlags());

		ThePager.Event(DPager::EEventPageInNew,pi);

		// save any paging manager data stored in page array before we overwrite it...
		pi->SetPagingManagerData(*aPageArrayEntry);
		}
	else
		{
		__NK_ASSERT_DEBUG(!pi); // should only have read new page if none present

		// discard new page...
		if(pi)
			ThePager.PagedInUnneeded(pi);

		// check existing page can be committed...
		if(RPageArray::State(oldPage)<=RPageArray::EDecommitting)
			{
			__UNLOCK_GUARD_END(MmuLock);
			aPageInfo = 0;
			return KErrNotFound;
			}

		// and use one we already have...
		SPageInfo* newPage = SPageInfo::FromPhysAddr(oldPage);

		if(!pi && !aPinning)
			ThePager.Event(DPager::EEventPageInAgain,newPage);

		pi = newPage;
		pi->SetModifier(0); // so observers see page state has changed
		}

	// set page array entry...
	TPhysAddr pagePhys = pi->PhysAddr();
	*aPageArrayEntry = pagePhys|RPageArray::ECommitted;

	// return the page we actually used...
	aPageInfo = pi;

	__UNLOCK_GUARD_END(MmuLock);
	return useNew;
	}


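// Decompress aSrcBytes at aSrc into the buffer at aDst, returning the number
// of bytes produced or an error code. Type 0 is a plain copy; byte-pair data
// is handed to the generic BytePairDecompress() from decompress.h. A hedged
// usage sketch, with 'dst', 'src' and 'srcSize' as hypothetical values:
//
//   TInt produced = Decompress(KUidCompressionBytePair,
//                              (TLinAddr)dst, KPageSize,
//                              (TLinAddr)src, srcSize);
//   // on success 'produced' is the decompressed size, e.g. a whole page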
TInt DPagedMemoryManager::Decompress(TUint32 aCompressionType, TLinAddr aDst, TUint aDstBytes, TLinAddr aSrc, TUint aSrcBytes)
	{
#ifdef BTRACE_PAGING_VERBOSE
	BTraceContext4(BTrace::EPaging, BTrace::EPagingDecompressStart, aCompressionType);
#endif
	TInt r;
	switch(aCompressionType)
		{
	case 0:
		__NK_ASSERT_DEBUG(aSrcBytes == aDstBytes);
		memcpy((void*)aDst, (void*)aSrc, aSrcBytes);
		r = aSrcBytes;
		break;

	case SRomPageInfo::EBytePair:
	case KUidCompressionBytePair:
		{
		TUint8* srcNext = 0;
		START_PAGING_BENCHMARK;
		r = BytePairDecompress((TUint8*)aDst, aDstBytes, (TUint8*)aSrc, aSrcBytes, srcNext);
		END_PAGING_BENCHMARK(EPagingBmDecompress);
		if (r > 0)
			{
			// decompression successful so check srcNext points to the end of the compressed data...
			__NK_ASSERT_ALWAYS((TLinAddr)srcNext == aSrc + aSrcBytes);
			}
		}
		break;

	default:
		r = KErrNotSupported;
		break;
		}
#ifdef BTRACE_PAGING_VERBOSE
	BTraceContext0(BTrace::EPaging, BTrace::EPagingDecompressEnd);
#endif
	return r;
	}


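// This base manager has no mechanism for writing pages back, so the write path
// must never be reached: both functions assert unconditionally and report
// KErrNotSupported. A derived manager which supports dirty pages would be
// expected to override them.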
TInt DPagedMemoryManager::AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	__NK_ASSERT_ALWAYS(0);
	return KErrNotSupported;
	}


TInt DPagedMemoryManager::WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest)
	{
	__NK_ASSERT_ALWAYS(0);
	return KErrNotSupported;
	}


TZonePageType DPagedMemoryManager::PageType()
	{// Paged manager's pages should be discardable and will actually be freed by
	// the pager so this value won't be used.
	return EPageDiscard;
	}
