diff -r 73ea206103e6 -r 43365a9b78a3 kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp	Wed Jun 23 19:44:53 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp	Tue Jul 06 15:50:07 2010 +0300
@@ -131,12 +131,14 @@
 TInt DMemoryManager::MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
 								TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
 	{
+	MmuLock::Unlock();
 	return KErrNotSupported;
 	}
 
 
 TInt DMemoryManager::MoveAndAllocPage(DMemoryObject*, SPageInfo*, TZonePageType)
 	{
+	__NK_ASSERT_DEBUG(0);	// This should only be invoked on managers that can move or discard pages.
 	return KErrNotSupported;
 	}
 
@@ -748,20 +750,8 @@
 								TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
 	{
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
-
-	// Allocate the new page to move to, ensuring that we use the page type of the
-	// manager assigned to this page.
-	TPhysAddr newPage;
-	Mmu& m = TheMmu;
-	TInt r = m.AllocRam(&newPage, 1, aMemory->RamAllocFlags(), aMemory->iManager->PageType(),
-						aBlockZoneId, aBlockRest);
-	if (r != KErrNone)
-		{// Failed to allocate a new page to move the page to so can't continue.
-		return r;
-		}
-
-	r = KErrInUse;
-	MmuLock::Lock();
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	TInt r = KErrInUse;
 
 	TUint index = aOldPageInfo->Index();
 	TRACE(	("DMovableMemoryManager::MovePage(0x%08x,0x%08x,?,0x%08x,%d) index=0x%x",
@@ -774,7 +764,6 @@
 	if (!movingPageArrayPtr)
 		{// Can't move the page another operation is being performed on it.
 		MmuLock::Unlock();
-		TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
 		return r;
 		}
 	__NK_ASSERT_DEBUG(RPageArray::IsPresent(*movingPageArrayPtr));
@@ -787,13 +776,41 @@
 		__NK_ASSERT_DEBUG(SPageInfo::FromPhysAddr(oldPage)->Type() == SPageInfo::EShadow);
 		}
 #endif
+	// Set the modifier so we can detect if the page state is updated.
+	aOldPageInfo->SetModifier(&pageIter);
+
+	MmuLock::Unlock();
+
+	// Allocate the new page to move to, ensuring that we use the page type of the
+	// manager assigned to this page.
+	TPhysAddr newPage;
+	Mmu& m = TheMmu;
+	TInt allocRet = m.AllocRam(&newPage, 1, aMemory->RamAllocFlags(), aMemory->iManager->PageType(),
+								aBlockZoneId, aBlockRest);
+	if (allocRet != KErrNone)
+		{
+		MmuLock::Lock();
+		aMemory->iPages.MovePageEnd(*movingPageArrayPtr, index);
+		MmuLock::Unlock();
+		return allocRet;
+		}
+
 	__NK_ASSERT_DEBUG((newPage & KPageMask) == 0);
 	__NK_ASSERT_DEBUG(newPage != oldPage);
 
-	// Set the modifier so we can detect if the page state is updated.
-	aOldPageInfo->SetModifier(&pageIter);
+	MmuLock::Lock();
+	if (aOldPageInfo->CheckModified(&pageIter) ||
+		oldPageEntry != *movingPageArrayPtr)
+		{// The page was modified or freed.
+		aMemory->iPages.MovePageEnd(*movingPageArrayPtr, index);
+		MmuLock::Unlock();
+		m.FreeRam(&newPage, 1, aMemory->iManager->PageType());
+		return r;
+		}
+	MmuLock::Unlock();
 
-	// Restrict the page ready for moving.
+	// This page's contents may be changed so restrict the page to no access
+	// so we can detect any access to it while we are moving it.
 	// Read only memory objects don't need to be restricted but we still need
 	// to discover any physically pinned mappings.
 	TBool pageRestrictedNA = !aMemory->IsReadOnly();
@@ -801,9 +818,6 @@
 										ERestrictPagesNoAccessForMoving : ERestrictPagesForMovingFlag;
 
-	// This page's contents may be changed so restrict the page to no access
-	// so we can detect any access to it while we are moving it.
-	MmuLock::Unlock();
 	// This will clear the memory objects mapping added flag so we can detect any new mappings.
 	aMemory->RestrictPages(pageIter, restrictType);
 
@@ -816,7 +830,8 @@
 	// If the page array entry (*movingPageArrayPtr) has been modified then a pinning
 	// veto'd the preparation.
 	MmuLock::Lock();
-	if (aOldPageInfo->CheckModified(&pageIter) || oldPageEntry != *movingPageArrayPtr)
+	if (aOldPageInfo->CheckModified(&pageIter) ||
+		oldPageEntry != *movingPageArrayPtr)
 		{// Page is pinned or has been modified by another operation.
 		MmuLock::Unlock();
 		TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
@@ -841,7 +856,8 @@
 #endif
 
 	MmuLock::Lock();
-	if (!aOldPageInfo->CheckModified(&pageIter) && oldPageEntry == *movingPageArrayPtr &&
+	if (!aOldPageInfo->CheckModified(&pageIter) &&
+		oldPageEntry == *movingPageArrayPtr &&
 		!aMemory->MappingAddedFlag())
 		{
 		// The page has been copied without anyone modifying it so set the page
@@ -888,7 +904,7 @@
 #endif
 	// indicate we've stopped moving memory now...
 	MmuLock::Lock();
-	RPageArray::MovePageEnd(*movingPageArrayPtr);
+	aMemory->iPages.MovePageEnd(*movingPageArrayPtr, index);
 	MmuLock::Unlock();
 
 	return r;
@@ -1119,7 +1135,10 @@
 
 	// attempt to clean the page if it is dirty...
 	if (aPageInfo->IsDirty())
+		{
+		//Kern::Printf("WDP: Cleaning single page in StealPage");
 		aMemory->iManager->CleanPages(1, &aPageInfo, EFalse);
+		}
 
 	if(aPageInfo)
 		{
@@ -1535,10 +1554,7 @@
 		page = *p;
 		if(aPageInfo->CheckModified(&pageList) ||
 			page!=originalPage/*page state changed*/)
-			{
-			// page state was changed by someone else...
 			r = KErrInUse;
-			}
 		else
 			{
 			// nobody else has modified page state, so restrictions successfully applied...
@@ -1677,6 +1693,8 @@
 	TRACE(("DPagedMemoryManager::DoPin(0x%08x,0x%08x,0x%08x,0x%08x)",aMemory, aIndex, aCount, aMapping));
 	__ASSERT_CRITICAL;
 	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(aCount));
+	__NK_ASSERT_DEBUG(aMapping->IsPinned());
+	__NK_ASSERT_DEBUG(!aMapping->PagesPinned());
 
 	// check and allocate page array entries...
 	RPageArray::TIter pageList;
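
A note for readers outside the kernel tree on what the MovePage hunks above are doing: the function is now entered with the MmuLock held (and exits with it released, hence the MmuLock::Unlock() added to the base DMemoryManager::MovePage), records the page's modifier with SetModifier, drops the lock across the Mmu::AllocRam call, then re-takes it and re-validates via CheckModified and the saved page array entry, backing out through MovePageEnd/FreeRam if anything raced with it in the window. The sketch below is a minimal, self-contained analogue of that drop-and-revalidate pattern only; std::mutex, PageState, SlowAlloc and SlowFree are invented stand-ins, not kernel code.

    #include <cstdio>
    #include <cstdint>
    #include <mutex>

    // Hypothetical stand-ins: gStateLock plays the role of MmuLock, PageState the
    // role of SPageInfo plus its RPageArray entry. Nothing here is a Symbian API.
    std::mutex gStateLock;

    struct PageState
        {
        const void* iModifier = nullptr;  // cf. SPageInfo::SetModifier()/CheckModified()
        uintptr_t   iEntry    = 1;        // cf. the page array entry *movingPageArrayPtr
        };

    uintptr_t SlowAlloc()  { return 2; }  // stands in for Mmu::AllocRam, which runs unlocked
    void      SlowFree(uintptr_t) {}      // stands in for Mmu::FreeRam

    // Entered with gStateLock held, returns with it released, mirroring the
    // revised MovePage contract (note the IsHeld() assert in the movable manager).
    bool MovePage(PageState& aPage)
        {
        // 1. Record ourselves as the page's modifier and snapshot its entry.
        int token;                        // any unique address serves as a modifier
        aPage.iModifier = &token;
        const uintptr_t oldEntry = aPage.iEntry;

        // 2. Drop the lock across the slow allocation, as the patch now does.
        gStateLock.unlock();
        const uintptr_t newEntry = SlowAlloc();
        gStateLock.lock();

        // 3. Re-validate: if another operation set the modifier or changed the
        //    entry while the lock was dropped, back out and release the new storage.
        if (aPage.iModifier != &token || aPage.iEntry != oldEntry)
            {
            gStateLock.unlock();
            SlowFree(newEntry);           // freed outside the lock, as FreeRam is in the patch
            return false;                 // cf. the KErrInUse paths
            }

        // 4. Nothing raced with us, so commit the move.
        aPage.iEntry = newEntry;
        gStateLock.unlock();
        return true;
        }

    int main()
        {
        PageState page;
        gStateLock.lock();                 // caller takes the lock, as MovePage's callers now do
        const bool moved = MovePage(page); // returns with gStateLock released
        std::printf("moved=%d entry=%ju\n", int(moved), uintmax_t(page.iEntry));
        return 0;
        }

The modifier token acts as a cheap generation marker: any concurrent operation that touches the page overwrites it, so a single pointer comparison after re-locking is enough to detect a race without tracking every possible interleaving.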