diff -r 000000000000 -r a41df078684a kernel/eka/memmodel/epoc/flexible/mmu/mpdalloc.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpdalloc.cpp	Mon Oct 19 15:55:17 2009 +0100
@@ -0,0 +1,211 @@
+// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+//
+
+#include "memmodel.h"
+#include "mm.h"
+#include "mmu.h"
+
+#include "mpdalloc.h"
+#include "mobject.h"
+#include "cache_maintenance.inl"
+
+
+// check enough space for page directories...
+__ASSERT_COMPILE(KNumOsAsids <= (KPageDirectoryEnd-KPageDirectoryBase)/KPageDirectorySize);
+
+
+PageDirectoryAllocator PageDirectories;
+
+
+// If a page directory is larger than a page, only its first half is used for
+// per-process ('local') entries; the second half holds global (kernel) entries.
+const TUint KLocalPdShift = KPageDirectoryShift > KPageShift ? KPageDirectoryShift-1 : KPageShift;
+const TUint KLocalPdSize = 1<<KLocalPdShift;
+const TUint KLocalPdPages = 1<<(KLocalPdShift-KPageShift);
+
+
+void PageDirectoryAllocator::GlobalPdeChanged(TPde* aPde)
+    {
+    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+    __NK_ASSERT_DEBUG(TLinAddr(aPde)>=KPageDirectoryBase);
+    __NK_ASSERT_DEBUG(TLinAddr(aPde)<KPageDirectoryBase+KPageDirectorySize); // change must be in the kernel's page directory
+
+    if(KLocalPdSize<(TUint)KPageDirectorySize)
+        return; // nothing to do, global PDEs live in pages shared by all page directories
+
+    TLinAddr pdeOffset = TLinAddr(aPde)-KPageDirectoryBase;
+    if(pdeOffset<(KGlobalMemoryBase>>KChunkShift)*sizeof(TPde))
+        return; // nothing to do, PDE is in the local part of the page directory
+
+    // copy the updated PDE into every allocated page directory...
+    TPde pde = *aPde;
+    TLinAddr pPde = KPageDirectoryBase+pdeOffset-KPageDirectorySize; // loop below pre-increments
+    TLinAddr pPdeEnd = pPde+KNumOsAsids*KPageDirectorySize;
+    const TUint32* ptr = iAllocator->iMap;
+    do
+        {
+        TUint32 bits = ~*ptr++; // set bits now correspond to allocated page directories
+        do
+            {
+            pPde += KPageDirectorySize; // step to next page directory
+            if(bits&0x80000000u)
+                {
+                TRACE2(("!PDE %x=%x",pPde,pde));
+                *(TPde*)pPde = pde;
+                CacheMaintenance::SinglePdeUpdated(pPde);
+                }
+            }
+        while(bits<<=1);
+        pPde |= 31*KPageDirectorySize; // step to next group of 32 PDs (KPageDirectoryBase is 32*KPageDirectorySize aligned)
+        }
+    while(pPde<pPdeEnd);
+    }
+
+
+void PageDirectoryAllocator::Init2()
+    {
+    TRACEB(("PageDirectoryAllocator::Init2"));
+
+    // construct memory object for page directories...
+    TInt r = MM::InitFixedKernelMemory(iPageDirectoryMemory, KPageDirectoryBase, KPageDirectoryEnd, KPageDirectorySize, EMemoryObjectHardware, EMemoryCreateNoWipe, EMemoryAttributeStandard, EMappingCreateFixedVirtual);
+    __NK_ASSERT_ALWAYS(r==KErrNone);
+
+    // get the kernel's page directory...
+    TPhysAddr kernelPd = Mmu::LinearToPhysical((TLinAddr)Mmu::PageDirectory(KKernelOsAsid));
+    iKernelPageDirectory = kernelPd;
+    ((DMemModelProcess*)K::TheKernelProcess)->iPageDir = kernelPd;
+    AssignPages(KKernelOsAsid*(KPageDirectorySize>>KPageShift),KPageDirectorySize>>KPageShift,kernelPd);
+
+    // construct allocator...
+    iAllocator = TBitMapAllocator::New(KNumOsAsids,ETrue);
+    __NK_ASSERT_ALWAYS(iAllocator);
+    iAllocator->Alloc(KKernelOsAsid,1); // kernel page directory already allocated
+
+    TRACEB(("PageDirectoryAllocator::Init2 done"));
+    }
+
+
+void PageDirectoryAllocator::AssignPages(TUint aIndex, TUint aCount, TPhysAddr aPhysAddr)
+    {
+    __NK_ASSERT_DEBUG(aCount<=KMaxPageInfoUpdatesInOneGo);
+    MmuLock::Lock();
+    SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
+    SPageInfo* piEnd = pi+aCount;
+    while(pi<piEnd)
+        {
+        pi->SetPhysAlloc(iPageDirectoryMemory,aIndex);
+        ++pi;
+        ++aIndex;
+        }
+    MmuLock::Unlock();
+    }
+
+
+TInt PageDirectoryAllocator::Alloc(TUint aOsAsid, TPhysAddr& aPageDirectory)
+    {
+    TRACE(("PageDirectoryAllocator::Alloc(%d)",aOsAsid));
+
+    // get memory for local page directory...
+    Mmu& m = TheMmu;
+    TUint offset = aOsAsid*KPageDirectorySize;
+    TPhysAddr pdPhys;
+    RamAllocLock::Lock();
+    TInt r = m.AllocContiguousRam(pdPhys, KLocalPdPages, KLocalPdShift-KPageShift, iPageDirectoryMemory->RamAllocFlags());
+    if(r==KErrNone)
+        AssignPages(offset>>KPageShift,KLocalPdPages,pdPhys);
+    RamAllocLock::Unlock();
+
+    if(r==KErrNone)
+        {
+        TRACE(("PageDirectoryAllocator::Alloc pdPhys = 0x%08x",pdPhys));
+
+        // map local page directory...
+        r = MM::MemoryAddContiguous(iPageDirectoryMemory,MM::BytesToPages(offset),KLocalPdPages,pdPhys);
+        if(r!=KErrNone)
+            {
+            RamAllocLock::Lock();
+            m.FreeContiguousRam(pdPhys,KLocalPdPages);
+            RamAllocLock::Unlock();
+            }
+        else
+            {
+            aPageDirectory = pdPhys;
+
+            TPde* pd = Mmu::PageDirectory(aOsAsid);
+            const TUint globalOffset = (KGlobalMemoryBase>>KChunkShift)*sizeof(TPde); // start of global part
+
+            // clear local entries in page directory...
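+            // After this point the new page directory is completed: its local
+            // half is cleared, then its global half is either mapped from the
+            // kernel page directory's own pages (split case,
+            // KLocalPdSize<KPageDirectorySize), so every process shares one set
+            // of global PDEs, or filled with a copy of the kernel's global
+            // entries which GlobalPdeChanged() then keeps in sync.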
+            memclr(pd,globalOffset);
+            CacheMaintenance::PdesInitialised((TLinAddr)pd,globalOffset);
+
+            if(KLocalPdSize<(TUint)KPageDirectorySize)
+                {
+                // map global page directory after local part...
+                __NK_ASSERT_DEBUG(KLocalPdSize==globalOffset);
+                r = MM::MemoryAddContiguous(iPageDirectoryMemory,MM::BytesToPages(offset+KLocalPdSize),(KPageDirectorySize-KLocalPdSize)/KPageSize,iKernelPageDirectory+KLocalPdSize);
+                __NK_ASSERT_DEBUG(r==KErrNone); // can't fail
+                MmuLock::Lock(); // need lock because allocator not otherwise atomic
+                iAllocator->Alloc(aOsAsid,1);
+                MmuLock::Unlock();
+                }
+            else
+                {
+                // copy global entries to local page directory...
+                TPde* globalPd = Mmu::PageDirectory(KKernelOsAsid);
+                MmuLock::Lock(); // need lock because allocator not otherwise atomic, also to make sure GlobalPdeChanged() only accesses extant PDs
+                memcpy((TUint8*)pd+globalOffset,(TUint8*)globalPd+globalOffset,KPageDirectorySize-globalOffset);
+                iAllocator->Alloc(aOsAsid,1);
+                MmuLock::Unlock();
+                CacheMaintenance::PdesInitialised((TLinAddr)((TUint8*)pd+globalOffset),KPageDirectorySize-globalOffset);
+                }
+            }
+        }
+    TRACE(("PageDirectoryAllocator::Alloc returns %d",r));
+    return r;
+    }
+
+
+void PageDirectoryAllocator::Free(TUint aOsAsid)
+    {
+    TRACE(("PageDirectoryAllocator::Free(%d)",aOsAsid));
+
+    MmuLock::Lock(); // need lock because allocator not otherwise atomic, also to make sure GlobalPdeChanged() only accesses extant PDs
+    iAllocator->Free(aOsAsid, 1);
+    MmuLock::Unlock();
+
+    const TUint KPageDirectoryPageCount = KPageDirectorySize>>KPageShift;
+    TPhysAddr pages[KPageDirectoryPageCount];
+    TUint n = MM::MemoryRemovePages(iPageDirectoryMemory,aOsAsid*KPageDirectoryPageCount,KPageDirectoryPageCount,pages);
+    (void)n;
+    __NK_ASSERT_DEBUG(n==KPageDirectoryPageCount);
+
+    RamAllocLock::Lock();
+    Mmu& m = TheMmu;
+    // Page directories are fixed. Only the local part was allocated by this
+    // object; any pages beyond KLocalPdPages belong to the shared kernel page
+    // directory and must not be freed here.
+    m.FreeRam(pages, KLocalPdPages, EPageFixed);
+    RamAllocLock::Unlock();
+    }
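For reference, the scan in GlobalPdeChanged() walks the allocator's bitmap one 32-bit word at a time, inverting each word so that set bits mark allocated OS ASIDs, and revisits the same PDE offset in every allocated page directory. The sketch below is a minimal standalone C++ model of that technique; it is not part of the patch. All constants and the main() harness are hypothetical, while the MSB-first, one-bit-per-ASID map convention (set = free) follows TBitMapAllocator as used above.

// Minimal standalone model of the GlobalPdeChanged() bitmap walk.
#include <cstdint>
#include <cstdio>

int main()
    {
    const unsigned KPageDirectorySize = 0x4000;        // hypothetical PD size (16K)
    const unsigned KNumOsAsids = 64;                   // hypothetical number of OS ASIDs
    const uintptr_t KPageDirectoryBase = 0xF0000000u;  // hypothetical, 32*PD-size aligned
    const unsigned pdeOffset = 0x3F00;                 // offset of the changed global PDE

    // allocation bitmap: a cleared bit means 'allocated';
    // here ASIDs 0, 1 and 48..51 are allocated
    const uint32_t map[KNumOsAsids/32] = { 0x3FFFFFFFu, 0xFFFF0FFFu };

    uintptr_t pPde = KPageDirectoryBase + pdeOffset - KPageDirectorySize; // pre-decremented
    const uintptr_t pPdeEnd = pPde + KNumOsAsids*KPageDirectorySize;
    const uint32_t* ptr = map;
    do
        {
        uint32_t bits = ~*ptr++;    // set bits now mark allocated page directories
        do
            {
            pPde += KPageDirectorySize;     // step to next page directory
            if(bits & 0x80000000u)
                printf("update PDE copy at %#zx\n", (size_t)pPde);
            }
        while(bits <<= 1);  // exits early once no allocated ASIDs remain in this word
        pPde |= 31*KPageDirectorySize;      // snap to last PD of this group of 32
        }
    while(pPde < pPdeEnd);
    return 0;
    }

The `pPde |= 31*KPageDirectorySize` step relies on the page directory region being aligned to 32 page directories: even when the inner loop exits early because no allocated ASIDs remain in the current word, the OR lands the scan on the last directory of the group, so the next pre-increment steps cleanly into the next group of 32.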