diff -r 000000000000 -r a41df078684a kernel/eka/memmodel/epoc/flexible/mmu/mvalloc.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mvalloc.cpp	Mon Oct 19 15:55:17 2009 +0100
@@ -0,0 +1,1000 @@
+// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+//
+
+#include <plat_priv.h>
+#include "mm.h"
+#include "mmu.h"
+
+#include "mvalloc.h"
+#include "maddrcont.h"
+
+
+/**
+Log2 of the minimum granularity and alignment of virtual address allocation.
+Must be greater than or equal to #KPageShift+#KPageColourShift.
+*/
+const TUint KVirtualAllocShift = KPageShift+KPageColourShift;
+
+/**
+Log2 of the size of the region covered by a single 'slab' of virtual addresses.
+Must be greater than or equal to KChunkShift.
+*/
+const TUint KVirtualAllocSlabShift = KChunkShift;
+
+/**
+Size, in bytes, of the region covered by a single 'slab' of virtual addresses.
+*/
+const TUint KVirtualAllocSlabSize = 1<<KVirtualAllocSlabShift;
+
+/**
+Mask for the offset of an address within the slab which covers it.
+*/
+const TUint KVirtualAllocSlabMask = KVirtualAllocSlabSize-1;
+
+__ASSERT_COMPILE(KVirtualAllocShift>=KPageShift+KPageColourShift);
+__ASSERT_COMPILE(KVirtualAllocSlabShift>=TUint(KChunkShift));
+
+
+#if defined(__GCCXML__)
+
+FORCE_INLINE TUint CountLeadingZeroes(TUint32 /*aValue*/)
+	{
+	// empty
+	return 0;
+	}
+
+#elif defined(__MARM__)
+
+#ifdef __ARMCC__
+FORCE_INLINE TUint CountLeadingZeroes(TUint32 aValue)
+	{
+	#if __ARMCC_VERSION < 310000
+	TUint r;
+	asm("clz r,aValue");
+	return r;
+	#else
+	// Inline assembler is deprecated in RVCT 3.1 so we use an intrinsic.
+	return __clz(aValue);
+	#endif
+	}
+#endif // __ARMCC__
+
+#ifdef __MARM_ARM4__
+__declspec(naked) static TUint CountLeadingZeroes(TUint32)
+	{
+	CLZ(0,0);
+	__JUMP(,lr);
+	}
+
+#elif defined(__GNUC__)
+FORCE_INLINE TUint CountLeadingZeroes(TUint32 aValue)
+	{
+	TUint r;
+	asm("clz %0,%1" : "=r"(r) : "r"(aValue));
+	return r;
+	}
+#endif // __GNUC__
+
+#else // !__MARM__
+
+inline TUint CountLeadingZeroes(TUint32 aValue)
+	{
+	if(!aValue)
+		return 32;
+	TUint count = 31;
+	if(aValue>=(1<<16))
+		{
+		count -= 16;
+		aValue >>= 16;
+		}
+	if(aValue>=(1<<8))
+		{
+		count -= 8;
+		aValue >>= 8;
+		}
+	if(aValue>=(1<<4))
+		{
+		count -= 4;
+		aValue >>= 4;
+		}
+	if(aValue>=(1<<2))
+		{
+		count -= 2;
+		aValue >>= 2;
+		}
+	count -= aValue>>1;
+	return count;
+	}
+
+#endif // __MARM__
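The portable fallback above locates the highest set bit by successive halving: each comparison discards half of the remaining bit positions, so the answer is found in five steps. The following standalone sketch (not part of the patch; the name CountLeadingZeroesRef is illustrative) mirrors that logic in user-space C++ so it can be sanity-checked against known values with an ordinary compiler.

// Reference copy of the binary-search count-leading-zeroes fallback,
// checked against a few known inputs. Illustrative only.
#include <cassert>
#include <cstdint>

static unsigned CountLeadingZeroesRef(uint32_t aValue)
    {
    if(!aValue)
        return 32;
    unsigned count = 31;
    if(aValue>=(1u<<16)) { count -= 16; aValue >>= 16; }
    if(aValue>=(1u<<8))  { count -= 8;  aValue >>= 8; }
    if(aValue>=(1u<<4))  { count -= 4;  aValue >>= 4; }
    if(aValue>=(1u<<2))  { count -= 2;  aValue >>= 2; }
    count -= aValue>>1;  // final step resolves values 1..3
    return count;
    }

int main()
    {
    assert(CountLeadingZeroesRef(0)==32);          // no bits set
    assert(CountLeadingZeroesRef(1)==31);          // lowest bit set
    assert(CountLeadingZeroesRef(0x80000000u)==0); // highest bit set
    assert(CountLeadingZeroesRef(0x00010000u)==15);
    return 0;
    }

On ARM builds the same result comes from a single CLZ instruction, which is why the fallback is only compiled for non-ARM targets.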
+
+
+//
+// TLogAllocator
+//
+
+/**
+Bitmap allocator for allocating regions whose size and alignment are a
+power-of-two.
+*/
+class TLogAllocator
+	{
+public:
+	TLogAllocator();
+
+	/**
+	Find and allocate a free region in the bitmap.
+
+	@param aSizeShift Log2 of the number of bits to allocate.
+
+	@return If successful, the index of the first bit allocated.
+			Otherwise, -1.
+	*/
+	TInt Alloc(TUint aSizeShift);
+
+	/**
+	Allocate a specific region of bits.
+
+	@param aIndex		The index of the first bit to allocate.
+						Must be an integer multiple of 2^aSizeShift.
+	@param aSizeShift	Log2 of the number of bits to allocate.
+
+	@return KErrNone, if successful;
+			KErrAlreadyExists, if any part of the region was already allocated.
+	*/
+	TInt Alloc(TUint aIndex, TUint aSizeShift);
+
+	/**
+	Free a specific region of bits.
+
+	@param aIndex		The index of the first bit to free.
+						Must be an integer multiple of 2^aSizeShift.
+	@param aSizeShift	Log2 of the number of bits to free.
+
+	@return True, if the slab no longer has any bits allocated.
+	*/
+	TBool Free(TUint aIndex, TUint aSizeShift);
+private:
+	enum
+		{
+		ENumBits = 1<<(KVirtualAllocSlabShift-KVirtualAllocShift),
+		ENumWords = (ENumBits+31)/32
+		};
+
+	/**
+	Number of bits which have been allocated.
+	*/
+	TUint iAllocCount;
+
+	/**
+	Bitmap where a bit set to one indicates 'free' and a bit cleared to zero
+	indicates 'allocated'. The most significant bit in each word has the lowest
+	index value. E.g.
+	- Index 0 is bit 31 of iBits[0]
+	- Index 31 is bit 0 of iBits[0]
+	- Index 32 is bit 31 of iBits[1]
+	*/
+	TUint32 iBits[ENumWords];
+	};
+
+
+TLogAllocator::TLogAllocator()
+	{
+	iAllocCount = 0;
+	memset(iBits,~0u,sizeof(iBits)); // unallocated bits are set to one
+	}
+
+
+TInt TLogAllocator::Alloc(TUint aSizeShift)
+	{
+	TUint size = 1<<aSizeShift;
+	__NK_ASSERT_DEBUG(size<=ENumBits); // check in range
+
+	TUint32* bits = iBits;
+	TUint32* bitsEnd = bits+ENumWords;
+	TUint32 b;
+	switch(aSizeShift)
+		{
+	case 0: // find word with any unallocated bits...
+		do
+			{
+			b = *bits++;
+			if(b)
+				goto small_found;
+			}
+		while(bits<bitsEnd);
+		break;
+
+	case 1: // find word with 2 adjacent unallocated bits...
+		do
+			{
+			b = *bits++;
+			b &= b<<1;
+			if(b)
+				goto small_found;
+			}
+		while(bits<bitsEnd);
+		break;
+
+	case 2: // find word with 4 adjacent unallocated bits...
+		do
+			{
+			b = *bits++;
+			b &= b<<1;
+			b &= b<<2;
+			if(b)
+				goto small_found;
+			}
+		while(bits<bitsEnd);
+		break;
+
+	case 3: // find word with 8 adjacent unallocated bits...
+		do
+			{
+			b = *bits++;
+			b &= b<<1;
+			b &= b<<2;
+			b &= b<<4;
+			if(b)
+				goto small_found;
+			}
+		while(bits<bitsEnd);
+		break;
+
+	case 4: // find word with 16 adjacent unallocated bits...
+		do
+			{
+			b = *bits++;
+			b &= b<<1;
+			b &= b<<2;
+			b &= b<<4;
+			b &= b<<8;
+			if(b)
+				goto small_found;
+			}
+		while(bits<bitsEnd);
+		break;
+
+	case 5: // find word which is completely unallocated...
+		do
+			{
+			b = *bits++;
+			if(b==0xffffffffu)
+				goto big_found;
+			}
+		while(bits<bitsEnd);
+		break;
+
+	default: // find relevant number of consecutive words which are unallocated...
+		{
+		do
+			{
+			// AND words together...
+			TUint32* end = (TUint32*)((TUint8*)bits+(size>>3));
+			TUint32 b = 0xffffffffu;
+			do b &= *bits++;
+			while(bits<end);
+
+			if(b==0xffffffffu)
+				goto big_found; // all words were free
+			}
+		while(bits<bitsEnd);
+		break;
+		}
+		}
+	return -1; // no free region found
+
+small_found:
+	{
+	// find first position in word which starts a free region
+	// (after the shifts above, a set bit marks the start of a run)...
+	TUint offset = CountLeadingZeroes(b);
+
+	// clear bits...
+	TUint32 mask = 0xffffffffu;
+	mask >>= size;
+	mask = ~mask;
+	mask >>= offset;
+	*--bits &= ~mask;
+
+	// calculate index for allocated region...
+	TUint index = (bits-iBits)*32+offset;
+
+	iAllocCount += size;
+	return index;
+	}
+
+big_found:
+	{
+	// clear bits...
+	TUint32* start = (TUint32*)((TUint8*)bits-(size>>3));
+	do *--bits = 0;
+	while(bits>start);
+
+	// calculate index for allocated region...
+	TUint index = (bits-iBits)*32;
+
+	iAllocCount += size;
+	return index;
+	}
+	}
+
+
+TInt TLogAllocator::Alloc(TUint aIndex, TUint aSizeShift)
+	{
+	TUint size = 1<<aSizeShift;
+	__NK_ASSERT_DEBUG(aIndex+size>aIndex); // check overflow
+	__NK_ASSERT_DEBUG(aIndex+size<=ENumBits); // check in range
+	__NK_ASSERT_DEBUG(((aIndex>>aSizeShift)<<aSizeShift)==aIndex); // check alignment
+
+	TUint32* bits = iBits+(aIndex>>5);
+	if(size<32)
+		{
+		TUint32 mask = 0xffffffffu;
+		mask >>= size;
+		mask = ~mask;
+		mask >>= aIndex&31;
+		TUint32 b = *bits;
+		if((b&mask)!=mask)
+			return KErrAlreadyExists;
+		*bits = b&~mask;
+		}
+	else
+		{
+		TUint32* start = bits;
+		TUint32* end = bits+(size>>5);
+		do if(*bits++!=0xffffffffu) return KErrAlreadyExists;
+		while(bits<end);
+		bits = start;
+		do *bits++ = 0;
+		while(bits<end);
+		}
+
+	iAllocCount += size;
+	return KErrNone;
+	}
+
+
+TBool TLogAllocator::Free(TUint aIndex, TUint aSizeShift)
+	{
+	TUint size = 1<<aSizeShift;
+	__NK_ASSERT_DEBUG(aIndex+size>aIndex); // check overflow
+	__NK_ASSERT_DEBUG(aIndex+size<=ENumBits); // check in range
+	__NK_ASSERT_DEBUG(((aIndex>>aSizeShift)<<aSizeShift)==aIndex); // check alignment
+
+	TUint32* bits = iBits+(aIndex>>5);
+	if(size<32)
+		{
+		TUint32 mask = 0xffffffffu;
+		mask >>= size;
+		mask = ~mask;
+		mask >>= aIndex&31;
+		TUint32 b = *bits;
+		__NK_ASSERT_DEBUG((b&mask)==0); // check was allocated
+		*bits = b|mask;
+		}
+	else
+		{
+		TUint wordCount = size>>5;
+		do
+			{
+			__NK_ASSERT_DEBUG(bits[0]==0); // check was allocated
+			*bits++ = 0xffffffffu;
+			}
+		while(--wordCount);
+		}
+
+	iAllocCount -= size;
+	return !iAllocCount;
+	}
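The word masks used by TLogAllocator::Alloc and TLogAllocator::Free are easiest to see in isolation: to cover 2^aSizeShift bits at a given index, a word is built with its top 'size' bits set and then shifted down by the index's offset within the word, because index 0 maps to the most significant bit. Below is a standalone, user-space sketch of that arithmetic (illustrative only, not part of the patch; RegionMask is a hypothetical helper name).

// Builds the sub-word mask for a region of 2^sizeShift bits starting at
// 'index', using the same MSB-first convention as TLogAllocator::iBits.
#include <cassert>
#include <cstdint>

static uint32_t RegionMask(unsigned index, unsigned sizeShift)
    {
    unsigned size = 1u<<sizeShift;   // assumes size < 32, as in the sub-word paths
    uint32_t mask = 0xffffffffu;
    mask >>= size;                   // low (32-size) bits set
    mask = ~mask;                    // top 'size' bits set
    mask >>= index&31;               // slide down to the index's offset
    return mask;
    }

int main()
    {
    assert(RegionMask(0,0)==0x80000000u);  // index 0 is the word's MSB
    assert(RegionMask(31,0)==0x00000001u); // index 31 is the word's LSB
    assert(RegionMask(4,2)==0x0f000000u);  // 4 bits starting at index 4
    return 0;
    }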
+
+
+//
+// TVirtualSlab
+//
+
+/**
+Class for allocating virtual addresses contained in a single 'slab'.
+@see RVirtualAllocSlabSet.
+*/
+class TVirtualSlab
+	{
+public:
+	/**
+	@param aHead		The head of a linked list of slabs to which this one should be added.
+	@param aBase		The starting virtual address of the region covered by this slab.
+	@param aSlabType	The 'slab type'.
+	*/
+	TVirtualSlab(SDblQue& aHead, TUint aBase, TUint aSlabType);
+
+	~TVirtualSlab();
+
+	/**
+	Find and allocate a free region of virtual addresses.
+
+	@param aSizeShift Log2 of the size, in bytes, of the region.
+
+	@return If successful, the allocated virtual address.
+			Otherwise, 0 (zero).
+	*/
+	TLinAddr Alloc(TUint aSizeShift);
+
+	/**
+	Allocate a specific region of virtual addresses.
+
+	@param aAddr		The start address of the region.
+						Must be an integer multiple of 2^aSizeShift.
+	@param aSizeShift	Log2 of the size, in bytes, of the region.
+
+	@return KErrNone, if successful;
+			KErrAlreadyExists, if any part of the region was already allocated.
+	*/
+	TInt Alloc(TLinAddr aAddr, TUint aSizeShift);
+
+	/**
+	Free a specific region of virtual addresses.
+
+	@param aAddr		The start address of the region.
+						Must be an integer multiple of 2^aSizeShift.
+	@param aSizeShift	Log2 of the size, in bytes, of the region.
+
+	@return True, if the slab no longer has any addresses allocated.
+	*/
+	TBool Free(TLinAddr aAddr, TUint aSizeShift);
+
+	/**
+	Return the starting virtual address of the region covered by this slab.
+	*/
+	FORCE_INLINE TLinAddr Base() { return iBase; }
+
+	/**
+	Return this object's 'slab type'.
+	*/
+	FORCE_INLINE TUint SlabType() { return iSlabType; }
+private:
+	/**
+	Link object used to insert this slab into lists.
+	*/
+	SDblQueLink iLink;
+
+	/**
+	The starting virtual address of the region covered by this slab.
+	*/
+	TLinAddr iBase;
+
+	/**
+	This object's 'slab type'.
+	*/
+	TUint8 iSlabType;
+
+	/**
+	Bitmap allocator used to allocate pages in this slab's virtual address region.
+	*/
+	TLogAllocator iAllocator;
+
+	friend class RVirtualAllocSlabSet;
+	};
+
+
+TVirtualSlab::TVirtualSlab(SDblQue& aHead, TUint aBase, TUint aSlabType)
+	: iBase(aBase),iSlabType(aSlabType)
+	{
+	TRACE2(("TVirtualSlab::TVirtualSlab(?,0x%08x,%d)",aBase,aSlabType));
+	aHead.Add(&iLink);
+	}
+
+
+TVirtualSlab::~TVirtualSlab()
+	{
+	TRACE2(("TVirtualSlab::~TVirtualSlab base=0x%08x",iBase));
+	iLink.Deque();
+	}
+
+
+TLinAddr TVirtualSlab::Alloc(TUint aSizeShift)
+	{
+	TRACE2(("TVirtualSlab::Alloc(%d)",aSizeShift));
+	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
+	aSizeShift -= KVirtualAllocShift;
+	TInt index = iAllocator.Alloc(aSizeShift);
+	TLinAddr addr = 0;
+	if(index>=0)
+		addr = iBase+(index<<KVirtualAllocShift);
+	TRACE2(("TVirtualSlab::Alloc returns 0x%08x",addr));
+	return addr;
+	}
+
+
+TInt TVirtualSlab::Alloc(TLinAddr aAddr, TUint aSizeShift)
+	{
+	TRACE2(("TVirtualSlab::Alloc(0x%08x,%d)",aAddr,aSizeShift));
+	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
+	aSizeShift -= KVirtualAllocShift;
+	TUint index = (aAddr-iBase)>>KVirtualAllocShift;
+	__NK_ASSERT_DEBUG(iBase+(index<<KVirtualAllocShift)==aAddr);
+	TInt r = iAllocator.Alloc(index,aSizeShift);
+	TRACE2(("TVirtualSlab::Alloc returns %d",r));
+	return r;
+	}
+
+
+TBool TVirtualSlab::Free(TLinAddr aAddr, TUint aSizeShift)
+	{
+	TRACE2(("TVirtualSlab::Free(0x%08x,%d)",aAddr,aSizeShift));
+	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift);
+	aSizeShift -= KVirtualAllocShift;
+	TUint offset = aAddr-iBase;
+	TUint index = offset>>KVirtualAllocShift;
+	__NK_ASSERT_DEBUG((index<<KVirtualAllocShift)==offset);
+	TBool r = iAllocator.Free(index,aSizeShift);
+	TRACE2(("TVirtualSlab::Free returns %d",r));
+	return r;
+	}
+
+
+//
+// RVirtualAllocSlabSet
+//
+
+/**
+Class used by #RVirtualAllocator for allocating virtual addresses which
+have a size and alignment less than a 'chunk' (#KChunkSize).
+
+This consists of a set of #TVirtualSlab objects.
+*/
+class RVirtualAllocSlabSet
+	{
+public:
+	/**
+	Create a new slab set for use with the specified allocator.
+
+	@param aAllocator		The virtual address allocator which will use the slab set.
+	@param aNumSlabTypes	The number of different 'slab types' to support.
+	@param aWriteLock		Reference to the mutex which is being used to protect
+							allocations with this object.
+
+	@return The newly created #RVirtualAllocSlabSet, or the null pointer if out
+			of memory.
+	*/
+	static RVirtualAllocSlabSet* New(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock);
+
+	~RVirtualAllocSlabSet();
+
+	/**
+	Allocate a region of virtual addresses from the slabs in this set.
+	If aAddr is non-zero, attempt the allocation at that specific address.
+	*/
+	TInt Alloc(TLinAddr& aAddr, TUint aSizeShift, TUint aSlabType);
+
+	/**
+	Free a region of virtual addresses which was allocated with #Alloc.
+	*/
+	void Free(TLinAddr aAddr, TUint aSizeShift);
+
+	/**
+	Return true if the specified region was allocated using the specified
+	slab type.
+	*/
+	TBool CheckSlabType(TLinAddr aAddr, TUint aSizeShift, TUint aSlabType);
+
+private:
+	/**
+	Create a new slab covering the specified address and add it to this set.
+	*/
+	TVirtualSlab* NewSlab(TLinAddr aAddr, TUint aSlabType);
+
+	/**
+	Remove a slab from this set and delete it, freeing the virtual address
+	region it covered.
+	*/
+	void DeleteSlab(TVirtualSlab* aSlab);
+
+	RVirtualAllocSlabSet(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock);
+
+private:
+	/**
+	The virtual address allocator which is using this slab set.
+	*/
+	RVirtualAllocator* iAllocator;
+
+	/**
+	Container of all the slabs in this set, keyed on their base address.
+	*/
+	RAddressedContainer iSlabs;
+
+	/**
+	The number of different 'slab types' this set supports.
+	*/
+	TUint iNumSlabTypes;
+
+	/**
+	An array of lists, one for each 'slab type', containing the slabs which
+	still have unallocated addresses. (The array is extended beyond one entry
+	by over-allocating this object in #New.)
+	*/
+	SDblQue iLists[1];
+	};
+
+
+FORCE_INLINE RVirtualAllocSlabSet::RVirtualAllocSlabSet(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock)
+	: iAllocator(aAllocator), iSlabs(0,aWriteLock), iNumSlabTypes(aNumSlabTypes)
+	{
+	while(aNumSlabTypes--)
+		new (&iLists[aNumSlabTypes]) SDblQue;
+	}
+
+
+RVirtualAllocSlabSet* RVirtualAllocSlabSet::New(RVirtualAllocator* aAllocator, TUint aNumSlabTypes, DMutex*& aWriteLock)
+	{
+	TUint size = sizeof(RVirtualAllocSlabSet)+sizeof(((RVirtualAllocSlabSet*)0x100)->iSlabs)*(aNumSlabTypes-1);
+	RVirtualAllocSlabSet* set = (RVirtualAllocSlabSet*)Kern::AllocZ(size);
+	if(set)
+		new (set) RVirtualAllocSlabSet(aAllocator,aNumSlabTypes,aWriteLock);
+	return set;
+	}
+
+
+RVirtualAllocSlabSet::~RVirtualAllocSlabSet()
+	{
+	__NK_ASSERT_DEBUG(iSlabs.Count()==0);
+	}
+
+
+TVirtualSlab* RVirtualAllocSlabSet::NewSlab(TLinAddr aAddr, TUint aSlabType)
+	{
+	TRACE2(("RVirtualAllocSlabSet::NewSlab(0x%08x,%d)",aAddr,aSlabType));
+	__NK_ASSERT_DEBUG(aSlabType<iNumSlabTypes);
+
+	TVirtualSlab* slab = 0;
+	TLinAddr base;
+	TUint size;
+	TInt r = iAllocator->Alloc(base,size,aAddr&~KVirtualAllocSlabMask,KVirtualAllocSlabSize,aSlabType);
+	if(r==KErrNone)
+		{
+		slab = new TVirtualSlab(iLists[aSlabType],base,aSlabType);
+		if(slab && iSlabs.Add(base,slab)!=KErrNone)
+			{
+			delete slab;
+			slab = 0;
+			}
+		if(!slab)
+			iAllocator->Free(base,KVirtualAllocSlabSize);
+		}
+
+	TRACE2(("RVirtualAllocSlabSet::NewSlab returns 0x%08x",slab));
+	return slab;
+	}
+
+
+void RVirtualAllocSlabSet::DeleteSlab(TVirtualSlab* aSlab)
+	{
+	TLinAddr base = aSlab->Base();
+#ifdef _DEBUG
+	TAny* removedSlab =
+#endif
+	iSlabs.Remove(base);
+	__NK_ASSERT_DEBUG(removedSlab==aSlab);
+	delete aSlab;
+	iAllocator->Free(base,KVirtualAllocSlabSize);
+	}
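Because every slab covers an aligned window of KVirtualAllocSlabSize bytes, finding the slab which owns an address is just a mask followed by a keyed lookup; this is what the iSlabs.Find(aAddr&~KVirtualAllocSlabMask) calls in the functions below rely on. A standalone sketch of the address arithmetic (not part of the patch; the 1MB slab size is illustrative, the real value comes from KChunkShift):

// Splits an address into its slab base (the container key) and the
// offset within the slab, for an assumed 1MB slab.
#include <cassert>
#include <cstdint>

int main()
    {
    const uint32_t slabShift = 20;                 // log2 of slab size
    const uint32_t slabMask  = (1u<<slabShift)-1;  // offset-within-slab bits

    uint32_t addr = 0x6543A123u;
    uint32_t base = addr&~slabMask;                // key used with iSlabs.Find()
    assert(base==0x65400000u);
    assert((addr&slabMask)==0x0003A123u);          // offset inside the slab
    return 0;
    }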
+
+
+TInt RVirtualAllocSlabSet::Alloc(TLinAddr& aAddr, TUint aSizeShift, TUint aSlabType)
+	{
+	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);
+	__NK_ASSERT_DEBUG(aSlabType<iNumSlabTypes);
+
+	if(!aAddr)
+		{
+		// try each existing slab of the right type...
+		SDblQueLink* head = &iLists[aSlabType].iA;
+		SDblQueLink* link = head;
+		while((link=link->iNext)!=head)
+			{
+			TVirtualSlab* slab = _LOFF(link,TVirtualSlab,iLink);
+			TLinAddr addr = slab->Alloc(aSizeShift);
+			if(addr)
+				{
+				aAddr = addr;
+				return KErrNone;
+				}
+			}
+		// no space in any existing slab, so create a new one...
+		TVirtualSlab* slab = NewSlab(0,aSlabType);
+		if(!slab)
+			return KErrNoMemory;
+		TLinAddr addr = slab->Alloc(aSizeShift);
+		if(!addr)
+			return KErrNoMemory;
+		aAddr = addr;
+		return KErrNone;
+		}
+
+	// a specific address was requested...
+	TVirtualSlab* slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
+	if(!slab)
+		{
+		slab = NewSlab(aAddr,aSlabType);
+		if(!slab)
+			return KErrNoMemory;
+		}
+	else
+		{
+		if(slab->SlabType()!=aSlabType)
+			return KErrAlreadyExists; // slab is of incompatible type
+		}
+	return slab->Alloc(aAddr,aSizeShift);
+	}
+
+
+void RVirtualAllocSlabSet::Free(TLinAddr aAddr, TUint aSizeShift)
+	{
+	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);
+
+	TVirtualSlab* slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
+	__NK_ASSERT_DEBUG(slab);
+	if(slab->Free(aAddr,aSizeShift))
+		DeleteSlab(slab);
+	}
+
+
+TBool RVirtualAllocSlabSet::CheckSlabType(TLinAddr aAddr, TUint aSizeShift, TUint aSlabType)
+	{
+	__NK_ASSERT_DEBUG(aSizeShift>=KVirtualAllocShift && aSizeShift<KVirtualAllocSlabShift);
+
+	TVirtualSlab* slab = (TVirtualSlab*)iSlabs.Find(aAddr&~KVirtualAllocSlabMask);
+	if(!slab)
+		{
+		TRACE2(("RVirtualAllocSlabSet::CheckSlabType returns No Slab"));
+		return false;
+		}
+
+	if(slab->iSlabType!=aSlabType)
+		{
+		TRACE2(("RVirtualAllocSlabSet::CheckSlabType returns Wrong Type"));
+		return false;
+		}
+
+	return true;
+	}
+
+
+//
+// RVirtualAllocator
+//
+
+RVirtualAllocator::RVirtualAllocator()
+	: iBase(0), iSize(0), iAllocator(0), iSlabSet(0)
+	{}
+
+
+RVirtualAllocator::~RVirtualAllocator()
+	{
+	__NK_ASSERT_DEBUG(iAllocator==0 || iAllocator->iAvail==iAllocator->iSize); // should be empty
+	Kern::Free(iAllocator);
+	Kern::Free(iSlabSet);
+	}
+
+
+TInt RVirtualAllocator::Construct(TLinAddr aStart, TLinAddr aEnd, TUint aNumSlabTypes, DMutex*& aWriteLock)
+	{
+	if((aStart|aEnd)&KVirtualAllocSlabMask)
+		return KErrArgument; // region not aligned to KVirtualAllocSlabSize
+	TUint bitSize = (aEnd-aStart)>>KVirtualAllocSlabShift;
+	iAllocator = TBitMapAllocator::New(bitSize, ETrue);
+	if(!iAllocator)
+		return KErrNoMemory;
+	iSlabSet = RVirtualAllocSlabSet::New(this,aNumSlabTypes,aWriteLock);
+	if(!iSlabSet)
+		return KErrNoMemory;
+	iBase = aStart;
+	iSize = aEnd-aStart;
+	return KErrNone;
+	}
+
+
+TUint RVirtualAllocator::AdjustRegion(TLinAddr& aAddr, TUint& aSize)
+	{
+	TLinAddr first = aAddr;
+	TLinAddr last = (aAddr+aSize-1);
+	TLinAddr dif = first^last;
+	TUint granularity = KVirtualAllocShift;
+	while(dif>>granularity && ++granularity<KVirtualAllocSlabShift)
+		{}
+	first >>= granularity;
+	last >>= granularity;
+	aAddr = first<<granularity;
+	aSize = (last-first+1)<<granularity;
+	return granularity;
+	}
+
+
+TInt RVirtualAllocator::Alloc(TLinAddr& aAddr, TUint& aSize, TLinAddr aRequestedAddr, TUint aRequestedSize, TUint aSlabType)
+	{
+	TRACE2(("RVirtualAllocator::Alloc(?,?,0x%08x,0x%08x,%d)",aRequestedAddr,aRequestedSize,aSlabType));
+
+	if(!aRequestedSize)
+		return KErrArgument;
+
+	aAddr = aRequestedAddr;
+	aSize = aRequestedSize;
+	TUint align = AdjustRegion(aAddr,aSize);
+	TRACE2(("RVirtualAllocator::Alloc adjusted to 0x%08x+0x%08x, align=%d",aAddr,aSize,align));
+
+	if(align<KVirtualAllocSlabShift)
+		return iSlabSet->Alloc(aAddr,align,aSlabType);
+
+	__NK_ASSERT_DEBUG(align==KVirtualAllocSlabShift);
+	TUint size = aSize>>KVirtualAllocSlabShift;
+
+	if(!aAddr)
+		{
+		TInt r = iAllocator->AllocConsecutive(size, EFalse);
+		if(r>=0)
+			{
+			iAllocator->Alloc(r, size);
+			aAddr = iBase+(r<<KVirtualAllocSlabShift);
+			return KErrNone;
+			}
+		return KErrNoMemory;
+		}
+
+	// specific address requested...
+	if(!InRange(aAddr,aSize))
+		{
+		TRACE2(("RVirtualAllocator::Alloc invalid region"));
+		return KErrArgument;
+		}
+
+	TUint offset = (aAddr-iBase)>>KVirtualAllocSlabShift;
+	if(!iAllocator->NotFree(offset,size))
+		{
+		iAllocator->Alloc(offset,size);
+		return KErrNone;
+		}
+	else
+		{
+		TRACE2(("RVirtualAllocator::Alloc already allocated!"));
+		return KErrAlreadyExists;
+		}
+	}
+
+
+void RVirtualAllocator::Free(TLinAddr aAddr, TUint aSize)
+	{
+	if(!aSize)
+		return;
+
+	TRACE2(("RVirtualAllocator::Free(0x%08x,0x%08x)",aAddr,aSize));
+
+	TUint align = AdjustRegion(aAddr,aSize);
+	TRACE2(("RVirtualAllocator::Free adjusted to 0x%08x+0x%08x, align=%d",aAddr,aSize,align));
+
+	if(!InRange(aAddr,aSize))
+		{
+		TRACE2(("RVirtualAllocator::Free invalid region"));
+		__NK_ASSERT_ALWAYS(0);
+		return; // invalid region
+		}
+
+	if(align<KVirtualAllocSlabShift)
+		{
+		iSlabSet->Free(aAddr,align);
+		return;
+		}
+
+	__NK_ASSERT_DEBUG(align==KVirtualAllocSlabShift);
+	TUint offset = (aAddr-iBase)>>KVirtualAllocSlabShift;
+	TUint size = aSize>>KVirtualAllocSlabShift;
+	iAllocator->Free(offset,size);
+	}
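AdjustRegion, used by Alloc, Free and CheckSlabType above, rounds a requested region out to the smallest power-of-two granularity (capped at the slab size) at which the region's first and last addresses fall within one aligned granule; the allocators then only ever deal in power-of-two sizes. A user-space sketch of that computation (not part of the patch; the shift values are illustrative stand-ins for KVirtualAllocShift and KVirtualAllocSlabShift):

// Reproduces the granularity search and the round-down/round-up steps
// for one concrete region, with the expected results asserted.
#include <cassert>
#include <cstdint>

int main()
    {
    const uint32_t allocShift = 14; // stand-in for KVirtualAllocShift
    const uint32_t slabShift  = 20; // stand-in for KVirtualAllocSlabShift

    uint32_t addr = 0x10003000u, size = 0x5000u;
    uint32_t first = addr, last = addr+size-1;
    uint32_t dif = first^last;      // bits in which first and last differ
    uint32_t granularity = allocShift;
    while(dif>>granularity && ++granularity<slabShift)
        {}
    first >>= granularity;
    last >>= granularity;
    addr = first<<granularity;
    size = (last-first+1)<<granularity;

    assert(granularity==15);        // one doubling was enough here
    assert(addr==0x10000000u);      // start rounded down to the granularity
    assert(size==0x8000u);          // region rounded up to one granule
    return 0;
    }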
TRACE2(("RVirtualAllocator::CheckSlabType(0x%08x,0x%08x,%d)",aAddr,aSize,aSlabType)); + if(!aSize) + return false; + + TUint align = AdjustRegion(aAddr,aSize); + + if(!InRange(aAddr,aSize)) + { + TRACE2(("RVirtualAllocator::CheckSlabType not in range")); + return false; + } + + if(alignCheckSlabType(aAddr,align,aSlabType); + } + else + { + return true; + } + } + + +// +// RBackwardsVirtualAllocator +// + +TInt RBackwardsVirtualAllocator::Alloc(TLinAddr& aAddr, TUint& aSize, TLinAddr aRequestedAddr, TUint aRequestedSize, TUint aSlabType) + { + if(aRequestedAddr) + aRequestedAddr = (iBase+iSize)-(aRequestedAddr+aRequestedSize-iBase); + TInt r = RVirtualAllocator::Alloc(aAddr,aSize,aRequestedAddr,aRequestedSize,aSlabType); + if(r==KErrNone) + aAddr = (iBase+iSize)-(aAddr+aSize-iBase); + return r; + } + + +void RBackwardsVirtualAllocator::Free(TLinAddr aAddr, TUint aSize) + { + RVirtualAllocator::Free((iBase+iSize)-(aAddr+aSize-iBase),aSize); + } + +