// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\mmubase\ramalloc.cpp
// 
//

/**
 @file
 @internalComponent
*/
//#define __VERIFY_LEASTMOVDIS

#include <plat_priv.h>
#include <ramalloc.h>
#include <e32btrace.h>

#ifndef __MEMMODEL_FLEXIBLE__
#include <mmubase.inl>
#else
#include "mdefrag.inl"
#endif //__MEMMODEL_FLEXIBLE__

DRamAllocator* DRamAllocator::New()
	{
	return new DRamAllocator;
	}

DRamAllocator* DRamAllocator::New(const SRamInfo& aInfo, const SRamZone* aZoneInfo, TRamZoneCallback aZoneCallback)
	{
	DRamAllocator* pA=New();
	if (!pA)
		Panic(ECreateNoMemory);
	// If Create() fails it will panic rather than return
	pA->Create(aInfo,aZoneInfo, aZoneCallback);
	return pA;
	}

void DRamAllocator::Panic(TPanic aPanic)
	{
	Kern::Fault("RAM-ALLOC", aPanic);
	}

#ifdef KMMU
void HexDump32(const TAny* a, TInt n, const char* s)
	{
	const TUint32* p=(const TUint32*)a;
	Kern::Printf(s);
	TInt i=0;
	while(n)
		{
		TBuf8<80> b;
		b.AppendNumFixedWidth(i,EHex,4);
		b.Append(':');
		TInt m=Min(n,4);
		n-=m;
		i+=m;
		while(m--)
			{
			b.Append(' ');
			b.AppendNumFixedWidth(*p++,EHex,8);
			}
		Kern::Printf("%S",&b);
		}
	}

void HexDump8(const TAny* a, TInt n, const char* s)
	{
	const TUint8* p=(const TUint8*)a;
	Kern::Printf(s);
	TInt i=0;
	while(n)
		{
		TBuf8<80> b;
		b.AppendNumFixedWidth(i,EHex,4);
		b.Append(':');
		TInt m=Min(n,16);
		n-=m;
		i+=m;
		while(m--)
			{
			b.Append(' ');
			b.AppendNumFixedWidth(*p++,EHex,2);
			}
		Kern::Printf("%S",&b);
		}
	}
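
// Illustrative use of the dump helpers above (debug builds only; the buffer
// and counts here are hypothetical). HexDump32 takes a word count and prints
// four words per line; HexDump8 takes a byte count and prints sixteen bytes
// per line; both prefix each line with the running offset.
//
//		TUint32 words[8] = {0};
//		HexDump32(words, 8, "Zone bitmap header:");
//		HexDump8(words, 32, "Same data, byte view:");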
       

void DRamAllocator::DebugDump()
	{
	Kern::Printf("PageSize=%08x PageShift=%d",KPageSize,KPageShift);
	Kern::Printf("Total Pages=%x Total Free=%x",iTotalRamPages,iTotalFreeRamPages);
	Kern::Printf("Number of zones=%d, PowerState=%016lx",iNumZones,iZonePwrState);
	Kern::Printf("PhysAddrBase=%08x, PhysAddrTop=%08x",iPhysAddrBase,iPhysAddrTop);

	TUint i = 0;
	Kern::Printf("Zone Info:");
	for (; i<iNumZones; ++i)
		{
		SZone& z=iZones[i];
		TBitMapAllocator& b = *(z.iBma[KBmaAllPages]);
		Kern::Printf("%x: Avail %x Size %x Phys %08x PhysEnd %08x ID %08x FreePage %x Pref %02x",i,b.iAvail,b.iSize,
										z.iPhysBase, z.iPhysEnd, z.iId,z.iFreePages, z.iPref);
		Kern::Printf("Allocated Unknown %x Fixed %x Movable %x Discardable %x",iZones[i].iAllocPages[EPageUnknown],iZones[i].iAllocPages[EPageFixed],
										iZones[i].iAllocPages[EPageMovable],iZones[i].iAllocPages[EPageDiscard]);
		}

	Kern::Printf("Zone pref order:");
	SDblQueLink* link = iZonePrefList.First();
	for (; link != &iZonePrefList.iA; link = link->iNext)
		{
		SZone& zone = *_LOFF(link, SZone, iPrefLink);
		Kern::Printf("ID0x%x rank0x%x", zone.iId, zone.iPrefRank);
		}
	SZone& zone = *_LOFF(iZoneLeastMovDis, SZone, iPrefLink);
	Kern::Printf("iZoneLeastMovDis ID 0x%x rank 0x%x", zone.iId, iZoneLeastMovDisRank);
	}
#endif

TInt CountBanks(const SRamBank* aBankList)
	{
	TInt banks=0;
	for (; aBankList->iSize; ++banks, ++aBankList);
	return banks;
	}

TUint32 TotalBankSize(const SRamBank* aBankList)
	{
	TUint32 size=0;
	for (; aBankList->iSize; ++aBankList)
		size+=aBankList->iSize;
	return size;
	}
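
// The bank lists handled by these helpers are terminated by a sentinel entry
// rather than an explicit count: iteration stops at the first entry with
// iSize==0. A sketch (addresses hypothetical) of a list the helpers would
// report as two banks totalling 0x06000000 bytes:
//
//		SRamBank banks[3];
//		banks[0].iBase = 0x80000000; banks[0].iSize = 0x04000000;	// 64MB
//		banks[1].iBase = 0x90000000; banks[1].iSize = 0x02000000;	// 32MB
//		banks[2].iBase = 0;          banks[2].iSize = 0;			// sentinel
//		// CountBanks(banks) == 2 and TotalBankSize(banks) == 0x06000000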
       

/**
Count how many zones have been specified and do some basic checks on their layout:
	Zones must be distinct, i.e. not overlap
	Zone IDs must be unique
	Zones must be page size aligned
	Zones must be big enough to cover all of the allocatable RAM
The end of the list is indicated by SRamZone.iSize==0.
@param aZones The list of RAM zones to be set up
*/
void DRamAllocator::CountZones(const SRamZone* aZones)
	{
	TUint32 totalSize = 0;
	TUint32 pageMask = KPageSize-1;
	// Check zones don't overlap each other and while running through the zones
	// calculate how many there are
	const SRamZone* pCurZ = aZones;
	for (; pCurZ->iSize != 0; pCurZ++)
		{
		// Verify zone addresses and alignment
		TUint32 curEnd = pCurZ->iBase + pCurZ->iSize - 1;
		__KTRACE_OPT(KMMU,Kern::Printf("curBase %x curEnd %x pageMask %x",pCurZ->iBase,curEnd,pageMask));
		if (curEnd <= pCurZ->iBase || (((curEnd + 1) | pCurZ->iBase) & pageMask))
			{
			Panic(EZonesAlignment);
			}
		
		if (pCurZ->iId == KRamZoneInvalidId)
			{
			Panic(EZonesIDInvalid);
			}
		// Check the flags are not set to invalid values
		if (pCurZ->iFlags & KRamZoneFlagInvalid)
			{
			Panic(EZonesFlagsInvalid);
			}

		iNumZones++;
		if (iNumZones > KMaxRamZones)
			{// Too many zones specified
			Panic(EZonesTooNumerousOrFew);
			}
		totalSize += pCurZ->iSize;
		
		// Verify this zone doesn't overlap any of the previous zones' address space
		const SRamZone* pTmpZ = aZones;
		for (; pTmpZ < pCurZ; pTmpZ++)
			{
			TUint32 tmpEnd = pTmpZ->iBase + pTmpZ->iSize - 1;
			if (tmpEnd >= pCurZ->iBase && pTmpZ->iBase <= curEnd)
				{
				Panic(EZonesNotDistinct);
				}
			if(pTmpZ->iId == pCurZ->iId)
				{
				Panic(EZonesIDNotUnique);
				}
			}
		}
	__KTRACE_OPT(KMMU,Kern::Printf("iNumZones=%d, totalSize=%x",iNumZones,totalSize));
	if (!iNumZones)
		{// no zones specified
		Panic(EZonesTooNumerousOrFew);
		}

	// Together all of the zones should cover the whole of the RAM
	if (totalSize>>KPageShift < iTotalRamPages)
		{
		Panic(EZonesIncomplete);
		}
	}
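
// A sketch of a zone list that would pass the checks above (IDs, preferences
// and addresses hypothetical): every zone page aligned and non-overlapping,
// IDs unique and not KRamZoneInvalidId, the zones together covering at least
// all allocatable RAM, and the list terminated by an entry with iSize==0.
//
//		SRamZone zones[3];
//		zones[0].iBase = 0x80000000; zones[0].iSize = 0x04000000;
//		zones[0].iId = 1; zones[0].iPref = 0; zones[0].iFlags = 0;
//		zones[1].iBase = 0x84000000; zones[1].iSize = 0x04000000;
//		zones[1].iId = 2; zones[1].iPref = 1; zones[1].iFlags = KRamZoneFlagNoFixed;
//		zones[2].iSize = 0;	// sentinel ends the list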
       

/**
Get the zone from the ID
@param aId ID of zone to find
@return Pointer to the zone if zone of matching ID found, NULL otherwise
*/
SZone* DRamAllocator::ZoneFromId(TUint aId) const
	{
	SZone* pZ = iZones;
	const SZone* const pEndZone = iZones + iNumZones;
	for (; pZ < pEndZone; pZ++)
		{
		if (aId == pZ->iId)
			{
			return pZ;
			}
		}
	return NULL;
	}

/** Retrieve the physical base address and number of pages in the specified zone.

@param	aZoneId	The ID of the zone
@param	aPhysBase	Receives the base address of the zone
@param	aNumPages	Receives the number of pages in the zone

@return KErrNone if the zone was found, KErrArgument if it couldn't be found
*/
TInt DRamAllocator::GetZoneAddress(TUint aZoneId, TPhysAddr& aPhysBase, TUint& aNumPages)
	{
	SZone* zone = ZoneFromId(aZoneId);
	if (zone == NULL)
		{
		return KErrArgument;
		}
	aPhysBase = zone->iPhysBase;
	aNumPages = zone->iPhysPages;
	return KErrNone;
	}
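
// Usage sketch (the allocator pointer and zone ID are hypothetical):
//
//		TPhysAddr base;
//		TUint pages;
//		if (iRamAllocator->GetZoneAddress(1, base, pages) == KErrNone)
//			{// The zone spans [base, base + (pages << KPageShift)).
//			}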
       

#ifdef __MEMMODEL_FLEXIBLE__
/**
@param aAddr The address of the page to find the zone of
@param aOffset Receives the page offset from the start of the zone that the page is in
@return Pointer to the zone containing aAddr, or NULL if aAddr is not in any zone
*/
SZone* DRamAllocator::GetZoneAndOffset(TPhysAddr aAddr, TInt& aOffset)
	{
	// Get the zone from the SPageInfo of the page at aAddr
	SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr);
	if (pageInfo == NULL)
		{
		return NULL;
		}

	// Perform a binary search for the RAM zone; we know aAddr is within a RAM 
	// zone as pageInfo != NULL.
	SZone* left = iZones;
	SZone* mid = iZones + (iNumZones>>1);
	SZone* top = iZones + iNumZones - 1;

	while (mid->iPhysEnd < aAddr || mid->iPhysBase > aAddr)
		{
		if (mid->iPhysEnd < aAddr)
			left = mid + 1;
		else
			top = mid - 1;
		mid = left + ((top - left) >> 1);
		__ASSERT_DEBUG(left <= top && mid <= top && mid >= left, Panic(EAllocRamPagesInconsistent));
		}
	__ASSERT_DEBUG(mid->iPhysBase <= aAddr && mid->iPhysEnd >= aAddr, Panic(EAllocRamPagesInconsistent));
	aOffset = (aAddr - mid->iPhysBase) >> KPageShift;
	__ASSERT_DEBUG((TUint)aOffset < mid->iPhysPages, Panic(EAllocRamPagesInconsistent));
	return mid;
	}
#else
/**
@param aAddr The address of the page to find the zone of
@param aOffset Receives the page offset from the start of the zone that the page is in
@return Pointer to the zone containing aAddr, or NULL if aAddr is not in any zone
*/
SZone* DRamAllocator::GetZoneAndOffset(TPhysAddr aAddr, TInt& aOffset)
	{
	// Get the zone from the SPageInfo of the page at aAddr
	SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr);
	if (pageInfo == NULL)
		{
		return NULL;
		}
	SZone* z = iZones + pageInfo->Zone();
	aOffset = (aAddr - z->iPhysBase) >> KPageShift;
	__ASSERT_DEBUG((TUint)aOffset < z->iPhysPages, Panic(EAllocRamPagesInconsistent));
	return z;
	}
#endif
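
// The two GetZoneAndOffset() variants above differ only in how the zone is
// located once SafeFromPhysAddr() has confirmed aAddr is in allocatable RAM:
// the flexible memory model binary-searches iZones, which Create() fills in
// physical address order, so the loop narrows [left, top] to the single zone
// whose [iPhysBase, iPhysEnd] range contains aAddr in at most
// log2(iNumZones) iterations; the other memory models simply read the zone
// index that InitSPageInfos() cached in the page's SPageInfo.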
       
/**
@param aId ID of the zone to get page counts for
@param aPageData Receives the page counts of the zone
@return KErrNone if the zone was found, KErrArgument otherwise
*/
TInt DRamAllocator::GetZonePageCount(TUint aId, SRamZonePageCount& aPageData)
	{
	// Search for the zone of ID aId
	const SZone* zone = ZoneFromId(aId);
	if (zone == NULL)
		{
		return KErrArgument;
		}
	aPageData.iFreePages = zone->iFreePages;
	aPageData.iUnknownPages = zone->iAllocPages[EPageUnknown];
	aPageData.iFixedPages = zone->iAllocPages[EPageFixed];
	aPageData.iMovablePages = zone->iAllocPages[EPageMovable];
	aPageData.iDiscardablePages = zone->iAllocPages[EPageDiscard];

	return KErrNone;
	}
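
// Usage sketch (allocator pointer and zone ID hypothetical). The five counts
// partition the zone, so they always sum to the zone's iPhysPages:
//
//		SRamZonePageCount counts;
//		if (iRamAllocator->GetZonePageCount(1, counts) == KErrNone)
//			{// counts.iFreePages + counts.iUnknownPages + counts.iFixedPages
//			 //	+ counts.iMovablePages + counts.iDiscardablePages == zone pages
//			}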
       

/** Update the count of free and allocated pages for the zone.
@param aZone The zone whose counts are being updated
@param aCount The number of pages being allocated
@param aType The type of the pages being allocated
*/
void DRamAllocator::ZoneAllocPages(SZone* aZone, TUint32 aCount, TZonePageType aType)
	{
#ifdef _DEBUG
	TUint32 free = aZone->iFreePages - aCount;
	TUint32 alloc = aZone->iAllocPages[aType] + aCount;
	TUint32 total_alloc = 	aZone->iAllocPages[EPageUnknown] +
							aZone->iAllocPages[EPageDiscard] + 
							aZone->iAllocPages[EPageMovable] + 
							aZone->iAllocPages[EPageFixed] + aCount;
	if (free > aZone->iFreePages || 
		alloc < aZone->iAllocPages[aType] ||
		free + total_alloc != aZone->iPhysPages ||
		iTotalFreeRamPages > iTotalRamPages)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("TotalFree %x TotalPages %x",iTotalFreeRamPages, iTotalRamPages));
		__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocPages - aCount %x free %x, alloc %x",aCount,free,alloc));	// counts rolled over
		__KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], 
					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
		Panic(EZonesCountErr);
		}
	__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
	__KTRACE_OPT(KMMU2,Kern::Printf("ZoneAllocPages - aCount %x free %x, alloc %x",aCount,free,alloc));
	__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], 
					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));

	if (iAllowBmaVerify)
		{
		TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
		TUint allocPages;
		if (aType == EPageFixed || aType == EPageUnknown)
			allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed];
		else
			allocPages = aZone->iAllocPages[aType];
		allocPages += aCount;
		__NK_ASSERT_DEBUG(aZone->iPhysPages - bmaType.iAvail == allocPages);
		__NK_ASSERT_DEBUG((TUint)bmaType.iAvail >= aZone->iFreePages - aCount);

//#define _FULL_VERIFY_TYPE_BMAS
#ifdef _FULL_VERIFY_TYPE_BMAS
		TUint offset = 0;
		TUint matchedPages = 0;
		TInt r = KErrNone;
		while (offset < aZone->iPhysPages && r == KErrNone)
			{
			r = NextAllocatedPage(aZone, offset, EPageTypes);
			if (bmaType.NotFree(offset, 1))
				{
				matchedPages++;
				}
			offset++;
			}
		__NK_ASSERT_DEBUG(matchedPages == allocPages);
#endif
		}
#endif

	// Update counts
	aZone->iAllocPages[aType] += aCount;
	aZone->iFreePages -= aCount;
	aZone->iFlags &= ~KRamZoneFlagMark;	// clear the mark as this zone is active

	// Check if the power state of the zone needs to be changed
	if (iZonePowerFunc && !(iZonePwrState & (((TUint64)1) << (aZone - iZones))))
		{//zone no longer empty so call variant to power RAM zone up if necessary
		iZonePwrState |= (((TUint64)1) << (aZone - iZones));

		if (iZoneCallbackInitSent)
			{
			TInt ret = (*iZonePowerFunc)(ERamZoneOp_PowerUp, (TAny*)aZone->iId, (TUint*)&iZonePwrState);
			if (ret != KErrNone && ret != KErrNotSupported)
				{
				Panic(EZonesCallbackErr);
				}
			CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::ZoneAllocPages");
			}
		}

	// Re-order the zone preference list so that a RAM zone with more immovable pages 
	// is more preferable and, secondary to that, a RAM zone that is not empty is more
	// preferable than one that is empty.
	while (&aZone->iPrefLink != iZonePrefList.First())
		{
		SZone* prevZ = _LOFF(aZone->iPrefLink.iPrev, SZone, iPrefLink);
		__NK_ASSERT_DEBUG(K::Initialising || prevZ->iPrefRank == aZone->iPrefRank - 1);
		if (prevZ->iPref == aZone->iPref && 
			(prevZ->iAllocPages[EPageFixed] + prevZ->iAllocPages[EPageUnknown] < 
			aZone->iAllocPages[EPageFixed] + aZone->iAllocPages[EPageUnknown] ||
			prevZ->iFreePages == prevZ->iPhysPages))
			{
			__KTRACE_OPT(KMMU, Kern::Printf("a - Reorder aZone 0x%x free 0x%x before prevZ 0x%x free 0x%x", aZone->iId, aZone->iFreePages, prevZ->iId, prevZ->iFreePages));
			// Make this RAM zone more preferable.
			aZone->iPrefLink.Deque();
			aZone->iPrefLink.InsertBefore(&prevZ->iPrefLink);
			aZone->iPrefRank--;
			prevZ->iPrefRank++;

			if (iZoneLeastMovDis == &prevZ->iPrefLink)
				{// Ensure iZoneLeastMovDisRank is kept up to date.
				iZoneLeastMovDisRank = prevZ->iPrefRank;
				}
			if (iZoneLeastMovDis == &aZone->iPrefLink)
				{// Ensure iZoneLeastMovDisRank is kept up to date.
				iZoneLeastMovDisRank = aZone->iPrefRank;
				// aZone was the least preferable with movable and/or discardable so is it still?
				if (prevZ->iAllocPages[EPageMovable] || prevZ->iAllocPages[EPageDiscard])
					{// prevZ is now the least preferable RAM zone with movable and/or discardable.
					iZoneLeastMovDis = &prevZ->iPrefLink; 
					iZoneLeastMovDisRank = prevZ->iPrefRank;
					__KTRACE_OPT(KMMU, Kern::Printf("aa - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId));
					}
				__KTRACE_OPT(KMMU, Kern::Printf("iZoneLeastMovDisRank 0x%x", iZoneLeastMovDisRank));
				}
			}
		else
			{
			break;
			}
		}

	// Now that the preference list has been re-ordered check whether
	// iZoneLeastMovDis needs updating.
	if (aType >= EPageMovable && iZoneLeastMovDisRank < aZone->iPrefRank)
		{
		iZoneLeastMovDis = &aZone->iPrefLink;
		iZoneLeastMovDisRank = aZone->iPrefRank;
		__KTRACE_OPT(KMMU, Kern::Printf("a - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId));
		}
	__NK_ASSERT_DEBUG(	K::Initialising || 
						iZoneLeastMovDisRank == _LOFF(iZoneLeastMovDis, SZone, iPrefLink)->iPrefRank);
#ifdef __VERIFY_LEASTMOVDIS
	if (!K::Initialising)
		VerifyLeastPrefMovDis();
#endif
	}
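
// Worked example of the reordering above (zone IDs and counts hypothetical).
// Suppose equal-preference zones stand in the order A, B, C and a fixed-page
// allocation in C takes its fixed+unknown count above B's: C is dequeued and
// re-inserted before B (their ranks swap) and the loop then compares C with
// A. Within each preference value the list therefore stays sorted by
// decreasing immovable page count, with completely empty zones kept towards
// the least preferable end.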
       

/** Update the count of free and allocated pages for the zone.
@param aZone The zone whose counts are being updated
@param aCount The number of pages being freed
@param aType The type of the pages being freed
*/
void DRamAllocator::ZoneFreePages(SZone* aZone, TUint32 aCount, TZonePageType aType)
	{
#ifdef _DEBUG
	TUint32 alloc = aZone->iAllocPages[aType] - aCount;
	TUint32 free = aZone->iFreePages + aCount;
	TUint32 total_alloc = 	aZone->iAllocPages[EPageUnknown] +
							aZone->iAllocPages[EPageDiscard] + 
							aZone->iAllocPages[EPageMovable] + 
							aZone->iAllocPages[EPageFixed] - aCount;
	if (free < aZone->iFreePages ||
		alloc > aZone->iAllocPages[aType] ||
		free + total_alloc != aZone->iPhysPages ||
		iTotalFreeRamPages > iTotalRamPages)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("TotalFree %x TotalPages %x",iTotalFreeRamPages, iTotalRamPages));
		__KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));	// counts rolled over
		__KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], 
					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
		Panic(EZonesCountErr);
		}
	__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
	__KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
	__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], 
					aZone->iAllocPages[EPageFixed],	aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));

	if (iAllowBmaVerify)
		{
		TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
		TUint allocPages;
		if (aType == EPageFixed || aType == EPageUnknown)
			allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed];
		else
			allocPages = aZone->iAllocPages[aType];
		allocPages -= aCount;
		__NK_ASSERT_DEBUG(aZone->iPhysPages - bmaType.iAvail == allocPages);
		__NK_ASSERT_DEBUG((TUint)bmaType.iAvail >= aZone->iFreePages + aCount);

#ifdef _FULL_VERIFY_TYPE_BMAS
		TUint offset = 0;
		TUint matchedPages = 0;
		TInt r = KErrNone;
		while(offset < aZone->iPhysPages && r == KErrNone)
			{
			r = NextAllocatedPage(aZone, offset, EPageTypes);
			if (bmaType.NotFree(offset, 1))
				{
				matchedPages++;
				}
			offset++;
			}
		__NK_ASSERT_DEBUG(matchedPages == allocPages);
#endif
		}
#endif

	// Update counts
	aZone->iAllocPages[aType] -= aCount;
	aZone->iFreePages += aCount;
	aZone->iFlags &= ~KRamZoneFlagMark;	// clear the mark as this zone is active

	// Check if the power state of the zone needs to be changed.
	//	Don't update iZonePwrState when a zone is being cleared to then be 
	//	claimed as it shouldn't be powered off as it's about to be used.
	if (iZonePowerFunc && !(aZone->iFlags & KRamZoneFlagClaiming) &&
		aZone->iFreePages == aZone->iPhysPages)
		{// Zone is empty so call variant to power down RAM zone if desirable.
		TUint64 pwrMask = ~(((TUint64)1) << (aZone - iZones));
		iZonePwrState &= pwrMask;

		// Don't invoke callback until Init callback sent.
		if (iZoneCallbackInitSent)
			{
			TInt ret = (*iZonePowerFunc)(ERamZoneOp_PowerDown, (TAny*)aZone->iId, (TUint*)&iZonePwrState);
			if (ret != KErrNone && ret != KErrNotSupported)
				{
				Panic(EZonesCallbackErr);
				}
			CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::ZoneFreePages");
			}
		}

	// Re-order the zone preference list so that a RAM zone with more immovable pages 
	// is more preferable and, secondary to that, a RAM zone that is not empty is more
	// preferable than one that is empty.
	while (&aZone->iPrefLink != iZonePrefList.Last())
		{
		SZone* nextZ = _LOFF(aZone->iPrefLink.iNext, SZone, iPrefLink);
		__NK_ASSERT_DEBUG(K::Initialising || nextZ->iPrefRank == aZone->iPrefRank + 1);
		if (nextZ->iPref == aZone->iPref && 
			(nextZ->iAllocPages[EPageFixed] + nextZ->iAllocPages[EPageUnknown] >
			aZone->iAllocPages[EPageFixed] + aZone->iAllocPages[EPageUnknown] ||
			(nextZ->iFreePages != nextZ->iPhysPages &&
			aZone->iFreePages == aZone->iPhysPages)))
			{
			__KTRACE_OPT(KMMU, Kern::Printf("f - Reorder aZone 0x%x free 0x%x after nextZ 0x%x free 0x%x", aZone->iId, aZone->iFreePages, nextZ->iId, nextZ->iFreePages));
			// Make this RAM zone less preferable.
			aZone->iPrefLink.Deque();
			aZone->iPrefLink.InsertAfter(&nextZ->iPrefLink);
			aZone->iPrefRank++;
			nextZ->iPrefRank--;

			if (iZoneLeastMovDis == &aZone->iPrefLink)
				{// Ensure iZoneLeastMovDisRank is kept up to date.
				iZoneLeastMovDisRank = aZone->iPrefRank;
				}
			if (iZoneLeastMovDis == &nextZ->iPrefLink)
				{// Ensure iZoneLeastMovDisRank is kept up to date.
				iZoneLeastMovDisRank = nextZ->iPrefRank;
				if (aZone->iAllocPages[EPageMovable] || aZone->iAllocPages[EPageDiscard])
					{// aZone is now the least preferable RAM zone with movable and/or discardable.
					iZoneLeastMovDis = &aZone->iPrefLink;
					iZoneLeastMovDisRank = aZone->iPrefRank;
					__KTRACE_OPT(KMMU, Kern::Printf("aa - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId));
					}
				__KTRACE_OPT(KMMU, Kern::Printf("iZoneLeastMovDis Rank 0x%x", iZoneLeastMovDisRank));
				}
			}
		else
			{
			break;
			}
		}
	if (&aZone->iPrefLink == iZoneLeastMovDis && 
		!aZone->iAllocPages[EPageMovable] && !aZone->iAllocPages[EPageDiscard])
		{// This RAM zone no longer has movable or discardable pages and therefore it 
		// is also no longer the least preferable RAM zone with movable and/or 
		// discardable.
		SZone* zonePrev;
		do 
			{
			iZoneLeastMovDis = iZoneLeastMovDis->iPrev;
			iZoneLeastMovDisRank--;
			if (iZoneLeastMovDis == iZonePrefList.First())
				{// This is the most preferable RAM zone so we can't go any further.
				break;
				}
			zonePrev = _LOFF(iZoneLeastMovDis, SZone, iPrefLink);
			__KTRACE_OPT(KMMU, Kern::Printf("f - iZoneLeastMovDis 0x%x", zonePrev->iId));
			}
		while (!zonePrev->iAllocPages[EPageMovable] && !zonePrev->iAllocPages[EPageDiscard]);

		__NK_ASSERT_DEBUG(	K::Initialising || 
							iZoneLeastMovDisRank == _LOFF(iZoneLeastMovDis, SZone, iPrefLink)->iPrefRank);

#ifdef __VERIFY_LEASTMOVDIS
		if (!K::Initialising)
			VerifyLeastPrefMovDis();
#endif
		}
	}

/** Calculate the physical address order of the zones and temporarily store
	the order in aZoneAddrOrder.
*/
inline void DRamAllocator::SortRamZones(const SRamZone* aZones, TUint8* aZoneAddrOrder)
	{
	const SRamZone* const endZone = aZones + iNumZones;
	const SRamZone* zone = aZones;
	for (; zone < endZone; zone++)
		{
		// zoneIdx is the number of zones that have a lower base address than the 
		// current zone and therefore it is the address index of the current zone
		TInt zoneIdx = 0;
		// search for any zones of lower base address
		const SRamZone* zone2 = aZones;
		for (; zone2 < endZone; zone2++)
			{
			if (zone2->iBase < zone->iBase)
				{
				zoneIdx++; // have another zone of lower base address
				}
			}
		aZoneAddrOrder[zoneIdx] = zone - aZones;
		}
	}
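
// SortRamZones() is an O(n^2) rank sort: each zone's address-order index is
// the number of zones with a lower base address. For a hypothetical aZones
// with bases {0x90000000, 0x80000000, 0x88000000} the computed indices are
// {2, 0, 1}, so aZoneAddrOrder becomes {1, 2, 0}, i.e. aZones[1] has the
// lowest base. The quadratic cost is acceptable as iNumZones is bounded by
// KMaxRamZones and this runs once during boot.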
       

/** Initialise SPageInfos for all pages in this zone with the 
index of the zone.
@param aZone The zone the pages to be initialised are in
*/
inline TUint DRamAllocator::InitSPageInfos(const SZone* aZone)
	{
	TUint pagesUpdated = 0;
	if (aZone->iPhysBase > iPhysAddrTop || aZone->iPhysEnd < iPhysAddrBase)
		{// None of the zone is in allocatable RAM
		return pagesUpdated;
		}

	// Mark each allocatable page in this zone with the index of the zone
#ifndef __MEMMODEL_FLEXIBLE__
	TUint8 zoneIndex = aZone - iZones;
#endif
	TPhysAddr addr = aZone->iPhysBase;
	for (; addr <= aZone->iPhysEnd; addr += KPageSize)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(addr);
		if (pi)
			{
#ifndef __MEMMODEL_FLEXIBLE__	// The FMM doesn't store zone indices in SPageInfos.
			pi->SetZone(zoneIndex);
#endif
			pagesUpdated++;
			}
		}
	return pagesUpdated;
	}

/** HAL Function for the RAM allocator.
*/
TInt DRamAllocator::HalFunction(TInt aFunction, TAny* a1, TAny* a2)
	{
	switch(aFunction)
		{
		case ERamHalGetZoneCount:
			{
			kumemput32(a1, &iNumZones, sizeof(iNumZones));
			return KErrNone;
			}

		case ERamHalGetZoneConfig:
			{
			TUint zoneIndex = (TUint)a1;
			if (zoneIndex < iNumZones)
				{
				SZone* pZone = iZones + zoneIndex;
				struct SRamZoneConfig config;
				NKern::ThreadEnterCS();
				M::RamAllocLock(); // get mutex to ensure consistent set of values are read...
				config.iZoneId         = pZone->iId;
				config.iZoneIndex      = zoneIndex;
				config.iPhysBase       = pZone->iPhysBase;
				config.iPhysEnd        = pZone->iPhysEnd;
				config.iPhysPages      = pZone->iPhysPages;
				config.iPref		   = pZone->iPref;
				config.iFlags		   = pZone->iFlags;
				M::RamAllocUnlock();
				NKern::ThreadLeaveCS();
				kumemput32(a2,&config,sizeof(config));
				return KErrNone;
				}
			return KErrNotFound;
			}
		
		case ERamHalGetZoneUtilisation:
			{
			TUint zoneIndex = (TUint)a1;
			if (zoneIndex < iNumZones)
				{
				SZone* pZone = iZones + zoneIndex;
				struct SRamZoneUtilisation config;
				NKern::ThreadEnterCS();
				M::RamAllocLock(); // get mutex to ensure consistent set of values are read...
				config.iZoneId			 = pZone->iId;
				config.iZoneIndex		 = zoneIndex;
				config.iPhysPages		 = pZone->iPhysPages;
				config.iFreePages		 = pZone->iFreePages;
				config.iAllocUnknown	 = pZone->iAllocPages[EPageUnknown];
				config.iAllocFixed		 = pZone->iAllocPages[EPageFixed];
				config.iAllocMovable	 = pZone->iAllocPages[EPageMovable];
				config.iAllocDiscardable = pZone->iAllocPages[EPageDiscard];
				config.iAllocOther		 = 0;
				M::RamAllocUnlock();
				NKern::ThreadLeaveCS();
				kumemput32(a2,&config,sizeof(config));
				return KErrNone;
				}
			return KErrNotFound;
			}

		default:
			{
			return KErrNotSupported;
			}
		}
	}
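
// User-side access sketch for the HAL cases above (the exact call site is
// hypothetical). The config/utilisation cases take the zone index in a1 and
// copy the result structure back to a2 with kumemput32(), so a caller might
// look like:
//
//		SRamZoneUtilisation util;
//		TInt r = UserSvr::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation,
//									  (TAny*)0 /*zone index*/, &util);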
       

/**
Set up the RAM allocator with information of the RAM available in the system that 
comes from the bootstrap/superpage.  This is intended to be called from 
DRamAllocator::New().
@internalComponent
@see DRamAllocator::New()
@param aInfo Two lists of SRamBanks for available and reserved banks in RAM, respectively
@param aZones A list of the RAM zones in the system and their configuration/preferences
@param aZoneCallback Pointer to a base port callback function that will be invoked by this class
*/
void DRamAllocator::Create(const SRamInfo& aInfo, const SRamZone* aZones, TRamZoneCallback aZoneCallback)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::Create"));

	// SZone::iBma array assumes this and KBmaAllPages can't be the same as any 
	// allocatable page type.
	__ASSERT_COMPILE(EPageFixed < KPageImmovable && EPageUnknown < KPageImmovable &&
					EPageDiscard >= KPageImmovable && EPageMovable >= KPageImmovable &&
					KBmaAllPages != EPageFixed && KBmaAllPages != EPageMovable && 
					KBmaAllPages != EPageDiscard);
	// NoAllocOfPageType() requires this
	__ASSERT_COMPILE(	KRamZoneFlagNoFixed == 1 << (EPageFixed - KPageTypeAllocBase) && 
						KRamZoneFlagNoMovable == 1 << (EPageMovable - KPageTypeAllocBase) &&
						KRamZoneFlagNoDiscard == 1 << (EPageDiscard - KPageTypeAllocBase));
						
	// SZone::iPhysEnd and iPhysAddrTop rely on this when checking contiguous zones etc.
	__ASSERT_COMPILE(KPageShift != 0);

	///////////////////////////////////////////////////////////////////////////
	//	Determine where all the allocatable RAM pages are, using the SRamBank
	//	data passed to the kernel by the bootstrap
	//////////////////////////////////////////////////////////////////////////
	TUint num_boot_banks=CountBanks(aInfo.iBanks);
	TUint32 total_ram_size=TotalBankSize(aInfo.iBanks);
	__KTRACE_OPT(KMMU,Kern::Printf("#banks from bootstrap=%d",num_boot_banks));
	__KTRACE_OPT(KMMU,Kern::Printf("Total size=%08x",total_ram_size));
	iTotalRamPages=total_ram_size>>KPageShift;
	// Assume all pages are allocated as unknown for now
	iTotalFreeRamPages = 0;
	__KTRACE_OPT(KMMU,Kern::Printf("Total size=%08x, total pages=%08x",total_ram_size,iTotalRamPages));

	iPhysAddrBase=aInfo.iBanks[0].iBase;
	const SRamBank& last_boot_bank=aInfo.iBanks[num_boot_banks-1];
	iPhysAddrTop = last_boot_bank.iBase + last_boot_bank.iSize - 1;
	__KTRACE_OPT(KMMU,Kern::Printf("PA base=%08x, PA top=%08x",iPhysAddrBase,iPhysAddrTop));

	__ASSERT_DEBUG(iPhysAddrTop > iPhysAddrBase, Panic(ECreateInvalidRamBanks));

	///////////////////////////////////////////////////////////////////////////
	//	Determine how many zones are required and allocate all the 
	//	data structures that will be required, permanent ones first, then
	//	temporary ones to avoid kernel heap fragmentation.
	///////////////////////////////////////////////////////////////////////////
	// Stop any RAM zone callback operations until the initial one has been sent
	iZoneCallbackInitSent = EFalse;
	if (aZones)
		{
		CountZones(aZones);
		iZonePowerFunc = aZoneCallback;
		}
	else
		{// the maximum number of zones is the number of non-coalesced boot banks
		iNumZones = num_boot_banks;
		// No zones specified so don't worry about invoking the callback function
		iZonePowerFunc = NULL;
		}
	
	// Permanent heap allocation #1 - may be resized if no zones specified
	__KTRACE_OPT(KMMU,Kern::Printf("iNumZones=%d", iNumZones));
	iZones = (SZone*)Kern::AllocZ(iNumZones*sizeof(SZone));
	if (!iZones)
		{
		Panic(ECreateNoMemory);
		}

	///////////////////////////////////////////////////////////////////////////
	//	Coalesce contiguous boot banks
	///////////////////////////////////////////////////////////////////////////
	SRamBank* physBanks = (SRamBank*)Kern::Alloc(num_boot_banks*sizeof(SRamBank));
	if (!physBanks)
		{
		Panic(ECreateNoMemory);
		}
	SRamBank* coalescedBank = physBanks;
	const SRamBank* const lastBank = aInfo.iBanks + num_boot_banks;
	TPhysAddr currentBase = aInfo.iBanks->iBase;
	TPhysAddr currentEnd = aInfo.iBanks->iBase + aInfo.iBanks->iSize;
	const SRamBank* nextBank = aInfo.iBanks + 1;
	for (; nextBank <= lastBank; ++nextBank)
		{
		// Create new bank if the next bank isn't contiguous or if 
		// it is the last bank
		if (nextBank == lastBank || nextBank->iBase != currentEnd)
			{
			coalescedBank->iBase = currentBase;
			coalescedBank->iSize = currentEnd - currentBase;
			// Mark all the SPageInfos for the pages in this bank as unused.
			// Needs to be done here to allow SPageInfo::SafeFromPhysAddr to work
			// which is used by InitSPageInfos()
			SPageInfo* pi = SPageInfo::FromPhysAddr(coalescedBank->iBase);
			SPageInfo* piBankEnd = pi + (coalescedBank->iSize >> KPageShift);
			for (; pi < piBankEnd; pi++)
				{
				pi->SetUnused();
				}
			++coalescedBank;
			__KTRACE_OPT(KMMU, Kern::Printf("Coalesced bank: %08x-%08x", currentBase, currentEnd));
			currentBase = nextBank->iBase;
			currentEnd = currentBase + nextBank->iSize;
			}
		else
			{
			currentEnd += nextBank->iSize;
			}
		}
	TUint num_coalesced_banks = coalescedBank - physBanks;
	__KTRACE_OPT(KMMU, Kern::Printf("#Coalesced banks: %d", num_coalesced_banks));

	///////////////////////////////////////////////////////////////////////////
	//	Initialise the SZone objects and mark all the SPageInfos with the index 
	//	of the zone they are in.
	//////////////////////////////////////////////////////////////////////////
	// Assume everything is off so the base port will get a notification every time 
	// a new zone is required during the rest of the boot process.
	if (aZones != NULL)
		{
		SZone* newZone = iZones;	// pointer to zone being created

		// Create and fill zoneAddrOrder with address ordered indices to aZones
		TUint8* zoneAddrOrder = (TUint8*)Kern::Alloc(iNumZones);
		if (!zoneAddrOrder)
			{
			Panic(ECreateNoMemory);
			}
		SortRamZones(aZones, zoneAddrOrder);

		// Now go through each SRamZone in address order initialising the SZone 
		// objects.
		TUint i = 0;
		TUint totalZonePages = 0;
		for (; i < iNumZones; i++)
			{
			const SRamZone& ramZone = *(aZones + zoneAddrOrder[i]);
			newZone->iPhysBase = ramZone.iBase;
			newZone->iPhysEnd = ramZone.iBase + ramZone.iSize - 1;
			newZone->iPhysPages = ramZone.iSize >> KPageShift;
			newZone->iAllocPages[EPageUnknown] = newZone->iPhysPages;
			newZone->iId = ramZone.iId;
			newZone->iPref = ramZone.iPref;
			newZone->iFlags = ramZone.iFlags;
			totalZonePages += InitSPageInfos(newZone);
			newZone++;
			}

		// iZones now points to all the SZone objects stored in address order
		Kern::Free(zoneAddrOrder);
		if (totalZonePages != iTotalRamPages)
			{// The zones don't cover all of the allocatable RAM.
			Panic(EZonesIncomplete);
			}
		}
	else
		{
		iNumZones = num_coalesced_banks;
		iZones = (SZone*)Kern::ReAlloc((TAny*)iZones, iNumZones*sizeof(SZone));
		if (iZones == NULL)
			{
			Panic(ECreateNoMemory);
			}
		// Create a zone for each coalesced boot bank
		SRamBank* bank = physBanks;
		SRamBank* bankEnd = physBanks + num_coalesced_banks;
		SZone* zone = iZones;
		for (; bank < bankEnd; bank++, zone++)
			{
			zone->iPhysBase = bank->iBase;
			zone->iPhysEnd = bank->iBase + bank->iSize - 1;
			zone->iPhysPages = bank->iSize >> KPageShift;
			zone->iAllocPages[EPageUnknown] = zone->iPhysPages;
			zone->iId = (TUint)bank; // doesn't matter what it is as long as it is unique
			InitSPageInfos(zone);
			}
		}
	// Delete the coalesced banks as they are no longer required
	Kern::Free(physBanks);

	///////////////////////////////////////////////////////////////////////////
	//	Create each zone's bit map allocators now as no temporary heap 
	// 	cells are still allocated at this point.
	///////////////////////////////////////////////////////////////////////////
	const SZone* const endZone = iZones + iNumZones;
	SZone* zone = iZones;
	for (; zone < endZone; zone++)
		{// Create each BMA with all pages allocated as unknown.
		for (TUint i = 0; i < EPageTypes; i++)
			{
			// Only mark the all pages bma and fixed/unknown bma as allocated.
			TBool notAllocated = (i >= (TUint)EPageMovable);
			zone->iBma[i] = TBitMapAllocator::New(zone->iPhysPages, notAllocated);
			if (!zone->iBma[i])
				{
				Panic(ECreateNoMemory);
				}
			}
		}

	///////////////////////////////////////////////////////////////////////////
	// Unallocate each page in each bank so that it can be allocated when required.
	// Any page that exists outside a bank will remain allocated as EPageUnknown
	// and will therefore not be touched by the allocator.
	//////////////////////////////////////////////////////////////////////////
	// Temporarily fill the preference list so SetPhysicalRamState can succeed
#ifdef _DEBUG
	// Block bma verifications as the bma and alloc counts aren't consistent yet.
	iAllowBmaVerify = EFalse;
#endif
	const SZone* const lastZone = iZones + iNumZones;
	zone = iZones;
	for (; zone < lastZone; zone++)
		{
		iZonePrefList.Add(&zone->iPrefLink);
		}
	const SRamBank* const lastPhysBank = aInfo.iBanks + num_boot_banks;
	const SRamBank* bank = aInfo.iBanks;
	for (; bank < lastPhysBank; bank++)
		{// Free all the pages in this bank.
		SetPhysicalRamState(bank->iBase, bank->iSize, ETrue, EPageUnknown);
		}
#ifdef _DEBUG
	// Only now is it safe to enable bma verifications
	iAllowBmaVerify = ETrue;
#endif

	///////////////////////////////////////////////////////////////////////////
	//	Sort the zones by preference and create a preference ordered linked list
	///////////////////////////////////////////////////////////////////////////
	zone = iZones;
	for (; zone < lastZone; zone++)
		{// clear all the zones from the preference list as they are not in preference order
		zone->iPrefLink.Deque();
		}
	SZone** prefOrder = (SZone**)Kern::AllocZ(iNumZones * sizeof(SZone*));
	if (!prefOrder)
		{
		Panic(ECreateNoMemory);
		}
	zone = iZones;
	for(; zone < lastZone; zone++)
		{
		TInt lowerZones = 0;
		// Find how many zones have a lower preference than this one
		const SZone* zone2 = iZones;
		for (; zone2 < lastZone; zone2++)
			{
			if (zone->iPref > zone2->iPref ||
				zone->iPref == zone2->iPref && zone->iFreePages > zone2->iFreePages)
				{
				lowerZones++;
				}
			}
		while (prefOrder[lowerZones] != 0)
			{// Zone(s) of this preference and size already exist so 
			 // place this one after it/them
			lowerZones++;
			}
		prefOrder[lowerZones] = zone;
		}
	// Fill the preference ordered linked list
	SZone** const lastPref = prefOrder + iNumZones;
	SZone** prefZone = prefOrder;
	TUint prefRank = 0;
	for (; prefZone < lastPref; prefZone++, prefRank++)
		{
		SZone& zone = **prefZone;
		iZonePrefList.Add(&zone.iPrefLink);
		zone.iPrefRank = prefRank;
		}
	Kern::Free(prefOrder); // Remove the temporary allocation

	///////////////////////////////////////////////////////////////////////////
	// 	Now mark any regions reserved by the base port as allocated and not 
	//	for use by the RAM allocator.
	///////////////////////////////////////////////////////////////////////////
	const SRamBank* pB = lastBank + 1;	// first reserved block specifier
	for (; pB->iSize; ++pB)
		{
		__KTRACE_OPT(KMMU, Kern::Printf("Reserve physical block %08x+%x", pB->iBase, pB->iSize));
		TInt r = SetPhysicalRamState(pB->iBase, pB->iSize, EFalse, EPageFixed);
		__KTRACE_OPT(KMMU, Kern::Printf("Reserve returns %d", r));
		if (r!=KErrNone)
			{
			Panic(ECreateInvalidReserveBank);
			}
#ifdef BTRACE_KERNEL_MEMORY
		BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, pB->iSize, pB->iBase);
		Epoc::DriverAllocdPhysRam += pB->iSize;
#endif
#ifndef __MEMMODEL_FLEXIBLE__ // Mmu::Init2Common() handles this in FMM.
		// Synchronise the SPageInfo with any blocks that were reserved by
		// marking any reserved regions as locked
		TPhysAddr physAddrEnd = pB->iBase + pB->iSize;
		TPhysAddr physAddr = pB->iBase;
		for(; physAddr < physAddrEnd; physAddr += KPageSize)
			{
			SPageInfo* pi = SPageInfo::FromPhysAddr(physAddr);
			pi->Lock();
			}
#endif
		}

	//////////////////////////////////////////////////////////////////////////
	// Now that we have the RAM zone preference list and know how many
	// allocatable pages there are, set iZoneLeastMovDis to be the RAM zone 
	// that will be used when half of the RAM is in use. This is a boot-up 
	// optimisation to reduce the amount of moving and/or discarding that fixed 
	// page allocations will have to make during boot.
	//////////////////////////////////////////////////////////////////////////
	TUint halfAllocatablePages = iTotalFreeRamPages >> 1;
	TUint pages = 0;
	SDblQueLink* link = &iZonePrefList.iA;
	do
		{
		link = link->iNext;
		__NK_ASSERT_DEBUG(link != &iZonePrefList.iA);
		SZone& zonePages = *_LOFF(link, SZone, iPrefLink);
		pages += zonePages.iFreePages;
		}
	while(pages < halfAllocatablePages);
	iZoneLeastMovDis = link;
	iZoneLeastMovDisRank = _LOFF(link, SZone, iPrefLink)->iPrefRank;

	// Reset the general defrag links.
	iZoneGeneralPrefLink = NULL;
	iZoneGeneralTmpLink = NULL;

	__KTRACE_OPT(KMMU,DebugDump());
	}

void DRamAllocator::MarkPagesAllocated(TPhysAddr aAddr, TInt aCount, TZonePageType aType)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPagesAllocated(%x+%x)",aAddr,aCount));

	M::RamAllocIsLocked();

	// Don't allow unknown pages to be allocated, saves extra 'if' when 
	// creating bmaType.
	__NK_ASSERT_DEBUG(aType != EPageUnknown);

	__ASSERT_DEBUG(	!(TUint32(aAddr) & (KPageSize - 1)) &&
					(TUint32(aAddr) < TUint32(iPhysAddrTop)) && 
					(TUint32(aAddr) >= TUint32(iPhysAddrBase))&&
					(TUint32((aCount << KPageShift) -1 + aAddr) <= TUint32(iPhysAddrTop)),
					Panic(EDoMarkPagesAllocated1));

	iTotalFreeRamPages-=aCount;
	// Find the 1st zone the 1st set of allocations belong to
	TInt offset = 0;
	SZone* pZ = GetZoneAndOffset(aAddr,offset);
	if (pZ == NULL)
		{//aAddr not in RAM
		Panic(EDoMarkPagesAllocated1);
		}
	while(aCount)
		{
		TBitMapAllocator& bmaAll = *(pZ->iBma[KBmaAllPages]);
		TBitMapAllocator& bmaType = *(pZ->iBma[aType]);
		TInt count = Min(bmaAll.iSize - offset, aCount);
		bmaAll.Alloc(offset, count);
		bmaType.Alloc(offset, count);
		ZoneAllocPages(pZ, count, aType);
		aCount -= count;

		// If spanning zones then ensure the next zone is contiguous.
		__ASSERT_DEBUG(!aCount || ((pZ + 1)->iPhysBase != 0 && ((pZ + 1)->iPhysBase - 1) == pZ->iPhysEnd), Panic(EDoMarkPagesAllocated1));

		pZ++;		// zones in physical address order so move to next one
		offset = 0;	// and reset offset to start of the zone
		}
	}
  1137 
       
  1138 TInt DRamAllocator::MarkPageAllocated(TPhysAddr aAddr, TZonePageType aType)
       
  1139 	{
       
  1140 	__KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPageAllocated %08x",aAddr));
       
  1141 
       
  1142 	M::RamAllocIsLocked();
       
  1143 
       
  1144 	// Don't allow unknown pages to be allocated, saves extra 'if' when 
       
  1145 	// creating bmaType.
       
  1146 	__NK_ASSERT_DEBUG(aType != EPageUnknown);
       
  1147 
       
  1148 	TInt n;
       
  1149 	SZone* z=GetZoneAndOffset(aAddr,n);
       
  1150 	if (!z)
       
  1151 		{
       
  1152 		return KErrArgument;
       
  1153 		}
       
  1154 	__KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n));
       
  1155 	TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
       
  1156 	TBitMapAllocator& bmaType = *(z->iBma[aType]);
       
  1157 	if (bmaAll.NotFree(n,1))
       
  1158 		{
       
  1159 		__KTRACE_OPT(KMMU,Kern::Printf("Page already allocated"));
       
  1160 		return KErrAlreadyExists;			// page is already allocated
       
  1161 		}
       
  1162 	bmaAll.Alloc(n,1);
       
  1163 	bmaType.Alloc(n,1);
       
  1164 	--iTotalFreeRamPages;
       
  1165 	ZoneAllocPages(z, 1, aType);
       
  1166 	__KTRACE_OPT(KMMU,Kern::Printf("Total free RAM pages now = %d",iTotalFreeRamPages));
       
  1167 
       
  1168 #ifdef BTRACE_RAM_ALLOCATOR
       
  1169 	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocMarkAllocated, aType, aAddr);
       
  1170 #endif
       
  1171 	return KErrNone;
       
  1172 	}
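
// Illustrative sketch only (not part of the original source, so it is kept
// inside #if 0 and never compiled): how a caller that already holds the RAM
// allocator mutex might claim one specific physical page as fixed.  The
// helper name is hypothetical.
#if 0
TInt ClaimOnePageExample(DRamAllocator* aAllocator, TPhysAddr aPage)
	{
	// Returns KErrArgument if aPage is not in RAM and KErrAlreadyExists
	// if some other component already owns the page.
	return aAllocator->MarkPageAllocated(aPage, EPageFixed);
	}
#endif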
       
  1173 
       
  1174 TInt DRamAllocator::FreeRamPage(TPhysAddr aAddr, TZonePageType aType)
       
  1175 	{
       
  1176 	__KTRACE_OPT(KMMU,Kern::Printf("FreeRamPage %08x",aAddr));
       
  1177 
       
  1178 	M::RamAllocIsLocked();
       
  1179 
       
  1180 #ifdef _DEBUG
       
  1181 #ifndef __MEMMODEL_FLEXIBLE__
       
  1182 	// Check lock counter of the page
       
  1183 	if (aAddr != KPhysAddrInvalid)
       
  1184 		{
       
  1185 		SPageInfo* pi =  SPageInfo::SafeFromPhysAddr(aAddr);
       
  1186 		if(pi && pi->LockCount())
       
  1187 			Panic(EFreeingLockedPage);
       
  1188 		}
       
  1189 #endif
       
  1190 	// Don't allow unknown pages to be freed, saves extra 'if' when 
       
  1191 	// creating bmaType.
       
  1192 	__NK_ASSERT_DEBUG(aType != EPageUnknown);
       
  1193 #endif
       
  1194 	
       
  1195 	TInt n;
       
  1196 	SZone* z=GetZoneAndOffset(aAddr,n);
       
  1197 	if (!z)
       
  1198 		{
       
  1199 		return KErrArgument;
       
  1200 		}
       
  1201 	__KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n));
       
  1202 	TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
       
  1203 	TBitMapAllocator& bmaType = *(z->iBma[aType]);
       
  1204 	bmaAll.Free(n);
       
  1205 	bmaType.Free(n);
       
  1206 	++iTotalFreeRamPages;
       
  1207 	ZoneFreePages(z, 1, aType);
       
  1208 
       
  1209 #ifdef BTRACE_RAM_ALLOCATOR
       
  1210 	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePage, aType, aAddr);
       
  1211 #endif
       
  1212 	return KErrNone;
       
  1213 	}
       
  1214 
       
  1215 void DRamAllocator::FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType)
       
  1216 	{
       
  1217 	__KTRACE_OPT(KMMU,Kern::Printf("FreeRamPages count=%08x",aNumPages));
       
  1218 
       
  1219 	M::RamAllocIsLocked();
       
  1220 
       
  1221 #if defined(_DEBUG) && !defined(__MEMMODEL_FLEXIBLE__)
       
  1222 	// Check lock counter for each page that is about to be freed.
       
  1223 	TInt pageNum = aNumPages;
       
  1224 	TPhysAddr* pageList = aPageList;
       
  1225 	while (pageNum--)
       
  1226 		{
       
  1227 		TPhysAddr pa = *pageList++;
       
  1228 		if (pa == KPhysAddrInvalid)
       
  1229 			continue;
       
  1230 		SPageInfo* pi =  SPageInfo::SafeFromPhysAddr(pa);
       
  1231 		if(pi && pi->LockCount())
       
  1232 			Panic(EFreeingLockedPage);
       
  1233 		}
       
  1234 #endif
       
  1235 	
       
  1236 	while(aNumPages--)
       
  1237 		{
       
  1238 		TPhysAddr first_pa = *aPageList++;
       
  1239 		if (first_pa == KPhysAddrInvalid)
       
  1240 			{
       
  1241 			continue;
       
  1242 			}
       
  1243 		TInt ix;
       
  1244 		SZone* z = GetZoneAndOffset(first_pa,ix);
       
  1245 		if (!z)
       
  1246 			{
       
  1247 			continue;
       
  1248 			}
       
  1249 		TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
       
  1250 		TInt zp_rem = bmaAll.iSize - ix;
       
  1251 		__KTRACE_OPT(KMMU,Kern::Printf("1st PA=%08x Zone %d index %04x",first_pa,z-iZones,ix));
       
  1252 		TInt n = 1;
       
  1253 		TPhysAddr pa = first_pa + KPageSize;
       
  1254 		while (--zp_rem && aNumPages && *aPageList==pa)
       
  1255 			{
       
  1256 			++n;
       
  1257 			--aNumPages;
       
  1258 			++aPageList;
       
  1259 			pa += KPageSize;
       
  1260 			}
       
  1261 		__KTRACE_OPT(KMMU2,Kern::Printf("%d consecutive pages, zp_rem=%x, %d remaining pages",n,zp_rem,aNumPages));
       
  1262 		bmaAll.Free(ix,n);
       
  1263 		TBitMapAllocator& bmaType = *(z->iBma[aType]);
       
  1264 		bmaType.Free(ix,n);
       
  1265 		iTotalFreeRamPages += n;
       
  1266 		ZoneFreePages(z, n, aType);
       
  1267 #ifdef BTRACE_RAM_ALLOCATOR
       
  1268 		BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocFreePages, aType, n, first_pa);
       
  1269 #endif
       
  1270 		}
       
  1271 #ifdef BTRACE_RAM_ALLOCATOR
       
  1272 	BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocFreePagesEnd);
       
  1273 #endif
       
  1274 	}
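
// Illustrative sketch (hypothetical helper, never compiled): FreeRamPages()
// silently skips KPhysAddrInvalid entries, so a caller can invalidate the
// slots it wants to keep and free the rest of the list in one call.
#if 0
void FreeAllButFirstExample(DRamAllocator* aAllocator, TPhysAddr* aPages, TInt aCount)
	{
	if (aCount > 1)
		{
		aPages[0] = KPhysAddrInvalid;	// Keep ownership of the first page.
		aAllocator->FreeRamPages(aPages, aCount, EPageMovable);
		}
	}
#endif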
       
  1275 
       
  1276 /**
       
  1277 	Attempt to clear up to the required number of discardable or movable pages
       
  1278 	from the RAM zone.
       
  1279 
       
  1280 	@param aZone			The RAM zone to clear.
       
  1281 	@param aRequiredPages	The maximum number of pages to clear.
       
  1282 */
       
  1283 void DRamAllocator::ZoneClearPages(SZone& aZone, TUint aRequiredPages)
       
  1284 	{
       
  1285 	__KTRACE_OPT(KMMU, 
       
  1286 		Kern::Printf("ZoneClearPages: ID 0x%x, req 0x%x", aZone.iId, aRequiredPages));
       
  1287 	// Discard the required number of discardable pages.
       
  1288 	TUint offset = 0;
       
  1289 	TInt r = NextAllocatedPage(&aZone, offset, EPageDiscard);
       
  1290 	while (r == KErrNone && aRequiredPages)
       
  1291 		{
       
  1292 		TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
       
  1293 		TInt discarded = M::DiscardPage(physAddr, aZone.iId, EFalse);
       
  1294 		if (discarded == KErrNone)
       
  1295 			{// The page was successfully discarded.
       
  1296 			aRequiredPages--;
       
  1297 			}
       
  1298 		offset++;
       
  1299 		r = NextAllocatedPage(&aZone, offset, EPageDiscard);
       
  1300 		}
       
  1301 	// Move the required number of movable pages.
       
  1302 	offset = 0;
       
  1303 	r = NextAllocatedPage(&aZone, offset, EPageMovable);
       
  1304 	while(r == KErrNone && aRequiredPages)
       
  1305 		{
       
  1306 		TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
       
  1307 		TPhysAddr newAddr = KPhysAddrInvalid;
       
  1308 		if (M::MovePage(physAddr, newAddr, aZone.iId, EFalse) == KErrNone)
       
  1309 			{// The page was successfully moved.
       
  1310 #ifdef _DEBUG
       
  1311 			TInt newOffset = 0;
       
  1312 			SZone* newZone = GetZoneAndOffset(newAddr, newOffset);
       
  1313 			__NK_ASSERT_DEBUG(newZone != &aZone);
       
  1314 #endif
       
  1315 			aRequiredPages--;
       
  1316 			}
       
  1317 		offset++;
       
  1318 		r = NextAllocatedPage(&aZone, offset, EPageMovable);
       
  1319 		}
       
  1320 	}
       
  1321 
       
  1322 /** Attempt to allocate pages into a particular zone.  Pages will not
       
  1323 	always be contiguous.
       
  1324 
       
  1325 	@param aPageList On return it will contain the addresses of any allocated pages
       
  1326 	@param aZone The zone to allocate from
       
  1327 	@param aNumPages The number of pages to allocate
       
  1328 	@param aType The type of pages to allocate
       
  1329 	@return The number of pages that were allocated
       
  1330 */
       
  1331 TUint32 DRamAllocator::ZoneFindPages(TPhysAddr*& aPageList, SZone& aZone, TUint32 aNumPages, TZonePageType aType)
       
  1332 	{
       
  1333 	// Don't allow unknown pages to be allocated, saves extra 'if' when 
       
  1334 	// creating bmaType.
       
  1335 	__NK_ASSERT_DEBUG(aType != EPageUnknown);
       
  1336 
       
  1337 	TBitMapAllocator& bmaAll = *aZone.iBma[KBmaAllPages];
       
  1338 	TBitMapAllocator& bmaType = *(aZone.iBma[aType]);
       
  1339 	TPhysAddr zpb = aZone.iPhysBase;
       
  1340 	TInt got = bmaAll.AllocList(aNumPages, (TInt*)aPageList);
       
  1341 	if (got)
       
  1342 		{
       
  1343 		TPhysAddr* pE = aPageList + got;
       
  1344 		while(aPageList < pE)
       
  1345 			{
       
  1346 			TInt ix = *aPageList;
       
  1347 			*aPageList++ = zpb + (ix << KPageShift);
       
  1348 			__KTRACE_OPT(KMMU,Kern::Printf("Got page @%08x",zpb + (ix << KPageShift)));
       
  1349 
       
  1350 			// Mark the page allocated on the page type bit map.
       
  1351 			bmaType.Alloc(ix, 1);
       
  1352 			}
       
  1353 		ZoneAllocPages(&aZone, got, aType);
       
  1354 #ifdef BTRACE_RAM_ALLOCATOR
       
  1355 		BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocRamPages, aType, got, *(pE-got));
       
  1356 #endif
       
  1357 		}
       
  1358 	return got;
       
  1359 	}
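
// Illustrative sketch (hypothetical member function, never compiled):
// ZoneFindPages() takes aPageList by reference and advances it past the
// entries it fills in, so calls against successive zones append to one list.
#if 0
TUint32 DRamAllocator::FillFromZonesExample(TPhysAddr* aList, TUint32 aNumPages)
	{
	TUint32 missing = aNumPages;
	for (TUint i = 0; missing && i < iNumZones; i++)
		missing -= ZoneFindPages(aList, iZones[i], missing, EPageFixed);
	return missing;	// Zero when aList holds aNumPages page addresses.
	}
#endif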
       
  1360 
       
  1361 /**
       
  1362 Allocate discontiguous pages.  
       
  1363 
       
  1364 Fixed pages are always allocated into the most preferable RAM zone that has free,
       
  1365 movable or discardable pages in it.  This is to avoid fixed pages being placed 
       
  1366 in the less preferred RAM zones.
       
  1367 
       
  1368 Movable and discardable pages are allocated into the RAM zones currently in use.
       
  1369 An empty RAM zone will only be used (switched on) if there are not enough free 
       
  1370 pages in the in-use RAM zones.  The pages will be allocated starting from the least
       
  1371 preferable RAM zone that will be in use after the allocation, moving to the more preferred RAM zones.
       
  1372 
       
  1373 If a valid zone is specified in aBlockedZoneId then that RAM zone will not be
       
  1374 allocated into.  Also, if both aBlockedZoneId and aBlockRest are set then the
       
  1375 allocation will stop as soon as aBlockedZoneId is reached in the preference ordering.
       
  1376 
       
  1377 @param aPageList 	On success, will contain the address of each allocated page
       
  1378 @param aNumPages 	The number of the pages to allocate
       
  1379 @param aType 		The type of the pages to allocate
       
  1380 @param aBlockedZoneId	The ID of the RAM zone that shouldn't be allocated into.  
       
  1381 						The default value has no effect.
       
  1382 @param aBlockRest 	Set to ETrue to stop this allocation using any currently empty 
       
  1383 					RAM zones, EFalse to allow empty RAM zones to be used. Only
       
  1384 					affects movable and discardable allocations.
       
  1385 
       
  1386 @return 0 on success, the number of extra pages required to fulfill the request on failure.
       
  1387 */
       
  1388 TInt DRamAllocator::AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType, TUint aBlockedZoneId, TBool aBlockRest)
       
  1389 	{
       
  1390 	__KTRACE_OPT(KMMU,Kern::Printf("AllocRamPages 0x%x type%d",aNumPages, aType));
       
  1391 
       
  1392 	M::RamAllocIsLocked();
       
  1393 
       
  1394 	// Should never allocate unknown pages.
       
  1395 	__NK_ASSERT_DEBUG(aType != EPageUnknown);
       
  1396 
       
  1397 	TPhysAddr* pageListBase = aPageList;
       
  1398 	TUint32 numMissing = aNumPages;
       
  1399 
       
  1400 	if (aType == EPageFixed)
       
  1401 		{// Currently only a general defrag operation should set this and it won't
       
  1402 		// allocate fixed pages.
       
  1403 		__NK_ASSERT_DEBUG(!aBlockRest);
       
  1404 		if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
       
  1405 			{// Not enough free space and not enough freeable pages.
       
  1406 			goto exit;
       
  1407 			}
       
  1408 
       
  1409 		// Search through each zone in preference order until all pages allocated or
       
  1410 		// have reached the end of the preference list
       
  1411 		SDblQueLink* link = iZonePrefList.First();
       
  1412 		while (numMissing && link != &iZonePrefList.iA)
       
  1413 			{
       
  1414 			SZone& zone = *_LOFF(link, SZone, iPrefLink);
       
  1415 			// Get the link to the next zone before any potential reordering,
       
  1416 			// which would occur if the previous zone has the same preference and
       
  1417 			// more free space after this allocation.
       
  1418 			link = link->iNext;
       
  1419 
       
  1420 			if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType))
       
  1421 				{// The flags disallow aType pages or all pages.
       
  1422 				__KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
       
  1423 				continue;
       
  1424 				}
       
  1425 
       
  1426 			numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType);
       
  1427 			__KTRACE_OPT(KMMU, Kern::Printf("zone.iId 0x%x", zone.iId));
       
  1428 
       
  1429 			if (numMissing && 
       
  1430 				(zone.iAllocPages[EPageMovable] || zone.iAllocPages[EPageDiscard]))
       
  1431 				{// Not all the required pages were allocated and there are still some
       
  1432 				// movable and discardable pages in this RAM zone.
       
  1433 				ZoneClearPages(zone, numMissing);
       
  1434 
       
  1435 				// Have discarded and moved everything required or possible so
       
  1436 				// now allocate into the pages just freed.
       
  1437 				numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType);
       
  1438 				}
       
  1439 			}
       
  1440 		}
       
  1441 	else
       
  1442 		{
       
  1443 		if ((TUint)aNumPages > iTotalFreeRamPages)
       
  1444 			{// Not enough free pages to fulfill this request so return amount required
       
  1445 			return aNumPages - iTotalFreeRamPages;
       
  1446 			}
       
  1447 
       
  1448 		// Determine if there are enough free pages in the RAM zones in use.
       
  1449 		TUint totalFreeInUse = 0;
       
  1450 		SDblQueLink* link = iZoneLeastMovDis;
       
  1451 		for(; link != &iZonePrefList.iA; link = link->iPrev)
       
  1452 			{
       
  1453 			SZone& zone = *_LOFF(link, SZone, iPrefLink);
       
  1454 			if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType) ||
       
  1455 				(aBlockRest && (zone.iFlags & KRamZoneFlagGenDefragBlock)))
       
  1456 				{// The blocked RAM zone or flags disallow aType pages or all pages
       
  1457 				__KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
       
  1458 				continue;
       
  1459 				}
       
  1460 			totalFreeInUse += zone.iFreePages;
       
  1461 			}
       
  1462 
       
  1463 		if (aBlockRest && totalFreeInUse < (TUint)aNumPages)
       
  1464 			{// Allocating as part of a general defragmentation and
       
  1465 			// can't allocate without using a RAM zone less preferable than
       
  1466 			// the current least preferable RAM zone with movable and/or
       
  1467 			// discardable pages.
       
  1468 			__NK_ASSERT_DEBUG(numMissing);
       
  1469 			goto exit;
       
  1470 			}
       
  1471 		
       
  1472 		SDblQueLink* leastClearable = iZoneLeastMovDis;
       
  1473 		while (totalFreeInUse < (TUint)aNumPages)
       
  1474 			{// The amount of free pages in the RAM zones with movable 
       
  1475 			// and/or discardable isn't enough.
       
  1476 			leastClearable = leastClearable->iNext;
       
  1477 			if (leastClearable == &iZonePrefList.iA)
       
  1478 				{// There are no more RAM zones to allocate into.
       
  1479 				__NK_ASSERT_DEBUG(numMissing);
       
  1480 				goto exit;
       
  1481 				}
       
  1482 			SZone& zone = *_LOFF(leastClearable, SZone, iPrefLink);
       
  1483 			if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType))
       
  1484 				{// The flags disallow aType pages or all pages
       
  1485 				__KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
       
  1486 				continue;
       
  1487 				}
       
  1488 			totalFreeInUse += zone.iFreePages;
       
  1489 			}
       
  1490 		// Now that we know exactly how many RAM zones will be required do
       
  1491 		// the allocation. To reduce fixed allocations having to clear RAM 
       
  1492 		// zones, allocate from the least preferable RAM to be used
       
  1493 		// to the most preferable RAM zone.
       
  1494 		link = leastClearable;
       
  1495 		while (numMissing)
       
  1496 			{
       
  1497 			__NK_ASSERT_DEBUG(link != &iZonePrefList.iA);
       
  1498 			SZone& zone = *_LOFF(link, SZone, iPrefLink);
       
  1499 			// Update the link before any reordering so we don't miss a RAM zone.
       
  1500 			link = link->iPrev;
       
  1501 
       
  1502 			if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType) ||
       
  1503 				(aBlockRest && (zone.iFlags & KRamZoneFlagGenDefragBlock)))
       
  1504 				{// The blocked RAM zone or flags disallow aType pages or all pages
       
  1505 				__KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
       
  1506 				continue;
       
  1507 				}
       
  1508 
       
  1509 			numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType);
       
  1510 			__KTRACE_OPT(KMMU, Kern::Printf("zone.iId 0x%x", zone.iId));
       
  1511 			}
       
  1512 		__NK_ASSERT_DEBUG(!numMissing);
       
  1513 		}
       
  1514 
       
  1515 exit:
       
  1516 	// Update here so any call to FreeRamPages doesn't upset count
       
  1517 	aNumPages -= numMissing; //set to number of pages that are allocated
       
  1518 	iTotalFreeRamPages -= aNumPages;
       
  1519 
       
  1520 	if (numMissing)
       
  1521 		{// Couldn't allocate all required pages so free those that were allocated
       
  1522 		FreeRamPages(pageListBase, aNumPages, aType);
       
  1523 		}
       
  1524 #ifdef BTRACE_RAM_ALLOCATOR
       
  1525 	else
       
  1526 		{
       
  1527 		BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocRamPagesEnd);
       
  1528 		}
       
  1529 #endif
       
  1530 	return numMissing;
       
  1531 	}
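
// Illustrative sketch (hypothetical wrapper, never compiled): a zero return
// from AllocRamPages() means the whole request succeeded; any other value is
// the shortfall, and the partially allocated pages have already been freed.
// KRamZoneInvalidId is assumed to be the "no blocked zone" default mentioned
// in the description above.
#if 0
TInt AllocMovableExample(DRamAllocator* aAllocator, TPhysAddr* aPages, TInt aCount)
	{
	TInt missing = aAllocator->AllocRamPages(aPages, aCount, EPageMovable, KRamZoneInvalidId, EFalse);
	return missing ? KErrNoMemory : KErrNone;
	}
#endif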
       
  1532 
       
  1533 
       
  1534 /**
       
  1535 Attempt to allocate discontiguous pages from the specified RAM zone.
       
  1536 
       
  1537 NOTE - This method only obeys the KRamZoneFlagNoAlloc and KRamZoneFlagClaiming 
       
  1538 flags and not the others.
       
  1539 But as currently only EFixed pages will be allocated using this method, that is
       
  1540 the desired behaviour.
       
  1541 
       
  1542 @param aZoneIdList 	An array of the IDs of the RAM zones to allocate from.
       
  1543 @param aZoneIdCount	The number of IDs in aZoneIdList.
       
  1544 @param aPageList 	On success, will contain the address of each allocated page.
       
  1545 @param aNumPages 	The number of the pages to allocate.
       
  1546 @param aType 		The type of the pages to allocate.
       
  1547 
       
  1548 @return KErrNone on success, KErrNoMemory if allocation couldn't succeed or 
       
  1549 the RAM zone has the KRamZoneFlagNoAlloc flag set, KErrArgument if a zone of
       
  1550 aZoneIdList doesn't exist or aNumPages is greater than the total pages in the zone.
       
  1551 */
       
  1552 TInt DRamAllocator::ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType)
       
  1553 	{
       
  1554 	M::RamAllocIsLocked();
       
  1555 	__NK_ASSERT_DEBUG(aType == EPageFixed);
       
  1556 
       
  1557 
       
  1558 	__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocRamPages 0x%x zones 0x%x",aNumPages, aZoneIdCount));
       
  1559 
       
  1560 	TInt r = KErrNone;
       
  1561 	TUint* zoneIdPtr = aZoneIdList;
       
  1562 	TUint* zoneIdEnd = zoneIdPtr + aZoneIdCount;
       
  1563 	TUint numMissing = aNumPages;
       
  1564 	TUint physicalPages = 0;
       
  1565 	TPhysAddr* pageListBase = aPageList;
       
  1566 
       
  1567 	// Always loop through all the RAM zones so that if an invalid ID is specified
       
  1568 	// it is always detected whether all the specified RAM zones were required 
       
  1569 	// for the allocation or not.
       
  1570 	for(; zoneIdPtr < zoneIdEnd; zoneIdPtr++)
       
  1571 		{
       
  1572 		SZone* zone = ZoneFromId(*zoneIdPtr);
       
  1573 
       
  1574 		if (zone == NULL)
       
  1575 			{// Invalid zone ID.
       
  1576 			r = KErrArgument;
       
  1577 			break;
       
  1578 			}
       
  1579 
       
  1580 		physicalPages += zone->iPhysPages;
       
  1581 
       
  1582 		if (zone->iFlags & (KRamZoneFlagNoAlloc|KRamZoneFlagClaiming))
       
  1583 			{// If this RAM zone can't be allocated into then skip it.
       
  1584 			continue;
       
  1585 			}
       
  1586 
       
  1587 		numMissing -= ZoneFindPages(aPageList, *zone, numMissing, aType);
       
  1588 
       
  1589 		if (numMissing && aType == EPageFixed)
       
  1590 			{// Remove up to required number of pages from the RAM zone 
       
  1591 			// and reattempt the allocation.
       
  1592 			ZoneClearPages(*zone, numMissing);
       
  1593 			numMissing -= ZoneFindPages(aPageList, *zone, numMissing, aType);
       
  1594 			}
       
  1595 		}
       
  1596 
       
  1597 	// Update iTotalFreeRamPages here so that if allocation doesn't succeed then
       
  1598 	// FreeRamPages() will keep it consistent.
       
  1599 	TUint numAllocated = aNumPages - numMissing;
       
  1600 	iTotalFreeRamPages -= numAllocated;
       
  1601 
       
  1602 	if (r == KErrArgument || physicalPages < (TUint)aNumPages)
       
  1603 		{// Invalid zone ID or the number of pages requested is too large.
       
  1604 		// This should fail regardless of whether the allocation failed or not.
       
  1605 		FreeRamPages(pageListBase, numAllocated, aType);
       
  1606 		return KErrArgument;
       
  1607 		}
       
  1608 
       
  1609 	if (numMissing)
       
  1610 		{// Couldn't allocate all required pages so free those that were allocated
       
  1611 		FreeRamPages(pageListBase, numAllocated, aType);
       
  1612 		return KErrNoMemory;
       
  1613 		}
       
  1614 
       
  1615 	// Have allocated all the required pages.
       
  1616 #ifdef BTRACE_RAM_ALLOCATOR
       
  1617 	BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocZoneRamPagesEnd);
       
  1618 #endif
       
  1619 	return KErrNone;
       
  1620 	}
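
// Illustrative sketch (hypothetical wrapper, never compiled) of allocating
// fixed pages from a caller-chosen set of RAM zones; the IDs below are
// made-up board-specific values.
#if 0
TInt AllocFromTwoZonesExample(DRamAllocator* aAllocator, TPhysAddr* aPages, TInt aCount)
	{
	TUint zoneIds[] = {1, 2};	// Hypothetical RAM zone IDs.
	return aAllocator->ZoneAllocRamPages(zoneIds, 2, aPages, aCount, EPageFixed);
	}
#endif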
       
  1621 
       
  1622 
       
  1623 /**
       
  1624 Will return zones one at a time in the following search patterns until a suitable
       
  1625 zone has been found or it is determined that there is no suitable zone:
       
  1626 	- preference order
       
  1627 	- address order
       
  1628 Before the first call of a new search sequence the caller must set:
       
  1629 		iZoneTmpAddrIndex = -1;
       
  1630 		iZoneTmpPrefLink = iZonePrefList.First();
       
  1631 
       
  1632 @param aZone On return this will be a pointer to the next zone to search.  
       
  1633 @param aState The current search state, i.e. which of the zone orderings to follow.
       
  1634 It will be updated if necessary by this function.
       
  1635 @param aType The type of page to be allocated.
       
  1636 @param aBlockedZoneId The ID of a RAM zone to not allocate into.
       
  1637 @param aBlockRest ETrue if allocation should fail as soon as a blocked zone is reached, 
       
  1638 EFalse otherwise. (Currently not used)
       
  1639 @return ETrue if a suitable zone is found, EFalse when the allocation is not possible.
       
  1640 */
       
  1641 TBool DRamAllocator::NextAllocZone(SZone*& aZone, TZoneSearchState& aState, TZonePageType aType, TUint aBlockedZoneId, TBool aBlockRest)
       
  1642 	{
       
  1643 	TUint currentState = aState;
       
  1644 	TBool r = EFalse;
       
  1645 
       
  1646 	for (; currentState < EZoneSearchEnd; currentState++)
       
  1647 		{
       
  1648 		if (currentState == EZoneSearchAddr)
       
  1649 			{
       
  1650 			iZoneTmpAddrIndex++;
       
  1651 			for (; iZoneTmpAddrIndex < (TInt)iNumZones; iZoneTmpAddrIndex++)
       
  1652 				{
       
  1653 				aZone = iZones + iZoneTmpAddrIndex;
       
  1654 				if (aBlockedZoneId != aZone->iId && !NoAllocOfPageType(*aZone, aType))
       
  1655 					{
       
  1656 					r = ETrue;
       
  1657 					goto exit;
       
  1658 					}				
       
  1659 				}
       
  1660 			}
       
  1661 		else
       
  1662 			{
       
  1663 			while(iZoneTmpPrefLink != &iZonePrefList.iA)
       
  1664 				{
       
  1665 				aZone = _LOFF(iZoneTmpPrefLink, SZone, iPrefLink);
       
  1666 				iZoneTmpPrefLink = iZoneTmpPrefLink->iNext; // Update before any re-ordering
       
  1667 				if (aBlockedZoneId != aZone->iId && !NoAllocOfPageType(*aZone, aType))
       
  1668 					{
       
  1669 					r = ETrue;
       
  1670 					goto exit;
       
  1671 					}
       
  1672 				}
       
  1673 			}
       
  1674 		}
       
  1675 exit:
       
  1676 	__NK_ASSERT_DEBUG((r && currentState < EZoneSearchEnd) || (!r && currentState == EZoneSearchEnd));
       
  1677 
       
  1678 	aState = (TZoneSearchState)currentState;
       
  1679 	return r;
       
  1680 	}
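
// Illustrative sketch (hypothetical member function, never compiled) of the
// search protocol documented above: reset the temporary search state, then
// pull zones until the search is exhausted.  AllocContiguousRam() below
// drives NextAllocZone() in exactly this way.
#if 0
void DRamAllocator::VisitAllocZonesExample(TZonePageType aType)
	{
	iZoneTmpAddrIndex = -1;
	iZoneTmpPrefLink = iZonePrefList.First();
	TZoneSearchState state = EZoneSearchPref;
	SZone* zone;
	while (NextAllocZone(zone, state, aType, KRamZoneInvalidId, EFalse))
		{
		// ... attempt to satisfy the allocation from 'zone' ...
		}
	}
#endif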
       
  1681 
       
  1682 /**
       
  1683 Search through the zones for the requested contiguous RAM, first in preference 
       
  1684 order then, if that fails, in address order.
       
  1685 
       
  1686 @param aNumPages The number of contiguous pages to find
       
  1687 @param aPhysAddr Will contain the base address of any contiguous run if found
       
  1688 @param aType The page type of the memory to be allocated
       
  1689 @param aAlign Alignment specified as the alignment shift
       
  1690 @param aBlockedZoneId The ID of a zone that can't be allocated into, by default this has no effect
       
  1691 @param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached 
       
  1692 in preference ordering.  EFalse otherwise.
       
  1693 
       
  1694 @return KErrNone on success, KErrNoMemory otherwise
       
  1695 */	
       
  1696 TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
       
  1697 	{
       
  1698 	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));
       
  1699 
       
  1700 	M::RamAllocIsLocked();
       
  1701 
       
  1702 	// No support for non-fixed pages as this will discard and move 
       
  1703 	// pages if required.
       
  1704 	__NK_ASSERT_DEBUG(aType == EPageFixed);
       
  1705 	TInt alignWrtPage = Max(aAlign - KPageShift, 0);
       
  1706 	TUint32 alignmask = (1u << alignWrtPage) - 1;
       
  1707 
       
  1708 	// Attempt to find enough pages searching in preference order first then
       
  1709 	// in address order
       
  1710 	TZoneSearchState searchState = EZoneSearchPref;
       
  1711 	SZone* zone;
       
  1712 	SZone* prevZone = NULL;
       
  1713 	TInt carryAll = 0;		// Carry for all pages bma, clear to start new run.
       
  1714 	TInt carryImmov = 0;	// Carry for immovable pages bma, clear to start new run.
       
  1715 	TInt base = 0;
       
  1716 	TInt offset = 0;
       
  1717 	iZoneTmpAddrIndex = -1;
       
  1718 	iZoneTmpPrefLink = iZonePrefList.First();
       
  1719 	while (NextAllocZone(zone, searchState, aType, aBlockedZoneId, aBlockRest))
       
  1720 		{
       
  1721 		// Be sure to start from scratch if zone not contiguous with previous zone
       
  1722 		if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
       
  1723 			{
       
  1724 			carryAll = 0;
       
  1725 			carryImmov = 0;
       
  1726 			}
       
  1727 		prevZone = zone;
       
  1728 		TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
       
  1729 		base = TInt(zone->iPhysBase >> KPageShift);
       
  1730 		TInt runLength;
       
  1731 		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset));
       
  1732 		offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength);
       
  1733 		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
       
  1734 
       
  1735 		if (offset >= 0)
       
  1736 			{// Have found enough contiguous pages so return address of physical page
       
  1737 			 // at the start of the region
       
  1738 			aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
       
  1739 			MarkPagesAllocated(aPhysAddr, aNumPages, aType);
       
  1740 
       
  1741 			__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
       
  1742 #ifdef BTRACE_RAM_ALLOCATOR
       
  1743 			BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
       
  1744 #endif
       
  1745 			return KErrNone;
       
  1746 			}
       
  1747 		else
       
  1748 			{// No run found when looking in just the free pages so see if this
       
  1749 			// RAM zone could be used if pages were moved or discarded.
       
  1750 			if (aNumPages > KMaxFreeableContiguousPages)
       
  1751 				{// Can't move or discard any pages so move on to next RAM zone 
       
  1752 				// taking any run at the end of this RAM zone into account.
       
  1753 				carryImmov = 0;
       
  1754 				continue;
       
  1755 				}
       
  1756 			TBitMapAllocator& bmaImmov = *(zone->iBma[EPageFixed]);
       
  1757 			offset = 0;	// Clear so searches whole of fixed BMA on the first pass.
       
  1758 			do
       
  1759 				{
       
  1760 				__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryImmov=%08x offset=%08x", base, carryImmov, offset));
       
  1761 				offset = bmaImmov.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryImmov, runLength, offset);
       
  1762 				__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
       
  1763 				if (offset >= 0)
       
  1764 					{// Have found a run in immovable page bma so attempt to clear
       
  1765 					// it for the allocation.
       
  1766 					TPhysAddr addrBase = TPhysAddr((base + offset - carryImmov + alignmask) & ~alignmask) << KPageShift;
       
  1767 					TPhysAddr addrEnd = addrBase + (aNumPages << KPageShift);
       
  1768 					
       
  1769 					// Block the RAM zones containing the contiguous region
       
  1770 					// from being allocated into when pages are moved or replaced.
       
  1771 					TPhysAddr addr = addrBase;
       
  1772 					TInt tmpOffset;
       
  1773 					SZone* tmpZone = GetZoneAndOffset(addr, tmpOffset);
       
  1774 					while (addr < addrEnd-1)
       
  1775 						{
       
  1776 						tmpZone->iFlags |= KRamZoneFlagTmpBlockAlloc;
       
  1777 						addr = tmpZone->iPhysEnd;
       
  1778 						tmpZone++;
       
  1779 						}
       
  1780 
       
  1781 					addr = addrBase;
       
  1782 					TInt contigOffset = 0;
       
  1783 					SZone* contigZone = GetZoneAndOffset(addr, contigOffset);
       
  1784 					for (; addr != addrEnd; addr += KPageSize, contigOffset++)
       
  1785 						{
       
  1786 						if (contigZone->iPhysEnd < addr)
       
  1787 							{
       
  1788 							contigZone = GetZoneAndOffset(addr, contigOffset);
       
  1789 							__NK_ASSERT_DEBUG(contigZone != NULL);
       
  1790 							}
       
  1791 #ifdef _DEBUG			// This page shouldn't be allocated as fixed, only movable or discardable.
       
  1792 						__NK_ASSERT_DEBUG(contigZone != NULL);
       
  1793 						__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotAllocated(contigOffset, 1));
       
  1794 						SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(addr);
       
  1795 						__NK_ASSERT_DEBUG(pageInfo != NULL);
       
  1796 #endif
       
  1797 						TPhysAddr newAddr;
       
  1798 						TInt moveRet = M::MovePage(addr, newAddr, contigZone->iId, EFalse);
       
  1799 						if (moveRet != KErrNone && moveRet != KErrNotFound)
       
  1800 							{// This page couldn't be moved or discarded so 
       
  1801 							// restart the search the page after this one.
       
  1802 							__KTRACE_OPT(KMMU2, 
       
  1803 										Kern::Printf("ContigMov fail offset %x moveRet %d addr %x carryImmov %x", 
       
  1804 										offset, moveRet, addr, carryImmov));
       
  1805 							// Can't rely on RAM zone preference ordering being
       
  1806 							// the same so clear carrys and restart search from
       
  1807 							// within the current RAM zone or skip onto the next 
       
  1808 							// one if at the end of this one.
       
  1809 							carryImmov = 0;
       
  1810 							carryAll = 0;
       
  1811 							offset = (addr < zone->iPhysBase)? 0 : contigOffset + 1;
       
  1812 							__KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail offset %x", offset));
       
  1813 							break;
       
  1814 							}
       
  1815 						}
       
  1816 					// Unblock the RAM zones containing the contiguous region. 
       
  1817 					TPhysAddr flagAddr = addrBase;
       
  1818 					tmpZone = GetZoneAndOffset(flagAddr, tmpOffset);
       
  1819 					while (flagAddr < addrEnd-1)
       
  1820 						{
       
  1821 						tmpZone->iFlags &= ~KRamZoneFlagTmpBlockAlloc;
       
  1822 						flagAddr = tmpZone->iPhysEnd;
       
  1823 						tmpZone++;
       
  1824 						}
       
  1825 
       
  1826 					if (addr == addrEnd)
       
  1827 						{// Cleared all the required pages so allocate them.
       
  1828 						// Return address of physical page at the start of the region.
       
  1829 						aPhysAddr = addrBase;
       
  1830 						MarkPagesAllocated(aPhysAddr, aNumPages, aType);
       
  1831 
       
  1832 						__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
       
  1833 #ifdef BTRACE_RAM_ALLOCATOR
       
  1834 						BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
       
  1835 #endif
       
  1836 						return KErrNone;
       
  1837 						}
       
  1838 					}
       
  1839 				}
       
  1840 			// Keep searching immovable page bma of the current RAM zone until 
       
  1841 			// gone past end of RAM zone or no run can be found.
       
  1842 			while (offset >= 0 && (TUint)offset < zone->iPhysPages);
       
  1843 			}
       
  1844 		}
       
  1845 	return KErrNoMemory;
       
  1846 	}
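
// Illustrative sketch (hypothetical wrapper, never compiled): aAlign is an
// alignment shift in bytes, so with 4KB pages a request for 16 contiguous
// pages on a 1MB boundary passes a shift of 20.
#if 0
TInt AllocMegAlignedExample(DRamAllocator* aAllocator, TPhysAddr& aPhysAddr)
	{
	return aAllocator->AllocContiguousRam(16, aPhysAddr, EPageFixed, 20, KRamZoneInvalidId, EFalse);
	}
#endif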
       
  1847 
       
  1848 
       
  1849 /**
       
  1850 Attempt to allocate the contiguous RAM from the specified zones.
       
  1851 
       
  1852 NOTE - This method only obeys the KRamZoneFlagNoAlloc and KRamZoneFlagClaiming 
       
  1853 flags and not the others.
       
  1854 But as currently only EFixed pages will be allocated using this method, that is
       
  1855 the desired behaviour.
       
  1856 
       
  1857 @param aZoneIdList 	An array of the IDs of the RAM zones to allocate from.
       
  1858 @param aZoneIdCount	The number of the IDs listed by aZoneIdList.
       
  1859 @param aSize 		The number of contiguous bytes to find
       
  1860 @param aPhysAddr 	Will contain the base address of the contiguous run if found
       
  1861 @param aType 		The page type of the memory to be allocated
       
  1862 @param aAlign 		Alignment specified as the alignment shift
       
  1863 
       
  1864 @return KErrNone on success, KErrNoMemory if allocation couldn't succeed or 
       
  1865 the RAM zone has the KRamZoneFlagNoAlloc flag set.  KErrArgument if a zone of
       
  1866 aZoneIdList doesn't exist or if aSize is larger than the total size of the zones.
       
  1867 */	
       
  1868 TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign)
       
  1869 	{
       
  1870 	__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam zones 0x%x size 0x%08x align %d",aZoneIdCount, aSize, aAlign));
       
  1871 
       
  1872 	M::RamAllocIsLocked();
       
  1873 	__NK_ASSERT_DEBUG(aType == EPageFixed);
       
  1874 
       
  1875 
       
  1876 	TUint numPages = (aSize + KPageSize - 1) >> KPageShift;
       
  1877 	TInt carry = 0; // must be zero as this is always the start of a new run
       
  1878 	TInt alignWrtPage = Max(aAlign - KPageShift, 0);
       
  1879 	TUint32 alignmask = (1u << alignWrtPage) - 1;
       
  1880 	TInt offset = -1;
       
  1881 	TInt base = 0;
       
  1882 	
       
  1883 	TUint physPages = 0;
       
  1884 	TUint* zoneIdPtr = aZoneIdList;
       
  1885 	TUint* zoneIdEnd = aZoneIdList + aZoneIdCount;
       
  1886 	SZone* prevZone = NULL;
       
  1887 	for (; zoneIdPtr < zoneIdEnd; zoneIdPtr++)
       
  1888 		{
       
  1889 		SZone* zone = ZoneFromId(*zoneIdPtr);
       
  1890 		if (zone == NULL)
       
  1891 			{// Couldn't find zone of this ID or it isn't large enough
       
  1892 			return KErrArgument;
       
  1893 			}
       
  1894 		physPages += zone->iPhysPages;
       
  1895 
       
  1896 		if (offset >= 0 ||
       
  1897 			(zone->iFlags & (KRamZoneFlagNoAlloc|KRamZoneFlagClaiming)))
       
  1898 			{// Keep searching through the RAM zones if the allocation
       
  1899 			// has succeeded, to ensure the ID list is always fully verified or
       
  1900 			// if this zone is currently blocked for further allocations.
       
  1901 			continue;
       
  1902 			}
       
  1903 
       
  1904 		// Be sure to start from scratch if zone not contiguous with previous zone
       
  1905 		if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
       
  1906 			{
       
  1907 			carry = 0;
       
  1908 			}
       
  1909 		prevZone = zone;
       
  1910 
       
  1911 		TInt len;
       
  1912 		TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
       
  1913 		base = TInt(zone->iPhysBase >> KPageShift);
       
  1914 
       
  1915 		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: aBase=%08x aCarry=%08x", base, carry));
       
  1916 		offset = bmaAll.AllocAligned(numPages, alignWrtPage, base, EFalse, carry, len);
       
  1917 		__KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
       
  1918 		}
       
  1919 
       
  1920 	if (physPages < numPages)
       
  1921 		{// The allocation requested is too large for the specified RAM zones.
       
  1922 		return KErrArgument;
       
  1923 		}
       
  1924 
       
  1925 	if (offset < 0)
       
  1926 		{// The allocation failed.
       
  1927 		return KErrNoMemory;
       
  1928 		}
       
  1929 
       
  1930 	// Have found enough contiguous pages so mark the pages allocated and 
       
  1931 	// return address of physical page at the start of the region.
       
  1932 	aPhysAddr = TPhysAddr((base + offset - carry + alignmask) & ~alignmask) << KPageShift;
       
  1933 	MarkPagesAllocated(aPhysAddr, numPages, aType);
       
  1934 
       
  1935 	__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam returns %08x",aPhysAddr));
       
  1936 #ifdef BTRACE_RAM_ALLOCATOR
       
  1937 	BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocZoneContiguousRam, aType, numPages, aPhysAddr);
       
  1938 #endif
       
  1939 	return KErrNone;
       
  1940 	}
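
// Illustrative sketch (hypothetical wrapper, never compiled): unlike
// AllocContiguousRam() above, this variant takes a byte count, which it
// rounds up to whole pages.  The zone ID is a made-up value.
#if 0
TInt ZoneContigExample(DRamAllocator* aAllocator, TPhysAddr& aPhysAddr)
	{
	TUint zoneId = 3;	// Hypothetical RAM zone ID.
	return aAllocator->ZoneAllocContiguousRam(&zoneId, 1, 100*1024, aPhysAddr, EPageFixed, KPageShift);
	}
#endif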
       
  1941 
       
  1942 
       
  1943 /**
       
  1944 Attempt to set the specified contiguous block of RAM pages to be either 
       
  1945 allocated or free.
       
  1946 
       
  1947 @param aBase The base address of the RAM to update.
       
  1948 @param aSize The number of contiguous bytes of RAM to update.
       
  1949 @param aState Set to ETrue to free the RAM, EFalse to allocate the RAM.
       
  1950 @param aType The type of the pages being updated.
       
  1951 
       
  1952 @return KErrNone on success, KErrArgument if aBase is an invalid address, 
       
  1953 KErrGeneral if a page being marked free is already free,
       
  1954 KErrInUse if the page being marked allocated is already allocated.
       
  1955 */
       
  1956 TInt DRamAllocator::SetPhysicalRamState(TPhysAddr aBase, TInt aSize, TBool aState, TZonePageType aType)
       
  1957 	{
       
  1958 	M::RamAllocIsLocked();
       
  1959 
       
  1960 	__KTRACE_OPT(KMMU,Kern::Printf("SetPhysicalRamState(%08x,%x,%d)",aBase,aSize,aState?1:0));
       
  1961 	TUint32 pageMask = KPageSize-1;
       
  1962 	aSize += (aBase & pageMask);
       
  1963 	aBase &= ~pageMask;
       
  1964 	TInt npages = (aSize + pageMask) >> KPageShift;
       
  1965 	__KTRACE_OPT(KMMU,Kern::Printf("Rounded base %08x npages=%x",aBase,npages));
       
  1966 	TInt baseOffset;
       
  1967 	SZone* baseZone = GetZoneAndOffset(aBase, baseOffset);
       
  1968 	if (!baseZone || (TUint32)aSize > (iPhysAddrTop - aBase + 1))
       
  1969 		{
       
  1970 		return KErrArgument;
       
  1971 		}
       
  1972 	SZone* zone = baseZone;
       
  1973 	SZone* zoneEnd = iZones + iNumZones;
       
  1974 	TPhysAddr base = aBase;
       
  1975 	TInt pagesLeft = npages;
       
  1976 	TInt offset = baseOffset;
       
  1977 	TInt pageCount = -1;
       
  1978 	__KTRACE_OPT(KMMU2,Kern::Printf("Zone %x page index %x z=%08x zE=%08x n=%x base=%08x",zone->iId, offset, zone, zoneEnd, pagesLeft, base));
       
  1979 	for (; 	pagesLeft && zone < zoneEnd; ++zone)
       
  1980 		{
       
  1981 		if (zone->iPhysBase + (offset << KPageShift) != base)
       
  1982 			{// Zone not contiguous with the current run of pages, so we have been
       
  1983 			// asked to set the state of non-existent pages.
       
  1984 			return KErrArgument;
       
  1985 			}
       
  1986 
       
  1987 		TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
       
  1988 		TInt zp_rem = bmaAll.iSize - offset;
       
  1989 		pageCount = Min(pagesLeft, zp_rem);
       
  1990 		__KTRACE_OPT(KMMU2,Kern::Printf("Zone %x pages %x+%x base %08x", zone->iId, offset, pageCount, base));
       
  1991 		if(aState)
       
  1992 			{
       
  1993 			if(bmaAll.NotAllocated(offset, pageCount))
       
  1994 				{
       
  1995 				return KErrGeneral;
       
  1996 				}
       
  1997 			}
       
  1998 		else
       
  1999 			{
       
  2000 			if(bmaAll.NotFree(offset, pageCount))
       
  2001 				{
       
  2002 				return KErrInUse;
       
  2003 				}
       
  2004 			}
       
  2005 		pagesLeft -= pageCount;
       
  2006 		offset = 0;
       
  2007 		base += (TPhysAddr(pageCount) << KPageShift);
       
  2008 		}
       
  2009 	if (pagesLeft)
       
  2010 		{
       
  2011 		return KErrArgument;	// not all of the specified range exists
       
  2012 		}
       
  2013 
       
  2014 	iTotalFreeRamPages += (aState ? npages : -npages);
       
  2015 	zone = baseZone;
       
  2016 	offset = baseOffset;
       
  2017 	for (pagesLeft = npages; pagesLeft; pagesLeft -= pageCount)
       
  2018 		{
       
  2019 		TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
       
  2020 		// Unknown and fixed pages share a bit map.
       
  2021 		TBitMapAllocator& bmaType = *(zone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
       
  2022 		TInt zp_rem = bmaAll.iSize - offset;
       
  2023 		pageCount = Min(pagesLeft, zp_rem);
       
  2024 		if (aState)
       
  2025 			{
       
  2026 			bmaAll.Free(offset, pageCount);
       
  2027 			bmaType.Free(offset, pageCount);
       
  2028 			ZoneFreePages(zone, pageCount, aType);
       
  2029 			}
       
  2030 		else
       
  2031 			{
       
  2032 			bmaAll.Alloc(offset, pageCount);
       
  2033 			bmaType.Alloc(offset, pageCount);
       
  2034 			ZoneAllocPages(zone, pageCount, aType);
       
  2035 			}
       
  2036 		__KTRACE_OPT(KMMU2,Kern::Printf("Zone %d pages %x+%x base %08x",zone-iZones, offset, pageCount, base));
       
  2037 		++zone;
       
  2038 		offset = 0;
       
  2039 		}
       
  2040 	return KErrNone;
       
  2041 	}
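
// Illustrative sketch (hypothetical helpers, never compiled): claiming and
// releasing a physical region.  The range is rounded out to page boundaries
// internally, so an unaligned request affects every page it touches.
#if 0
TInt ClaimRegionExample(DRamAllocator* aAllocator, TPhysAddr aBase, TInt aSize)
	{
	// EFalse == mark the pages allocated; fails with KErrInUse if any is taken.
	return aAllocator->SetPhysicalRamState(aBase, aSize, EFalse, EPageFixed);
	}

TInt ReleaseRegionExample(DRamAllocator* aAllocator, TPhysAddr aBase, TInt aSize)
	{
	// ETrue == mark the pages free; fails with KErrGeneral if any is already free.
	return aAllocator->SetPhysicalRamState(aBase, aSize, ETrue, EPageFixed);
	}
#endif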
       
  2042 
       
  2043 /** Update the allocated page counts for the zone that the page is allocated into.
       
  2044 
       
  2045 @param aPageInfo The SPageInfo of the page whose type is being changed
       
  2046 @param aOldType The type the page was allocated as
       
  2047 @param aNewType The type the page is changing to
       
  2048 */
       
  2049 void DRamAllocator::ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldType, TZonePageType aNewType)
       
  2050 	{
       
  2051 
       
  2052 	TInt offset;
       
  2053 	SZone* zone = GetZoneAndOffset(aPageInfo->PhysAddr(), offset);
       
  2054 #ifdef _DEBUG
       
  2055 // ***********	System lock may be held while this is invoked so don't do********
       
  2056 // ***********	anything too slow and definitely don't call zone callback********
       
  2057 	M::RamAllocIsLocked();
       
  2058 	CHECK_PRECONDITIONS((MASK_THREAD_CRITICAL) & ~MASK_NO_FAST_MUTEX, "DRamAllocator::ChangePageType");
       
  2059 
       
  2060 	// Get zone page is in and on debug builds check that it is allocated
       
  2061 	if (zone == NULL || zone->iBma[KBmaAllPages]->NotAllocated(offset, 1))
       
  2062 		{
       
  2063 		Panic(EAllocRamPagesInconsistent);
       
  2064 		}
       
  2065 
       
  2066 	// Check if adjusting counts is valid, i.e. won't cause a roll over
       
  2067 	if (zone->iAllocPages[aOldType] - 1 > zone->iAllocPages[aOldType] ||
       
  2068 		zone->iAllocPages[aNewType] + 1 < zone->iAllocPages[aNewType])
       
  2069 		{
       
  2070 		__KTRACE_OPT(KMMU, Kern::Printf("ChangePageType Alloc Unk %x Fx %x Mv %x Dis %x",zone->iAllocPages[EPageUnknown],
       
  2071 					zone->iAllocPages[EPageFixed], zone->iAllocPages[EPageMovable],zone->iAllocPages[EPageDiscard]));
       
  2072 		Panic(EZonesCountErr);
       
  2073 		}
       
  2074 #endif
       
  2075 
       
  2076 	// Update the counts and bmas
       
  2077 	zone->iAllocPages[aOldType]--;
       
  2078 	zone->iBma[aOldType]->Free(offset);
       
  2079 	zone->iAllocPages[aNewType]++;
       
  2080 	zone->iBma[aNewType]->Alloc(offset, 1);
       
  2081 
       
  2082 	__KTRACE_OPT(KMMU2, Kern::Printf("ChangePageType Alloc Unk %x Fx %x Mv %x Dis %x",zone->iAllocPages[EPageUnknown],
       
  2083 					zone->iAllocPages[EPageFixed], zone->iAllocPages[EPageMovable],zone->iAllocPages[EPageDiscard]));
       
  2084 #ifdef BTRACE_RAM_ALLOCATOR
       
  2085 	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocChangePageType, aNewType, aPageInfo->PhysAddr());
       
  2086 #endif
       
  2087 	}
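
// Illustrative sketch (hypothetical call site, never compiled): a component
// that pins a movable page in place would retype it like this.  The system
// lock caveat in the comment above still applies.
#if 0
void PinPageExample(DRamAllocator* aAllocator, SPageInfo* aPageInfo)
	{
	aAllocator->ChangePageType(aPageInfo, EPageMovable, EPageFixed);
	}
#endif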
       
  2088 
       
  2089 /**
       
  2090 Get the next page in this zone that is allocated after this one.
       
  2091 
       
  2092 @param aZone	The zone to find the next allocated page in.
       
  2093 @param aOffset	On entry this is the offset from which the next allocated
       
  2094 				page in the zone should be found, on return it will be the offset 
       
  2095 				of the next allocated page.
       
  2096 @return KErrNone if a next allocated page could be found, KErrNotFound if no more pages in
       
  2097 the zone after aOffset are allocated, KErrArgument if aOffset is outside the zone.
       
  2098 */
       
  2099 TInt DRamAllocator::NextAllocatedPage(SZone* aZone, TUint& aOffset, TZonePageType aType) const
       
  2100 	{
       
  2101 	const TUint KWordAlignMask = KMaxTUint32 << 5;
       
  2102 
       
  2103 	M::RamAllocIsLocked();
       
  2104 
       
  2105 	__NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
       
  2106 	// Makes things simpler for bma selection.
       
  2107 	__NK_ASSERT_DEBUG(aType != EPageUnknown);
       
  2108 
       
  2109 	if (aOffset >= aZone->iPhysPages)
       
  2110 		{// Starting point is outside the zone
       
  2111 		return KErrArgument;
       
  2112 		}
       
  2113 
       
  2114 	TUint offset = aOffset;
       
  2115 	TUint endOffset = aZone->iPhysPages;
       
  2116 	TUint endOffsetAligned = endOffset & KWordAlignMask;
       
  2117 
       
  2118 	// Select the BMA to search.
       
  2119 	TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
       
  2120 	TUint32* map = &(aZone->iBma[bmaIndex]->iMap[offset >> 5]);
       
  2121 	TUint32 bits = *map++;
       
  2122 
       
  2123 	// Set bits for pages before 'offset' (i.e. ones we want to ignore)...
       
  2124 	bits |= ~(KMaxTUint32 >> (offset & ~KWordAlignMask));
       
  2125 
       
  2126 	// Find the first bit map word from aOffset in aZone with allocated pages
       
  2127 	while (bits == KMaxTUint32 && offset < endOffsetAligned)
       
  2128 		{
       
  2129 		bits = *map++;
       
  2130 		offset = (offset + 32) & KWordAlignMask;
       
  2131 		}
       
  2132 
       
  2133 	if (offset >= endOffsetAligned && endOffset != endOffsetAligned)
       
  2134 		{// Have reached the last bit mask word so set the bits that are
       
  2135 		//  outside of the zone so that they are ignored.
       
  2136 		bits |= KMaxTUint32 >> (endOffset - endOffsetAligned);
       
  2137 		}
       
  2138 
       
  2139 	if (bits == KMaxTUint32)
       
  2140 		{// No allocated pages found after aOffset in aZone.
       
  2141 		return KErrNotFound;
       
  2142 		}
       
  2143 
       
  2144 	// Now we have bits with allocated pages in it so determine the exact 
       
  2145 	// offset of the next allocated page
       
  2146 	TUint32 mask = 0x80000000 >> (offset & ~KWordAlignMask);
       
  2147 	while (bits & mask)
       
  2148 		{
       
  2149 		mask >>= 1;
       
  2150 		offset++;
       
  2151 		}
       
  2152 
       
  2153 	if (offset >= endOffset)
       
  2154 		{// Reached the end of the zone without finding an allocated page after aOffset
       
  2155 		return KErrNotFound;
       
  2156 		}
       
  2157 
       
  2158 	// Should definitely have found an allocated page within aZone's pages
       
  2159 	__NK_ASSERT_DEBUG(mask != 0 && !(bits & mask) && offset < aZone->iPhysPages);
       
  2160 
       
  2161 	aOffset = offset;
       
  2162 	return KErrNone;
       
  2163 	}
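
// Illustrative sketch (hypothetical helper, never compiled): walking every
// allocated discardable page in a zone, the same pattern ZoneClearPages()
// above uses.  Offsets convert to physical addresses via the zone base.
#if 0
void VisitDiscardablePagesExample(DRamAllocator* aAllocator, SZone& aZone)
	{
	TUint offset = 0;
	while (aAllocator->NextAllocatedPage(&aZone, offset, EPageDiscard) == KErrNone)
		{
		TPhysAddr page = aZone.iPhysBase + (offset << KPageShift);
		// ... inspect 'page' ...
		offset++;	// Resume the scan after this page.
		}
	}
#endif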
       
  2164 
       
  2165 /**
       
  2166 See if any of the least preferable RAM zones can be emptied.  If they can then 
       
  2167 initialise the allocator for a general defragmentation operation.
       
  2168 
       
  2169 Stage 0 of the general defrag is to ensure that there are enough free 
       
  2170 pages in the more preferable RAM zones to be in use after the general defrag
       
  2171 for the movable page allocations.  This is achieved by discarding the 
       
  2172 required amount of discardable pages from the more preferable RAM zones
       
  2173 to be in use after the general defrag.
       
  2174 
       
  2175 
       
  2176 @param 	aStage					On return this will contain the stage the general
       
  2177 								defragmentation should begin at.  I.e. if no RAM 
       
  2178 								zones can be cleared then just perform the final
       
  2179 								tidying stage.
       
  2180 @param 	aRequiredToBeDiscarded	On return this will contain the number of 
       
  2181 								discardable pages that need to be discarded 
       
  2182 								from the RAM zones to be in use after the 
       
  2183 								general defrag.
       
  2184 @return Pointer to the RAM zone object that may potentially have pages
       
  2185 		discarded by the general defrag.  This will be NULL if no suitable 
       
  2186 		RAM zone could be found.
       
  2187 */
       
  2188 SZone* DRamAllocator::GeneralDefragStart0(TGenDefragStage& aStage, TUint& aRequiredToBeDiscarded)
       
  2189 	{
       
  2190 #ifdef _DEBUG
       
  2191 	if (!K::Initialising) 
       
  2192 		{
       
  2193 		M::RamAllocIsLocked();
       
  2194 #ifdef __VERIFY_LEASTMOVDIS
       
  2195 		VerifyLeastPrefMovDis();
       
  2196 #endif
       
  2197 		}
       
  2198 	// Any previous general defrag operation must have ended.
       
  2199 	__NK_ASSERT_DEBUG(iZoneGeneralPrefLink == NULL);
       
  2200 	__NK_ASSERT_DEBUG(iZoneGeneralTmpLink == NULL);
       
  2201 #endif
       
  2202 
       
  2203 	if (iNumZones == 1)
       
  2204 		{
       
  2205 		// Only have one RAM zone so a defrag can't do anything.
       
  2206 		return NULL;
       
  2207 		}
       
  2208 
       
  2209 	// Determine how many movable or discardable pages are required to be allocated.
       
  2210 	TUint requiredPagesDis = 0;
       
  2211 	TUint requiredPagesMov = 0;
       
  2212 	TUint firstClearableInUseRank = 0;
       
  2213 	SDblQueLink* link = iZoneLeastMovDis;
       
  2214 	do
       
  2215 		{
       
  2216 		SZone& zone = *_LOFF(link, SZone, iPrefLink);
       
  2217 		requiredPagesDis += zone.iAllocPages[EPageDiscard];
       
  2218 		requiredPagesMov += zone.iAllocPages[EPageMovable];
       
  2219 		
       
  2220 		if (!firstClearableInUseRank && 
       
  2221 			(zone.iAllocPages[EPageMovable] || zone.iAllocPages[EPageDiscard]) &&
       
  2222 			!zone.iAllocPages[EPageFixed] && !zone.iAllocPages[EPageUnknown])
       
  2223 			{// This is the least preferable RAM zone that has movable or
       
  2224 			// discardable pages but may be clearable as it has no immovable pages.
       
  2225 			firstClearableInUseRank = zone.iPrefRank;
       
  2226 			}
       
  2227 
       
  2228 		// Reset the KRamZoneFlagGenDefrag flag bit for each RAM zone to be defragmented.
       
  2229 		zone.iFlags &= ~(KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock);
       
  2230 
       
  2231 		link = link->iPrev;
       
  2232 		}
       
  2233 	while (link != &iZonePrefList.iA);
       
  2234 
       
  2235 	// Adjust the number of discardable pages for those that are freeable.
       
  2236 	// Dirty pages will be moved rather than discarded so they are not freeable
       
  2237 	// and we must make sure that we have enough space in zones for these dirty 
       
  2238 	// paged pages.
       
  2239 	__NK_ASSERT_DEBUG(requiredPagesDis >= (TUint)M::NumberOfFreeDpPages());
       
  2240 	requiredPagesDis -= M::NumberOfFreeDpPages();
       
  2241 	TUint totalDirtyPagesDis = M::NumberOfDirtyDpPages();
       
  2242 	if (requiredPagesDis < totalDirtyPagesDis)
       
  2243 		requiredPagesDis = totalDirtyPagesDis;
       
  2244 
       
	// Determine which is the least preferable RAM zone that needs to be
	// in use for the required number of movable and discardable pages.
	TUint onlyPagesDis = 0;		// Number of pages in RAM zones for discard only.
	TUint onlyPagesMov = 0;		// Number of pages in RAM zones for movable only.
	TUint totalPagesDis = 0;	// Total pages found so far for discardable pages.
	TUint totalPagesMov = 0;	// Total pages found so far for movable pages.
	TUint totalCurrentDis = 0;	// Number of allocated discardable pages found in
								// RAM zones to be in use after the general defrag.
	TUint totalCurrentMov = 0;	// Number of allocated movable pages found in
								// RAM zones to be in use after the general defrag.
	TUint totalCurrentFree = 0; // The current number of free pages in the RAM zones
								// to be in use after the general defrag.
	iZoneGeneralPrefLink = &iZonePrefList.iA;
	while (iZoneGeneralPrefLink != iZoneLeastMovDis &&
			(requiredPagesMov > totalPagesMov ||
			requiredPagesDis > totalPagesDis))
		{
		iZoneGeneralPrefLink = iZoneGeneralPrefLink->iNext;
		SZone& zone = *_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink);
		// Update the current totals.
		totalCurrentDis += zone.iAllocPages[EPageDiscard];
		totalCurrentMov += zone.iAllocPages[EPageMovable];
		totalCurrentFree += zone.iFreePages;

		TBool onlyAllocDis = NoAllocOfPageType(zone, EPageMovable);
		TBool onlyAllocMov = NoAllocOfPageType(zone, EPageDiscard);
		if (!onlyAllocMov || !onlyAllocDis)
			{// Either movable, discardable or both can be allocated in this zone.
			TUint zonePagesFree = zone.iFreePages;
			TUint zonePagesDis = zone.iAllocPages[EPageDiscard];
			TUint zonePagesMov = zone.iAllocPages[EPageMovable];
			// Total pages in this RAM zone that can be used for either
			// discardable or movable pages.
			TUint zonePagesGen = zonePagesDis + zonePagesMov + zonePagesFree;
			if (onlyAllocMov)
				{
				if (requiredPagesDis > totalPagesDis)
					{// No further discardable pages can be allocated into
					// this RAM zone but consider any that already are.
					TUint usedPages = Min((TInt)zonePagesDis,
											requiredPagesDis - totalPagesDis);
					totalPagesDis += usedPages;
					zonePagesDis -= usedPages;
					}
				TUint zoneOnlyMov = zonePagesDis + zonePagesMov + zonePagesFree;
				onlyPagesMov += zoneOnlyMov;
				totalPagesMov += zoneOnlyMov;
				__KTRACE_OPT(KMMU2, Kern::Printf("onlyMov ID%x tot %x",
									zone.iId, zoneOnlyMov));
				zonePagesGen = 0;	// These pages aren't general purpose.
				}
			if (onlyAllocDis)
				{
				if (requiredPagesMov > totalPagesMov)
					{// No further movable pages can be allocated into
					// this RAM zone but consider any that already are.
					TUint usedPages = Min((TInt)zonePagesMov,
											requiredPagesMov - totalPagesMov);
					totalPagesMov += usedPages;
					zonePagesMov -= usedPages;
					}
				TUint zoneOnlyDis = zonePagesDis + zonePagesMov + zonePagesFree;
				onlyPagesDis += zoneOnlyDis;
				totalPagesDis += zoneOnlyDis;
				__KTRACE_OPT(KMMU2, Kern::Printf("onlyDis ID%x tot %x",
									zone.iId, zoneOnlyDis));
				zonePagesGen = 0;	// These pages aren't general purpose.
				}

			if (requiredPagesDis > totalPagesDis)
				{// Need some discardable pages so first steal any spare
				// movable pages for discardable allocations.
				if (totalPagesMov > requiredPagesMov)
					{// Use any spare movable pages that can also be
					// used for discardable allocations for discardable.
					__NK_ASSERT_DEBUG(onlyPagesMov);
					TUint spareMovPages = Min((TInt)(totalPagesMov - onlyPagesMov),
												totalPagesMov - requiredPagesMov);
					totalPagesMov -= spareMovPages;
					totalPagesDis += spareMovPages;
					__KTRACE_OPT(KMMU2, Kern::Printf("genDis Mov ID%x used%x",
										zone.iId, spareMovPages));
					}
				if (requiredPagesDis > totalPagesDis)
					{// Need more discardable pages but only grab those required.
					TUint usedPages = Min((TInt)zonePagesGen,
											requiredPagesDis - totalPagesDis);
					totalPagesDis += usedPages;
					zonePagesGen -= usedPages;
					__KTRACE_OPT(KMMU2, Kern::Printf("genDis ID%x used%x",
										zone.iId, usedPages));
					}
				}
			if (requiredPagesMov > totalPagesMov)
				{// Need some movable pages so first steal any spare
				// discardable pages for movable allocations.
				if (totalPagesDis > requiredPagesDis)
					{// Use any spare discardable pages that can also be
					// used for movable allocations for movable.
					__NK_ASSERT_DEBUG(onlyPagesDis);
					TUint spareDisPages = Min((TInt)(totalPagesDis - onlyPagesDis),
												totalPagesDis - requiredPagesDis);
					totalPagesDis -= spareDisPages;
					totalPagesMov += spareDisPages;
					__KTRACE_OPT(KMMU2, Kern::Printf("genMov Dis ID%x used%x",
										zone.iId, spareDisPages));
					}
				if (requiredPagesMov > totalPagesMov)
					{// Still need some movable pages so grab them from this zone.
					// Just grab all of the general pages left as the discardable
					// pages will already have grabbed some if they were needed.
					totalPagesMov += zonePagesGen;
					__KTRACE_OPT(KMMU2, Kern::Printf("genMov ID%x used%x",
										zone.iId, zonePagesGen));
					}
				}
			}
		}
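
	// Editor's worked example of the walk above (a sketch with hypothetical
	// numbers): suppose the two most preferable zones are A (100
	// free/movable/discardable pages, both types allowed) and B (movable-only,
	// 50 pages), with requiredPagesDis == 30 and requiredPagesMov == 120.
	// Zone A satisfies the discardable demand (30) and puts its remaining 70
	// general pages towards the movable demand; zone B then adds its 50
	// movable-only pages, so the walk stops at B with both requirements met
	// and iZoneGeneralPrefLink pointing at B.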
       
	__KTRACE_OPT(KMMU, Kern::Printf("gen least in use ID 0x%x",
				(_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink))->iId));
	__NK_ASSERT_DEBUG(_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink)->iPrefRank <=
						iZoneLeastMovDisRank);

	if (iZoneGeneralPrefLink != iZoneLeastMovDis &&
		firstClearableInUseRank > _LOFF(iZoneGeneralPrefLink, SZone, iPrefLink)->iPrefRank)
		{// We can reduce the number of RAM zones in use so block all the RAM
		// zones not to be in use after the defrag from being allocated into
		// by the general defrag.
		link = iZoneLeastMovDis;
		while (link != iZoneGeneralPrefLink)
			{
			SZone& zone = *_LOFF(link, SZone, iPrefLink);
			zone.iFlags |= KRamZoneFlagGenDefragBlock;
			link = link->iPrev;
			}

		// Determine how many pages will need to be discarded to allow the
		// general defrag to succeed in using the minimum RAM zones required.
		if (requiredPagesDis > totalCurrentDis)
			{// Need to replace some discardable pages in RAM zones to be
			// cleared with pages in the RAM zones to be in use after the
			// general defrag.
			__NK_ASSERT_DEBUG(totalCurrentFree >= requiredPagesDis - totalCurrentDis);
			totalCurrentFree -= requiredPagesDis - totalCurrentDis;
			}
		TUint totalForMov = totalCurrentFree + totalCurrentMov;
		if (requiredPagesMov > totalForMov)
			{// Need to discard some pages from the least preferable RAM zone to be
			// in use after the general defrag for the movable pages to be moved to.
			aRequiredToBeDiscarded = requiredPagesMov - totalForMov;
			__NK_ASSERT_DEBUG(aRequiredToBeDiscarded <= totalCurrentDis);
			__NK_ASSERT_DEBUG(totalCurrentDis - aRequiredToBeDiscarded >= requiredPagesDis);
			}

		// This stage should discard pages from the least preferable RAM zones
		// to be in use after the general defrag to save the pages having to
		// be moved again by the final stage.
		iZoneGeneralStage = EGenDefragStage0;
		aStage = EGenDefragStage1;	// Defrag::GeneralDefrag() requires this.
		iZoneGeneralTmpLink = iZoneGeneralPrefLink;
		return GeneralDefragNextZone0();
		}

	// General defrag can't clear any RAM zones so jump to the tidying stage.
	aStage = EGenDefragStage2;
	iZoneGeneralStage = EGenDefragStage2;
	return NULL;
	}
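
// Editor's sketch of how the three stages are expected to be driven
// (hypothetical caller code; the real sequencing lives in
// Defrag::GeneralDefrag(), outside this file, and the exact
// GeneralDefragStart0() parameters are inferred from the assignments above):
//
//	SZone* z = GeneralDefragStart0(stage, requiredToBeDiscarded);
//	while (z)
//		{// Stage 0: discard pages from the zones staying in use.
//		z = GeneralDefragNextZone0();
//		}
//	z = GeneralDefragStart1();
//	while (z)
//		{// Stage 1: try to clear the zones not staying in use.
//		z = GeneralDefragNextZone1();
//		}
//	z = GeneralDefragStart2();
//	while (z)
//		{// Stage 2: tidy pages within the zones kept in use.
//		z = GeneralDefragNextZone2();
//		}
//	GeneralDefragEnd();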
       
/**
Find the next RAM zone that is suitable for stage 0 of a general defrag.
This should only be called after a preceding call to
DRamAllocator::GeneralDefragStart0().

This goes through the RAM zones from the least preferable to be in use
after the general defrag to the most preferable RAM zone. It will
return each time it finds a RAM zone with discardable pages allocated into it.

@return Pointer to the RAM zone object that may potentially have pages
		discarded by the general defrag.  This will be NULL if no suitable
		RAM zone could be found.
*/
SZone* DRamAllocator::GeneralDefragNextZone0()
	{
	M::RamAllocIsLocked();
	// A general defrag operation must have been started and not yet ended.
	__NK_ASSERT_DEBUG(iZoneGeneralPrefLink != NULL);
	__NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
	__NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage0);

	while (iZoneGeneralTmpLink != &iZonePrefList.iA)
		{
		SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);

		// Save the RAM zone that is currently more preferable than this one
		// before any reordering.
		iZoneGeneralTmpLink = iZoneGeneralTmpLink->iPrev;

		if (zone->iFlags & KRamZoneFlagGenDefrag)
			{// This zone has already been selected for a general defrag.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext0 zone ID 0x%x already defragged",
						zone->iId));
			return NULL;
			}
		zone->iFlags |= KRamZoneFlagGenDefrag;
		if (zone->iAllocPages[EPageDiscard])
			{
			// A RAM zone that may have pages discarded by a general defrag has been found.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext0 zone ID 0x%x", zone->iId));
			return zone;
			}
		}
	return NULL;
	}
       
/**
Initialise stage 1 of a general defrag operation, which will attempt
to clear all the RAM zones not to be in use once the general defrag
has completed.

@return Pointer to the RAM zone object that may potentially be cleared
		by the general defrag.  This will be NULL if no suitable
		RAM zone could be found.
*/
SZone* DRamAllocator::GeneralDefragStart1()
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(iNumZones == 1 || iZoneGeneralPrefLink != NULL);

	if (iNumZones == 1)
		{// A device with just one RAM zone can't be defragged, so return NULL.
		return NULL;
		}

	// Clear the general defrag flag of each RAM zone to be defragged.
	SDblQueLink* link = iZoneGeneralPrefLink;
	for (; link != &iZonePrefList.iA; link = link->iPrev)
		{
		SZone& zone = *_LOFF(link, SZone, iPrefLink);
		zone.iFlags &= ~KRamZoneFlagGenDefrag;
		}

	// Flags cleared so now start this stage from the least preferable RAM zone
	// currently in use.
	iZoneGeneralTmpLink = iZoneLeastMovDis;
	iZoneGeneralStage = EGenDefragStage1;
	return GeneralDefragNextZone1();
	}
       
/**
Find the next RAM zone that is suitable for stage 1 of a general defrag.
This should only be called after a preceding call to
DRamAllocator::GeneralDefragStart1().

This goes through the RAM zones from the least preferable currently
with movable or discardable pages allocated into it to the least
preferable RAM zone that is to be in use after the general defrag.
It will return each time it finds a RAM zone with movable and/or
discardable pages allocated into it.

@return Pointer to the RAM zone object that may potentially be cleared by a
		general defrag.  This will be NULL if no suitable zone could be found.
*/
SZone* DRamAllocator::GeneralDefragNextZone1()
	{
	M::RamAllocIsLocked();
	// A general defrag operation must have been started and not yet ended.
	__NK_ASSERT_DEBUG(iZoneGeneralPrefLink != NULL);
	__NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
	__NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage1);

	// If we hit the target least preferable RAM zone to be in use once
	// the defrag has completed then stop this stage of the general defrag.

	// Should never skip past iZoneGeneralPrefLink.
	__NK_ASSERT_DEBUG(iZoneGeneralTmpLink != &iZonePrefList.iA);

	while (iZoneGeneralTmpLink != iZoneGeneralPrefLink)
		{
		SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);

		// Save the RAM zone that is currently more preferable than this one
		// before any reordering.
		iZoneGeneralTmpLink = iZoneGeneralTmpLink->iPrev;

		if (zone->iFlags & KRamZoneFlagGenDefrag)
			{// This zone has already been selected for a general defrag.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 zone ID 0x%x already defragged",
						zone->iId));
			return NULL;
			}
		zone->iFlags |= KRamZoneFlagGenDefrag;
		if (zone->iAllocPages[EPageMovable] || zone->iAllocPages[EPageDiscard])
			{
			// A RAM zone that may be cleared by a general defrag has been found.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 zone ID 0x%x", zone->iId));
			return zone;
			}
		}
	__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 reached general target"));
	return NULL;
	}
       
/**
Initialise stage 2 of a general defrag operation.

Stage 2 creates room for fixed page allocations in the more preferable RAM
zones in use by moving pages into the least preferable RAM zones in use.

@return Pointer to the RAM zone object that may potentially be cleared of
		movable and discardable pages by the general defrag.  This will be
		NULL if no suitable zone could be found.
*/
SZone* DRamAllocator::GeneralDefragStart2()
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(iNumZones == 1 || iZoneGeneralPrefLink != NULL);

	if (iNumZones == 1)
		{// A device with just one RAM zone can't be defragged, so return NULL.
		return NULL;
		}

	// Clear the general defrag flags of each RAM zone to be defragged.
	SDblQueLink* link = iZoneLeastMovDis;
	for (; link != &iZonePrefList.iA; link = link->iPrev)
		{
		SZone& zone = *_LOFF(link, SZone, iPrefLink);
		zone.iFlags &= ~(KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock);
		}

	// Flags cleared so now start the 2nd stage from the most preferable RAM zone.
	iZoneGeneralTmpLink = iZonePrefList.First();
	iZoneGeneralStage = EGenDefragStage2;
	return GeneralDefragNextZone2();
	}
       
/**
Find the next RAM zone that is suitable for stage 2 of a general defrag.
This should only be called after a preceding call to
DRamAllocator::GeneralDefragStart2().

This goes through the RAM zones from the most preferable to the least
preferable RAM zone that has movable and/or discardable pages allocated
into it.  It will return each time it finds a RAM zone with movable and/or
discardable pages allocated into it.

@return Pointer to the RAM zone object that may potentially be cleared of
		movable and discardable pages by the general defrag.  This will be
		NULL if no suitable zone could be found.
*/
SZone* DRamAllocator::GeneralDefragNextZone2()
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
	__NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage2);

	while (iZoneGeneralTmpLink != iZoneLeastMovDis)
		{
		SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);

		// Save the RAM zone that is currently less preferable than this one
		// before any reordering.
		iZoneGeneralTmpLink = iZoneGeneralTmpLink->iNext;

		if (zone->iFlags & KRamZoneFlagGenDefrag)
			{// This zone has already been selected for a general defrag.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 zone ID 0x%x already defragged", zone->iId));
			return NULL;
			}
		zone->iFlags |= KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock;
		if (zone->iAllocPages[EPageMovable] || zone->iAllocPages[EPageDiscard])
			{// A RAM zone that may be cleared by a general defrag has been found.
			__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 zone ID 0x%x", zone->iId));
			return zone;
			}
		}
	__KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 reached general target"));
	return NULL;
	}
       
/**
Inform the allocator that a general defragmentation operation has completed.
*/
void DRamAllocator::GeneralDefragEnd()
	{
#ifdef _DEBUG
	if (!K::Initialising)
		{
		M::RamAllocIsLocked();
#ifdef __VERIFY_LEASTMOVDIS
		VerifyLeastPrefMovDis();
#endif
		}
#endif
	// Reset the general defrag preference links as they are no longer required.
	iZoneGeneralPrefLink = NULL;
	iZoneGeneralTmpLink = NULL;
	}
       
/**
Calculate the number of free pages in all the RAM zones to be in use
once the general defragmentation operation has completed.

@param aType The type of free pages to find in the higher priority zones.
@return The number of free pages in the RAM zones intended to be in use
after the general defrag operation has completed.
*/
TUint DRamAllocator::GenDefragFreePages(TZonePageType aType) const
	{
	M::RamAllocIsLocked();

	if (iZoneGeneralStage == EGenDefragStage2)
		{// Second stage of the general defrag, where the RAM zone doesn't
		// have to be emptied.
		return KMaxTUint;
		}
	TUint totalFree = 0;
	SDblQueLink* link = iZoneGeneralPrefLink;
	for (; link != &iZonePrefList.iA; link = link->iPrev)
		{
		SZone& zone = *_LOFF(link, SZone, iPrefLink);
		if (NoAllocOfPageType(zone, aType) ||
			zone.iFlags & KRamZoneFlagGenDefragBlock)
			{
			continue;
			}
		// This zone has free space for this type of page.
		totalFree += zone.iFreePages;
		}
	return totalFree;
	}
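
// Editor's worked example (hypothetical zones): if the zones from
// iZoneGeneralPrefLink towards the most preferable end are A (12 free pages,
// aType allowed), B (8 free pages but flagged with KRamZoneFlagGenDefragBlock)
// and C (5 free pages, aType allowed), the loop returns 12 + 5 = 17; B's free
// pages are ignored because the general defrag has blocked allocations into it.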
       
/** Mark the RAM zone as being claimed to stop any further allocations.
@param aZone The zone to stop allocations to.

@pre RamAlloc mutex held.
@post RamAlloc mutex held.
*/
void DRamAllocator::ZoneClaimStart(SZone& aZone)
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(!(aZone.iFlags & KRamZoneFlagClaiming));

	aZone.iFlags |= KRamZoneFlagClaiming;

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
#endif
	}
       
/** Mark the RAM zone as not being claimed to allow allocations.
@param aZone The zone to allow allocations into.

@pre RamAlloc mutex held.
@post RamAlloc mutex held.
*/
void DRamAllocator::ZoneClaimEnd(SZone& aZone)
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(aZone.iFlags & KRamZoneFlagClaiming);

	aZone.iFlags &= ~KRamZoneFlagClaiming;

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
#endif
	}
       
/** Mark the RAM zone so that any allocation or frees from it can be detected.
Useful for defragging.
@param aZone The zone to mark.
@pre RamAlloc mutex held
@post RamAlloc mutex held
*/
void DRamAllocator::ZoneMark(SZone& aZone)
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(!(aZone.iFlags & KRamZoneFlagMark));

	aZone.iFlags |= KRamZoneFlagMark;

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
#endif
	}
       
/** Unmark the RAM zone.
Useful for defragging.
@param aZone The zone to unmark.
@return ETrue if the RAM zone is inactive, EFalse otherwise.
@pre RamAlloc mutex held
@post RamAlloc mutex held
*/
TBool DRamAllocator::ZoneUnmark(SZone& aZone)
	{
	M::RamAllocIsLocked();

	TInt r = aZone.iFlags & KRamZoneFlagMark;
	aZone.iFlags &= ~KRamZoneFlagMark;

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
#endif
	return r;
	}
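
// Editor's sketch of the intended mark/unmark pattern (hypothetical caller;
// the "inactive" semantics follow the @return description above):
//
//	ZoneMark(zone);
//	// ... attempt to move or discard the zone's pages ...
//	if (ZoneUnmark(zone))
//		{
//		// KRamZoneFlagMark survived, i.e. the zone saw no allocations
//		// or frees while it was marked.
//		}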
       
/** Determine whether it is OK to allocate the specified page type
to the RAM zone.

This should be used by all functions that search through the zones when
attempting to allocate pages.

@return ETrue if this page type shouldn't be allocated into the RAM zone,
EFalse if it is OK to allocate that page type into the RAM zone.
*/
TBool DRamAllocator::NoAllocOfPageType(SZone& aZone, TZonePageType aType) const
	{
	TUint8 flagMask = 1 << (aType - KPageTypeAllocBase);
	return (aZone.iFlags & (KRamZoneFlagClaiming|KRamZoneFlagNoAlloc|KRamZoneFlagTmpBlockAlloc)) ||
			(aZone.iFlags & flagMask);
	}
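
// Editor's illustration of the mask arithmetic above (bit positions inferred
// from the expression itself): for a page type where
// aType - KPageTypeAllocBase == 2, flagMask == 0x04, so setting bit 2 of
// aZone.iFlags blocks just that page type, while the claiming, no-alloc and
// temporary-block flags veto every page type at once.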
       
/** Updates the flags of the specified RAM zone.

@param aId			The ID of the RAM zone to modify.
@param aClearMask	The bit flags to clear.
@param aSetMask		The bit flags to set.

@return KErrNone on success, KErrArgument if no RAM zone has the ID aId or
aSetMask contains invalid flags.

@pre RamAlloc mutex held
@post RamAlloc mutex held
*/
TInt DRamAllocator::ModifyZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
	{
	M::RamAllocIsLocked();

	SZone* zone = ZoneFromId(aId);
	if (zone == NULL || (aSetMask & KRamZoneFlagInvalid))
		{// aId invalid or an invalid flag bit was requested to be set.
		return KErrArgument;
		}
	zone->iFlags &= ~aClearMask;
	zone->iFlags |= aSetMask;

	__KTRACE_OPT(KMMU, Kern::Printf("Zone %x Flags %x", zone->iId, zone->iFlags));

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, zone->iId, zone->iFlags);
#endif
	return KErrNone;
	}
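
// Editor's usage sketch (hypothetical zone ID; the flags are those tested by
// NoAllocOfPageType() above):
//
//	ModifyZoneFlags(0x01, 0, KRamZoneFlagNoAlloc);	// block all allocations
//	ModifyZoneFlags(0x01, KRamZoneFlagNoAlloc, 0);	// allow them again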
       
/** Invoke the RAM zone callback function to inform the variant of the RAM zones
in use so far by the system.
This is designed to be invoked only once, during boot in MmuBase::Init2().
*/
void DRamAllocator::InitialCallback()
	{
	__NK_ASSERT_DEBUG(iZoneCallbackInitSent == EFalse);
	if (iZonePowerFunc)
		{
		TInt ret = (*iZonePowerFunc)(ERamZoneOp_Init, NULL, (TUint*)&iZonePwrState);
		if (ret != KErrNone && ret != KErrNotSupported)
			{
			Panic(EZonesCallbackErr);
			}
		CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::InitialCallback");
		}
	iZoneCallbackInitSent = ETrue;
	}
       
#ifdef BTRACE_RAM_ALLOCATOR
/**
Structure for outputting zone information to BTrace that couldn't fit into
the first 2 words of the BTraceN call.
*/
struct TRamAllocBtraceZone
	{
	TUint32 iId;
	TUint8 iPref;
	TUint8 iFlags;
	TUint16 iReserved;
	};

/**
This will be invoked when BTrace starts logging BTrace::ERamAllocator category
traces.
It outputs the zone configuration and the base addresses of any contiguous block
of allocated pages.
*/
void DRamAllocator::SendInitialBtraceLogs(void)
	{
	M::RamAllocIsLocked();
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::SendInitialBtraceLogs");

	// Output the zone information.
	TRamAllocBtraceZone bZone;
	BTrace4(BTrace::ERamAllocator, BTrace::ERamAllocZoneCount, iNumZones);
	const SZone* zone = iZones;
	const SZone* const endZone = iZones + iNumZones;
	for (; zone < endZone; zone++)
		{
		bZone.iId = zone->iId;
		bZone.iPref = zone->iPref;
		bZone.iFlags = zone->iFlags;
		BTraceN(BTrace::ERamAllocator, BTrace::ERamAllocZoneConfig, zone->iPhysPages,
				zone->iPhysBase, &bZone, sizeof(TRamAllocBtraceZone));
		}

	// Search through the zones and output each contiguous region of allocated pages.
	for (zone = iZones; zone < endZone; zone++)
		{
		if (zone->iFreePages != zone->iPhysPages)
			{
			TInt pageCount = 0;
			TInt totalPages = 0;
			TUint32 runStart = 0;
			while ((TUint)totalPages != zone->iPhysPages - zone->iFreePages)
				{
				// Find the next run of contiguous allocated pages;
				// runStart is left at the first page of the run, if any.
				for (; runStart < zone->iPhysPages && zone->iBma[KBmaAllPages]->NotAllocated(runStart,1); runStart++);

				// Find the last allocated page of this run.
				TUint32 runEnd = runStart + 1;
				for (; runEnd < zone->iPhysPages && zone->iBma[KBmaAllPages]->NotFree(runEnd,1); runEnd++);

				pageCount = runEnd - runStart;
				if (pageCount > 0)
					{// Have a run of allocated pages so output a BTrace record.
					TPhysAddr baseAddr = (runStart << KPageShift) + zone->iPhysBase;
					__KTRACE_OPT(KMMU2, Kern::Printf("offset %x physBase %x pages %x baseAddr %08x", runStart, zone->iPhysBase, pageCount, baseAddr));
					BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocBootAllocation, pageCount, baseAddr);
					runStart += pageCount;
					totalPages += pageCount;
					}
				}
			}
		}
	BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocBootAllocationEnd);
	}
#endif // BTRACE_RAM_ALLOCATOR
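
// Editor's illustration of the run scan in SendInitialBtraceLogs() above
// (hypothetical 8-page zone): with an allocation bitmap of A A F F A A A F
// (A = allocated, F = free), the first pass emits a record for pages 0-1
// (pageCount 2) and the second for pages 4-6 (pageCount 3), after which
// totalPages (5) equals iPhysPages - iFreePages and the while loop ends.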
       
TInt DRamAllocator::ClaimPhysicalRam(TPhysAddr aBase, TInt aSize)
	{
	TInt ret = SetPhysicalRamState(aBase, aSize, EFalse, EPageFixed);
#ifdef BTRACE_RAM_ALLOCATOR
	if (ret == KErrNone)
		{
		BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocClaimRam, aSize, aBase);
		}
#endif
	return ret;
	}

TInt DRamAllocator::FreePhysicalRam(TPhysAddr aBase, TInt aSize)
	{
	TInt ret = SetPhysicalRamState(aBase, aSize, ETrue, EPageFixed);
#ifdef BTRACE_RAM_ALLOCATOR
	if (ret == KErrNone)
		{
		BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePhysical, aSize, aBase);
		}
#endif
	return ret;
	}
       
TInt DRamAllocator::FreeRamInBytes()
	{
	return iTotalFreeRamPages << KPageShift;
	}

TUint DRamAllocator::FreeRamInPages()
	{
	return iTotalFreeRamPages;
	}

TUint DRamAllocator::TotalPhysicalRamPages()
	{
	return iTotalRamPages;
	}
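
// Editor's note (illustrative; assumes the common 4KB page size, i.e.
// KPageShift == 12): with 0x100 free pages FreeRamInBytes() returns
// 0x100 << 12 == 0x100000 bytes, or 1MB.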
       
#ifdef __VERIFY_LEASTMOVDIS
void DRamAllocator::VerifyLeastPrefMovDis()
	{
	// Shouldn't have any movable or discardable pages in any RAM
	// zone less preferable than iZoneLeastMovDis.
	SDblQueLink* tmpLink = iZoneLeastMovDis->iNext;
	while (tmpLink != &iZonePrefList.iA)
		{
		SZone& zone = *_LOFF(tmpLink, SZone, iPrefLink);
		if (zone.iAllocPages[EPageMovable] != 0 ||
			zone.iAllocPages[EPageDiscard] != 0)
			{
			DebugDump();
			__NK_ASSERT_DEBUG(0);
			}
		tmpLink = tmpLink->iNext;
		}
	}
#endif