kernel/eka/memmodel/epoc/flexible/mmu/mmu.h
       
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

/**
 @file
 @internalComponent
*/

#ifndef __MMU_H__
#define __MMU_H__

#define _USE_OLDEST_LISTS

#include "mm.h"
#include "mmboot.h"
#include <mmtypes.h>
#include <kern_priv.h>


class DCoarseMemory;
class DMemoryObject;
class DMemoryMapping;
       
/**
A page information structure giving the current use and state for a
RAM page being managed by the kernel.

Any modification to the contents of any SPageInfo structure requires the
#MmuLock to be held. The exception to this is when a page is unused (#Type()==#EUnused);
in this case only the #RamAllocLock is required to use #SetAllocated(), #SetUncached(),
and #CacheInvalidateCounter().

These structures are stored in an array at the virtual address #KPageInfoLinearBase
which is indexed by the physical address of the page they are associated with, divided
by #KPageSize. The memory for this array is allocated by the bootstrap and it has
unallocated regions where no memory is required to store SPageInfo structures.
These unallocated memory regions are indicated by zeros in the bitmap stored at
#KPageInfoMap.
*/
struct SPageInfo
	{
	/**
	Enumeration for the usage of a RAM page. This is stored in #iType.
	*/
	enum TType
		{
		/**
		No physical RAM exists for this page.

		This represents memory which doesn't exist or is not part of the physical
		address range being managed by the kernel.
		*/
		EInvalid,

		/**
		RAM fixed at boot time.

		This is for memory which was allocated by the bootstrap and which
		the kernel does not actively manage.
		*/
		EFixed,

		/**
		Page is unused.

		The page is either free memory in Mmu::iRamPageAllocator or on the demand
		paging 'live' list.

		To change from or to this type the #RamAllocLock must be held.
		*/
		EUnused,

		/**
		Page is in an indeterminate state.

		A page is placed into this state by Mmu::PagesAllocated when it is
		allocated (ceases to be #EUnused). Once the page has been assigned to
		its new use, its type is updated accordingly.
		*/
		EUnknown,

		/**
		Page was allocated with Mmu::AllocPhysicalRam, Mmu::ClaimPhysicalRam
		or is part of a reserved RAM bank set at system boot.
		*/
		EPhysAlloc,

		/**
		Page is owned by a memory object.

		#iOwner will point to the owning memory object and #iIndex will
		be the page index into its memory for this page.
		*/
		EManaged,

		/**
		Page is being used as a shadow page.

		@see DShadowPage.
		*/
		EShadow
		};
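
	// Typical lifecycle implied by the documentation above (an illustrative
	// summary, not an exhaustive state machine): a page starts as EUnused,
	// becomes EUnknown via Mmu::PagesAllocated, is then set to EManaged,
	// EPhysAlloc or EShadow by the Set...() methods below, and finally
	// returns to EUnused via SetUnused() when freed.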
       

	/**
	Flags stored in #iFlags.

	The least significant bits of these flags are used for the #TMemoryAttributes
	value for the page.
	*/
	enum TFlags
		{
		// lower bits hold TMemoryAttribute value for this page

		/**
		Flag set to indicate that the page has writable mappings.
		(This is to facilitate demand paged memory.)
		*/
		EWritable			= 1<<(EMemoryAttributeShift),

		/**
		Flag set to indicate that the memory page contents may be different
		to those previously saved to backing store (contents are 'dirty').
		This is set whenever a page gains a writeable mapping and only ever
		cleared once a demand paging memory manager 'cleans' the page.
		*/
		EDirty				= 1<<(EMemoryAttributeShift+1)
		};


	/**
	State for the page when being used to contain demand paged content.
	*/
	enum TPagedState
		{
		/**
		Page is not being managed for demand paging purposes, or has been transiently
		removed from the demand paging live list.
		*/
		EUnpaged 			= 0x0,

		/**
		Page is in the live list as a young page.
		*/
		EPagedYoung 		= 0x1,

		/**
		Page is in the live list as an old page.
		*/
		EPagedOld 			= 0x2,

		/**
		Page was pinned but has been moved and not yet freed.
		*/
		EPagedPinnedMoved	= 0x3,

		/**
		Page has been removed from the live list to prevent its contents being paged out.
		*/
		// NOTE - This must be the same value as EStatePagedLocked as defined in mmubase.h
		EPagedPinned 		= 0x4,

#ifdef _USE_OLDEST_LISTS
		/**
		Page is in the live list as one of the oldest pages that is clean.
		*/
		EPagedOldestClean	= 0x5,

		/**
		Page is in the live list as one of the oldest pages that is dirty.
		*/
		EPagedOldestDirty 	= 0x6
#endif
		};
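
	// Illustrative note: as the names suggest, demand paged pages typically age
	// through the live list from EPagedYoung to EPagedOld and (when
	// _USE_OLDEST_LISTS is defined) on to EPagedOldestClean or EPagedOldestDirty,
	// depending on the #EDirty flag, before being paged out.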
       

	/**
	Additional flags, stored in #iFlags2.
	*/
	enum TFlags2
		{
		/**
		When #iPagedState==#EPagedPinned this indicates the page is a 'reserved' page
		and does not increase the free page count when returned to the live list.
		*/
		EPinnedReserve	= 1<<0,
		};

private:
	/**
	Value from enum #TType, returned by #Type().
	*/
	TUint8 iType;

	/**
	Bitmask of values from #TFlags, returned by #Flags().
	*/
	TUint8 iFlags;

	/**
	Value from enum #TPagedState, returned by #PagedState().
	*/
	TUint8 iPagedState;

	/**
	Bitmask of values from #TFlags2.
	*/
	TUint8 iFlags2;

	union
		{
		/**
		The memory object which owns this page.
		Always set for #EManaged pages and can be set for #EPhysAlloc pages.
		*/
		DMemoryObject* iOwner;

		/**
		A pointer to the SPageInfo of the page that is being shadowed.
		For use with #EShadow pages only.
		*/
		SPageInfo* iOriginalPageInfo;
		};

	/**
	The index for this page within the owning object's (#iOwner) memory.
	*/
	TUint32 iIndex;

	/**
	Pointer identifying the current modifier of the page. See #SetModifier.
	*/
	TAny* iModifier;

	/**
	Storage location for data specific to the memory manager object handling this page.
	See #SetPagingManagerData.
	*/
	TUint32 iPagingManagerData;

	/**
	Union of values which vary depending on the current value of #iType.
	*/
	union
		{
		/**
		When #iType==#EPhysAlloc, this stores a count of the number of memory objects
		this page has been added to.
		*/
		TUint32 iUseCount;

		/**
		When #iType==#EUnused, this stores the value of Mmu::iCacheInvalidateCounter
		at the time the page was freed. This is used for some cache maintenance optimisations.
		*/
		TUint32 iCacheInvalidateCounter;

		/**
		When #iType==#EManaged, this holds the count of the number of times the page was pinned.
		This will only be non-zero for demand paged memory.
		*/
		TUint32 iPinCount;
		};

public:
	/**
	Used for placing page into linked lists. E.g. the various demand paging live lists.
	*/
	SDblQueLink iLink;

public:
	/**
	Return the SPageInfo for a given page of physical RAM.
	*/
	static SPageInfo* FromPhysAddr(TPhysAddr aAddress);

	/**
	Return the SPageInfo for a given page of physical RAM.
	If the address has no SPageInfo, then a null pointer is returned.
	*/
	static SPageInfo* SafeFromPhysAddr(TPhysAddr aAddress);

	/**
	Return the physical address of the RAM page with which this SPageInfo object is associated.
	*/
	FORCE_INLINE TPhysAddr PhysAddr();

	/**
	Return a SPageInfo by conversion from the address of its embedded link member #iLink.
	*/
	FORCE_INLINE static SPageInfo* FromLink(SDblQueLink* aLink)
		{
		return (SPageInfo*)((TInt)aLink-_FOFF(SPageInfo,iLink));
		}
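
	// Illustrative use of FromLink (a sketch; 'list' stands for any SDblQue
	// of pages linked through iLink, such as a demand paging live list):
	//
	// @code
	// SDblQueLink* link = list.First();
	// while(link!=&list.iA)		// iA is the queue's anchor link
	// 	{
	// 	SPageInfo* pi = SPageInfo::FromLink(link);
	// 	pi->Dump();				// no-op in release builds
	// 	link = link->iNext;
	// 	}
	// @endcode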
       

	//
	// Getters...
	//

	/**
	Return the current #TType value stored in #iType.
	@pre #MmuLock held.
	*/
	FORCE_INLINE TType Type()
		{
		CheckAccess("Type");
		return (TType)iType;
		}

	/**
	Return the current value of #iFlags.
	@pre #MmuLock held (if \a aNoCheck false).
	*/
	FORCE_INLINE TUint Flags(TBool aNoCheck=false)
		{
		if(!aNoCheck)
			CheckAccess("Flags");
		return iFlags;
		}

	/**
	Return the current value of #iPagedState.
	@pre #MmuLock held.
	*/
	FORCE_INLINE TPagedState PagedState()
		{
		CheckAccess("PagedState");
		return (TPagedState)iPagedState;
		}

	/**
	Return the current value of #iOwner.
	@pre #MmuLock held.
	*/
	FORCE_INLINE DMemoryObject* Owner()
		{
		CheckAccess("Owner");
		return iOwner;
		}

	/**
	Return the current value of #iIndex.
	@pre #MmuLock held (if \a aNoCheck false).
	*/
	FORCE_INLINE TUint32 Index(TBool aNoCheck=false)
		{
		if(!aNoCheck)
			CheckAccess("Index");
		return iIndex;
		}

	/**
	Return the current value of #iModifier.
	@pre #MmuLock held.
	*/
	FORCE_INLINE TAny* Modifier()
		{
		CheckAccess("Modifier");
		return iModifier;
		}


	//
	// Setters...
	//
       

	/**
	Set this page as type #EFixed.
	This is only used during boot by Mmu::Init2Common.
	*/
	inline void SetFixed(TUint32 aIndex=0)
		{
		CheckAccess("SetFixed");
		Set(EFixed,0,aIndex);
		}

	/**
	Set this page as type #EUnused.

	@pre #MmuLock held.
	@pre #RamAllocLock held if previous page type != #EUnknown.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetUnused()
		{
		CheckAccess("SetUnused",ECheckNotUnused|((iType!=EUnknown)?(TInt)ECheckRamAllocLock:0));
		iType = EUnused;
		iModifier = 0;
		// do not modify iFlags or iIndex in this function because page allocating cache cleaning operations rely on using this value
		}

	/**
	Set this page as type #EUnknown.
	This is only used by Mmu::PagesAllocated.

	@pre #RamAllocLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetAllocated()
		{
		CheckAccess("SetAllocated",ECheckUnused|ECheckRamAllocLock|ENoCheckMmuLock);
		iType = EUnknown;
		iModifier = 0;
		// do not modify iFlags or iIndex in this function because cache cleaning operations rely on using this value
		}

	/**
	Set this page as type #EPhysAlloc.
	@param aOwner	 Optional value for #iOwner.
	@param aIndex	 Optional value for #iIndex.

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetPhysAlloc(DMemoryObject* aOwner=0, TUint32 aIndex=0)
		{
		CheckAccess("SetPhysAlloc");
		Set(EPhysAlloc,aOwner,aIndex);
		iUseCount = 0;
		}

	/**
	Set this page as type #EManaged.

	@param aOwner	Value for #iOwner.
	@param aIndex 	Value for #iIndex.
	@param aFlags 	Value for #iFlags (aOwner->PageInfoFlags()).

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetManaged(DMemoryObject* aOwner, TUint32 aIndex, TUint8 aFlags)
		{
		CheckAccess("SetManaged");
		Set(EManaged,aOwner,aIndex);
		iFlags = aFlags;
		iPinCount = 0;
		}

	/**
	Set this page as type #EShadow.

	This is for use by #DShadowPage.

	@param aIndex 	Value for #iIndex.
	@param aFlags 	Value for #iFlags.

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetShadow(TUint32 aIndex, TUint8 aFlags)
		{
		CheckAccess("SetShadow");
		Set(EShadow,0,aIndex);
		iFlags = aFlags;
		}

	/**
	Store a pointer to the SPageInfo of the page that this page is shadowing.

	@param aOrigPageInfo	Pointer to the SPageInfo that this page is shadowing.

	@pre #MmuLock held.
	*/
	inline void SetOriginalPage(SPageInfo* aOrigPageInfo)
		{
		CheckAccess("SetOriginalPage");
		__NK_ASSERT_DEBUG(iType == EShadow);
		__NK_ASSERT_DEBUG(!iOriginalPageInfo);
		iOriginalPageInfo = aOrigPageInfo;
		}

	/**
	Returns a pointer to the SPageInfo of the page that this page is shadowing.

	@return	A pointer to the SPageInfo that this page is shadowing.

	@pre #MmuLock held.
	*/
	inline SPageInfo* GetOriginalPage()
		{
		CheckAccess("GetOriginalPage");
		__NK_ASSERT_DEBUG(iType == EShadow);
		__NK_ASSERT_DEBUG(iOriginalPageInfo);
		return iOriginalPageInfo;
		}


private:
	/** Internal implementation factor for methods which set page type. */
	FORCE_INLINE void Set(TType aType, DMemoryObject* aOwner, TUint32 aIndex)
		{
		CheckAccess("Set",ECheckNotAllocated|ECheckNotPaged);
		(TUint32&)iType = aType; // also clears iFlags, iFlags2 and iPagedState
		iOwner = aOwner;
		iIndex = aIndex;
		iModifier = 0;
		}
       

public:


	//
	//
	//

	/**
	Set #iFlags to indicate that the contents of this page have been removed from
	any caches.

	@pre #MmuLock held if #iType!=#EUnused, #RamAllocLock held if #iType==#EUnused.
	*/
	FORCE_INLINE void SetUncached()
		{
		CheckAccess("SetUncached",iType==EUnused ? ECheckRamAllocLock|ENoCheckMmuLock : 0);
		__NK_ASSERT_DEBUG(iType==EUnused || (iType==EPhysAlloc && iUseCount==0));
		iFlags = EMemAttNormalUncached;
		}

	/**
	Set memory attributes and colour for a page of type #EPhysAlloc.

	This is set the first time a page of type #EPhysAlloc is added to a memory
	object with DMemoryManager::AddPages or DMemoryManager::AddContiguous.
	The set values are used to check constraints are met if the page is
	also added to other memory objects.

	@param aIndex	The page index within a memory object at which this page
					has been added. This is stored in #iIndex and used to determine
					the page's 'colour'.
	@param aFlags 	Value for #iFlags. This sets the memory attributes for the page.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetMapped(TUint32 aIndex, TUint aFlags)
		{
		CheckAccess("SetMapped");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		__NK_ASSERT_DEBUG(iUseCount==0); // check page not already added to an object
		iIndex = aIndex;
		iFlags = aFlags;
		iModifier = 0;
		}

	/**
	Set #iPagedState.

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page state has changed.
	*/
	FORCE_INLINE void SetPagedState(TPagedState aPagedState)
		{
		CheckAccess("SetPagedState");
		__NK_ASSERT_DEBUG(aPagedState==iPagedState || iPagedState!=EPagedPinned || iPinCount==0); // make sure we don't set an unpinned state if iPinCount!=0
		iPagedState = aPagedState;
		iModifier = 0;
		}

	/**
	Set the page's #iModifier value.

	#iModifier is cleared to zero whenever the usage or paging state of the page
	changes. So if a thread sets this to a suitable unique value (e.g. the address
	of a local variable) then it may perform a long running operation on the page
	and later check with #CheckModified that no other thread has changed the page
	state or used SetModifier in the intervening time.
	Example.

	@code
	TInt anyLocalVariable; // arbitrary local variable

	MmuLock::Lock();
	SPageInfo* thePageInfo = GetAPage();
	thePageInfo->SetModifier(&anyLocalVariable); // use &anyLocalVariable as value unique to this thread
	MmuLock::Unlock();

	DoOperation(thePageInfo);

	MmuLock::Lock();
	TInt r;
	if(!thePageInfo->CheckModified(&anyLocalVariable))
		{
		// nobody else touched the page...
		OperationSucceeded(thePageInfo);
		r = KErrNone;
		}
	else
		{
		// somebody else changed our page...
		OperationInterrupted(thePageInfo);
		r = KErrAbort;
		}
	MmuLock::Unlock();

	return r;
	@endcode

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetModifier(TAny* aModifier)
		{
		CheckAccess("SetModifier");
		iModifier = aModifier;
		}

	/**
	Return true if the #iModifier value does not match a specified value.

	@param aModifier	A 'modifier' value previously set with #SetModifier.

	@pre #MmuLock held.

	@see SetModifier.
	*/
	FORCE_INLINE TBool CheckModified(TAny* aModifier)
		{
		CheckAccess("CheckModified");
		return iModifier!=aModifier;
		}
       

	/**
	Flag this page as having Page Table Entries which give writeable access permissions.
	This sets the flags #EWritable and #EDirty.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetWritable()
		{
		CheckAccess("SetWritable");
		// This should only be invoked on paged pages.
		__NK_ASSERT_DEBUG(PagedState() != EUnpaged);
		iFlags |= EWritable;
		SetDirty();
		}

	/**
	Flag this page as no longer having any Page Table Entries which give writeable
	access permissions.
	This clears the flag #EWritable.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetReadOnly()
		{
		CheckAccess("SetReadOnly");
		iFlags &= ~EWritable;
		}

	/**
	Returns true if #SetWritable has been called without a subsequent #SetReadOnly.
	This returns the flag #EWritable.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TBool IsWritable()
		{
		CheckAccess("IsWritable");
		return iFlags&EWritable;
		}

	/**
	Flag this page as 'dirty', indicating that its contents may no longer match those saved
	to a backing store. This sets the flag #EDirty.

	This is used in the management of demand paged memory.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetDirty()
		{
		CheckAccess("SetDirty");
		iFlags |= EDirty;
		}

	/**
	Flag this page as 'clean', indicating that its contents now match those saved
	to a backing store. This clears the flag #EDirty.

	This is used in the management of demand paged memory.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetClean()
		{
		CheckAccess("SetClean");
		iFlags &= ~EDirty;
		}

	/**
	Return the #EDirty flag. See #SetDirty and #SetClean.

	This is used in the management of demand paged memory.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TBool IsDirty()
		{
		CheckAccess("IsDirty");
		return iFlags&EDirty;
		}


	//
	// Type specific...
	//

	/**
	Set #iCacheInvalidateCounter to the specified value.

	@pre #MmuLock held.
	@pre #iType==#EUnused.
	*/
	void SetCacheInvalidateCounter(TUint32 aCacheInvalidateCounter)
		{
		CheckAccess("SetCacheInvalidateCounter");
		__NK_ASSERT_DEBUG(iType==EUnused);
		iCacheInvalidateCounter = aCacheInvalidateCounter;
		}

	/**
	Return #iCacheInvalidateCounter.

	@pre #RamAllocLock held.
	@pre #iType==#EUnused.
	*/
	TUint32 CacheInvalidateCounter()
		{
		CheckAccess("CacheInvalidateCounter",ECheckRamAllocLock|ENoCheckMmuLock);
		__NK_ASSERT_DEBUG(iType==EUnused);
		return iCacheInvalidateCounter;
		}

	/**
	Increment #iUseCount to indicate that the page has been added to a memory object.

	@return New value of #iUseCount.

	@pre #MmuLock held.
	@pre #iType==#EPhysAlloc.
	*/
	TUint32 IncUseCount()
		{
		CheckAccess("IncUseCount");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		return ++iUseCount;
		}

	/**
	Decrement #iUseCount to indicate that the page has been removed from a memory object.

	@return New value of #iUseCount.

	@pre #MmuLock held.
	@pre #iType==#EPhysAlloc.
	*/
	TUint32 DecUseCount()
		{
		CheckAccess("DecUseCount");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		__NK_ASSERT_DEBUG(iUseCount);
		return --iUseCount;
		}

	/**
	Return #iUseCount; this indicates the number of times the page has been added to memory object(s).

	@return #iUseCount.

	@pre #MmuLock held.
	@pre #iType==#EPhysAlloc.
	*/
	TUint32 UseCount()
		{
		CheckAccess("UseCount");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		return iUseCount;
		}

	/**
	Increment #iPinCount to indicate that a mapping has pinned this page.
	This is only done for demand paged memory; unpaged memory does not have
	#iPinCount updated when it is pinned.

	@return New value of #iPinCount.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 IncPinCount()
		{
		CheckAccess("IncPinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		return ++iPinCount;
		}

	/**
	Decrement #iPinCount to indicate that a mapping which was pinning this page has been removed.
	This is only done for demand paged memory; unpaged memory does not have
	#iPinCount updated when it is unpinned.

	@return New value of #iPinCount.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 DecPinCount()
		{
		CheckAccess("DecPinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		__NK_ASSERT_DEBUG(iPinCount);
		return --iPinCount;
		}

	/**
	Clear #iPinCount to zero as this page is no longer being used as a
	pinned page.
	This is only done for demand paged memory; unpaged memory does not have
	#iPinCount set.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	void ClearPinCount()
		{
		CheckAccess("ClearPinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		__NK_ASSERT_DEBUG(iPinCount);
		iPinCount = 0;
		}

	/**
	Return #iPinCount, which indicates the number of mappings that have pinned this page.
	This is only valid for demand paged memory; unpaged memory does not have
	#iPinCount updated when it is pinned.

	@return #iPinCount.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 PinCount()
		{
		CheckAccess("PinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		return iPinCount;
		}
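
	// Illustrative pin bookkeeping sketch (a simplification; the kernel's real
	// pinning path lives in the mapping and paging code, and also removes the
	// page from the live list). 'pi' is a hypothetical SPageInfo* for an
	// EManaged, demand paged page:
	//
	// @code
	// MmuLock::Lock();
	// if(pi->PinCount()==0)
	// 	pi->SetPagedState(SPageInfo::EPagedPinned);
	// pi->IncPinCount();
	// MmuLock::Unlock();
	// @endcode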
       

	/**
	Set the #EPinnedReserve flag.
	@pre #MmuLock held.
	@see EPinnedReserve.
	*/
	void SetPinnedReserve()
		{
		CheckAccess("SetPinnedReserve");
		iFlags2 |= EPinnedReserve;
		}

	/**
	Clear the #EPinnedReserve flag.
	@return The previous state of the #EPinnedReserve flag.
	@pre #MmuLock held.
	@see EPinnedReserve.
	*/
	TBool ClearPinnedReserve()
		{
		CheckAccess("ClearPinnedReserve");
		TUint oldFlags2 = iFlags2;
		iFlags2 = oldFlags2&~EPinnedReserve;
		return oldFlags2&EPinnedReserve;
		}

	/**
	Set #iPagingManagerData to the specified value.
	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	void SetPagingManagerData(TUint32 aPagingManagerData)
		{
		CheckAccess("SetPagingManagerData");
		__NK_ASSERT_DEBUG(iType==EManaged);
		iPagingManagerData = aPagingManagerData;
		}

	/**
	Return #iPagingManagerData.
	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 PagingManagerData()
		{
		CheckAccess("PagingManagerData");
		__NK_ASSERT_DEBUG(iType==EManaged);
		return iPagingManagerData;
		}

	//
	// Debug...
	//

private:
	enum TChecks
		{
		ECheckNotAllocated	= 1<<0,
		ECheckNotUnused		= 1<<1,
		ECheckUnused		= 1<<2,
		ECheckNotPaged		= 1<<3,
		ECheckRamAllocLock	= 1<<4,
		ENoCheckMmuLock		= 1<<5
		};
#ifdef _DEBUG
	void CheckAccess(const char* aMessage, TUint aFlags=0);
#else
	FORCE_INLINE void CheckAccess(const char* /*aMessage*/, TUint /*aFlags*/=0)
		{}
#endif

public:
#ifdef _DEBUG
	/**
	Debug function which outputs the contents of this object to the kernel debug port.
	*/
	void Dump();
#else
	FORCE_INLINE void Dump()
		{}
#endif
	};


const TInt KPageInfosPerPageShift = KPageShift-KPageInfoShift;
const TInt KPageInfosPerPage = 1<<KPageInfosPerPageShift;
const TInt KNumPageInfoPagesShift = 32-KPageShift-KPageInfosPerPageShift;
const TInt KNumPageInfoPages = 1<<KNumPageInfoPagesShift;

FORCE_INLINE SPageInfo* SPageInfo::FromPhysAddr(TPhysAddr aAddress)
	{
	return ((SPageInfo*)KPageInfoLinearBase)+(aAddress>>KPageShift);
	}

FORCE_INLINE TPhysAddr SPageInfo::PhysAddr()
	{
	return ((TPhysAddr)this)<<KPageInfosPerPageShift;
	}
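
// Worked example of the address arithmetic above (an illustrative sketch; the
// concrete constants come from mmboot.h and vary by configuration). Assuming
// a 4KB page size (KPageShift==12) and a 32-byte SPageInfo (KPageInfoShift==5),
// KPageInfosPerPageShift is 7, so:
//
//   FromPhysAddr: info = base + (phys>>12), i.e. one SPageInfo per RAM page.
//   PhysAddr:     this<<7 recovers phys, because this = base + (phys>>12)*32
//                 = base + (phys>>7) for a page-aligned phys, and base<<7
//                 wraps to zero in 32 bits provided KPageInfoLinearBase is
//                 aligned to 1<<(32-KPageInfosPerPageShift) bytes.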
       



/**
A page table information structure giving the current use and state for a
page table.
*/
struct SPageTableInfo
	{
public:

	/**
	Enumeration for the usage of a page table. This is stored in #iType.
	*/
	enum TType
		{
		/**
		Page table is unused (implementation assumes this enumeration == 0).
		@see #iUnused and #SPageTableInfo::TUnused.
		*/
		EUnused=0,

		/**
		Page table has undetermined use.
		(Either created by the bootstrap or is newly allocated but not yet assigned.)
		*/
		EUnknown=1,

		/**
		Page table is being used by a coarse memory object.
		@see #iCoarse and #SPageTableInfo::TCoarse.
		*/
		ECoarseMapping=2,

		/**
		Page table is being used for fine mappings.
		@see #iFine and #SPageTableInfo::TFine.
		*/
		EFineMapping=3
		};

private:

	/**
	Flags stored in #iFlags.
	*/
	enum TFlags
		{
		/**
		Page table is for mapping demand paged content.
		*/
		EDemandPaged		= 	1<<0,
		/**
		Page table is in the Page Table Allocator's cleanup list
		(only set for the first page table in a RAM page).
		*/
		EOnCleanupList		= 	1<<1,
		/**
		The page table cluster that this page table info refers to is currently allocated.
		*/
		EPtClusterAllocated	=	1<<2
		};

	/**
	Value from enum #TType.
	*/
	TUint8 iType;

	/**
	Bitmask of values from #TFlags.
	*/
	TUint8 iFlags;

	/**
	Spare member used for padding.
	*/
	TUint16 iSpare2;

	/**
	Number of pages currently mapped by this page table.
	Normally, when #iPageCount==0 and #iPermanenceCount==0, the page table is freed.
	*/
	TUint16 iPageCount;

	/**
	Count of the number of uses of this page table which require it to be permanently allocated,
	even when it maps no pages (#iPageCount==0).
	*/
	TUint16 iPermanenceCount;

	/**
	Information about a page table when #iType==#EUnused.
	*/
	struct TUnused
		{
		/**
		Cast this object to a SDblQueLink reference.
		This is used for placing unused SPageTableInfo objects into free lists.
		*/
		FORCE_INLINE SDblQueLink& Link()
			{ return *(SDblQueLink*)this; }
	private:
		SDblQueLink* iNext;	///< Next free page table
		SDblQueLink* iPrev;	///< Previous free page table
		};

	/**
	Information about a page table when #iType==#ECoarseMapping.
	*/
	struct TCoarse
		{
		/**
		Memory object which owns this page table.
		*/
		DCoarseMemory*	iMemoryObject;

		/**
		The index of the page table, i.e. the offset, in 'chunks',
		into the object's memory that the page table is being used to map.
		*/
		TUint16			iChunkIndex;

		/**
		The #TPteType the page table is being used for.
		*/
		TUint8			iPteType;
		};

	/**
	Information about a page table when #iType==#EFineMapping.
	*/
	struct TFine
		{
		/**
		Start of the virtual address region that this page table is currently
		mapping memory at, ORed with the OS ASID of the address space this lies in.
		*/
		TLinAddr		iLinAddrAndOsAsid;
		};

	/**
	Union of type specific info.
	*/
	union
		{
		TUnused	iUnused; ///< Information about a page table when #iType==#EUnused.
		TCoarse	iCoarse; ///< Information about a page table when #iType==#ECoarseMapping.
		TFine	iFine;   ///< Information about a page table when #iType==#EFineMapping.
		};

public:
	/**
	Return the SPageTableInfo for the page table in which a given PTE lies.
	*/
	static SPageTableInfo* FromPtPtr(TPte* aPtPte);

	/**
	Return the page table with which this SPageTableInfo is associated.
	*/
	TPte* PageTable();

	/**
	Used at boot time to initialise page tables which were allocated by the bootstrap.

	@param aCount	The number of pages being mapped by this page table.
	*/
	FORCE_INLINE void Boot(TUint aCount)
		{
		CheckInit("Boot");
		iPageCount = aCount;
		iPermanenceCount = 1; // assume page table shouldn't be freed
		iType = EUnknown;
		iFlags = EPtClusterAllocated;
		}

	/**
	Initialise a page table after it has had memory allocated for it.

	@param aDemandPaged	True if this page table has been allocated for use with
						demand paged memory.
	*/
	FORCE_INLINE void New(TBool aDemandPaged)
		{
		iType = EUnused;
		iFlags = EPtClusterAllocated | (aDemandPaged ? EDemandPaged : 0);
		}

	/**
	Return true if the page table cluster that this page table info refers to has
	been previously allocated.
	*/
	FORCE_INLINE TBool IsPtClusterAllocated()
		{
		return iFlags & EPtClusterAllocated;
		}

	/**
	The page table cluster that this page table info refers to has been freed.
	*/
	FORCE_INLINE void PtClusterFreed()
		{
		__NK_ASSERT_DEBUG(IsPtClusterAllocated());
		iFlags &= ~EPtClusterAllocated;
		}

	/**
	The page table cluster that this page table info refers to has been allocated.
	*/
	FORCE_INLINE void PtClusterAlloc()
		{
		__NK_ASSERT_DEBUG(!IsPtClusterAllocated());
		iFlags |= EPtClusterAllocated;
		}

	/**
	Initialise a page table to type #EUnknown after it has been newly allocated.

	@pre #PageTablesLockIsHeld.
	*/
	FORCE_INLINE void Init()
		{
		__NK_ASSERT_DEBUG(IsPtClusterAllocated());
		CheckInit("Init");
		iPageCount = 0;
		iPermanenceCount = 0;
		iType = EUnknown;
		}

	/**
	Increment #iPageCount to account for newly mapped pages.

	@param aStep	Amount to add to #iPageCount. Default is one.

	@return New value of #iPageCount.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint IncPageCount(TUint aStep=1)
		{
		CheckAccess("IncPageCount");
		TUint count = iPageCount; // compiler handles half-word values stupidly, so give it a hand
		count += aStep;
		iPageCount = count;
		return count;
		}
       

	/**
	Decrement #iPageCount to account for removed pages.

	@param aStep	Amount to subtract from #iPageCount. Default is one.

	@return New value of #iPageCount.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint DecPageCount(TUint aStep=1)
		{
		CheckAccess("DecPageCount");
		TUint count = iPageCount; // compiler handles half-word values stupidly, so give it a hand
		count -= aStep;
		iPageCount = count;
		return count;
		}

	/**
	Return #iPageCount.
	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint PageCount()
		{
		CheckAccess("PageCount");
		return iPageCount;
		}

	/**
	Increment #iPermanenceCount to indicate a new use of this page table which
	requires it to be permanently allocated.

	@return New value of #iPermanenceCount.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint IncPermanenceCount()
		{
		CheckAccess("IncPermanenceCount");
		TUint count = iPermanenceCount; // compiler handles half-word values stupidly, so give it a hand
		++count;
		iPermanenceCount = count;
		return count;
		}

	/**
	Decrement #iPermanenceCount to indicate the removal of a use added by #IncPermanenceCount.

	@return New value of #iPermanenceCount.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint DecPermanenceCount()
		{
		CheckAccess("DecPermanenceCount");
		TUint count = iPermanenceCount; // compiler handles half-word values stupidly, so give it a hand
		__NK_ASSERT_DEBUG(count);
		--count;
		iPermanenceCount = count;
		return count;
		}

	/**
	Return #iPermanenceCount.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint PermanenceCount()
		{
		CheckAccess("PermanenceCount");
		return iPermanenceCount;
		}

	/**
	Set page table to the #EUnused state.
	This is only intended for use by #PageTableAllocator.

	@pre #MmuLock held and #PageTablesLockIsHeld.
	*/
	FORCE_INLINE void SetUnused()
		{
		CheckChangeUse("SetUnused");
		iType = EUnused;
		}

	/**
	Return true if the page table is in the #EUnused state.
	This is only intended for use by #PageTableAllocator.

	@pre #MmuLock held or #PageTablesLockIsHeld.
	*/
	FORCE_INLINE TBool IsUnused()
		{
		CheckCheckUse("IsUnused");
		return iType==EUnused;
		}

	/**
	Set page table as being used by a coarse memory object.

	@param aMemory		Memory object which owns this page table.
	@param aChunkIndex	The index of the page table, i.e. the offset, in 'chunks',
						into the object's memory that the page table is being used to map.
	@param aPteType		The #TPteType the page table is being used for.

	@pre #MmuLock held and #PageTablesLockIsHeld.

	@see TCoarse.
	*/
	inline void SetCoarse(DCoarseMemory* aMemory, TUint aChunkIndex, TUint aPteType)
		{
		CheckChangeUse("SetCoarse");
		iPageCount = 0;
		iPermanenceCount = 0;
		iType = ECoarseMapping;
		iCoarse.iMemoryObject = aMemory;
		iCoarse.iChunkIndex = aChunkIndex;
		iCoarse.iPteType = aPteType;
		}

	/**
	Return true if this page table is currently being used by a coarse memory object
	matching the specified arguments.
	For arguments, see #SetCoarse.

	@pre #MmuLock held or #PageTablesLockIsHeld.
	*/
	inline TBool CheckCoarse(DCoarseMemory* aMemory, TUint aChunkIndex, TUint aPteType)
		{
		CheckCheckUse("CheckCoarse");
		return iType==ECoarseMapping
			&& iCoarse.iMemoryObject==aMemory
			&& iCoarse.iChunkIndex==aChunkIndex
			&& iCoarse.iPteType==aPteType;
		}

	/**
	Set page table as being used for fine mappings.

	@param aLinAddr	Start of the virtual address region that the page table is
					mapping memory at.
	@param aOsAsid	The OS ASID of the address space which \a aLinAddr lies in.

	@pre #MmuLock held and #PageTablesLockIsHeld.
	*/
	inline void SetFine(TLinAddr aLinAddr, TUint aOsAsid)
		{
		CheckChangeUse("SetFine");
		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
		iPageCount = 0;
		iPermanenceCount = 0;
		iType = EFineMapping;
		iFine.iLinAddrAndOsAsid = aLinAddr|aOsAsid;
		}

	/**
	Return true if this page table is currently being used for fine mappings
	matching the specified arguments.
	For arguments, see #SetFine.

	@pre #MmuLock held or #PageTablesLockIsHeld.
	*/
	inline TBool CheckFine(TLinAddr aLinAddr, TUint aOsAsid)
		{
		CheckCheckUse("CheckFine");
		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
		return iType==EFineMapping
			&& iFine.iLinAddrAndOsAsid==(aLinAddr|aOsAsid);
		}

	/**
	Set a previously unknown page table as now being used for fine mappings.
	This is used during the boot process by DFineMemory::ClaimInitialPages
	to initialise the state of a page table allocated by the bootstrap.

	@param aLinAddr	Start of the virtual address region that the page table is
					mapping memory at.
	@param aOsAsid	The OS ASID of the address space which \a aLinAddr lies in.
					(This should be KKernelOsAsid.)

	@pre #MmuLock held and #PageTablesLockIsHeld.
	*/
	inline TBool ClaimFine(TLinAddr aLinAddr, TUint aOsAsid)
		{
		CheckChangeUse("ClaimFine");
		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
		if(iType==EFineMapping)
			return CheckFine(aLinAddr,aOsAsid);
		if(iType!=EUnknown)
			return false;
		iType = EFineMapping;
		iFine.iLinAddrAndOsAsid = aLinAddr|aOsAsid;
		return true;
		}
       

	/**
	Return true if the page table was allocated for use with demand paged memory.
	*/
	FORCE_INLINE TBool IsDemandPaged()
		{
		return iFlags&EDemandPaged;
		}

#ifdef _DEBUG
	/**
	Debug check returning true if the value of #iPageCount is consistent with
	the PTEs in this page table.

	@pre #MmuLock held.
	*/
	TBool CheckPageCount();
#endif

	/**
	Return a reference to an embedded SDblQueLink which is used for placing this
	SPageTableInfo object into free lists.
	@pre #PageTablesLockIsHeld.
	@pre #iType==#EUnused.
	*/
	inline SDblQueLink& FreeLink()
		{
		__NK_ASSERT_DEBUG(IsUnused());
		return iUnused.Link();
		}

	/**
	Return a pointer to a SPageTableInfo by conversion from the address
	of its embedded link as returned by #FreeLink.
	*/
	FORCE_INLINE static SPageTableInfo* FromFreeLink(SDblQueLink* aLink)
		{
		return (SPageTableInfo*)((TInt)aLink-_FOFF(SPageTableInfo,iUnused));
		}

	/**
	Return the SPageTableInfo for the first page table in the same
	physical ram page as the page table for this SPageTableInfo.
	*/
	FORCE_INLINE SPageTableInfo* FirstInPage()
		{
		return (SPageTableInfo*)(TLinAddr(this)&~(KPtClusterMask*sizeof(SPageTableInfo)));
		}

	/**
	Return the SPageTableInfo for the last page table in the same
	physical ram page as the page table for this SPageTableInfo.
	*/
	FORCE_INLINE SPageTableInfo* LastInPage()
		{
		return (SPageTableInfo*)(TLinAddr(this)|(KPtClusterMask*sizeof(SPageTableInfo)));
		}

	/**
	Return true if the page table for this SPageTableInfo is
	the first page table in the physical page it occupies.
	*/
	FORCE_INLINE TBool IsFirstInPage()
		{
		return (TLinAddr(this)&(KPtClusterMask*sizeof(SPageTableInfo)))==0;
		}
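
	// Worked example of the masking above (illustrative; the constants come
	// from mmboot.h). If four page tables share a RAM page (KPtClusterMask==3)
	// and sizeof(SPageTableInfo)==16, then KPtClusterMask*sizeof(SPageTableInfo)
	// is 0x30, so FirstInPage() clears and LastInPage() sets bits 4-5 of the
	// info's address, selecting the first or last of the four contiguous,
	// cluster-aligned SPageTableInfo objects.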
       
  1467 
       
  1468 	/**
       
  1469 	Return true if this page table has been added to the cleanup list with
       
  1470 	#AddToCleanupList.
       
  1471 	Must only be used for page tables which return true for #IsFirstInPage.
       
  1472 
       
  1473 	@pre #PageTablesLockIsHeld.
       
  1474 	*/
       
  1475 	FORCE_INLINE TBool IsOnCleanupList()
       
  1476 		{
       
  1477 		__NK_ASSERT_DEBUG(IsFirstInPage());
       
  1478 		return iFlags&EOnCleanupList;
       
  1479 		}
       
  1480 
       
  1481 	/**
       
  1482 	Add the RAM page containing this page table to the specified cleanup list.
       
  1483 	Must only be used for page tables which return true for #IsFirstInPage.
       
  1484 
       
  1485 	@pre #PageTablesLockIsHeld.
       
  1486 	*/
       
  1487 	FORCE_INLINE void AddToCleanupList(SDblQue& aCleanupList)
       
  1488 		{
       
  1489 		__NK_ASSERT_DEBUG(IsUnused());
       
  1490 		__NK_ASSERT_DEBUG(IsFirstInPage());
       
  1491 		__NK_ASSERT_DEBUG(!IsOnCleanupList());
       
  1492 		aCleanupList.Add(&FreeLink());
       
  1493 		iFlags |= EOnCleanupList;
       
  1494 		}
       
  1495 
       
  1496 	/**
       
  1497 	Remove the RAM page containing this page table from a cleanup list it
       
  1498 	was added to with aCleanupList.
       
  1499 	Must only be used for page tables which return true for #IsFirstInPage.
       
  1500 
       
  1501 	@pre #PageTablesLockIsHeld.
       
  1502 	*/
       
  1503 	FORCE_INLINE void RemoveFromCleanupList()
       
  1504 		{
       
  1505 		__NK_ASSERT_DEBUG(IsUnused());
       
  1506 		__NK_ASSERT_DEBUG(IsFirstInPage());
       
  1507 		__NK_ASSERT_DEBUG(IsOnCleanupList());
       
  1508 		iFlags &= ~EOnCleanupList;
       
  1509 		FreeLink().Deque();
       
  1510 		}
       
  1511 
       
  1512 	/**
       
  1513 	Remove this page table from its owner and free it.
       
  1514 	This is only used with page tables which map demand paged memory
       
  1515 	and is intended for use in implementing #DPageTableMemoryManager.
       
  1516 
       
  1517 	@return KErrNone if successful,
       
  1518 			otherwise one of the system wide error codes.
       
  1519 
       
  1520 	@pre #MmuLock held and #PageTablesLockIsHeld.
       
  1521 	*/
       
  1522 	TInt ForcedFree();
       
  1523 
       
  1524 private:
       
  1525 
       
  1526 #ifdef _DEBUG
       
  1527 	void CheckChangeUse(const char* aName);
       
  1528 	void CheckCheckUse(const char* aName);
       
  1529 	void CheckAccess(const char* aName);
       
  1530 	void CheckInit(const char* aName);
       
  1531 #else
       
  1532 	FORCE_INLINE void CheckChangeUse(const char* /*aName*/)
       
  1533 		{}
       
  1534 	FORCE_INLINE void CheckCheckUse(const char* /*aName*/)
       
  1535 		{}
       
  1536 	FORCE_INLINE void CheckAccess(const char* /*aName*/)
       
  1537 		{}
       
  1538 	FORCE_INLINE void CheckInit(const char* /*aName*/)
       
  1539 		{}
       
  1540 #endif
       
  1541 	};
       
  1542 
       
  1543 
       
  1544 const TInt KPageTableInfoShift = 4;
       
  1545 __ASSERT_COMPILE(sizeof(SPageTableInfo)==(1<<KPageTableInfoShift));
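
/*
Example (illustrative sketch only, not part of the kernel API): walking every
SPageTableInfo whose page table lives in the same physical RAM page, using
#FirstInPage and #LastInPage. This assumes page tables are allocated in
clusters of KPtClusterSize per RAM page, as described above; VisitCluster is
a hypothetical helper.

@code
	void VisitCluster(SPageTableInfo* aPti)
		{
		SPageTableInfo* pti = aPti->FirstInPage();
		SPageTableInfo* last = aPti->LastInPage();
		do
			{
			// ... use pti ...
			}
		while(pti++ < last);
		}
@endcode
*/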
       
  1546 
       
  1547 FORCE_INLINE SPageTableInfo* SPageTableInfo::FromPtPtr(TPte* aPtPte)
       
  1548 	{
       
  1549 	TUint id = ((TLinAddr)aPtPte-KPageTableBase)>>KPageTableShift;
       
  1550 	return (SPageTableInfo*)KPageTableInfoBase+id;
       
  1551 	}
       
  1552 
       
  1553 FORCE_INLINE TPte* SPageTableInfo::PageTable()
       
  1554 	{
       
  1555 	return (TPte*)
       
  1556 		(KPageTableBase+
       
  1557 			(
       
  1558 			((TLinAddr)this-(TLinAddr)KPageTableInfoBase)
       
  1559 			<<(KPageTableShift-KPageTableInfoShift)
       
  1560 			)
       
  1561 		);
       
  1562 	}
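
/*
Illustrative sketch only: the two conversions above are inverses for a pointer
to the first entry of a page table, so a hypothetical debug helper could
verify the round trip like this.

@code
	void CheckPtiRoundTrip(TPte* aPt)
		{
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPt);
		__NK_ASSERT_DEBUG(pti->PageTable()==aPt);
		}
@endcode
*/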
       
  1563 
       
  1564 
       
  1565 
       
  1566 /**
       
  1567 Class providing access to the mutex used to protect memory allocation operations;
       
  1568 this is the mutex Mmu::iRamAllocatorMutex.
       
  1569 In addition to providing locking, these functions monitor the system's free RAM
       
  1570 levels and call K::CheckFreeMemoryLevel to notify the system of changes.
       
  1571 */
       
  1572 class RamAllocLock
       
  1573 	{
       
  1574 public:
       
  1575 	/**
       
  1576 	Acquire the lock.
       
  1577 	The lock may be acquired multiple times by a thread, and will remain locked
       
  1578 	until #Unlock has been used enough times to balance this.
       
  1579 	*/
       
  1580 	static void Lock();
       
  1581 
       
  1582 	/**
       
  1583 	Release the lock.
       
  1584 
       
  1585 	@pre The current thread has previously acquired the lock.
       
  1586 	*/
       
  1587 	static void Unlock();
       
  1588 
       
  1589 	/**
       
  1590 	Allow another thread to acquire the lock.
       
  1591 	This is equivalent to #Unlock followed by #Lock, but optimised
       
  1592 	to only do this if there is another thread waiting on the lock.
       
  1593 
       
  1594 	@return True if the lock was released by this function.
       
  1595 
       
  1596 	@pre The current thread has previously acquired the lock.
       
  1597 	*/
       
  1598 	static TBool Flash();
       
  1599 
       
  1600 	/**
       
  1601 	Return true if the current thread holds the lock.
       
  1602 	This is used for debug checks.
       
  1603 	*/
       
  1604 	static TBool IsHeld();
       
  1605 	};
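
/*
Example (illustrative sketch only): the typical pattern for code holding the
#RamAllocLock. AllocatePages() stands for any operation requiring the lock
and is a hypothetical function.

@code
	RamAllocLock::Lock();
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	AllocatePages();			// hypothetical operation requiring the lock
	RamAllocLock::Unlock();		// must balance every Lock()
@endcode
*/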
       
  1606 
       
  1607 
       
  1608 
       
  1609 /**
       
  1610 Return true if the PageTableLock is held by the current thread.
       
  1611 This lock is the mutex used to protect page table allocation; it is acquired
       
  1612 with
       
  1613 @code
       
  1614 	::PageTables.Lock();
       
  1615 @endcode
       
  1616 and released with
       
  1617 @code
       
  1618 	::PageTables.Unlock();
       
  1619 @endcode
       
  1620 */
       
  1621 TBool PageTablesLockIsHeld();
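
/*
Illustrative sketch only: code which requires the page table lock typically
guards itself with a debug assertion of this form.

@code
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
@endcode
*/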
       
  1622 
       
  1623 
       
  1624 
       
  1625 /**
       
  1626 Class providing access to the fast mutex used to protect various
       
  1627 low level memory operations.
       
  1628 
       
  1629 This lock must only be held for a very short and bounded time.
       
  1630 */
       
  1631 class MmuLock
       
  1632 	{
       
  1633 public:
       
  1634 	/**
       
  1635 	Acquire the lock.
       
  1636 	*/
       
  1637 	static void Lock();
       
  1638 
       
  1639 	/**
       
  1640 	Release the lock.
       
  1641 
       
  1642 	@pre The current thread has previously acquired the lock.
       
  1643 	*/
       
  1644 	static void Unlock();
       
  1645 
       
  1646 	/**
       
  1647 	Allow another thread to acquire the lock.
       
  1648 	This is equivalent to #Unlock followed by #Lock, but optimised
       
  1649 	to only do this if there is another thread waiting on the lock.
       
  1650 
       
  1651 	@return True if the lock was released by this function.
       
  1652 
       
  1653 	@pre The current thread has previously acquired the lock.
       
  1654 	*/
       
  1655 	static TBool Flash();
       
  1656 
       
  1657 	/**
       
  1658 	Return true if the current thread holds the lock.
       
  1659 	This is used for debug checks.
       
  1660 	*/
       
  1661 	static TBool IsHeld();
       
  1662 
       
  1663 	/**
       
  1664 	Increment a counter and perform the action of #Flash() once a given threshold
       
  1665 	value is reached. After flashing the counter is reset.
       
  1666 
       
  1667 	This is typically used in long running loops to periodically flash the lock
       
  1668 	and so avoid holding it for too long, e.g.
       
  1669 
       
  1670 	@code
       
  1671 	MmuLock::Lock();
       
  1672 	TUint flash = 0;
       
  1673 	const TUint KMaxIterationsWithLock = 10;
       
  1674 	while(WorkToDo)
       
  1675 		{
       
  1676 		DoSomeWork();
       
  1677 		MmuLock::Flash(flash,KMaxIterationsWithLock); // flash every N loops
       
  1678 		}
       
  1679 	MmuLock::Unlock();
       
  1680 	@endcode
       
  1681 
       
  1682 	@param aCounter			Reference to the counter.
       
  1683 	@param aFlashThreshold	Value \a aCounter must reach before flashing the lock.
       
  1684 	@param aStep			Value to add to \a aCounter.
       
  1685 
       
  1686 	@return True if the lock was released by this function.
       
  1687 
       
  1688 	@pre The current thread has previously acquired the lock.
       
  1689 	*/
       
  1690 	static FORCE_INLINE TBool Flash(TUint& aCounter, TUint aFlashThreshold, TUint aStep=1)
       
  1691 		{
       
  1692 		UnlockGuardCheck();
       
  1693 		if((aCounter+=aStep)<aFlashThreshold)
       
  1694 			return EFalse;
       
  1695 		aCounter -= aFlashThreshold;
       
  1696 		return MmuLock::Flash();
       
  1697 		}
       
  1698 
       
  1699 	/**
       
  1700 	Begin a debug check to test that the MmuLock is not unlocked unexpectedly.
       
  1701 
       
  1702 	This is used in situations where a series of operations must be performed
       
  1703 	atomically with the MmuLock held. It is usually used via the
       
  1704 	#__UNLOCK_GUARD_START macro, e.g.
       
  1705 
       
  1706 	@code
       
  1707 	__UNLOCK_GUARD_START(MmuLock);
       
  1708 	SomeCode();
       
  1709 	SomeMoreCode();
       
  1710 	__UNLOCK_GUARD_END(MmuLock); // fault if MmuLock released by SomeCode or SomeMoreCode
       
  1711 	@endcode
       
  1712 	*/
       
  1713 	static FORCE_INLINE void UnlockGuardStart()
       
  1714 		{
       
  1715 		#ifdef _DEBUG
       
  1716 			++UnlockGuardNest;
       
  1717 		#endif
       
  1718 		}
       
  1719 
       
  1720 	/**
       
  1721 	End a debug check testing that the MmuLock is not unlocked unexpectedly.
       
  1722 	This is usually used via the #__UNLOCK_GUARD_END macro, which faults if false is returned.
       
  1723 
       
  1724 	@see UnlockGuardStart
       
  1725 
       
  1726 	@return True if the MmuLock was not released between a previous #UnlockGuardStart

  1727 			and the call to this function.
       
  1728 	*/
       
  1729 	static FORCE_INLINE TBool UnlockGuardEnd()
       
  1730 		{
       
  1731 		#ifdef _DEBUG
       
  1732 			__NK_ASSERT_DEBUG(UnlockGuardNest);
       
  1733 			--UnlockGuardNest;
       
  1734 			return UnlockGuardFail==0;
       
  1735 		#else
       
  1736 			return true;
       
  1737 		#endif
       
  1738 		}
       
  1739 
       
  1740 private:
       
  1741 	/**
       
  1742 	Executed whenever the lock is released to check that
       
  1743 	#UnlockGuardStart and #UnlockGuardEnd are balanced.
       
  1744 	*/
       
  1745 	static FORCE_INLINE void UnlockGuardCheck()
       
  1746 		{
       
  1747 		#ifdef _DEBUG
       
  1748 			if(UnlockGuardNest)
       
  1749 				UnlockGuardFail = true;
       
  1750 		#endif
       
  1751 		}
       
  1752 
       
  1753 private:
       
  1754 	/** The lock */
       
  1755 	static NFastMutex iLock;
       
  1756 
       
  1757 #ifdef _DEBUG
       
  1758 	static TUint UnlockGuardNest;
       
  1759 	static TUint UnlockGuardFail;
       
  1760 #endif
       
  1761 	};
       
  1762 
       
  1763 
       
  1764 
       
  1765 /**
       
  1766 Interface for accessing the lock mutex being used to serialise
       
  1767 explicit modifications to a specified memory object.
       
  1768 
       
  1769 	The lock mutex is either the one which was previously assigned with

  1770 	DMemoryObject::SetLock or, if none was set, a mutex dynamically assigned

  1771 	from #MemoryObjectMutexPool; such a mutex has 'order' #KMutexOrdMemoryObject.
       
  1772 */
       
  1773 class MemoryObjectLock
       
  1774 	{
       
  1775 public:
       
  1776 	/**
       
  1777 	Acquire the lock for the specified memory object.
       
  1778 	If the object has no lock, one is assigned from #MemoryObjectMutexPool.
       
  1779 	*/
       
  1780 	static void Lock(DMemoryObject* aMemory);
       
  1781 
       
  1782 	/**
       
  1783 	Release the lock for the specified memory object, which was acquired
       
  1784 	with #Lock. If the lock was one which was dynamically assigned, and there
       
  1785 	are no threads waiting for it, the lock is unassigned from the memory
       
  1786 	object.
       
  1787 	*/
       
  1788 	static void Unlock(DMemoryObject* aMemory);
       
  1789 
       
  1790 	/**
       
  1791 	Return true if the current thread holds the lock for the specified memory object.
       
  1792 	This is used for debug checks.
       
  1793 	*/
       
  1794 	static TBool IsHeld(DMemoryObject* aMemory);
       
  1795 	};
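
/*
Example (illustrative sketch only): serialising an explicit modification to a
memory object. 'aMemory' is an assumed DMemoryObject pointer and
ModifyMemory() a hypothetical operation.

@code
	void ExampleModify(DMemoryObject* aMemory)
		{
		MemoryObjectLock::Lock(aMemory);
		__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
		ModifyMemory(aMemory);	// hypothetical modification
		MemoryObjectLock::Unlock(aMemory);
		}
@endcode
*/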
       
  1796 
       
  1797 
       
  1798 #define __UNLOCK_GUARD_START(_l) __DEBUG_ONLY(_l::UnlockGuardStart())
       
  1799 #define __UNLOCK_GUARD_END(_l) __NK_ASSERT_DEBUG(_l::UnlockGuardEnd())
       
  1800 
       
  1801 
       
  1802 const TUint KMutexOrdAddresSpace = KMutexOrdKernelHeap + 2;
       
  1803 const TUint KMutexOrdMemoryObject = KMutexOrdKernelHeap + 1;
       
  1804 const TUint KMutexOrdMmuAlloc = KMutexOrdRamAlloc + 1;
       
  1805 
       
  1806 
       
  1807 #ifdef _DEBUG
       
  1808 //#define FORCE_TRACE
       
  1809 //#define FORCE_TRACE2
       
  1810 //#define FORCE_TRACEB
       
  1811 //#define FORCE_TRACEP
       
  1812 #endif
       
  1813 
       
  1814 
       
  1815 
       
  1816 #define TRACE_printf Kern::Printf
       
  1817 
       
  1818 #define TRACE_ALWAYS(t) TRACE_printf t
       
  1819 
       
  1820 #ifdef FORCE_TRACE
       
  1821 #define TRACE(t) TRACE_printf t
       
  1822 #else
       
  1823 #define TRACE(t) __KTRACE_OPT(KMMU2,TRACE_printf t)
       
  1824 #endif
       
  1825 
       
  1826 #ifdef FORCE_TRACE2
       
  1827 #define TRACE2(t) TRACE_printf t
       
  1828 #else
       
  1829 #define TRACE2(t) __KTRACE_OPT(KMMU2,TRACE_printf t)
       
  1830 #endif
       
  1831 
       
  1832 #ifdef FORCE_TRACEB
       
  1833 #define TRACEB(t) TRACE_printf t
       
  1834 #else
       
  1835 #define TRACEB(t) __KTRACE_OPT2(KMMU,KBOOT,TRACE_printf t)
       
  1836 #endif
       
  1837 
       
  1838 #ifdef FORCE_TRACEP
       
  1839 #define TRACEP(t) TRACE_printf t
       
  1840 #else
       
  1841 #define TRACEP(t) __KTRACE_OPT(KPAGING,TRACE_printf t)
       
  1842 #endif
       
  1843 
       
  1844 
       
  1845 /**
       
  1846 The maximum number of consecutive updates to #SPageInfo structures which
       
  1847 should be executed without releasing the #MmuLock.
       
  1848 
       
  1849 This value must be an integer power of two.
       
  1850 */
       
  1851 const TUint KMaxPageInfoUpdatesInOneGo = 64;
       
  1852 
       
  1853 /**
       
  1854 The maximum number of simple operations on memory page state which should
       
  1855 occur without releasing the #MmuLock. Examples of the operations are
       
  1856 read-modify-write of a Page Table Entry (PTE) or entries in a memory objects
       
  1857 	read-modify-write of a Page Table Entry (PTE) or of entries in a memory object's
       
  1858 
       
  1859 This value must be an integer power of two.
       
  1860 */
       
  1861 const TUint KMaxPagesInOneGo = KMaxPageInfoUpdatesInOneGo/2;
       
  1862 
       
  1863 /**
       
  1864 The maximum number of Page Directory Entries which should be updated
       
  1865 without releasing the #MmuLock.
       
  1866 
       
  1867 This value must be an integer power of two.
       
  1868 */
       
  1869 const TUint KMaxPdesInOneGo = KMaxPageInfoUpdatesInOneGo;
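
/*
Illustrative sketch only: how the 'in one go' limits above are intended to
bound MmuLock hold times when combined with MmuLock::Flash.
UpdateOnePageInfo() is a hypothetical per-page SPageInfo update.

@code
	void UpdateAllPageInfos(TUint aCount)
		{
		MmuLock::Lock();
		TUint flash = 0;
		for(TUint i=0; i<aCount; ++i)
			{
			UpdateOnePageInfo(i);	// hypothetical SPageInfo update
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		MmuLock::Unlock();
		}
@endcode
*/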
       
  1870 
       
  1871 
       
  1872 /********************************************
       
  1873  * MMU stuff
       
  1874  ********************************************/
       
  1875 
       
  1876 class DRamAllocator;
       
  1877 class TPinArgs;
       
  1878 class Defrag;
       
  1879 
       
  1880 /**
       
  1881 Interface to RAM allocation and MMU data structure manipulation.
       
  1882 */
       
  1883 class Mmu
       
  1884 	{
       
  1885 public:
       
  1886 	enum TPanic
       
  1887 		{
       
  1888 		EInvalidRamBankAtBoot,
       
  1889 		EInvalidReservedBankAtBoot,
       
  1890 		EInvalidPageTableAtBoot,
       
  1891 		EInvalidPdeAtBoot,
       
  1892 		EBadMappedPageAfterBoot,
       
  1893 		ERamAllocMutexCreateFailed,
       
  1894 		EBadFreePhysicalRam,
       
  1895 		EUnsafePageInfoAccess,
       
  1896 		EUnsafePageTableInfoAccess,
       
  1897 		EPhysMemSyncMutexCreateFailed,
       
  1898 		EDefragAllocFailed
       
  1899 		};
       
  1900 
       
  1901 	/**
       
  1902 	Attribute flags used when allocating RAM pages.
       
  1903 	See #AllocRam etc.
       
  1904 
       
  1905 	The least significant bits of these flags are used for the #TMemoryType
       
  1906 	value for the memory.
       
  1907 	*/
       
  1908 	enum TRamAllocFlags
       
  1909 		{
       
  1910 		// lower bits hold TMemoryType
       
  1911 
       
  1912 		/**
       
  1913 		If this flag is set, don't wipe the contents of the memory when allocated.
       
  1914 		By default, for security and confidentiality reasons, the memory is filled
       
  1915 		with a 'wipe' value to erase the previous contents.
       
  1916 		*/
       
  1917 		EAllocNoWipe			= 1<<(KMemoryTypeShift),
       
  1918 
       
  1919 		/**
       
  1920 		If this flag is set, any memory wiping will fill memory with the byte
       
  1921 		value starting at bit position #EAllocWipeByteShift in these flags.
       
  1922 		*/
       
  1923 		EAllocUseCustomWipeByte	= 1<<(KMemoryTypeShift+1),
       
  1924 
       
  1925 		/**
       
  1926 		If this flag is set, memory allocation won't attempt to reclaim pages
       
  1927 		from the demand paging system.
       
  1928 		This is used to prevent deadlock when the paging system itself attempts
       
  1929 		to allocate memory for itself.
       
  1930 		*/
       
  1931 		EAllocNoPagerReclaim	= 1<<(KMemoryTypeShift+2),
       
  1932 
       
  1933 		/**
       
  1934 		@internal
       
  1935 		*/
       
  1936 		EAllocFlagLast,
       
  1937 
       
  1938 		/*
       
  1939 		Bit position within these flags, for the least significant bit of the
       
  1940 		byte value used when #EAllocUseCustomWipeByte is set.
       
  1941 		*/
       
  1942 		EAllocWipeByteShift		= 8
       
  1943 		};
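
	/*
	Illustrative sketch only: composing TRamAllocFlags. The memory type
	occupies the low bits, so flags can be built by OR-ing a TMemoryType
	value (here an assumed variable 'type') with the flags above, e.g.
	requesting a custom wipe byte of 0xAA:

	@code
		TRamAllocFlags flags = (TRamAllocFlags)
			(type | EAllocUseCustomWipeByte | (0xAA<<EAllocWipeByteShift));
	@endcode
	*/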
       
  1944 
       
  1945 public:
       
  1946 	void Init1();
       
  1947 	void Init1Common();
       
  1948 	void Init2();
       
  1949 	void Init2Common();
       
  1950 	void Init2Final();
       
  1951 	void Init2FinalCommon();
       
  1952 	void Init3();
       
  1953 
       
  1954 	static void Panic(TPanic aPanic);
       
  1955 
       
  1956 	static TInt HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo);
       
  1957 
       
  1958 	TUint FreeRamInPages();
       
  1959 	TUint TotalPhysicalRamPages();
       
  1960 
       
  1961 	TInt AllocRam(	TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType, 
       
  1962 					TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
       
  1963 	void FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType);
       
  1964 	TInt AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags);
       
  1965 	void FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount);
       
  1966 
       
  1967 	const SRamZone* RamZoneConfig(TRamZoneCallback& aCallback) const;
       
  1968 	void SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback);
       
  1969 	TInt ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask);
       
  1970 	TInt GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData);
       
  1971 	TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign);
       
  1972 	TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList);
       
  1973 	TInt RamHalFunction(TInt aFunction, TAny* a1, TAny* a2);	
       
  1974 	void ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType);
       
  1975 
       
  1976 	TInt AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags);
       
  1977 	void FreePhysicalRam(TPhysAddr* aPages, TUint aCount);
       
  1978 	TInt AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags);
       
  1979 	void FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount);
       
  1980 	TInt ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags);
       
  1981 	void AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags);
       
  1982 
       
  1983 	TLinAddr MapTemp(TPhysAddr aPage, TUint aColour, TUint aSlot=0);
       
  1984 	void UnmapTemp(TUint aSlot=0);
       
  1985 	void RemoveAliasesForPageTable(TPhysAddr aPageTable);
       
  1986 
       
  1987 	static TBool MapPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte);
       
  1988 	static TBool UnmapPages(TPte* const aPtePtr, TUint aCount);
       
  1989 	static TBool UnmapPages(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages);
       
  1990 	static void RemapPage(TPte* const aPtePtr, TPhysAddr& aPage, TPte aBlankPte);
       
  1991 	static void RestrictPagesNA(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages);
       
  1992 	static TBool PageInPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte);
       
  1993 
       
  1994 	// implemented in CPU-specific code...
       
  1995 	static TUint PteType(TMappingPermissions aPermissions, TBool aGlobal);
       
  1996 	static TUint PdeType(TMemoryAttributes aAttributes);
       
  1997 	static TPte BlankPte(TMemoryAttributes aAttributes, TUint aPteType);
       
  1998 	static TPde BlankPde(TMemoryAttributes aAttributes);
       
  1999 	static TPde BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType);
       
  2000 	static TBool CheckPteTypePermissions(TUint aPteType, TUint aAccessPermissions);
       
  2001 	static TMappingPermissions PermissionsFromPteType(TUint aPteType);
       
  2002 	void PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate=false);
       
  2003 	void PageFreed(SPageInfo* aPageInfo);
       
  2004 	void CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour);
       
  2005 public:
       
  2006 	// utils, implemented in CPU-specific code...
       
  2007 	static TPde* PageDirectory(TInt aOsAsid);
       
  2008 	static TPde* PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress);
       
  2009 	static TPhysAddr PdePhysAddr(TPde aPde);
       
  2010 	static TPhysAddr PtePhysAddr(TPte aPte, TUint aPteIndex);
       
  2011 	static TPte* PageTableFromPde(TPde aPde);
       
  2012 	static TPte* SafePageTableFromPde(TPde aPde);
       
  2013 	static TPhysAddr SectionBaseFromPde(TPde aPde);
       
  2014 	static TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid);
       
  2015 	static TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid);
       
  2016 	static TPhysAddr PageTablePhysAddr(TPte* aPt);
       
  2017 	static TPhysAddr LinearToPhysical(TLinAddr aAddr, TInt aOsAsid=KKernelOsAsid);
       
  2018 	static TPhysAddr UncheckedLinearToPhysical(TLinAddr aAddr, TInt aOsAsid);
       
  2019 	static TPte MakePteInaccessible(TPte aPte, TBool aReadOnly);
       
  2020 	static TPte MakePteAccessible(TPte aPte, TBool aWrite);
       
  2021 	static TBool IsPteReadOnly(TPte aPte);
       
  2022 	static TBool IsPteMoreAccessible(TPte aNewPte, TPte aOldPte);
       
  2023 	static TBool IsPteInaccessible(TPte aPte);
       
  2024 	static TBool PdeMapsPageTable(TPde aPde);
       
  2025 	static TBool PdeMapsSection(TPde aPde);
       
  2026 
       
  2027 	void SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);
       
  2028 	void SyncPhysicalMemoryBeforeDmaRead (TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);
       
  2029 	void SyncPhysicalMemoryAfterDmaRead  (TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);
       
  2030 
       
  2031 	static TPte SectionToPageEntry(TPde& aPde);
       
  2032 	static TPde PageToSectionEntry(TPte aPte, TPde aPde);
       
  2033 	static TMemoryAttributes CanonicalMemoryAttributes(TMemoryAttributes aAttr);
       
  2034 
       
  2035 public:
       
  2036 	/**
       
  2037 	Class representing the resources and methods required to create temporary
       
  2038 	mappings of physical memory pages in order to make them accessible to
       
  2039 	software.
       
  2040 	These are required by various memory model functions and are created only
       
  2041 	during system boot.
       
  2042 	*/
       
  2043 	class TTempMapping
       
  2044 		{
       
  2045 	public:
       
  2046 		void Alloc(TUint aNumPages);
       
  2047 		TLinAddr Map(TPhysAddr aPage, TUint aColour);
       
  2048 		TLinAddr Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte);
       
  2049 		TLinAddr Map(TPhysAddr* aPages, TUint aCount, TUint aColour);
       
  2050 		void Unmap();
       
  2051 		void Unmap(TBool aIMBRequired);
       
  2052 		FORCE_INLINE TTempMapping()
       
  2053 			: iSize(0)
       
  2054 			{}
       
  2055 	public:
       
  2056 		TLinAddr iLinAddr;		///< Virtual address of the memory page mapped by #iPtePtr.
       
  2057 		TPte* iPtePtr;			///< Pointer to first PTE allocated to this object.
       
  2058 	private:
       
  2059 		TPte iBlankPte;			///< PTE value to use for mapping pages, with the physical address component equal to zero.
       
  2060 		TUint8 iSize;			///< Maximum number of pages which can be mapped in one go.
       
  2061 		TUint8 iCount;			///< Number of pages currently mapped.
       
  2062 		TUint8 iColour;			///< Colour of any pages mapped (acts as index from #iLinAddr and #iPtePtr).
       
  2063 		TUint8 iSpare1;
       
  2064 	private:
       
  2065 		static TLinAddr iNextLinAddr;
       
  2066 		};
       
  2067 private:
       
  2068 	enum { KNumTempMappingSlots=2 };
       
  2069 	/**
       
  2070 	Temporary mappings used by various functions.
       
  2071 	Use of these is serialised by the #RamAllocLock.
       
  2072 	*/
       
  2073 	TTempMapping iTempMap[KNumTempMappingSlots];
       
  2074 
       
  2075 	TTempMapping iPhysMemSyncTemp;	///< Temporary mapping used for physical memory sync.
       
  2076 	DMutex* 	 iPhysMemSyncMutex;	///< Mutex used to serialise use of #iPhysMemSyncTemp.
       
  2077 
       
  2078 public:
       
  2079 	TPte iTempPteCached;			///< PTE value for cached temporary mappings
       
  2080 	TPte iTempPteUncached;			///< PTE value for uncached temporary mappings
       
  2081 	TPte iTempPteCacheMaintenance;	///< PTE value for temporary mapping of cache maintenance
       
  2082 private:
       
  2083 	DRamAllocator* iRamPageAllocator;			///< The RAM allocator used for managing free RAM pages.
       
  2084 	const SRamZone* iRamZones;					///< A pointer to the RAM zone configuration from the variant.
       
  2085 	TRamZoneCallback iRamZoneCallback;			///< Pointer to the RAM zone callback function.
       
  2086 	Defrag* iDefrag;							///< The RAM defrag class implementation.
       
  2087 
       
  2088 	/**
       
  2089 	A counter incremented every time Mmu::PagesAllocated invalidates the L1 cache.
       
  2090 	This is used as part of a cache maintenance optimisation.
       
  2091 	*/
       
  2092 	TInt iCacheInvalidateCounter;
       
  2093 
       
  2094 	/**
       
  2095 	Number of free RAM pages which are cached at L1 and have
       
  2096 	SPageInfo::CacheInvalidateCounter()==#iCacheInvalidateCounter.
       
  2097 	This is used as part of a cache maintenance optimisation.
       
  2098 	*/
       
  2099 	TInt iCacheInvalidatePageCount;
       
  2100 
       
  2101 public:
       
  2102 	/**
       
  2103 	Linked list of threads which have an active IPC alias, i.e. which have called
       
  2104 	DMemModelThread::Alias. Threads are linked by their DMemModelThread::iAliasLink member.
       
  2105 	Updates to this list are protected by the #MmuLock.
       
  2106 	*/
       
  2107 	SDblQue iAliasList;
       
  2108 
       
  2109 	/**
       
  2110 	The mutex used to protect RAM allocation.
       
  2111 	This is the mutex #RamAllocLock operates on.
       
  2112 	*/
       
  2113 	DMutex* iRamAllocatorMutex;
       
  2114 
       
  2115 private:
       
  2116 	/**
       
  2117 	Number of nested calls to RamAllocLock::Lock.
       
  2118 	*/
       
  2119 	TUint iRamAllocLockCount;
       
  2120 
       
  2121 	/**
       
  2122 	Set by various memory allocation routines to indicate that a memory allocation
       
  2123 	has failed. This is used by #RamAllocLock in its management of out-of-memory
       
  2124 	notifications.
       
  2125 	*/
       
  2126 	TBool iRamAllocFailed;
       
  2127 
       
  2128 	/**
       
  2129 	Saved value for #FreeRamInPages which is used by #RamAllocLock in its management
       
  2130 	of memory level change notifications.
       
  2131 	*/
       
  2132 	TUint iRamAllocInitialFreePages;
       
  2133 
       
  2134 	friend class RamAllocLock;
       
  2135 private:
       
  2136 	void VerifyRam();
       
  2137 	};
       
  2138 
       
  2139 /**
       
  2140 The single instance of class #Mmu.
       
  2141 */
       
  2142 extern Mmu TheMmu;
       
  2143 
       
  2144 
       
  2145 #ifndef _DEBUG
       
  2146 /**
       
  2147 Perform a page table walk to return the physical address of
       
  2148 the memory mapped at virtual address \a aAddr in the
       
  2149 address space \a aOsAsid.
       
  2150 
       
  2151 If the page table used was not one allocated by the kernel
       
  2152 then the results are unpredictable and may cause a system fault.
       
  2153 
       
  2154 @pre #MmuLock held.
       
  2155 */
       
  2156 FORCE_INLINE TPhysAddr Mmu::LinearToPhysical(TLinAddr aAddr, TInt aOsAsid)
       
  2157 	{
       
  2158 	return Mmu::UncheckedLinearToPhysical(aAddr,aOsAsid);
       
  2159 	}
       
  2160 #endif
       
  2161 
       
  2162 
       
  2163 __ASSERT_COMPILE((Mmu::EAllocFlagLast>>Mmu::EAllocWipeByteShift)==0); // make sure flags don't run into wipe byte value
       
  2164 
       
  2165 
       
  2166 /**
       
  2167 Create a temporary mapping of a physical page.
       
  2168 The RamAllocatorMutex must be held before this function is called and not released
       
  2169 until after UnmapTemp has been called.
       
  2170 
       
  2171 @param aPage	The physical address of the page to be mapped.
       
  2172 @param aColour	The 'colour' of the page if relevant.
       
  2173 @param aSlot	Slot number to use, must be less than Mmu::KNumTempMappingSlots.
       
  2174 
       
  2175 @return The linear address of where the page has been mapped.
       
  2176 */
       
  2177 FORCE_INLINE TLinAddr Mmu::MapTemp(TPhysAddr aPage, TUint aColour, TUint aSlot)
       
  2178 	{
       
  2179 //	Kern::Printf("Mmu::MapTemp(0x%08x,%d,%d)",aPage,aColour,aSlot);
       
  2180 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
       
  2181 	__NK_ASSERT_DEBUG(aSlot<KNumTempMappingSlots);
       
  2182 	return iTempMap[aSlot].Map(aPage,aColour);
       
  2183 	}
       
  2184 
       
  2185 
       
  2186 /**
       
  2187 Remove the temporary mapping created with MapTemp.
       
  2188 
       
  2189 @param aSlot	Slot number which was used when temp mapping was made.
       
  2190 */
       
  2191 FORCE_INLINE void Mmu::UnmapTemp(TUint aSlot)
       
  2192 	{
       
  2193 //	Kern::Printf("Mmu::UnmapTemp(%d)",aSlot);
       
  2194 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
       
  2195 	__NK_ASSERT_DEBUG(aSlot<KNumTempMappingSlots);
       
  2196 	iTempMap[aSlot].Unmap();
       
  2197 	}
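
/*
Example (illustrative sketch only): pairing MapTemp and UnmapTemp to
zero-fill a physical page. WipePage() is a hypothetical helper; memclr is the
kernel's memory clearing utility.

@code
	void WipePage(TPhysAddr aPhysAddr, TUint aColour)
		{
		RamAllocLock::Lock();
		TLinAddr va = TheMmu.MapTemp(aPhysAddr,aColour);
		memclr((TAny*)va,KPageSize);
		TheMmu.UnmapTemp();
		RamAllocLock::Unlock();
		}
@endcode
*/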
       
  2198 
       
  2199 
       
  2200 /**
       
  2201 Class representing the resources and arguments needed for various
       
  2202 memory pinning operations.
       
  2203 
       
  2204 The term 'replacement pages' in this documentation means excess
       
  2205 RAM pages which have been allocated to the demand paging pool so
       
  2206 that when demand paged memory is pinned and removed, the pool

  2207 does not become too small.
       
  2208 
       
  2209 Replacement pages are allocated with #AllocReplacementPages and their
       
  2210 number remembered in #iReplacementPages. When a memory pinning operation
       
  2211 removes pages from the paging pool it will reduce #iReplacementPages
       
  2212 accordingly. At the end of the pinning operation, #FreeReplacementPages
       
  2213 is used to free any unused replacement pages.
       
  2214 */
       
  2215 class TPinArgs
       
  2216 	{
       
  2217 public:
       
  2218 	/**
       
  2219 	Boolean value set to true if the requester of the pinning operation
       
  2220 	will only read from the pinned memory, not write to it.
       
  2221 	This is used as an optimisation to avoid unnecessarily marking
       
  2222 	demand paged memory as dirty.
       
  2223 	*/
       
  2224 	TBool iReadOnly;
       
  2225 
       
  2226 	/**
       
  2227 	Boolean value set to true if sufficient replacement pages already exist

  2228 	in the demand paging pool, so that #AllocReplacementPages does not need

  2229 	to actually allocate any.
       
  2230 	*/
       
  2231 	TBool iUseReserve;
       
  2232 
       
  2233 	/**
       
  2234 	The number of replacement pages allocated to this object by #AllocReplacementPages.
       
  2235 	A value of #EUseReserveForPinReplacementPages indicates that #iUseReserve
       
  2236 	was true, and there is sufficient RAM already reserved for the operation
       
  2237 	being attempted.
       
  2238 	*/
       
  2239 	TUint iReplacementPages;
       
  2240 
       
  2241 	/**
       
  2242 	The number of page tables which have been pinned during the course
       
  2243 	of an operation. This is the number of valid entries written to
       
  2244 	#iPinnedPageTables.
       
  2245 	*/
       
  2246 	TUint iNumPinnedPageTables;
       
  2247 
       
  2248 	/**
       
  2249 	Pointer to the location to store the addresses of any page tables
       
  2250 	which have been pinned during the course of an operation. This is
       
  2251 	incremented as entries are added.
       
  2252 
       
  2253 	A null pointer indicates that page tables do not require pinning.
       
  2254 	*/
       
  2255 	TPte** iPinnedPageTables;
       
  2256 
       
  2257 public:
       
  2258 	/**
       
  2259 	Construct an empty TPinArgs, one which owns no resources.
       
  2260 	*/
       
  2261 	inline TPinArgs()
       
  2262 		: iReadOnly(0), iUseReserve(0), iReplacementPages(0), iNumPinnedPageTables(0), iPinnedPageTables(0)
       
  2263 		{
       
  2264 		}
       
  2265 
       
  2266 	/**
       
  2267 	Return true if this TPinArgs has at least \a aRequired number of
       
  2268 	replacement pages allocated.
       
  2269 	*/
       
  2270 	FORCE_INLINE TBool HaveSufficientPages(TUint aRequired)
       
  2271 		{
       
  2272 		return iReplacementPages>=aRequired; // note, iReplacementPages==EUseReserveForPinReplacementPages always satisfies this
       
  2273 		}
       
  2274 
       
  2275 	/**
       
  2276 	Allocate replacement pages for this TPinArgs so that it has at least
       
  2277 	\a aNumPages.
       
  2278 	*/
       
  2279 	TInt AllocReplacementPages(TUint aNumPages);
       
  2280 
       
  2281 	/**
       
  2282 	Free all replacement pages which this TPinArgs still owns.
       
  2283 	*/
       
  2284 	void FreeReplacementPages();
       
  2285 
       
  2286 #ifdef _DEBUG
       
  2287 	~TPinArgs();
       
  2288 #endif
       
  2289 
       
  2290 	/**
       
  2291 	Value used to indicate that replacement pages are to come
       
  2292 	from an already allocated reserve and don't need specially
       
  2293 	allocating.
       
  2294 	*/
       
  2295 	enum { EUseReserveForPinReplacementPages = 0xffffffffu };
       
  2296 	};
       
  2297 
       
  2298 
       
  2299 #ifdef _DEBUG
       
  2300 inline TPinArgs::~TPinArgs()
       
  2301 	{
       
  2302 	__NK_ASSERT_DEBUG(!iReplacementPages);
       
  2303 	}
       
  2304 #endif
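
/*
Example (illustrative sketch only): the life cycle of a TPinArgs during a
pinning operation. DoPinning() is a hypothetical operation which consumes
replacement pages; 'aNumPages' is an assumed input.

@code
	TInt PinExample(TUint aNumPages)
		{
		TPinArgs pinArgs;
		pinArgs.iReadOnly = ETrue;	// requester will only read the pinned memory
		TInt r = pinArgs.AllocReplacementPages(aNumPages);
		if(r==KErrNone)
			DoPinning(pinArgs);		// hypothetical pinning operation
		pinArgs.FreeReplacementPages();	// free any unused replacement pages
		return r;
		}
@endcode
*/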
       
  2305 
       
  2306 
       
  2307 /**
       
  2308 Enumeration used in various RestrictPages APIs to specify the type of restrictions to apply.
       
  2309 */
       
  2310 enum TRestrictPagesType
       
  2311 	{
       
  2312 	/**
       
  2313 	Make all mappings of page not accessible.
       
  2314 	Pinned mappings will veto this operation.
       
  2315 	*/
       
  2316 	ERestrictPagesNoAccess			 = 1,
       
  2317 
       
  2318 	/**
       
  2319 	Demand paged memory being made 'old'.
       
  2320 	Specific case of ERestrictPagesNoAccess.
       
  2321 	*/
       
  2322 	ERestrictPagesNoAccessForOldPage = ERestrictPagesNoAccess|0x80000000,
       
  2323 
       
  2324 	/**
       
  2325 	Flag indicating that, for page moving, pinned mappings always veto the moving operation.
       
  2326 	*/
       
  2327 	ERestrictPagesForMovingFlag  = 0x40000000,
       
  2328 
       
  2329 	/**
       
  2330 	Movable memory being made no-access whilst it is being copied.
       
  2331 	Special case of ERestrictPagesNoAccess where pinned mappings always veto 
       
  2332 	this operation even if they are read-only mappings.
       
  2333 	*/
       
  2334 	ERestrictPagesNoAccessForMoving  = ERestrictPagesNoAccess|ERestrictPagesForMovingFlag,
       
  2335 	};
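
/*
Illustrative sketch only: distinguishing a page-moving restriction from other
restriction types by testing the flag value above.

@code
	TBool IsRestrictForMoving(TRestrictPagesType aType)
		{
		return aType&ERestrictPagesForMovingFlag;
		}
@endcode
*/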
       
  2336 
       
  2337 #include "xmmu.h"
       
  2338 
       
  2339 #endif