kernel/eka/memmodel/epoc/moving/arm/xmmu.cpp
changeset 0 a41df078684a
     1 // Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 // e32\memmodel\epoc\moving\arm\xmmu.cpp
       
    15 // 
       
    16 //
       
    17 
       
    18 #include "arm_mem.h"
       
    19 #include <mmubase.inl>
       
    20 #include <ramcache.h>
       
    21 #include <demand_paging.h>
       
    22 #include "execs.h"
       
    23 #include <defrag.h>
       
    24 #include "cache_maintenance.h"
       
    25 
       
    26 
       
    27 extern void FlushTLBs();
       
    28 
       
    29 #if defined(__CPU_SA1__)
       
    30 const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV45PermRORO, KArmV45MemAttWB, EDomainClient);
       
    31 const TPde KShadowPdePerm			=	PT_PDE(EDomainClient);
       
    32 const TPte KPtPtePerm				=	SP_PTE(KArmV45PermRWNO, KArmV45MemAttBuf);	// page tables not cached
       
    33 const TPte KRomPtePermissions		=	SP_PTE(KArmV45PermRORO, KArmV45MemAttWB);	// ROM is cached, read-only for everyone
       
    34 const TPte KShadowPtePerm			=	SP_PTE(KArmV45PermRWRO, KArmV45MemAttWB);	// shadowed ROM is cached, supervisor writeable
       
    35 
       
    36 #elif defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
       
    37 const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV45PermRORO, KArmV45MemAttWB, EDomainClient);
       
    38 const TPde KShadowPdePerm			=	PT_PDE(EDomainClient);
       
    39 const TPte KPtPtePerm				=	SP_PTE(KArmV45PermRWNO, KArmV45MemAttWB);	// page tables cached (write-through)
       
    40 const TPte KRomPtePermissions		=	SP_PTE(KArmV45PermRORO, KArmV45MemAttWB);	// ROM is cached, read-only for everyone
       
    41 const TPte KShadowPtePerm			=	SP_PTE(KArmV45PermRWRO, KArmV45MemAttWB);	// shadowed ROM is cached, supervisor writeable
       
    42 
       
    43 #elif defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
       
    44 const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV45PermRORO, KArmV45MemAttWT, EDomainClient);
       
    45 const TPde KShadowPdePerm			=	PT_PDE(EDomainClient);
       
    46 const TPte KPtPtePerm				=	SP_PTE(KArmV45PermRWNO, KArmV45MemAttWT);	// page tables cached write through
       
    47 const TPte KRomPtePermissions		=	SP_PTE(KArmV45PermRORO, KArmV45MemAttWT);	// ROM is cached, read-only for everyone
       
    48 const TPte KShadowPtePerm			=	SP_PTE(KArmV45PermRWRO, KArmV45MemAttWT);	// shadowed ROM is cached, supervisor writeable
       
    49 
       
    50 #elif defined(__CPU_XSCALE__)
       
    51 	#ifdef __CPU_XSCALE_MANZANO__
       
    52 const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV45PermRORO, KXScaleMemAttWTRA_WBWA, EDomainClient);
       
    53 const TPde KShadowPdePerm			=	PT_PDE(EDomainClient);
       
    54 const TPte KPtPtePerm				=	SP_PTE(KArmV45PermRWNO, KXScaleMemAttWTRA_WBWA);	// page tables write-through cached
       
    55 const TPte KRomPtePermissions		=	SP_PTE(KArmV45PermRORO, KXScaleMemAttWTRA_WBWA);	// ROM is cached, read-only for everyone
       
    56 const TPte KShadowPtePerm			=	SP_PTE(KArmV45PermRWRO, KXScaleMemAttWTRA_WBWA);	// shadowed ROM is cached, supervisor writeable
       
    57 	#else
       
    58 const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV45PermRORO, KXScaleMemAttWTRA, EDomainClient);
       
    59 const TPde KShadowPdePerm			=	PT_PDE(EDomainClient);
       
    60 const TPte KPtPtePerm				=	SP_PTE(KArmV45PermRWNO, KXScaleMemAttWTRA);	// page tables write-through cached
       
    61 const TPte KRomPtePermissions		=	SP_PTE(KArmV45PermRORO, KXScaleMemAttWTRA);	// ROM is cached, read-only for everyone
       
    62 const TPte KShadowPtePerm			=	SP_PTE(KArmV45PermRWRO, KXScaleMemAttWTRA);	// shadowed ROM is cached, supervisor writeable
       
    63 	#endif
       
    64 #endif
       
    65 
       
    66 const TPte KPtInfoPtePerm = KPtPtePerm;
       
    67 const TPde KPtPdePerm = PT_PDE(EDomainClient);
       
    68 
       
    69 // Permissions for each chunk type
       
    70 enum TPTEProperties
       
    71 	{
       
    72 	ESupRo	=	SP_PTE(KArmV45PermRORO, KDefaultCaching),
       
    73 	ESupRw	=	SP_PTE(KArmV45PermRWNO, KDefaultCaching),
       
    74 	EUserRo	=	SP_PTE(KArmV45PermRWRO, KDefaultCaching),
       
    75 	EUserRw	=	SP_PTE(KArmV45PermRWRW, KDefaultCaching)
       
    76 	};
       
    77 
       
    78 LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
       
    79 	{
       
    80 	PT_PDE(EDomainClient),		// EKernelData
       
    81 	PT_PDE(EDomainClient),		// EKernelStack
       
    82 	PT_PDE(EDomainClient),		// EKernelCode
       
    83 	PT_PDE(EDomainClient),		// EDll
       
    84 	PT_PDE(EDomainClient),		// EUserCode - user/ro & sup/rw everywhere
       
    85 	PT_PDE(EDomainClient),		// ERamDrive - sup/rw accessed by domain change
       
    86 
       
    87 	// user data or self modifying code is sup/rw, user no access at home. It's user/rw & sup/rw when running
       
    88 	// note ARM MMU architecture prevents implementation of user read-only data
       
    89 	PT_PDE(EDomainClient),		// EUserData
       
    90 	PT_PDE(EDomainClient),		// EDllData
       
    91 	PT_PDE(EDomainClient),		// EUserSelfModCode
       
    92 	PT_PDE(EDomainClient),		// ESharedKernelSingle
       
    93 	PT_PDE(EDomainClient),		// ESharedKernelMultiple
       
    94 	PT_PDE(EDomainClient),		// ESharedIo
       
    95 	PT_PDE(EDomainClient),		// ESharedKernelMirror (unused in this memory model)
       
    96 	PT_PDE(EDomainClient),		// EKernelMessage
       
    97 	};
       
    98 
       
    99 const TPde KUserDataRunningPermissions = PT_PDE(EDomainVarUserRun);
       
   100 
       
   101 LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
       
   102 	{
       
   103 	ESupRw,					// EKernelData
       
   104 	ESupRw,					// EKernelStack
       
   105 	ESupRw,					// EKernelCode
       
   106 	EUserRo,				// EDll
       
   107 	EUserRo,				// EUserCode
       
   108 	ESupRw,					// ERamDrive
       
   109 	ESupRw,					// EUserData
       
   110 	ESupRw,					// EDllData
       
   111 	ESupRw,					// EUserSelfModCode
       
   112 	ESupRw,					// ESharedKernelSingle
       
   113 	ESupRw,					// ESharedKernelMultiple
       
   114 	ESupRw,					// ESharedIo
       
   115 	ESupRw,					// ESharedKernelMirror (unused in this memory model)
       
   116 	ESupRw,					// EKernelMessage
       
   117 	};
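
// Illustrative sketch (not part of the original source): both tables above are
// indexed by the chunk's TChunkType, so choosing mapping permissions for a new
// chunk reduces to two array lookups. The variable name aChunkType is assumed
// here for illustration only:
//
//   TPde pdePerm = ChunkPdePermissions[aChunkType];   // domain / PDE permissions
//   TPte ptePerm = ChunkPtePermissions[aChunkType];   // PTE access permissions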
       
   118 
       
   119 const TPte KUserCodeLoadPte = (TPte)EUserRo;
       
   120 const TPte KKernelCodeRunPte = (TPte)ESupRw;
       
   121 
       
   122 // Inline functions for simple transformations
       
   123 inline TLinAddr PageTableLinAddr(TInt aId)
       
   124 	{
       
   125 	return KPageTableBase + (aId<<KPageTableShift);
       
   126 	}
       
   127 
       
   128 inline TPte* PageTable(TInt aId)
       
   129 	{
       
   130 	return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
       
   131 	}
       
   132 
       
   133 inline TPde* PageDirectoryEntry(TLinAddr aLinAddr)
       
   134 	{
       
   135 	return PageDirectory + (aLinAddr>>KChunkShift);
       
   136 	}
       
   137 
       
   138 inline TBool IsPageTable(TPde aPde)
       
   139 	{
       
   140 	return ((aPde&KPdeTypeMask)==KArmV45PdePageTable);
       
   141 	}
       
   142 
       
   143 inline TBool IsSectionDescriptor(TPde aPde)
       
   144 	{
       
   145 	return ((aPde&KPdeTypeMask)==KArmV45PdeSection);
       
   146 	}
       
   147 
       
   148 inline TBool IsPresent(TPte aPte)
       
   149 	{
       
   150 	return (aPte&KPtePresentMask);
       
   151 	}
       
   152 
       
   153 inline TPhysAddr PageTablePhysAddr(TPde aPde)
       
   154 	{
       
   155 	return aPde & KPdePageTableAddrMask;
       
   156 	}
       
   157 
       
   158 inline TPhysAddr PhysAddrFromSectionDescriptor(TPde aPde)
       
   159 	{
       
   160 	return aPde & KPdeSectionAddrMask;
       
   161 	}
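
// Worked example (a sketch assuming the usual moving-model constants: 4K pages,
// KPageShift=12, 1MB chunks, KChunkShift=20). For aLinAddr = 0x64321ABC:
//   PDE index   = aLinAddr >> KChunkShift               = 0x643
//   PTE index   = (aLinAddr & KChunkMask) >> KPageShift = 0x21
//   page offset = aLinAddr & KPageMask                  = 0xABC
// so a full walk looks like:
//   TPde pde = PageDirectory[0x643];                       // first-level lookup
//   TPte pte = PageTable(id)[0x21];                        // id derived from pde
//   TPhysAddr pa = (pte & KPteSmallPageAddrMask) | 0xABC;  // final physical address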
       
   162 
       
   163 extern void InvalidateTLBForPage(TLinAddr /*aLinAddr*/);
       
   164 
       
   165 void Mmu::SetupInitialPageInfo(SPageInfo* aPageInfo, TLinAddr aChunkAddr, TInt aPdeIndex)
       
   166 	{
       
   167 	__ASSERT_ALWAYS(aChunkAddr==0 || aChunkAddr>=KRamDriveEndAddress, Panic(EBadInitialPageAddr));
       
   168 	TLinAddr addr = aChunkAddr + (aPdeIndex<<KPageShift);
       
   169 	if (aPageInfo->Type()!=SPageInfo::EUnused)
       
   170 		return;	// already set (page table)
       
   171 	if (addr == KPageTableInfoBase)
       
   172 		{
       
   173 		aPageInfo->SetPtInfo(0);
       
   174 		aPageInfo->Lock();
       
   175 		}
       
   176 	else if (addr>=KPageDirectoryBase && addr<(KPageDirectoryBase+KPageDirectorySize))
       
   177 		{
       
   178 		aPageInfo->SetPageDir(0,aPdeIndex);
       
   179 		aPageInfo->Lock();
       
   180 		}
       
   181 	else
       
   182 		aPageInfo->SetFixed();
       
   183 	}
       
   184 
       
   185 void Mmu::SetupInitialPageTableInfo(TInt aId, TLinAddr aChunkAddr, TInt aNumPtes)
       
   186 	{
       
   187 	__ASSERT_ALWAYS(aChunkAddr==0 || aChunkAddr>=KRamDriveEndAddress, Panic(EBadInitialPageAddr));
       
   188 	SPageTableInfo& pti=PtInfo(aId);
       
   189 	pti.iCount=aNumPtes;
       
   190 	pti.SetGlobal(aChunkAddr>>KChunkShift);
       
   191 	}
       
   192 
       
   193 TInt Mmu::GetPageTableId(TLinAddr aAddr)
       
   194 	{
       
   195 	TInt id=-1;
       
    196 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::GetPageTableId(%08x)",aAddr));
       
   197 	TInt pdeIndex=aAddr>>KChunkShift;
       
   198 	TPde pde = PageDirectory[pdeIndex];
       
   199 	if (IsPageTable(pde))
       
   200 		{
       
   201 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
       
   202 		if (pi)
       
   203 			id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
       
   204 		}
       
   205 	__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
       
   206 	return id;
       
   207 	}
       
   208 
       
   209 // Used only during boot for recovery of RAM drive
       
   210 TInt ArmMmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
       
   211 	{
       
   212 	TInt id=KErrNotFound;
       
   213 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:BootPageTableId(%08x,&)",aAddr));
       
   214 	TInt pdeIndex=aAddr>>KChunkShift;
       
   215 	TPde pde = PageDirectory[pdeIndex];
       
   216 	if (IsPageTable(pde))
       
   217 		{
       
   218 		aPtPhys = pde & KPdePageTableAddrMask;
       
   219 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
       
   220 		if (pi)
       
   221 			{
       
   222 			SPageInfo::TType type = pi->Type();
       
   223 			if (type == SPageInfo::EPageTable)
       
   224 				id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
       
   225 			else if (type == SPageInfo::EUnused)
       
   226 				id = KErrUnknown;
       
   227 			}
       
   228 		}
       
   229 	__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
       
   230 	return id;
       
   231 	}
       
   232 
       
   233 TBool ArmMmu::PteIsPresent(TPte aPte)
       
   234 	{
       
   235 	return aPte & KPtePresentMask;
       
   236 	}
       
   237 
       
   238 TPhysAddr ArmMmu::PtePhysAddr(TPte aPte, TInt aPteIndex)
       
   239 	{
       
   240 	TUint pte_type = aPte & KPteTypeMask;
       
   241 	if (pte_type == KArmV45PteLargePage)
       
   242 		return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
       
   243 	else if (pte_type != 0)
       
   244 		return aPte & KPteSmallPageAddrMask;
       
   245 	return KPhysAddrInvalid;
       
   246 	}
       
   247 
       
   248 TPhysAddr ArmMmu::PdePhysAddr(TLinAddr aAddr)
       
   249 	{
       
   250 	TPde pde = PageDirectory[aAddr>>KChunkShift];
       
   251 	if (IsSectionDescriptor(pde))
       
   252 		return PhysAddrFromSectionDescriptor(pde);
       
   253 	return KPhysAddrInvalid;
       
   254 	}
       
   255 
       
   256 TPte* SafePageTableFromPde(TPde aPde)
       
   257 	{
       
   258 	if((aPde&KPdeTypeMask)==KArmV45PdePageTable)
       
   259 		{
       
   260 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
       
   261 		if(pi)
       
   262 			{
       
   263 			TInt id = (pi->Offset()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
       
   264 			return PageTable(id);
       
   265 			}
       
   266 		}
       
   267 	return 0;
       
   268 	}
       
   269 
       
   270 TPte* SafePtePtrFromLinAddr(TLinAddr aAddress)
       
   271 	{
       
   272 	TPde pde = PageDirectory[aAddress>>KChunkShift];
       
   273 	TPte* pt = SafePageTableFromPde(pde);
       
   274 	if(pt)
       
   275 		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
       
   276 	return pt;
       
   277 	}
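
// Usage sketch (illustrative only): SafePtePtrFromLinAddr() returns 0 when the
// address is not covered by a valid page table, so callers check before
// dereferencing:
//   TPte* pPte = SafePtePtrFromLinAddr(addr);
//   if (pPte && IsPresent(*pPte))
//       {
//       // mapping exists; *pPte holds the physical frame and permission bits
//       }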
       
   278 
       
   279 #ifdef __ARMCC__
       
   280 	__forceinline /* RVCT ignores normal inline qualifier :-( */
       
   281 #else
       
   282 	inline
       
   283 #endif
       
   284 TPte* PtePtrFromLinAddr(TLinAddr aAddress)
       
   285 	{
       
   286 	TPde pde = PageDirectory[aAddress>>KChunkShift];
       
   287 	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
       
   288 	TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
       
   289 	TPte* pt = PageTable(id);
       
   290 	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
       
   291 	return pt;
       
   292 	}
       
   293 
       
   294 
       
   295 TInt ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
       
   296 	{
       
   297 	TPhysAddr physStart = ArmMmu::LinearToPhysical(aLinAddr);
       
   298 	TPhysAddr nextPhys = physStart&~KPageMask;
       
   299 
       
   300 	TUint32* pageList = aPhysicalPageList;
       
   301 
       
   302 	TInt pageIndex = aLinAddr>>KPageShift;
       
   303 	TInt pagesLeft = ((aLinAddr+aSize-1)>>KPageShift)+1 - pageIndex;
       
   304 	TPde* pdePtr = &PageDirectory[aLinAddr>>KChunkShift];
       
   305 
       
   306 	while(pagesLeft)
       
   307 		{
       
   308 		pageIndex &= KChunkMask>>KPageShift;
       
   309 		TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
       
   310 		if(pagesLeftInChunk>pagesLeft)
       
   311 			pagesLeftInChunk = pagesLeft;
       
   312 		pagesLeft -= pagesLeftInChunk;
       
   313 
       
   314 		TPhysAddr phys;
       
   315 		TPde pde = *pdePtr++;
       
   316 		TUint pdeType = pde&KPdeTypeMask;
       
   317 		if(pdeType==KArmV45PdeSection)
       
   318 			{
       
   319 			phys = (pde & KPdeSectionAddrMask) + (pageIndex*KPageSize);
       
   320 			__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Section phys=%8x",phys));
       
   321 			TInt n=pagesLeftInChunk;
       
   322 			phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
       
   323 			if(pageList)
       
   324 				{
       
   325 				TUint32* pageEnd = pageList+n;
       
   326 				do
       
   327 					{
       
   328 					*pageList++ = phys;
       
   329 					phys+=KPageSize;
       
   330 					}
       
   331 				while(pageList<pageEnd);
       
   332 				}
       
   333 			}
       
   334 		else
       
   335 			{
       
   336 			TPte* pt = SafePageTableFromPde(pde);
       
   337 			if(!pt)
       
   338 				{
       
   339 				__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical missing page table: PDE=%8x",pde));
       
   340 				return KErrNotFound;
       
   341 				}
       
   342 			pt += pageIndex;
       
   343 			for(;;)
       
   344 				{
       
   345 				TPte pte = *pt++;
       
   346 				TUint pte_type = pte & KPteTypeMask;
       
   347 				if (pte_type >= KArmV45PteSmallPage)
       
   348 					{
       
   349 					phys = (pte & KPteSmallPageAddrMask);
       
   350 					__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Small Page phys=%8x",phys));
       
   351 					phys==nextPhys ? nextPhys+=KPageSize : nextPhys=KPhysAddrInvalid;
       
   352 					if(pageList)
       
   353 						*pageList++ = phys;
       
   354 					if(--pagesLeftInChunk)
       
   355 						continue;
       
   356 					break;
       
   357 					}
       
   358 				if (pte_type == KArmV45PteLargePage)
       
   359 					{
       
   360 					--pt; // back up ptr
       
   361 					TUint pageOffset = ((TUint)pt>>2)&(KLargeSmallPageRatio-1);
       
   362 					phys = (pte & KPteLargePageAddrMask) + pageOffset*KPageSize;
       
   363 					__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Large Page phys=%8x",phys));
       
   364 					TInt n=KLargeSmallPageRatio-pageOffset;
       
   365 					if(n>pagesLeftInChunk)
       
   366 						n = pagesLeftInChunk;
       
   367 					phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
       
   368 					if(pageList)
       
   369 						{
       
   370 						TUint32* pageEnd = pageList+n;
       
   371 						do
       
   372 							{
       
   373 							*pageList++ = phys;
       
   374 							phys+=KPageSize;
       
   375 							}
       
   376 						while(pageList<pageEnd);
       
   377 						}
       
   378 					pt += n;
       
   379 					if(pagesLeftInChunk-=n)
       
   380 						continue;
       
   381 					break;
       
   382 					}
       
   383 				__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical bad PTE %8x",pte));
       
   384 				return KErrNotFound;
       
   385 				}
       
   386 			}
       
   387 		if(!pageList && nextPhys==KPhysAddrInvalid)
       
   388 			{
       
   389 			__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical not contiguous"));
       
   390 			return KErrNotFound;
       
   391 			}
       
   392 		pageIndex = 0;
       
   393 		}
       
   394 
       
   395 	if(nextPhys==KPhysAddrInvalid)
       
   396 		{
       
   397 		// Memory is discontiguous...
       
   398 		aPhysicalAddress = KPhysAddrInvalid;
       
   399 		return 1;
       
   400 		}
       
   401 	else
       
   402 		{
       
   403 		// Memory is contiguous...
       
   404 		aPhysicalAddress = physStart;
       
   405 		return KErrNone;
       
   406 		}
       
   407 	}
       
   408 
       
   409 TPhysAddr ArmMmu::LinearToPhysical(TLinAddr aLinAddr)
       
   410 	{
       
   411 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical(%08x)",aLinAddr));
       
   412 	TPhysAddr phys = KPhysAddrInvalid;
       
   413 	TPde pde = PageDirectory[aLinAddr>>KChunkShift];
       
   414 	if (IsPageTable(pde))
       
   415 		{
       
   416 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
       
   417 		if (pi)
       
   418 			{
       
   419 			TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
       
   420 			TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift;
       
   421 			TPte pte = PageTable(id)[pteIndex];
       
   422 			TUint pte_type = pte & KPteTypeMask;
       
   423 			if (pte_type == KArmV45PteLargePage)
       
   424 				{
       
   425 				phys = (pte & KPteLargePageAddrMask) + (aLinAddr & KLargePageMask);
       
   426 				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with 64K page - returning %08x", phys));
       
   427 				}
       
   428 			else if (pte_type != 0)
       
   429 				{
       
   430 				phys = (pte & KPteSmallPageAddrMask) + (aLinAddr & KPageMask);
       
   431 				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with 4K page - returning %08x", phys));
       
   432 				}
       
   433 			}
       
   434 		}
       
   435 	else if (IsSectionDescriptor(pde))
       
   436 		{
       
   437 		phys = (pde & KPdeSectionAddrMask) + (aLinAddr & KChunkMask);
       
   438 		__KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x", phys));
       
   439 		}
       
   440 	else
       
   441 		{
       
   442 		__KTRACE_OPT(KMMU,Kern::Printf("Address invalid"));
       
   443 		}
       
   444 	return phys;
       
   445 	}
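
// Caller-side sketch (an assumption, not from the original source): the
// single-address overload reports unmapped addresses with KPhysAddrInvalid,
// so typical callers check the result before using it:
//   TPhysAddr pa = LinearToPhysical(linAddr);
//   if (pa == KPhysAddrInvalid)
//       return KErrNotFound;    // no section or page mapping for linAddr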
       
   446 
       
   447 TInt ArmMmu::PreparePagesForDMA(TLinAddr aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
       
   448 //Returns the list of physical pages belonging to the specified memory space.
       
   449 //Checks these pages belong to a chunk marked as being trusted. 
       
    450 //Locks these pages so they cannot be moved by e.g. RAM defragmentation.
       
   451 	{
       
   452 	SPageInfo* pi = NULL;
       
   453 	DChunk* chunk = NULL;
       
   454 	TInt err = KErrNone;
       
   455 	
       
    456 	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA %08x+%08x",aLinAddr,aSize));
       
   457 
       
   458 	TUint32* pageList = aPhysicalPageList;
       
   459 	TInt pagesInList = 0;				//The number of pages we put in the list so far
       
   460 	
       
   461 	TInt pageIndex = (aLinAddr & KChunkMask) >> KPageShift;	// Index of the page within the section
       
   462 	TInt pagesLeft = ((aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
       
   463 
       
   464 	MmuBase::Wait(); 	// RamAlloc Mutex for accessing page/directory tables.
       
    465 	NKern::LockSystem();	// System lock for accessing SPageInfo objects.
       
   466 	
       
   467 	TPde* pdePtr = PageDirectory + (aLinAddr>>KChunkShift);
       
   468 	
       
   469 	while(pagesLeft)
       
   470 		{
       
   471 		TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
       
   472 		if(pagesLeftInChunk>pagesLeft)
       
   473 			pagesLeftInChunk = pagesLeft;
       
   474 		
       
   475 		pagesLeft -= pagesLeftInChunk;
       
   476 
       
   477 		TPte* pt = SafePageTableFromPde(*pdePtr++);
       
   478 		if(!pt) { err = KErrNotFound; goto fail; }// Cannot get page table.
       
   479 
       
   480 		pt += pageIndex;
       
   481 
       
   482 		for(;pagesLeftInChunk--;)
       
   483 			{
       
   484 			TPhysAddr phys = (*pt++ & KPteSmallPageAddrMask);
       
   485 			pi =  SPageInfo::SafeFromPhysAddr(phys);
       
   486 			if(!pi)	{ err = KErrNotFound; goto fail; }// Invalid address
       
   487 
       
   488 			__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: PA:%x T:%x S:%x O:%x C:%x",phys, pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
       
   489 			if (chunk==NULL)
       
   490 				{//This is the first page. Check 'trusted' bit.
       
   491 				if (pi->Type()!= SPageInfo::EChunk)
       
    492 					{ err = KErrAccessDenied; goto fail; }// The first page does not belong to a chunk.
       
   493 
       
   494 				chunk = (DChunk*)pi->Owner();
       
   495 				if ( (chunk == NULL) || ((chunk->iAttributes & DChunk::ETrustedChunk)== 0) )
       
   496 					{ err = KErrAccessDenied; goto fail; } // Not a trusted chunk
       
   497 				}
       
   498 			pi->Lock();
       
   499 
       
   500 			*pageList++ = phys;
       
   501 			if ( (++pagesInList&127) == 0) //release system lock temporarily on every 512K
       
   502 				NKern::FlashSystem();
       
   503 			}
       
   504 		pageIndex = 0;
       
   505 		}
       
   506 
       
   507 	if (pi->Type()!= SPageInfo::EChunk)
       
    508 		{ err = KErrAccessDenied; goto fail; }// The last page does not belong to a chunk.
       
   509 
       
   510 	if (chunk && (chunk != (DChunk*)pi->Owner()))
       
    511 		{ err = KErrArgument; goto fail; }// The first and last pages do not belong to the same chunk.
       
   512 
       
   513 	NKern::UnlockSystem();
       
   514 	MmuBase::Signal();
       
   515 	return KErrNone;
       
   516 
       
   517 fail:
       
   518 	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA failed"));
       
   519 	NKern::UnlockSystem();
       
   520 	MmuBase::Signal();
       
   521 	ReleasePagesFromDMA(aPhysicalPageList, pagesInList);
       
   522 	return err;
       
   523 	}
       
   524 
       
   525 TInt ArmMmu::ReleasePagesFromDMA(TPhysAddr* aPhysicalPageList, TInt aPageCount)
       
   526 // Unlocks physical pages.
       
   527 // @param aPhysicalPageList - points to the list of physical pages that should be released.
       
   528 // @param aPageCount		- the number of physical pages in the list.
       
   529 	{
       
   530 	NKern::LockSystem();
       
   531 	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::ReleasePagesFromDMA count:%d",aPageCount));
       
   532 
       
   533 	while (aPageCount--)
       
   534 		{
       
   535 		SPageInfo* pi =  SPageInfo::SafeFromPhysAddr(*aPhysicalPageList++);
       
   536 		if(!pi)
       
   537 			{
       
   538 			NKern::UnlockSystem();
       
   539 			return KErrArgument;
       
   540 			}
       
   541 		__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: T:%x S:%x O:%x C:%x",pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
       
   542 		pi->Unlock();
       
   543 		}
       
   544 	NKern::UnlockSystem();
       
   545 	return KErrNone;
       
   546 	}
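
// Driver-side sketch (assumed usage pattern; buffer and page-count names are
// placeholders): PreparePagesForDMA() and ReleasePagesFromDMA() are used as a
// pair around a transfer on a trusted chunk:
//   TPhysAddr pages[KMaxDmaPages];                       // KMaxDmaPages assumed
//   TInt r = PreparePagesForDMA(buf, size, pages);       // locks pages, fills list
//   if (r == KErrNone)
//       {
//       // ... program the DMA controller with the physical page list ...
//       ReleasePagesFromDMA(pages, ((buf & KPageMask) + size + KPageMask) >> KPageShift);
//       }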
       
   547 
       
   548 
       
   549 void ArmMmu::Init1()
       
   550 	{
       
   551 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::Init1"));
       
   552 
       
   553 	// MmuBase data
       
   554 	iPageSize=KPageSize;
       
   555 	iPageMask=KPageMask;
       
   556 	iPageShift=KPageShift;
       
   557 	iChunkSize=KChunkSize;
       
   558 	iChunkMask=KChunkMask;
       
   559 	iChunkShift=KChunkShift;
       
   560 	iPageTableSize=KPageTableSize;
       
   561 	iPageTableMask=KPageTableMask;
       
   562 	iPageTableShift=KPageTableShift;
       
   563 	iPtClusterSize=KPtClusterSize;
       
   564 	iPtClusterMask=KPtClusterMask;
       
   565 	iPtClusterShift=KPtClusterShift;
       
   566 	iPtBlockSize=KPtBlockSize;
       
   567 	iPtBlockMask=KPtBlockMask;
       
   568 	iPtBlockShift=KPtBlockShift;
       
   569 	iPtGroupSize=KChunkSize/KPageTableSize;
       
   570 	iPtGroupMask=iPtGroupSize-1;
       
   571 	iPtGroupShift=iChunkShift-iPageTableShift;
       
   572 	//TInt* iPtBlockCount;		// dynamically allocated - Init2
       
   573 	//TInt* iPtGroupCount;		// dynamically allocated - Init2
       
   574 	iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
       
   575 	iPageTableLinBase=KPageTableBase;
       
   576 	//iRamPageAllocator;		// dynamically allocated - Init2
       
   577 	//iAsyncFreeList;			// dynamically allocated - Init2
       
   578 	//iPageTableAllocator;		// dynamically allocated - Init2
       
   579 	//iPageTableLinearAllocator;// dynamically allocated - Init2
       
   580 	iPtInfoPtePerm=KPtInfoPtePerm;
       
   581 	iPtPtePerm=KPtPtePerm;
       
   582 	iPtPdePerm=KPtPdePerm;
       
   583 	iTempAddr=KTempAddr;
       
   584 	iSecondTempAddr=KSecondTempAddr;
       
   585 	iMapSizes=KPageSize|KLargePageSize|KChunkSize;
       
   586 	iRomLinearBase = ::RomHeaderAddress;
       
   587 	iRomLinearEnd = KRomLinearEnd;
       
   588 	iShadowPtePerm = KShadowPtePerm;
       
   589 	iShadowPdePerm = KShadowPdePerm;
       
   590 
       
   591 	// Mmu data
       
   592 	TInt total_ram=TheSuperPage().iTotalRamSize;
       
   593 
       
   594 #if defined(__HAS_EXTERNAL_CACHE__) 
       
    595 	// L2 cache on ARMv5 is always in write-back mode => must always be purged
       
   596 	iDecommitThreshold = CacheMaintenance::SyncAllPerformanceThresholdPages();
       
   597 #else
       
    598 	iDecommitThreshold = 0;	// no cache consistency issues on decommit
       
   599 #endif
       
   600 
       
   601 	iDataSectionBase = KDataSectionBase;
       
   602 	iDataSectionEnd = KDataSectionEnd;
       
   603 	iMaxDllDataSize=Min(total_ram/2, 0x08000000);					// phys RAM/2 up to 128Mb
       
   604 	iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask;		// round up to chunk size
       
   605 	iMaxUserCodeSize=Min(total_ram, 0x10000000);					// phys RAM up to 256Mb
       
   606 	iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask;		// round up to chunk size
       
   607 	iMaxKernelCodeSize=Min(total_ram/2, 0x04000000);				// phys RAM/2 up to 64Mb
       
   608 	iMaxKernelCodeSize=(iMaxKernelCodeSize+iChunkMask)&~iChunkMask;	// round up to chunk size
       
   609 	iPdeBase=KPageDirectoryBase;
       
   610 	iUserCodeLoadPtePerm=KUserCodeLoadPte;
       
   611 	iKernelCodePtePerm=KKernelCodeRunPte;
       
   612 	iDllDataBase = KDataSectionEnd - iMaxDllDataSize;
       
   613 	iUserCodeBase = KPageInfoLinearBase - iMaxUserCodeSize;
       
   614 	iKernelCodeBase = iUserCodeBase - iMaxKernelCodeSize;
       
   615 
       
   616 	__KTRACE_OPT(KMMU,Kern::Printf("DDS %08x UCS %08x KCS %08x", iMaxDllDataSize, iMaxUserCodeSize, iMaxKernelCodeSize));
       
   617 	__KTRACE_OPT(KMMU,Kern::Printf("DDB %08x KCB %08x UCB %08x RLB %08x", iDllDataBase, iKernelCodeBase, iUserCodeBase, iRomLinearBase));
       
   618 
       
   619 	// ArmMmu data
       
   620 
       
   621 	// other
       
   622 	PP::MaxUserThreadStack=0x14000;			// 80K - STDLIB asks for 64K for PosixServer!!!!
       
   623 	PP::UserThreadStackGuard=0x2000;		// 8K
       
   624 	PP::MaxStackSpacePerProcess=0x200000;	// 2Mb
       
   625 	K::SupervisorThreadStackSize=0x1000;	// 4K
       
   626 	PP::SupervisorThreadStackGuard=0x1000;	// 4K
       
   627 	K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
       
   628 	PP::RamDriveStartAddress=KRamDriveStartAddress;
       
   629 	PP::RamDriveRange=KRamDriveMaxSize;
       
   630 	PP::RamDriveMaxSize=KRamDriveMaxSize;	// may be reduced later
       
   631 
       
   632 	__KTRACE_OPT(KBOOT,Kern::Printf("K::MaxMemCopyInOneGo=0x%x",K::MaxMemCopyInOneGo));
       
   633 	K::MemModelAttributes=EMemModelTypeMoving|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
       
   634 						EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSupportFixed|
       
   635 						EMemModelAttrSvKernProt|EMemModelAttrIPCKernProt;
       
   636 
       
   637 	Arm::DefaultDomainAccess=KDefaultDomainAccess;
       
   638 
       
   639 	// Domains 0-3 are preallocated
       
   640 	// 0=Variable user running, 1=Client, 2=Page tables, 3=RAM drive
       
   641 	Domains=(~(0xffffffffu<<ENumDomains))&0xfffffff0u;
       
   642 
       
   643 	iMaxPageTables = 1<<(32-KChunkShift);		// possibly reduced when RAM size known
       
   644 
       
   645 	Mmu::Init1();
       
   646 	}
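
// Worked example (illustrative arithmetic only): with total_ram = 64MB the sizing
// code above yields iMaxDllDataSize = Min(32MB,128MB) = 32MB, iMaxUserCodeSize =
// Min(64MB,256MB) = 64MB and iMaxKernelCodeSize = Min(32MB,64MB) = 32MB (all
// already chunk-aligned), so the linear bases stack downwards:
//   iDllDataBase    = KDataSectionEnd     - 0x02000000;   // 32MB DLL data window
//   iUserCodeBase   = KPageInfoLinearBase - 0x04000000;   // 64MB user code window
//   iKernelCodeBase = iUserCodeBase       - 0x02000000;   // 32MB kernel code window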
       
   647 
       
   648 void ArmMmu::DoInit2()
       
   649 	{
       
   650 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::DoInit2"));
       
   651 	iTempPte=PageTable(GetPageTableId(iTempAddr))+((iTempAddr&KChunkMask)>>KPageShift);
       
   652 	iSecondTempPte=PageTable(GetPageTableId(iSecondTempAddr))+((iSecondTempAddr&KChunkMask)>>KPageShift);
       
   653 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x", iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
       
   654 	CreateKernelSection(iKernelCodeBase, KPageShift);
       
   655 	iHomePdeMap=(TUint32*)Kern::AllocZ(-KSuperPageLinAddr>>KChunkShift<<2);
       
   656 	iHomePdeMap=(TUint32*)((TUint32)iHomePdeMap-(KSuperPageLinAddr>>KChunkShift<<2)); //adjust the pointer so it's indexed by address>>20
       
   657 #if defined(__CPU_WRITE_BACK_CACHE)
       
   658 #if defined(__CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH)
       
   659 	if (InternalCache::Info[KCacheInfoD].iLineLength == 32)
       
   660 		iCopyPageFn = &::CopyPageForRemap32;
       
   661 	else if (InternalCache::Info[KCacheInfoD].iLineLength == 16)
       
   662 		iCopyPageFn = &::CopyPageForRemap16;
       
   663 	else
       
   664 		Panic(ENoCopyPageFunction);		
       
   665 #else
       
   666 #error Write-back cache without single entry dcache flush is not supported
       
   667 #endif
       
    668 #else // !__CPU_WRITE_BACK_CACHE
       
   669 	iCopyPageFn = &::CopyPageForRemapWT;
       
   670 #endif
       
   671 	Mmu::DoInit2();
       
   672 	}
       
   673 
       
   674 #ifndef __MMU_MACHINE_CODED__
       
   675 void ArmMmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
       
   676 //
       
   677 // Map a list of physical RAM pages into a specified page table with specified PTE permissions.
       
   678 // Update the page information array.
       
   679 // Call this with the system locked.
       
   680 //
       
   681 	{
       
   682 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
       
   683 			aId, aType, aPtr, aOffset, aNumPages, aPtePerm));
       
   684 
       
   685 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
   686 	ptinfo.iCount+=aNumPages;
       
   687 	aOffset>>=KPageShift;
       
   688 	TInt ptOffset=aOffset & KPagesInPDEMask;				// entry number in page table
       
   689 	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
       
   690 	while(aNumPages--)
       
   691 		{
       
   692 		TPhysAddr pa = *aPageList++;
       
   693 		*pPte++ =  pa | aPtePerm;					// insert PTE
       
   694 		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
       
   695 		if (aType!=SPageInfo::EInvalid)
       
   696 			{
       
   697 			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
       
   698 			if(pi)
       
   699 				{
       
   700 				pi->Set(aType,aPtr,aOffset);
       
   701 				__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
       
   702 				++aOffset;	// increment offset for next page
       
   703 				}
       
   704 			}
       
   705 		}
       
   706 	__DRAIN_WRITE_BUFFER;
       
   707 	}
       
   708 
       
   709 void ArmMmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
       
   710 //
       
   711 // Map consecutive physical pages into a specified page table with specified PTE permissions.
       
   712 // Update the page information array if RAM pages are being mapped.
       
   713 // Call this with the system locked.
       
   714 //
       
   715 	{
       
   716 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
       
   717 			aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
       
   718 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
   719 	ptinfo.iCount+=aNumPages;
       
   720 	aOffset>>=KPageShift;
       
   721 	TInt ptOffset=aOffset & KPagesInPDEMask;				// entry number in page table
       
   722 	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
       
   723 	SPageInfo* pi;
       
   724 	if(aType==SPageInfo::EInvalid)
       
   725 		pi = NULL;
       
   726 	else
       
   727 		pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
       
   728 	while(aNumPages--)
       
   729 		{
       
   730 		*pPte++ = aPhysAddr|aPtePerm;						// insert PTE
       
   731 		aPhysAddr+=KPageSize;
       
   732 		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
       
   733 		if (pi)
       
   734 			{
       
   735 			pi->Set(aType,aPtr,aOffset);
       
   736 			__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
       
   737 			++aOffset;	// increment offset for next page
       
   738 			++pi;
       
   739 			}
       
   740 		}
       
   741 	__DRAIN_WRITE_BUFFER;
       
   742 	}
       
   743 
       
   744 void ArmMmu::MapVirtual(TInt aId, TInt aNumPages)
       
   745 //
       
   746 // Called in place of MapRamPages or MapPhysicalPages to update mmu data structures when committing
       
   747 // virtual address space to a chunk.  No pages are mapped.
       
   748 // Call this with the system locked.
       
   749 //
       
   750 	{
       
   751 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
   752 	ptinfo.iCount+=aNumPages;
       
   753 	}
       
   754 
       
   755 void ArmMmu::RemapPage(TInt aId, TUint32 aAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm, DProcess* /*aProcess*/)
       
   756 //
       
   757 // Replace the mapping at address aAddr in page table aId.
       
   758 // Update the page information array for both the old and new pages.
       
   759 // Call this with the system locked.
       
   760 //
       
   761 	{
       
    762 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPage() id=%d addr=%08x old=%08x new=%08x perm=%08x", aId, aAddr, aOldAddr, aNewAddr, aPtePerm));
       
   763 
       
   764 	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;			// entry number in page table
       
   765 	TPte* pPte=PageTable(aId)+ptOffset;						// address of PTE
       
   766 	TPte pte=*pPte;
       
   767 
       
   768 	TUint pageType = (pte & KPteTypeMask);
       
   769 	if (pageType == KArmPteSmallPage || pageType == 0)
       
   770 		{
       
   771 		__ASSERT_ALWAYS((pte & KPteSmallPageAddrMask) == aOldAddr || pte==KPteNotPresentEntry, Panic(ERemapPageFailed));
       
   772 		SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
       
   773 		__ASSERT_DEBUG(oldpi->LockCount()==0,Panic(ERemapPageFailed));
       
   774 
       
   775 		// remap page
       
   776 		*pPte = aNewAddr | aPtePerm;					// overwrite PTE
       
   777 		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",*pPte,pPte));
       
   778 		__DRAIN_WRITE_BUFFER;
       
   779 		InvalidateTLBForPage(aAddr);		// flush any corresponding TLB entry
       
   780 
       
   781 		// update new pageinfo, clear old
       
   782 		SPageInfo* pi = SPageInfo::FromPhysAddr(aNewAddr);
       
   783 		pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
       
   784 		oldpi->SetUnused();
       
   785 		}
       
   786 	else
       
   787 		{
       
    788 		__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPage() called on a non-4K page!"));
       
   789 		Panic(ERemapPageFailed);
       
   790 		}
       
   791 	}
       
   792 
       
   793 void ArmMmu::RemapKernelPage(TInt aId, TLinAddr aSrc, TLinAddr aDest, TPhysAddr aNewPhys, TPte aPtePerm)
       
   794 //
       
    795 // Replace the mapping at address aSrc in page table aId.
       
   796 // Called with the system locked.
       
   797 // MUST NOT INVOKE ANY TRACING - or do anything else that might touch the kernel heap
       
   798 // We are depending on this not reintroducing any of the cache lines we previously
       
   799 // invalidated.
       
   800 //
       
   801 	{
       
   802 	TInt ptOffset=(aSrc&KChunkMask)>>KPageShift;			// entry number in page table
       
   803 	TPte* pPte=PageTable(aId)+ptOffset;						// address of PTE
       
   804 
       
   805 	TInt irq = NKern::DisableAllInterrupts();
       
   806 	CopyPageForRemap(aDest, aSrc);
       
   807 	*pPte = aNewPhys | aPtePerm;					// overwrite PTE
       
   808 	__DRAIN_WRITE_BUFFER;
       
   809 	InvalidateTLBForPage(aSrc);		// flush any corresponding TLB entry
       
   810 	NKern::RestoreInterrupts(irq);
       
   811 	}
       
   812 
       
   813 TInt ArmMmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess*)
       
   814 //
       
   815 // Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
       
   816 // pages into aPageList, and count of unmapped pages into aNumPtes.
       
   817 // Return number of pages still mapped using this page table.
       
   818 // Call this with the system locked.
       
   819 // @param aId 			Identifies Page Table to unmap PTEs(Page Table Entries) from.
       
    820 // @param aAddr 		Base Virtual Address of the region to unmap. It (indirectly) specifies the first PTE in this Page Table to unmap.
       
   821 // @param aNumPages 	The number of consecutive PTEs to unmap.
       
   822 // @param aPageList 	Points to pre-allocated array. On return, it is filled in with the list of physical addresses of the unmapped 4K
       
   823 //						memory blocks.
       
    824 // @param aSetPagesFree	If true, pages are placed in the free state and only mapped pages are added
       
   825 //						to aPageList.
       
   826 // @param aNumPtes		On return, indicates how many PTEs are unmapped.
       
    827 // @param aNumFree		On return, holds the number of freed 4K memory blocks. Not updated if aSetPagesFree is false.
       
   828 // @return 				The number of PTEs still mapped in this Page Table (aId).
       
   829 	{
       
   830 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapPages() id=%d addr=%08x n=%d pl=%08x set-free=%d",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
       
   831 	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;			// entry number in page table
       
   832 	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
       
   833 	TInt np=0;
       
   834 	TInt nf=0;
       
   835 	while(aNumPages--)
       
   836 		{
       
   837 		TPte pte=*pPte;							// get original PTE
       
   838 		*pPte++=0;								// clear PTE
       
   839 		TUint pageType = (pte & KPteTypeMask);
       
   840 		if (pageType == KArmPteSmallPage)
       
   841 			InvalidateTLBForPage(aAddr);		// flush any corresponding TLB entry
       
   842 		if (pageType == KArmPteSmallPage || (pageType == 0 && pte != KPteNotPresentEntry))
       
   843 			{
       
   844 			++np;								// count unmapped pages
       
   845 			TPhysAddr pa=pte & KPteSmallPageAddrMask;	// physical address of unmapped page
       
   846 			if (aSetPagesFree)
       
   847 				{
       
   848 				SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
       
   849 				__NK_ASSERT_DEBUG(pageType == KArmPteSmallPage ||
       
   850 								  (pi->Type()==SPageInfo::EPagedCode && pi->State()==SPageInfo::EStatePagedOld));
       
   851 				if(iRamCache->PageUnmapped(pi))
       
   852 					{
       
   853 					pi->SetUnused();					// mark page as unused
       
   854 					if (pi->LockCount()==0)
       
   855 						{
       
   856 						*aPageList++=pa;			// store in page list
       
   857 						++nf;						// count free pages
       
   858 						}
       
   859 					}
       
   860 				}
       
   861 			else
       
   862 				*aPageList++=pa;				// store in page list
       
   863 			}
       
   864 		aAddr+=KPageSize;
       
   865 		}
       
   866 	aNumPtes=np;
       
   867 	aNumFree=nf;
       
   868 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
   869 	TInt r=(ptinfo.iCount-=np);
       
   870 	__DRAIN_WRITE_BUFFER;
       
   871 	__KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
       
   872 	return r;								// return number of pages remaining in this page table
       
   873 	}
       
   874 #endif
       
   875 
       
   876 TInt ArmMmu::UnmapVirtual(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
       
   877 //
       
   878 // Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
       
   879 // pages into aPageList, and count of unmapped pages into aNumPtes.
       
   880 // Adjust the page table reference count as if aNumPages pages were unmapped.
       
   881 // Return number of pages still mapped using this page table.
       
   882 // Call this with the system locked.
       
   883 //
       
   884 	{
       
   885 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
   886 	TInt newCount = ptinfo.iCount - aNumPages;
       
   887 	UnmapPages(aId, aAddr, aNumPages, aPageList, aSetPagesFree, aNumPtes, aNumFree, aProcess);
       
   888 	ptinfo.iCount = newCount;
       
   889 	aNumPtes = aNumPages;
       
   890 	return newCount;
       
   891 	}
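
// Caller-side sketch (assumed behaviour of callers, not from the original source):
// the value returned by UnmapPages()/UnmapVirtual() is the page table's remaining
// use count, so a caller can detect when the table becomes empty:
//   TInt remain = UnmapPages(id, addr, n, pageList, ETrue, nPtes, nFree, NULL);
//   if (remain == 0)
//       {
//       // page table no longer maps anything - candidate for DoUnassignPageTable()
//       }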
       
   892    
       
   893 
       
   894 #ifndef __MMU_MACHINE_CODED__
       
   895 void ArmMmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm)
       
   896 //
       
   897 // Assign an allocated page table to map a given linear address with specified permissions.
       
   898 // This should be called with the system locked and the MMU mutex held.
       
   899 //
       
   900 	{
       
   901 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoAssignPageTable %d to %08x perm %08x",aId,aAddr,aPdePerm));
       
   902 	TLinAddr ptLin=PageTableLinAddr(aId);
       
   903 	TPhysAddr ptPhys=LinearToPhysical(ptLin);
       
   904 	TInt pdeIndex=TInt(aAddr>>KChunkShift);
       
   905 	PageDirectory[pdeIndex]=ptPhys|aPdePerm;
       
   906 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", ptPhys|aPdePerm, PageDirectory+pdeIndex));
       
   907 	__DRAIN_WRITE_BUFFER;
       
   908 	}
       
   909 
       
   910 void ArmMmu::RemapPageTable(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr)
       
   911 //
       
   912 // Replace a page table mapping the specified linear address.
       
   913 // This should be called with the system locked and the MMU mutex held.
       
   914 //
       
   915 	{
       
   916 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTable %08x to %08x at %08x",aOld,aNew,aAddr));
       
   917 	TInt pdeIndex=TInt(aAddr>>KChunkShift);
       
   918 	TPde pde=PageDirectory[pdeIndex];
       
   919 	__ASSERT_ALWAYS((pde & KPdePageTableAddrMask) == aOld, Panic(ERemapPageTableFailed));
       
   920 	TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
       
   921 	PageDirectory[pdeIndex]=newPde;
       
   922 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newPde, PageDirectory+pdeIndex));
       
   923 	__DRAIN_WRITE_BUFFER;
       
   924 	}
       
   925 
       
   926 void ArmMmu::DoUnassignPageTable(TLinAddr aAddr)
       
   927 //
       
   928 // Unassign a now-empty page table currently mapping the specified linear address.
       
   929 // We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
       
   930 // This should be called with the system locked and the MMU mutex held.
       
   931 //
       
   932 	{
       
   933 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoUnassignPageTable at %08x",aAddr));
       
   934 	TInt pdeIndex=TInt(aAddr>>KChunkShift);
       
   935 	PageDirectory[pdeIndex]=0;
       
   936 	__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x", PageDirectory+pdeIndex));
       
   937 	__DRAIN_WRITE_BUFFER;
       
   938 	}
       
   939 #endif
       
   940 
       
   941 // Initialise page table at physical address aXptPhys to be used as page table aXptId
       
   942 // to expand the virtual address range used for mapping page tables. Map the page table
       
   943 // at aPhysAddr as page table aId using the expanded range.
       
   944 // Assign aXptPhys to kernel's Page Directory.
       
   945 // Called with system unlocked and MMU mutex held.
       
   946 void ArmMmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)
       
   947 	{
       
   948 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x",
       
   949 						aXptId, aXptPhys, aId, aPhysAddr));
       
   950 	
       
   951 	// put in a temporary mapping for aXptPhys
       
   952 	// make it noncacheable
       
   953 	TPhysAddr pa=aXptPhys&~KPageMask;
       
   954 	*iTempPte = pa | SP_PTE(KArmV45PermRWNO, KMemAttNC);
       
   955 	__DRAIN_WRITE_BUFFER;
       
   956 
       
   957 	// clear XPT
       
   958 	TPte* xpt=(TPte*)(iTempAddr+(aXptPhys&KPageMask));
       
   959 	memclr(xpt, KPageTableSize);
       
   960 
       
   961 	// must in fact have aXptPhys and aPhysAddr in same physical page
       
   962 	__ASSERT_ALWAYS( TUint32(aXptPhys^aPhysAddr)<TUint32(KPageSize), MM::Panic(MM::EBootstrapPageTableBadAddr));
       
   963 
       
   964 	// so only need one mapping
       
   965 	xpt[(aXptId>>KPtClusterShift)&KPagesInPDEMask] = pa | KPtPtePerm;
       
   966 
       
   967 	// remove temporary mapping
       
   968 	*iTempPte=0;
       
   969 	__DRAIN_WRITE_BUFFER;
       
   970 	InvalidateTLBForPage(iTempAddr);
       
   971 
       
   972 	// initialise PtInfo...
       
   973 	TLinAddr xptAddr = PageTableLinAddr(aXptId);
       
   974 	iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift);
       
   975 
       
   976 	// map xpt...
       
   977 	TInt pdeIndex=TInt(xptAddr>>KChunkShift);
       
   978 	NKern::LockSystem();
       
   979 	PageDirectory[pdeIndex]=aXptPhys|KPtPdePerm;
       
   980 	__DRAIN_WRITE_BUFFER;
       
   981 	NKern::UnlockSystem();				
       
   982 	}
       
   983 
       
   984 // Edit the self-mapping entry in page table aId, mapped at aTempMap, to
       
   985 // change the physical address from aOld to aNew. Used when moving page
       
   986 // tables which were created by BootstrapPageTable.
       
   987 // Called with system locked and MMU mutex held.
       
   988 void ArmMmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew)
       
   989 	{
       
   990 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::FixupXPageTable id=%04x, tempmap=%08x, old=%08x, new=%08x",
       
   991 						aId, aTempMap, aOld, aNew));
       
   992 	
       
   993 	// find correct page table inside the page
       
   994 	TPte* xpt=(TPte*)(aTempMap + ((aId & KPtClusterMask) << KPageTableShift));
       
   995 	// find the pte in that page table
       
   996 	xpt += (aId>>KPtClusterShift)&KPagesInPDEMask;
       
   997 
       
   998 	// switch the mapping
       
   999 	__ASSERT_ALWAYS((*xpt&~KPageMask)==aOld, Panic(EFixupXPTFailed));
       
  1000 	*xpt = aNew | KPtPtePerm;
       
  1001 
       
  1002 	// invalidate the TLB entry for the self-mapping page table
       
  1003 	// the PDE has not yet been changed, but since we hold the
       
  1004 	// system lock, nothing should bring this back into the TLB.
       
  1005 	InvalidateTLBForPage(PageTableLinAddr(aId));
       
  1006 	}
       
  1007 
       
  1008 // Set up a page table (specified by aId) to map a 1Mb section of ROM containing aRomAddr
       
  1009 // using ROM at aOrigPhys.
       
  1010 void ArmMmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
       
  1011 	{
       
  1012 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x",
       
  1013 		aId, aRomAddr, aOrigPhys));
       
  1014 	TPte* ppte = PageTable(aId);
       
  1015 	TPte* ppte_End = ppte + KChunkSize/KPageSize;
       
  1016 	TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask);
       
  1017 	for (; ppte<ppte_End; ++ppte, phys+=KPageSize)
       
  1018 		*ppte = phys | KRomPtePermissions;
       
  1019 	__DRAIN_WRITE_BUFFER;
       
  1020 	}
       
  1021 
       
  1022 // Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys
       
  1023 void ArmMmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)
       
  1024 	{
       
  1025 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x",
       
  1026 		aShadowPhys, aRomAddr));
       
  1027 
       
  1028 	// put in a temporary mapping for aShadowPhys
       
  1029 	// make it noncacheable
       
  1030 	*iTempPte = aShadowPhys | SP_PTE(KArmV45PermRWNO, KMemAttNC);
       
  1031 	__DRAIN_WRITE_BUFFER;
       
  1032 
       
  1033 	// copy contents of ROM
       
  1034 	wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize );
       
  1035 	__DRAIN_WRITE_BUFFER;	// make sure contents are written to memory
       
  1036 
       
  1037 	// remove temporary mapping
       
  1038 	*iTempPte=0;
       
  1039 	__DRAIN_WRITE_BUFFER;
       
  1040 	InvalidateTLBForPage(iTempAddr);
       
  1041 	}
       
  1042 
       
  1043 // Assign a shadow page table to replace a ROM section mapping
       
  1044 // Enter and return with system locked
       
  1045 void ArmMmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)
       
  1046 	{
       
  1047 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:AssignShadowPageTable aId=%04x aRomAddr=%08x",
       
  1048 		aId, aRomAddr));
       
  1049 	TLinAddr ptLin=PageTableLinAddr(aId);
       
  1050 	TPhysAddr ptPhys=LinearToPhysical(ptLin);
       
  1051 	TPde* ppde = PageDirectory + (aRomAddr>>KChunkShift);
       
  1052 	TPde newpde = ptPhys | KShadowPdePerm;
       
  1053 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
       
  1054 	TInt irq=NKern::DisableAllInterrupts();
       
  1055 	*ppde = newpde;		// map in the page table
       
  1056 	__DRAIN_WRITE_BUFFER;	// make sure new PDE written to main memory
       
  1057 	FlushTLBs();	// flush both TLBs (no need to flush cache yet)
       
  1058 	NKern::RestoreInterrupts(irq);
       
  1059 	}
       
  1060 
       
  1061 void ArmMmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
       
  1062 	{
       
  1063 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys));
       
  1064 	TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
       
  1065 	TPte newpte = aOrigPhys | KRomPtePermissions;
       
  1066 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
       
  1067 	TInt irq=NKern::DisableAllInterrupts();
       
  1068 	*ppte = newpte;
       
  1069 	__DRAIN_WRITE_BUFFER;
       
  1070 	InvalidateTLBForPage(aRomAddr);
       
  1071 	SyncCodeMappings();
       
  1072 	CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
       
  1073 	CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
       
  1074 	NKern::RestoreInterrupts(irq);
       
  1075 	}
       
  1076 
       
  1077 TInt ArmMmu::UnassignShadowPageTable(TLinAddr aRomAddr, TPhysAddr aOrigPhys)
       
  1078 	{
       
  1079 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:UnassignShadowPageTable, lin=%08x origphys=%08x", aRomAddr, aOrigPhys));
       
  1080 	TPde* ppde = PageDirectory + (aRomAddr>>KChunkShift);
       
  1081 	TPde newpde = (aOrigPhys &~ KChunkMask) | KRomSectionPermissions;
       
  1082 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
       
  1083 	TInt irq=NKern::DisableAllInterrupts();
       
  1084 	*ppde = newpde;			// revert to section mapping
       
  1085 	__DRAIN_WRITE_BUFFER;	// make sure new PDE written to main memory
       
  1086 	FlushTLBs();			// flush both TLBs
       
  1087 	NKern::RestoreInterrupts(irq);
       
  1088 	return KErrNone;
       
  1089 	}
       
  1090 
       
  1091 void ArmMmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)
       
  1092 	{
       
  1093 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x",
       
  1094 		aId, aRomAddr));
       
  1095 	TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
       
  1096 	TPte newpte = (*ppte & KPteSmallPageAddrMask) | KRomPtePermissions;
       
  1097 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
       
  1098 	*ppte = newpte;
       
  1099 	__DRAIN_WRITE_BUFFER;
       
  1100 	InvalidateTLBForPage(aRomAddr);
       
  1101 	}
       
  1102 
       
  1103 void ArmMmu::Pagify(TInt aId, TLinAddr aLinAddr)
       
  1104 	{
       
  1105 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:Pagify aId=%04x aLinAddr=%08x", aId, aLinAddr));
       
  1106 	
       
  1107 	TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift;
       
  1108 	TPte* pte = PageTable(aId);
       
  1109 	if ((pte[pteIndex] & KPteTypeMask) == KArmV45PteLargePage)
       
  1110 		{
       
  1111 		__KTRACE_OPT(KMMU,Kern::Printf("Converting 64K page to 4K pages"));
       
  1112 		pteIndex &= ~0xf;
       
  1113 		TPte source = pte[pteIndex];
       
  1114 		source = (source & KPteLargePageAddrMask) | SP_PTE_FROM_LP_PTE(source);
       
  1115 		pte += pteIndex;
       
  1116 		for (TInt entry=0; entry<16; entry++)
       
  1117 			{
       
  1118 			pte[entry] = source | (entry<<12);
       
  1119 			}
       
  1120 		FlushTLBs();
       
  1121 		}
       
  1122 	}
       
  1123 
       
  1124 void ArmMmu::FlushShadow(TLinAddr aRomAddr)
       
  1125 	{
       
  1126 	CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
       
  1127 	CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
       
  1128 	InvalidateTLBForPage(aRomAddr);		// remove all TLB references to original ROM page
       
  1129 	SyncCodeMappings();
       
  1130 	}
       
  1131 
       
  1132 
       
  1133 inline void ZeroPdes(TLinAddr aBase, TLinAddr aEnd)
       
  1134 	{
       
  1135 	memclr(PageDirectory+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
       
  1136 	}
       
  1137 
       
  1138 void ArmMmu::ClearPageTable(TInt aId, TInt aFirstIndex)
       
  1139 	{
       
  1140 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ClearPageTable(%d,%d)",aId,aFirstIndex));
       
  1141 	TPte* pte=PageTable(aId);
       
  1142 	memclr(pte+aFirstIndex, KPageTableSize-aFirstIndex*sizeof(TPte));
       
  1143 	__DRAIN_WRITE_BUFFER;
       
  1144 	}
       
  1145 
       
  1146 void ArmMmu::ClearRamDrive(TLinAddr aStart)
       
  1147 	{
       
  1148 	// clear the page directory entries corresponding to the RAM drive
       
  1149 	ZeroPdes(aStart, KRamDriveEndAddress);
       
  1150 	__DRAIN_WRITE_BUFFER;
       
  1151 	}
       
  1152 
       
  1153 void ArmMmu::ApplyTopLevelPermissions(TLinAddr aAddr, TUint aChunkSize, TPde aPdePerm)
       
  1154 	{
       
  1155 	__KTRACE_OPT(KMMU,Kern::Printf("ApplyTopLevelPermissions at %x",aAddr));
       
  1156 	TInt pdeIndex=aAddr>>KChunkShift;
       
  1157 	TInt numPdes=(aChunkSize+KChunkMask)>>KChunkShift;
       
  1158 	TPde* pPde=PageDirectory+pdeIndex;
       
  1159 	while(numPdes--)
       
  1160 		{
       
  1161 		*pPde=(*pPde)?((*pPde & KPdePageTableAddrMask)|aPdePerm):0;
       
  1162 		pPde++;
       
  1163 		}
       
  1164 	__DRAIN_WRITE_BUFFER;
       
  1165 	}
       
  1166 
       
  1167 void ArmMmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm)
       
  1168 	{
       
  1169 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyPagePermissions %04x:%03x+%03x perm %08x",
       
  1170 												aId, aPageOffset, aNumPages, aPtePerm));
       
  1171 	TPte* pPte=PageTable(aId)+aPageOffset;
       
   1172 	TPte* pPteEnd=pPte+aNumPages;
       
  1173 	NKern::LockSystem();
       
  1174 	for (; pPte<pPteEnd; ++pPte)
       
  1175 		{
       
  1176 		TPte pte=*pPte;
       
  1177 		if (pte)
       
  1178 			*pPte = (pte&KPteSmallPageAddrMask)|aPtePerm;
       
  1179 		}
       
  1180 	NKern::UnlockSystem();
       
  1181 	FlushTLBs();
       
  1182 	__DRAIN_WRITE_BUFFER;
       
  1183 	}
       
  1184 
       
  1185 void ArmMmu::MoveChunk(TLinAddr aInitAddr, TUint aSize, TLinAddr aFinalAddr, TPde aPdePerm)
       
  1186 	{
       
  1187 	__KTRACE_OPT(KMMU,Kern::Printf("MoveChunk at %08x to %08x size %08x PdePerm %08x",
       
  1188 		aInitAddr, aFinalAddr, aSize, aPdePerm));
       
  1189 	TInt numPdes=(aSize+KChunkMask)>>KChunkShift;
       
  1190 	TInt iS=aInitAddr>>KChunkShift;
       
  1191 	TInt iD=aFinalAddr>>KChunkShift;
       
  1192 	TPde* pS=PageDirectory+iS;
       
  1193 	TPde* pD=PageDirectory+iD;
       
  1194 	while(numPdes--)
       
  1195 		{
       
  1196 		*pD++=(*pS)?((*pS & KPdePageTableAddrMask)|aPdePerm):0;
       
  1197 		*pS++=KPdeNotPresentEntry;
       
  1198 		}
       
  1199 	__DRAIN_WRITE_BUFFER;
       
  1200 	}
       
  1201 
       
  1202 void ArmMmu::MoveChunk(TLinAddr aInitAddr, TLinAddr aFinalAddr, TInt aNumPdes)
       
  1203 //
       
  1204 // Move a block of PDEs without changing permissions. Must work with overlapping initial and final
       
  1205 // regions. Call this with kernel locked.
       
  1206 //
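// Worked example (illustrative): with iS=8, iD=10, aNumPdes=4 (forward overlap) only source
// slots 8-9 are cleared after the move, since slots 10-11 are reused by the destination;
// with iS=10, iD=8 (backward overlap) only slots 12-13 are cleared.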
       
  1207 	{
       
  1208 	__KTRACE_OPT(KMMU,Kern::Printf("MoveChunk at %08x to %08x numPdes %d", aInitAddr, aFinalAddr, aNumPdes));
       
  1209 	if (aInitAddr==aFinalAddr || aNumPdes==0)
       
  1210 		return;
       
  1211 	TInt iS=aInitAddr>>KChunkShift;
       
  1212 	TInt iD=aFinalAddr>>KChunkShift;
       
  1213 	TBool forwardOverlap=(iS<iD && iD-iS<aNumPdes);
       
  1214 	TBool backwardOverlap=(iS>iD && iS-iD<aNumPdes);
       
  1215 	TInt iC=backwardOverlap?(iD+aNumPdes):iS;	// first index to clear
       
  1216 	TInt iZ=forwardOverlap?iD:(iS+aNumPdes);	// last index to clear + 1
       
  1217 	TPde* pS=PageDirectory+iS;
       
  1218 	TPde* pD=PageDirectory+iD;
       
  1219 	__KTRACE_OPT(KMMU,Kern::Printf("backwardOverlap=%d, forwardOverlap=%d",backwardOverlap,forwardOverlap));
       
  1220 	__KTRACE_OPT(KMMU,Kern::Printf("first clear %03x, last clear %03x",iC,iZ));
       
  1221 	wordmove(pD,pS,aNumPdes<<2);				// move PDEs
       
  1222 	pD=PageDirectory+iC;						// pointer to first PDE to clear
       
  1223 	iZ-=iC;										// number of PDEs to clear
       
  1224 	memclr(pD, iZ<<2);							// clear PDEs
       
  1225 	__DRAIN_WRITE_BUFFER;
       
  1226 	}
       
  1227 
       
  1228 TPde ArmMmu::PdePermissions(TChunkType aChunkType, TInt aChunkState)
       
  1229 	{
       
  1230 	if ((aChunkType==EUserData || aChunkType==EDllData || aChunkType==EUserSelfModCode
       
  1231 		|| aChunkType==ESharedKernelSingle || aChunkType==ESharedKernelMultiple || aChunkType==ESharedIo)
       
  1232 		&& aChunkState!=0)
       
  1233 		return KUserDataRunningPermissions;
       
  1234 	return ChunkPdePermissions[aChunkType];
       
  1235 	}
       
  1236 
       
  1237 TPte ArmMmu::PtePermissions(TChunkType aChunkType)
       
  1238 	{
       
  1239 	return ChunkPtePermissions[aChunkType];
       
  1240 	}
       
  1241 
       
  1242 const TUint FBLK=(EMapAttrFullyBlocking>>12);
       
  1243 const TUint BFNC=(EMapAttrBufferedNC>>12);
       
  1244 const TUint BUFC=(EMapAttrBufferedC>>12);
       
  1245 const TUint L1UN=(EMapAttrL1Uncached>>12);
       
  1246 const TUint WTRA=(EMapAttrCachedWTRA>>12);
       
  1247 const TUint WTWA=(EMapAttrCachedWTWA>>12);
       
  1248 const TUint WBRA=(EMapAttrCachedWBRA>>12);
       
  1249 const TUint WBWA=(EMapAttrCachedWBWA>>12);
       
  1250 const TUint AWTR=(EMapAttrAltCacheWTRA>>12);
       
  1251 const TUint AWTW=(EMapAttrAltCacheWTWA>>12);
       
  1252 const TUint AWBR=(EMapAttrAltCacheWBRA>>12);
       
  1253 const TUint AWBW=(EMapAttrAltCacheWBWA>>12);
       
  1254 const TUint MAXC=(EMapAttrL1CachedMax>>12);
       
  1255 
       
  1256 const TUint L2UN=(EMapAttrL2Uncached>>12);
       
  1257 
       
  1258 const TUint16 UNS=0xffffu;	// Unsupported attribute
       
  1259 const TUint16 SPE=0xfffeu;	// Special processing required
       
  1260 
       
  1261 #if defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
       
  1262 // Original definition of C B
       
  1263 static const TUint16 CacheBuffAttributes[16]=
       
  1264 	{0x00,0x00,0x04,0x04,0x0C,0x0C,0x0C,0x0C, UNS, UNS, UNS, UNS, UNS, UNS, UNS,0x0C};
       
  1265 static const TUint8 CacheBuffActual[16]=
       
  1266 	{FBLK,FBLK,BUFC,BUFC,WTRA,WTRA,WTRA,WTRA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WTRA};
       
  1267 
       
  1268 #elif defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
       
  1269 // Newer definition of C B
       
  1270 static const TUint16 CacheBuffAttributes[16]=
       
  1271 	{0x00,0x00,0x04,0x04,0x08,0x08,0x0C,0x0C, UNS, UNS, UNS, UNS, UNS, UNS, UNS,0x0C};
       
  1272 static const TUint8 CacheBuffActual[16]=
       
  1273 	{FBLK,FBLK,BUFC,BUFC,WTRA,WTRA,WBRA,WBRA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBRA};
       
  1274 
       
  1275 #elif defined(__CPU_SA1__)
       
  1276 // Special definition of C B
       
  1277 static const TUint16 CacheBuffAttributes[16]=
       
  1278 	{0x00,0x00,0x04,0x04,0x04,0x04,0x0C,0x0C,0x04,0x04,0x08,0x08, UNS, UNS, UNS,0x0C};
       
  1279 static const TUint8 CacheBuffActual[16]=
       
  1280 	{FBLK,FBLK,BUFC,BUFC,BUFC,BUFC,WBRA,WBRA,FBLK,FBLK,AWBR,AWBR,FBLK,FBLK,FBLK,WBRA};
       
  1281 
       
  1282 #elif defined(__CPU_XSCALE__)
       
  1283 #ifdef __CPU_XSCALE_MANZANO__
       
  1284 #ifdef __HAS_EXTERNAL_CACHE__
       
  1285 // ***MANZANO with L2 cache****** //
       
  1286 
       
  1287 //Specifies TEX::CB bits for different L1/L2 cache attributes
       
   1288 //  ...876543210
       
  1289 //  ...TEX..CB..
       
  1290 static const TUint16 CacheBuffAttributes[80]=
       
  1291 	{									// L1CACHE:
       
   1292 //  FBLK  BFNC  BUFC   L1UN   WTRA   WTWA   WBRA   WBWA  AWTR AWTW AWBR AWBW UNS UNS UNS MAX     L2CACHE:
       
  1293 	0x00, 0x44, 0x40,  0x40, 0x108, 0x108, 0x10c, 0x10c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x10c,  //NC
       
  1294 	0x00, 0x44, 0x40,  0x40, 0x108, 0x108, 0x10c, 0x10c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x10c,  //WTRA
       
  1295 	0x00, 0x44, 0x40,  0x40, 0x108, 0x108, 0x10c, 0x10c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x10c,  //WTWA
       
  1296 	0x00, 0x44, 0x40, 0x140, 0x148, 0x148, 0x14c, 0x14c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x14c,  //WBRA
       
  1297 	0x00, 0x44, 0x40, 0x140, 0x148, 0x148, 0x14c, 0x14c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x14c,  //WBWA
       
  1298    	};
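// Worked example (illustrative): 0x14c sets TEX=0b101, C=1, B=1 in the small-page PTE, which
// CacheAttributesActual() below reports as L1 write-back (WBRA) with L2 write-back write-allocate
// (WBWA); 0x40 (TEX=0b001, C=0, B=0) is uncached at both levels.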
       
  1299 
       
  1300 extern TUint MiniCacheConfig();
       
   1301 // Converts page table attributes (TEX:CB) into the appropriate cache attributes.
       
  1302 TInt CacheAttributesActual(TUint& cacheL1, TUint& cacheL2, TUint cbatt)
       
  1303 	{
       
  1304 	switch (cbatt)
       
  1305 		{
       
  1306 		case 0: 	cacheL1 = FBLK; cacheL2 = L2UN; return KErrNone;
       
  1307 		case 0x40: 	cacheL1 = L1UN; cacheL2 = L2UN; return KErrNone;
       
  1308 		case 0x44: 	cacheL1 = BFNC; cacheL2 = L2UN; return KErrNone;
       
  1309 		case 0x48: 	cacheL1 = MiniCacheConfig(); cacheL2 = L2UN; return KErrNone;
       
  1310 		case 0x108: cacheL1 = WTRA; cacheL2 = L2UN; return KErrNone;
       
  1311 		case 0x10c: cacheL1 = WBRA; cacheL2 = L2UN; return KErrNone;
       
  1312 		case 0x140: cacheL1 = L1UN; cacheL2 = WBWA; return KErrNone;
       
  1313 		case 0x148: cacheL1 = WTRA; cacheL2 = WBWA; return KErrNone;
       
  1314 		case 0x14c: cacheL1 = WBRA; cacheL2 = WBWA; return KErrNone;
       
  1315 		}
       
  1316 	return KErrNotSupported;
       
  1317 	}
       
  1318 #else //__HAS_EXTERNAL_CACHE__
       
  1319 // ***MANZANO without L2 cache****** //
       
  1320 
       
  1321 static const TUint16 CacheBuffAttributes[16]=
       
  1322 //  FBLK BFNC BUFC L1UN WTRA  WTWA  WBRA   WBWA -----------AltCache--------  MAXC 
       
  1323    {0x00,0x44,0x40,0x40,0x148,0x148,0x14C,0x14C,SPE,SPE,SPE,SPE,UNS,UNS,UNS,0x14C};
       
  1324 static const TUint8 CacheBuffActual[16]=
       
  1325 	{FBLK,BFNC,BUFC,BUFC,WTRA,WTRA,WBRA,WBRA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBRA};
       
  1326 #endif //__HAS_EXTERNAL_CACHE__
       
  1327 
       
  1328 #else 
       
  1329 // ***XSCALE that is not MANZANO (no L2 cache)****** //
       
  1330 
       
  1331 // X C B
       
  1332 static const TUint16 CacheBuffAttributes[16]=
       
  1333 	{0x00,0x44,0x04,0x04,0x08,0x08,0x0C,0x4C,SPE,SPE,SPE,SPE,UNS,UNS,UNS,0x4C};
       
  1334 static const TUint8 CacheBuffActual[16]=
       
  1335 	{FBLK,BFNC,BUFC,BUFC,WTRA,WTRA,WBRA,WBWA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBWA};
       
  1336 #endif
       
  1337 
       
  1338 // ***Common code for all XSCALE cores*** //
       
  1339 
       
  1340 extern TUint MiniCacheConfig();
       
  1341 void ProcessSpecialCacheAttr(TUint& cache, TUint& cbatt)
       
  1342 	{
       
  1343 	// If writeback requested, give writeback or writethrough
       
  1344 	// If writethrough requested, give writethrough or uncached
       
  1345 	// Give other allocation policy if necessary.
       
  1346 	TUint mccfg=MiniCacheConfig();
       
  1347 	__KTRACE_OPT(KMMU,Kern::Printf("MiniCacheConfig: %x",mccfg));
       
  1348 
       
  1349 	if (cache<AWBR && mccfg>=AWBR)	// asked for WT but cache is set for WB
       
  1350 		{
       
  1351 		cache=BUFC;					// so give uncached, buffered, coalescing
       
  1352 		#if defined (__CPU_XSCALE_MANZANO__)
       
  1353 		cbatt=0x40;
       
  1354 		#else
       
  1355 		cbatt=0x04;
       
  1356 		#endif
       
  1357 		}
       
  1358 	else
       
  1359 		{
       
  1360 		cache=mccfg;	// give whatever minicache is configured for
       
  1361 		cbatt=0x48;		// minicache attributes
       
  1362 		}
       
  1363 	}
       
  1364 #endif
       
  1365 
       
  1366 static const TUint8 ActualReadPrivilegeLevel[4]={4,1,4,4};		// RORO,RWNO,RWRO,RWRW
       
  1367 static const TUint8 ActualWritePrivilegeLevel[4]={0,1,1,4};	// RORO,RWNO,RWRO,RWRW
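// Access level actually granted for each AP encoding, using the EMapAttr read/write nibble
// convention applied below (>=4 means user access): 4 = user+supervisor, 1 = supervisor only,
// 0 = no access.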
       
  1368 
       
   1369 /** Calculates CB attributes for the page table and sets the actual cache attributes. */
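// Worked example (illustrative): cacheL1=WBRA and cacheL2=WBWA on Manzano with external L2 cache
// scale cacheL2 down to 4, so CacheBuffAttributes[WBRA + (4<<4)] yields 0x14c, and
// CacheAttributesActual() then reports the actual attributes as L1 WBRA / L2 WBWA.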
       
  1370 TInt GetCacheAttr(TUint& cacheL1, TUint& cacheL2, TUint& cbatt)
       
  1371 	{
       
  1372 	TInt r = KErrNone;
       
  1373 	// Scale down L2 to 0-4 : NC, WTRA, WTWA, WBRA, WBWA
       
  1374 #if defined (__CPU_XSCALE_MANZANO__) && defined(__HAS_EXTERNAL_CACHE__)
       
  1375 	if      (cacheL2 == MAXC) cacheL2 = WBWA-3;			//	Scale down L2 cache attributes...
       
  1376 	else if (cacheL2 > WBWA)  return KErrNotSupported;	//	... to 0-4 for...
       
  1377 	else if (cacheL2 < WTRA)  cacheL2 = L2UN;			//	... L2UN to WBWA 
       
  1378 	else					  cacheL2-=3;				//
       
  1379 #else
       
  1380 	cacheL2 = 0; // Either no L2 cache or L2 cache attributes will be just a copy of L1 cache attributes.
       
  1381 #endif
       
  1382 
       
   1383 	//Get CB page attributes. (On some platforms, TEX bits are included as well.)
       
  1384 	cbatt = CacheBuffAttributes[cacheL1 + (cacheL2<<4)];
       
  1385 	__KTRACE_OPT(KMMU,Kern::Printf("GetCacheAttr, table returned:%x",cbatt));
       
  1386 
       
  1387 #if defined(__CPU_XSCALE__)
       
  1388 	//Check if altDCache/LLR cache attributes are defined
       
  1389 	if (cbatt == SPE)
       
  1390 		{
       
  1391 		cacheL2 = 0; //Not L2 cached in such case
       
  1392 		ProcessSpecialCacheAttr(cacheL1,cbatt);
       
  1393 		__KTRACE_OPT(KMMU,Kern::Printf("GetCacheAttr, spec case returned:%x",cbatt));
       
  1394 		}
       
  1395 #endif
       
  1396 
       
  1397 	if(cbatt == UNS)
       
  1398 		return KErrNotSupported;
       
  1399 	
       
   1400 	// Got CB page attributes. Now find out the actual cache attributes.
       
  1401 #if defined(__CPU_XSCALE_MANZANO__) && defined(__HAS_EXTERNAL_CACHE__)
       
  1402 	r = CacheAttributesActual(cacheL1, cacheL2, cbatt);
       
  1403 #else
       
  1404 	cacheL1 = CacheBuffActual[cacheL1];
       
  1405 #if defined(__HAS_EXTERNAL_CACHE__)
       
  1406 	cacheL2 = cacheL1;
       
  1407 #else
       
  1408 	cacheL2 = 0;
       
  1409 #endif	
       
  1410 #endif
       
  1411 	return r;
       
  1412 	}
       
  1413 
       
  1414 TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
       
  1415 	{
       
  1416 	__KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
       
  1417 	TUint read=aMapAttr & EMapAttrReadMask;
       
  1418 	TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
       
  1419 	TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
       
  1420 
       
  1421 	// if execute access is greater than read, adjust read (since there are no separate execute permissions on ARM)
       
  1422 	if (exec>read)
       
  1423 		read=exec;
       
  1424 	TUint ap;
       
  1425 	if (write==0)
       
  1426 		{
       
  1427 		// read-only
       
  1428 		if (read>=4)
       
  1429 			ap=KArmV45PermRORO;			// user and supervisor read-only
       
  1430 		else
       
  1431 			ap=KArmV45PermRWNO;			// supervisor r/w user no access (since no RO/NO access is available)
       
  1432 		}
       
  1433 	else if (write<4)
       
  1434 		{
       
  1435 		// only supervisor can write
       
  1436 		if (read>=4)
       
  1437 			ap=KArmV45PermRWRO;			// supervisor r/w user r/o
       
  1438 		else
       
  1439 			ap=KArmV45PermRWNO;			// supervisor r/w user no access
       
  1440 		}
       
  1441 	else
       
  1442 		ap=KArmV45PermRWRW;				// supervisor r/w user r/w
       
  1443 	read=ActualReadPrivilegeLevel[ap];
       
  1444 	write=ActualWritePrivilegeLevel[ap];
       
  1445 #ifndef __CPU_USE_MMU_TEX_FIELD
       
  1446 	ap|=(ap<<2);
       
  1447 	ap|=(ap<<4);						// replicate permissions in all four subpages
       
  1448 #endif
       
  1449 	ap<<=4;								// shift access permissions into correct position for PTE
       
  1450 	ap|=KArmPteSmallPage;				// add in mandatory small page bits
       
  1451 
       
   1452 	// Get CB attributes for the page table and the actual cache attributes
       
  1453 	TUint cbatt;
       
  1454 	TUint cacheL1=(aMapAttr & EMapAttrL1CacheMask)>>12;
       
  1455 	TUint cacheL2=(aMapAttr & EMapAttrL2CacheMask)>>16;
       
  1456 	TInt r = GetCacheAttr(cacheL1, cacheL2, cbatt);
       
  1457 
       
  1458 	if (r==KErrNone)
       
  1459 		{
       
  1460 		aPde=PT_PDE(EDomainClient);
       
  1461 		aPte=ap|cbatt;
       
  1462 		aMapAttr=read|(write<<4)|(read<<8)|(cacheL1<<12)|(cacheL2<<16);
       
  1463 		}
       
  1464 	__KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, r=%d, mapattr=%08x, pde=%08x, pte=%08x",
       
  1465 								r,aMapAttr,aPde,aPte));
       
  1466 	return r;
       
  1467 	}
       
  1468 
       
  1469 void ArmMmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)
       
  1470 //
       
  1471 // Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr.
       
  1472 // Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift).
       
  1473 // Assume any page tables required are already assigned.
       
  1474 // aLinAddr, aPhysAddr, aSize must be page-aligned.
       
  1475 //
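// Worked example (illustrative): with aMapShift>=KChunkShift, a chunk-aligned 1MB+8K region is
// mapped as one section PDE plus two small-page PTEs; with aMapShift=KLargePageShift the first
// 1MB is instead mapped as sixteen 64K large pages (16 replicated PTEs each) followed by the
// two small pages.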
       
  1476 	{
       
  1477 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize));
       
  1478 	__KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift));
       
  1479 	TPde pt_pde=aPdePerm;
       
  1480 	TPte sp_pte=aPtePerm;
       
  1481 	TPde section_pde=SECTION_PDE_FROM_PDEPTE(pt_pde, sp_pte);
       
  1482 	TPte lp_pte=LP_PTE_FROM_SP_PTE(sp_pte);
       
  1483 	TLinAddr la=aLinAddr;
       
  1484 	TPhysAddr pa=aPhysAddr;
       
  1485 	TInt remain=aSize;
       
  1486 	while (remain)
       
  1487 		{
       
  1488 		if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize)
       
  1489 			{
       
  1490 			// use sections
       
  1491 			TInt npdes = remain>>KChunkShift;
       
  1492 			TPde* p_pde = PageDirectory + (la>>KChunkShift);
       
  1493 			TPde* p_pde_E = p_pde + npdes;
       
  1494 			TPde pde = pa|section_pde;
       
  1495 			NKern::LockSystem();
       
  1496 			for (; p_pde < p_pde_E; pde+=KChunkSize)
       
  1497 				{
       
  1498 				__ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse));
       
  1499 				__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde));
       
  1500 				*p_pde++=pde;
       
  1501 				}
       
  1502 			NKern::UnlockSystem();
       
  1503 			npdes<<=KChunkShift;
       
  1504 			la+=npdes, pa+=npdes, remain-=npdes;
       
  1505 			continue;
       
  1506 			}
       
  1507 		TInt block_size = Min(remain, KChunkSize-(la&KChunkMask));
       
  1508 		TPte pa_mask=~KPageMask;
       
  1509 		TPte pte_perm=sp_pte;
       
  1510 		if (aMapShift>=KLargePageShift && block_size>=KLargePageSize)
       
  1511 			{
       
  1512 			if ((la & KLargePageMask)==0)
       
  1513 				{
       
  1514 				// use 64K large pages
       
  1515 				pa_mask=~KLargePageMask;
       
  1516 				pte_perm=lp_pte;
       
  1517 				}
       
  1518 			else
       
  1519 				block_size = Min(remain, KLargePageSize-(la&KLargePageMask));
       
  1520 			}
       
  1521 		block_size &= pa_mask;
       
  1522 
       
  1523 		// use pages (large or small)
       
  1524 		TInt id = PageTableId(la);
       
  1525 		__ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable));
       
  1526 		TPte* p_pte = PageTable(id) + ((la&KChunkMask)>>KPageShift);
       
  1527 		TPte* p_pte_E = p_pte + (block_size>>KPageShift);
       
  1528 		SPageTableInfo& ptinfo = iPtInfo[id];
       
  1529 		NKern::LockSystem();
       
  1530 		for (; p_pte < p_pte_E; pa+=KPageSize)
       
  1531 			{
       
  1532 			__ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse));
       
  1533 			TPte pte = (pa & pa_mask) | pte_perm;
       
  1534 			__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte));
       
  1535 			*p_pte++=pte;
       
  1536 			++ptinfo.iCount;
       
  1537 			NKern::FlashSystem();
       
  1538 			}
       
  1539 		NKern::UnlockSystem();
       
  1540 		la+=block_size, remain-=block_size;
       
  1541 		}
       
  1542 	}
       
  1543 
       
  1544 void ArmMmu::Unmap(TLinAddr aLinAddr, TInt aSize)
       
  1545 //
       
  1546 // Remove all mappings in the specified range of addresses.
       
  1547 // Assumes there are only global mappings involved.
       
  1548 // Don't free page tables.
       
  1549 // aLinAddr, aSize must be page-aligned.
       
  1550 //
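// Note: a 64K large-page mapping is removed by clearing all KLargeSmallPageRatio replicated PTEs
// in one memclr(), so any large-page mapping in the range must start on a 64K boundary
// (asserted below).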
       
  1551 	{
       
  1552 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
       
  1553 	TLinAddr a=aLinAddr;
       
  1554 	TLinAddr end=a+aSize;
       
  1555 	__KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
       
  1556 	NKern::LockSystem();
       
  1557 	while(a!=end)
       
  1558 		{
       
  1559 		TInt pdeIndex=a>>KChunkShift;
       
  1560 		TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize;
       
  1561 		TInt to_do = Min(TInt(end-a), TInt(next-a))>>KPageShift;
       
  1562 		__KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
       
  1563 		TPde pde = PageDirectory[pdeIndex];
       
  1564 		if ( (pde&KPdePresentMask)==KArmV45PdeSection )
       
  1565 			{
       
  1566 			__ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
       
  1567 			PageDirectory[pdeIndex]=0;
       
  1568 			InvalidateTLBForPage(a);
       
  1569 			a=next;
       
  1570 			NKern::FlashSystem();
       
  1571 			continue;
       
  1572 			}
       
  1573 		TInt ptid = GetPageTableId(a);
       
  1574 		SPageTableInfo& ptinfo=iPtInfo[ptid];
       
  1575 		if (ptid>=0)
       
  1576 			{
       
  1577 			TPte* ppte = PageTable(ptid) + ((a&KChunkMask)>>KPageShift);
       
  1578 			TPte* ppte_End = ppte + to_do;
       
  1579 			for (; ppte<ppte_End; ++ppte, a+=KPageSize)
       
  1580 				{
       
  1581 				TUint pte_type = *ppte & KPteTypeMask;
       
  1582 				if (pte_type && pte_type != KArmV45PteLargePage)
       
  1583 					{
       
  1584 					--ptinfo.iCount;
       
  1585 					*ppte=0;
       
  1586 					InvalidateTLBForPage(a);
       
  1587 					}
       
  1588 				else if (pte_type)
       
  1589 					{
       
  1590 					__ASSERT_DEBUG(!(a&KLargePageMask), MM::Panic(MM::EUnmapBadAlignment));
       
  1591 					ptinfo.iCount-=KLargeSmallPageRatio;
       
  1592 					memclr(ppte, KLargeSmallPageRatio*sizeof(TPte));
       
  1593 					InvalidateTLBForPage(a);
       
  1594 					a+=(KLargePageSize-KPageSize);
       
  1595 					ppte+=(KLargeSmallPageRatio-1);
       
  1596 					}
       
  1597 				NKern::FlashSystem();
       
  1598 				}
       
  1599 			}
       
  1600 		else
       
  1601 			a += (to_do<<KPageShift);
       
  1602 		}
       
  1603 	NKern::UnlockSystem();
       
  1604 	}
       
  1605 
       
  1606 TInt ArmMmu::AllocDomain()
       
  1607 	{
       
  1608 	NKern::FMWait(&DomainLock);
       
  1609 	TInt r=-1;
       
  1610 	if (Domains)
       
  1611 		{
       
  1612 		r=__e32_find_ls1_32(Domains);
       
  1613 		Domains &= ~(1<<r);
       
  1614 		}
       
  1615 	NKern::FMSignal(&DomainLock);
       
  1616 	return r;
       
  1617 	}
       
  1618 
       
  1619 void ArmMmu::FreeDomain(TInt aDomain)
       
  1620 	{
       
  1621 	__ASSERT_ALWAYS(aDomain>=0 && aDomain<ENumDomains, MM::Panic(MM::EFreeInvalidDomain));
       
  1622 	TUint32 m=1<<aDomain;
       
  1623 	NKern::FMWait(&DomainLock);
       
  1624 	__ASSERT_ALWAYS(!(Domains&m), MM::Panic(MM::EFreeDomainNotAllocated));
       
  1625 	Domains|=m;
       
  1626 	NKern::FMSignal(&DomainLock);
       
  1627 	}
       
  1628 
       
  1629 void ArmMmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte)
       
  1630 	{
       
  1631 	//map the pages at a temporary address, clear them and unmap
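	// Note: aPageList is either a real list of physical addresses, or (when its low bit is set)
	// the physical base address of a contiguous run tagged in bit 0; in the latter case the
	// "pointer" itself is advanced by one page size per iteration rather than dereferenced.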
       
  1632 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  1633 	while (--aNumPages >= 0)
       
  1634 		{
       
  1635 		TPhysAddr pa;
       
  1636 		if((TInt)aPageList&1)
       
  1637 			{
       
  1638 			pa = (TPhysAddr)aPageList&~1;
       
  1639 			*(TPhysAddr*)&aPageList += iPageSize;
       
  1640 			}
       
  1641 		else
       
  1642 			pa = *aPageList++;
       
  1643 		*iTempPte = pa | SP_PTE(KArmV45PermRWNO, KMemAttBuf);
       
  1644 		__DRAIN_WRITE_BUFFER;
       
  1645 		InvalidateTLBForPage(iTempAddr);
       
  1646 		memset((TAny*)iTempAddr, aClearByte, iPageSize);
       
  1647 		}
       
  1648 	*iTempPte=0;
       
  1649 	__DRAIN_WRITE_BUFFER;
       
  1650 	InvalidateTLBForPage(iTempAddr);
       
  1651 	}
       
  1652 
       
  1653 TLinAddr DoMapTemp(TPhysAddr aPage, TBool aCached, TLinAddr aTempAddr, TPte* aTempPte)
       
  1654 	{
       
  1655 	__ASSERT_DEBUG(!*aTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
       
  1656 	*aTempPte = (aPage&~KPageMask) | SP_PTE(KArmV45PermRWNO, aCached?KDefaultCaching:KMemAttBuf);
       
  1657 	__DRAIN_WRITE_BUFFER;
       
  1658 	return aTempAddr;
       
  1659 	}
       
  1660 
       
  1661 /**
       
  1662 Create a temporary mapping of a physical page.
       
  1663 The RamAllocatorMutex must be held before this function is called and not released
       
  1664 until after UnmapTemp has been called.
       
  1665 
       
  1666 @param aPage	The physical address of the page to be mapped.
       
  1667 @param aCached	Whether to map the page cached or not.
       
  1668 
       
  1669 @return The linear address of where the page has been mapped.
       
  1670 */
       
  1671 TLinAddr ArmMmu::MapTemp(TPhysAddr aPage, TBool aCached)
       
  1672 	{
       
  1673 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  1674 	return DoMapTemp(aPage, aCached, iTempAddr, iTempPte);
       
  1675 	}
       
  1676 
       
  1677 /**
       
  1678 Create a temporary mapping of a physical page, distinct from that created by MapTemp.
       
  1679 The RamAllocatorMutex must be held before this function is called and not released
       
  1680 until after UnmapSecondTemp has been called.
       
  1681 
       
  1682 @param aPage	The physical address of the page to be mapped.
       
  1683 @param aCached	Whether to map the page cached or not.
       
  1684 
       
  1685 @return The linear address of where the page has been mapped.
       
  1686 */
       
  1687 TLinAddr ArmMmu::MapSecondTemp(TPhysAddr aPage, TBool aCached)
       
  1688 	{
       
  1689 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  1690 	return DoMapTemp(aPage, aCached, iSecondTempAddr, iSecondTempPte);
       
  1691 	}
       
  1692 
       
  1693 void DoUnmapTemp(TLinAddr aTempAddr, TPte* aTempPte)
       
  1694 	{
       
  1695 	*aTempPte = 0;
       
  1696 	__DRAIN_WRITE_BUFFER;
       
  1697 	InvalidateTLBForPage(aTempAddr);
       
  1698 	}
       
  1699 
       
  1700 /**
       
  1701 Remove the temporary mapping created with MapTemp.
       
  1702 */
       
  1703 void ArmMmu::UnmapTemp()
       
  1704 	{
       
  1705 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  1706 	DoUnmapTemp(iTempAddr, iTempPte);
       
  1707 	}
       
  1708 
       
  1709 /**
       
  1710 Remove the temporary mapping created with MapSecondTemp.
       
  1711 */
       
  1712 void ArmMmu::UnmapSecondTemp()
       
  1713 	{
       
  1714 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  1715 	DoUnmapTemp(iSecondTempAddr, iSecondTempPte);
       
  1716 	}
       
  1717 
       
  1718 /*
       
  1719  * Performs cache maintenance on physical cache (VIPT & PIPT) for a page to be reused.
       
  1720  */
       
  1721 void ArmMmu::CacheMaintenanceOnDecommit(TPhysAddr aAddr)
       
  1722 	{
       
  1723 	CacheMaintenance::PageToReusePhysicalCache(aAddr);
       
  1724 	}
       
  1725 
       
  1726 void ArmMmu::CacheMaintenanceOnDecommit(const TPhysAddr* aAddr, TInt aCount)
       
  1727 	{
       
  1728 	while (--aCount>=0)
       
  1729 		ArmMmu::CacheMaintenanceOnDecommit(*aAddr++);
       
  1730 	}
       
  1731 
       
  1732 void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr, TUint)
       
  1733 	{
       
  1734 	//Not required for moving memory model
       
  1735 	__ASSERT_ALWAYS(0, Panic(ECacheMaintenance));
       
  1736 	}
       
  1737 
       
  1738 void ArmMmu::CacheMaintenanceOnPreserve(const TPhysAddr*, TInt, TUint)
       
  1739 	{
       
  1740 	//Not required for moving memory model
       
  1741 	__ASSERT_ALWAYS(0, Panic(ECacheMaintenance));
       
  1742 	}
       
  1743 
       
  1744 void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr , TInt , TLinAddr , TUint )
       
  1745 	{
       
  1746 	//Not required for moving memory model
       
  1747 	__ASSERT_ALWAYS(0, Panic(ECacheMaintenance));
       
  1748 	}
       
  1749 
       
  1750 
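/**
Donate the aNumPages pages starting at page index aStartPage of the chunk mapped at aBase to
the RAM cache, so the system may reclaim them while they remain unlocked. Pages whose PTEs (or
whole page tables) have already gone are skipped. Always returns KErrNone.
*/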
       
  1751 TInt ArmMmu::UnlockRamCachePages(TUint8* volatile & aBase, TInt aStartPage, TInt aNumPages)
       
  1752 	{
       
  1753 	NKern::LockSystem();
       
  1754 	for(;;)
       
  1755 		{
       
  1756 		TInt page = ((TLinAddr)aBase>>KPageShift)+aStartPage;
       
  1757 		TPde* pd = PageDirectory+(page>>(KChunkShift-KPageShift));
       
  1758 		TPte* pt = SafePageTableFromPde(*pd++);
       
  1759 		TInt pteIndex = page&(KChunkMask>>KPageShift);
       
  1760 		if(!pt)
       
  1761 			{
       
  1762 			// whole page table has gone, so skip all pages in it...
       
  1763 			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
       
  1764 			aNumPages -= pagesInPt;
       
  1765 			aStartPage += pagesInPt;
       
  1766 			if(aNumPages>0)
       
  1767 				continue;
       
  1768 			NKern::UnlockSystem();
       
  1769 			return KErrNone;
       
  1770 			}
       
  1771 		pt += pteIndex;
       
  1772 		do
       
  1773 			{
       
  1774 			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
       
  1775 			if(pagesInPt>aNumPages)
       
  1776 				pagesInPt = aNumPages;
       
  1777 			if(pagesInPt>KMaxPages)
       
  1778 				pagesInPt = KMaxPages;
       
  1779 
       
  1780 			aNumPages -= pagesInPt;
       
  1781 			aStartPage += pagesInPt;
       
  1782 
       
  1783 			do
       
  1784 				{
       
  1785 				TPte pte = *pt++;
       
  1786 				if(pte!=KPteNotPresentEntry) // pte may be null if page has already been unlocked and reclaimed by system
       
  1787 					iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
       
  1788 				}
       
  1789 			while(--pagesInPt);
       
  1790 
       
  1791 			if(!aNumPages)
       
  1792 				{
       
  1793 				NKern::UnlockSystem();
       
  1794 				return KErrNone;
       
  1795 				}
       
  1796 
       
  1797 			pteIndex = aStartPage&(KChunkMask>>KPageShift);
       
  1798 			}
       
  1799 		while(!NKern::FlashSystem() && pteIndex);
       
  1800 		}
       
  1801 	}
       
  1802 
       
  1803 
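/**
Reclaim the aNumPages pages starting at page index aStartPage of the chunk mapped at aBase from
the RAM cache, making them normal committed memory again. Returns KErrNotFound if any page has
already been reclaimed and reused by the system, KErrNone otherwise.
*/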
       
  1804 TInt ArmMmu::LockRamCachePages(TUint8* volatile & aBase, TInt aStartPage, TInt aNumPages)
       
  1805 	{
       
  1806 	NKern::LockSystem();
       
  1807 	for(;;)
       
  1808 		{
       
  1809 		TInt page = ((TLinAddr)aBase>>KPageShift)+aStartPage;
       
  1810 		TPde* pd = PageDirectory+(page>>(KChunkShift-KPageShift));
       
  1811 		TPte* pt = SafePageTableFromPde(*pd++);
       
  1812 		TInt pteIndex = page&(KChunkMask>>KPageShift);
       
  1813 		if(!pt)
       
  1814 			goto not_found;
       
  1815 		pt += pteIndex;
       
  1816 		do
       
  1817 			{
       
  1818 			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
       
  1819 			if(pagesInPt>aNumPages)
       
  1820 				pagesInPt = aNumPages;
       
  1821 			if(pagesInPt>KMaxPages)
       
  1822 				pagesInPt = KMaxPages;
       
  1823 
       
  1824 			aNumPages -= pagesInPt;
       
  1825 			aStartPage += pagesInPt;
       
  1826 
       
  1827 			do
       
  1828 				{
       
  1829 				TPte pte = *pt++;
       
  1830 				if(pte==KPteNotPresentEntry)
       
  1831 					goto not_found;
       
  1832 				if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
       
  1833 					goto not_found;
       
  1834 				}
       
  1835 			while(--pagesInPt);
       
  1836 
       
  1837 			if(!aNumPages)
       
  1838 				{
       
  1839 				NKern::UnlockSystem();
       
  1840 				return KErrNone;
       
  1841 				}
       
  1842 
       
  1843 			pteIndex = aStartPage&(KChunkMask>>KPageShift);
       
  1844 			}
       
  1845 		while(!NKern::FlashSystem() && pteIndex);
       
  1846 		}
       
  1847 not_found:
       
  1848 	NKern::UnlockSystem();
       
  1849 	return KErrNotFound;
       
  1850 	}
       
  1851 
       
  1852 
       
  1853 void RamCache::SetFree(SPageInfo* aPageInfo)
       
  1854 	{
       
  1855 	// Make a page free
       
  1856 	SPageInfo::TType type = aPageInfo->Type();
       
  1857 	if(type==SPageInfo::EPagedCache)
       
  1858 		{
       
  1859 		TInt offset = aPageInfo->Offset()<<KPageShift;
       
  1860 		DArmPlatChunk* chunk = (DArmPlatChunk*)aPageInfo->Owner();
       
  1861 		__NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
       
  1862 		TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
       
  1863 		TPte* pt = PtePtrFromLinAddr(lin);
       
  1864 		*pt = KPteNotPresentEntry;
       
  1865 		__DRAIN_WRITE_BUFFER;
       
  1866 		InvalidateTLBForPage(lin);
       
  1867 		((ArmMmu*)iMmu)->SyncCodeMappings();
       
  1868 		CacheMaintenance::PageToReuseVirtualCache(lin);
       
  1869 		// actually decommit it from chunk...
       
  1870 		TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
       
  1871 		SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
       
  1872 		if(!--ptinfo.iCount)
       
  1873 			{
       
  1874 			((ArmMmu*)iMmu)->DoUnassignPageTable(lin);
       
  1875 			chunk->RemovePde(offset);
       
  1876 			NKern::UnlockSystem();
       
  1877 			((ArmMmu*)iMmu)->FreePageTable(ptid);
       
  1878 			NKern::LockSystem();
       
  1879 			}
       
  1880 		}
       
  1881 	else
       
  1882 		{
       
  1883 		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
       
  1884 		Panic(EUnexpectedPageType);
       
  1885 		}
       
  1886 	}
       
  1887 
       
  1888 
       
  1889 //
       
  1890 // MemModelDemandPaging
       
  1891 //
       
  1892 
       
  1893 class MemModelDemandPaging : public DemandPaging
       
  1894 	{
       
  1895 public:
       
  1896 	// From RamCacheBase
       
  1897 	virtual void Init2();
       
  1898 	virtual TInt Init3();
       
  1899 	virtual TBool PageUnmapped(SPageInfo* aPageInfo);
       
  1900 	// From DemandPaging
       
  1901 	virtual TInt Fault(TAny* aExceptionInfo);
       
  1902 	virtual void SetOld(SPageInfo* aPageInfo);
       
  1903 	virtual void SetFree(SPageInfo* aPageInfo);
       
  1904 	virtual void NotifyPageFree(TPhysAddr aPage);
       
  1905 	virtual TInt EnsurePagePresent(TLinAddr aPage, DProcess* aProcess);
       
  1906 	virtual TPhysAddr LinearToPhysical(TLinAddr aPage, DProcess* aProcess);
       
  1907 	virtual void AllocLoadAddress(DPagingRequest& aReq, TInt aDeviceId);
       
  1908 	virtual TInt PageState(TLinAddr aAddr);
       
  1909 	virtual TBool NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength);
       
  1910 	// New
       
  1911 	inline ArmMmu& Mmu() { return (ArmMmu&)*iMmu; }
       
  1912 	void InitRomPaging();
       
  1913 	void InitCodePaging();
       
  1914 	TInt HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TBool aInRom);
       
  1915 	TInt PageIn(TLinAddr aAddress, DMemModelCodeSegMemory* aCodeSegMemory);
       
  1916 private:
       
  1917 	TLinAddr GetLinearAddress(SPageInfo* aPageInfo);
       
  1918 	};
       
  1919 
       
  1920 
       
  1921 //
       
  1922 // MemModelDemandPaging
       
  1923 //
       
  1924 
       
  1925 
       
  1926 DemandPaging* DemandPaging::New()
       
  1927 	{
       
  1928 	return new MemModelDemandPaging();
       
  1929 	}
       
  1930 
       
  1931 
       
  1932 void MemModelDemandPaging::Init2()
       
  1933 	{
       
  1934 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">MemModelDemandPaging::Init2"));
       
  1935 	DemandPaging::Init2();
       
  1936 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init2"));
       
  1937 	}
       
  1938 
       
  1939 
       
  1940 void MemModelDemandPaging::AllocLoadAddress(DPagingRequest& aReq, TInt aReqId)
       
  1941 	{
       
  1942 	aReq.iLoadAddr = iTempPages + aReqId * KPageSize;
       
  1943 	aReq.iLoadPte = PtePtrFromLinAddr(aReq.iLoadAddr);
       
  1944 	}
       
  1945 
       
  1946 
       
  1947 TInt MemModelDemandPaging::Init3()
       
  1948 	{
       
  1949 	TInt r=DemandPaging::Init3();
       
  1950 	if(r!=KErrNone)
       
  1951 		return r;
       
  1952 
       
  1953 	// Create a region for mapping pages during page in
       
  1954 	DPlatChunkHw* chunk;
       
  1955 	TInt chunkSize = KMaxPagingDevices * KPagingRequestsPerDevice * KPageSize;
       
  1956 	DPlatChunkHw::DoNew(chunk, KPhysAddrInvalid, chunkSize, EMapAttrSupRw|EMapAttrFullyBlocking);
       
  1957 	if(!chunk)
       
  1958 		Panic(EInitialiseFailed);
       
  1959 	iTempPages = chunk->iLinAddr;
       
  1960 
       
  1961 	if(RomPagingRequested())
       
  1962 		InitRomPaging();
       
  1963 
       
  1964 	if (CodePagingRequested())
       
  1965 		InitCodePaging();
       
  1966 
       
  1967 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init3"));
       
  1968 	return KErrNone;
       
  1969 	}
       
  1970 
       
  1971 	
       
  1972 void MemModelDemandPaging::InitRomPaging()
       
  1973 	{
       
  1974 	// Make page tables for demand paged part of ROM...
       
  1975 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("MemModelDemandPaging::Init3 making page tables for paged ROM"));
       
  1976 	TLinAddr lin = iRomPagedLinearBase&~KChunkMask; // first chunk with paged ROM in
       
  1977 	TLinAddr linEnd = iRomLinearBase+iRomSize;
       
  1978 	while(lin<linEnd)
       
  1979 		{
       
  1980 		// Get a Page Table
       
  1981 		TInt ptid = Mmu().PageTableId(lin);
       
  1982 		if(ptid<0)
       
  1983 			{
       
  1984 			MmuBase::Wait();
       
  1985 			ptid = Mmu().AllocPageTable();
       
  1986 			MmuBase::Signal();
       
  1987 			__NK_ASSERT_DEBUG(ptid>=0);
       
  1988 			Mmu().PtInfo(ptid).SetGlobal(lin >> KChunkShift);
       
  1989 			}
       
  1990 
       
  1991 		// Get new page table addresses
       
  1992 		TPte* pt = PageTable(ptid);
       
  1993 		TPhysAddr ptPhys=Mmu().LinearToPhysical((TLinAddr)pt);
       
  1994 
       
   1995 		// Pointer to page directory entry
       
  1996 		TPde* ppde = PageDirectory + (lin>>KChunkShift);
       
  1997 
       
  1998 		// Fill in Page Table
       
  1999 		TPte* ptEnd = pt+(1<<(KChunkShift-KPageShift));
       
  2000 		pt += (lin&KChunkMask)>>KPageShift;
       
  2001 		do
       
  2002 			{
       
  2003 			if(lin<iRomPagedLinearBase)
       
  2004 				*pt++ = Mmu().LinearToPhysical(lin) | KRomPtePermissions;
       
  2005 			else
       
  2006 				*pt++ = KPteNotPresentEntry;
       
  2007 			lin += KPageSize;
       
  2008 			}
       
  2009 		while(pt<ptEnd && lin<=linEnd);
       
  2010 		__DRAIN_WRITE_BUFFER;
       
  2011 
       
  2012 		// Add new Page Table to the Page Directory
       
  2013 		TPde newpde = ptPhys | KShadowPdePerm;
       
  2014 		__KTRACE_OPT2(KPAGING,KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
       
  2015 		TInt irq=NKern::DisableAllInterrupts();
       
  2016 		*ppde = newpde;
       
  2017 		__DRAIN_WRITE_BUFFER;
       
  2018 		FlushTLBs();
       
  2019 		NKern::RestoreInterrupts(irq);
       
  2020 		}
       
  2021 	}
       
  2022 
       
  2023 
       
  2024 void MemModelDemandPaging::InitCodePaging()
       
  2025 	{
       
  2026 	// Initialise code paging info
       
  2027 	iCodeLinearBase = Mmu().iUserCodeBase;
       
  2028 	iCodeSize = Mmu().iMaxUserCodeSize;
       
  2029 	}
       
  2030 
       
  2031 /**
       
  2032 @return ETrue when the unmapped page should be freed, EFalse otherwise
       
  2033 */
       
  2034 TBool MemModelDemandPaging::PageUnmapped(SPageInfo* aPageInfo)
       
  2035 	{
       
  2036 	SPageInfo::TType type = aPageInfo->Type();
       
  2037 
       
  2038 	if(type!=SPageInfo::EPagedCache && type!=SPageInfo::EPagedCode)
       
  2039 		{
       
  2040 		__NK_ASSERT_DEBUG(type!=SPageInfo::EPagedData); // not supported yet
       
  2041 		return ETrue;
       
  2042 		}
       
  2043 
       
  2044 	RemovePage(aPageInfo);
       
  2045 	AddAsFreePage(aPageInfo);
       
  2046 	// Return false to stop DMemModelChunk::DoDecommit from freeing this page
       
  2047 	return EFalse;
       
  2048 	}
       
  2049 
       
  2050 
       
  2051 TLinAddr MemModelDemandPaging::GetLinearAddress(SPageInfo* aPageInfo)
       
  2052 	{
       
  2053 	TInt offset = aPageInfo->Offset()<<KPageShift;
       
  2054 	SPageInfo::TType type = aPageInfo->Type();
       
  2055 	__NK_ASSERT_DEBUG(TUint(offset)<(type==SPageInfo::EPagedROM ? iRomSize : iCodeSize));
       
  2056 	TLinAddr base = type==SPageInfo::EPagedROM ? iRomLinearBase : iCodeLinearBase;
       
  2057 	return base + offset;
       
  2058 	}
       
  2059 
       
  2060 
       
  2061 void MemModelDemandPaging::SetOld(SPageInfo* aPageInfo)
       
  2062 	{
       
  2063 	__NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedOld);
       
  2064 	SPageInfo::TType type = aPageInfo->Type();
       
  2065 
       
  2066 	if(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode)
       
  2067 		{
       
  2068 		START_PAGING_BENCHMARK;
       
  2069 		
       
  2070 		// get linear address of page...
       
  2071 		TLinAddr lin = GetLinearAddress(aPageInfo);
       
  2072 
       
  2073 		// make page inaccessible...
       
  2074 		TPte* pt = PtePtrFromLinAddr(lin);
       
  2075 		*pt &= ~KPtePresentMask;
       
  2076 		__DRAIN_WRITE_BUFFER;
       
  2077 		InvalidateTLBForPage(lin);
       
  2078 		Mmu().SyncCodeMappings();
       
  2079 
       
  2080 		if (type==SPageInfo::EPagedCode)
       
  2081 			END_PAGING_BENCHMARK(this, EPagingBmSetCodePageOld);
       
  2082 		}
       
  2083 	else if(type==SPageInfo::EPagedCache)
       
  2084 		{
       
  2085 		// leave page accessible
       
  2086 		}
       
  2087 	else if(type!=SPageInfo::EPagedFree)
       
  2088 		{
       
  2089 		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetOld() with bad page type = %d",aPageInfo->Type()));
       
  2090 		Panic(EUnexpectedPageType);
       
  2091 		}
       
  2092 	NKern::FlashSystem();
       
  2093 	}
       
  2094 
       
  2095 
       
  2096 void MemModelDemandPaging::SetFree(SPageInfo* aPageInfo)
       
  2097 	{
       
  2098 	__ASSERT_SYSTEM_LOCK;
       
  2099 	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
       
  2100 	__NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedDead);
       
  2101 	if(aPageInfo->LockCount())
       
  2102 		Panic(ERamPageLocked);
       
  2103 
       
  2104 	SPageInfo::TType type = aPageInfo->Type();
       
  2105 
       
  2106 	if(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode)
       
  2107 		{
       
  2108 		START_PAGING_BENCHMARK;
       
  2109 		
       
  2110 		// get linear address of page...
       
  2111 		TLinAddr lin = GetLinearAddress(aPageInfo);
       
  2112 
       
  2113 		// unmap it...
       
  2114 		TPte* pt = PtePtrFromLinAddr(lin);
       
  2115 		*pt = KPteNotPresentEntry;
       
  2116 		__DRAIN_WRITE_BUFFER;
       
  2117 		InvalidateTLBForPage(lin);
       
  2118 		Mmu().SyncCodeMappings();
       
  2119 
       
  2120 		if (type==SPageInfo::EPagedCode)
       
  2121 			END_PAGING_BENCHMARK(this, EPagingBmSetCodePageFree);
       
  2122 #ifdef BTRACE_PAGING
       
  2123 		TInt subCat = type==SPageInfo::EPagedCode ? BTrace::EPagingPageOutCode : BTrace::EPagingPageOutROM;
       
  2124 		TPhysAddr phys = aPageInfo->PhysAddr();
       
  2125 		BTraceContext8(BTrace::EPaging,subCat,phys,lin); 
       
  2126 #endif
       
  2127 		}
       
  2128 	else if(type==SPageInfo::EPagedCache)
       
  2129 		{
       
  2130 		// get linear address of page...
       
  2131 		TInt offset = aPageInfo->Offset()<<KPageShift;
       
  2132 		DArmPlatChunk* chunk = (DArmPlatChunk*)aPageInfo->Owner();
       
  2133 		__NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
       
  2134 		TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
       
  2135 
       
  2136 		// unmap it...
       
  2137 		TPte* pt = PtePtrFromLinAddr(lin);
       
  2138 		*pt = KPteNotPresentEntry;
       
  2139 		__DRAIN_WRITE_BUFFER;
       
  2140 		InvalidateTLBForPage(lin);
       
  2141 		Mmu().SyncCodeMappings();
       
  2142 		NKern::UnlockSystem();
       
  2143 		CacheMaintenance::PageToReuseVirtualCache(lin);
       
  2144 		NKern::LockSystem();
       
  2145 
       
  2146 		// actually decommit it from chunk...
       
  2147 		TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
       
  2148 		SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
       
  2149 		if(!--ptinfo.iCount)
       
  2150 			{
       
  2151 			((ArmMmu*)iMmu)->DoUnassignPageTable(lin);
       
  2152 			chunk->RemovePde(offset);
       
  2153 			NKern::UnlockSystem();
       
  2154 			((ArmMmu*)iMmu)->FreePageTable(ptid);
       
  2155 			NKern::LockSystem();
       
  2156 			}
       
  2157 
       
  2158 #ifdef BTRACE_PAGING
       
  2159 		TPhysAddr phys = aPageInfo->PhysAddr();
       
  2160 		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCache,phys,lin);
       
  2161 #endif
       
  2162 		}
       
  2163 	else if(type==SPageInfo::EPagedFree)
       
  2164 		{
       
  2165 		// already free...
       
  2166 #ifdef BTRACE_PAGING
       
  2167 		TPhysAddr phys = aPageInfo->PhysAddr();
       
  2168 		BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOutFree,phys);
       
  2169 #endif
       
  2170 		// external cache may not have been cleaned if PageUnmapped called
       
  2171 		CacheMaintenance::PageToReusePhysicalCache(aPageInfo->PhysAddr());
       
  2172 		}
       
  2173 	else
       
  2174 		{
       
  2175 		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
       
  2176 		Panic(EUnexpectedPageType);
       
  2177 		}
       
  2178 	NKern::FlashSystem();
       
  2179 	}
       
  2180 
       
  2181 
       
  2182 void MemModelDemandPaging::NotifyPageFree(TPhysAddr aPage)
       
  2183 	{
       
  2184 	MM::Panic(MM::EOperationNotImplemented);
       
  2185 	}
       
  2186 
       
  2187 
       
  2188 /**
       
   2189 Return ETrue if the exception was caused by a memory write access.
       
  2190 This function can cause a paging exception!
       
  2191 */
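// Worked example (illustrative): an ARM "STR r0,[r1]" encodes as 0xE5810000, so (op>>25)&7 == 2
// and the L bit (bit 20) is clear, giving ETrue; the corresponding load 0xE5910000 has bit 20
// set and gives EFalse.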
       
  2192 static TBool FaultDuringWrite(TArmExcInfo& aExc)
       
  2193 	{
       
   2194 	// We can't decode Jazelle instructions to determine whether they faulted during a read.
       
   2195 	// Therefore we treat them as writes (which will panic the thread)...
       
  2196 	if(aExc.iCpsr&(1<<24))
       
  2197 		return ETrue; 
       
  2198 
       
  2199 	if(aExc.iCpsr&(1<<5))
       
  2200 		{
       
  2201 		// thumb
       
  2202 		TUint32 op = *(TUint16*)aExc.iR15;
       
  2203 		switch((op>>13)&7)
       
  2204 			{
       
  2205 		case 2:
       
  2206 			if((op&0xfa00)==0x5000)
       
  2207 				return ETrue;			// STR (2) and STRB (2)
       
  2208 			if((op&0xfe00)==0x5200)
       
  2209 				return ETrue;			// STRH (2)
       
  2210 			return EFalse;
       
  2211 		case 3:
       
  2212 			return !(op&(1<<11));		// STR (1) and STRB (1)
       
  2213 		case 4:
       
  2214 			return !(op&(1<<11));		// STR (3) and STRH (1)
       
  2215 		case 5:
       
  2216 			return (op&0xfe00)==0xb400;	// PUSH
       
  2217 		case 6:
       
  2218 			return (op&0xf800)==0xc000; // STMIA
       
  2219 			}
       
  2220 		}
       
  2221 	else
       
  2222 		{
       
  2223 		// ARM
       
  2224 		TUint32 op = *(TUint32*)aExc.iR15;
       
  2225 		if(op<0xf0000000)
       
  2226 			{
       
  2227 			switch((op>>25)&7)
       
  2228 				{
       
  2229 			case 0:
       
  2230 				if((op&0xf0)==(0xb0))
       
  2231 					return !(op&(1<<20));		// load/store halfword
       
  2232 				else if((op&0x0e1000f0)==(0x000000f0))
       
  2233 					return ETrue;				// store double
       
  2234 				else if((op&0x0fb000f0) == 0x010000f0)
       
  2235 					return ETrue;				// swap instruction
       
  2236 				else if((op&0x0ff000f0) == 0x01800090)
       
  2237 					return ETrue;				// strex
       
  2238 				return EFalse;
       
  2239 			case 2:
       
  2240 				return !(op&(1<<20));			 // load/store immediate
       
  2241 			case 3:
       
  2242 				if(!(op&0x10))
       
  2243 					return !(op&(1<<20));		// load/store register offset
       
  2244 				return EFalse;
       
  2245 			case 4:
       
  2246 				return !(op&(1<<20));			// load/store multiple
       
  2247 			case 6:
       
  2248 				return !(op&(1<<20));			// coproc store 
       
  2249 				}
       
  2250 			}
       
  2251 		else
       
  2252 			{
       
  2253 			switch((op>>25)&7)
       
  2254 				{
       
  2255 			case 4:
       
  2256 				if((op&0xfe5f0f00)==(0xf84d0500))
       
  2257 					return ETrue;				// SRS instructions
       
  2258 				return EFalse;
       
  2259 			case 6:
       
  2260 				return !(op&(1<<20));			// coproc store (STC2)
       
  2261 				}
       
  2262 			}
       
  2263 		}
       
  2264 	return EFalse;
       
  2265 	}
       
  2266 
       
  2267 
       
  2268 TInt MemModelDemandPaging::Fault(TAny* aExceptionInfo)
       
  2269 	{
       
  2270 	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
       
  2271 
       
  2272 	// Get faulting address
       
  2273 	TLinAddr faultAddress = exc.iFaultAddress;
       
  2274 	if(exc.iExcCode==EArmExceptionDataAbort)
       
  2275 		{
       
  2276 		// Only handle page translation faults
       
  2277 		if((exc.iFaultStatus&0xf)!=0x7)
       
  2278 			return KErrUnknown;
       
  2279 		// Let writes take an exception rather than page in any memory...
       
  2280 		if(FaultDuringWrite(exc))
       
  2281 			return KErrUnknown;
       
  2282 		}
       
  2283 	else if (exc.iExcCode != EArmExceptionPrefetchAbort)
       
  2284 		return KErrUnknown; // Not prefetch or data abort
       
  2285 
       
  2286 	DThread* thread = TheCurrentThread;
       
  2287 
       
   2288 	// check which region the fault occurred in...
       
  2289 	TBool inRom=ETrue;
       
  2290 	if(TUint(faultAddress-iRomPagedLinearBase)<iRomPagedSize)
       
  2291 		{
       
  2292 		// in ROM
       
  2293 		}
       
  2294 	else if(TUint(faultAddress-iCodeLinearBase)<iCodeSize)
       
  2295 		{
       
  2296 		// in code
       
  2297 		inRom=EFalse;
       
  2298 		}
       
  2299 	else
       
  2300 		return KErrUnknown; // Not in pageable region
       
  2301 
       
  2302 	// Check if thread holds fast mutex and claim system lock
       
  2303 	NFastMutex* fm = NKern::HeldFastMutex();
       
  2304 	TPagingExcTrap* trap = thread->iPagingExcTrap;
       
  2305 	if(!fm)
       
  2306 		NKern::LockSystem();
       
  2307 	else
       
  2308 		{
       
  2309 		if(!trap || fm!=&TheScheduler.iLock)
       
  2310 			{
       
  2311 			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
       
  2312 			Panic(EPageFaultWhilstFMHeld); // Not allowed to hold mutexes
       
  2313 			}
       
  2314 
       
  2315 		// Current thread already has the system lock...
       
  2316 		NKern::FlashSystem(); // Let someone else have a go with the system lock.
       
  2317 		}
       
  2318 
       
  2319 	// System locked here
       
  2320 
       
  2321 	TInt r = KErrNone;	
       
  2322 	if(thread->IsRealtime())
       
  2323 		r = CheckRealtimeThreadFault(thread, aExceptionInfo);
       
  2324 	if (r == KErrNone)
       
  2325 		r = HandleFault(exc, faultAddress, inRom);
       
  2326 	
       
  2327 	// Restore system lock state
       
  2328 	if (fm != NKern::HeldFastMutex())
       
  2329 		{
       
  2330 		if (fm)
       
  2331 			NKern::LockSystem();
       
  2332 		else
       
  2333 			NKern::UnlockSystem();
       
  2334 		}
       
  2335 	
       
  2336 	// Deal with XTRAP_PAGING
       
  2337 	if(r == KErrNone && trap)
       
  2338 		{
       
  2339 		trap->Exception(1); // Return from exception trap with result '1' (value>0)
       
  2340 		// code doesn't continue beyond this point.
       
  2341 		}
       
  2342 
       
  2343 	return r;
       
  2344 	}
       
  2345 
       
  2346 
       
  2347 TInt MemModelDemandPaging::HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TBool aInRom)
       
  2348 	{
       
  2349 	++iEventInfo.iPageFaultCount;
       
  2350 
       
  2351 	// get page table entry...
       
  2352 	TPte* pt = SafePtePtrFromLinAddr(aFaultAddress);
       
  2353 	if(!pt)
       
  2354 		return KErrNotFound;
       
  2355 	TPte pte = *pt;
       
  2356 
       
  2357 	// Do what is required to make page accessible...
       
  2358 
       
  2359 	if(pte&KPtePresentMask)
       
  2360 		{
       
  2361 		// PTE is present, so assume it has already been dealt with
       
  2362 #ifdef BTRACE_PAGING
       
  2363 		BTraceContext12(BTrace::EPaging,BTrace::EPagingPageNop,pte&~KPageMask,aFaultAddress,aExc.iR15);
       
  2364 #endif
       
  2365 		return KErrNone;
       
  2366 		}
       
  2367 
       
  2368 	if(pte!=KPteNotPresentEntry)
       
  2369 		{
       
   2370 		// PTE already has a page
       
  2371 		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(pte);
       
  2372 		if(pageInfo->State()==SPageInfo::EStatePagedDead)
       
  2373 			{
       
  2374 			// page currently being unmapped, so do that here...
       
  2375 			*pt = KPteNotPresentEntry; // Update page table
       
  2376 			__DRAIN_WRITE_BUFFER;
       
  2377 			}
       
  2378 		else
       
  2379 			{
       
  2380 			// page just needs making young again...
       
  2381 			*pt = TPte(pte|KArmPteSmallPage); // Update page table
       
  2382 			__DRAIN_WRITE_BUFFER;
       
  2383 			Rejuvenate(pageInfo);
       
  2384 #ifdef BTRACE_PAGING
       
  2385 			BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,pte&~KPageMask,aFaultAddress,aExc.iR15);
       
  2386 #endif
       
  2387 			return KErrNone;
       
  2388 			}
       
  2389 		}
       
  2390 
       
  2391 	// PTE not present, so page it in...
       
  2392 	// check if fault in a CodeSeg...
       
  2393 	DMemModelCodeSegMemory* codeSegMemory = NULL;
       
  2394 	if (aInRom)
       
  2395 		NKern::ThreadEnterCS();
       
  2396 	else
       
  2397 		{
       
  2398 		// find CodeSeg...
       
  2399 		DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aFaultAddress);
       
  2400 		if (!codeSeg)
       
  2401 			return KErrNotFound;
       
  2402 		codeSegMemory = codeSeg->Memory();
       
  2403 		if (codeSegMemory==0 || !codeSegMemory->iIsDemandPaged)
       
  2404 			return KErrNotFound;
       
  2405 		// open reference on CodeSegMemory
       
  2406 		NKern::ThreadEnterCS();
       
  2407 #ifdef _DEBUG
       
  2408 		TInt r = 
       
  2409 #endif
       
  2410 				 codeSegMemory->Open();
       
  2411 		__NK_ASSERT_DEBUG(r==KErrNone);
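        		// (in release builds the return value of Open() is not checked, since __NK_ASSERT_DEBUG compiles away)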
       
  2412 		NKern::FlashSystem();
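        		// briefly release and re-acquire the system lock so other threads are not held off while this fault is processed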
       
  2413 		}		
       
  2414 
       
  2415 #ifdef BTRACE_PAGING
       
  2416 	BTraceContext8(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aExc.iR15);
       
  2417 #endif
       
  2418 	
       
  2419 	TInt r = PageIn(aFaultAddress,codeSegMemory);
       
  2420 
       
  2421 	NKern::UnlockSystem();
       
  2422 
       
  2423 	if(codeSegMemory)
       
  2424 		codeSegMemory->Close();
       
  2425 
       
  2426 	NKern::ThreadLeaveCS();
       
  2427 	
       
  2428 	return r;
       
  2429 	}
       
  2430 
       
  2431 
       
  2432 TInt MemModelDemandPaging::PageIn(TLinAddr aAddress, DMemModelCodeSegMemory* aCodeSegMemory)
       
  2433 	{
       
  2434 	// Get a request object - this may block until one is available
       
  2435 	DPagingRequest* req = AcquireRequestObject();
       
  2436 	
       
  2437 	// Get page table entry
       
  2438 	TPte* pt = SafePtePtrFromLinAddr(aAddress);
       
  2439 
       
  2440 	// Check page is still required...
       
  2441 	if(!pt || *pt!=KPteNotPresentEntry)
       
  2442 		{
       
  2443 #ifdef BTRACE_PAGING
       
  2444 		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
       
  2445 #endif
       
  2446 		ReleaseRequestObject(req);
       
  2447 		return pt ? KErrNone : KErrNotFound;
       
  2448 		}
       
  2449 
       
  2450 	++iEventInfo.iPageInReadCount;
       
  2451 
       
  2452 	// Get a free page
       
  2453 	SPageInfo* pageInfo = AllocateNewPage();
       
  2454 	__NK_ASSERT_DEBUG(pageInfo);
       
  2455 
       
  2456 	// Get physical address of free page
       
  2457 	TPhysAddr phys = pageInfo->PhysAddr();
       
  2458 	__NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);
       
  2459 
       
  2460 	// Temporarily map free page
       
  2461 	TLinAddr loadAddr = req->iLoadAddr;
       
  2462 	pt = req->iLoadPte;
       
  2463 	*pt = phys | SP_PTE(KArmV45PermRWNO, KMemAttTempDemandPaging);
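        	// the new physical page is mapped supervisor read/write at the request object's dedicated load address, using the temporary demand-paging memory attributes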
       
  2464 	__DRAIN_WRITE_BUFFER;
       
  2465 
       
  2466 	// Read page from backing store
       
  2467 	aAddress &= ~KPageMask;	
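        	// aAddress is now rounded down to its page base; the read below transfers one whole page into the temporary mapping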
       
  2468 	NKern::UnlockSystem();
       
  2469 
       
  2470 	TInt r;
       
  2471 	if (!aCodeSegMemory)
       
  2472 		r = ReadRomPage(req, aAddress);
       
  2473 	else
       
  2474 		{
       
  2475 		r = ReadCodePage(req, aCodeSegMemory, aAddress);
       
  2476 		if (r == KErrNone)
       
  2477 			aCodeSegMemory->ApplyCodeFixups((TUint32*)loadAddr, aAddress);
       
  2478 		}
       
  2479 	if(r!=KErrNone)
       
  2480 		Panic(EPageInFailed);
       
  2481 
       
   2482 	// make caches consistent (uncached memory is used for page loading)
       
  2483 	__DRAIN_WRITE_BUFFER;
       
  2484 	NKern::LockSystem();
       
  2485 
       
  2486 	// Invalidate temporary mapping
       
  2487 	*pt = KPteNotPresentEntry;
       
  2488 	__DRAIN_WRITE_BUFFER;
       
  2489 	InvalidateTLBForPage(loadAddr);
       
  2490 
       
  2491 	ReleaseRequestObject(req);
       
  2492 	
       
  2493 	// Get page table entry
       
  2494 	pt = SafePtePtrFromLinAddr(aAddress);
       
  2495 
       
  2496 	// Check page still needs updating
       
  2497 	TBool notNeeded = pt==0 || *pt!=KPteNotPresentEntry;
       
  2498 	if(notNeeded)
       
  2499 		{
       
  2500 		// We don't need the new page after all, so put it on the active list as a free page
       
  2501 		__KTRACE_OPT(KPAGING,Kern::Printf("DP: PageIn (New page not used)"));
       
  2502 #ifdef BTRACE_PAGING
       
  2503 		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
       
  2504 #endif
       
  2505 		AddAsFreePage(pageInfo);
       
  2506 		return pt ? KErrNone : KErrNotFound;
       
  2507 		}
       
  2508 
       
  2509 	// Update page info
       
  2510 	if (!aCodeSegMemory)
       
  2511 		pageInfo->SetPagedROM((aAddress-iRomLinearBase)>>KPageShift);
       
  2512 	else
       
  2513 		pageInfo->SetPagedCode(aCodeSegMemory,(aAddress-Mmu().iUserCodeBase)>>KPageShift);
       
  2514 
       
  2515 	// Map page into final location
       
  2516 	*pt = phys | (aCodeSegMemory ? KUserCodeLoadPte : KRomPtePermissions);
       
  2517 	__DRAIN_WRITE_BUFFER;
       
  2518 #ifdef BTRACE_PAGING
       
  2519 	TInt subCat = aCodeSegMemory ? BTrace::EPagingPageInCode : BTrace::EPagingPageInROM;
       
  2520 	BTraceContext8(BTrace::EPaging,subCat,phys,aAddress);
       
  2521 #endif
       
  2522 
       
  2523 	AddAsYoungest(pageInfo);
       
  2524 	BalanceAges();
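        	// add the page to the young end of the live page list and re-balance the young/old lists if required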
       
  2525 
       
  2526 	return KErrNone;
       
  2527 	}
       
  2528 
       
  2529 
       
  2530 inline TUint8 ReadByte(TLinAddr aAddress)
       
  2531 	{ return *(volatile TUint8*)aAddress; }
       
  2532 
       
  2533 
       
  2534 TInt MemModelDemandPaging::EnsurePagePresent(TLinAddr aPage, DProcess* aProcess)
       
  2535 	{
       
  2536 	XTRAPD(exc,XT_DEFAULT,XTRAP_PAGING_RETRY(CHECK_PAGING_SAFE; ReadByte(aPage);));
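        	// reading a single byte under the paging trap forces the page to be paged in if it is not already present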
       
  2537 	return exc;
       
  2538 	}
       
  2539 
       
  2540 
       
  2541 TPhysAddr MemModelDemandPaging::LinearToPhysical(TLinAddr aPage, DProcess* aProcess)
       
  2542 	{
       
  2543 	return Mmu().LinearToPhysical(aPage);
       
  2544 	}
       
  2545 
       
  2546 
       
  2547 TInt MemModelDemandPaging::PageState(TLinAddr aAddr)
       
  2548 	{
       
  2549 	TPte* ptePtr = 0;
       
  2550 	TPte pte = 0;
       
  2551 	TInt r = 0;
       
  2552 	SPageInfo* pageInfo = NULL;
       
  2553 
       
  2554 	NKern::LockSystem();
       
  2555 
       
  2556 	DMemModelCodeSegMemory* codeSegMemory = 0;
       
  2557 	if(TUint(aAddr-iRomPagedLinearBase)<iRomPagedSize)
       
  2558 		r |= EPageStateInRom;
       
  2559 	else if (TUint(aAddr-iCodeLinearBase)<iCodeSize)
       
  2560 		{
       
  2561 		DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aAddr);
       
  2562 		if(codeSeg)
       
  2563 			codeSegMemory = codeSeg->Memory();
       
  2564 		if (codeSegMemory)
       
  2565 			{
       
  2566 			r |= EPageStateInRamCode;
       
  2567 			if (codeSegMemory->iIsDemandPaged)
       
  2568 				r |= EPageStatePaged;
       
  2569 			}
       
  2570 		}
       
  2571 
       
  2572 	ptePtr = SafePtePtrFromLinAddr(aAddr);
       
  2573 	if (!ptePtr)
       
  2574 		goto done;
       
  2575 	r |= EPageStatePageTablePresent;
       
  2576 	pte = *ptePtr;
       
  2577 	if (pte == KPteNotPresentEntry)
       
  2578 		goto done;		
       
  2579 	r |= EPageStatePtePresent;
       
  2580 	if (pte & KPtePresentMask)
       
  2581 		r |= EPageStatePteValid;
       
  2582 	
       
  2583 	pageInfo = SPageInfo::FromPhysAddr(pte);
       
  2584 	r |= pageInfo->Type();
       
  2585 	r |= pageInfo->State()<<8;
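        	// the returned value combines the EPageState* flags with the SPageInfo type in the low byte and its state shifted into the next byte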
       
  2586 
       
  2587 done:
       
  2588 	NKern::UnlockSystem();
       
  2589 	return r;
       
  2590 	}
       
  2591 
       
  2592 
       
  2593 TBool MemModelDemandPaging::NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength)
       
  2594 	{
       
   2595 	// Don't check mutex order for reads from unpaged ROM, the kernel data area or the kernel stack chunk
       
  2596 	TLinAddr endAddr = aStartAddr + aLength;
       
  2597 	TLinAddr stackBase = (TLinAddr)MM::SvStackChunk->Base();
       
  2598 	TLinAddr stackEnd = stackBase + MM::SvStackChunk->iMaxSize;
       
  2599 	TLinAddr unpagedRomEnd = iRomPagedLinearBase ? iRomPagedLinearBase : iRomLinearBase + iRomSize;
       
  2600 	TBool rangeInUnpagedRom = aStartAddr >= iRomLinearBase && endAddr <= unpagedRomEnd;
       
  2601 	TBool rangeInKernelData = aStartAddr >= KKernelDataBase && endAddr <= KKernelDataEnd;
       
  2602 	TBool rangeInKernelStack = aStartAddr >= stackBase && endAddr <= stackEnd;
       
  2603 	return !rangeInUnpagedRom && !rangeInKernelData && !rangeInKernelStack;
       
  2604 	}
       
  2605 
       
  2606 
       
  2607 EXPORT_C TBool DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
       
  2608 	{
       
  2609 	MemModelDemandPaging* pager = (MemModelDemandPaging*)iThePager;
       
  2610 	if(pager)
       
  2611 		{
       
  2612 		ArmMmu& m = pager->Mmu();
       
  2613 		TLinAddr end = aStart+aSize;
       
  2614 		
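        		// only reserve and lock pages if the range overlaps the demand-paged part of ROM or the user code area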
       
  2615 		if ((aStart < TUint(pager->iRomPagedLinearBase+pager->iRomPagedSize) && end > pager->iRomPagedLinearBase) ||
       
  2616 			(aStart < TUint(m.iUserCodeBase + m.iMaxUserCodeSize) && end > m.iUserCodeBase))
       
  2617 			return pager->ReserveLock(aThread,aStart,aSize,*this);
       
  2618 		}
       
  2619 		
       
  2620 	return EFalse;
       
  2621 	}
       
  2622 
       
  2623 void ArmMmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)
       
  2624 //
       
  2625 // Mark the page at aOffset in aChunk inaccessible to prevent it being
       
  2626 // modified while defrag is in progress. Save the required information
       
  2627 // to allow the fault handler to deal with this.
       
  2628 // Flush the cache for the page so that it can be aliased elsewhere for
       
  2629 // copying.
       
  2630 // Call this with the system unlocked.
       
  2631 //
       
  2632 	{
       
  2633 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DisablePageModification() offset=%08x", aOffset));
       
  2634 
       
  2635 	// Acquire the system lock here for atomic access to aChunk->iBase as moving 
       
  2636 	// between the home and run addresses (a reschedule) may update aChunk->iBase.
       
  2637 	NKern::LockSystem();
       
  2638 
       
  2639 	iDisabledAddr = (TLinAddr)(aChunk->iBase) + aOffset;
       
  2640 	TInt ptid=GetPageTableId(iDisabledAddr);
       
  2641 	if(ptid<0)
       
  2642 		Panic(EDefragDisablePageFailed);	
       
  2643 
       
  2644 	TPte* pPte = PageTable(ptid) + ((aOffset&KChunkMask)>>KPageShift);
       
  2645 	TPte pte = *pPte;
       
  2646 	if ((pte & KPteTypeMask) != KArmPteSmallPage)
       
  2647 		Panic(EDefragDisablePageFailed);
       
  2648 
       
  2649 	iDisabledPte = pPte;
       
  2650 	iDisabledOldVal = pte;
       
  2651 
       
  2652 	*pPte = 0;
       
  2653 	__DRAIN_WRITE_BUFFER;
       
  2654 	InvalidateTLBForPage(iDisabledAddr);
       
  2655 	NKern::UnlockSystem();
       
  2656 
       
  2657 	CacheMaintenance::PageToPreserveAndReuseVirtualCache(iDisabledAddr);
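        	// flush this page from the virtually indexed cache so the physical page can safely be accessed through an alias while it is copied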
       
  2658 	__DRAIN_WRITE_BUFFER;
       
  2659 	}
       
  2660 
       
  2661 TBool FaultStatusFromLinAddr(TLinAddr aAddr, TBool aKernel, TUint32& aFaultStatus)
       
  2662 	// Walk the page tables looking for the given linear address. If access
       
  2663 	// would've caused a fault, return ETrue and fill in aFaultStatus with a
       
  2664 	// FSR value. Otherwise, return EFalse. Assumes it was a read.
       
  2665 	{
       
  2666 	TPde pde = PageDirectory[aAddr>>KChunkShift];
       
  2667 	TPde pdetype = pde & KPdeTypeMask;
       
  2668 	if (pdetype == 0)
       
  2669 		{
       
  2670 		// section translation fault
       
  2671 		aFaultStatus = 0x5;
       
  2672 		return ETrue;
       
  2673 		}
       
  2674 
       
  2675 	TPte pte=0;
       
  2676 	TInt domain = (pde >> 5) & 0xf;
       
  2677 	TUint32 dacr = Arm::Dacr();
       
  2678 	TInt domaccess = (dacr >> (domain<<1)) & 0x3;
       
  2679 	TInt ispage = (pdetype == KArmV45PdeSection) ? 0 : 0x2;
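        	// each domain has a two-bit field in the DACR (0=no access, 1=client, 3=manager); 'ispage' adds the 2 that distinguishes page-level from section-level fault status codes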
       
  2680 
       
  2681 	if (ispage)
       
  2682 		{
       
  2683 		pte = *PtePtrFromLinAddr(aAddr);
       
  2684 		if ((pte & KPteTypeMask) == 0)
       
  2685 			{
       
  2686 			// page translation fault
       
  2687 			aFaultStatus = 0x7;
       
  2688 			return ETrue;
       
  2689 			}
       
  2690 		}
       
  2691 
       
  2692 	if (domaccess == 0x3)
       
  2693 		{
       
  2694 		// manager access
       
  2695 		return EFalse;
       
  2696 		}
       
  2697 	if (domaccess == 0)
       
  2698 		{
       
  2699 		// domain fault
       
  2700 		aFaultStatus = 0x9 | ispage;
       
  2701 		return ETrue;
       
  2702 		}
       
  2703 
       
  2704 	TInt perms;
       
  2705 	if (ispage)
       
  2706 		perms = (pte >> 4) & 0x3;
       
  2707 	else
       
  2708 		perms = (pde >> 10) & 0x3;
       
  2709 	
       
  2710 	if (aKernel || perms != 0x1)
       
  2711 		return EFalse;
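        	// reaching here means an unprivileged access to a mapping whose AP bits are 0b01 (supervisor-only), i.e. a permission fault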
       
  2712 
       
  2713 	// permission fault
       
  2714 	aFaultStatus = 0xd | ispage;
       
  2715 	return ETrue;
       
  2716 	}
       
  2717 
       
  2718 TInt ArmMmu::RamDefragFault(TAny* aExceptionInfo)
       
  2719 	{
       
  2720 	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
       
  2721 
       
  2722 	// Get faulting address
       
  2723 	TLinAddr faultAddress;
       
  2724 	TBool prefetch=EFalse;
       
  2725 	if(exc.iExcCode==EArmExceptionDataAbort)
       
  2726 		{
       
  2727 		// Only handle page translation faults
       
  2728 		if((exc.iFaultStatus & 0xf) != 0x7)
       
  2729 			return KErrUnknown;
       
  2730 		faultAddress = exc.iFaultAddress;
       
  2731 		}
       
  2732 	else if(exc.iExcCode==EArmExceptionPrefetchAbort)
       
  2733 		{
       
  2734 		prefetch = ETrue;
       
  2735 		faultAddress = exc.iR15;
       
  2736 		}
       
  2737 	else
       
  2738 		return KErrUnknown; // Not data/prefetch abort
       
  2739 
       
   2740 	TBool kernelmode = (exc.iCpsr&EMaskMode) != EUserMode;
       
  2741 
       
  2742 	// Take system lock if not already held
       
  2743 	NFastMutex* fm = NKern::HeldFastMutex();
       
  2744 	if(!fm)
       
  2745 		NKern::LockSystem();
       
  2746 	else if(fm!=&TheScheduler.iLock)
       
  2747 		{
       
  2748 		__KTRACE_OPT2(KMMU,KPANIC,Kern::Printf("Defrag: Fault with FM Held! %x (%O pc=%x)",faultAddress,TheCurrentThread,exc.iR15));
       
   2749 		Panic(EDefragFaultWhilstFMHeld); // Not allowed to take a defrag fault while holding a fast mutex other than the system lock
       
  2750 		}
       
  2751 
       
  2752 	TInt r = KErrUnknown;
       
  2753 
       
   2754 	// check whether the mapping of the page has already been restored; if so return KErrNone so the faulting instruction is simply retried
       
  2755 	if (prefetch)
       
  2756 		{
       
  2757 		TUint32 fsr;
       
  2758 		if (!FaultStatusFromLinAddr(faultAddress, kernelmode, fsr))
       
  2759 			{
       
  2760 			r = KErrNone;
       
  2761 			goto leave;
       
  2762 			}
       
  2763 		}
       
  2764 	else
       
  2765 		{
       
  2766 		TPte* pt = SafePtePtrFromLinAddr(faultAddress);
       
  2767 		if(!pt)
       
  2768 			{
       
  2769 			r = KErrNotFound;
       
  2770 			goto leave;
       
  2771 			}
       
  2772 		if ((*pt & 0x3) != 0)
       
  2773 			{
       
  2774 			r = KErrNone;
       
  2775 			goto leave;
       
  2776 			}
       
  2777 		}
       
  2778 
       
  2779 	// check if the fault occurred in the page we are moving
       
  2780 	if (iDisabledPte && TUint(faultAddress - iDisabledAddr) < TUint(KPageSize))
       
  2781 		{
       
  2782 		// restore access to the page
       
  2783 		*iDisabledPte = iDisabledOldVal;
       
  2784 		__DRAIN_WRITE_BUFFER;
       
  2785 		InvalidateTLBForPage(iDisabledAddr);
       
  2786 		iDisabledAddr = 0;
       
  2787 		iDisabledPte = NULL;
       
  2788 		iDisabledOldVal = 0;
       
  2789 		r = KErrNone;
       
  2790 		}
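        	// returning KErrNone indicates the fault has been resolved, so the aborted instruction can be retried against the restored mapping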
       
  2791 
       
  2792 leave:
       
  2793 	// Restore system lock state
       
  2794 	if (!fm)
       
  2795 		NKern::UnlockSystem();
       
  2796 	
       
  2797 	return r;
       
  2798 	}