kernel/eka/memmodel/epoc/flexible/mmu/x86/xmmu.cpp
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <x86_mem.h>
#include "cache_maintenance.inl"
#include "execs.h"
#include "mm.h"
#include "mmu.h"
#include "mpager.h"
#include "mpdalloc.h"


TPte PteGlobal;	// =0x100 on processors which support global pages, 0 on processors which don't

#if defined(KMMU)
extern "C" void __DebugMsgFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("FlushTLB"));
	}

extern "C" void __DebugMsgLocalFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("LocalFlushTLB"));
	}

extern "C" void __DebugMsgINVLPG(int a)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("INVLPG(%08x)",a));
	}
#endif



extern void DoLocalInvalidateTLB();


#ifndef __SMP__


FORCE_INLINE void LocalInvalidateTLB()
	{
	DoLocalInvalidateTLB();
	}


#else // __SMP__


const TInt KMaxPages = 1;

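// TTLBIPI implements TLB shootdown for SMP: the CPU initiating an
// invalidation performs it locally and queues an inter-processor interrupt
// so that every other CPU does the same. Page addresses can be batched via
// AddAddress(), up to KMaxPages at a time, before InvalidateList()
// broadcasts them.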
class TTLBIPI : public TGenericIPI
	{
public:
	TTLBIPI();
	static void InvalidateForPagesIsr(TGenericIPI*);
	static void LocalInvalidateIsr(TGenericIPI*);
	static void InvalidateIsr(TGenericIPI*);
	static void WaitAndInvalidateIsr(TGenericIPI*);
	void AddAddress(TLinAddr aAddr);
	void InvalidateList();
public:
	volatile TInt	iFlag;
	TInt			iCount;
	TLinAddr		iAddr[KMaxPages];
	};

TTLBIPI::TTLBIPI()
	:	iFlag(0), iCount(0)
	{
	}

void TTLBIPI::LocalInvalidateIsr(TGenericIPI*)
	{
	TRACE2(("TLBLocInv"));
	DoLocalInvalidateTLB();
	}

void TTLBIPI::InvalidateIsr(TGenericIPI*)
	{
	TRACE2(("TLBInv"));
	DoInvalidateTLB();
	}

void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aTLBIPI)
	{
	TRACE2(("TLBWtInv"));
	TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
	while (!a.iFlag)
		{}
	if (a.iCount == 1)
		DoInvalidateTLBForPage(a.iAddr[0]);
	else
		DoInvalidateTLB();
	}

void TTLBIPI::InvalidateForPagesIsr(TGenericIPI* aTLBIPI)
	{
	TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
	TInt i;
	for (i=0; i<a.iCount; ++i)
		{
		TRACE2(("TLBInv %08x", a.iAddr[i]));
		DoInvalidateTLBForPage(a.iAddr[i]);
		}
	}

void TTLBIPI::AddAddress(TLinAddr aAddr)
	{
	iAddr[iCount] = aAddr;
	if (++iCount == KMaxPages)
		InvalidateList();
	}

void TTLBIPI::InvalidateList()
	{
	NKern::Lock();
	InvalidateForPagesIsr(this);
	QueueAllOther(&InvalidateForPagesIsr);
	NKern::Unlock();
	WaitCompletion();
	iCount = 0;
	}

void LocalInvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoLocalInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::LocalInvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}

void InvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::InvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}

void InvalidateTLBForPage(TLinAddr aAddr)
	{
	TTLBIPI ipi;
	ipi.AddAddress(aAddr);
	ipi.InvalidateList();
	}


#endif // __SMP__


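// Invalidate TLB entries for the given address space. Note: kernel mappings
// may use global PTEs (see PteGlobal), which are not flushed by an ordinary
// address-space switch, so the kernel os asid needs the full InvalidateTLB();
// any other asid only needs the cheaper non-global invalidation.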
void InvalidateTLBForAsid(TUint aAsid)
	{
	if(aAsid==KKernelOsAsid)
		InvalidateTLB();
	else
		LocalInvalidateTLB();
	}


void SinglePdeUpdated(TPde* aPde)
	{
	CacheMaintenance::SinglePdeUpdated((TLinAddr)aPde);
	PageDirectories.GlobalPdeChanged(aPde);
	}


//
// Functions for class Mmu
//

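/**
Return the physical address mapped by the page table entry \a aPte, or
KPhysAddrInvalid if the entry is not present. (\a aPteIndex is unused on
x86.)
*/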
TPhysAddr Mmu::PtePhysAddr(TPte aPte, TUint /*aPteIndex*/)
	{
	if(aPte&KPdePtePresent)
		return aPte & KPdePtePhysAddrMask;
	return KPhysAddrInvalid;
	}


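/**
Return the virtual address of the page table referenced by the Page
Directory Entry \a aPde, or 0 if the PDE does not reference a page table
(e.g. it maps a large page). The table's ID combines the index of the RAM
page holding it (from its SPageInfo) with its position within that page's
cluster of page tables.
*/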
TPte* Mmu::PageTableFromPde(TPde aPde)
	{
	if((aPde&(KPdeLargePage|KPdePtePresent)) == KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::FromPhysAddr(aPde);
		TInt id = (pi->Index()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
		return (TPte*)(KPageTableBase+(id<<KPageTableShift));
		}
	return 0;
	}


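/**
As PageTableFromPde(), but also returns 0 when the physical address in
\a aPde has no SPageInfo, making it safe to call with arbitrary PDE
values.
*/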
TPte* Mmu::SafePageTableFromPde(TPde aPde)
	{
	if((aPde&(KPdeLargePage|KPdePtePresent)) == KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde&~KPageMask);
		if(pi)
			{
			TInt id = (pi->Index()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
			return (TPte*)(KPageTableBase+(id<<KPageTableShift));
			}
		}
	return 0;
	}


/**
Return the base physical address of the section referenced by the given
Page Directory Entry (PDE) \a aPde. If the PDE doesn't refer to a
section then KPhysAddrInvalid is returned.

@pre #MmuLock held.
*/
TPhysAddr Mmu::SectionBaseFromPde(TPde aPde)
	{
	if(PdeMapsSection(aPde))
		return aPde&KPdeLargePagePhysAddrMask;
	return KPhysAddrInvalid;
	}


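/**
Return a pointer to the page table entry which maps the virtual address
\a aAddress in the address space \a aOsAsid. The caller must know that a
page table exists for the address; SafePtePtrFromLinAddr() below performs
the same lookup but returns 0 when there is no page table.
*/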
TPte* Mmu::PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pt = (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}


TPte* Mmu::SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	TPte* pt = SafePageTableFromPde(pde);
	if(pt)
		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}


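/**
Return the physical address of the page table at virtual address \a aPt.

@pre #MmuLock held or the page tables lock held.
*/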
TPhysAddr Mmu::PageTablePhysAddr(TPte* aPt)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld() || PageTablesLockIsHeld());

	TInt pdeIndex = ((TLinAddr)aPt)>>KChunkShift;
	TPde pde = PageDirectory(KKernelOsAsid)[pdeIndex];
	__NK_ASSERT_DEBUG((pde&(KPdePtePresent|KPdeLargePage))==KPdePtePresent);

	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift));
	TPte pte = pPte[(((TLinAddr)aPt)&KChunkMask)>>KPageShift];
	__NK_ASSERT_DEBUG(pte & KPdePtePresent);

	return pte&KPdePtePhysAddrMask;
	}


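/**
Walk the page directory and page tables of address space \a aOsAsid to
translate the virtual address \a aLinAddr into a physical address.
Handles both large-page and page-table mappings, and returns
KPhysAddrInvalid if the address is not mapped. No validation is performed
on the arguments, hence 'Unchecked'.
*/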
TPhysAddr Mmu::UncheckedLinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
	{
	TRACE2(("Mmu::UncheckedLinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
	TInt pdeIndex = aLinAddr>>KChunkShift;
	TPde pde = PageDirectory(aOsAsid)[pdeIndex];
	TPhysAddr pa=KPhysAddrInvalid;
	if (pde & KPdePtePresent)
		{
		if(pde&KPdeLargePage)
			{
			pa=(pde&KPdeLargePagePhysAddrMask)+(aLinAddr&~KPdeLargePagePhysAddrMask);
			__KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
			}
		else
			{
			SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
			TInt id = (pi->Index(true)<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
			TPte* pPte = (TPte*)(KPageTableBase+(id<<KPageTableShift));
			TPte pte = pPte[(aLinAddr&KChunkMask)>>KPageShift];
			if (pte & KPdePtePresent)
				{
				pa=(pte&KPdePtePhysAddrMask)+(aLinAddr&KPageMask);
				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with page table - returning %08x",pa));
				}
			}
		}
	return pa;
	}


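// First-phase initialisation. Detects whether the CPU supports global
// pages (the PGE feature bit in iCpuId) and sets PteGlobal accordingly;
// on SMP, also assigns each CPU its own IPC alias region together with
// the address of the page directory entry which maps it.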
void Mmu::Init1()
	{
	TRACEB(("Mmu::Init1"));

	TUint pge = TheSuperPage().iCpuId & EX86Feat_PGE;
	PteGlobal = pge ? KPdePteGlobal : 0;
	X86_UseGlobalPTEs = pge!=0;

#ifdef __SMP__
	ApTrampolinePage = KApTrampolinePageLin;

	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler& ss = TheSubSchedulers[i];
		TLinAddr a = KIPCAlias + (i<<KChunkShift);
		ss.i_AliasLinAddr = (TAny*)a;
		ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase + (a>>KChunkShift)*sizeof(TPde));
		}
#endif

	Init1Common();
	}

void Mmu::Init2()
	{
	TRACEB(("Mmu::Init2"));

	Init2Common();
	}

void Mmu::Init2Final()
	{
	TRACEB(("Mmu::Init2Final"));

	Init2FinalCommon();
	}


const TPde KPdeForBlankPageTable = KPdePtePresent|KPdePteWrite|KPdePteUser;

TPde Mmu::BlankPde(TMemoryAttributes aAttributes)
	{
	(void)aAttributes;
	TPde pde = KPdeForBlankPageTable;
	TRACE2(("Mmu::BlankPde(%x) returns 0x%x",aAttributes,pde));
	return pde;
	}


TPde Mmu::BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType)
	{
	return PageToSectionEntry(BlankPte(aAttributes, aPteType), KPdeForBlankPageTable);
	}


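/**
Construct a page table entry, with no physical address, from which
mappings of the given type can be made. The user-access, write and
global bits are taken from \a aPteType; cacheability is derived from the
memory type in \a aAttributes. Only uncached and normal cached memory
types are supported on this platform.
*/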
TPte Mmu::BlankPte(TMemoryAttributes aAttributes, TUint aPteType)
	{
	TPte pte = KPdePtePresent;
	if(aPteType&EPteTypeUserAccess)
		pte |= KPdePteUser;
	if(aPteType&EPteTypeWritable)
		pte |= KPdePteWrite;
	if(aPteType&EPteTypeGlobal)
		pte |= PteGlobal;

	switch((TMemoryType)(aAttributes&EMemoryAttributeTypeMask))
		{
	case EMemAttStronglyOrdered:
	case EMemAttDevice:
	case EMemAttNormalUncached:
		pte |= KPdePteUncached;
		break;
	case EMemAttNormalCached:
		break;
	default:
		__NK_ASSERT_ALWAYS(0);
		break;
		}

	TRACE2(("Mmu::BlankPte(%x,%x) returns 0x%x",aAttributes,aPteType,pte));
	return pte;
	}


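// SectionToPageEntry() and PageToSectionEntry() convert between a
// 'section' (large page) PDE and the equivalent page-level PTE when a
// mapping is split into, or merged from, individual pages. Only the
// attribute bits are carried across; the PDE left behind by
// SectionToPageEntry() is reset to reference a blank page table.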
TPte Mmu::SectionToPageEntry(TPde& aPde)
	{
	TPte pte = aPde&~(KPdePtePhysAddrMask|KPdeLargePage);
	aPde = KPdeForBlankPageTable;
	return pte;
	}


TPde Mmu::PageToSectionEntry(TPte aPte, TPde /*aPde*/)
	{
	TPte pde = aPte&~KPdeLargePagePhysAddrMask;
	pde |= KPdeLargePage;
	return pde;
	}


TMemoryAttributes Mmu::CanonicalMemoryAttributes(TMemoryAttributes aAttr)
	{
	TUint attr = aAttr;
	if(attr&EMemoryAttributeDefaultShareable)
		{
		// sharing not specified, use default...
#if defined(__CPU_USE_SHARED_MEMORY)
		attr |= EMemoryAttributeShareable;
#else
		attr &= ~EMemoryAttributeShareable;
#endif
		}

	// remove invalid attributes...
	attr &= ~(EMemoryAttributeUseECC);

	return (TMemoryAttributes)(attr&EMemoryAttributeMask);
	}


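/**
Perform initialisation on pages which have just been allocated. If the
least significant bit of \a aPageList is set, the list pointer is itself
the physical address of the first of \a aCount contiguous pages;
otherwise it points to an array of \a aCount page-aligned physical
addresses. Unless EAllocNoWipe was specified, each page is mapped at a
temporary address and its contents overwritten with the wipe byte.

@pre RamAllocLock held.
*/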
void Mmu::PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate)
	{
	TRACE2(("Mmu::PagesAllocated(0x%08x,%d,0x%x,%d)",aPageList, aCount, aFlags, (bool)aReallocate));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TBool wipe = !(aFlags&EAllocNoWipe); // do we need to wipe page contents?
	TMemoryType newType = (TMemoryType)(aFlags&KMemoryTypeMask); // memory type that pages will be used for
	TUint8 wipeByte = (aFlags&EAllocUseCustomWipeByte) ? (aFlags>>EAllocWipeByteShift)&0xff : 0x03; // value to wipe memory with

	// process each page in turn...
	while(aCount--)
		{
		// get physical address of next page...
		TPhysAddr pagePhys;
		if((TPhysAddr)aPageList&1)
			{
			// aPageList is actually the physical address to use...
			pagePhys = (TPhysAddr)aPageList&~1;
			*(TPhysAddr*)&aPageList += KPageSize;
			}
		else
			pagePhys = *aPageList++;
		__NK_ASSERT_DEBUG((pagePhys&KPageMask)==0);

		// get info about page...
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		TMemoryType oldType = (TMemoryType)(pi->Flags(true)&KMemoryTypeMask);

		TRACE2(("Mmu::PagesAllocated page=0x%08x, oldType=%d, wipe=%d",pagePhys,oldType,wipe));
		if(wipe)
			{
			// work out temporary mapping values...
			TLinAddr tempLinAddr = iTempMap[0].iLinAddr;
			TPte* tempPte = iTempMap[0].iPtePtr;

			// temporarily map page...
			*tempPte = pagePhys | iTempPteCached;
			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

			// wipe contents of memory...
			memset((TAny*)tempLinAddr, wipeByte, KPageSize);
			__e32_io_completion_barrier();

			// invalidate temporary mapping...
			*tempPte = KPteUnallocatedEntry;
			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
			}

		// indicate page has been allocated...
		if(aReallocate==false)
			pi->SetAllocated();
		}
	}


void Mmu::PageFreed(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	if(aPageInfo->Type()==SPageInfo::EUnused)
		return;

	aPageInfo->SetUnused();

	TRACE2(("Mmu::PageFreed page=0x%08x type=%d colour=%d",aPageInfo->PhysAddr(),aPageInfo->Flags()&KMemoryTypeMask,aPageInfo->Index()&KPageColourMask));
	}


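/**
Clean and invalidate the cache for a list of physical pages which were
mapped with the given attributes, in preparation for reusing the memory.
Each page is mapped at a temporary address for the maintenance operation;
pages whose memory type is uncached need no work. (\a aColour is unused
on x86.)
*/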
void Mmu::CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour)
	{
	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
	if(!CacheMaintenance::IsCached(type))
		{
		TRACE2(("Mmu::CleanAndInvalidatePages - nothing to do"));
		return;
		}

	RamAllocLock::Lock();

	while(aCount--)
		{
		TPhysAddr pagePhys = *aPages++;
		TRACE2(("Mmu::CleanAndInvalidatePages 0x%08x",pagePhys));

		// work out temporary mapping values...
		TLinAddr tempLinAddr = iTempMap[0].iLinAddr;
		TPte* tempPte = iTempMap[0].iPtePtr;

		// temporarily map page...
		*tempPte = pagePhys | iTempPteCached;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		// sort out cache for memory reuse...
		CacheMaintenance::PageToPreserveAndReuse(tempLinAddr, type, KPageSize);

		// invalidate temporary mapping...
		*tempPte = KPteUnallocatedEntry;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		RamAllocLock::Flash();
		}
	RamAllocLock::Unlock();
	}


TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TLinAddr& aAliasAddr, TUint& aAliasSize)
//
// Set up an alias mapping starting at address aAddr in specified process.
// Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
//
	{
	TRACE2(("Thread %O Alias %08x+%x Process %O",this,aAddr,aSize,aProcess));
	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false
	// If there is an existing alias it should be for the same process, otherwise
	// the os asid reference may be leaked.
	__NK_ASSERT_DEBUG(!iAliasLinAddr || aProcess == iAliasProcess);

	if(TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
		return KErrBadDescriptor; // prevent access to alias region

	// Grab the mmu lock before opening a reference on os asid so that this thread
	// is in an implicit critical section and therefore can't leak the reference by
	// dying before iAliasLinAddr is set.
	MmuLock::Lock();

	TInt osAsid;
	if (!iAliasLinAddr)
		{// There isn't any existing alias.
		// Open a reference on aProcess's os asid so that it is not freed and/or reused
		// while we are aliasing an address belonging to it.
		osAsid = aProcess->TryOpenOsAsid();
		if (osAsid < 0)
			{// Couldn't open os asid so aProcess is no longer running.
			MmuLock::Unlock();
			return KErrBadDescriptor;
			}
		}
	else
		{
		// Just read the os asid of the process being aliased; we already have a reference on it.
		osAsid = aProcess->OsAsid();
		}

	// Now that we have the os asid, check access to kernel memory.
	if(aAddr >= KUserMemoryLimit && osAsid != (TUint)KKernelOsAsid)
		{
		if (!iAliasLinAddr)
			{// Close the new reference as RemoveAlias won't, because iAliasLinAddr is not set.
			aProcess->AsyncCloseOsAsid();
			}
		MmuLock::Unlock();
		return KErrBadDescriptor; // prevent access to supervisor only memory
		}

	// Now we know all accesses to global memory are safe, so check if aAddr is global.
	if(aAddr >= KGlobalMemoryBase)
		{
		// address is in global section, don't bother aliasing it...
		if (!iAliasLinAddr)
			{// Close the new reference as it is not required.
			aProcess->AsyncCloseOsAsid();
			}
		else
			{// Remove the existing alias as it is not required.
			DoRemoveAlias(iAliasLinAddr);
			}
		MmuLock::Unlock();
		aAliasAddr = aAddr;
		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
		aAliasSize = aSize<maxSize ? aSize : maxSize;
		TRACE2(("DMemModelThread::Alias() abandoned as memory is globally mapped"));
		return KErrNone;
		}

	TPde* pd = Mmu::PageDirectory(osAsid);
	TInt pdeIndex = aAddr>>KChunkShift;
	TPde pde = pd[pdeIndex];
#ifdef __SMP__
	TLinAddr aliasAddr;
#else
	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
#endif
	if(pde==iAliasPde && iAliasLinAddr)
		{
		// pde already aliased, so just update linear address...
#ifdef __SMP__
		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
		aliasAddr = iAliasLinAddr & ~KChunkMask;
		aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
#endif
		iAliasLinAddr = aliasAddr;
		}
	else
		{
		// alias PDE changed...
		if(!iAliasLinAddr)
			{
			TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
#ifdef __SMP__
			__NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
			iCpuRestoreCookie = NKern::FreezeCpu();	// temporarily lock current thread to this processor
#endif
			}
		iAliasPde = pde;
		iAliasProcess = aProcess;
#ifdef __SMP__
		TSubScheduler& ss = SubScheduler();		// OK since we are locked to this CPU
		aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
		iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (osAsid << KPageTableShift));
#endif
		iAliasLinAddr = aliasAddr;
		*iAliasPdePtr = pde;
		}
	TRACE2(("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
	LocalInvalidateTLBForPage(aliasAddr);
	TInt offset = aAddr&KPageMask;
	aAliasAddr = aliasAddr | offset;
	TInt maxSize = KPageSize - offset;
	aAliasSize = aSize<maxSize ? aSize : maxSize;
	iAliasTarget = aAddr & ~KPageMask;

	MmuLock::Unlock();

	return KErrNone;
	}


void DMemModelThread::RemoveAlias()
//
// Remove alias mapping (if present)
//
	{
	TRACE2(("Thread %O RemoveAlias", this));
	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false

	TLinAddr addr = iAliasLinAddr;
	if(addr)
		{
		MmuLock::Lock();

		DoRemoveAlias(addr);

		MmuLock::Unlock();
		}
	}


/**
Remove the alias mapping.

@pre #MmuLock held.
*/
void DMemModelThread::DoRemoveAlias(TLinAddr aAddr)
	{
	iAliasLinAddr = 0;
	iAliasPde = KPdeUnallocatedEntry;
	*iAliasPdePtr = KPdeUnallocatedEntry;
	SinglePdeUpdated(iAliasPdePtr);
	__NK_ASSERT_DEBUG((aAddr&KPageMask)==0);
	LocalInvalidateTLBForPage(aAddr);
	iAliasLink.Deque();
#ifdef __SMP__
	__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
	NKern::EndFreezeCpu(iCpuRestoreCookie);
	iCpuRestoreCookie = -1;
#endif
	// Must close the os asid while the mmu lock is held to prevent it being
	// leaked. However, this requires that it is closed asynchronously, as an
	// os asid can't be deleted with the mmu lock held.
	iAliasProcess->AsyncCloseOsAsid();
	}


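/**
Called on an exception to determine whether it is a demand-paging fault,
and to service it if so. Returns KErrAbort when the exception is not a
page fault; otherwise the result of Mmu::HandlePageFault(). Only user
memory is demand paged, so the access is checked with EUser permissions.
*/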
TInt M::DemandPagingFault(TAny* aExceptionInfo)
	{
	TX86ExcInfo& exc=*(TX86ExcInfo*)aExceptionInfo;
	if(exc.iExcId!=EX86VectorPageFault)
		return KErrAbort; // not a page fault

	/*
	Meanings of exc.iExcErrorCode when exception type is EX86VectorPageFault...

	Bit 0	0 The fault was caused by a non-present page.
			1 The fault was caused by a page-level protection violation.
	Bit 1	0 The access causing the fault was a read.
			1 The access causing the fault was a write.
	Bit 2	0 The access causing the fault originated when the processor was executing in supervisor mode.
			1 The access causing the fault originated when the processor was executing in user mode.
	Bit 3	0 The fault was not caused by reserved bit violation.
			1 The fault was caused by reserved bits set to 1 in a page directory.
	Bit 4	0 The fault was not caused by an instruction fetch.
			1 The fault was caused by an instruction fetch.
	*/

	// check access type...
	TUint accessPermissions = EUser; // we only allow paging of user memory
	if(exc.iExcErrorCode&(1<<1))
		accessPermissions |= EReadWrite;

	// let TheMmu handle the fault...
	return TheMmu.HandlePageFault(exc.iEip, exc.iFaultAddress, accessPermissions, aExceptionInfo);
	}