kernel/eka/memmodel/epoc/flexible/mmu/mexport.cpp
changeset 9 96e5fb8b040d
equal deleted inserted replaced
-1:000000000000 9:96e5fb8b040d
       
     1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 //
       
    15 
       
    16 #include "memmodel.h"
       
    17 #include "mm.h"
       
    18 #include "mmu.h"
       
    19 
       
    20 #include "mrom.h"
       
    21 
       
    22 /**	Returns the amount of free RAM currently available.
       
    23 
       
    24 @return The number of bytes of free RAM currently available.
       
    25 @pre	any context
       
    26  */
       
    27 EXPORT_C TInt Kern::FreeRamInBytes()
       
    28 	{
       
    29 	TUint numPages = TheMmu.FreeRamInPages();
       
    30 	// hack, clip free RAM to fit into a signed integer...
       
    31 	if(numPages>(KMaxTInt>>KPageShift))
       
    32 		return KMaxTInt;
       
    33 	return numPages*KPageSize;
       
    34 	}
       
    35 
       
    36 
       
    37 /**	Rounds up the argument to the size of a MMU page.
       
    38 
       
    39 	To find out the size of a MMU page:
       
    40 	@code
       
    41 	size = Kern::RoundToPageSize(1);
       
    42 	@endcode
       
    43 
       
    44 	@param aSize Value to round up
       
    45 	@pre any context
       
    46  */
       
    47 EXPORT_C TUint32 Kern::RoundToPageSize(TUint32 aSize)
       
    48 	{
       
    49 	return (aSize+KPageMask)&~KPageMask;
       
    50 	}
       
    51 
       
    52 
       
    53 /**	Rounds up the argument to the amount of memory mapped by a MMU page 
       
    54 	directory entry.
       
    55 
       
    56 	Chunks occupy one or more consecutive page directory entries (PDE) and
       
    57 	therefore the amount of linear and physical memory allocated to a chunk is
       
    58 	always a multiple of the amount of memory mapped by a page directory entry.
       
    59  */
       
    60 EXPORT_C TUint32 Kern::RoundToChunkSize(TUint32 aSize)
       
    61 	{
       
    62 	return (aSize+KChunkMask)&~KChunkMask;
       
    63 	}
       
    64 
       
    65 
       
    66 //
       
    67 // Epoc class
       
    68 // 
       
#ifdef BTRACE_KERNEL_MEMORY
// Accounting counters used only when kernel-memory BTrace is enabled.
TInt   Epoc::DriverAllocdPhysRam = 0;	// physical RAM allocated by device drivers (BTrace accounting)
TInt   Epoc::KernelMiscPages = 0;		// miscellaneous kernel pages (BTrace accounting)
#endif
       
    73 
       
    74 
       
    75 /**
       
    76 Allows the variant to specify the details of the RAM zones. This should be invoked 
       
    77 by the variant in its implementation of the pure virtual function Asic::Init1().
       
    78 
       
    79 There are some limitations to how the RAM zones can be specified:
       
    80 - Each RAM zone's address space must be distinct and not overlap with any 
       
    81 other RAM zone's address space
       
    82 - Each RAM zone's address space must have a size that is multiples of the 
       
    83 ASIC's MMU small page size and be aligned to the ASIC's MMU small page size, 
       
    84 usually 4KB on ARM MMUs.
       
    85 - When taken together all of the RAM zones must cover the whole of the physical RAM
       
    86 address space as specified by the bootstrap in the SuperPage members iTotalRamSize
       
    87 and iRamBootData;.
       
    88 - There can be no more than KMaxRamZones RAM zones specified by the base port
       
    89 
       
    90 Note the verification of the RAM zone data is not performed here but by the ram 
       
    91 allocator later in the boot up sequence.  This is because it is only possible to
       
    92 verify the zone data once the physical RAM configuration has been read from 
       
    93 the super page. Any verification errors result in a "RAM-ALLOC" panic 
       
    94 faulting the kernel during initialisation.
       
    95 
       
    96 @param aZones Pointer to an array of SRamZone structs containing the details for all 
       
    97 the zones. The end of the array is specified by an element with an iSize of zero. The array must 
       
    98 remain in memory at least until the kernel has successfully booted.
       
    99 
       
   100 @param aCallback Pointer to a call back function that the kernel may invoke to request 
       
   101 one of the operations specified by TRamZoneOp.
       
   102 
       
   103 @return KErrNone if successful, otherwise one of the system wide error codes
       
   104 
       
   105 @see TRamZoneOp
       
   106 @see SRamZone
       
   107 @see TRamZoneCallback
       
   108 */
       
   109 EXPORT_C TInt Epoc::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
       
   110 	{
       
   111 	TRamZoneCallback dummy;
       
   112 	// Ensure this is only called once and only while we are initialising the kernel
       
   113 	if (!K::Initialising || TheMmu.RamZoneConfig(dummy) != NULL)
       
   114 		{// fault kernel, won't return
       
   115 		K::Fault(K::EBadSetRamZoneConfig);
       
   116 		}
       
   117 
       
   118 	if (NULL == aZones)
       
   119 		{
       
   120 		return KErrArgument;
       
   121 		}
       
   122 	TheMmu.SetRamZoneConfig(aZones, aCallback);
       
   123 	return KErrNone;
       
   124 	}
       
   125 
       
   126 
       
   127 /**
       
   128 Modify the specified RAM zone's flags.
       
   129 
       
   130 This allows the BSP or device driver to configure which type of pages, if any,
       
   131 can be allocated into a RAM zone by the system.
       
   132 
       
   133 Note: updating a RAM zone's flags can result in
       
   134 	1 - memory allocations failing despite there being enough free RAM in the system.
       
   135 	2 - the methods TRamDefragRequest::EmptyRamZone(), TRamDefragRequest::ClaimRamZone()
       
   136 	or TRamDefragRequest::DefragRam() never succeeding.
       
   137 
       
   138 The flag masks KRamZoneFlagDiscardOnly, KRamZoneFlagMovAndDisOnly and KRamZoneFlagNoAlloc
       
   139 are intended to be used with this method.
       
   140 
       
   141 @param aId			The ID of the RAM zone to modify.
       
   142 @param aClearMask	The bit mask to clear, each flag of which must already be set on the RAM zone.
       
   143 @param aSetMask		The bit mask to set.
       
   144 
       
   145 @return KErrNone on success, KErrArgument if the RAM zone of aId not found or if 
       
   146 aSetMask contains invalid flag bits.
       
   147 
       
   148 @see TRamDefragRequest::EmptyRamZone()
       
   149 @see TRamDefragRequest::ClaimRamZone()
       
   150 @see TRamDefragRequest::DefragRam()
       
   151 
       
   152 @see KRamZoneFlagDiscardOnly
       
   153 @see KRamZoneFlagMovAndDisOnly
       
   154 @see KRamZoneFlagNoAlloc
       
   155 */
       
   156 EXPORT_C TInt Epoc::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
       
   157 	{
       
   158 	RamAllocLock::Lock();
       
   159 	TInt r = TheMmu.ModifyRamZoneFlags(aId, aClearMask, aSetMask);
       
   160 	RamAllocLock::Unlock();
       
   161 	return r;
       
   162 	}
       
   163 
       
   164 
       
   165 /**
       
   166 Gets the current count of a particular RAM zone's pages by type.
       
   167 
       
   168 @param aId The ID of the RAM zone to enquire about
       
   169 @param aPageData If successful, on return this contains the page count
       
   170 
       
   171 @return KErrNone if successful, KErrArgument if a RAM zone of aId is not found or
       
   172 one of the system wide error codes 
       
   173 
       
   174 @pre Calling thread must be in a critical section.
       
   175 @pre Interrupts must be enabled.
       
   176 @pre Kernel must be unlocked.
       
   177 @pre No fast mutex can be held.
       
   178 @pre Call in a thread context.
       
   179 
       
   180 @see SRamZonePageCount
       
   181 */
       
   182 EXPORT_C TInt Epoc::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
       
   183 	{
       
   184 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::GetRamZonePageCount");
       
   185 	RamAllocLock::Lock();
       
   186 	TInt r = TheMmu.GetRamZonePageCount(aId, aPageData);
       
   187 	RamAllocLock::Unlock();
       
   188 	return r;
       
   189 	}
       
   190 
       
   191 
       
   192 /**
       
   193 Allocate a block of physically contiguous RAM with a physical address aligned
       
   194 to a specified power of 2 boundary.
       
   195 When the RAM is no longer required it should be freed using
       
   196 Epoc::FreePhysicalRam()
       
   197 
       
   198 @param	aSize		The size in bytes of the required block. The specified size
       
   199 					is rounded up to the page size, since only whole pages of
       
   200 					physical RAM can be allocated.
       
   201 @param	aPhysAddr	Receives the physical address of the base of the block on
       
   202 					successful allocation.
       
   203 @param	aAlign		Specifies the number of least significant bits of the
       
   204 					physical address which are required to be zero. If a value
       
   205 					less than log2(page size) is specified, page alignment is
       
   206 					assumed. Pass 0 for aAlign if there are no special alignment
       
   207 					constraints (other than page alignment).
       
   208 @return	KErrNone if the allocation was successful.
       
   209 		KErrNoMemory if a sufficiently large physically contiguous block of free
       
   210 		RAM	with the specified alignment could not be found.
       
   211 @pre Calling thread must be in a critical section.
       
   212 @pre Interrupts must be enabled.
       
   213 @pre Kernel must be unlocked.
       
   214 @pre No fast mutex can be held.
       
   215 @pre Call in a thread context.
       
   216 @pre Can be used in a device driver.
       
   217 */
       
   218 EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
       
   219 	{
       
   220 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocPhysicalRam");
       
   221 	RamAllocLock::Lock();
       
   222 	TInt r = TheMmu.AllocPhysicalRam
       
   223 		(
       
   224 		aPhysAddr,
       
   225 		MM::RoundToPageCount(aSize),
       
   226 		MM::RoundToPageShift(aAlign),
       
   227 		(Mmu::TRamAllocFlags)EMemAttStronglyOrdered
       
   228 		);
       
   229 	RamAllocLock::Unlock();
       
   230 	return r;
       
   231 	}
       
   232 
       
   233 
       
   234 /**
       
   235 Allocate a block of physically contiguous RAM with a physical address aligned
       
   236 to a specified power of 2 boundary from the specified zone.
       
   237 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
   238 
       
   239 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
       
   240 to allocate regardless of whether the other flags are set for the specified RAM zones 
       
   241 or not.
       
   242 
       
   243 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
   244 
       
   245 @param 	aZoneId		The ID of the zone to attempt to allocate from.
       
   246 @param	aSize		The size in bytes of the required block. The specified size
       
   247 					is rounded up to the page size, since only whole pages of
       
   248 					physical RAM can be allocated.
       
   249 @param	aPhysAddr	Receives the physical address of the base of the block on
       
   250 					successful allocation.
       
   251 @param	aAlign		Specifies the number of least significant bits of the
       
   252 					physical address which are required to be zero. If a value
       
   253 					less than log2(page size) is specified, page alignment is
       
   254 					assumed. Pass 0 for aAlign if there are no special alignment
       
   255 					constraints (other than page alignment).
       
   256 @return	KErrNone if the allocation was successful.
       
   257 		KErrNoMemory if a sufficiently large physically contiguous block of free
       
   258 		RAM	with the specified alignment could not be found within the specified 
       
   259 		zone.
       
   260 		KErrArgument if a RAM zone of the specified ID can't be found or if the
       
   261 		RAM zone has a total number of physical pages which is less than those 
       
   262 		requested for the allocation.
       
   263 
       
   264 @pre Calling thread must be in a critical section.
       
   265 @pre Interrupts must be enabled.
       
   266 @pre Kernel must be unlocked.
       
   267 @pre No fast mutex can be held.
       
   268 @pre Call in a thread context.
       
   269 @pre Can be used in a device driver.
       
   270 */
       
EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
	{
	// Single-zone convenience overload: treat aZoneId as a one-element zone
	// ID list and delegate to the multi-zone overload, which performs the
	// precondition checks and locking.
	return ZoneAllocPhysicalRam(&aZoneId, 1, aSize, aPhysAddr, aAlign);
	}
       
   275 
       
   276 
       
   277 /**
       
   278 Allocate a block of physically contiguous RAM with a physical address aligned
       
   279 to a specified power of 2 boundary from the specified RAM zones.
       
   280 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
   281 
       
   282 RAM will be allocated into the RAM zones in the order they are specified in the 
       
   283 aZoneIdList parameter. If the contiguous allocations are intended to span RAM zones 
       
   284 when required then aZoneIdList should be listed with the RAM zones in ascending 
       
   285 physical address order.
       
   286 
       
   287 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
       
   288 to allocate regardless of whether the other flags are set for the specified RAM zones 
       
   289 or not.
       
   290 
       
   291 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
   292 
       
   293 @param 	aZoneIdList	A pointer to an array of RAM zone IDs of the RAM zones to 
       
   294 					attempt to allocate from.
       
   295 @param 	aZoneIdCount The number of RAM zone IDs contained in aZoneIdList.
       
   296 @param	aSize		The size in bytes of the required block. The specified size
       
   297 					is rounded up to the page size, since only whole pages of
       
   298 					physical RAM can be allocated.
       
   299 @param	aPhysAddr	Receives the physical address of the base of the block on
       
   300 					successful allocation.
       
   301 @param	aAlign		Specifies the number of least significant bits of the
       
   302 					physical address which are required to be zero. If a value
       
   303 					less than log2(page size) is specified, page alignment is
       
   304 					assumed. Pass 0 for aAlign if there are no special alignment
       
   305 					constraints (other than page alignment).
       
   306 @return	KErrNone if the allocation was successful.
       
   307 		KErrNoMemory if a sufficiently large physically contiguous block of free
       
   308 		RAM	with the specified alignment could not be found within the specified 
       
   309 		zone.
       
   310 		KErrArgument if a RAM zone of a specified ID can't be found or if the
       
   311 		RAM zones have a total number of physical pages which is less than those 
       
   312 		requested for the allocation.
       
   313 
       
   314 @pre Calling thread must be in a critical section.
       
   315 @pre Interrupts must be enabled.
       
   316 @pre Kernel must be unlocked.
       
   317 @pre No fast mutex can be held.
       
   318 @pre Call in a thread context.
       
   319 @pre Can be used in a device driver.
       
   320 */
       
   321 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
       
   322 	{
       
   323 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ZoneAllocPhysicalRam");
       
   324 	RamAllocLock::Lock();
       
   325 	TInt r = TheMmu.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
       
   326 	RamAllocLock::Unlock();
       
   327 	return r;
       
   328 	}
       
   329 
       
   330 
       
   331 /**
       
   332 Attempt to allocate discontiguous RAM pages.
       
   333 
       
   334 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
   335 
       
   336 @param	aNumPages	The number of discontiguous pages required to be allocated
       
   337 @param	aPageList	This should be a pointer to a previously allocated array of
       
   338 					aNumPages TPhysAddr elements.  On a successful allocation it 
       
   339 					will receive the physical addresses of each page allocated.
       
   340 
       
   341 @return	KErrNone if the allocation was successful.
       
   342 		KErrNoMemory if the requested number of pages can't be allocated
       
   343 
       
   344 @pre Calling thread must be in a critical section.
       
   345 @pre Interrupts must be enabled.
       
   346 @pre Kernel must be unlocked.
       
   347 @pre No fast mutex can be held.
       
   348 @pre Call in a thread context.
       
   349 @pre Can be used in a device driver.
       
   350 */
       
   351 EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
       
   352 	{
       
   353 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::AllocPhysicalRam");
       
   354 	RamAllocLock::Lock();
       
   355 	TInt r = TheMmu.AllocPhysicalRam(aPageList,aNumPages,(Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
       
   356 	RamAllocLock::Unlock();
       
   357 	return r;
       
   358 	}
       
   359 
       
   360 
       
   361 /**
       
   362 Attempt to allocate discontiguous RAM pages from the specified zone.
       
   363 
       
   364 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
       
   365 to allocate regardless of whether the other flags are set for the specified RAM zones 
       
   366 or not.
       
   367 
       
   368 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
   369 
       
   370 @param 	aZoneId		The ID of the zone to attempt to allocate from.
       
   371 @param	aNumPages	The number of discontiguous pages required to be allocated 
       
   372 					from the specified zone.
       
   373 @param	aPageList	This should be a pointer to a previously allocated array of
       
   374 					aNumPages TPhysAddr elements.  On a successful 
       
   375 					allocation it will receive the physical addresses of each 
       
   376 					page allocated.
       
   377 @return	KErrNone if the allocation was successful.
       
   378 		KErrNoMemory if the requested number of pages can't be allocated from the 
       
   379 		specified zone.
       
   380 		KErrArgument if a RAM zone of the specified ID can't be found or if the
       
   381 		RAM zone has a total number of physical pages which is less than those 
       
   382 		requested for the allocation.
       
   383 
       
   384 @pre Calling thread must be in a critical section.
       
   385 @pre Interrupts must be enabled.
       
   386 @pre Kernel must be unlocked.
       
   387 @pre No fast mutex can be held.
       
   388 @pre Call in a thread context.
       
   389 @pre Can be used in a device driver.
       
   390 */
       
EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aNumPages, TPhysAddr* aPageList)
	{
	// Single-zone convenience overload: treat aZoneId as a one-element zone
	// ID list and delegate to the multi-zone overload, which performs the
	// precondition checks and locking.
	return ZoneAllocPhysicalRam(&aZoneId, 1, aNumPages, aPageList);
	}
       
   395 
       
   396 
       
   397 /**
       
   398 Attempt to allocate discontiguous RAM pages from the specified RAM zones.
       
   399 The RAM pages will be allocated into the RAM zones in the order that they are specified 
       
   400 in the aZoneIdList parameter, the RAM zone preferences will be ignored.
       
   401 
       
   402 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
       
   403 to allocate regardless of whether the other flags are set for the specified RAM zones 
       
   404 or not.
       
   405 
       
   406 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
   407 
       
   408 @param 	aZoneIdList	A pointer to an array of RAM zone IDs of the RAM zones to 
       
   409 					attempt to allocate from.
       
   410 @param	aZoneIdCount The number of RAM zone IDs pointed to by aZoneIdList.
       
   411 @param	aNumPages	The number of discontiguous pages required to be allocated 
       
   412 					from the specified zone.
       
   413 @param	aPageList	This should be a pointer to a previously allocated array of
       
   414 					aNumPages TPhysAddr elements.  On a successful 
       
   415 					allocation it will receive the physical addresses of each 
       
   416 					page allocated.
       
   417 @return	KErrNone if the allocation was successful.
       
   418 		KErrNoMemory if the requested number of pages can't be allocated from the 
       
   419 		specified zone.
       
   420 		KErrArgument if a RAM zone of a specified ID can't be found or if the
       
   421 		RAM zones have a total number of physical pages which is less than those 
       
   422 		requested for the allocation.
       
   423 
       
   424 @pre Calling thread must be in a critical section.
       
   425 @pre Interrupts must be enabled.
       
   426 @pre Kernel must be unlocked.
       
   427 @pre No fast mutex can be held.
       
   428 @pre Call in a thread context.
       
   429 @pre Can be used in a device driver.
       
   430 */
       
   431 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
       
   432 	{
       
   433 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::ZoneAllocPhysicalRam");
       
   434 	RamAllocLock::Lock();
       
   435 	TInt r = TheMmu.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aNumPages, aPageList);
       
   436 	RamAllocLock::Unlock();
       
   437 	return r;
       
   438 	}
       
   439 
       
   440 
       
   441 /**
       
   442 Free a previously-allocated block of physically contiguous RAM.
       
   443 
       
   444 Specifying one of the following may cause the system to panic: 
       
   445 a) an invalid physical RAM address.
       
   446 b) valid physical RAM addresses where some had not been previously allocated.
       
   447 c) an address not aligned to a page boundary.
       
   448 
       
   449 @param	aPhysAddr	The physical address of the base of the block to be freed.
       
   450 					This must be the address returned by a previous call to
       
   451 					Epoc::AllocPhysicalRam(), Epoc::ZoneAllocPhysicalRam(), 
       
   452 					Epoc::ClaimPhysicalRam() or Epoc::ClaimRamZone().
       
   453 @param	aSize		The size in bytes of the required block. The specified size
       
   454 					is rounded up to the page size, since only whole pages of
       
   455 					physical RAM can be allocated.
       
   456 @return	KErrNone if the operation was successful.
       
   457 
       
   458 
       
   459 
       
   460 @pre Calling thread must be in a critical section.
       
   461 @pre Interrupts must be enabled.
       
   462 @pre Kernel must be unlocked.
       
   463 @pre No fast mutex can be held.
       
   464 @pre Call in a thread context.
       
   465 @pre Can be used in a device driver.
       
   466 */
       
   467 EXPORT_C TInt Epoc::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
       
   468 	{
       
   469 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
       
   470 	RamAllocLock::Lock();
       
   471 	TheMmu.FreePhysicalRam(aPhysAddr,MM::RoundToPageCount(aSize));
       
   472 	RamAllocLock::Unlock();
       
   473 	return KErrNone;
       
   474 	}
       
   475 
       
   476 
       
   477 /**
       
   478 Free a number of physical RAM pages that were previously allocated using
       
   479 Epoc::AllocPhysicalRam() or Epoc::ZoneAllocPhysicalRam().
       
   480 
       
   481 Specifying one of the following may cause the system to panic: 
       
   482 a) an invalid physical RAM address.
       
   483 b) valid physical RAM addresses where some had not been previously allocated.
       
   484 c) an address not aligned to a page boundary.
       
   485 
       
   486 @param	aNumPages	The number of pages to be freed.
       
   487 @param	aPageList	An array of aNumPages TPhysAddr elements.  Where each element
       
   488 					should contain the physical address of each page to be freed.
       
   489 					This must be the same set of addresses as those returned by a 
       
   490 					previous call to Epoc::AllocPhysicalRam() or 
       
   491 					Epoc::ZoneAllocPhysicalRam().
       
   492 @return	KErrNone if the operation was successful.
       
   493   
       
   494 @pre Calling thread must be in a critical section.
       
   495 @pre Interrupts must be enabled.
       
   496 @pre Kernel must be unlocked.
       
   497 @pre No fast mutex can be held.
       
   498 @pre Call in a thread context.
       
   499 @pre Can be used in a device driver.
       
   500 		
       
   501 */
       
   502 EXPORT_C TInt Epoc::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
       
   503 	{
       
   504 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
       
   505 	RamAllocLock::Lock();
       
   506 	TheMmu.FreePhysicalRam(aPageList,aNumPages);
       
   507 	RamAllocLock::Unlock();
       
   508 	return KErrNone;
       
   509 	}
       
   510 
       
   511 
       
   512 /**
       
   513 Allocate a specific block of physically contiguous RAM, specified by physical
       
   514 base address and size.
       
   515 If and when the RAM is no longer required it should be freed using
       
   516 Epoc::FreePhysicalRam()
       
   517 
       
   518 @param	aPhysAddr	The physical address of the base of the required block.
       
   519 @param	aSize		The size in bytes of the required block. The specified size
       
   520 					is rounded up to the page size, since only whole pages of
       
   521 					physical RAM can be allocated.
       
   522 @return	KErrNone if the operation was successful.
       
   523 		KErrArgument if the range of physical addresses specified included some
       
   524 					which are not valid physical RAM addresses.
       
   525 		KErrInUse	if the range of physical addresses specified are all valid
       
   526 					physical RAM addresses but some of them have already been
       
   527 					allocated for other purposes.
       
   528 @pre Calling thread must be in a critical section.
       
   529 @pre Interrupts must be enabled.
       
   530 @pre Kernel must be unlocked.
       
   531 @pre No fast mutex can be held.
       
   532 @pre Call in a thread context.
       
   533 @pre Can be used in a device driver.
       
   534 */
       
   535 EXPORT_C TInt Epoc::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
       
   536 	{
       
   537 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ClaimPhysicalRam");
       
   538 	RamAllocLock::Lock();
       
   539 	TInt r = TheMmu.ClaimPhysicalRam
       
   540 		(
       
   541 		aPhysAddr,
       
   542 		MM::RoundToPageCount(aSize),
       
   543 		(Mmu::TRamAllocFlags)EMemAttStronglyOrdered
       
   544 		);
       
   545 	RamAllocLock::Unlock();
       
   546 	return r;
       
   547 	}
       
   548 
       
   549 
       
   550 /**
       
   551 Translate a virtual address to the corresponding physical address.
       
   552 
       
   553 @param	aLinAddr	The virtual address to be translated.
       
   554 @return	The physical address corresponding to the given virtual address, or
       
   555 		KPhysAddrInvalid if the specified virtual address is unmapped.
       
   556 @pre Interrupts must be enabled.
       
   557 @pre Kernel must be unlocked.
       
   558 @pre Call in a thread context.
       
   559 @pre Can be used in a device driver.
       
   560 */
       
EXPORT_C TPhysAddr Epoc::LinearToPhysical(TLinAddr aLinAddr)
	{
//	This precondition is violated by various parts of the system under some conditions,
//	e.g. when __FLUSH_PT_INTO_RAM__ is defined. This function might also be called by
//	a higher-level RTOS for which these conditions are meaningless. Thus, it's been
//	disabled for now.
//	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"Epoc::LinearToPhysical");

	// When called by a higher-level OS we may not be in a DThread context, so avoid looking up the
	// current process in the DThread for a global address
	TInt osAsid = KKernelOsAsid;
	if (aLinAddr < KGlobalMemoryBase)
		{
		// Address is below the global region, so translate it in the
		// address space of the current thread's process.
		// Get the os asid of current thread's process so no need to open a reference on it.
		DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
		osAsid = pP->OsAsid();
		}
	
#if 1
	// Unchecked translation, performed without taking MmuLock.
	// NOTE(review): the MmuLock-protected alternative below is compiled
	// out; confirm the unchecked walk is safe in all calling contexts.
	return Mmu::UncheckedLinearToPhysical(aLinAddr, osAsid);
#else
	MmuLock::Lock();
	TPhysAddr addr =  Mmu::LinearToPhysical(aLinAddr, osAsid);
	MmuLock::Unlock();
	return addr;
#endif
	}
       
   588 
       
   589 
       
   590 //
       
   591 // Misc
       
   592 //
       
   593 
       
   594 EXPORT_C TInt TInternalRamDrive::MaxSize()
       
   595 	{
       
   596 	TUint maxPages = (TUint(TheSuperPage().iRamDriveSize)>>KPageShift)+TheMmu.FreeRamInPages(); // current size plus spare memory
       
   597 	TUint maxPages2 = TUint(PP::RamDriveMaxSize)>>KPageShift;
       
   598 	if(maxPages>maxPages2)
       
   599 		maxPages = maxPages2;
       
   600 	return maxPages*KPageSize;
       
   601 	}
       
   602 
       
   603 
       
// Returns the size in bytes of one MMU page.
TInt M::PageSizeInBytes()
	{
	return KPageSize;
	}
       
   608 
       
   609 
       
#ifdef BTRACE_KERNEL_MEMORY
// Emits the initial state of the kernel-memory BTrace category.
// Not yet implemented for the flexible memory model.
void M::BTracePrime(TUint aCategory)
	{
	// TODO:
	}
#endif
       
   616 
       
   617 
       
   618 
       
   619 //
       
   620 // DPlatChunkHw
       
   621 //
       
   622 
       
   623 /**
       
   624 Create a hardware chunk object, optionally mapping a specified block of physical
       
   625 addresses with specified access permissions and cache policy.
       
   626 
       
   627 When the mapping is no longer required, close the chunk using chunk->Close(0);
       
   628 Note that closing a chunk does not free any RAM pages which were mapped by the
       
   629 chunk - these must be freed separately using Epoc::FreePhysicalRam().
       
   630 
       
   631 @param	aChunk	Upon successful completion this parameter receives a pointer to
       
   632 				the newly created chunk. Upon unsuccessful completion it is
       
   633 				written with a NULL pointer. The virtual address of the mapping
       
   634 				can subsequently be discovered using the LinearAddress()
       
   635 				function on the chunk.
       
   636 @param	aAddr	The base address of the physical region to be mapped. This will
       
   637 				be rounded down to a multiple of the hardware page size before
       
   638 				being used.
       
   639 @param	aSize	The size of the physical address region to be mapped. This will
       
   640 				be rounded up to a multiple of the hardware page size before
       
   641 				being used; the rounding is such that the entire range from
       
   642 				aAddr to aAddr+aSize-1 inclusive is mapped. For example if
       
   643 				aAddr=0xB0001FFF, aSize=2 and the hardware page size is 4KB, an
       
   644 				8KB range of physical addresses from 0xB0001000 to 0xB0002FFF
       
   645 				inclusive will be mapped.
       
   646 @param	aMapAttr Mapping attributes required for the mapping. This is formed
       
   647 				by ORing together values from the TMappingAttributes enumeration
       
   648 				to specify the access permissions and caching policy.
       
   649 
       
   650 @pre Calling thread must be in a critical section.
       
   651 @pre Interrupts must be enabled.
       
   652 @pre Kernel must be unlocked.
       
   653 @pre No fast mutex can be held.
       
   654 @pre Call in a thread context.
       
   655 @pre Can be used in a device driver.
       
   656 @see TMappingAttributes
       
   657 */
       
   658 EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
       
   659 	{
       
   660 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
       
   661 	__KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aMapAttr));
       
   662 
       
   663 	aChunk = NULL;
       
   664 
       
   665 	// check size...
       
   666 	if(aSize<=0)
       
   667 		return KErrArgument;
       
   668 	TPhysAddr end = aAddr+aSize-1;
       
   669 	if(end<aAddr) // overflow?
       
   670 		return KErrArgument;
       
   671 	aAddr &= ~KPageMask;
       
   672 	TUint pageCount = (end>>KPageShift)-(aAddr>>KPageShift)+1;
       
   673 
       
   674 	// check attributes...
       
   675 	TMappingPermissions perm;
       
   676 	TInt r = MM::MappingPermissions(perm,*(TMappingAttributes2*)&aMapAttr);
       
   677 	if(r!=KErrNone)
       
   678 		return r;
       
   679 	TMemoryAttributes attr;
       
   680 	r = MM::MemoryAttributes(attr,*(TMappingAttributes2*)&aMapAttr);
       
   681 	if(r!=KErrNone)
       
   682 		return r;
       
   683 
       
   684 	// construct a hardware chunk...
       
   685 	DMemModelChunkHw* pC = new DMemModelChunkHw;
       
   686 	if(!pC)
       
   687 		return KErrNoMemory;
       
   688 
       
   689 	// set the executable flags based on the specified mapping permissions...
       
   690 	TMemoryCreateFlags flags = EMemoryCreateDefault;
       
   691 	if(perm&EExecute)
       
   692 		flags = (TMemoryCreateFlags)(flags|EMemoryCreateAllowExecution);
       
   693 
       
   694 	r = MM::MemoryNew(pC->iMemoryObject, EMemoryObjectHardware, pageCount, flags, attr);
       
   695 	if(r==KErrNone)
       
   696 		{
       
   697 		r = MM::MemoryAddContiguous(pC->iMemoryObject,0,pageCount,aAddr);
       
   698 		if(r==KErrNone)
       
   699 			{
       
   700 			r = MM::MappingNew(pC->iKernelMapping,pC->iMemoryObject,perm,KKernelOsAsid);
       
   701 			if(r==KErrNone)
       
   702 				{
       
   703 				pC->iPhysAddr = aAddr;
       
   704 				pC->iLinAddr = MM::MappingBase(pC->iKernelMapping);
       
   705 				pC->iSize = pageCount<<KPageShift;
       
   706 				const TMappingAttributes2& lma = MM::LegacyMappingAttributes(attr,perm); // not needed, but keep in case someone uses this internal member
       
   707 				*(TMappingAttributes2*)&pC->iAttribs = lma;
       
   708 				}
       
   709 			}
       
   710 		}
       
   711 
       
   712 	if(r==KErrNone)
       
   713 		aChunk = pC;
       
   714 	else
       
   715 		pC->Close(NULL);
       
   716 	return r;
       
   717 	}
       
   718 
       
   719 
       
   720 TInt DMemModelChunkHw::Close(TAny*)
       
   721 	{
       
   722 	__KTRACE_OPT2(KOBJECT,KMMU,Kern::Printf("DMemModelChunkHw::Close %d %O",AccessCount(),this));
       
   723 	TInt r = Dec();
       
   724 	if(r==1)
       
   725 		{
       
   726 		MM::MappingDestroy(iKernelMapping);
       
   727 		MM::MemoryDestroy(iMemoryObject);
       
   728 		DBase::Delete(this);
       
   729 		}
       
   730 	return r;
       
   731 	}
       
   732 
       
   733 
       
   734 
       
   735 //
       
   736 // Demand Paging
       
   737 //
       
   738 
       
   739 #ifdef _DEBUG
       
   740 extern "C" void ASMCheckPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddres, TUint aLength)
       
   741 	{
       
   742 	if(M::CheckPagingSafe(EFalse, aStartAddres, aLength))
       
   743 		return;
       
   744 	Kern::Printf("ASM_ASSERT_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR);
       
   745 	__NK_ASSERT_ALWAYS(0);
       
   746 	}
       
   747 
       
   748 extern "C" void ASMCheckDataPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddres, TUint aLength)
       
   749 	{
       
   750 	if(M::CheckPagingSafe(ETrue, aStartAddres, aLength))
       
   751 		return;
       
   752 	__KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: ASM_ASSERT_DATA_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR));
       
   753 	}
       
   754 #endif
       
   755 
       
   756 
       
   757 DMutex* CheckMutexOrder()
       
   758 	{
       
   759 #ifdef _DEBUG
       
   760 	SDblQue& ml = TheCurrentThread->iMutexList;
       
   761 	if(ml.IsEmpty())
       
   762 		return NULL;
       
   763 	DMutex* mm = _LOFF(ml.First(), DMutex, iOrderLink);
       
   764 	if (KMutexOrdPageOut >= mm->iOrder)
       
   765 		return mm;
       
   766 #endif
       
   767 	return NULL;
       
   768 	}
       
   769 
       
   770 
       
   771 TBool M::CheckPagingSafe(TBool aDataPaging, TLinAddr aStartAddr, TUint aLength)
       
   772 	{
       
   773 	if(K::Initialising)
       
   774 		return ETrue;
       
   775 	
       
   776 	NThread* nt = NCurrentThread();
       
   777 	if(!nt)
       
   778 		return ETrue; // We've not booted properly yet!
       
   779 
       
   780 	if(aStartAddr>=KUserMemoryLimit)
       
   781 		return ETrue; // kernel memory can't be paged
       
   782 
       
   783 	if(IsUnpagedRom(aStartAddr,aLength))
       
   784 		return ETrue;
       
   785 
       
   786 	TBool dataPagingEnabled = K::MemModelAttributes&EMemModelAttrDataPaging;
       
   787 
       
   788 	DThread* thread = _LOFF(nt,DThread,iNThread);
       
   789 	NFastMutex* fm = NKern::HeldFastMutex();
       
   790 	if(fm)
       
   791 		{
       
   792 		if(!thread->iPagingExcTrap || fm!=&TheScheduler.iLock)
       
   793 			{
       
   794 			if (!aDataPaging)
       
   795 				{
       
   796 				__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: CheckPagingSafe FAILED - FM Held"));
       
   797 				return EFalse;
       
   798 				}
       
   799 			else
       
   800 				{
       
   801 				__KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: CheckPagingSafe FAILED - FM Held"));
       
   802 				return !dataPagingEnabled;
       
   803 				}
       
   804 			}
       
   805 		}
       
   806 
       
   807 	DMutex* m = CheckMutexOrder();
       
   808 	if (m)
       
   809 		{
       
   810 		if (!aDataPaging)
       
   811 			{
       
   812 			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Mutex Order Fault %O",m));
       
   813 			return EFalse;
       
   814 			}
       
   815 		else
       
   816 			{
       
   817 			__KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: Mutex Order Fault %O mem=%x+%x",m,aStartAddr,aLength));
       
   818 			return !dataPagingEnabled;
       
   819 			}
       
   820 		}
       
   821 	
       
   822 	return ETrue;
       
   823 	}
       
   824 
       
   825 
       
   826 
       
/** Default implementation: no action. Presumably overridden by paging
	device drivers that care about idle transitions — confirm against the
	DPagingDevice class declaration. */
EXPORT_C void DPagingDevice::NotifyIdle()
	{
	}
       
   830 
       
/** Default implementation: no action. Presumably overridden by paging
	device drivers that care about busy transitions — confirm against the
	DPagingDevice class declaration. */
EXPORT_C void DPagingDevice::NotifyBusy()
	{
	}