|
1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of the License "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // |
|
15 |
|
16 #include "memmodel.h" |
|
17 #include "mm.h" |
|
18 #include "mmu.h" |
|
19 |
|
20 #include "mrom.h" |
|
21 |
|
22 /** Returns the amount of free RAM currently available. |
|
23 |
|
24 @return The number of bytes of free RAM currently available. |
|
25 @pre any context |
|
26 */ |
|
27 EXPORT_C TInt Kern::FreeRamInBytes() |
|
28 { |
|
29 TUint numPages = TheMmu.FreeRamInPages(); |
|
30 // hack, clip free RAM to fit into a signed integer... |
|
31 if(numPages>(KMaxTInt>>KPageShift)) |
|
32 return KMaxTInt; |
|
33 return numPages*KPageSize; |
|
34 } |
|
35 |
|
36 |
|
37 /** Rounds up the argument to the size of a MMU page. |
|
38 |
|
39 To find out the size of a MMU page: |
|
40 @code |
|
41 size = Kern::RoundToPageSize(1); |
|
42 @endcode |
|
43 |
|
44 @param aSize Value to round up |
|
45 @pre any context |
|
46 */ |
|
47 EXPORT_C TUint32 Kern::RoundToPageSize(TUint32 aSize) |
|
48 { |
|
49 return (aSize+KPageMask)&~KPageMask; |
|
50 } |
|
51 |
|
52 |
|
53 /** Rounds up the argument to the amount of memory mapped by a MMU page |
|
54 directory entry. |
|
55 |
|
56 Chunks occupy one or more consecutive page directory entries (PDE) and |
|
57 therefore the amount of linear and physical memory allocated to a chunk is |
|
58 always a multiple of the amount of memory mapped by a page directory entry. |
|
59 */ |
|
60 EXPORT_C TUint32 Kern::RoundToChunkSize(TUint32 aSize) |
|
61 { |
|
62 return (aSize+KChunkMask)&~KChunkMask; |
|
63 } |
|
64 |
|
65 |
|
66 // |
|
67 // Epoc class |
|
68 // |
|
#ifdef BTRACE_KERNEL_MEMORY
// Accounting totals maintained only when kernel-memory BTrace is enabled.
// Presumably tracked in bytes/pages respectively for trace output — confirm
// against the sites that update them (not visible in this file).
TInt Epoc::DriverAllocdPhysRam = 0;	// physical RAM allocated by device drivers
TInt Epoc::KernelMiscPages = 0;		// miscellaneous kernel page count
#endif
|
73 |
|
74 |
|
75 /** |
|
76 Allows the variant to specify the details of the RAM zones. This should be invoked |
|
77 by the variant in its implementation of the pure virtual function Asic::Init1(). |
|
78 |
|
79 There are some limitations to how the RAM zones can be specified: |
|
80 - Each RAM zone's address space must be distinct and not overlap with any |
|
81 other RAM zone's address space |
|
82 - Each RAM zone's address space must have a size that is multiples of the |
|
83 ASIC's MMU small page size and be aligned to the ASIC's MMU small page size, |
|
84 usually 4KB on ARM MMUs. |
|
85 - When taken together all of the RAM zones must cover the whole of the physical RAM |
|
86 address space as specified by the bootstrap in the SuperPage members iTotalRamSize |
|
87 and iRamBootData;. |
|
88 - There can be no more than KMaxRamZones RAM zones specified by the base port |
|
89 |
|
90 Note the verification of the RAM zone data is not performed here but by the ram |
|
91 allocator later in the boot up sequence. This is because it is only possible to |
|
92 verify the zone data once the physical RAM configuration has been read from |
|
93 the super page. Any verification errors result in a "RAM-ALLOC" panic |
|
94 faulting the kernel during initialisation. |
|
95 |
|
96 @param aZones Pointer to an array of SRamZone structs containing the details for all |
|
97 the zones. The end of the array is specified by an element with an iSize of zero. The array must |
|
98 remain in memory at least until the kernel has successfully booted. |
|
99 |
|
100 @param aCallback Pointer to a call back function that the kernel may invoke to request |
|
101 one of the operations specified by TRamZoneOp. |
|
102 |
|
103 @return KErrNone if successful, otherwise one of the system wide error codes |
|
104 |
|
105 @see TRamZoneOp |
|
106 @see SRamZone |
|
107 @see TRamZoneCallback |
|
108 */ |
|
109 EXPORT_C TInt Epoc::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback) |
|
110 { |
|
111 TRamZoneCallback dummy; |
|
112 // Ensure this is only called once and only while we are initialising the kernel |
|
113 if (!K::Initialising || TheMmu.RamZoneConfig(dummy) != NULL) |
|
114 {// fault kernel, won't return |
|
115 K::Fault(K::EBadSetRamZoneConfig); |
|
116 } |
|
117 |
|
118 if (NULL == aZones) |
|
119 { |
|
120 return KErrArgument; |
|
121 } |
|
122 TheMmu.SetRamZoneConfig(aZones, aCallback); |
|
123 return KErrNone; |
|
124 } |
|
125 |
|
126 |
|
127 /** |
|
128 Modify the specified RAM zone's flags. |
|
129 |
|
130 This allows the BSP or device driver to configure which type of pages, if any, |
|
131 can be allocated into a RAM zone by the system. |
|
132 |
|
133 Note: updating a RAM zone's flags can result in |
|
134 1 - memory allocations failing despite there being enough free RAM in the system. |
|
135 2 - the methods TRamDefragRequest::EmptyRamZone(), TRamDefragRequest::ClaimRamZone() |
|
136 or TRamDefragRequest::DefragRam() never succeeding. |
|
137 |
|
138 The flag masks KRamZoneFlagDiscardOnly, KRamZoneFlagMovAndDisOnly and KRamZoneFlagNoAlloc |
|
139 are intended to be used with this method. |
|
140 |
|
141 @param aId The ID of the RAM zone to modify. |
|
142 @param aClearMask The bit mask to clear, each flag of which must already be set on the RAM zone. |
|
143 @param aSetMask The bit mask to set. |
|
144 |
|
145 @return KErrNone on success, KErrArgument if the RAM zone of aId not found or if |
|
146 aSetMask contains invalid flag bits. |
|
147 |
|
148 @see TRamDefragRequest::EmptyRamZone() |
|
149 @see TRamDefragRequest::ClaimRamZone() |
|
150 @see TRamDefragRequest::DefragRam() |
|
151 |
|
152 @see KRamZoneFlagDiscardOnly |
|
153 @see KRamZoneFlagMovAndDisOnly |
|
154 @see KRamZoneFlagNoAlloc |
|
155 */ |
|
156 EXPORT_C TInt Epoc::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask) |
|
157 { |
|
158 RamAllocLock::Lock(); |
|
159 TInt r = TheMmu.ModifyRamZoneFlags(aId, aClearMask, aSetMask); |
|
160 RamAllocLock::Unlock(); |
|
161 return r; |
|
162 } |
|
163 |
|
164 |
|
165 /** |
|
166 Gets the current count of a particular RAM zone's pages by type. |
|
167 |
|
168 @param aId The ID of the RAM zone to enquire about |
|
169 @param aPageData If successful, on return this contains the page count |
|
170 |
|
171 @return KErrNone if successful, KErrArgument if a RAM zone of aId is not found or |
|
172 one of the system wide error codes |
|
173 |
|
174 @pre Calling thread must be in a critical section. |
|
175 @pre Interrupts must be enabled. |
|
176 @pre Kernel must be unlocked. |
|
177 @pre No fast mutex can be held. |
|
178 @pre Call in a thread context. |
|
179 |
|
180 @see SRamZonePageCount |
|
181 */ |
|
182 EXPORT_C TInt Epoc::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData) |
|
183 { |
|
184 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::GetRamZonePageCount"); |
|
185 RamAllocLock::Lock(); |
|
186 TInt r = TheMmu.GetRamZonePageCount(aId, aPageData); |
|
187 RamAllocLock::Unlock(); |
|
188 return r; |
|
189 } |
|
190 |
|
191 |
|
192 /** |
|
193 Allocate a block of physically contiguous RAM with a physical address aligned |
|
194 to a specified power of 2 boundary. |
|
195 When the RAM is no longer required it should be freed using |
|
196 Epoc::FreePhysicalRam() |
|
197 |
|
198 @param aSize The size in bytes of the required block. The specified size |
|
199 is rounded up to the page size, since only whole pages of |
|
200 physical RAM can be allocated. |
|
201 @param aPhysAddr Receives the physical address of the base of the block on |
|
202 successful allocation. |
|
203 @param aAlign Specifies the number of least significant bits of the |
|
204 physical address which are required to be zero. If a value |
|
205 less than log2(page size) is specified, page alignment is |
|
206 assumed. Pass 0 for aAlign if there are no special alignment |
|
207 constraints (other than page alignment). |
|
208 @return KErrNone if the allocation was successful. |
|
209 KErrNoMemory if a sufficiently large physically contiguous block of free |
|
210 RAM with the specified alignment could not be found. |
|
211 @pre Calling thread must be in a critical section. |
|
212 @pre Interrupts must be enabled. |
|
213 @pre Kernel must be unlocked. |
|
214 @pre No fast mutex can be held. |
|
215 @pre Call in a thread context. |
|
216 @pre Can be used in a device driver. |
|
217 */ |
|
218 EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign) |
|
219 { |
|
220 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocPhysicalRam"); |
|
221 RamAllocLock::Lock(); |
|
222 TInt r = TheMmu.AllocPhysicalRam |
|
223 ( |
|
224 aPhysAddr, |
|
225 MM::RoundToPageCount(aSize), |
|
226 MM::RoundToPageShift(aAlign), |
|
227 (Mmu::TRamAllocFlags)EMemAttStronglyOrdered |
|
228 ); |
|
229 RamAllocLock::Unlock(); |
|
230 return r; |
|
231 } |
|
232 |
|
233 |
|
234 /** |
|
235 Allocate a block of physically contiguous RAM with a physical address aligned |
|
236 to a specified power of 2 boundary from the specified zone. |
|
237 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). |
|
238 |
|
239 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt |
|
240 to allocate regardless of whether the other flags are set for the specified RAM zones |
|
241 or not. |
|
242 |
|
243 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). |
|
244 |
|
245 @param aZoneId The ID of the zone to attempt to allocate from. |
|
246 @param aSize The size in bytes of the required block. The specified size |
|
247 is rounded up to the page size, since only whole pages of |
|
248 physical RAM can be allocated. |
|
249 @param aPhysAddr Receives the physical address of the base of the block on |
|
250 successful allocation. |
|
251 @param aAlign Specifies the number of least significant bits of the |
|
252 physical address which are required to be zero. If a value |
|
253 less than log2(page size) is specified, page alignment is |
|
254 assumed. Pass 0 for aAlign if there are no special alignment |
|
255 constraints (other than page alignment). |
|
256 @return KErrNone if the allocation was successful. |
|
257 KErrNoMemory if a sufficiently large physically contiguous block of free |
|
258 RAM with the specified alignment could not be found within the specified |
|
259 zone. |
|
260 KErrArgument if a RAM zone of the specified ID can't be found or if the |
|
261 RAM zone has a total number of physical pages which is less than those |
|
262 requested for the allocation. |
|
263 |
|
264 @pre Calling thread must be in a critical section. |
|
265 @pre Interrupts must be enabled. |
|
266 @pre Kernel must be unlocked. |
|
267 @pre No fast mutex can be held. |
|
268 @pre Call in a thread context. |
|
269 @pre Can be used in a device driver. |
|
270 */ |
|
271 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign) |
|
272 { |
|
273 return ZoneAllocPhysicalRam(&aZoneId, 1, aSize, aPhysAddr, aAlign); |
|
274 } |
|
275 |
|
276 |
|
277 /** |
|
278 Allocate a block of physically contiguous RAM with a physical address aligned |
|
279 to a specified power of 2 boundary from the specified RAM zones. |
|
280 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). |
|
281 |
|
282 RAM will be allocated into the RAM zones in the order they are specified in the |
|
283 aZoneIdList parameter. If the contiguous allocations are intended to span RAM zones |
|
284 when required then aZoneIdList should be listed with the RAM zones in ascending |
|
285 physical address order. |
|
286 |
|
287 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt |
|
288 to allocate regardless of whether the other flags are set for the specified RAM zones |
|
289 or not. |
|
290 |
|
291 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). |
|
292 |
|
293 @param aZoneIdList A pointer to an array of RAM zone IDs of the RAM zones to |
|
294 attempt to allocate from. |
|
295 @param aZoneIdCount The number of RAM zone IDs contained in aZoneIdList. |
|
296 @param aSize The size in bytes of the required block. The specified size |
|
297 is rounded up to the page size, since only whole pages of |
|
298 physical RAM can be allocated. |
|
299 @param aPhysAddr Receives the physical address of the base of the block on |
|
300 successful allocation. |
|
301 @param aAlign Specifies the number of least significant bits of the |
|
302 physical address which are required to be zero. If a value |
|
303 less than log2(page size) is specified, page alignment is |
|
304 assumed. Pass 0 for aAlign if there are no special alignment |
|
305 constraints (other than page alignment). |
|
306 @return KErrNone if the allocation was successful. |
|
307 KErrNoMemory if a sufficiently large physically contiguous block of free |
|
308 RAM with the specified alignment could not be found within the specified |
|
309 zone. |
|
310 KErrArgument if a RAM zone of a specified ID can't be found or if the |
|
311 RAM zones have a total number of physical pages which is less than those |
|
312 requested for the allocation. |
|
313 |
|
314 @pre Calling thread must be in a critical section. |
|
315 @pre Interrupts must be enabled. |
|
316 @pre Kernel must be unlocked. |
|
317 @pre No fast mutex can be held. |
|
318 @pre Call in a thread context. |
|
319 @pre Can be used in a device driver. |
|
320 */ |
|
321 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign) |
|
322 { |
|
323 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ZoneAllocPhysicalRam"); |
|
324 RamAllocLock::Lock(); |
|
325 TInt r = TheMmu.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign); |
|
326 RamAllocLock::Unlock(); |
|
327 return r; |
|
328 } |
|
329 |
|
330 |
|
331 /** |
|
332 Attempt to allocate discontiguous RAM pages. |
|
333 |
|
334 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). |
|
335 |
|
336 @param aNumPages The number of discontiguous pages required to be allocated |
|
337 @param aPageList This should be a pointer to a previously allocated array of |
|
338 aNumPages TPhysAddr elements. On a successful allocation it |
|
339 will receive the physical addresses of each page allocated. |
|
340 |
|
341 @return KErrNone if the allocation was successful. |
|
342 KErrNoMemory if the requested number of pages can't be allocated |
|
343 |
|
344 @pre Calling thread must be in a critical section. |
|
345 @pre Interrupts must be enabled. |
|
346 @pre Kernel must be unlocked. |
|
347 @pre No fast mutex can be held. |
|
348 @pre Call in a thread context. |
|
349 @pre Can be used in a device driver. |
|
350 */ |
|
351 EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList) |
|
352 { |
|
353 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::AllocPhysicalRam"); |
|
354 RamAllocLock::Lock(); |
|
355 TInt r = TheMmu.AllocPhysicalRam(aPageList,aNumPages,(Mmu::TRamAllocFlags)EMemAttStronglyOrdered); |
|
356 RamAllocLock::Unlock(); |
|
357 return r; |
|
358 } |
|
359 |
|
360 |
|
361 /** |
|
362 Attempt to allocate discontiguous RAM pages from the specified zone. |
|
363 |
|
364 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt |
|
365 to allocate regardless of whether the other flags are set for the specified RAM zones |
|
366 or not. |
|
367 |
|
368 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). |
|
369 |
|
370 @param aZoneId The ID of the zone to attempt to allocate from. |
|
371 @param aNumPages The number of discontiguous pages required to be allocated |
|
372 from the specified zone. |
|
373 @param aPageList This should be a pointer to a previously allocated array of |
|
374 aNumPages TPhysAddr elements. On a successful |
|
375 allocation it will receive the physical addresses of each |
|
376 page allocated. |
|
377 @return KErrNone if the allocation was successful. |
|
378 KErrNoMemory if the requested number of pages can't be allocated from the |
|
379 specified zone. |
|
380 KErrArgument if a RAM zone of the specified ID can't be found or if the |
|
381 RAM zone has a total number of physical pages which is less than those |
|
382 requested for the allocation. |
|
383 |
|
384 @pre Calling thread must be in a critical section. |
|
385 @pre Interrupts must be enabled. |
|
386 @pre Kernel must be unlocked. |
|
387 @pre No fast mutex can be held. |
|
388 @pre Call in a thread context. |
|
389 @pre Can be used in a device driver. |
|
390 */ |
|
391 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aNumPages, TPhysAddr* aPageList) |
|
392 { |
|
393 return ZoneAllocPhysicalRam(&aZoneId, 1, aNumPages, aPageList); |
|
394 } |
|
395 |
|
396 |
|
397 /** |
|
398 Attempt to allocate discontiguous RAM pages from the specified RAM zones. |
|
399 The RAM pages will be allocated into the RAM zones in the order that they are specified |
|
400 in the aZoneIdList parameter, the RAM zone preferences will be ignored. |
|
401 |
|
402 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt |
|
403 to allocate regardless of whether the other flags are set for the specified RAM zones |
|
404 or not. |
|
405 |
|
406 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). |
|
407 |
|
408 @param aZoneIdList A pointer to an array of RAM zone IDs of the RAM zones to |
|
409 attempt to allocate from. |
|
410 @param aZoneIdCount The number of RAM zone IDs pointed to by aZoneIdList. |
|
411 @param aNumPages The number of discontiguous pages required to be allocated |
|
412 from the specified zone. |
|
413 @param aPageList This should be a pointer to a previously allocated array of |
|
414 aNumPages TPhysAddr elements. On a successful |
|
415 allocation it will receive the physical addresses of each |
|
416 page allocated. |
|
417 @return KErrNone if the allocation was successful. |
|
418 KErrNoMemory if the requested number of pages can't be allocated from the |
|
419 specified zone. |
|
420 KErrArgument if a RAM zone of a specified ID can't be found or if the |
|
421 RAM zones have a total number of physical pages which is less than those |
|
422 requested for the allocation. |
|
423 |
|
424 @pre Calling thread must be in a critical section. |
|
425 @pre Interrupts must be enabled. |
|
426 @pre Kernel must be unlocked. |
|
427 @pre No fast mutex can be held. |
|
428 @pre Call in a thread context. |
|
429 @pre Can be used in a device driver. |
|
430 */ |
|
431 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList) |
|
432 { |
|
433 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::ZoneAllocPhysicalRam"); |
|
434 RamAllocLock::Lock(); |
|
435 TInt r = TheMmu.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aNumPages, aPageList); |
|
436 RamAllocLock::Unlock(); |
|
437 return r; |
|
438 } |
|
439 |
|
440 |
|
441 /** |
|
442 Free a previously-allocated block of physically contiguous RAM. |
|
443 |
|
444 Specifying one of the following may cause the system to panic: |
|
445 a) an invalid physical RAM address. |
|
446 b) valid physical RAM addresses where some had not been previously allocated. |
|
447 c) an address not aligned to a page boundary. |
|
448 |
|
449 @param aPhysAddr The physical address of the base of the block to be freed. |
|
450 This must be the address returned by a previous call to |
|
451 Epoc::AllocPhysicalRam(), Epoc::ZoneAllocPhysicalRam(), |
|
452 Epoc::ClaimPhysicalRam() or Epoc::ClaimRamZone(). |
|
453 @param aSize The size in bytes of the required block. The specified size |
|
454 is rounded up to the page size, since only whole pages of |
|
455 physical RAM can be allocated. |
|
456 @return KErrNone if the operation was successful. |
|
457 |
|
458 |
|
459 |
|
460 @pre Calling thread must be in a critical section. |
|
461 @pre Interrupts must be enabled. |
|
462 @pre Kernel must be unlocked. |
|
463 @pre No fast mutex can be held. |
|
464 @pre Call in a thread context. |
|
465 @pre Can be used in a device driver. |
|
466 */ |
|
467 EXPORT_C TInt Epoc::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize) |
|
468 { |
|
469 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam"); |
|
470 RamAllocLock::Lock(); |
|
471 TheMmu.FreePhysicalRam(aPhysAddr,MM::RoundToPageCount(aSize)); |
|
472 RamAllocLock::Unlock(); |
|
473 return KErrNone; |
|
474 } |
|
475 |
|
476 |
|
477 /** |
|
478 Free a number of physical RAM pages that were previously allocated using |
|
479 Epoc::AllocPhysicalRam() or Epoc::ZoneAllocPhysicalRam(). |
|
480 |
|
481 Specifying one of the following may cause the system to panic: |
|
482 a) an invalid physical RAM address. |
|
483 b) valid physical RAM addresses where some had not been previously allocated. |
|
484 c) an address not aligned to a page boundary. |
|
485 |
|
486 @param aNumPages The number of pages to be freed. |
|
487 @param aPageList An array of aNumPages TPhysAddr elements. Where each element |
|
488 should contain the physical address of each page to be freed. |
|
489 This must be the same set of addresses as those returned by a |
|
490 previous call to Epoc::AllocPhysicalRam() or |
|
491 Epoc::ZoneAllocPhysicalRam(). |
|
492 @return KErrNone if the operation was successful. |
|
493 |
|
494 @pre Calling thread must be in a critical section. |
|
495 @pre Interrupts must be enabled. |
|
496 @pre Kernel must be unlocked. |
|
497 @pre No fast mutex can be held. |
|
498 @pre Call in a thread context. |
|
499 @pre Can be used in a device driver. |
|
500 |
|
501 */ |
|
502 EXPORT_C TInt Epoc::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList) |
|
503 { |
|
504 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam"); |
|
505 RamAllocLock::Lock(); |
|
506 TheMmu.FreePhysicalRam(aPageList,aNumPages); |
|
507 RamAllocLock::Unlock(); |
|
508 return KErrNone; |
|
509 } |
|
510 |
|
511 |
|
512 /** |
|
513 Allocate a specific block of physically contiguous RAM, specified by physical |
|
514 base address and size. |
|
515 If and when the RAM is no longer required it should be freed using |
|
516 Epoc::FreePhysicalRam() |
|
517 |
|
518 @param aPhysAddr The physical address of the base of the required block. |
|
519 @param aSize The size in bytes of the required block. The specified size |
|
520 is rounded up to the page size, since only whole pages of |
|
521 physical RAM can be allocated. |
|
522 @return KErrNone if the operation was successful. |
|
523 KErrArgument if the range of physical addresses specified included some |
|
524 which are not valid physical RAM addresses. |
|
525 KErrInUse if the range of physical addresses specified are all valid |
|
526 physical RAM addresses but some of them have already been |
|
527 allocated for other purposes. |
|
528 @pre Calling thread must be in a critical section. |
|
529 @pre Interrupts must be enabled. |
|
530 @pre Kernel must be unlocked. |
|
531 @pre No fast mutex can be held. |
|
532 @pre Call in a thread context. |
|
533 @pre Can be used in a device driver. |
|
534 */ |
|
535 EXPORT_C TInt Epoc::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize) |
|
536 { |
|
537 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ClaimPhysicalRam"); |
|
538 RamAllocLock::Lock(); |
|
539 TInt r = TheMmu.ClaimPhysicalRam |
|
540 ( |
|
541 aPhysAddr, |
|
542 MM::RoundToPageCount(aSize), |
|
543 (Mmu::TRamAllocFlags)EMemAttStronglyOrdered |
|
544 ); |
|
545 RamAllocLock::Unlock(); |
|
546 return r; |
|
547 } |
|
548 |
|
549 |
|
/**
Translate a virtual address to the corresponding physical address.

@param aLinAddr The virtual address to be translated.
@return The physical address corresponding to the given virtual address, or
KPhysAddrInvalid if the specified virtual address is unmapped.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TPhysAddr Epoc::LinearToPhysical(TLinAddr aLinAddr)
	{
	// This precondition is violated by various parts of the system under some conditions,
	// e.g. when __FLUSH_PT_INTO_RAM__ is defined. This function might also be called by
	// a higher-level RTOS for which these conditions are meaningless. Thus, it's been
	// disabled for now.
	// CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"Epoc::LinearToPhysical");
	// Translation is performed in the address space of the current thread's process.
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	// Get the os asid of current thread's process so no need to open a reference on it.
	TInt osAsid = pP->OsAsid();
#if 1
	// Active variant: translate without taking MmuLock.
	return Mmu::UncheckedLinearToPhysical(aLinAddr, osAsid);
#else
	// Disabled alternative kept for reference: translate under MmuLock.
	MmuLock::Lock();
	TPhysAddr addr = Mmu::LinearToPhysical(aLinAddr, osAsid);
	MmuLock::Unlock();
	return addr;
#endif
	}
|
580 |
|
581 |
|
582 // |
|
583 // Misc |
|
584 // |
|
585 |
|
586 EXPORT_C TInt TInternalRamDrive::MaxSize() |
|
587 { |
|
588 TUint maxPages = (TUint(TheSuperPage().iRamDriveSize)>>KPageShift)+TheMmu.FreeRamInPages(); // current size plus spare memory |
|
589 TUint maxPages2 = TUint(PP::RamDriveMaxSize)>>KPageShift; |
|
590 if(maxPages>maxPages2) |
|
591 maxPages = maxPages2; |
|
592 return maxPages*KPageSize; |
|
593 } |
|
594 |
|
595 |
|
// Return the MMU page size, in bytes, used by this memory model.
TInt M::PageSizeInBytes()
	{
	return KPageSize;
	}
|
600 |
|
601 |
|
#ifdef BTRACE_KERNEL_MEMORY
// Emit the initial ("prime") BTrace records for the kernel memory category.
// Not yet implemented for this memory model — currently a no-op.
void M::BTracePrime(TUint aCategory)
	{
	// TODO:
	}
#endif
|
608 |
|
609 |
|
610 |
|
611 // |
|
612 // DPlatChunkHw |
|
613 // |
|
614 |
|
615 /** |
|
616 Create a hardware chunk object, optionally mapping a specified block of physical |
|
617 addresses with specified access permissions and cache policy. |
|
618 |
|
619 When the mapping is no longer required, close the chunk using chunk->Close(0); |
|
620 Note that closing a chunk does not free any RAM pages which were mapped by the |
|
621 chunk - these must be freed separately using Epoc::FreePhysicalRam(). |
|
622 |
|
623 @param aChunk Upon successful completion this parameter receives a pointer to |
|
624 the newly created chunk. Upon unsuccessful completion it is |
|
625 written with a NULL pointer. The virtual address of the mapping |
|
626 can subsequently be discovered using the LinearAddress() |
|
627 function on the chunk. |
|
628 @param aAddr The base address of the physical region to be mapped. This will |
|
629 be rounded down to a multiple of the hardware page size before |
|
630 being used. |
|
631 @param aSize The size of the physical address region to be mapped. This will |
|
632 be rounded up to a multiple of the hardware page size before |
|
633 being used; the rounding is such that the entire range from |
|
634 aAddr to aAddr+aSize-1 inclusive is mapped. For example if |
|
635 aAddr=0xB0001FFF, aSize=2 and the hardware page size is 4KB, an |
|
636 8KB range of physical addresses from 0xB0001000 to 0xB0002FFF |
|
637 inclusive will be mapped. |
|
638 @param aMapAttr Mapping attributes required for the mapping. This is formed |
|
639 by ORing together values from the TMappingAttributes enumeration |
|
640 to specify the access permissions and caching policy. |
|
641 |
|
642 @pre Calling thread must be in a critical section. |
|
643 @pre Interrupts must be enabled. |
|
644 @pre Kernel must be unlocked. |
|
645 @pre No fast mutex can be held. |
|
646 @pre Call in a thread context. |
|
647 @pre Can be used in a device driver. |
|
648 @see TMappingAttributes |
|
649 */ |
|
650 EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr) |
|
651 { |
|
652 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New"); |
|
653 __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aMapAttr)); |
|
654 |
|
655 aChunk = NULL; |
|
656 |
|
657 // check size... |
|
658 if(aSize<=0) |
|
659 return KErrArgument; |
|
660 TPhysAddr end = aAddr+aSize-1; |
|
661 if(end<aAddr) // overflow? |
|
662 return KErrArgument; |
|
663 aAddr &= ~KPageMask; |
|
664 TUint pageCount = (end>>KPageShift)-(aAddr>>KPageShift)+1; |
|
665 |
|
666 // check attributes... |
|
667 TMappingPermissions perm; |
|
668 TInt r = MM::MappingPermissions(perm,*(TMappingAttributes2*)&aMapAttr); |
|
669 if(r!=KErrNone) |
|
670 return r; |
|
671 TMemoryAttributes attr; |
|
672 r = MM::MemoryAttributes(attr,*(TMappingAttributes2*)&aMapAttr); |
|
673 if(r!=KErrNone) |
|
674 return r; |
|
675 |
|
676 // construct a hardware chunk... |
|
677 DMemModelChunkHw* pC = new DMemModelChunkHw; |
|
678 if(!pC) |
|
679 return KErrNoMemory; |
|
680 |
|
681 // set the executable flags based on the specified mapping permissions... |
|
682 TMemoryCreateFlags flags = EMemoryCreateDefault; |
|
683 if(perm&EExecute) |
|
684 flags = (TMemoryCreateFlags)(flags|EMemoryCreateAllowExecution); |
|
685 |
|
686 r = MM::MemoryNew(pC->iMemoryObject, EMemoryObjectHardware, pageCount, flags, attr); |
|
687 if(r==KErrNone) |
|
688 { |
|
689 r = MM::MemoryAddContiguous(pC->iMemoryObject,0,pageCount,aAddr); |
|
690 if(r==KErrNone) |
|
691 { |
|
692 r = MM::MappingNew(pC->iKernelMapping,pC->iMemoryObject,perm,KKernelOsAsid); |
|
693 if(r==KErrNone) |
|
694 { |
|
695 pC->iPhysAddr = aAddr; |
|
696 pC->iLinAddr = MM::MappingBase(pC->iKernelMapping); |
|
697 pC->iSize = pageCount<<KPageShift; |
|
698 const TMappingAttributes2& lma = MM::LegacyMappingAttributes(attr,perm); // not needed, but keep in case someone uses this internal member |
|
699 *(TMappingAttributes2*)&pC->iAttribs = lma; |
|
700 } |
|
701 } |
|
702 } |
|
703 |
|
704 if(r==KErrNone) |
|
705 aChunk = pC; |
|
706 else |
|
707 pC->Close(NULL); |
|
708 return r; |
|
709 } |
|
710 |
|
711 |
|
712 TInt DMemModelChunkHw::Close(TAny*) |
|
713 { |
|
714 __KTRACE_OPT2(KOBJECT,KMMU,Kern::Printf("DMemModelChunkHw::Close %d %O",AccessCount(),this)); |
|
715 TInt r = Dec(); |
|
716 if(r==1) |
|
717 { |
|
718 MM::MappingDestroy(iKernelMapping); |
|
719 MM::MemoryDestroy(iMemoryObject); |
|
720 DBase::Delete(this); |
|
721 } |
|
722 return r; |
|
723 } |
|
724 |
|
725 |
|
726 |
|
727 // |
|
728 // Demand Paging |
|
729 // |
|
730 |
|
#ifdef _DEBUG
// Debug hook called from assembler (ASM_ASSERT_PAGING_SAFE): faults the
// kernel if taking a paging fault over the given range is not safe here.
extern "C" void ASMCheckPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddress, TUint aLength)
	{
	if(M::CheckPagingSafe(EFalse, aStartAddress, aLength))
		return;
	Kern::Printf("ASM_ASSERT_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR);
	__NK_ASSERT_ALWAYS(0);
	}

// Debug hook called from assembler (ASM_ASSERT_DATA_PAGING_SAFE): only
// warns (via trace) when data paging safety is violated, it does not fault.
extern "C" void ASMCheckDataPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddress, TUint aLength)
	{
	if(M::CheckPagingSafe(ETrue, aStartAddress, aLength))
		return;
	__KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: ASM_ASSERT_DATA_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR));
	}
#endif
|
747 |
|
748 |
|
749 DMutex* CheckMutexOrder() |
|
750 { |
|
751 #ifdef _DEBUG |
|
752 SDblQue& ml = TheCurrentThread->iMutexList; |
|
753 if(ml.IsEmpty()) |
|
754 return NULL; |
|
755 DMutex* mm = _LOFF(ml.First(), DMutex, iOrderLink); |
|
756 if (KMutexOrdPageOut >= mm->iOrder) |
|
757 return mm; |
|
758 #endif |
|
759 return NULL; |
|
760 } |
|
761 |
|
762 |
|
/**
Determine whether it is safe for the current context to take a paging fault
over the memory range [aStartAddr, aStartAddr+aLength).

@param aDataPaging ETrue when checking data-paging safety, EFalse for
                   code/ROM paging. For data paging, violations are reported
                   as warnings and are only fatal if data paging is enabled.
@param aStartAddr  Start of the address range being accessed.
@param aLength     Length in bytes of the address range.
@return ETrue if paging the range is safe in this context, EFalse otherwise.
*/
TBool M::CheckPagingSafe(TBool aDataPaging, TLinAddr aStartAddr, TUint aLength)
	{
	// During kernel initialisation no check is attempted.
	if(K::Initialising)
		return ETrue;

	NThread* nt = NCurrentThread();
	if(!nt)
		return ETrue; // We've not booted properly yet!

	// Addresses at or above the user memory limit can never page-fault.
	if(aStartAddr>=KUserMemoryLimit)
		return ETrue; // kernel memory can't be paged

	// Unpaged ROM can be touched from any context.
	if(IsUnpagedRom(aStartAddr,aLength))
		return ETrue;

	TBool dataPagingEnabled = K::MemModelAttributes&EMemModelAttrDataPaging;

	DThread* thread = _LOFF(nt,DThread,iNThread);
	NFastMutex* fm = NKern::HeldFastMutex();
	if(fm)
		{
		// Holding a fast mutex is unsafe unless it is the system lock and a
		// paging exception trap is installed for this thread.
		if(!thread->iPagingExcTrap || fm!=&TheScheduler.iLock)
			{
			if (!aDataPaging)
				{
				__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: CheckPagingSafe FAILED - FM Held"));
				return EFalse;
				}
			else
				{
				// Data paging: warn, but only treat as unsafe if data paging
				// is actually enabled on this system.
				__KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: CheckPagingSafe FAILED - FM Held"));
				return !dataPagingEnabled;
				}
			}
		}

	// Holding a mutex at or below the page-out order would deadlock paging
	// (debug builds only — CheckMutexOrder() returns NULL in release builds).
	DMutex* m = CheckMutexOrder();
	if (m)
		{
		if (!aDataPaging)
			{
			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Mutex Order Fault %O",m));
			return EFalse;
			}
		else
			{
			__KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: Mutex Order Fault %O mem=%x+%x",m,aStartAddr,aLength));
			return !dataPagingEnabled;
			}
		}

	return ETrue;
	}
|
816 |
|
817 |
|
818 |
|
// Notification hook invoked when the paging device becomes idle.
// Default implementation does nothing; presumably a derived paging device
// may override or rely on this default — confirm against class declaration.
EXPORT_C void DPagingDevice::NotifyIdle()
	{
	}
|
822 |
|
// Notification hook invoked when the paging device becomes busy.
// Default implementation does nothing; presumably a derived paging device
// may override or rely on this default — confirm against class declaration.
EXPORT_C void DPagingDevice::NotifyBusy()
	{
	}