author | Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com> |
Fri, 14 May 2010 17:13:29 +0300 | |
changeset 109 | b3a1d9898418 |
parent 0 | a41df078684a |
child 257 | 3e88ff8f41d5 |
permissions | -rw-r--r-- |
0 | 1 |
// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\mmu\d_demandpaging.cpp
//
//
18 |
#include <kernel/kern_priv.h> |
|
19 |
#include <kernel/cache.h> |
|
20 |
#include "d_demandpaging.h" |
|
21 |
||
22 |
/// Page attributes, cut-n-paste'd from mmubase.h.
/// Only the demand-paging values are needed by this test; the others are
/// kept (commented out) to document the numbering, which must stay in sync
/// with the kernel's own definition in mmubase.h.
enum TType
	{
//	EInvalid=0,		// No physical RAM exists for this page
//	EFixed=1,		// RAM fixed at boot time
//	EUnused=2,		// Page is unused
//	EChunk=3,
//	ECodeSeg=4,
//	EHwChunk=5,
//	EPageTable=6,
//	EPageDir=7,
//	EPtInfo=8,
//	EShadow=9,

	EPagedROM=10,	// demand-paged ROM page
	EPagedCode=11,	// demand-paged code page
	EPagedData=12,	// demand-paged data page
	EPagedCache=13,	// page in the paging cache
	EPagedFree=14,	// paged page currently free
	};
|
42 |
||
43 |
// Paging state of a page, as reported in bits 15..8 of the value returned
// by the EVMPageState HAL function (see PageState() below). Values must
// match the kernel's internal page-state numbering.
enum TState
	{
	EStateNormal = 0,		// no special state
	EStatePagedYoung = 1,	// paged in, on the 'young' (recently used) list
	EStatePagedOld = 2,		// paged in, on the 'old' list
	EStatePagedDead = 3,	// being paged out
	EStatePagedLocked = 4	// pinned by a demand paging lock
	};
|
51 |
||
52 |
// |
|
53 |
// Class definitions |
|
54 |
// |
|
55 |
||
56 |
// Factory (logical device) object for the demand paging test LDD.
// Created by the kernel via DECLARE_STANDARD_LDD when the driver loads;
// manufactures DDemandPagingTestChannel objects on RBusLogicalChannel open.
class DDemandPagingTestFactory : public DLogicalDevice
	{
public:
	~DDemandPagingTestFactory();
	virtual TInt Install();							// register the device name
	virtual void GetCaps(TDes8& aDes) const;		// no capabilities reported
	virtual TInt Create(DLogicalChannelBase*& aChannel);	// make a channel
	};
|
64 |
||
65 |
// Per-client channel implementing the demand paging tests. All requests
// are handled synchronously in Request(); state below tracks at most one
// hardware chunk and one demand paging lock per channel.
class DDemandPagingTestChannel : public DLogicalChannelBase
	{
public:
	DDemandPagingTestChannel();
	~DDemandPagingTestChannel();
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
	// Tests; each returns KErrNone or a failing __LINE__ / error code.
	TInt LockTest(const TAny* aBuffer, TInt aSize);
	TInt LockTest2();
	TInt DoConsumeContiguousRamTest(TInt aAlign, TInt aPages);
	TInt DoCreatePlatHwChunk(TInt aSize, TAny* aLinAddr);
	TInt DoDestroyPlatHwChunk();
	TInt ReadHoldingMutexTest(TAny* aDest);

	// Page-state predicates built on the EVMPageState HAL function.
	TBool CheckPagedIn(TLinAddr aAddress);
	TBool CheckPagedOut(TLinAddr aAddress);
	TBool CheckLocked(TLinAddr aAddress);

	TInt FreeRam();			// free RAM in bytes, after a supervisor barrier
public:
	DDemandPagingTestFactory* iFactory;	// owning factory (not owned here)
	DDemandPagingLock iLock;			// lock used by ELock/EUnlock/LockTest

	DPlatChunkHw* iHwChunk;	// at most one hardware chunk per channel
	TInt iChunkSize;		// size of the chunk, page-rounded
	TPhysAddr iPhysBase;	// This will be base physical address of the chunk
	TLinAddr iLinearBase;	// This will be base linear address of the chunk
	};
|
93 |
||
94 |
// |
|
95 |
// DDemandPagingTestFactory |
|
96 |
// |
|
97 |
||
98 |
// Second-phase construction of the factory: publish the LDD under its
// well-known name so user-side code can open channels to it.
TInt DDemandPagingTestFactory::Install()
	{
	const TInt result = SetName(&KDemandPagingTestLddName);
	return result;
	}
|
102 |
||
103 |
// Nothing to release; base class DLogicalDevice handles its own cleanup.
DDemandPagingTestFactory::~DDemandPagingTestFactory()
	{
	}
|
106 |
||
107 |
// No device capabilities to report for this test driver.
void DDemandPagingTestFactory::GetCaps(TDes8& /*aDes*/) const
	{
	// Not used but required as DLogicalDevice::GetCaps is pure virtual
	}
|
111 |
||
112 |
// Manufacture a new channel object for a user-side open. On failure the
// out-parameter is left NULL so the caller never sees a stale pointer.
TInt DDemandPagingTestFactory::Create(DLogicalChannelBase*& aChannel)
	{
	aChannel = NULL;
	DDemandPagingTestChannel* newChannel = new DDemandPagingTestChannel;
	if (newChannel == NULL)
		return KErrNoMemory;
	// Let the channel find its way back to this factory.
	newChannel->iFactory = this;
	aChannel = newChannel;
	return KErrNone;
	}
|
122 |
||
123 |
// Standard LDD entry point: invoked by the kernel when the driver DLL is
// loaded, to create the factory object for this logical device. Returning
// NULL (allocation failure) makes the load fail with KErrNoMemory.
DECLARE_STANDARD_LDD()
	{
	return new DDemandPagingTestFactory;
	}
|
127 |
||
128 |
// |
|
129 |
// DDemandPagingTestChannel |
|
130 |
// |
|
131 |
||
132 |
// Second-phase construction of the channel. This test driver needs no
// unit/info/version validation, so creation always succeeds.
TInt DDemandPagingTestChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/)
	{
	return KErrNone;
	}
|
136 |
||
137 |
// NOTE(review): members (iHwChunk, iChunkSize, ...) are not initialised
// here — presumably relying on kernel-side objects being zero-filled on
// allocation; confirm against DBase::operator new before changing.
DDemandPagingTestChannel::DDemandPagingTestChannel()
	{
	}
|
140 |
||
141 |
// Release any hardware chunk still owned by this channel, so closing the
// channel never leaks the chunk or its physical RAM.
DDemandPagingTestChannel::~DDemandPagingTestChannel()
	{
	DoDestroyPlatHwChunk();
	}
|
145 |
||
146 |
// Dispatch a user-side request. a1/a2 are raw request arguments whose
// meaning depends on aFunction; all requests complete synchronously and
// the return value is passed straight back to the client.
TInt DDemandPagingTestChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
	{
	switch(aFunction)
		{
	case RDemandPagingTestLdd::ELockTest:
		{
		// Run the basic lock test, then (only if it passed) the
		// live-list growth test.
		TInt r = LockTest(a1,(TInt)a2);
		if (r == KErrNone)
			r = LockTest2();
		return r;
		}

	case RDemandPagingTestLdd::ESetRealtimeTrace:
		{
#if defined(_DEBUG)
		// Atomically set or clear the KREALTIME trace bit in the
		// superpage debug mask: a1 non-zero enables the trace.
		TUint32 bit = TUint32(1U<<(KREALTIME&31));
		__e32_atomic_axo_ord32(&Kern::SuperPage().iDebugMask[KREALTIME>>5], ~bit, a1?bit:0);
#if 0 // can enable this to help debugging
		bit = (1<<(KPAGING&31));
		__e32_atomic_axo_ord32(&Kern::SuperPage().iDebugMask[KPAGING>>5], ~bit, a1?bit:0);
#endif
#endif //_DEBUG
		}
		return KErrNone;

	case RDemandPagingTestLdd::EDoConsumeContiguousRamTest:
		{
		return DDemandPagingTestChannel::DoConsumeContiguousRamTest((TInt)a1, (TInt)a2);
		}

	case RDemandPagingTestLdd::ECreatePlatHwChunk:
		{
		return DDemandPagingTestChannel::DoCreatePlatHwChunk((TInt)a1, a2);
		}

	case RDemandPagingTestLdd::EDestroyPlatHwChunk:
		{
		return DDemandPagingTestChannel::DoDestroyPlatHwChunk();
		}

	case RDemandPagingTestLdd::ELock:
		{
		// Reserve paging resources, then pin the user range [a1, a1+a2).
		TInt r=iLock.Alloc((TInt)a2);
		if(r!=KErrNone)
			return r;
		return iLock.Lock(&Kern::CurrentThread(),(TLinAddr)a1,(TInt)a2);
		}

	case RDemandPagingTestLdd::EUnlock:
		{
		// Free() both unlocks and releases the reserved resources.
		iLock.Free();
		return KErrNone;
		}

	case RDemandPagingTestLdd::EReadHoldingMutexTest:
		return ReadHoldingMutexTest((TAny*)a1);

	default:
		return KErrNotSupported;
		}
	}
|
207 |
||
208 |
//
// DDemandPagingTestChannel::DoCreatePlatHwChunk
//
// For some of the tests of IPC from demand-paged memory, we need a writable
// globally-mapped buffer; so this function creates a suitable chunk and
// returns its (global, virtual) address to the userland caller. The caller
// should call DoDestroyPlatHwChunk() to release the memory when the tests
// are finished.
//
// aSize    - requested size in bytes (rounded up to a whole page count)
// aLinAddr - user-side address to receive the chunk's linear base address
//
TInt DDemandPagingTestChannel::DoCreatePlatHwChunk(TInt aSize, TAny* aLinAddr)
	{
	TInt mapAttr = EMapAttrUserRw; // Supervisor and user both have read/write permissions

	// Critical section: the channel must not die while it owns raw
	// physical RAM that is not yet wrapped in the chunk object.
	NKern::ThreadEnterCS();
	if (iHwChunk) // Only one chunk at a time
		{
		NKern::ThreadLeaveCS();
		return KErrAlreadyExists;
		}

	iChunkSize = Kern::RoundToPageSize(aSize);

	Kern::Printf("*** Attempting to allocate contiguous physical RAM ***");
	TInt free = Kern::FreeRamInBytes();
	Kern::Printf("  requested:  %08x", iChunkSize);
	Kern::Printf("  total free: %08x", free);

	TInt r = Epoc::AllocPhysicalRam(iChunkSize, iPhysBase, 0);	// Allocate RAM; result in iPhysBase
	if (r)
		{
		NKern::ThreadLeaveCS();
		Kern::Printf("  failed with error %d", r);
		return r;
		}
	else
		Kern::Printf("  success");

	r = DPlatChunkHw::New(iHwChunk, iPhysBase, iChunkSize, mapAttr);	// Create chunk
	if (r)
		{
		// Chunk creation failed: give the physical pages straight back.
		Epoc::FreePhysicalRam(iPhysBase, iChunkSize);
		iHwChunk = 0;
		NKern::ThreadLeaveCS();
		return r;
		}
	NKern::ThreadLeaveCS();

	// Return the virtual address to userland
	iLinearBase = iHwChunk->LinearAddress();
	kumemput(aLinAddr, &iLinearBase, sizeof(iLinearBase));

	Kern::Printf("CreatePlatHwChunk@%08x: iLinearBase %08x, iPhysBase %08x, size %d",
				 iHwChunk, iLinearBase, iPhysBase, iChunkSize);

	return KErrNone;
	}
|
264 |
||
265 |
//
// DDemandPagingTestChannel::DoDestroyPlatHwChunk
//
// Releases the hardware chunk and the contiguous physical RAM allocated by
// DoCreatePlatHwChunk(). Safe to call when no chunk exists (e.g. from the
// destructor); always returns KErrNone.
//
TInt DDemandPagingTestChannel::DoDestroyPlatHwChunk()
	{
	Kern::Printf("DestroyPlatHwChunk@%08x: iLinearBase %08x, iPhysBase %08x, size %d",
				 iHwChunk, iLinearBase, iPhysBase, iChunkSize);
	NKern::ThreadEnterCS();
	if (iHwChunk)
		{
		// Close the mapping first, then hand the physical pages back.
		iHwChunk->Close(NULL);
		Epoc::FreePhysicalRam(iPhysBase, iChunkSize);
		// Reset ALL the bookkeeping — including iLinearBase, which the
		// original code left stale — so old values can't be reported or
		// reused after the chunk is gone.
		iLinearBase = 0;
		iPhysBase = 0;
		iChunkSize = 0;
		iHwChunk = 0;
		}
	NKern::ThreadLeaveCS();
	return KErrNone;
	}
|
281 |
||
282 |
//
// DDemandPagingTestChannel::DoConsumeContiguousRamTest
//
// This test attempts to consume all available Contiguous Ram until we need to ask the
// demand paging code to release memory for it.
//
// On completion free all the memory allocated.
//
// NOTE: this CHECK records the failing __LINE__ but deliberately carries
// on, so that all allocations still get cleaned up; the LAST failure line
// is what gets returned.
#define CHECK(c) { if(!(c)) { Kern::Printf("Fail %d", __LINE__); ; retVal = __LINE__;} }

// aAlign - requested physical alignment (log2, passed to AllocPhysicalRam)
// aSize  - size in bytes of each contiguous block to allocate
//          (declared as 'aPages' in the class definition — same value)
// Returns KErrNone, or the __LINE__ of the last failed check.
TInt DDemandPagingTestChannel::DoConsumeContiguousRamTest(TInt aAlign, TInt aSize)
	{
	TInt retVal = KErrNone;
	TInt initialFreeRam = FreeRam();
	TInt totalBlocks = initialFreeRam/aSize;	// upper bound on allocations

	NKern::ThreadEnterCS();
	TPhysAddr* pAddrArray = (TPhysAddr *)Kern::Alloc(sizeof(TPhysAddr) * totalBlocks);
	NKern::ThreadLeaveCS();
	CHECK(pAddrArray);
	if(!pAddrArray)
		return retVal;

	SVMCacheInfo tempPages;

	// get the initial free ram again as the heap may have grabbed a page during the alloc
	initialFreeRam = FreeRam();
	Kern::Printf("ConsumeContiguousRamTest: align %d size %d initialFreeRam %d", aAlign, aSize, initialFreeRam);

	CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
	Kern::Printf("Start cache info: iMinSize %d iMaxSize %d iCurrentSize %d iMaxFreeSize %d",
		tempPages.iMinSize, tempPages.iMaxSize, tempPages.iCurrentSize ,tempPages.iMaxFreeSize);

	TInt initialFreePages = tempPages.iMaxFreeSize;
	CHECK(initialFreePages != 0);

	// allocate blocks to use up RAM until we fail to allocate any further...
	TBool freedPagesToAlloc = EFalse;
	TInt index;
	TUint32 alignMask = (1 << aAlign) - 1;
	for (index = 0; index < totalBlocks; )
		{
		// Sample the paging cache's reclaimable size before the alloc so
		// we can tell whether the alloc stole pages from the cache.
		CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
		TInt beforePages = tempPages.iMaxFreeSize;

		NKern::ThreadEnterCS();
		TInt r = Epoc::AllocPhysicalRam(aSize, pAddrArray[index], aAlign);
		if(r==KErrNone)
			{
			// check the alignment of the returned pages
			CHECK((pAddrArray[index] & alignMask) == 0);
			++index;
			}
		NKern::ThreadLeaveCS();
		if(r!=KErrNone)
			{
			break;
			}
		CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
		TInt afterPages = tempPages.iMaxFreeSize;

		if (afterPages != beforePages)
			freedPagesToAlloc = ETrue; // the alloc reclaimed memory from the paging cache
		}

	if (!index)
		Kern::Printf("WARNING : DoConsumeContiguousRamTest no allocations were successful");
	// free the memory we allocated...
	while(--index>=0)
		{
		NKern::ThreadEnterCS();
		TInt r = Epoc::FreePhysicalRam(pAddrArray[index], aSize);
		NKern::ThreadLeaveCS();
		CHECK(r==KErrNone);
		}

	// Everything freed: free RAM should be back to where we started.
	CHECK(FreeRam() == initialFreeRam);

	NKern::ThreadEnterCS();
	Kern::Free(pAddrArray);
	NKern::ThreadLeaveCS();

	CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
	Kern::Printf("End cache info: iMinSize %d iMaxSize %d iCurrentSize %d iMaxFreeSize %d",
		tempPages.iMinSize, tempPages.iMaxSize, tempPages.iCurrentSize ,tempPages.iMaxFreeSize);

	if (!freedPagesToAlloc)
		Kern::Printf("WARNING : DoConsumeContiguousRamTest freedPagesToAlloc was eFalse");
	//CHECK(freedPagesToAlloc);

	return retVal;
	}
#undef CHECK
|
375 |
||
376 |
||
377 |
// Force a real (non-optimised-away) read of one byte; used by the READ()
// macro below to touch a page and trigger demand paging.
TUint8 ReadByte(volatile TUint8* aPtr)
	{
	return *aPtr;
	}
|
381 |
||
382 |
// Unlike the consume-RAM test above, this CHECK aborts immediately,
// returning the failing line number.
#define CHECK(c) { if(!(c)) return __LINE__; }

#define READ(a) ReadByte((volatile TUint8*)(a))

// Exercise DDemandPagingLock against aBuffer (a demand-paged user buffer):
// paging in by reading, reserving pages with Alloc, pinning with Lock,
// surviving a cache flush while locked, and correct interaction of two
// overlapping locks. Returns KErrNone or the failing __LINE__.
TInt DDemandPagingTestChannel::LockTest(const TAny* aBuffer, TInt aSize)
	{
	// Get page size info
	TInt pageSize = 0;
	CHECK(Kern::HalFunction(EHalGroupKernel,EKernelHalPageSizeInBytes,&pageSize,0)==KErrNone);
	TInt pageMask = pageSize-1;

	// See if we're running on the Flexible Memory Model
	TUint32 memModelAttrib = (TUint32)Kern::HalFunction(EHalGroupKernel,EKernelHalMemModelInfo,0,0);
	TBool fmm = (memModelAttrib&EMemModelTypeMask)==EMemModelTypeFlexible;

	// Round buffer to page boundaries (start up, end down) so the test
	// only ever touches whole pages inside the caller's buffer.
	TLinAddr start = ((TLinAddr)aBuffer+pageMask)&~pageMask;
	TLinAddr end = ((TLinAddr)aBuffer+aSize)&~pageMask;
	aSize = end-start;
	Kern::Printf("Test buffer is %08x, %x\n",start,aSize);
	CHECK(aSize>pageSize*2);

	// Flush all paged memory
	Kern::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);

	TInt initialFreeRam;
	TInt freeRam1;
	TInt freeRam2;
	TLinAddr addr;
	TUint lockBytesUsed = fmm ? 0 : 0; // free ram change on locking (zero or aSize depending on implementation)

	{ // this brace is essential for correctness
	DDemandPagingLock lock2; // construct a lock;

	Kern::Printf("Check reading from buffer pages it in\n");
	for(addr=start; addr<end; addr+=pageSize) READ(addr);
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckPagedIn(addr));
	initialFreeRam = FreeRam();

	Kern::Printf("Check Alloc reserves pages\n");
	CHECK(iLock.Alloc(aSize)==KErrNone);
	freeRam1 = FreeRam();

	Kern::Printf("Check flushing pages out the buffer\n");
	Kern::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckPagedOut(addr));

	Kern::Printf("Check Lock\n");
	CHECK(iLock.Lock(&Kern::CurrentThread(),start,aSize));
	CHECK((TUint)FreeRam()==TUint(freeRam1-lockBytesUsed));
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));

	Kern::Printf("Check flushing doesn't page out the buffer\n");
	Kern::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));
	CHECK((TUint)FreeRam()==TUint(freeRam1-lockBytesUsed));

	Kern::Printf("Check second Alloc\n");
	CHECK(lock2.Alloc(aSize)==KErrNone);
	freeRam2 = FreeRam();

	Kern::Printf("Check second Lock\n");
	CHECK(lock2.Lock(&Kern::CurrentThread(),start,aSize));
	CHECK(FreeRam()==freeRam2);
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));

	Kern::Printf("Check deleting second lock\n");
	// lock2 is deleted here because it goes out of scope...
	} // this brace is essential for correctness

	// The pages must remain locked by iLock after lock2's destruction.
	CHECK((TUint)FreeRam()==TUint(freeRam1-lockBytesUsed));
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));

	Kern::Printf("Check Unlock\n");
	iLock.Unlock();
	CHECK(FreeRam()==freeRam1);
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckPagedIn(addr));
	// Second Unlock must be a harmless no-op.
	iLock.Unlock();
	CHECK(FreeRam()==initialFreeRam);

	Kern::Printf("Check Free\n");
	iLock.Free();
	CHECK(FreeRam()==initialFreeRam);
	// Second Free must also be a harmless no-op.
	iLock.Free();
	CHECK(FreeRam()==initialFreeRam);

	return KErrNone;
	}

#undef CHECK
|
471 |
// This CHECK variant records the failing line and jumps to the cleanup
// label so allocated locks are always released.
#define CHECK(c) { if(!(c)) { r = __LINE__; goto cleanup; } }

// Keep allocating 1-byte demand paging locks until the paging live list
// has to grow (observed as a drop in free RAM). Returns KErrNone, or the
// failing __LINE__ if the list never grows within KLockMax attempts.
TInt DDemandPagingTestChannel::LockTest2()
	{
	Kern::Printf("Check allocating locks eventually increases size of live list\n");
	TInt r = KErrNone;

	DDemandPagingLock* lock = NULL;
	RPointerArray<DDemandPagingLock> lockArray;

	const TInt KLockMax = 1000; // make this a bit bigger than current min page count?
	TInt i;

	// CS covers all allocation/free so the thread can't be killed while
	// holding kernel-side resources.
	NKern::ThreadEnterCS();
	for (i = 0 ; i < KLockMax ; ++i)
		{
		lock = new DDemandPagingLock;
		CHECK(lock);
		CHECK(lockArray.Append(lock) == KErrNone);
		lock = NULL;	// ownership transferred to lockArray

		TInt initialFreeRam = FreeRam();
		CHECK(lockArray[i]->Alloc(1) == KErrNone);
		if (FreeRam() < initialFreeRam)
			{
			Kern::Printf("Live list size increased after %d locks allocated", i + 1);
			break;
			}
		}

	// If we fell off the end of the loop the live list never grew: fail.
	CHECK(i < KLockMax);

cleanup:

	delete lock;	// non-NULL only if Append failed
	lock = NULL;
	for (i = 0 ; i < lockArray.Count() ; ++i)
		{
		delete lockArray[i];
		lockArray[i] = NULL;
		}
	lockArray.Reset();

	NKern::ThreadLeaveCS();

	return r;
	}
|
518 |
||
519 |
// Report the current free RAM in bytes. A supervisor barrier is issued
// first so that pending supervisor-thread clean-up is complete and the
// figure is stable; the value is also logged for test diagnostics.
TInt DDemandPagingTestChannel::FreeRam()
	{
	Kern::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
	const TInt bytesFree = Kern::FreeRamInBytes();
	Kern::Printf("...free RAM: %x\n",bytesFree);
	return bytesFree;
	}
|
526 |
||
527 |
||
528 |
// Query the VM HAL for the paging state of the page containing aAddress,
// log it, and return the raw state word (TState in bits 15..8).
TUint32 PageState(TLinAddr aAddress)
	{
	const TUint32 pageState = (TUint32)Kern::HalFunction(EHalGroupVM, EVMPageState, (TAny*)aAddress, 0);
	Kern::Printf("PageState: %08x=%08x",aAddress,pageState);
	return pageState;
	}
|
534 |
||
535 |
||
536 |
// True if the page containing aAddress is paged in and on the 'young'
// (recently used) list — i.e. bits 15..8 of the state hold EStatePagedYoung.
TBool DDemandPagingTestChannel::CheckPagedIn(TLinAddr aAddress)
	{
	const TUint32 pageState = PageState(aAddress);
	return ((pageState >> 8) & 0xff) == (TUint32)EStatePagedYoung;
	}
|
541 |
||
542 |
||
543 |
// True if the page containing aAddress is paged out: both the type and
// state fields (the low 16 bits of the state word) are zero.
TBool DDemandPagingTestChannel::CheckPagedOut(TLinAddr aAddress)
	{
	const TUint32 pageState = PageState(aAddress);
	return (pageState & 0xffffu) == 0;
	}
|
548 |
||
549 |
||
550 |
TInt DDemandPagingTestChannel::CheckLocked(TLinAddr aAddress) |
|
551 |
{ |
|
552 |
TUint32 state = PageState(aAddress); |
|
553 |
return (state&0xff00) == (EStatePagedLocked<<8); |
|
554 |
} |
|
555 |
||
556 |
||
557 |
// Check that user memory can be accessed while holding a kernel mutex
// whose order is below the demand-paging mutex order: reads from unpaged
// ROM, kernel stack and kernel heap are copied to aDest (or a local buffer
// when aDest is NULL) via Kern::ThreadRawWrite. A kernel fault during any
// access means the test failed; otherwise returns KErrNone (KErrNoMemory
// if the heap allocation failed, or the MutexCreate error).
TInt DDemandPagingTestChannel::ReadHoldingMutexTest(TAny* aDest)
	{
	_LIT(KMutexName, "DPTestMutex");

	NKern::ThreadEnterCS();

	DMutex* mutex;
	TInt r = Kern::MutexCreate(mutex, KMutexName, KMutexOrdDebug); // Mutex order < demand paging
	if (r != KErrNone)
		{
		NKern::ThreadLeaveCS();
		return r;
		}
	Kern::MutexWait(*mutex);

	// Work out the bounds of the unpaged part of ROM: it runs from the
	// ROM header up to the pageable section (or the whole ROM if none).
	const TRomHeader& romHeader = Epoc::RomHeader();
	TLinAddr unpagedRomStart = (TLinAddr)&romHeader;
	TLinAddr unpagedRomEnd;
	if (romHeader.iPageableRomStart)
		unpagedRomEnd = unpagedRomStart + romHeader.iPageableRomStart;
	else
		unpagedRomEnd = unpagedRomStart + romHeader.iUncompressedSize;

	const TInt length = 16;
	TUint8 localBuf[length];
	if(!aDest)
		aDest = localBuf;
	Kern::Printf("Local buffer at %08x", aDest);

	// First and last 'length' bytes of the unpaged ROM region.
	TAny* src1 = (TAny*)unpagedRomStart;
	TAny* src2 = (TAny*)(unpagedRomEnd - length);

	DThread* thread = &Kern::CurrentThread();

	Kern::Printf("Attempting to access %08x", src1);
	Kern::ThreadRawWrite(thread, aDest, src1, length);
	Kern::Printf("Attempting to access %08x", src2);
	Kern::ThreadRawWrite(thread, aDest, src2, length);

	// Kernel stack data as a source.
	TUint8 stackData[length];
	Kern::Printf("Attempting to access %08x", stackData);
	Kern::ThreadRawWrite(thread, aDest, stackData, length);

	// Kernel heap data as a source.
	TAny* heapData = Kern::Alloc(length);
	if (heapData)
		{
		Kern::Printf("Attempting to access %08x", heapData);
		Kern::ThreadRawWrite(thread, aDest, heapData, length);
		Kern::Free(heapData);
		}
	else
		r = KErrNoMemory;

	Kern::MutexSignal(*mutex);
	mutex->Close(NULL);

	NKern::ThreadLeaveCS();

	return r; // a kernel fault indicates that the test failed
	}
|
617 |