// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\mmu\t_mmustress.cpp
// Stress test for memory management services performed by the kernel's memory model.
//
//

/**
 @file
*/

#define __E32TEST_EXTENSION__
#include <e32test.h>
#include "u32std.h"
#include <u32hal.h>
#include <e32svr.h>
#include <dptest.h>
#include <e32def.h>
#include <e32def_private.h>
#include "d_memorytest.h"
#include "..\defrag\d_pagemove.h"

TBool TRACE = 0;

LOCAL_D RTest test(_L("T_MMUSTRESS"));

TUint32 MemModelAttributes;
TUint32 MemModel;
TInt PageSize;
TInt PageMask;

#if !defined(__WINS__) && !defined(__X86__)
const TPtrC KMoveLddFileName=_L("D_PAGEMOVE.LDD");
RPageMove MoveLdd;
#endif

RMemoryTestLdd Ldd;

const TUint KNumTestChunks = 6;
RChunk Chunks[KNumTestChunks];
TInt Committed[KNumTestChunks] = {0}; // for each chunk, is the 'owned' region uncommitted(0), committed(1) or mixed(-1)
class TNicePtr8 : public TPtr8 { public: TNicePtr8() : TPtr8(0,0) {} } ChunkPtr[KNumTestChunks];

const TUint KNumSlaveProcesses = 4;
RProcess Slaves[KNumSlaveProcesses];
TRequestStatus SlaveLogons[KNumSlaveProcesses];
TRequestStatus SlaveRendezvous[KNumSlaveProcesses];

TInt SlaveNumber = -1; // master process is slave -1

const TInt KLocalIpcBufferSize = 0x10000;
TUint8* LocalIpcBuffer = 0;

RSemaphore StartSemaphore;

//
// Random number generation
//

TUint32 RandomSeed;

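// Pseudo-random numbers come from a simple 32-bit linear congruential generator
// (multiplier 69069). Each slave seeds it with its own slave number (see
// RandomInit and DoTest below), so the operation sequence is deterministic per
// slave but different across slaves.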
TUint32 Random()
	{
	RandomSeed = RandomSeed*69069+1;
	return RandomSeed;
	}

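// Map a full-range 32-bit random value onto 0..aRange-1 without using modulo:
// the 64-bit product Random()*aRange, shifted down by 32, scales the value into
// the requested range (e.g. Random(6) selects one of the six test chunks).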
TUint32 Random(TUint32 aRange)
	{
	return (TUint32)((TUint64(Random())*TUint64(aRange))>>32);
	}

void RandomInit(TUint32 aSeed)
	{
	RandomSeed = aSeed+(aSeed<<8)+(aSeed<<16)+(aSeed<<24);
	Random();
	Random();
	}



//
// Chunk utils
//

TBuf<KMaxKernelName> ChunkName(TInt aChunkNumber)
	{
	TBuf<KMaxKernelName> name;
	name.Format(_L("T_MMUSTRESS-Chunk%d"),aChunkNumber);
	return name;
	}

#ifdef __WINS__
TInt KChunkShift = 16;
#elif defined(__X86__)
TInt KChunkShift = 22;
#else
TInt KChunkShift = 20;
#endif

TInt ChunkSize(TInt aChunkNumber)
	{
	// biggest chunk (number 0) is big enough for each slave to own a region which is
	// 2 page tables ('chunks') in size...
	return (2*KNumSlaveProcesses)<<(KChunkShift-aChunkNumber);
	}

// check smallest chunk is less than 'chunk' size...
__ASSERT_COMPILE((2*KNumSlaveProcesses>>(KNumTestChunks-1))==0);
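// Worked example (assuming the default non-WINS, non-X86 configuration where
// KChunkShift==20): with KNumSlaveProcesses==4 and KNumTestChunks==6,
// ChunkSize(0)==8MB down to ChunkSize(5)==256KB, so each slave 'owns' a region
// of 2MB down to 64KB per chunk. The assert above requires
// 2*KNumSlaveProcesses>>(KNumTestChunks-1) to be zero, i.e. the smallest chunk
// must be smaller than the region covered by a single page table (1<<KChunkShift).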


/* Memory region 'owned' by this slave process */
void ChunkOwnedRegion(TInt aChunkNumber,TInt& aOffset,TInt& aSize)
	{
	TInt size = ChunkSize(aChunkNumber)/KNumSlaveProcesses;
	aSize = size;
	aOffset = SlaveNumber*size;
	test_Equal(0,size&PageMask);
	}

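// The mark/check pair below stamps the first two words of every page in a region
// with a value derived from the page's offset, the chunk number and the owning
// slave (plus its complement), so a page that ends up mapped at the wrong place,
// or scribbled on by another process, is caught by ChunkCheckRegion.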
void ChunkMarkRegion(TInt aChunkNumber,TInt aOffset,TInt aSize)
	{
	TInt pageSize = PageSize;
	TUint32 mark = aOffset|aChunkNumber|(SlaveNumber<<4);
	TUint8* ptr = Chunks[aChunkNumber].Base()+aOffset;
	TUint8* ptrEnd = ptr+aSize;
	while(ptr<ptrEnd)
		{
		((TUint32*)ptr)[0] = mark;
		((TUint32*)ptr)[1] = ~mark;
		mark += pageSize;
		ptr += pageSize;
		}
	}

void ChunkCheckRegion(TInt aChunkNumber,TInt aOffset,TInt aSize)
	{
	TInt pageSize = PageSize;
	TUint32 mark = aOffset|aChunkNumber|(SlaveNumber<<4);
	TUint8* ptr = Chunks[aChunkNumber].Base()+aOffset;
	TUint8* ptrEnd = ptr+aSize;
	while(ptr<ptrEnd)
		{
		test_Equal(mark,((TUint32*)ptr)[0]);
		test_Equal(~mark,((TUint32*)ptr)[1]);
		mark += pageSize;
		ptr += pageSize;
		}
	}

TInt ChunkOpen(TInt aChunkNumber)
	{
	RChunk& chunk = Chunks[aChunkNumber];
	if(chunk.Handle()!=0)
		return KErrNone;

	if(TRACE) RDebug::Printf("%d %d Open",SlaveNumber,aChunkNumber);
	TInt r = chunk.OpenGlobal(ChunkName(aChunkNumber),false);
	if(r!=KErrNoMemory)
		test_KErrNone(r);
	return r;
	}

//
// Server utils
//

TBuf<KMaxKernelName> ServerName(TInt aSlaveNumber)
	{
	TBuf<KMaxKernelName> name;
	name.Format(_L("T_MMUSTRESS-Server%d"),aSlaveNumber);
	return name;
	}

RServer2 Server;
RMessage2 ServerMessage;
TRequestStatus ServerStatus;

class RTestSession : public RSessionBase
	{
public:
	TInt Connect(TInt aServerNumber)
		{
		return CreateSession(ServerName(aServerNumber),TVersion(),1,EIpcSession_Unsharable,0,&iStatus);
		}
	TInt Send(TInt aChunkNumber)
		{
		return RSessionBase::Send(0,TIpcArgs(SlaveNumber,aChunkNumber,&ChunkPtr[aChunkNumber]));
		}
	TRequestStatus iStatus;
	};
RTestSession Sessions[KNumSlaveProcesses];


//
//
//

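// Slave start-up: rename the thread, load the test drivers, create this slave's
// server and connect a session to every slave's server (including our own), then
// rendezvous with the master and block on the shared start semaphore until the
// master releases all slaves at once.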
void SlaveInit()
	{
	RDebug::Printf("Slave %d initialising",SlaveNumber);

	TBuf<KMaxKernelName> name;
	name.Format(_L("T_MMUSTRESS-Slave%d"),SlaveNumber);
	User::RenameThread(name);

	test_KErrNone(StartSemaphore.Open(2));
	TInt r;
#if !defined(__WINS__) && !defined(__X86__)
	// Move ldd may not be in the ROM so needs to be loaded.
	r=User::LoadLogicalDevice(KMoveLddFileName);
	test_Value(r, r==KErrNone || r==KErrAlreadyExists);
	test_KErrNone(MoveLdd.Open());
#endif

	test_KErrNone(Ldd.Open());
	test_KErrNone(Ldd.CreateVirtualPinObject());

	LocalIpcBuffer = (TUint8*)User::Alloc(KLocalIpcBufferSize);
	test(LocalIpcBuffer!=0);

	test_KErrNone(Server.CreateGlobal(ServerName(SlaveNumber)));

	TUint i;

	// create sessions with other slaves...
	for(i=0; i<KNumSlaveProcesses; i++)
		{
		for(;;)
			{
			r = Sessions[i].Connect(i);
			// RDebug::Printf("%d Session %d = %d,%d",SlaveNumber,i,r,Sessions[i].iStatus.Int());
			if(r==KErrNotFound)
				{
				// give other slaves time to create their servers...
				User::After(10000);
				continue;
				}
			test_KErrNone(r);
			break;
			}
		}

	// process session connect messages...
	for(i=0; i<KNumSlaveProcesses; i++)
		{
		RMessage2 m;
		// RDebug::Printf("%d Server waiting for connect message",SlaveNumber);
		Server.Receive(m);
		test_Equal(RMessage2::EConnect,m.Function());
		m.Complete(KErrNone);
		}

	// wait for our session connections...
	for(i=0; i<KNumSlaveProcesses; i++)
		{
		// RDebug::Printf("%d Session wait %d",SlaveNumber,i);
		User::WaitForRequest(Sessions[i].iStatus);
		}

	// prime server for receiving messages...
	Server.Receive(ServerMessage,ServerStatus);

	// synchronise with other processes...
	RDebug::Printf("Slave %d waiting for trigger",SlaveNumber);
	RProcess::Rendezvous(KErrNone);
	StartSemaphore.Wait();
	RDebug::Printf("Slave %d started",SlaveNumber);
	}



//
// Test by random operations...
//
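// Each slave loops forever: pick a random chunk, verify its own region if it is
// known to be fully committed, then apply one random operation - close the chunk
// handle, commit or decommit the whole owned region, commit or decommit a random
// page-aligned sub-region, send the region to another slave over IPC, service a
// pending IPC message by reading the sender's descriptor, or exercise the pin and
// page-move test drivers. Committed[] tracks whether the owned region is fully
// decommitted (0), fully committed (1) or mixed (-1) so the expected result codes
// can be asserted.
//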

void DoTest()
	{
	RandomInit(SlaveNumber);
	TInt r;
	for(;;)
		{
		// select random chunk...
		TInt chunkNumber = Random(KNumTestChunks);
		RChunk& chunk = Chunks[chunkNumber];

		// get the region of this chunk which this process 'owns'...
		TInt offset;
		TInt size;
		ChunkOwnedRegion(chunkNumber,offset,size);

		// calculate a random region in the owned part...
		TInt randomOffset = offset+(Random(size)&~PageMask);
		TInt randomSize = (Random(size-(randomOffset-offset))+PageMask)&~PageMask;
		if(!randomSize)
			continue; // try again

		// pick a random slave...
		TInt randomSlave = Random(KNumSlaveProcesses);

		// open chunk if it isn't already...
		r = ChunkOpen(chunkNumber);
		if(r==KErrNoMemory)
			continue; // can't do anything with chunk if we can't open it

		// check our contents of chunk...
		if(Committed[chunkNumber]==1)
			{
			if(TRACE) RDebug::Printf("%d %d Check %08x+%08x",SlaveNumber,chunkNumber,offset,size);
			ChunkCheckRegion(chunkNumber,offset,size);
			}

		// perform random operation...
		switch(Random(12))
			{
		case 0:
		case 1:
			// close chunk...
			if(TRACE) RDebug::Printf("%d %d Close",SlaveNumber,chunkNumber);
			chunk.Close();
			break;

		case 2:
			// commit all...
			if(TRACE) RDebug::Printf("%d %d Commit all %08x+%08x",SlaveNumber,chunkNumber,offset,size);
			if(Committed[chunkNumber]!=0)
				{
				r = chunk.Decommit(offset,size);
				test_KErrNone(r);
				Committed[chunkNumber] = 0;
				}
			r = chunk.Commit(offset,size);
			if(r!=KErrNoMemory)
				{
				test_KErrNone(r);
				Committed[chunkNumber] = 1;
				ChunkMarkRegion(chunkNumber,offset,size);
				}
			break;

		case 3:
			// decommit all...
			if(TRACE) RDebug::Printf("%d %d Decommit all %08x+%08x",SlaveNumber,chunkNumber,offset,size);
			r = chunk.Decommit(offset,size);
			test_KErrNone(r);
			Committed[chunkNumber] = 0;
			break;

		case 4:
		case 5:
			// commit random...
			if(TRACE) RDebug::Printf("%d %d Commit %08x+%08x",SlaveNumber,chunkNumber,randomOffset,randomSize);
			r = chunk.Commit(randomOffset,randomSize);
			if(r!=KErrNoMemory)
				{
				if(Committed[chunkNumber]==0)
					{
					test_KErrNone(r);
					Committed[chunkNumber] = -1;
					}
				else if(Committed[chunkNumber]==1)
					{
					test_Equal(KErrAlreadyExists,r);
					}
				else
					{
					if(r!=KErrAlreadyExists)
						test_KErrNone(r);
					}
				}
			break;

		case 6:
		case 7:
			// decommit random...
			if(TRACE) RDebug::Printf("%d %d Decommit %08x+%08x",SlaveNumber,chunkNumber,randomOffset,randomSize);
			r = chunk.Decommit(randomOffset,randomSize);
			test_KErrNone(r);
			if(Committed[chunkNumber]==1)
				Committed[chunkNumber] = -1;
			break;

		case 8:
			if(TRACE) RDebug::Printf("%d %d IPC Send->%d",SlaveNumber,chunkNumber,randomSlave);
			// ChunkPtr[chunkNumber].Set(chunk.Base(),ChunkSize(chunkNumber),ChunkSize(chunkNumber));
			ChunkPtr[chunkNumber].Set(chunk.Base()+offset,size,size);
			Sessions[randomSlave].Send(chunkNumber);
			break;

		case 9:
			// process IPC messages...
			if(ServerStatus.Int()==KRequestPending)
				continue;
			User::WaitForRequest(ServerStatus);

				{
				TInt sourceSlave = ServerMessage.Int0();
				chunkNumber = ServerMessage.Int1();
				if(TRACE) RDebug::Printf("%d %d IPC Receive<-%d",SlaveNumber,chunkNumber,sourceSlave);
				test_Equal(0,ServerMessage.Function());

				// get local descriptor for owned region in chunk...
				size = ServerMessage.GetDesMaxLength(2);
				test_NotNegative(size);
				if(size>KLocalIpcBufferSize)
					size = KLocalIpcBufferSize;
				TPtr8 local(LocalIpcBuffer,size,size);

				// if(Random(2))
					{
					// IPC read from other slave...
					if(TRACE) RDebug::Printf("%d %d IPC Read<-%d",SlaveNumber,chunkNumber,sourceSlave);
					TInt panicTrace = Ldd.SetPanicTrace(EFalse);
					r = ServerMessage.Read(2,local);
					Ldd.SetPanicTrace(panicTrace);
					if(r!=KErrBadDescriptor)
						test_KErrNone(r);
					}
				// else
				// {
				// // IPC write to other slave...
				// if(TRACE) RDebug::Printf("%d %d IPC Write->%d",SlaveNumber,chunkNumber,sourceSlave);
				// r = ServerMessage.Write(2,local,offset);
				// if(r!=KErrBadDescriptor)
				// test_KErrNone(r);
				// if(Committed[chunkNumber]==1)
				// ChunkMarkRegion(chunkNumber,offset,size);
				// }
				}

			ServerMessage.Complete(KErrNone);
			Server.Receive(ServerMessage,ServerStatus);
			break;

		case 10:
		case 11:
			// pin memory...
				{
				test_KErrNone(Ldd.UnpinVirtualMemory());
				for(TInt tries=10; tries>0; --tries)
					{
					TInt chunkSize = ChunkSize(chunkNumber);
					offset = Random(chunkSize);
					TInt maxSize = chunkSize-offset;
					if(maxSize>0x1000)
						maxSize = 0x1000;
					size = Random(maxSize);
					r = Ldd.PinVirtualMemory((TLinAddr)chunk.Base()+offset, size);
					if(r!=KErrNotFound && r!=KErrNoMemory)
						{
						test_KErrNone(r);
						break;
						}
					}
				}
			break;
		case 12:
		case 13:
			// Move any page in the chunk, not just the owned region.
				{
#if !defined(__WINS__) && !defined(__X86__)
				for(TInt tries=10; tries>0; --tries)
					{
					TInt chunkSize = ChunkSize(chunkNumber);
					offset = Random(chunkSize);
					MoveLdd.TryMovingUserPage((TAny*)(chunk.Base()+offset), ETrue);
					// Allow the move to fail for any reason as the page of the chunk
					// may or may not be currently committed, pinned, or accessed.
					}
#endif
				}
			break;
		default:
			test(false); // can't happen
			break;
			}
		}
	}



TInt E32Main()
	{
	// get system info...
	MemModelAttributes = UserSvr::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, NULL, NULL);
	MemModel = MemModelAttributes&EMemModelTypeMask;
	UserHal::PageSizeInBytes(PageSize);
	PageMask = PageSize-1;

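	// Slaves are copies of this same executable: the master stores the slave index
	// in process parameter slot 1 and the shared start semaphore in slot 2 when it
	// spawns them, so the presence of parameter 1 distinguishes slave from master.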
	// see if we are a slave process...
	if(User::GetTIntParameter(1,SlaveNumber)==KErrNone)
		{
		// do testing...
		SlaveInit();
		DoTest();
		return KErrGeneral; // shouldn't have returned from testing
		}

	// master process...
	TBool pass = true; // final test result
	test.Title();
	if((MemModelAttributes&EMemModelAttrVA)==false)
		{
test.Start(_L("TESTS NOT RUN - Not relevent for the memory model"));
		test.End();
		return KErrNone;
		}

	// get time to run tests for...
	TInt timeout = 10; // time in seconds
	TInt cmdLineLen = User::CommandLineLength();
	if(cmdLineLen)
		{
		// get timeout value from command line
		RBuf cmdLine;
		test_KErrNone(cmdLine.Create(cmdLineLen));
		User::CommandLine(cmdLine);
		test_KErrNone(TLex(cmdLine).Val(timeout));
		if(timeout==0)
			timeout = KMaxTInt;
		}
	TTimeIntervalMicroSeconds32 tickTime;
	test_KErrNone(UserHal::TickPeriod(tickTime));
	TInt ticksPerSecond = 1000000/tickTime.Int();
	TInt timeoutTicks;
	if(timeout<KMaxTInt/ticksPerSecond)
		timeoutTicks = timeout*ticksPerSecond;
	else
		{
		timeoutTicks = KMaxTInt;
		timeout = timeoutTicks/ticksPerSecond;
		}
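	// The timeout is converted to kernel ticks for use with RTimer::AfterTicks()
	// below; e.g. with a typical 15625us tick period that is 64 ticks per second,
	// so the default 10 second run becomes 640 ticks.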

	// master process runs at higher priority than slaves so it can timeout and kill them...
	RThread().SetPriority(EPriorityMore);

	test.Start(_L("Creating test chunks"));
	TUint i;
	for(i=0; i<KNumTestChunks; i++)
		{
		test.Printf(_L("Size %dkB\r\n"),ChunkSize(i)>>10);
		test_KErrNone(Chunks[i].CreateDisconnectedGlobal(ChunkName(i),0,0,ChunkSize(i)));
		}

	test.Next(_L("Spawning slave processes"));
	test_KErrNone(StartSemaphore.CreateGlobal(KNullDesC,0));
	TFileName processFile(RProcess().FileName());
	for(i=0; i<KNumSlaveProcesses; i++)
		{
		test.Printf(_L("Slave %d\r\n"),i);
		RProcess& slave = Slaves[i];
		test_KErrNone(slave.Create(processFile,KNullDesC));
		test_KErrNone(slave.SetParameter(1,i));
		test_KErrNone(slave.SetParameter(2,StartSemaphore));
		slave.Logon(SlaveLogons[i]);
		test_Equal(KRequestPending,SlaveLogons[i].Int());
		slave.Rendezvous(SlaveRendezvous[i]);
		test_Equal(KRequestPending,SlaveRendezvous[i].Int());
		}

	test.Next(_L("Create timer"));
	RTimer timer;
	test_KErrNone(timer.CreateLocal());

	test.Next(_L("Resuming slave processes"));
	for(i=0; i<KNumSlaveProcesses; i++)
		Slaves[i].Resume();

	// this test must now take care not to die (e.g. panic due to assert fail)
	// until it has killed the slave processes

	test.Next(_L("Change paging cache size"));
	TUint cacheOriginalMin = 0;
	TUint cacheOriginalMax = 0;
	TUint cacheCurrentSize = 0;
	DPTest::CacheSize(cacheOriginalMin, cacheOriginalMax, cacheCurrentSize);
	DPTest::SetCacheSize(1, 2*ChunkSize(0)); // big enough for all the test chunks

	test.Next(_L("Wait for slaves to initialise"));
	TRequestStatus timeoutStatus;
	timer.After(timeoutStatus,10*1000000); // allow short time for slaves to initialise
	for(i=0; i<KNumSlaveProcesses; i++)
		{
		User::WaitForAnyRequest(); // wait for a rendezvous
		if(timeoutStatus.Int()!=KRequestPending)
			{
			test.Printf(_L("Timeout waiting for slaves to initialise\r\n"));
			pass = false;
			break;
			}
		}

	test.Next(_L("Restore paging cache size"));
	DPTest::SetCacheSize(cacheOriginalMin, cacheOriginalMax);

	if(pass)
		{
		timer.Cancel();
		User::WaitForAnyRequest(); // swallow timer signal

		test.Next(_L("Check slaves are ready"));
		for(i=0; i<KNumSlaveProcesses; i++)
			{
			if(SlaveRendezvous[i].Int()!=KErrNone || Slaves[i].ExitType()!=EExitPending)
				{
				test.Printf(_L("Slaves not ready or died!\r\n"));
				pass = false;
				break;
				}
			}
		}

	if(pass)
		{
		test.Next(_L("Setup simulated kernel heap failure"));
		__KHEAP_SETFAIL(RAllocator::EDeterministic,100);
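		// With EDeterministic/100 every 100th kernel heap allocation fails while the
		// slaves are stressing, which is why the slave code above tolerates
		// KErrNoMemory from chunk and session operations.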

		TBuf<80> text;
		text.Format(_L("Stressing for %d seconds..."),timeout);
		test.Next(text);
		timer.AfterTicks(timeoutStatus,timeoutTicks);
		StartSemaphore.Signal(KNumSlaveProcesses); // release slaves to start testing
		User::WaitForAnyRequest(); // wait for timeout or slave death via logon completion

		pass = timeoutStatus.Int()==KErrNone; // timeout means slaves are still running OK

		test.Next(_L("Check slaves still running"));
		for(i=0; i<KNumSlaveProcesses; i++)
			if(Slaves[i].ExitType()!=EExitPending)
				pass = false;

		test.Next(_L("Clear kernel heap failure"));
		TUint kheapFails = __KHEAP_CHECKFAILURE;
		__KHEAP_RESET;
		test.Printf(_L("Number of simulated memory failures = %d\r\n"),kheapFails);
		}

	test.Next(_L("Killing slave processes"));
	for(i=0; i<KNumSlaveProcesses; i++)
		Slaves[i].Kill(0);

	test.Next(_L("Assert test passed"));
	test(pass);

	test.End();

	for(i=0; i<KNumSlaveProcesses; i++)
		Slaves[i].Close();
	for(i=0; i<KNumTestChunks; i++)
		Chunks[i].Close();
	timer.Close();
	for(i=0; i<KNumSlaveProcesses; i++)
		User::WaitForRequest(SlaveLogons[i]);

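	// Wait for the kernel's supervisor thread to finish cleaning up the killed
	// processes before exiting (the third argument appears to be a millisecond
	// timeout for the barrier).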
	UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);

	return KErrNone;
	}