|
// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\kernel\sbtrace.cpp
//
//

#include <kernel/kern_priv.h>
#include "execs.h"
#include <e32panic.h>
#include "memmodel.h"

//SBTraceData BTraceData = { {0},0,0 };

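// Default BTrace handler installed by Init0(). It discards every trace record;
// returning EFalse indicates the record was not output.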
|
TBool DummyBTraceHandler(TUint32,TUint32,const TUint32,const TUint32,const TUint32,const TUint32,const TUint32,const TUint32)
	{
	return EFalse;
	}

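// Boot-time initialisation of the trace subsystem: installs the dummy handler
// and loads the initial category filter from the superpage. The filter is a
// 256-bit bitmap (one bit per trace category) packed into eight TUint32 words.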
|
void BTrace::Init0()
	{
	BTrace::SetHandler(DummyBTraceHandler);
	TUint32* src = Kern::SuperPage().iInitialBTraceFilter;
	TUint32* srcEnd = src+256/32;

	// always have EMetaTrace enabled if any trace category is enabled...
	TUint32 anySet = 0;
	TUint32* scan = src;
	do anySet |= *scan++;
	while(scan<srcEnd);
	if(anySet)
		SetFilter(BTrace::EMetaTrace,1);

	TInt category = 0;
	do
		{
		TUint32 bits = *src++;
		do
			{
			if(category!=BTrace::EMetaTrace)
				SetFilter(category,(bits&1));
			++category;
			bits >>= 1;
			}
		while(category&31);
		}
	while(src<srcEnd);
	}

|
EXPORT_C TInt BTrace::Control(BTrace::TControl aFunction, TAny* aArg1, TAny* aArg2)
	{
	return (*BTraceData.iControl)(aFunction, aArg1, aArg2);
	}

|
EXPORT_C BTrace::THandler BTrace::SetHandler(BTrace::THandler aHandler)
	{
	BTrace::TControlFunction oldControl;
	BTrace::THandler oldHandler;
	SetHandlers(aHandler,0,oldHandler,oldControl);
	return oldHandler;
	}

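// Helper to emit an EFastMutexName record pairing a fast mutex address with a
// human-readable name, so that host-side decoders can label mutex events.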
|
void TraceFastMutexName(NFastMutex* aMutex, const char* aName)
	{
	TPtrC8 name((const TUint8*)aName);
	BTraceN(BTrace::EFastMutex, BTrace::EFastMutexName, aMutex, 0, name.Ptr(), name.Length());
	}

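// Helper to emit a name record for a DObject. If the object has no name of its
// own, the caller-supplied fallback name (if any) is used; otherwise the
// object's name is fetched into a local buffer and traced.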
|
void TraceDObject(DObject* aObj, TUint aCat, TUint aSub, const char* aName)
	{
	if (!aObj)
		return;
	DObject* owner = aObj->iOwner;
	if (!aObj->iName && aName)
		{
		TPtrC8 name((const TUint8*)aName);
		BTraceN(aCat, aSub, aObj, owner, name.Ptr(), name.Size());
		}
	else
		{
		TKName nameBuf;
		aObj->Name(nameBuf);
		BTraceN(aCat, aSub, aObj, owner, nameBuf.Ptr(), nameBuf.Size());
		}
	}

|
// IMPORTANT, this function must not be used for objects which have overridden Close()
// because the use of AsyncClose() by this function would then be unsafe
void TraceContainerContents(DObjectCon* aCon, TUint aCat, TUint aSub)
	{
	if (!aCon)
		return;
	NKern::ThreadEnterCS();
	aCon->Wait();
	TInt num = aCon->Count();
	for (TInt i=0; i<num; i++)
		{
		DObject* obj = (DObject*)(*aCon)[i];
		if (obj->Open() == KErrNone)
			{
			TraceDObject(obj, aCat, aSub, 0);
			obj->AsyncClose();
			}
		}
	aCon->Signal();
	NKern::ThreadLeaveCS();
	}

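// Emit trace records describing the *current* state of the system (existing
// processes, threads, chunks, code segments, etc.) for the given category, or
// for all primeable categories if aCategory is -1. Priming lets a trace
// consumer that attaches late reconstruct state it never saw being created.
//
// Illustrative only (not part of this file): a tracing client would typically
// enable a category and then prime it, e.g.
//   BTrace::SetFilter(BTrace::EThreadIdentification, 1);
//   BTrace::Prime(BTrace::EThreadIdentification);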
|
EXPORT_C void BTrace::Prime(TInt aCategory)
	{
	(void)aCategory;
#ifdef BTRACE_CPU_USAGE
	if(aCategory==BTrace::ECpuUsage || aCategory==-1)
		{
		BTraceContext0(BTrace::ECpuUsage,BTrace::ENewThreadContext);
		}
#endif

#if defined(BTRACE_THREAD_IDENTIFICATION) || defined(BTRACE_FLEXIBLE_MEM_MODEL)
	if(aCategory==BTrace::EThreadIdentification || aCategory==BTrace::EFlexibleMemModel || aCategory==-1)
		{
		DObjectCon* processes=Kern::Containers()[EProcess];
		if(processes)
			{
			NKern::ThreadEnterCS();
			DCodeSeg::Wait(); // FMM implementation needs to traverse code seg graph
			processes->Wait();
			TInt numProcesses = processes->Count();
			for(TInt i=0; i<numProcesses; i++)
				{
				DProcess* process = (DProcess*)(*processes)[i];
				if (process->Open() == KErrNone)
					{
					process->BTracePrime(aCategory);
					process->AsyncClose();
					}
				}
			processes->Signal();
			DCodeSeg::Signal();
			NKern::ThreadLeaveCS();
			}
		}
#endif

#if defined(BTRACE_THREAD_IDENTIFICATION) || defined(BTRACE_FLEXIBLE_MEM_MODEL)
	if(aCategory==BTrace::EThreadIdentification || aCategory==BTrace::EFlexibleMemModel || aCategory==-1)
		{
		DObjectCon* threads=Kern::Containers()[EThread];
		if(threads)
			{
			NKern::ThreadEnterCS();
			threads->Wait();
			TInt numThread = threads->Count();
			for(TInt i=0; i<numThread; i++)
				{
				DThread* thread = (DThread*)(*threads)[i];
				if (thread->Open() == KErrNone)
					{
					thread->BTracePrime(aCategory);
					thread->AsyncClose();
					}
				}
			threads->Signal();
			NKern::ThreadLeaveCS();
			}
		}
#endif
#if defined(BTRACE_CHUNKS) || defined(BTRACE_FLEXIBLE_MEM_MODEL)
	if(aCategory==BTrace::EChunks || aCategory==BTrace::EFlexibleMemModel || aCategory==-1)
		{
		DObjectCon* chunks=Kern::Containers()[EChunk];
		if(chunks)
			{
			NKern::ThreadEnterCS();
			chunks->Wait();
			TInt num = chunks->Count();
			for(TInt i=0; i<num; i++)
				{
				DChunk* chunk = (DChunk*)(*chunks)[i];
				if (chunk->Open() == KErrNone)
					{
					chunk->BTracePrime(aCategory);
					chunk->AsyncClose();
					}
				}
			chunks->Signal();
			NKern::ThreadLeaveCS();
			}
		}
#endif
#if defined(BTRACE_CODESEGS) || defined(BTRACE_FLEXIBLE_MEM_MODEL)
	if(aCategory==BTrace::ECodeSegs || aCategory==BTrace::EFlexibleMemModel || aCategory==-1)
		{
		NKern::ThreadEnterCS();
		DCodeSeg::Wait();
		SDblQueLink* anchor=&DCodeSeg::GlobalList.iA;
		SDblQueLink* pL=anchor->iNext;
		for (; pL!=anchor; pL=pL->iNext)
			{
			DCodeSeg* seg=_LOFF(pL,DCodeSeg,iLink);
			seg->CheckedOpen();
			seg->BTracePrime(aCategory);
			seg->CheckedClose();
			}
		DCodeSeg::Signal();
		NKern::ThreadLeaveCS();
		}
#endif
#ifdef BTRACE_PAGING
	if(aCategory==BTrace::EPaging || aCategory==-1)
		{
		BTrace4(BTrace::EPaging,BTrace::EPagingMemoryModel,K::MemModelAttributes & EMemModelTypeMask);
		}
#endif
#ifdef BTRACE_THREAD_PRIORITY
	if(aCategory==BTrace::EThreadPriority || aCategory==-1)
		{
		DObjectCon* threads=Kern::Containers()[EThread];
		if(threads)
			{
			NKern::ThreadEnterCS();
			threads->Wait();
			TInt numThread = threads->Count();
			for(TInt i=0; i<numThread; i++)
				{
				DThread* thread = (DThread*)(*threads)[i];
				DProcess* process = thread->iOwningProcess;
				NThread* nThread = &thread->iNThread;
				BTrace8(BTrace::EThreadPriority,BTrace::EProcessPriority,process,process->iPriority);
				BTrace12(BTrace::EThreadPriority,BTrace::EDThreadPriority,nThread,thread->iThreadPriority,thread->iDefaultPriority);
				BTrace8(BTrace::EThreadPriority,BTrace::ENThreadPriority,nThread,nThread->iPriority);
				}
			threads->Signal();
			NKern::ThreadLeaveCS();
			}
		}
#endif

#ifdef BTRACE_KERNEL_MEMORY
	if(aCategory==BTrace::EKernelMemory || aCategory==-1)
		M::BTracePrime(aCategory);
#endif

#ifdef BTRACE_RAM_ALLOCATOR
	if (aCategory == BTrace::ERamAllocator || aCategory == -1)
		M::BTracePrime(aCategory);
#endif

#ifdef BTRACE_FAST_MUTEX
	if (aCategory == BTrace::EFastMutex || aCategory == -1)
		{
		// Trace the names and addresses of the well-known fast mutexes...
		TraceFastMutexName(&TheScheduler.iLock, "System Lock");
		TraceFastMutexName(&TMessageQue::MsgLock, "MsgLock");
		TraceFastMutexName(&DObject::Lock, "ObjLock");
		TraceFastMutexName(&TLogon::LogonLock, "LogonLock");
		}
#endif

#ifdef BTRACE_SYMBIAN_KERNEL_SYNC
	if (aCategory == BTrace::ESymbianKernelSync || aCategory == -1)
		{
		TInt i;
		for (i=0; i<ENumObjectTypes; ++i)
			TraceDObject(K::Containers[i]->Lock(), BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceDObject(RObjectIx::HandleMutex, BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceDObject(DCodeSeg::CodeSegLock, BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceDObject(TTickQ::Mutex, BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceDObject(K::MachineConfigMutex, BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceDObject(((RHeapK*)K::Allocator)->Mutex(), BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceContainerContents(K::Containers[ESemaphore], BTrace::ESymbianKernelSync, BTrace::ESemaphoreCreate);
		TraceContainerContents(K::Containers[EMutex], BTrace::ESymbianKernelSync, BTrace::EMutexCreate);
		TraceContainerContents(K::Containers[ECondVar], BTrace::ESymbianKernelSync, BTrace::ECondVarCreate);
		}
#endif
	}

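// Report whether trace records can ever be generated for a category in this
// build. Conditional categories depend on the BTRACE_* macros the kernel was
// compiled with; categories at or above 128 (outside the kernel-defined range)
// are always reported as supported.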
|
TBool BTrace::IsSupported(TUint aCategory)
	{
	if(aCategory>255)
		return EFalse;
	switch(aCategory)
		{
	// traces which are always supported...
	case ERDebugPrintf:
	case EKernPrintf:
	case EKernPerfLog:
	case EProfiling:
	case ETest1:
	case ETest2:
		return ETrue;

	// traces which are conditional...

#ifndef __REMOVE_PLATSEC_DIAGNOSTICS__
	case EPlatsecPrintf:
		if(TheSuperPage().KernelConfigFlags() & EKernelConfigPlatSecDiagnostics)
			return ETrue;
		return EFalse;
#endif

#ifdef BTRACE_THREAD_IDENTIFICATION
	case EThreadIdentification:
		return ETrue;
#endif

#ifdef BTRACE_CPU_USAGE
	case ECpuUsage:
		return ETrue;
#endif

#ifdef BTRACE_CLIENT_SERVER
	case EClientServer:
		return ETrue;
#endif

#ifdef BTRACE_REQUESTS
	case ERequests:
		return ETrue;
#endif

#ifdef BTRACE_CHUNKS
	case EChunks:
		return ETrue;
#endif

#ifdef BTRACE_CODESEGS
	case ECodeSegs:
		return ETrue;
#endif

#ifdef BTRACE_PAGING
	case EPaging:
		return ETrue;
#endif

#ifdef BTRACE_THREAD_PRIORITY
	case EThreadPriority:
		return ETrue;
#endif

#ifdef BTRACE_PAGING_MEDIA
	case EPagingMedia:
		return ETrue;
#endif

#ifdef BTRACE_KERNEL_MEMORY
	case EKernelMemory:
		return ETrue;
#endif

	case EHeap:
	case EMetaTrace:
		return ETrue;

#ifdef BTRACE_RAM_ALLOCATOR
	case ERamAllocator:
		return ETrue;
#endif

#ifdef BTRACE_FAST_MUTEX
	case EFastMutex:
		return ETrue;
#endif

#ifdef BTRACE_RESOURCE_MANAGER
	case EResourceManager:
		return ETrue;
#endif

	case EIic:
		return ETrue;

#ifdef BTRACE_TRAWEVENT
	case ERawEvent:
		return ETrue;
#endif

#ifdef BTRACE_SYMBIAN_KERNEL_SYNC
	case ESymbianKernelSync:
		return ETrue;
#endif

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	case EFlexibleMemModel:
		return ETrue;
#endif

	default:
		return aCategory>=128; // all categories >=128 are 'supported'
		}
	}

|
//
// DBTraceFilter2
//

#ifdef __SMP__
TSpinLock BTraceFilter2Lock(TSpinLock::EOrderBTrace);
#endif

DBTraceFilter2* DBTraceFilter2::iCleanupHead = 0;

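// DBTraceFilter2 instances hold the sorted UID list for the secondary filter.
// The pointer SBTraceData::iFilter2 is overloaded: the values 0 and 1 mean
// 'reject all' and 'pass all' respectively, while any other value is a real,
// reference-counted filter object. Filters are never deleted in place: Close()
// moves a zero-reference filter onto a cleanup list which Cleanup() drains
// later, which avoids freeing memory from contexts where that would be unsafe
// (e.g. during trace output).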
|
DBTraceFilter2* DBTraceFilter2::New(TInt aNumUids)
	{
	DBTraceFilter2* self = (DBTraceFilter2*)Kern::AllocZ(sizeof(DBTraceFilter2)+aNumUids*sizeof(TUint32));
	if (self!=NULL)
		self->iAccessCount = 1;
	return self;
	}


void DBTraceFilter2::Cleanup()
	{
	FOREVER
		{
		TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
		DBTraceFilter2* p = iCleanupHead;
		if (p)
			iCleanupHead = p->iCleanupLink;
		__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
		if (!p)
			break;
		delete p;
		}
	}

|
DBTraceFilter2* DBTraceFilter2::Open(DBTraceFilter2*volatile& aFilter2)
	{
	TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
	DBTraceFilter2* filter2 = aFilter2;
	if ((TLinAddr)filter2>1u)
		++filter2->iAccessCount;
	__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
	return filter2;
	}


void DBTraceFilter2::Close()
	{
	if ((TLinAddr)this<=1u)
		return;
	TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
	TInt access = iAccessCount;
	__NK_ASSERT_DEBUG(access>0);
	iAccessCount = access-1;
	if (access==1)
		{
		iCleanupLink = iCleanupHead;
		iCleanupHead = this;
		}
	__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
	}

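// Binary search of the sorted UID list; returns true if aUid is present.
// This C++ version is compiled only for non-ARM targets; the __MARM__ guard
// suggests an equivalent assembler implementation is provided elsewhere for
// ARM builds.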
|
#ifndef __MARM__
TBool DBTraceFilter2::Check(TUint32 aUid)
	{
	TInt l = 0;
	TInt r = iNumUids;
	while(r>l)
		{
		TUint m = (l+r)>>1;
		TUint32 x = iUids[m];
		if(aUid>x)
			l = m+1;
		else if(aUid<x)
			r = m;
		else
			return 1;
		}
	return 0;
	}
#endif

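// Helpers for maintaining the sorted, duplicate-free UID list used by the
// secondary filter. For example, Sort() applied to {3,1,3,2} yields {1,2,3}
// and returns 3; Insert() and Remove() copy a sorted source list into a
// destination while adding or dropping a single UID.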
|
extern void HeapSortUnsigned(TUint* aEntries,TInt aCount);

/**
Sort UIDs and remove duplicates.
Returns the number of unique UIDs.
*/
static TInt Sort(TUint32* aUids, TInt aNumUids)
	{
	HeapSortUnsigned((TUint*)aUids,aNumUids);
	TUint32* end = aUids+aNumUids-1;
	// remove duplicates...
	TUint32* src = aUids;
	TUint32* dst = aUids;
	if(src<=end)
		{
		TUint32 a = *src++;
		TUint32 b = a;
		*dst++ = b;
		while(src<=end)
			{
			a = *src++;
			if(a!=b)
				{
				b = a;
				*dst++ = b;
				}
			}
		}
	return dst-aUids;
	}


/**
Remove aUid from the sorted list aSrc and store the result at aDst.
Returns the number of UIDs in the result.
*/
static TUint Remove(TUint32* aDst, TUint32* aSrc, TInt aSrcCount, TUint32 aUid)
	{
	TUint32* dst = aDst;
	TUint32* end = aSrc+aSrcCount;
	while(aSrc<end)
		{
		TUint32 a = *aSrc++;
		if(a!=aUid)
			*dst++ = a;
		}
	return dst-aDst;
	}


/**
Insert aUid into the sorted list aSrc and store the result at aDst.
Returns the number of UIDs in the result.
*/
static TUint Insert(TUint32* aDst, TUint32* aSrc, TInt aSrcCount, TUint32 aUid)
	{
	TUint32* dst = aDst;
	TUint32* end = aSrc+aSrcCount;
	TUint32 a;
	while(aSrc<end)
		{
		a = *aSrc++;
		if(a<aUid)
			*dst++ = a;
		else
			goto done;
		}
	*dst++ = aUid;
	return dst-aDst;
done:
	if(a!=aUid)
		*dst++ = aUid;
	*dst++ = a;
	while(aSrc<end)
		*dst++ = *aSrc++;
	return dst-aDst;
	}

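// Enable or disable tracing for a single UID in the secondary filter.
// The filter is updated copy-on-write: a new DBTraceFilter2 is built with the
// UID inserted or removed, then swapped in under BTraceFilter2Lock so that
// concurrent readers (CheckFilter2) only ever see a complete filter. Returns
// the previous setting for the UID, or an error code.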
|
EXPORT_C TInt BTrace::SetFilter2(TUint32 aUid, TBool aValue)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"BTrace::Filter2");
	NKern::ThreadEnterCS();
	SBTraceData& traceData = BTraceData;
	DBTraceFilter2* oldFilter = DBTraceFilter2::Open(traceData.iFilter2);
	if((TUint)oldFilter==1u && !aValue)
		{
		NKern::ThreadLeaveCS();
		return KErrNotSupported; // can't clear a single uid when global filter is in 'pass all' mode
		}
	TBool oldValue = (TLinAddr)oldFilter<2u ? (TBool)oldFilter : oldFilter->Check(aUid);
	if(aValue!=oldValue && (TUint)aValue<=1u)
		{
		TUint count = (TLinAddr)oldFilter<2u ? 0 : oldFilter->iNumUids;
		TUint newCount = count+(aValue?1:-1);
		DBTraceFilter2* newFilter = DBTraceFilter2::New(newCount);

		if(!newFilter)
			oldValue = KErrNoMemory;
		else
			{
			if(aValue)
				{
				// add aUid...
				newFilter->iNumUids = ::Insert(newFilter->iUids,oldFilter->iUids,count,aUid);
				__NK_ASSERT_DEBUG(newFilter->iNumUids==newCount);
				}
			else
				{
				// remove aUid...
				newFilter->iNumUids = ::Remove(newFilter->iUids,oldFilter->iUids,count,aUid);
				__NK_ASSERT_DEBUG(newFilter->iNumUids==newCount);
				if(!newCount)
					{
					newFilter->Close();
					newFilter = 0;
					}
				}
			// finished with old filter...
			oldFilter->Close();

			// use newFilter...
			TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
			oldFilter = traceData.iFilter2;
			traceData.iFilter2 = newFilter;
			__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
			// oldFilter is now the one we replaced, which is not necessarily the same
			// as the previous oldFilter...
			}
		}
	oldFilter->Close();
	DBTraceFilter2::Cleanup();
	NKern::ThreadLeaveCS();
	return oldValue;
	}

|
EXPORT_C TInt BTrace::SetFilter2(const TUint32* aUids, TInt aNumUids)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"BTrace::Filter2");
	NKern::ThreadEnterCS();
	DBTraceFilter2* newFilter = DBTraceFilter2::New(aNumUids);
	if(!newFilter)
		{
		NKern::ThreadLeaveCS();
		return KErrNoMemory;
		}

	memcpy(&newFilter->iUids,aUids,aNumUids*sizeof(TUint32));
	aNumUids = Sort(newFilter->iUids, aNumUids);
	newFilter->iNumUids = aNumUids;

	TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
	DBTraceFilter2* oldFilter = BTraceData.iFilter2;
	BTraceData.iFilter2 = newFilter;
	__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
	oldFilter->Close();
	DBTraceFilter2::Cleanup();
	NKern::ThreadLeaveCS();
	return KErrNone;
	}

|
EXPORT_C TInt BTrace::SetFilter2(TInt aGlobalFilter)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"BTrace::Filter2");
	NKern::ThreadEnterCS();
	DBTraceFilter2* oldFilter;
	if((TUint)aGlobalFilter>1u)
		oldFilter = BTraceData.iFilter2; // just query existing value
	else
		{
		// replace filter with 0 or 1...
		TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
		oldFilter = BTraceData.iFilter2;
		BTraceData.iFilter2 = (DBTraceFilter2*)aGlobalFilter;
		__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
		oldFilter->Close();
		}
	DBTraceFilter2::Cleanup();
	NKern::ThreadLeaveCS();
	return (TUint)oldFilter>1u ? -1 : (TInt)oldFilter;
	}

|
EXPORT_C TInt BTrace::Filter2(TUint32*& aUids, TInt& aGlobalFilter)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"BTrace::Filter2");
	DBTraceFilter2* filter = DBTraceFilter2::Open(BTraceData.iFilter2);
	TInt r = 0;
	aUids = 0;
	aGlobalFilter = (TBool)filter;
	if((TUint)filter>1u)
		{
		aGlobalFilter = -1;
		r = filter->iNumUids;
		TUint size = r*sizeof(TUint32);
		aUids = (TUint32*)Kern::Alloc(size);
		if(aUids)
			memcpy(aUids,filter->iUids,size);
		else
			r = KErrNoMemory;
		}
	filter->Close();
	return r;
	}

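// Secondary-filter check used on the trace output path. The common cases
// (global 'reject all'/'pass all', encoded as pointer values 0 and 1) are
// decided without taking any lock; only a real filter object requires opening
// a reference and doing the binary search.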
|
#ifndef __MARM__
TBool SBTraceData::CheckFilter2(TUint32 aUid)
	{
	// quick check for global filter setting...
	TUint global = (TUint)iFilter2;
	if(global<2)
		return global;

	TBool enterCs = (NKern::CurrentContext()==NKern::EThread) && !NKern::KernelLocked();
	if (enterCs)
		NKern::_ThreadEnterCS();
	DBTraceFilter2* filter = DBTraceFilter2::Open(iFilter2);
	TBool value = (TLinAddr)filter<2u ? (TBool)filter : filter->Check(aUid);
	filter->Close();
	if (enterCs)
		NKern::_ThreadLeaveCS();
	return value;
	}
#endif

|
EXPORT_C TBool BTrace::CheckFilter2(TUint32 aCategory,TUint32 aUid)
	{
	SBTraceData& traceData = BTraceData;
	if(!traceData.iFilter[aCategory&0xff])
		return EFalse;
	return traceData.CheckFilter2(aUid);
	}


EXPORT_C TBool BTrace::CheckFilter(TUint32 aCategory)
	{
	return BTraceData.iFilter[aCategory&0xff];
	}

|
//
// Exec handlers
//

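// Exec handlers for user-side trace output. The first word of a trace record
// (aHeader) packs the record size, flags and category into separate bytes
// (ESizeIndex, EFlagsIndex, ECategoryIndex); the handlers below validate the
// flags, fill in the context id and PC where requested, copy any user-side
// payload with kumemget32(), and pass the completed record to the installed
// handler under the BTrace lock.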
|
TBool ExecHandler::BTraceOut(TUint32 aHeader, TUint32 a1, const BTrace::SExecExtension& aExt, TInt aDataSize)
	{
	SBTraceData& traceData = BTraceData;
	if(!traceData.iFilter[(aHeader>>BTrace::ECategoryIndex*8)&0xff])
		return EFalse;

	if(aHeader&(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8))
		{
		// EMissingRecord flag is overloaded to mean that secondary filter should be checked
		aHeader &= ~(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8);
		if(!traceData.CheckFilter2(a1))
			return EFalse;
		}

	// only PC and Context flags allowed...
	if(aHeader&((0xff^BTrace::EContextIdPresent^BTrace::EPcPresent)<<BTrace::EFlagsIndex*8))
		goto error;

	{
	// get size of trace data excluding aDataSize
	TUint size = (aHeader>>BTrace::ESizeIndex*8)&0xff;
	if(aHeader&(BTrace::EPcPresent<<BTrace::EFlagsIndex*8))
		size -= 4;
	TUint32 context = 0;
	if(aHeader&(BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8))
		{
		size -= 4;
		context = (TUint32)NKern::CurrentThread();
		}

	if(!aDataSize)
		{
		if((size-4)>(16-4)) // size must be 4...16
			goto error;
		__ACQUIRE_BTRACE_LOCK();
		TBool r = traceData.iHandler(aHeader,0,context,a1,aExt.iA2,aExt.iA3,0,aExt.iPc);
		__RELEASE_BTRACE_LOCK();
		return r;
		}

	if(size!=12)
		goto error;
	if(TUint(aDataSize)>KMaxBTraceDataArray)
		{
		aDataSize = KMaxBTraceDataArray;
		aHeader |= BTrace::ERecordTruncated<<(BTrace::EFlagsIndex*8);
		}
	{
	aHeader += aDataSize<<(BTrace::ESizeIndex*8);
	TUint32 data[KMaxBTraceDataArray/4];
	kumemget32(data,(const TAny*)aExt.iA3,(aDataSize+3)&~3);
	TUint32 a3 = aDataSize<=4 ? data[0] : (TUint32)&data;
	__ACQUIRE_BTRACE_LOCK();
	TBool r = traceData.iHandler(aHeader,0,context,a1,aExt.iA2,a3,0,aExt.iPc);
	__RELEASE_BTRACE_LOCK();
	return r;
	}
	}
error:
	return KErrArgument;
	}

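// Output a 'big' trace: a payload larger than a single record can carry is
// split into EMultipartFirst/EMultipartMiddle/EMultipartLast fragments of at
// most KMaxBTraceDataArray bytes, all stamped with the same atomically
// allocated trace id so the host can reassemble them.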
|
TBool ExecHandler::BTraceOutBig(TUint32 aHeader, TUint32 a1, const BTrace::SExecExtension& aExt, TInt aDataSize)
	{
	SBTraceData& traceData = BTraceData;
	if(!traceData.iFilter[(aHeader>>BTrace::ECategoryIndex*8)&0xff])
		return EFalse;

	if(aHeader&(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8))
		{
		// EMissingRecord flag is overloaded to mean that secondary filter should be checked
		aHeader &= ~(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8);
		if(!traceData.CheckFilter2(a1))
			return EFalse;
		}

	// only PC and Context flags allowed...
	if(aHeader&((0xff^BTrace::EContextIdPresent^BTrace::EPcPresent)<<BTrace::EFlagsIndex*8))
		goto error;

	{
	// get size of trace data excluding aDataSize
	TUint size = (aHeader>>BTrace::ESizeIndex*8)&0xff;
	if(aHeader&(BTrace::EPcPresent<<BTrace::EFlagsIndex*8))
		size -= 4;
	TUint32 context = 0;
	if(aHeader&(BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8))
		{
		size -= 4;
		context = (TUint32)NKern::CurrentThread();
		}
	TUint32 pc = aExt.iPc;

	if(size!=8)
		goto error; // size should be 8 (for data in aHeader and a1)
	if(TUint(aDataSize)<KMaxBTraceDataArray+4)
		goto error; // trace too small for a big trace

	// adjust for header2, extra, and size word...
	aHeader |= BTrace::EHeader2Present<<(BTrace::EFlagsIndex*8)|BTrace::EExtraPresent<<(BTrace::EFlagsIndex*8);
	aHeader += 12;

	TUint8* userData = (TUint8*)aExt.iA3;
	TUint32 data[KMaxBTraceDataArray/4];

	TUint32 traceId = __e32_atomic_add_ord32(&BTrace::BigTraceId, 1);
	TUint32 header2 = BTrace::EMultipartFirst;
	TInt offset = 0;
	do
		{
		TUint32 size = aDataSize-offset;
		if(size>KMaxBTraceDataArray)
			size = KMaxBTraceDataArray;
		else
			header2 = BTrace::EMultipartLast;

		kumemget32(data,userData,(size+3)&~3);
		TUint32 dataPtr = (TUint32)&data;
		if(size<=4)
			dataPtr = data[0]; // 4 bytes or less are passed by value, not pointer

		__ACQUIRE_BTRACE_LOCK();
		TBool result = traceData.iHandler(aHeader+size,header2,context,aDataSize,a1,dataPtr,traceId,pc);
		__RELEASE_BTRACE_LOCK();
		if(!result)
			return result;

		offset += size;
		userData += size;

		header2 = BTrace::EMultipartMiddle;
		a1 = offset;
		}
	while(offset<aDataSize);

	return ETrue;
	}
error:
	return KErrArgument;
	}

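// UTrace variant of the multipart output: like BTraceOutBig, but the first
// fragment additionally carries a 4-byte format identifier (aExt.iA2) ahead
// of the user payload, so the chunking accounts for identifierOffset when
// advancing through the data.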
|
TBool ExecHandler::UTraceOut(TUint32 aHeader, TUint32 a1, const BTrace::SExecExtension& aExt, TInt aDataSize)
	{
	SBTraceData& traceData = BTraceData;
	if(!traceData.iFilter[(aHeader>>BTrace::ECategoryIndex*8)&0xff])
		return EFalse;

	if(aHeader&(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8))
		{
		// EMissingRecord flag is overloaded to mean that secondary filter should be checked
		aHeader &= ~(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8);
		if(!traceData.CheckFilter2(a1))
			return EFalse;
		}

	// only PC and Context flags allowed...
	if(aHeader&((0xff^BTrace::EContextIdPresent^BTrace::EPcPresent)<<BTrace::EFlagsIndex*8))
		return KErrArgument;

	// get size of trace data excluding aDataSize
	TUint size = (aHeader>>BTrace::ESizeIndex*8)&0xff;
	if(aHeader&(BTrace::EPcPresent<<BTrace::EFlagsIndex*8))
		size -= 4;
	TUint32 context = 0;
	if(aHeader&(BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8))
		{
		size -= 4;
		context = (TUint32)NKern::CurrentThread();
		}

	if(size!=8)
		return KErrArgument; // size should be 8 (for data in aHeader and a1)
	if(TUint(aDataSize)<KMaxBTraceDataArray)
		return KErrArgument; // trace too small for a big trace

	// adjust for header2, extra, and size word...
	aHeader |= BTrace::EHeader2Present<<(BTrace::EFlagsIndex*8)|BTrace::EExtraPresent<<(BTrace::EFlagsIndex*8);
	aHeader += 12;

	// send the first trace including the formatId
	TUint8* userData = (TUint8*)aExt.iA3;
	TUint32 data[KMaxBTraceDataArray/4];
	data[0] = aExt.iA2; // add the formatId for the first trace
	TUint32 traceId = NKern::LockedInc((TInt&)BTrace::BigTraceId);
	TUint32 header2 = BTrace::EMultipartFirst;
	TInt additionalIdentifiers = 4;
	TInt identifierOffset = additionalIdentifiers; // bytes
	TBool result = ETrue;
	TInt offset = 0; // offset into the payload

	do
		{
		TUint32 dataSize = aDataSize - offset;
		if(dataSize > (KMaxBTraceDataArray - identifierOffset))
			dataSize = KMaxBTraceDataArray - identifierOffset;
		else
			header2 = BTrace::EMultipartLast;

		kumemget32(data+identifierOffset/4,userData,(dataSize+3)&~3); // add the rest of the payload, 4 byte aligned

		TUint32 dataPtr = (TUint32)&data;
		if(dataSize<=4)
			dataPtr = data[0]; // 4 bytes or less are passed by value, not pointer

		__ACQUIRE_BTRACE_LOCK();
		result = traceData.iHandler(aHeader+dataSize,header2,context,aDataSize,a1,dataPtr,traceId,aExt.iPc);
		__RELEASE_BTRACE_LOCK();
		if(!result)
			return result;

		offset += dataSize - identifierOffset;
		userData += dataSize - identifierOffset;
		a1 = offset;
		header2 = BTrace::EMultipartMiddle;
		identifierOffset = 0; // we are only adding identifiers into the first trace
		}
	while(offset<aDataSize);

	return result; // ETrue
	}