// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\kernel\sbtrace.cpp
//
//

#include <kernel/kern_priv.h>
#include "execs.h"
#include <e32panic.h>
#include "memmodel.h"

//SBTraceData BTraceData = { {0},0,0 };


TBool DummyBTraceHandler(TUint32,TUint32,const TUint32,const TUint32,const TUint32,const TUint32,const TUint32,const TUint32)
	{
	return EFalse;
	}


void BTrace::Init0()
	{
	BTrace::SetHandler(DummyBTraceHandler);
	TUint32* src = Kern::SuperPage().iInitialBTraceFilter;
	TUint32* srcEnd = src+256/32;

	// always have EMetaTrace enabled if any trace category is enabled...
	TUint32 anySet = 0;
	TUint32* scan = src;
	do anySet |= *scan++;
	while(scan<srcEnd);
	if(anySet)
		SetFilter(BTrace::EMetaTrace,1);

	TInt category = 0;
	do
		{
		TUint32 bits = *src++;
		do
			{
			if(category!=BTrace::EMetaTrace)
				SetFilter(category,(bits&1));
			++category;
			bits >>= 1;
			}
		while(category&31);
		}
	while(src<srcEnd);
	}
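
// Note on the loop above: the super page holds one 32-bit word per 32 trace
// categories, so 256/32 = 8 words cover all 256 possible categories. Bit n of
// word w corresponds to category (w*32 + n), which is why the inner loop shifts
// 'bits' right once per category and ends when 'category & 31' wraps back to 0.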


EXPORT_C TInt BTrace::Control(BTrace::TControl aFunction, TAny* aArg1, TAny* aArg2)
	{
	return (*BTraceData.iControl)(aFunction, aArg1, aArg2);
	}


EXPORT_C BTrace::THandler BTrace::SetHandler(BTrace::THandler aHandler)
	{
	BTrace::TControlFunction oldControl;
	BTrace::THandler oldHandler;
	SetHandlers(aHandler,0,oldHandler,oldControl);
	return oldHandler;
	}
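
// Illustrative sketch only (not part of this file): a trace output driver would
// typically install its own handler with the same eight-argument signature as
// DummyBTraceHandler above, and may keep the previous handler returned by
// SetHandler() if it wants to chain to it.
//
//	TBool MyHandler(TUint32 aHeader, TUint32 aHeader2, const TUint32 aContext,
//					const TUint32 a1, const TUint32 a2, const TUint32 a3,
//					const TUint32 aExtra, const TUint32 aPc)
//		{
//		// copy the record to the output medium; return ETrue if it was consumed
//		return ETrue;
//		}
//
//	BTrace::THandler oldHandler = BTrace::SetHandler(MyHandler);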

void TraceFastMutexName(NFastMutex* aMutex, const char* aName)
	{
	TPtrC8 name((const TUint8*)aName);
	BTraceN(BTrace::EFastMutex, BTrace::EFastMutexName, aMutex, 0, name.Ptr(), name.Length());
	}

void TraceDObject(DObject* aObj, TUint aCat, TUint aSub, const char* aName)
	{
	if (!aObj)
		return;
	DObject* owner = aObj->iOwner;
	if (!aObj->iName && aName)
		{
		TPtrC8 name((const TUint8*)aName);
		BTraceN(aCat, aSub, aObj, owner, name.Ptr(), name.Size());
		}
	else
		{
		TKName nameBuf;
		aObj->Name(nameBuf);
		BTraceN(aCat, aSub, aObj, owner, nameBuf.Ptr(), nameBuf.Size());
		}
	}

// IMPORTANT, this function must not be used for objects which have overridden Close()
// because the use of AsyncClose() by this function would then be unsafe
void TraceContainerContents(DObjectCon* aCon, TUint aCat, TUint aSub)
	{
	if (!aCon)
		return;
	NKern::ThreadEnterCS();
	aCon->Wait();
	TInt num = aCon->Count();
	for (TInt i=0; i<num; i++)
		{
		DObject* obj = (DObject*)(*aCon)[i];
		if (obj->Open() == KErrNone)
			{
			TraceDObject(obj, aCat, aSub, 0);
			obj->AsyncClose();
			}
		}
	aCon->Signal();
	NKern::ThreadLeaveCS();
	}


EXPORT_C void BTrace::Prime(TInt aCategory)
	{
	(void)aCategory;
#ifdef BTRACE_CPU_USAGE
	if(aCategory==BTrace::ECpuUsage || aCategory==-1)
		{
		BTraceContext0(BTrace::ECpuUsage,BTrace::ENewThreadContext);
		}
#endif

#if defined(BTRACE_THREAD_IDENTIFICATION) || defined(BTRACE_FLEXIBLE_MEM_MODEL)
	if(aCategory==BTrace::EThreadIdentification || aCategory==BTrace::EFlexibleMemModel || aCategory==-1)
		{
		DObjectCon* processes=Kern::Containers()[EProcess];
		if(processes)
			{
			NKern::ThreadEnterCS();
			DCodeSeg::Wait(); // FMM implementation needs to traverse code seg graph
			processes->Wait();
			TInt numProcesses = processes->Count();
			for(TInt i=0; i<numProcesses; i++)
				{
				DProcess* process = (DProcess*)(*processes)[i];
				if (process->Open() == KErrNone)
					{
					process->BTracePrime(aCategory);
					process->AsyncClose();
					}
				}
			processes->Signal();
			DCodeSeg::Signal();
			NKern::ThreadLeaveCS();
			}
		}
#endif

#if defined(BTRACE_THREAD_IDENTIFICATION) || defined(BTRACE_FLEXIBLE_MEM_MODEL)
	if(aCategory==BTrace::EThreadIdentification || aCategory==BTrace::EFlexibleMemModel || aCategory==-1)
		{
		DObjectCon* threads=Kern::Containers()[EThread];
		if(threads)
			{
			NKern::ThreadEnterCS();
			threads->Wait();
			TInt numThread = threads->Count();
			for(TInt i=0; i<numThread; i++)
				{
				DThread* thread = (DThread*)(*threads)[i];
				if (thread->Open() == KErrNone)
					{
					thread->BTracePrime(aCategory);
					thread->AsyncClose();
					}
				}
			threads->Signal();
			NKern::ThreadLeaveCS();
			}
		}
#endif
#if defined(BTRACE_CHUNKS) || defined(BTRACE_FLEXIBLE_MEM_MODEL)
	if(aCategory==BTrace::EChunks || aCategory==BTrace::EFlexibleMemModel || aCategory==-1)
		{
		DObjectCon* chunks=Kern::Containers()[EChunk];
		if(chunks)
			{
			NKern::ThreadEnterCS();
			chunks->Wait();
			TInt num = chunks->Count();
			for(TInt i=0; i<num; i++)
				{
				DChunk* chunk = (DChunk*)(*chunks)[i];
				if (chunk->Open() == KErrNone)
					{
					chunk->BTracePrime(aCategory);
					chunk->AsyncClose();
					}
				}
			chunks->Signal();
			NKern::ThreadLeaveCS();
			}
		}
#endif
#if defined(BTRACE_CODESEGS) || defined(BTRACE_FLEXIBLE_MEM_MODEL)
	if(aCategory==BTrace::ECodeSegs || aCategory==BTrace::EFlexibleMemModel || aCategory==-1)
		{
		NKern::ThreadEnterCS();
		DCodeSeg::Wait();
		SDblQueLink* anchor=&DCodeSeg::GlobalList.iA;
		SDblQueLink* pL=anchor->iNext;
		for (; pL!=anchor; pL=pL->iNext)
			{
			DCodeSeg* seg=_LOFF(pL,DCodeSeg,iLink);
			seg->CheckedOpen();
			seg->BTracePrime(aCategory);
			seg->CheckedClose();
			}
		DCodeSeg::Signal();
		NKern::ThreadLeaveCS();
		}
#endif
#ifdef BTRACE_PAGING
	if(aCategory==BTrace::EPaging || aCategory==-1)
		{
		BTrace4(BTrace::EPaging,BTrace::EPagingMemoryModel,K::MemModelAttributes & EMemModelTypeMask);
		}
#endif
#ifdef BTRACE_THREAD_PRIORITY
	if(aCategory==BTrace::EThreadPriority || aCategory==-1)
		{
		DObjectCon* threads=Kern::Containers()[EThread];
		if(threads)
			{
			NKern::ThreadEnterCS();
			threads->Wait();
			TInt numThread = threads->Count();
			for(TInt i=0; i<numThread; i++)
				{
				DThread* thread = (DThread*)(*threads)[i];
				DProcess* process = thread->iOwningProcess;
				NThread* nThread = &thread->iNThread;
				BTrace8(BTrace::EThreadPriority,BTrace::EProcessPriority,process,process->iPriority);
				BTrace12(BTrace::EThreadPriority,BTrace::EDThreadPriority,nThread,thread->iThreadPriority,thread->iDefaultPriority);
				BTrace8(BTrace::EThreadPriority,BTrace::ENThreadPriority,nThread,nThread->iPriority);
				}
			threads->Signal();
			NKern::ThreadLeaveCS();
			}
		}
#endif

#ifdef BTRACE_KERNEL_MEMORY
	if(aCategory==BTrace::EKernelMemory || aCategory==-1)
		M::BTracePrime(aCategory);
#endif

#ifdef BTRACE_RAM_ALLOCATOR
	if (aCategory == BTrace::ERamAllocator || aCategory == -1)
		M::BTracePrime(aCategory);
#endif

#ifdef BTRACE_FAST_MUTEX
	if (aCategory == BTrace::EFastMutex || aCategory == -1)
		{
		// Log the Name and Address of the system lock
		TraceFastMutexName(&TheScheduler.iLock, "System Lock");
		TraceFastMutexName(&TMessageQue::MsgLock, "MsgLock");
		TraceFastMutexName(&DObject::Lock, "ObjLock");
		TraceFastMutexName(&TLogon::LogonLock, "LogonLock");
		}
#endif

#ifdef BTRACE_SYMBIAN_KERNEL_SYNC
	if (aCategory == BTrace::ESymbianKernelSync || aCategory == -1)
		{
		TInt i;
		for (i=0; i<ENumObjectTypes; ++i)
			TraceDObject(K::Containers[i]->Lock(), BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceDObject(RObjectIx::HandleMutex, BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceDObject(DCodeSeg::CodeSegLock, BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceDObject(TTickQ::Mutex, BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceDObject(K::MachineConfigMutex, BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceDObject(((RHeapK*)K::Allocator)->Mutex(), BTrace::ESymbianKernelSync, BTrace::EMutexCreate, 0);
		TraceContainerContents(K::Containers[ESemaphore], BTrace::ESymbianKernelSync, BTrace::ESemaphoreCreate);
		TraceContainerContents(K::Containers[EMutex], BTrace::ESymbianKernelSync, BTrace::EMutexCreate);
		TraceContainerContents(K::Containers[ECondVar], BTrace::ESymbianKernelSync, BTrace::ECondVarCreate);
		}
#endif

#ifdef BTRACE_CLIENT_SERVER
	if(aCategory==BTrace::EClientServer || aCategory==-1)
		{
		DObjectCon* servers=Kern::Containers()[EServer];
		if(servers)
			{
			NKern::ThreadEnterCS();
			servers->Wait();
			TInt num = servers->Count();
			for(TInt i=0; i<num; i++)
				{
				DServer* server = (DServer*)(*servers)[i];
				if (server->Open() == KErrNone)
					{
					server->BTracePrime(aCategory);
					server->AsyncClose();
					}
				}
			servers->Signal();
			NKern::ThreadLeaveCS();
			}

		DObjectCon* sessions=Kern::Containers()[ESession];
		if(sessions)
			{
			NKern::ThreadEnterCS();
			sessions->Wait();
			TInt num = sessions->Count();
			for(TInt i=0; i<num; i++)
				{
				DSession* session = (DSession*)(*sessions)[i];
				if (session->Open() == KErrNone)
					{
					session->BTracePrime(aCategory);
					session->AsyncClose();
					}
				}
			sessions->Signal();
			NKern::ThreadLeaveCS();
			}
		}
#endif
	}
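
// Priming summary for the function above: Prime(-1) re-issues "current state"
// traces for every compiled-in category, while Prime(aCategory) restricts this to
// one category. Each conditional block walks the relevant kernel container
// (processes, threads, chunks, code segments, servers, sessions) under its lock and
// asks each object to emit its own BTracePrime() records, so a newly attached trace
// handler can be told about objects that already existed before tracing was enabled.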

TBool BTrace::IsSupported(TUint aCategory)
	{
	if(aCategory>255)
		return EFalse;
	switch(aCategory)
		{
	// traces which are always supported...
	case ERDebugPrintf:
	case EKernPrintf:
	case EKernPerfLog:
	case EProfiling:
	case ETest1:
	case ETest2:
		return ETrue;

	// traces which are conditional...

#ifndef __REMOVE_PLATSEC_DIAGNOSTICS__
	case EPlatsecPrintf:
		if(TheSuperPage().KernelConfigFlags() & EKernelConfigPlatSecDiagnostics)
			return ETrue;
		return EFalse;
#endif

#ifdef BTRACE_THREAD_IDENTIFICATION
	case EThreadIdentification:
		return ETrue;
#endif

#ifdef BTRACE_CPU_USAGE
	case ECpuUsage:
		return ETrue;
#endif

#ifdef BTRACE_CLIENT_SERVER
	case EClientServer:
		return ETrue;
#endif

#ifdef BTRACE_REQUESTS
	case ERequests:
		return ETrue;
#endif

#ifdef BTRACE_CHUNKS
	case EChunks:
		return ETrue;
#endif

#ifdef BTRACE_CODESEGS
	case ECodeSegs:
		return ETrue;
#endif

#ifdef BTRACE_PAGING
	case EPaging:
		return ETrue;
#endif

#ifdef BTRACE_THREAD_PRIORITY
	case EThreadPriority:
		return ETrue;
#endif

#ifdef BTRACE_PAGING_MEDIA
	case EPagingMedia:
		return ETrue;
#endif

#ifdef BTRACE_KERNEL_MEMORY
	case EKernelMemory:
		return ETrue;
#endif

	case EHeap:
	case EMetaTrace:
		return ETrue;

#ifdef BTRACE_RAM_ALLOCATOR
	case ERamAllocator:
		return ETrue;
#endif

#ifdef BTRACE_FAST_MUTEX
	case EFastMutex:
		return ETrue;
#endif

#ifdef BTRACE_RESOURCE_MANAGER
	case EResourceManager:
		return ETrue;

#endif

	case EIic:
		return ETrue;

#ifdef BTRACE_TRAWEVENT
	case ERawEvent:
		return ETrue;
#endif

#ifdef BTRACE_SYMBIAN_KERNEL_SYNC
	case ESymbianKernelSync:
		return ETrue;
#endif

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	case EFlexibleMemModel:
		return ETrue;
#endif

	default:
		return aCategory>=128; // all categories >=128 are 'supported'
		}
	}


//
// DBTraceFilter2
//

#ifdef __SMP__
TSpinLock BTraceFilter2Lock(TSpinLock::EOrderBTrace);
#endif

DBTraceFilter2* DBTraceFilter2::iCleanupHead = 0;


DBTraceFilter2* DBTraceFilter2::New(TInt aNumUids)
	{
	DBTraceFilter2* self = (DBTraceFilter2*)Kern::AllocZ(sizeof(DBTraceFilter2)+aNumUids*sizeof(TUint32));
	if (self!=NULL)
		self->iAccessCount = 1;
	return self;
	}


void DBTraceFilter2::Cleanup()
	{
	FOREVER
		{
		TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
		DBTraceFilter2* p = iCleanupHead;
		if (p)
			iCleanupHead = p->iCleanupLink;
		__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
		if (!p)
			break;
		delete p;
		}
	}


DBTraceFilter2* DBTraceFilter2::Open(DBTraceFilter2*volatile& aFilter2)
	{
	TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
	DBTraceFilter2* filter2 = aFilter2;
	if ((TLinAddr)filter2>1u)
		++filter2->iAccessCount;
	__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
	return filter2;
	}


void DBTraceFilter2::Close()
	{
	if ((TLinAddr)this<=1u)
		return;
	TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
	TInt access = iAccessCount;
	__NK_ASSERT_DEBUG(access>0);
	iAccessCount = access-1;
	if (access==1)
		{
		iCleanupLink = iCleanupHead;
		iCleanupHead = this;
		}
	__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
	}
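
// Lifecycle note: filter objects are reference counted under BTraceFilter2Lock.
// Open() only increments iAccessCount for real pointers (the values 0 and 1 are
// global "block all"/"pass all" markers, not objects); Close() decrements it and,
// when the count reaches zero, links the object onto iCleanupHead instead of
// deleting it immediately, presumably because Close() can be reached from contexts
// where a delete is not safe. The deferred deletion happens in Cleanup(), which the
// SetFilter2() functions below call from thread context.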


#ifndef __MARM__
TBool DBTraceFilter2::Check(TUint32 aUid)
	{
	TInt l = 0;
	TInt r = iNumUids;
	while(r>l)
		{
		TUint m = (l+r)>>1;
		TUint32 x = iUids[m];
		if(aUid>x)
			l = m+1;
		else if(aUid<x)
			r = m;
		else
			return 1;
		}
	return 0;
	}
#endif
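
// Check() is a standard binary search over iUids, which the SetFilter2() code keeps
// sorted and duplicate-free (see Sort()/Insert()/Remove() below). The ARM (__MARM__)
// build provides this function elsewhere, presumably in assembler, which is why the
// C++ version is compiled out here.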

extern void HeapSortUnsigned(TUint* aEntries,TInt aCount);
/**
Sort UIDs and remove duplicates.
Return number of unique uids.
*/
static TInt Sort(TUint32* aUids, TInt aNumUids)
	{
	HeapSortUnsigned((TUint*)aUids,aNumUids);
	TUint32* end = aUids+aNumUids-1;
	// remove duplicates...
	TUint32* src = aUids;
	TUint32* dst = aUids;
	if(src<=end)
		{
		TUint32 a = *src++;
		TUint32 b = a;
		*dst++ = b;
		while(src<=end)
			{
			a = *src++;
			if(a!=b)
				{
				b = a;
				*dst++ = b;
				}
			}
		}
	return dst-aUids;
	}
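
// Worked example for the routine above: input {3,1,3,2,1} with aNumUids==5 is heap
// sorted to {1,1,2,3,3}, then the de-duplication pass compacts it in place to
// {1,2,3} and the function returns 3.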


/**
Remove aUid from list aSrc and store result at aDst.
*/
static TUint Remove(TUint32* aDst, TUint32* aSrc, TInt aSrcCount, TUint32 aUid)
	{
	TUint32* dst = aDst;
	TUint32* end = aSrc+aSrcCount;
	while(aSrc<end)
		{
		TUint32 a = *aSrc++;
		if(a!=aUid)
			*dst++ = a;
		}
	return dst-aDst;
	}


/**
Insert aUid into list aSrc and store result at aDst.
*/
static TUint Insert(TUint32* aDst, TUint32* aSrc, TInt aSrcCount, TUint32 aUid)
	{
	TUint32* dst = aDst;
	TUint32* end = aSrc+aSrcCount;
	TUint32 a;
	while(aSrc<end)
		{
		a = *aSrc++;
		if(a<aUid)
			*dst++ = a;
		else
			goto done;
		}
	*dst++ = aUid;
	return dst-aDst;
done:
	if(a!=aUid)
		*dst++ = aUid;
	*dst++ = a;
	while(aSrc<end)
		*dst++ = *aSrc++;
	return dst-aDst;
	}


EXPORT_C TInt BTrace::SetFilter2(TUint32 aUid, TBool aValue)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"BTrace::Filter2");
	NKern::ThreadEnterCS();
	SBTraceData& traceData = BTraceData;
	DBTraceFilter2* oldFilter = DBTraceFilter2::Open(traceData.iFilter2);
	if((TUint)oldFilter==1u && !aValue)
		{
		NKern::ThreadLeaveCS();
		return KErrNotSupported; // can't clear a single uid when global filter is in 'pass all' mode
		}
	TBool oldValue = (TLinAddr)oldFilter<2u ? (TBool)oldFilter : oldFilter->Check(aUid);
	if(aValue!=oldValue && (TUint)aValue<=1u)
		{
		TUint count = (TLinAddr)oldFilter<2u ? 0 : oldFilter->iNumUids;
		TUint newCount = count+(aValue?1:-1);
		DBTraceFilter2* newFilter = DBTraceFilter2::New(newCount);

		if(!newFilter)
			oldValue = KErrNoMemory;
		else
			{
			if(aValue)
				{
				// add aUid...
				newFilter->iNumUids = ::Insert(newFilter->iUids,oldFilter->iUids,count,aUid);
				__NK_ASSERT_DEBUG(newFilter->iNumUids==newCount);
				}
			else
				{
				// remove aUid...
				newFilter->iNumUids = ::Remove(newFilter->iUids,oldFilter->iUids,count,aUid);
				__NK_ASSERT_DEBUG(newFilter->iNumUids==newCount);
				if(!newCount)
					{
					newFilter->Close();
					newFilter = 0;
					}
				}
			// finished with old filter...
			oldFilter->Close();

			// use newFilter...
			TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
			oldFilter = traceData.iFilter2;
			traceData.iFilter2 = newFilter;
			__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
			// oldFilter is now the one we replaced, which is not necessarily the same
			// as the previous oldFilter...
			}
		}
	oldFilter->Close();
	DBTraceFilter2::Cleanup();
	NKern::ThreadLeaveCS();
	return oldValue;
	}
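
// Illustrative usage (not part of this file): enable the secondary (UID) filter for
// one trace identifier and restore the previous state afterwards. The UID shown is
// made up for the example.
//
//	TInt old = BTrace::SetFilter2(0x10001234u, ETrue);
//	// ... run the traced scenario ...
//	if (old == 0 || old == 1)
//		BTrace::SetFilter2(0x10001234u, (TBool)old);
//
// The return value is the UID's previous filter state (0 or 1), or a negative error
// code such as KErrNoMemory or KErrNotSupported as handled above.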


EXPORT_C TInt BTrace::SetFilter2(const TUint32* aUids, TInt aNumUids)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"BTrace::Filter2");
	NKern::ThreadEnterCS();
	DBTraceFilter2* newFilter = DBTraceFilter2::New(aNumUids);
	if(!newFilter)
		{
		NKern::ThreadLeaveCS();
		return KErrNoMemory;
		}

	memcpy(&newFilter->iUids,aUids,aNumUids*sizeof(TUint32));
	aNumUids = Sort(newFilter->iUids, aNumUids);
	newFilter->iNumUids = aNumUids;

	TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
	DBTraceFilter2* oldFilter = BTraceData.iFilter2;
	BTraceData.iFilter2 = newFilter;
	__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
	oldFilter->Close();
	DBTraceFilter2::Cleanup();
	NKern::ThreadLeaveCS();
	return KErrNone;
	}


EXPORT_C TInt BTrace::SetFilter2(TInt aGlobalFilter)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"BTrace::Filter2");
	NKern::ThreadEnterCS();
	DBTraceFilter2* oldFilter;
	if((TUint)aGlobalFilter>1u)
		oldFilter = BTraceData.iFilter2; // just query existing value
	else
		{
		// replace filter with 0 or 1...
		TInt irq = __SPIN_LOCK_IRQSAVE(BTraceFilter2Lock);
		oldFilter = BTraceData.iFilter2;
		BTraceData.iFilter2 = (DBTraceFilter2*)aGlobalFilter;
		__SPIN_UNLOCK_IRQRESTORE(BTraceFilter2Lock, irq);
		oldFilter->Close();
		}
	DBTraceFilter2::Cleanup();
	NKern::ThreadLeaveCS();
	return (TUint)oldFilter>1u ? -1 : (TInt)oldFilter;
	}


EXPORT_C TInt BTrace::Filter2(TUint32*& aUids, TInt& aGlobalFilter)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"BTrace::Filter2");
	DBTraceFilter2* filter = DBTraceFilter2::Open(BTraceData.iFilter2);
	TInt r = 0;
	aUids = 0;
	aGlobalFilter = (TBool)filter;
	if((TUint)filter>1u)
		{
		aGlobalFilter = -1;
		r = filter->iNumUids;
		TUint size = r*sizeof(TUint32);
		aUids = (TUint32*)Kern::Alloc(size);
		if(aUids)
			memcpy(aUids,filter->iUids,size);
		else
			r = KErrNoMemory;
		}
	filter->Close();
	return r;
	}

#ifndef __MARM__
TBool SBTraceData::CheckFilter2(TUint32 aUid)
	{
	// quick check for global filter setting...
	TUint global = (TUint)iFilter2;
	if(global<2)
		return global;

	TBool enterCs = (NKern::CurrentContext()==NKern::EThread) && !NKern::KernelLocked();
	if (enterCs)
		NKern::_ThreadEnterCS();
	DBTraceFilter2* filter = DBTraceFilter2::Open(iFilter2);
	TBool value = (TLinAddr)filter<2u ? (TBool)filter : filter->Check(aUid);
	filter->Close();
	if (enterCs)
		NKern::_ThreadLeaveCS();
	return value;
	}
#endif

EXPORT_C TBool BTrace::CheckFilter2(TUint32 aCategory,TUint32 aUid)
	{
	SBTraceData& traceData = BTraceData;
	if(!traceData.iFilter[aCategory&0xff])
		return EFalse;
	return traceData.CheckFilter2(aUid);
	}


EXPORT_C TBool BTrace::CheckFilter(TUint32 aCategory)
	{
	return BTraceData.iFilter[aCategory&0xff];
	}
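
// Illustrative usage (not part of this file): kernel-side code typically guards an
// expensive trace with the cheap per-category check before building the payload.
//
//	if (BTrace::CheckFilter(BTrace::EKernPrintf))
//		{
//		// ...format the data, then emit it with one of the BTrace macros
//		// (e.g. BTraceN, used earlier in this file)...
//		}
//
// CheckFilter() only consults the per-category filter; CheckFilter2() above also
// applies the secondary UID filter once the category itself is enabled.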


//
//
//

TBool ExecHandler::BTraceOut(TUint32 aHeader, TUint32 a1, const BTrace::SExecExtension& aExt, TInt aDataSize)
	{
	SBTraceData& traceData = BTraceData;
	if(!traceData.iFilter[(aHeader>>BTrace::ECategoryIndex*8)&0xff])
		return EFalse;

	if(aHeader&(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8))
		{
		// EMissingRecord flag is overloaded to mean that secondary filter should be checked
		aHeader &= ~(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8);
		if(!traceData.CheckFilter2(a1))
			return EFalse;
		}

	// only PC and Context flags allowed...
	if(aHeader&((0xff^BTrace::EContextIdPresent^BTrace::EPcPresent)<<BTrace::EFlagsIndex*8))
		goto error;

	{
	// get size of trace data excluding aDataSize
	TUint size = (aHeader>>BTrace::ESizeIndex*8)&0xff;
	if(aHeader&(BTrace::EPcPresent<<BTrace::EFlagsIndex*8))
		size -= 4;
	TUint32 context = 0;
	if(aHeader&(BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8))
		{
		size -= 4;
		context = (TUint32)NKern::CurrentThread();
		}

	if(!aDataSize)
		{
		if((size-4)>(16-4)) // size must be 4...16
			goto error;
		__ACQUIRE_BTRACE_LOCK();
		TBool r = traceData.iHandler(aHeader,0,context,a1,aExt.iA2,aExt.iA3,0,aExt.iPc);
		__RELEASE_BTRACE_LOCK();
		return r;
		}

	if(size!=12)
		goto error;
	if(TUint(aDataSize)>KMaxBTraceDataArray)
		{
		aDataSize = KMaxBTraceDataArray;
		aHeader |= BTrace::ERecordTruncated<<(BTrace::EFlagsIndex*8);
		}
	{
	aHeader += aDataSize<<(BTrace::ESizeIndex*8);
	TUint32 data[KMaxBTraceDataArray/4];
	kumemget32(data,(const TAny*)aExt.iA3,(aDataSize+3)&~3);
	TUint32 a3 = aDataSize<=4 ? data[0] : (TUint32)&data;
	__ACQUIRE_BTRACE_LOCK();
	TBool r = traceData.iHandler(aHeader,0,context,a1,aExt.iA2,a3,0,aExt.iPc);
	__RELEASE_BTRACE_LOCK();
	return r;
	}
	}
error:
	return KErrArgument;
	}


TBool ExecHandler::BTraceOutBig(TUint32 aHeader, TUint32 a1, const BTrace::SExecExtension& aExt, TInt aDataSize)
	{
	SBTraceData& traceData = BTraceData;
	if(!traceData.iFilter[(aHeader>>BTrace::ECategoryIndex*8)&0xff])
		return EFalse;

	if(aHeader&(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8))
		{
		// EMissingRecord flag is overloaded to mean that secondary filter should be checked
		aHeader &= ~(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8);
		if(!traceData.CheckFilter2(a1))
			return EFalse;
		}

	// only PC and Context flags allowed...
	if(aHeader&((0xff^BTrace::EContextIdPresent^BTrace::EPcPresent)<<BTrace::EFlagsIndex*8))
		goto error;

	{
	// get size of trace data excluding aDataSize
	TUint size = (aHeader>>BTrace::ESizeIndex*8)&0xff;
	if(aHeader&(BTrace::EPcPresent<<BTrace::EFlagsIndex*8))
		size -= 4;
	TUint32 context = 0;
	if(aHeader&(BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8))
		{
		size -= 4;
		context = (TUint32)NKern::CurrentThread();
		}
	TUint32 pc = aExt.iPc;

	if(size!=8)
		goto error; // size should be 8 (for data in aHeader and a1)
	if(TUint(aDataSize)<KMaxBTraceDataArray+4)
		goto error; // trace too small for a big trace

	// adjust for header2, extra, and size word...
	aHeader |= BTrace::EHeader2Present<<(BTrace::EFlagsIndex*8)|BTrace::EExtraPresent<<(BTrace::EFlagsIndex*8);
	aHeader += 12;

	TUint8* userData = (TUint8*)aExt.iA3;
	TUint32 data[KMaxBTraceDataArray/4];

	TUint32 traceId = __e32_atomic_add_ord32(&BTrace::BigTraceId, 1);
	TUint32 header2 = BTrace::EMultipartFirst;
	TInt offset = 0;
	do
		{
		TUint32 size = aDataSize-offset;
		if(size>KMaxBTraceDataArray)
			size = KMaxBTraceDataArray;
		else
			header2 = BTrace::EMultipartLast;

		kumemget32(data,userData,(size+3)&~3);
		TUint32 dataPtr = (TUint32)&data;
		if(size<=4)
			dataPtr = data[0]; // 4 bytes or less are passed by value, not pointer

		__ACQUIRE_BTRACE_LOCK();
		TBool result = traceData.iHandler(aHeader+size,header2,context,aDataSize,a1,dataPtr,traceId,pc);
		__RELEASE_BTRACE_LOCK();
		if(!result)
			return result;

		offset += size;
		userData += size;

		header2 = BTrace::EMultipartMiddle;
		a1 = offset;
		}
	while(offset<aDataSize);

	return ETrue;
	}
error:
	return KErrArgument;
	}


TBool ExecHandler::UTraceOut(TUint32 aHeader, TUint32 a1, const BTrace::SExecExtension& aExt, TInt aDataSize)
	{
	SBTraceData& traceData = BTraceData;
	if(!traceData.iFilter[(aHeader>>BTrace::ECategoryIndex*8)&0xff])
		return EFalse;

	if(aHeader&(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8))
		{
		// EMissingRecord flag is overloaded to mean that secondary filter should be checked
		aHeader &= ~(BTrace::EMissingRecord<<BTrace::EFlagsIndex*8);
		if(!traceData.CheckFilter2(a1))
			return EFalse;
		}

	// only PC and Context flags allowed...
	if(aHeader&((0xff^BTrace::EContextIdPresent^BTrace::EPcPresent)<<BTrace::EFlagsIndex*8))
		return KErrArgument;

	// get size of trace data excluding aDataSize
	TUint size = (aHeader>>BTrace::ESizeIndex*8)&0xff;
	if(aHeader&(BTrace::EPcPresent<<BTrace::EFlagsIndex*8))
		size -= 4;
	TUint32 context = 0;
	if(aHeader&(BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8))
		{
		size -= 4;
		context = (TUint32)NKern::CurrentThread();
		}

	if(size!=8)
		return KErrArgument; // size should be 8 (for data in aHeader and a1)
	if(TUint(aDataSize)<KMaxBTraceDataArray)
		return KErrArgument; // trace too small for a big trace

	// adjust for header2, extra, and size word...
	aHeader |= BTrace::EHeader2Present<<(BTrace::EFlagsIndex*8)|BTrace::EExtraPresent<<(BTrace::EFlagsIndex*8);
	aHeader += 12;

	// send the first trace including the formatId
	TUint8* userData = (TUint8*)aExt.iA3;
	TUint32 data[KMaxBTraceDataArray/4];
	data[0] = aExt.iA2; // add the formatId for the first trace
	TUint32 traceId = NKern::LockedInc((TInt&)BTrace::BigTraceId);
	TUint32 header2 = BTrace::EMultipartFirst;
	TInt additionalIdentifiers = 4;
	TInt identifierOffset = additionalIdentifiers; // bytes
	TBool result = ETrue;
	TInt offset = 0; // offset into the payload

	do
		{
		TUint32 dataSize = aDataSize - offset;
		if(dataSize > (KMaxBTraceDataArray - identifierOffset))
			dataSize = KMaxBTraceDataArray - identifierOffset;
		else
			header2 = BTrace::EMultipartLast;

		kumemget32(data+identifierOffset/4,userData,(dataSize+3)&~3); //add the rest of the payload, 4 byte aligned

		TUint32 dataPtr = (TUint32)&data;
		if(dataSize<=4)
			dataPtr = data[0]; // 4 bytes or less are passed by value, not pointer

		__ACQUIRE_BTRACE_LOCK();
		result = traceData.iHandler(aHeader+dataSize,header2,context,aDataSize,a1,dataPtr,traceId,aExt.iPc);
		__RELEASE_BTRACE_LOCK();
		if(!result)
			return result;

		offset += dataSize - identifierOffset;
		userData += dataSize - identifierOffset;
		a1 = offset;
		header2 = BTrace::EMultipartMiddle;
		identifierOffset = 0; // we are only adding identifiers into the first trace
		}
	while(offset<aDataSize);

	return result;//ETrue
	}