// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\drivers\trace\btrace.cpp
//
//

#include <kernel/kern_priv.h>
#include "platform.h"
#include "drivers/btrace.h"

#if defined(__EPOC32__) && defined(__CPU_X86)
#include <x86.h>
#endif

TBTraceBufferK Buffer;

TBool ChannelOpen = EFalse;

const TUint KCopyBufferMaxSize = 0x10000;

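/**
Create the shared kernel chunk holding the trace buffer, the record-offsets
array and the copy buffer, then install TBTraceBufferK::Trace and
TBTraceBufferK::ControlFunction as the kernel's BTrace handlers.
aSize is the requested trace buffer size in bytes and is rounded up to a
whole number of MMU pages; returns KErrArgument if it is not positive,
otherwise KErrNone or the error from chunk creation.
*/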
|
TInt TBTraceBufferK::Create(TInt aSize)
	{
	if(aSize<=0)
		return KErrArgument;
	TUint pageSize = Kern::RoundToPageSize(1);
	aSize = (aSize+pageSize-1)&-(TInt)pageSize;

	TUint recordOffsets = aSize+pageSize;
	TUint recordOffsetsSize = Kern::RoundToPageSize(aSize>>2);
	TUint copyBuffer = recordOffsets+recordOffsetsSize+pageSize;
	TUint copyBufferSize = Kern::RoundToPageSize(aSize>>2);
	if(copyBufferSize>KCopyBufferMaxSize)
		copyBufferSize = KCopyBufferMaxSize;
	TUint chunkSize = copyBuffer+copyBufferSize+pageSize;

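	// Resulting chunk layout (offsets from the start of the chunk), with an
	// uncommitted guard page between the committed regions and one trailing
	// uncommitted page at the end of the chunk:
	//   0             .. aSize                trace buffer (starts with a TBTraceBuffer header)
	//   recordOffsets .. +recordOffsetsSize   one byte per possible word-aligned record start
	//   copyBuffer    .. +copyBufferSize      copy buffer used by the user-side code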
|
	// Create chunk...
	TChunkCreateInfo info;
	info.iType = TChunkCreateInfo::ESharedKernelSingle;
	info.iMaxSize = chunkSize;
#ifdef __EPOC32__
	// we want full caching, no execute, default sharing
	new (&info.iMapAttr) TMappingAttributes2(EMemAttNormalCached, EFalse, ETrue);
#endif
	info.iOwnsMemory = ETrue; // Use memory from system's free pool
	info.iDestroyedDfc = NULL;
	TUint32 mapAttr;
	TInt r = Kern::ChunkCreate(info, iBufferChunk, iAddress, mapAttr);
	if(r==KErrNone)
		r = Kern::ChunkCommit(iBufferChunk, 0, aSize);
	if(r==KErrNone)
		r = Kern::ChunkCommit(iBufferChunk, recordOffsets, recordOffsetsSize);
	if(r==KErrNone)
		r = Kern::ChunkCommit(iBufferChunk, copyBuffer, copyBufferSize);

	// Check errors...
	if(r!=KErrNone)
		{
		Close();
		return r;
		}

	// Initialise state...
	iStart = sizeof(TBTraceBuffer);
	iEnd = aSize;
	iRecordOffsets = (TUint8*)(iAddress+recordOffsets);

	TBTraceBuffer* userBuffer = (TBTraceBuffer*)iAddress;
	userBuffer->iRecordOffsets = recordOffsets;
	userBuffer->iCopyBuffer = copyBuffer;
	userBuffer->iCopyBufferSize = copyBufferSize;

	Reset(0);

#ifndef __SMP__
	TInt irq = NKern::DisableAllInterrupts();
#endif
	iTimestamp2Enabled = EFalse;
	BTrace::SetHandlers(TBTraceBufferK::Trace,TBTraceBufferK::ControlFunction,iOldBTraceHandler,iOldBTraceControl);
#ifndef __SMP__
	NKern::RestoreInterrupts(irq);
#endif

	return KErrNone;
	}


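/**
Uninstall this driver's BTrace handlers and release the trace buffer chunk.
The chunk pointer and buffer address are cleared under the BTrace lock
(SMP) or with interrupts disabled (unicore) so that a concurrently running
trace handler cannot observe a stale address.
*/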
|
void TBTraceBufferK::Close()
	{
#ifdef __SMP__
	if(iOldBTraceHandler)
		{
		BTrace::THandler handler;
		BTrace::TControlFunction control;
		BTrace::SetHandlers(iOldBTraceHandler,iOldBTraceControl,handler,control);
		iOldBTraceHandler = NULL;
		iOldBTraceControl = NULL;
		}
	TSpinLock* sl = BTrace::LockPtr();
	TInt irq = sl->LockIrqSave(); // guarantees handler can't run at the same time
	DChunk* chunk = iBufferChunk;
	iBufferChunk = NULL;
	iAddress = NULL;
	sl->UnlockIrqRestore(irq);
#else
	TInt irq = NKern::DisableAllInterrupts();
	if(iOldBTraceHandler)
		{
		BTrace::THandler handler;
		BTrace::TControlFunction control;
		BTrace::SetHandlers(iOldBTraceHandler,iOldBTraceControl,handler,control);
		iOldBTraceHandler = NULL;
		iOldBTraceControl = NULL;
		}
	DChunk* chunk = iBufferChunk;
	iBufferChunk = NULL;
	iAddress = NULL;
	NKern::RestoreInterrupts(irq);
#endif
	if(chunk)
		Kern::ChunkClose(chunk);
	}




/**
Helper functions for encoding pseudo-floating-point values recoverable by:

	int exponent = (signed char)(encoded_val >> 24);
	int mantissa = encoded_val & 0xFFFFFF;
	double val = mantissa * pow(2, exponent);
*/
TUint EncodeFloatesque(TUint64 val64, TInt exponent)
	{
	// Lose precision until it fits in 24 bits
	TInt round_up = 0;
	while (val64>=0x1000000)
		{
		round_up = (TInt)(val64&1);
		val64 >>= 1;
		exponent++;
		}
	if (round_up)
		{
		val64++;
		if (val64>=0x1000000)
			{
			val64 >>= 1;
			exponent++;
			}
		}

	// Return 8-bit exponent and 24-bit mantissa
	return (TUint)(val64 | (((unsigned char)exponent)<<24));
	}

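/**
Encode 1/val in the pseudo-floating-point format above by dividing
(2^64 - 1) by val and using an exponent of -64.

Illustrative example (hypothetical 32.768kHz fast counter):
EncodeReciprocal(32768) gives mantissa 0x800000 with exponent -38, i.e.
2^23 * 2^-38 = 2^-15 seconds (~30.5us) per tick, encoded as 0xDA800000.
*/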
|
TUint EncodeReciprocal(TUint val)
	{
	if (val==0) return val;

	// Get reciprocal * 2^64
	TUint64 val64 = val;
	TUint64 div = 0;
	div--;
	val64 = div / val64;

	return EncodeFloatesque(val64, -64);
	}

TUint EncodePostDiv(TUint val, TUint divisor)
	{
	TUint64 val64 = val;
	val64 <<= 32;
	val64 = val64 / divisor;
	return EncodeFloatesque(val64, -32);
	}

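/**
Emit an EMetaTraceTimestampsInfo record so that host-side decoders can
convert timestamps into seconds: period1 and period2 encode the duration of
one tick of the Timestamp and Timestamp2 fields respectively, using the
pseudo-floating-point format above. As an illustration, on a hypothetical
system with a 1ms (1000us) nanokernel tick, EncodePostDiv(1000, 1000000)
encodes approximately 0.001 seconds (mantissa 4294967, exponent -32).
*/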
|
void BTracePrimeMetatrace()
	{
#ifdef __SMP__
	TUint period1 = EncodeReciprocal(NKern::TimestampFrequency());
	TUint period2 = period1 + (32u<<24); // timestamp2 period is 2^32 * timestamp1 period
	BTrace12(BTrace::EMetaTrace, BTrace::EMetaTraceTimestampsInfo, period1, period2, 1);
#else
	TUint period1 = EncodeReciprocal(NKern::FastCounterFrequency());
	TUint period2 = EncodePostDiv(NKern::TickPeriod(), 1000000);
	BTrace12(BTrace::EMetaTrace, BTrace::EMetaTraceTimestampsInfo, period1, period2, 0);
#endif
	}

void TBTraceBufferK::Reset(TUint aMode)
	{
#ifdef __SMP__
	TSpinLock* sl = BTrace::LockPtr();
#endif
	TInt irq = __SPIN_LOCK_IRQSAVE(*sl); // guarantees handler can't run at the same time
	iHead = iStart;
	TBTraceBuffer* userBuffer = (TBTraceBuffer*)iAddress;
	userBuffer->iStart = iStart;
	userBuffer->iEnd = iEnd;
	userBuffer->iHead = iHead;
	userBuffer->iTail = iHead;
	userBuffer->iGeneration = 0;
	userBuffer->iMode = aMode;
	__SPIN_UNLOCK_IRQRESTORE(*sl,irq);
	if(aMode)
		{
		if (BTrace::CheckFilter(BTrace::EMetaTrace))
			BTracePrimeMetatrace();
		BTrace::Prime();
		}
	}


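/**
Arrange for aDfc to be queued once roughly aSize more bytes of trace data
have been captured. Returns KErrCompletion if enough data is already
available (aDfc is not queued in that case), KErrNotReady if no trace
buffer exists, or KErrNone if the request has been stored for the trace
handler to complete later.
*/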
|
TInt TBTraceBufferK::RequestData(TInt aSize, TDfc* aDfc)
	{
	if(aSize<=0)
		aSize = 1;
#ifdef __SMP__
	TSpinLock* sl = BTrace::LockPtr();
#endif
	TInt irq = __SPIN_LOCK_IRQSAVE(*sl); // guarantees handler can't run
	TBTraceBuffer* userBuffer = (TBTraceBuffer*)iAddress;
	if(!userBuffer)
		{
		__SPIN_UNLOCK_IRQRESTORE(*sl,irq);
		return KErrNotReady;
		}
	TInt dif = userBuffer->iTail-iHead;
	if(dif>0)
		aSize = 0; // we need no more bytes because all bytes to end of buffer are available
	else
		aSize += dif; // number of bytes extra we need
	if(aSize>0)
		{
		iRequestDataSize = aSize;
		iWaitingDfc = aDfc;
		}
	__SPIN_UNLOCK_IRQRESTORE(*sl,irq);
	if(aSize<=0)
		return KErrCompletion;
	return KErrNone;
	}


#ifndef BTRACE_DRIVER_MACHINE_CODED

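/**
C++ implementation of the BTrace handler, used when the driver is not built
with the machine-coded version. On SMP builds the caller holds the BTrace
spinlock (which is why Close() and RequestData() take the same lock to
exclude the handler); on unicore builds the handler disables interrupts
itself. user_buffer.iGeneration is incremented before and after every
update so that a user-side reader can detect that it raced with the
handler, and when old records must be discarded in free-running mode the
new tail is published with a compare-and-swap so that a concurrent
user-side tail update forces a retry.
*/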
|
TBool TBTraceBufferK::Trace_Impl(TUint32 aHeader,TUint32 aHeader2,const TUint32 aContext,const TUint32 a1,const TUint32 a2,const TUint32 a3,const TUint32 aExtra, const TUint32 aPc, TBool aIncTimestamp2)
	{
#ifndef __SMP__
	TInt irq = NKern::DisableAllInterrupts();
#endif


#ifdef __SMP__
	// Header 2 always present and contains CPU number
	// If Header2 not originally there, add 4 to size
	if (!(aHeader&(BTrace::EHeader2Present<<BTrace::EFlagsIndex*8)))
		aHeader += (4<<BTrace::ESizeIndex*8) + (BTrace::EHeader2Present<<BTrace::EFlagsIndex*8), aHeader2=0;
	aHeader2 = (aHeader2 &~ BTrace::ECpuIdMask) | (NKern::CurrentCpu()<<20);
#endif
#ifdef BTRACE_INCLUDE_TIMESTAMPS
	// Add timestamp to trace...
#if defined(__SMP__)
	aHeader += 8<<BTrace::ESizeIndex*8;
	aHeader |= BTrace::ETimestampPresent<<BTrace::EFlagsIndex*8 | BTrace::ETimestamp2Present<<BTrace::EFlagsIndex*8;
	TUint64 timeStamp = NKern::Timestamp();
#elif defined(__EPOC32__) && defined(__CPU_X86)
	aHeader += 8<<BTrace::ESizeIndex*8;
	aHeader |= BTrace::ETimestampPresent<<BTrace::EFlagsIndex*8 | BTrace::ETimestamp2Present<<BTrace::EFlagsIndex*8;
	TUint64 timeStamp = X86::Timestamp();
#else
	TUint32 timeStamp = NKern::FastCounter();
	TUint32 timeStamp2 = 0;
	if (aIncTimestamp2)
		{
		timeStamp2 = NKern::TickCount();
		aHeader += 8<<BTrace::ESizeIndex*8;
		aHeader |= (BTrace::ETimestampPresent | BTrace::ETimestamp2Present) << BTrace::EFlagsIndex*8;
		}
	else
		{
		aHeader += 4<<BTrace::ESizeIndex*8;
		aHeader |= BTrace::ETimestampPresent<<BTrace::EFlagsIndex*8;
		}
#endif
#endif
	TUint size = (aHeader+3)&0xfc;

	TBTraceBufferK& buffer = Buffer;
	TLinAddr address = buffer.iAddress;
	TBTraceBuffer& user_buffer = *(TBTraceBuffer*)address;
	++user_buffer.iGeneration; // atomic not required since only driver modifies iGeneration
#ifdef __SMP__
	__e32_memory_barrier();
#endif
	TUint start = buffer.iStart;
	TUint end = buffer.iEnd;
	TUint orig_head = buffer.iHead;
	TInt requestDataSize = buffer.iRequestDataSize;
	TUint8* recordOffsets = buffer.iRecordOffsets;
	TUint32 orig_tail = user_buffer.iTail;
	TUint32 newHead, head, tail;

	if(!(user_buffer.iMode&RBTrace::EEnable))
		goto trace_off;

retry:
	head = orig_head;
	tail = orig_tail &~ 1;
	newHead = head+size;
	if(newHead>end)
		{
		requestDataSize = 0;
		newHead = start+size;
		if(head<tail || tail<newHead+1)
			{
			if(!(user_buffer.iMode&RBTrace::EFreeRunning))
				goto trace_dropped;
			user_buffer.iWrap = head;
			head = start;
			tail = newHead+(recordOffsets[newHead>>2]<<2);
			goto overwrite;
			}
		user_buffer.iWrap = head;
		head = start;
		}
	else if(head<tail && tail<=newHead)
		{
			{
			requestDataSize = 0;
			TUint wrap = user_buffer.iWrap;
			if(!(user_buffer.iMode&RBTrace::EFreeRunning))
				goto trace_dropped;
			if(newHead<end && newHead<wrap)
				{
				tail = newHead+(recordOffsets[newHead>>2]<<2);
				if(tail>=end || tail>=wrap)
					tail = start;
				}
			else
				tail = start;
			}
overwrite:
		*(TUint32*)(address+tail) |= BTrace::EMissingRecord<<(BTrace::EFlagsIndex*8);
		if (!__e32_atomic_cas_ord32(&user_buffer.iTail, &orig_tail, tail|1))
			goto retry; // go round again if user side has already updated the tail pointer
		}
|

	buffer.iRequestDataSize = requestDataSize-size;

	{
	recordOffsets += head>>2;
	TUint32* src;
	TUint32* dst = (TUint32*)((TUint)address+head);
	size >>= 2; // we are now counting words, not bytes

	// store first word of trace...
	TUint w = aHeader;
	if(buffer.iDropped)
		{
		buffer.iDropped = 0;
		w |= BTrace::EMissingRecord<<(BTrace::EFlagsIndex*8);
		}
	*recordOffsets++ = (TUint8)size;
	--size;
	*dst++ = w;

#ifndef __SMP__
	if(aHeader&(BTrace::EHeader2Present<<(BTrace::EFlagsIndex*8)))
#endif
		{
		w = aHeader2;
		*recordOffsets++ = (TUint8)size;
		--size;
		*dst++ = w;
		}

#ifdef BTRACE_INCLUDE_TIMESTAMPS
	// store timestamp...
#if defined(__SMP__) || (defined(__EPOC32__) && defined(__CPU_X86))
	*recordOffsets++ = (TUint8)size;
	--size;
	*dst++ = TUint32(timeStamp);
	*recordOffsets++ = (TUint8)size;
	--size;
	*dst++ = TUint32(timeStamp>>32);
#else
	*recordOffsets++ = (TUint8)size;
	--size;
	*dst++ = timeStamp;
	if (aIncTimestamp2)
		{
		*recordOffsets++ = (TUint8)size;
		--size;
		*dst++ = timeStamp2;
		}
#endif
#endif

	if(aHeader&(BTrace::EContextIdPresent<<(BTrace::EFlagsIndex*8)))
		{
		w = aContext;
		*recordOffsets++ = (TUint8)size;
		--size;
		*dst++ = w;
		}

	if(aHeader&(BTrace::EPcPresent<<(BTrace::EFlagsIndex*8)))
		{
		w = aPc;
		*recordOffsets++ = (TUint8)size;
		--size;
		*dst++ = w;
		}

	if(aHeader&(BTrace::EExtraPresent<<(BTrace::EFlagsIndex*8)))
		{
		w = aExtra;
		*recordOffsets++ = (TUint8)size;
		--size;
		*dst++ = w;
		}

	// store remaining words of trace...
	if(size)
		{
		w = a1;
		*recordOffsets++ = (TUint8)size;
		--size;
		*dst++ = w;
		if(size)
			{
			w = a2;
			*recordOffsets++ = (TUint8)size;
			--size;
			*dst++ = w;
			if(size)
				{
				if(size==1)
					{
					w = a3;
					*recordOffsets++ = (TUint8)size;
					*dst++ = w;
					}
				else
					{
					src = (TUint32*)a3;
					do
						{
						w = *src++;
						*recordOffsets++ = (TUint8)size;
						--size;
						*dst++ = w;
						}
					while(size);
					}
				}
			}
		}
	}
	buffer.iHead = newHead;
#ifdef __SMP__
	__e32_memory_barrier(); // make sure written data is observed before head pointer update
#endif
	user_buffer.iHead = newHead;

	{
	TDfc* dfc = (TDfc*)buffer.iWaitingDfc;
	if(dfc && buffer.iRequestDataSize<=0)
		{
		buffer.iWaitingDfc = NULL;
		dfc->RawAdd();
		}
	}

#ifdef __SMP__
	__e32_memory_barrier();
#endif
	++user_buffer.iGeneration; // atomic not required since only driver modifies iGeneration
#ifndef __SMP__
	NKern::RestoreInterrupts(irq);
#endif
	return ETrue;


trace_dropped:
	buffer.iRequestDataSize = 0;
	buffer.iDropped = ETrue;
#ifdef __SMP__
	__e32_memory_barrier();
#endif
	++user_buffer.iGeneration; // atomic not required since only driver modifies iGeneration
#ifndef __SMP__
	NKern::RestoreInterrupts(irq);
#endif
	return ETrue;

trace_off:
#ifdef __SMP__
	__e32_memory_barrier();
#endif
	++user_buffer.iGeneration; // atomic not required since only driver modifies iGeneration
#ifndef __SMP__
	NKern::RestoreInterrupts(irq);
#endif
	return EFalse;
	}
|

TBool TBTraceBufferK::TraceWithTimestamp2(TUint32 aHeader,TUint32 aHeader2,const TUint32 aContext,const TUint32 a1,const TUint32 a2,const TUint32 a3,const TUint32 aExtra,const TUint32 aPc)
	{
	return Trace_Impl(aHeader, aHeader2, aContext, a1, a2, a3, aExtra, aPc, ETrue);
	}

TBool TBTraceBufferK::Trace(TUint32 aHeader,TUint32 aHeader2,const TUint32 aContext,const TUint32 a1,const TUint32 a2,const TUint32 a3,const TUint32 aExtra,const TUint32 aPc)
	{
	return Trace_Impl(aHeader, aHeader2, aContext, a1, a2, a3, aExtra, aPc, EFalse);
	}

#endif // BTRACE_DRIVER_MACHINE_CODED
|


TInt TBTraceBufferK::ControlFunction(BTrace::TControl aFunction, TAny* aArg1, TAny* aArg2)
	{
	switch(aFunction)
		{
	case BTrace::ECtrlSystemCrashed:
		if(Buffer.iAddress)
			((TBTraceBuffer*)Buffer.iAddress)->iMode = 0; // turn off trace
		return KErrNone;

	case BTrace::ECtrlCrashReadFirst:
		Buffer.iCrashReadPart = 0;
		// fall through...
	case BTrace::ECtrlCrashReadNext:
		Buffer.CrashRead(*(TUint8**)aArg1,*(TUint*)aArg2);
		++Buffer.iCrashReadPart;
		return KErrNone;

	default:
		return KErrNotSupported;
		}
	}


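/**
Return captured trace data to the crash-time reader. Because the buffer may
have wrapped, the data between the user-side tail and the driver's head can
be in up to two contiguous parts; iCrashReadPart selects which part is
returned, and aData/aSize are left zero once no further parts remain.
*/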
|
void TBTraceBufferK::CrashRead(TUint8*& aData, TUint& aSize)
	{
	// start by assuming no data...
	aData = 0;
	aSize = 0;

	TBTraceBuffer* userBuffer = (TBTraceBuffer*)iAddress;
	if(!userBuffer)
		return; // no trace buffer, so end...

	TUint head = iHead;
	TUint tail = userBuffer->iTail;
	TUint8* data = (TUint8*)userBuffer;

	if(head>tail)
		{
		// data is in one part...
		if(iCrashReadPart==0)
			{
			aData = data+tail;
			aSize = head-tail;
			}
		// else no more parts
		}
	else if(head<tail)
		{
		// data is in two parts...
		if(iCrashReadPart==0)
			{
			// first part...
			aData = data+tail;
			aSize = userBuffer->iWrap-tail;
			}
		else if(iCrashReadPart==1)
			{
			// second part...
			aData = data+iStart;
			aSize = head-iStart;
			}
		// else no more parts
		}
	}


//
// LDD
//

class DBTraceFactory : public DLogicalDevice
	{
public:
	virtual TInt Install();
	virtual void GetCaps(TDes8& aDes) const;
	virtual TInt Create(DLogicalChannelBase*& aChannel);
	};


class DBTraceChannel : public DLogicalChannelBase
	{
public:
	DBTraceChannel();
	virtual ~DBTraceChannel();
	// Inherited from DObject
	virtual TInt RequestUserHandle(DThread* aThread, TOwnerType aType);
	// Inherited from DLogicalChannelBase
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aReqNo, TAny* a1, TAny* a2);
	//
	static void WaitCallback(TAny* aSelf);
private:
	DThread* iClient;
	TClientRequest* iWaitRequest;
	TDfc iWaitDfc;
	TBool iOpened;
	TInt iFilter2Count;
	TUint32* iFilter2;
	TUint32* iFilter2Set;
	TBool iTimestamp2Enabled;
	};


//
// DBTraceFactory
//

TInt DBTraceFactory::Install()
	{
	return SetName(&RBTrace::Name());
	}

void DBTraceFactory::GetCaps(TDes8& aDes) const
	{
	Kern::InfoCopy(aDes,0,0);
	}

TInt DBTraceFactory::Create(DLogicalChannelBase*& aChannel)
	{
	aChannel=new DBTraceChannel();
	if(!aChannel)
		return KErrNoMemory;
	return KErrNone;
	}

void syncDfcFn(TAny* aPtr)
	{
	NKern::FSSignal((NFastSemaphore*)aPtr);
	}

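/**
Flush aDfcQ: queue a DFC which signals a fast semaphore and wait for it, so
that any DFC queued on (or still running from) aDfcQ before this call has
completed. Used by the channel destructor to make sure a cancelled
iWaitDfc is no longer executing.
*/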
|
void Sync(TDfcQue* aDfcQ)
	{
	NFastSemaphore s(0);
	TDfc dfc(&syncDfcFn, &s, aDfcQ, 0);
	dfc.Enque();
	NKern::FSWait(&s);
	}

//
// DBTraceChannel
//

DBTraceChannel::DBTraceChannel()
	: iWaitDfc(WaitCallback,this,Kern::DfcQue1(),7)
	{
	}

DBTraceChannel::~DBTraceChannel()
	{
	delete iFilter2Set;
	Buffer.iWaitingDfc = NULL;
	iWaitDfc.Cancel();
	Sync(Kern::DfcQue1());
	if (iWaitRequest)
		{
		Kern::QueueRequestComplete(iClient, iWaitRequest, KErrCancel); // does nothing if request not pending
		Kern::DestroyClientRequest(iWaitRequest);
		}
	if (iOpened)
		__e32_atomic_swp_ord32(&ChannelOpen, 0);
	}

TInt DBTraceChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/)
	{
//	_LIT_SECURITY_POLICY_C2(KSecurityPolicy,ECapabilityReadDeviceData,ECapabilityWriteDeviceData);
//	if(!KSecurityPolicy().CheckPolicy(&Kern::CurrentThread(),__PLATSEC_DIAGNOSTIC_STRING("Checked by BTRACE")))
//		return KErrPermissionDenied;
	iClient = &Kern::CurrentThread();
	TInt r = Kern::CreateClientRequest(iWaitRequest);
	if (r!=KErrNone)
		return r;
	if (__e32_atomic_swp_ord32(&ChannelOpen, 1))
		return KErrInUse;
	iOpened = ETrue;
	return KErrNone;
	}


TInt DBTraceChannel::RequestUserHandle(DThread* aThread, TOwnerType aType)
	{
	if (aType!=EOwnerThread || aThread!=iClient)
		return KErrAccessDenied;
	return KErrNone;
	}

void DBTraceChannel::WaitCallback(TAny* aSelf)
	{
	DBTraceChannel& c = *(DBTraceChannel*)aSelf;
	Kern::QueueRequestComplete(c.iClient, c.iWaitRequest, KErrNone);
	}

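/**
Dispatch RBTrace requests from the user side. All requests execute in the
client thread's context; buffer creation, resizing and filter-array updates
are bracketed by NKern::ThreadEnterCS()/ThreadLeaveCS(), and ERequestData
completions are delivered via iWaitDfc on DfcQue1 (see WaitCallback above).
*/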
|
TInt DBTraceChannel::Request(TInt aReqNo, TAny* a1, TAny* a2)
	{
	TInt r;
	TBTraceBufferK& buffer = Buffer;

	switch(aReqNo)
		{
	case RBTrace::EOpenBuffer:
		NKern::ThreadEnterCS();
		if(!Buffer.iBufferChunk)
			r = buffer.Create(0x100000);
		else
			r = KErrNone;
		if(r==KErrNone)
			r = Kern::MakeHandleAndOpen(NULL, buffer.iBufferChunk);
		NKern::ThreadLeaveCS();
		return r;

	case RBTrace::EResizeBuffer:
		NKern::ThreadEnterCS();
		buffer.Close();
		r = buffer.Create((TInt)a1);
		NKern::ThreadLeaveCS();
		return r;

	case RBTrace::ESetFilter:
		{
		TInt old = BTrace::SetFilter((BTrace::TCategory)(TInt)a1,(TInt)a2);
		if((TInt)a2==1 && old==0) // filter turned on?
			{
			if ((TInt)a1==BTrace::EMetaTrace)
				BTracePrimeMetatrace();
			BTrace::Prime((TInt)a1); // prime this trace category
			}
		return old;
		}

	case RBTrace::ESetFilter2:
		return BTrace::SetFilter2((TUint32)a1,(TBool)a2);

	case RBTrace::ESetFilter2Array:
		{
		NKern::ThreadEnterCS();
		delete iFilter2Set;
		TInt size = (TInt)a2*sizeof(TUint32);
		TUint32* buffer = (TUint32*)Kern::Alloc(size);
		iFilter2Set = buffer;
		NKern::ThreadLeaveCS();
		if(!buffer)
			return KErrNoMemory;
		kumemget32(buffer,a1,size);
		r = BTrace::SetFilter2(buffer,(TInt)a2);
		NKern::ThreadEnterCS();
		delete iFilter2Set;
		iFilter2Set = 0;
		NKern::ThreadLeaveCS();
		return r;
		}

	case RBTrace::ESetFilter2Global:
		BTrace::SetFilter2((TBool)a1);
		return KErrNone;

	case RBTrace::EGetFilter2Part1:
		{
		NKern::ThreadEnterCS();
		delete iFilter2;
		iFilter2 = 0;
		iFilter2Count = 0;
		TInt globalFilter = 0;
		iFilter2Count = BTrace::Filter2(iFilter2,globalFilter);
		NKern::ThreadLeaveCS();
		kumemput32(a2,&globalFilter,sizeof(TBool));
		return iFilter2Count;
		}

	case RBTrace::EGetFilter2Part2:
		if((TInt)a2!=iFilter2Count)
			return KErrArgument;
		if(iFilter2Count>0)
			kumemput32(a1,iFilter2,iFilter2Count*sizeof(TUint32));
		NKern::ThreadEnterCS();
		delete iFilter2;
		iFilter2 = 0;
		iFilter2Count = 0;
		NKern::ThreadLeaveCS();
		return KErrNone;

	case RBTrace::ERequestData:
		if (iWaitRequest->SetStatus((TRequestStatus*)a1) != KErrNone)
			Kern::PanicCurrentThread(RBTrace::Name(),RBTrace::ERequestAlreadyPending);
		r = buffer.RequestData((TInt)a2,&iWaitDfc);
		if (r!=KErrNone)
			{
			iWaitRequest->Reset();
			TRequestStatus* s = (TRequestStatus*)a1;
			if (r==KErrCompletion)
				r = KErrNone;
			Kern::RequestComplete(s, r);
			}
		return r;

	case RBTrace::ECancelRequestData:
		buffer.iWaitingDfc = NULL;
		iWaitDfc.Cancel();
		Kern::QueueRequestComplete(iClient, iWaitRequest, KErrCancel);
		return KErrNone;

	case RBTrace::ESetSerialPortOutput:
		{
		TUint mode = Kern::ESerialOutNever+(TUint)a1;
		mode = Kern::SetTextTraceMode(mode,Kern::ESerialOutMask);
		mode &= Kern::ESerialOutMask;
		return mode-Kern::ESerialOutNever;
		}

	case RBTrace::ESetTimestamp2Enabled:
		{
		TBool old = iTimestamp2Enabled;
		iTimestamp2Enabled = (TBool)a1;
		BTrace::TControlFunction oldControl;
		BTrace::THandler oldHandler;
		BTrace::THandler handler = iTimestamp2Enabled ? TBTraceBufferK::TraceWithTimestamp2 : TBTraceBufferK::Trace;
		BTrace::SetHandlers(handler,TBTraceBufferK::ControlFunction,oldHandler,oldControl);
		return old;
		}

	default:
		break;
		}
	return KErrNotSupported;
	}


DECLARE_EXTENSION_LDD()
	{
	return new DBTraceFactory;
	}

#ifdef __WINS__
DECLARE_STANDARD_EXTENSION()
#else
DECLARE_EXTENSION_WITH_PRIORITY(KExtensionMaximumPriority)
#endif
	{
	TSuperPage& superPage = Kern::SuperPage();
	TInt bufferSize = superPage.iInitialBTraceBuffer;
	if(!bufferSize)
		bufferSize = 0x10000;
	TInt r=Buffer.Create(bufferSize);
	if(r==KErrNone)
		Buffer.Reset(superPage.iInitialBTraceMode);
	return r;
	}