// btrace_utils.cpp
//
// Copyright (c) 2008 - 2010 Accenture. All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Accenture - Initial contribution
//

#include <e32math.h>
#include <hal.h>
#include "btrace_parser.h"
#include <fshell/ltkutils.h>

void Panic(TBtraceParserPanic aReason)
	{
	_LIT(KCat, "btrace_parser");
	User::Panic(KCat, aReason);
	}

enum TTlsFlags
	{
	ENanoSet = 1,
	EFastCounterSet = 2,
	EFastCountsUpSet = 4,
	};

struct SBtraceParserTls
	{
	TUint32 iFlags;
	TInt iNanoTickPeriod;
	TInt iFastCounterFrequency;
	TBool iFastCounterCountsUp;
	};

EXPORT_C TUint32 TBtraceUtils::MicroSecondsToNanoTicks(TTimeIntervalMicroSeconds32 aInterval)
	{
	TUint32 numTicks = aInterval.Int();
	numTicks /= NanoTickPeriod();
	return numTicks;
	}

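// Worked example of the conversion above (a sketch: the 1000us nanokernel tick period is an
// assumption, common on real hardware and also the fallback used by CalculateNanoTickPeriod()):
//   MicroSecondsToNanoTicks(250000) == 250000 / 1000 == 250 nano ticks.
// Note the integer division truncates, so intervals shorter than one tick period convert to zero.
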
EXPORT_C TUint64 TBtraceUtils::MicroSecondsToFastTicks(TTimeIntervalMicroSeconds32 aInterval)
	{
	TUint64 numTicks = aInterval.Int();
	numTicks *= FastCounterFrequency();
	numTicks /= 1000000;
	return numTicks;
	}

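// Worked example (a sketch: the 32768Hz fast counter frequency is purely illustrative - real
// hardware varies):
//   MicroSecondsToFastTicks(250000) == (250000 * 32768) / 1000000 == 8192 fast ticks.
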
EXPORT_C TTimeIntervalMicroSeconds TBtraceUtils::NanoTicksToMicroSeconds(TUint32 aNanoTicks)
	{
	TUint64 us = aNanoTicks;
	us *= NanoTickPeriod();
	return us;
	}

EXPORT_C TTimeIntervalMicroSeconds TBtraceUtils::FastTicksToMicroSeconds(const TUint64& aFastTicks)
	{
	TUint64 us = aFastTicks;
	us *= 1000000;
	TUint64 f = FastCounterFrequency();
	us /= f;
	return us;
	}

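// Note the ordering above: multiplying by 1000000 before dividing by the frequency avoids
// losing sub-second precision to integer division. The TUint64 intermediate leaves plenty of
// headroom - overflow would require more than 2^64 / 10^6 (roughly 1.8e13) fast ticks.
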
TInt TBtraceUtils::NanoTickPeriod()
	{
	SBtraceParserTls* tls = (SBtraceParserTls*)Dll::Tls();
	if (tls && (tls->iFlags & ENanoSet))
		{
		return tls->iNanoTickPeriod;
		}
	else
		{
		TInt res = CalculateNanoTickPeriod();
		tls = CreateTls();
		if (tls)
			{
			tls->iNanoTickPeriod = res;
			tls->iFlags |= ENanoSet;
			}
		return res;
		}
	}

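// The function above, and FastCounterFrequency() and FastCounterCountsUp() below, all follow
// the same lazy-caching pattern: calculate the value once (via the HAL), then cache it in TLS.
// If CreateTls() fails, the freshly calculated value is still returned and is simply
// recalculated on the next call.
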
TInt TBtraceUtils::CalculateNanoTickPeriod()
	{
	TInt nanoTickPeriod;
	if (HAL::Get(HAL::ENanoTickPeriod, nanoTickPeriod) != KErrNone)
		{
		nanoTickPeriod = 1000; // Fall back to the common 1ms (1000us) tick period
		}
	return nanoTickPeriod;
	}

TInt TBtraceUtils::FastCounterFrequency()
	{
	SBtraceParserTls* tls = (SBtraceParserTls*)Dll::Tls();
	if (tls && (tls->iFlags & EFastCounterSet))
		{
		return tls->iFastCounterFrequency;
		}
	else
		{
		TInt freq = CalculateFastCounterFrequency();
		tls = CreateTls();
		if (tls)
			{
			tls->iFastCounterFrequency = freq;
			tls->iFlags |= EFastCounterSet;
			}
		return freq;
		}
	}

TInt TBtraceUtils::CalculateFastCounterFrequency()
	{
	TInt fastCounterFrequency;
	if (HAL::Get(HAL::EFastCounterFrequency, fastCounterFrequency) != KErrNone)
		{
		fastCounterFrequency = 1000; // Fall back to 1000Hz if the HAL attribute is unavailable
		}
	return fastCounterFrequency;
	}

TBool TBtraceUtils::FastCounterCountsUp()
	{
	SBtraceParserTls* tls = (SBtraceParserTls*)Dll::Tls();
	if (tls && (tls->iFlags & EFastCountsUpSet))
		{
		return tls->iFastCounterCountsUp;
		}
	else
		{
		TBool res = CalculateFastCounterCountsUp();
		tls = CreateTls();
		if (tls)
			{
			tls->iFastCounterCountsUp = res;
			tls->iFlags |= EFastCountsUpSet;
			}
		return res;
		}
	}

TBool TBtraceUtils::CalculateFastCounterCountsUp()
	{
	TBool countsUp;
	if (HAL::Get(HAL::EFastCounterCountsUp, countsUp) != KErrNone)
		{
		countsUp = EFalse;
		}

	// Hack for the N96, which reports countsUp == EFalse even though its fast counter is
	// slaved to the nanokernel tick counter (and thus does actually count upwards)
	TInt fastCount = User::FastCounter();
	TInt ntick = User::NTickCount();
	if (FastCounterFrequency() == NanoTickPeriod() && fastCount == ntick)
		{
		countsUp = ETrue;
		}

	return countsUp;
	}

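// A note on the hack above: comparing a frequency (in Hz) with a period (in microseconds)
// only works because the two coincide numerically at 1000 - a 1000Hz counter has a 1000us
// period - which is exactly the slaved-to-the-nano-tick configuration being detected.
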
SBtraceParserTls* TBtraceUtils::CreateTls()
	{
	SBtraceParserTls* res = (SBtraceParserTls*)Dll::Tls(); // Guard against reentrancy by checking whether the TLS block already exists
	if (res) return res;
	res = new SBtraceParserTls;
	if (!res) return NULL;
	Mem::FillZ(res, sizeof(SBtraceParserTls));
	LtkUtils::MakeHeapCellInvisible(res); // Presumably so the (normally never-freed) TLS block doesn't trip heap balance checks
	TInt err = Dll::SetTls(res);
	if (err)
		{
		delete res;
		return NULL;
		}
	return res;
	}

EXPORT_C void TBtraceUtils::DebugOverrideTimerSettings(TInt aNanoPeriod, TInt aFastCounterFreq, TBool aFastCountUp)
	{
	SBtraceParserTls* tls = CreateTls();
	if (tls)
		{
		tls->iFlags |= ENanoSet | EFastCounterSet | EFastCountsUpSet;
		tls->iNanoTickPeriod = aNanoPeriod;
		tls->iFastCounterFrequency = aFastCounterFreq;
		tls->iFastCounterCountsUp = aFastCountUp;
		}
	}

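// Example usage (a sketch - the values are purely illustrative): pin the timer parameters,
// for instance when parsing a trace captured on a different device with a 1ms nano tick and
// a 32768Hz fast counter that counts up:
//   TBtraceUtils::DebugOverrideTimerSettings(1000, 32768, ETrue);
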
EXPORT_C TBtraceTickCount::TBtraceTickCount()
	: iNano(0), iFast(0)
	{
	}

EXPORT_C void TBtraceTickCount::SetToNow()
	{
	iFast = User::FastCounter();
	iNano = User::NTickCount();
	}

EXPORT_C TUint32 TBtraceTickCount::IntervalInNanoTicks(const TBtraceTickCount& aTickCount) const
	{
	__ASSERT_ALWAYS(iNano >= aTickCount.iNano, Panic(EBtpPanicNegativeTickInterval));
	return (iNano - aTickCount.iNano);
	}

EXPORT_C TUint64 TBtraceTickCount::IntervalInFastTicks(const TBtraceTickCount& aTickCount) const
	{
	// Calculate the number of nano-kernel ticks (simple subtraction)
	TUint64 n = IntervalInNanoTicks(aTickCount);

	// Convert to microseconds.
	n *= TBtraceUtils::NanoTickPeriod();

	// Convert this into fast counter ticks.
	n *= TBtraceUtils::FastCounterFrequency();
	n /= 1000000;

	// Round n down to the nearest multiple of KMaxTUint. The maths below calculates the remainder
	n /= KMaxTUint;
	n *= KMaxTUint;

	/*
	This represents how the fast counter overflows (assuming the fast counter counts up):

	|-----|-----|-----| <- tick periods
	     /     /     /
	    /     /     /|
	   /     /     / |
	  /     /     /  |
	 /|    /     /   |
	/ |   /     /    |
	  |              |
	  aTickCount     this

	In the case where aTickCount and this are roughly as presented in the above diagram, the answer
	is (2 tick periods) + this.iFast - aTickCount.iFast

	Where aTickCount.iFast is numerically greater than this.iFast:

	|-----|-----|-----|-----| <- tick periods
	     /     /     /     /
	    /|    /     /     /
	   / |   /     /     /
	  /  |  /     /     /
	 /   | /     /     /
	/    |/     /     /|
	     |             |
	     aTickCount    this

	The sum is slightly different: (2 tick periods) + this.iFast + (tick period - aTickCount.iFast)
	When the counter counts down, the maths is reversed.
	*/

	// Now calculate the duration (in fast ticks).
	if (TBtraceUtils::FastCounterCountsUp())
		{
		if (aTickCount.iFast < iFast)
			{
			n = n + iFast - aTickCount.iFast;
			}
		else if (aTickCount.iFast > iFast) // The counter has wrapped
			{
			n = n + iFast + (KMaxTUint - aTickCount.iFast);
			}
		else
			{
			// Do nothing - the fast counts are numerically the same, so n being a round number of tick periods is already correct
			}
		}
	else
		{
		if (iFast < aTickCount.iFast)
			{
			n = n + aTickCount.iFast - iFast;
			}
		else if (iFast > aTickCount.iFast) // The counter has wrapped (counting down)
			{
			n = n + aTickCount.iFast + (KMaxTUint - iFast);
			}
		else
			{
			// Do nothing, as above
			}
		}

	return n;
	}

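// Worked example of the wrap-around arithmetic above (a sketch with illustrative values, for
// a counter that counts up): if aTickCount.iFast == 0xFFFFFFF0 and iFast == 0x10, the counter
// wrapped once, so the remainder is 0x10 + (KMaxTUint - 0xFFFFFFF0) == 0x1F ticks on top of
// the whole-wrap count already in n.
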
EXPORT_C TTimeIntervalMicroSeconds TBtraceTickCount::IntervalInMicroSeconds(const TBtraceTickCount& aTickCount) const
	{
	TUint64 n = IntervalInFastTicks(aTickCount);
	n *= 1000000;
	n /= TBtraceUtils::FastCounterFrequency();
	return n;
	}

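// Typical usage (a sketch):
//   TBtraceTickCount start, end;
//   start.SetToNow();
//   // ... work being measured ...
//   end.SetToNow();
//   TTimeIntervalMicroSeconds elapsed = end.IntervalInMicroSeconds(start);
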
EXPORT_C TBool TBtraceTickCount::operator==(const TBtraceTickCount& aTickCount) const
	{
	return (aTickCount.iNano == iNano) && (aTickCount.iFast == iFast);
	}

EXPORT_C TBool TBtraceTickCount::operator!=(const TBtraceTickCount& aTickCount) const
	{
	return (aTickCount.iNano != iNano) || (aTickCount.iFast != iFast);
	}

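// The relational operators below order tick counts by nano tick first, and use the fast
// counter only to break ties within a single nano tick; the direction of the tie-break
// depends on whether the fast counter counts up or down.
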
EXPORT_C TBool TBtraceTickCount::operator>=(const TBtraceTickCount& aTickCount) const
	{
	if ((iNano > aTickCount.iNano) || (*this == aTickCount))
		{
		return ETrue;
		}
	else if (iNano < aTickCount.iNano)
		{
		return EFalse;
		}
	else
		{
		return TBtraceUtils::FastCounterCountsUp() ? (iFast > aTickCount.iFast) : (iFast < aTickCount.iFast);
		}
	}

EXPORT_C TBool TBtraceTickCount::operator<=(const TBtraceTickCount& aTickCount) const
	{
	if ((iNano < aTickCount.iNano) || (*this == aTickCount))
		{
		return ETrue;
		}
	else if (iNano > aTickCount.iNano)
		{
		return EFalse;
		}
	else
		{
		return TBtraceUtils::FastCounterCountsUp() ? (iFast < aTickCount.iFast) : (iFast > aTickCount.iFast);
		}
	}

EXPORT_C TBool TBtraceTickCount::operator>(const TBtraceTickCount& aTickCount) const
	{
	if (iNano > aTickCount.iNano)
		{
		return ETrue;
		}
	else if ((*this == aTickCount) || (iNano < aTickCount.iNano))
		{
		return EFalse;
		}
	else
		{
		return TBtraceUtils::FastCounterCountsUp() ? (iFast > aTickCount.iFast) : (iFast < aTickCount.iFast);
		}
	}

EXPORT_C TBool TBtraceTickCount::operator<(const TBtraceTickCount& aTickCount) const
	{
	if (iNano < aTickCount.iNano)
		{
		return ETrue;
		}
	else if ((*this == aTickCount) || (iNano > aTickCount.iNano))
		{
		return EFalse;
		}
	else
		{
		return TBtraceUtils::FastCounterCountsUp() ? (iFast < aTickCount.iFast) : (iFast > aTickCount.iFast);
		}
	}

EXPORT_C void CRefCountedObject::IncRef()
	{
	++iRefCount;
	}

EXPORT_C void CRefCountedObject::DecRef()
	{
	__ASSERT_ALWAYS(iRefCount > 0, Panic(EBtpPanicNegativeRefCount));
	--iRefCount;
	}

EXPORT_C TInt CRefCountedObject::RefCount() const
	{
	return iRefCount;
	}

CRefCountedObject::CRefCountedObject()
	{
	}