46 * We are making calls that take a long time, which means we don't want the |
46 * We are making calls that take a long time, which means we don't want the |
47 * user side thread to block while doing this, and we don't want to be interrupted |
47 * user side thread to block while doing this, and we don't want to be interrupted |
48 * and lose our state. So we solve this by running the device driver |
48 * and lose our state. So we solve this by running the device driver |
49 * as its own separate thread in the kernel (DLogicalChannelBase runs a user thread in kernel mode, |
49 * as its own separate thread in the kernel (DLogicalChannelBase runs a user thread in kernel mode, |
50 * DLogicalChannel runs a kernel thread called from a user thread). |
50 * DLogicalChannel runs a kernel thread called from a user thread). |
51 * We also solve the long timing issue by making an asynchronous call by using the DoRequest call. |
51 * We also solve the long timing issue by making an asynchronous call by using the DoRequestL call. |
52 * For this to work we need to create a DfcQ, i.e. a queue of all the incoming calls from the user side. |
52 * For this to work we need to create a DfcQ, i.e. a queue of all the incoming calls from the user side. |
53 * These then get run in turn. |
53 * These then get run in turn. |
54 */ |
54 */ |
55 class DUptUTraceChannel : public DLogicalChannel |
55 class DUptUTraceChannel : public DLogicalChannel |
56 { |
56 { |
66 */ |
66 */ |
67 virtual void HandleMsg(TMessageBase* aMsg); |
67 virtual void HandleMsg(TMessageBase* aMsg); |
68 // Inherited from DLogicalChannel |
68 // Inherited from DLogicalChannel |
69 virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer); |
69 virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer); |
70 //Make an asynchronous call... |
70 //Make an asynchronous call... |
71 virtual TInt DoRequest(TInt aReqNo, TRequestStatus* aStatus, TAny* a1, TAny* a2); |
71 virtual TInt DoRequestL(TInt aReqNo, TRequestStatus* aStatus, TAny* a1, TAny* a2); |
72 |
72 |
73 void TestUptTraces(const TApiRunConfig& aApiRunConfig, TApiRunResults& aApiRunResults); |
73 void TestUptTraces(const TApiRunConfig& aApiRunConfig, TApiRunResults& aApiRunResults); |
74 void TimeUptTraces(const TApiRunConfig& aApiRunConfig, TApiRunResults& aApiRunResults); |
74 void TimeUptTraces(const TApiRunConfig& aApiRunConfig, TApiRunResults& aApiRunResults); |
75 |
75 |
76 private: |
76 private: |
162 { |
162 { |
163 //DoCancel(message.Int0()); |
163 //DoCancel(message.Int0()); |
164 message.Complete(KErrNone, ETrue); |
164 message.Complete(KErrNone, ETrue); |
165 return; |
165 return; |
166 } |
166 } |
167 if(id < 0 ) //it was a call for DoRequest() |
167 if(id < 0 ) //it was a call for DoRequestL() |
168 { |
168 { |
169 TRequestStatus* status = (TRequestStatus*)message.Ptr0(); |
169 TRequestStatus* status = (TRequestStatus*)message.Ptr0(); |
170 DoRequest(~id, status, message.Ptr1(), message.Ptr2()); |
170 DoRequestL(~id, status, message.Ptr1(), message.Ptr2()); |
171 message.Complete(KErrNone, ETrue); |
171 message.Complete(KErrNone, ETrue); |
172 } |
172 } |
173 else //it was a call for DoControl, we dont implement that now so just fall through |
173 else //it was a call for DoControl, we dont implement that now so just fall through |
174 { |
174 { |
175 TRequestStatus* status=(TRequestStatus*)message.Ptr0(); |
175 TRequestStatus* status=(TRequestStatus*)message.Ptr0(); |
176 Kern::RequestComplete(iClient,status,KErrNotSupported); |
176 Kern::RequestComplete(iClient,status,KErrNotSupported); |
177 message.Complete(KErrNotSupported, ETrue); |
177 message.Complete(KErrNotSupported, ETrue); |
178 } |
178 } |
179 } |
179 } |
180 |
180 |
181 TInt DUptUTraceChannel::DoRequest(TInt aReqNo, TRequestStatus* aStatus, TAny* a1, TAny* a2) |
181 TInt DUptUTraceChannel::DoRequestL(TInt aReqNo, TRequestStatus* aStatus, TAny* a1, TAny* a2) |
182 { |
182 { |
183 TInt error = KErrNone; |
183 TInt error = KErrNone; |
184 |
184 |
185 /** |
185 /** |
186 * This is the beginning of the new implementation for creating a new thread |
186 * This is the beginning of the new implementation for creating a new thread |
256 error = Kern::ThreadRawRead(iClient, a1, (TUint8 *)&results, sizeof(TSanityResults)); |
256 error = Kern::ThreadRawRead(iClient, a1, (TUint8 *)&results, sizeof(TSanityResults)); |
257 if(!error) |
257 if(!error) |
258 { |
258 { |
259 TTestTimer timer; |
259 TTestTimer timer; |
260 if(aReqNo == RUptUTrace::ESanityTestTimer) |
260 if(aReqNo == RUptUTrace::ESanityTestTimer) |
261 results.iPass = timer.TestKernelTimer(results.iTime); |
261 results.iPass = timer.TestKernelTimerL(results.iTime); |
262 if(aReqNo == RUptUTrace::ESanityTestLongTimer) |
262 if(aReqNo == RUptUTrace::ESanityTestLongTimer) |
263 results.iPass = timer.TestKernelLongTimer(results.iTime); |
263 results.iPass = timer.TestKernelLongTimerL(results.iTime); |
264 if(aReqNo == RUptUTrace::ESanityUtraceTimer) |
264 if(aReqNo == RUptUTrace::ESanityUtraceTimer) |
265 results.iPass = timer.TestUTraceKernelTimer(results.iTime); |
265 results.iPass = timer.TestUTraceKernelTimerL(results.iTime); |
266 } |
266 } |
267 if(!error) |
267 if(!error) |
268 error = Kern::ThreadRawWrite(iClient, a1, (TUint8 *)&results, sizeof(TSanityResults)); |
268 error = Kern::ThreadRawWrite(iClient, a1, (TUint8 *)&results, sizeof(TSanityResults)); |
269 } |
269 } |
270 break; |
270 break; |