|
1 // Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // |
|
15 |
|
16 /** |
|
17 @file |
|
18 @internalTechnology |
|
19 */ |
|
20 |
|
21 |
|
22 #include <e32def.h> |
|
23 #include <commsdattypesv1_1.h> // CommsDat |
|
24 #include <metadatabase.h> // CommsDat |
|
25 |
|
26 #include <comms-infras/ss_activities.h> |
|
27 |
|
28 #include <comms-infras/cs_connservparams.h> |
|
29 #include <comms-infras/cs_connservparams_internal.h> |
|
30 #include <elements/nm_messages_child.h> |
|
31 |
|
32 #include "coretiernotificationactivity.h" |
|
33 #include <comms-infras/coretiernotificationstates.h> |
|
34 #include <comms-infras/coretiernotificationcollectors.h> |
|
35 #include <comms-infras/ss_nodemessages_tiermanager.h> |
|
36 |
|
37 #include <comms-infras/es_connectionservparameterbundletrace.h> |
|
38 #include <comms-infras/es_connectionservparameterbundletraceimpl.h> // include this once per dll |
|
39 |
|
40 |
|
#ifdef _DEBUG
// Panic category for "absolutely impossible!" vanilla ASSERT()-type panics from this module
// (if it could happen through user error then you should give it an explicit, documented, category + code)
// Debug builds only: used by the __ASSERT_DEBUG checks throughout this file.
_LIT(KSpecAssert_ESockCrPrvTNotAC, "ESockCrPrvTNotAC");
#endif
|
46 |
|
47 |
|
48 |
|
49 using namespace CommsDat; |
|
50 using namespace ESock; |
|
51 using namespace TierNotification; |
|
52 using namespace NetStateMachine; |
|
53 using namespace ConnectionServ; |
|
54 using namespace CoreStates; |
|
55 using namespace Messages; |
|
56 using namespace MeshMachine; |
|
57 |
|
58 |
|
59 template <class T> |
|
60 class CleanupResetAndDestroy |
|
61 { |
|
62 public: |
|
63 inline static void PushL(T& aRef); |
|
64 private: |
|
65 static void ResetAndDestroy(TAny *aPtr); |
|
66 }; |
|
67 |
|
68 template <class T> |
|
69 inline void CleanupResetAndDestroyPushL(T& aRef); |
|
70 |
|
71 |
|
72 template <class T> |
|
73 inline void CleanupResetAndDestroy<T>::PushL(T& aRef) |
|
74 { |
|
75 CleanupStack::PushL(TCleanupItem(&ResetAndDestroy,&aRef)); |
|
76 } |
|
77 |
|
78 template <class T> |
|
79 void CleanupResetAndDestroy<T>::ResetAndDestroy(TAny *aPtr) |
|
80 { |
|
81 static_cast<T*>(aPtr)->ResetAndDestroy(); |
|
82 } |
|
83 |
|
84 template <class T> |
|
85 inline void CleanupResetAndDestroyPushL(T& aRef) |
|
86 { |
|
87 CleanupResetAndDestroy<T>::PushL(aRef); |
|
88 } |
|
89 |
|
90 |
|
//
// Tier Notification
//
// The reference tier status query / notification activity.
//
namespace TMTierNotificationActivity
{
// Custom activity: instantiated through CTierNotificationActivity::NewL rather
// than being triggered by a specific message id (TNullMessageId).
DEFINE_EXPORT_CUSTOM_NODEACTIVITY(ECFActivityTierNotification, TierNotify, Messages::TNodeSignal::TNullMessageId, CTierNotificationActivity::NewL)
	// Wait for a registration request, then kick off collector discovery.
	FIRST_NODEACTIVITY_ENTRY(TierNotification::TAwaitingTierNotificationRegistration, TNoTag)
	NODEACTIVITY_ENTRY(KNoTag, TierNotification::TInitTierNotifications, TierNotification::TAwaitingDataCollectors, TCancelOrErrorOrTag<KNoTag>)
	// Run the query/notification session until cancel, error or destroy arrives.
	NODEACTIVITY_ENTRY(KNoTag, TierNotification::TStartTierNotifications, TierNotification::TAwaitingCancelOrErrorOrDestroy, TCancelOrErrorOrTag<KNoTag>)
	// On error: record it, then fall through into the cancellation path.
	THROUGH_TRIPLE_ENTRY(KErrorTag, MeshMachine::TStoreError, TTag<KCancelTag>)
	NODEACTIVITY_ENTRY(KCancelTag, TierNotification::TInitiateTierNotificationCancel, TierNotification::TAwaitingDestroy, TNoTag)
	LAST_NODEACTIVITY_ENTRY(KNoTag, MeshMachine::TSetIdle)
NODEACTIVITY_END()
}
|
107 |
|
// Companion activity: answers TTierGetDataCollectors by building the list of
// data collectors appropriate to the query (single-shot, one entry).
namespace TMTierNotificationGetDataCollectorsActivity
{
DEFINE_EXPORT_NODEACTIVITY(ECFActivityTierGetDataCollectors, GetCollectors, TCFDataCollector::TTierGetDataCollectors)
	NODEACTIVITY_ENTRY(KNoTag, TierNotification::TGetDataCollectors, MeshMachine::TAwaitingAny, MeshMachine::TNoTag)
NODEACTIVITY_END()
}
|
114 |
|
115 |
|
116 EXPORT_C /*static*/ MeshMachine::CNodeActivityBase* CTierNotificationActivity::NewL(const MeshMachine::TNodeActivity& aActivitySig, MeshMachine::AMMNodeBase& aNode) |
|
117 { |
|
118 TUint c = GetNextActivityCountL(aActivitySig,aNode); |
|
119 CTierNotificationActivity* inst = new (ELeave) CTierNotificationActivity(aActivitySig, aNode, c); |
|
120 _TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "NewL()"),inst); |
|
121 return inst; |
|
122 } |
|
123 |
|
124 |
|
// Releases everything the activity owns. The query bundle is refcounted, so
// we only drop our reference; the collector sessions own their collectors and
// tear them down from ResetAndDestroy().
/*virtual*/ CTierNotificationActivity::~CTierNotificationActivity()
	{
	_TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "~CTierNotificationActivity()"),this);

	if (iQueryBundleOwner)
		{
		// Drop our refcount; the bundle is destroyed only when the last owner closes
		iQueryBundleOwner->Close();
		}
	iCollectorSessions.ResetAndDestroy();
	delete iCache;
	delete iDbSession;
	}
|
137 |
|
138 |
|
139 /*virtual*/ void CTierNotificationActivity::InitL(CRefCountOwnedParameterBundle* aQuery, const RMessage2& aPlatSecInfo, TBool aOneOffQuery) |
|
140 { |
|
141 _TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "StartL()"),this); |
|
142 |
|
143 // note we don't open a reference since we take the ownership from the DoL |
|
144 // that calls us |
|
145 __ASSERT_DEBUG(!iQueryBundleOwner, User::Panic(KSpecAssert_ESockCrPrvTNotAC, 1)); |
|
146 iQueryBundleOwner = aQuery; |
|
147 iOneOffQuery = aOneOffQuery; |
|
148 |
|
149 if(!HasSufficientCapabilities(aPlatSecInfo)) |
|
150 { |
|
151 User::Leave(KErrPermissionDenied); |
|
152 } |
|
153 } |
|
154 // now pass control back to the mesh so a separate activity can decide on the required collectors |
|
155 |
|
156 |
|
157 /*virtual*/ void CTierNotificationActivity::StartL(RPointerArray<MDataCollector>& aCollectors) |
|
158 { |
|
159 CleanupResetAndDestroyPushL(aCollectors); |
|
160 if(aCollectors.Count() == 0) |
|
161 { |
|
162 User::Leave(KErrArgument); |
|
163 } |
|
164 |
|
165 iDbSession = CMDBSession::NewL(KCDVersion1_2); |
|
166 |
|
167 iCache = CTierNotificationCache::NewL(); |
|
168 |
|
169 // set up a session with each collector |
|
170 for(TInt sessionId=0 ; sessionId<aCollectors.Count() ; ++sessionId) |
|
171 { |
|
172 CDataCollectorSession* newCollector = CDataCollectorSession::NewL(aCollectors[sessionId], *this, sessionId ); |
|
173 __ASSERT_DEBUG(newCollector, User::Panic(KSpecAssert_ESockCrPrvTNotAC, 2)); |
|
174 iCollectorSessions.AppendL(newCollector); |
|
175 aCollectors[sessionId] = 0; // now owned by *this so don't want it to be in the cleanup path twice! |
|
176 } |
|
177 |
|
178 CleanupStack::PopAndDestroy(); |
|
179 |
|
180 // Must run session creation and start loops separately in case the initial data fetch is synchronous. |
|
181 // Reasoning: |
|
182 // when a full set of data for each collector is received, notification is only allowed if all |
|
183 // collectors know they've got their full set. |
|
184 // so all collectors must already be available to answer that question by the time this loop runs. |
|
185 for(TInt i=0; i<iCollectorSessions.Count() ; ++i) |
|
186 { |
|
187 iCollectorSessions[i]->StartL(); |
|
188 } |
|
189 } |
|
190 |
|
191 |
|
192 TBool CompareTypeIds(const STypeId& aFirst, const STypeId& aSecond) |
|
193 { |
|
194 return( aFirst == aSecond ); |
|
195 } |
|
196 |
|
197 |
|
198 |
|
// Walks the cache, builds a result bundle from every entry flagged "to report"
// (filtered down to the parameter-set types and fields the query asked for),
// and posts it to the originators: as a TTierStatus for a one-off query, or a
// TTierNotification for ongoing notification. For notification mode the very
// first pass is swallowed (it only reflects initial discovery, not change).
// Cleanup-stack discipline here is delicate: the refcounted bundle owner sits
// on the stack (via CleanupClosePushL) only between its creation in the scan
// loop and the Pop() before posting.
EXPORT_C void CTierNotificationActivity::ReportChangesInCacheL()
	{
	_TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "ReportChangesInCacheL()"),this);

	const CConnectionServParameterBundle* queryBundle = static_cast<const CConnectionServParameterBundle*>(iQueryBundleOwner->Ptr());
	const RArray<Meta::STypeId>& typesToReturn = queryBundle->GetParameterSetsToReturnL();

	CParameterBundleBase* resultBundle = NULL;
	CRefCountOwnedParameterBundle* bundleOwner = NULL;
	CParameterSetContainer* cacheParSetCtr = NULL;
	XNotificationCacheParameterSet* cacheParams = NULL;
	TInt i = 0;

	// Scan every cached access point container
	while ((cacheParSetCtr = iCache->Get(i++)) != NULL)
		{
		cacheParams = XNotificationCacheParameterSet::FindInParamSetContainer(*cacheParSetCtr);
		__ASSERT_DEBUG(cacheParams, User::Panic(KSpecAssert_ESockCrPrvTNotAC, 3));

		if(cacheParams->ShouldReport())
			{
			// Consume the report flag whether or not we end up sending it
			cacheParams->ClearToReport();

			if (!iOneOffQuery && iMessageCount == 0)
				{
				// Swallow first notification (pseudo "status report").
				// This must be done because the first notification arises
				// as a result of the difference between UNKNOWN
				// and discovering the MATCHING data for the first time
				// meaning that this data enters the "match set".
				// This way only notifications of CHANGE will be sent
				// N.B. a client must run a tier query AFTER it's registered
				// for notification, in order to ensure its view of the data
				// is up to date.
				continue;
				}

			// create a new bundle when we get the first result
			// (if bundleOwner is NULL then resultBundle is also NULL)
			if (!bundleOwner)
				{
				resultBundle = CParameterBundleBase::NewL();
				CleanupStack::PushL(resultBundle);
				bundleOwner = new(ELeave)CRefCountOwnedParameterBundle(resultBundle);
				CleanupStack::Pop();	// resultBundle - now owned by bundleOwner

				bundleOwner->Open();
				CleanupClosePushL(*bundleOwner);
				}

			// Container for this access point, owned by the result bundle
			CParameterSetContainer* newParSetCtr = CParameterSetContainer::NewL(*resultBundle,cacheParSetCtr->Id());
			const XParameterSetBase* cacheSet;
			TInt j=0;
			while((cacheSet = cacheParSetCtr->GetParameterSet(j++)) != NULL)
				{
				// don't copy the cache parameters (internal bookkeeping set)
				if(cacheSet == cacheParams) continue;

				// skip parameter-set types the query didn't ask for
				if(typesToReturn.Find(cacheSet->GetTypeId(),TIdentityRelation<STypeId>(CompareTypeIds)) == KErrNotFound )
					{
					_TIER_LOG_3(_L8("\t.. not including type 0x %08x , %d"),cacheSet->GetTypeId().iUid.iUid,cacheSet->GetTypeId().iType);
					continue;
					}

				XConnectionServParameterSet* newSet = static_cast<XConnectionServParameterSet*>(XParameterSetBase::NewInstanceL(cacheSet->GetTypeId()));
				CleanupStack::PushL(newSet);
				newParSetCtr->AddParameterSetL(newSet);
				CleanupStack::Pop(newSet);	// now owned by newParSetCtr

				// this filters out any information we didn't ask for
				newSet->CopyFieldsRequiredByQueryFromL(*queryBundle, static_cast<const XConnectionServParameterSet&>(*cacheSet));
				}

//			if(newParSetCtr->GetParameterSet(0) == NULL)
//				{
//				 rob reject param set ctr if no types contained therein
//				}
			}
		}
	iMessageCount++;

	__ASSERT_DEBUG(iOriginators.Count(), User::Panic(KSpecAssert_ESockCrPrvTNotAC, 4));

	// ok, result bundle has now been generated, let's send it to the originator.
	// ownership is given to the recipient so pop it from the stack first
	if (bundleOwner)
		{
		CleanupStack::Pop(bundleOwner);
		}


	if (iOneOffQuery)
		{
		if (!bundleOwner)
			{
			// ok.. so no results were found in query...
			// so create an empty bundle to signify this.
			resultBundle = CParameterBundleBase::NewL();
			CleanupStack::PushL(resultBundle);
			bundleOwner = new(ELeave) CRefCountOwnedParameterBundle(resultBundle);
			CleanupStack::Pop();	// resultBundle - now owned by bundleOwner

			bundleOwner->Open();
			}

		_TIER_LOG_BUNDLE("About to send result.", resultBundle);
		TCFTierStatusProvider::TTierStatus msg(bundleOwner);
		PostRefCountedBundleToOriginators(msg, *bundleOwner);
		}
	else // notification then
		{
		if(!bundleOwner)
			{
			if(iMessageCount == 1)
				{
				// ok.. so our notification has started up.
				// so create an empty bundle to signify this.
				resultBundle = CParameterBundleBase::NewL();
				CleanupStack::PushL(resultBundle);
				bundleOwner = new(ELeave) CRefCountOwnedParameterBundle(resultBundle);
				CleanupStack::Pop();	// resultBundle - now owned by bundleOwner
				bundleOwner->Open();
				}
			else
				{
				// don't send empty bundles for notification
				_TIER_LOG(_L8("\tSomething changed but it's not appropriate to generate a notification.."));
				return;
				}
			}

		_TIER_LOG_BUNDLE("About to send notification.", resultBundle);
		TCFTierStatusProvider::TTierNotification msg(bundleOwner);
		PostRefCountedBundleToOriginators(msg, *bundleOwner);
		}

	// and finally release our ref on the bundle
	bundleOwner->Close();
	}
|
337 |
|
338 |
|
339 void CTierNotificationActivity::PostRefCountedBundleToOriginators(const Messages::TSignatureBase& aMsg, CRefCountOwnedParameterBundle& aBundleOwner) |
|
340 { |
|
341 TInt originators = iOriginators.Count(); |
|
342 |
|
343 for (TInt i=0; i<originators; i++) |
|
344 { |
|
345 // Open as many refcounts as there are originators before we do the Post. |
|
346 // This avoids a potential race where an originator, and the aBundleOwner's |
|
347 // creator is running in a higher priority thread than this. If both of |
|
348 // these conditions are met the bundle may be destroyed before we finish |
|
349 // sending. By ensuring "enough" refs are opened before we send we can |
|
350 // avoid this. |
|
351 aBundleOwner.Open(); |
|
352 } |
|
353 |
|
354 TInt unusedRefs = originators - PostToOriginators(aMsg); |
|
355 |
|
356 // Close any unused refcounts on the bundle |
|
357 while (unusedRefs-- > 0) |
|
358 { |
|
359 aBundleOwner.Close(); |
|
360 } |
|
361 } |
|
362 |
|
363 |
|
364 EXPORT_C TBool CTierNotificationActivity::AllCollectorsUnlocked() const |
|
365 { |
|
366 _TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "AllCollectorsUnlocked()"),this); |
|
367 for(TInt i=0 ; i<iCollectorSessions.Count() ; ++i) |
|
368 { |
|
369 if(iCollectorSessions[i]->IsLocked()) |
|
370 { |
|
371 return EFalse; |
|
372 } |
|
373 } |
|
374 return ETrue; |
|
375 } |
|
376 |
|
377 TBool CTierNotificationActivity::AllAccessPointsReadyForAllCollectors() const |
|
378 { |
|
379 _TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "AllAccessPointsReadyForAllCollectors()"),this); |
|
380 |
|
381 // possible optimisation here for notification mode |
|
382 // once we've reached return True we could set a flag.. which it would be up to |
|
383 // disoverers of new access points to clear when it sees them, so we can start running the |
|
384 // check again. |
|
385 |
|
386 CParameterSetContainer* cacheParSetCtr; |
|
387 XNotificationCacheParameterSet* cacheParams; |
|
388 TInt i = 0; |
|
389 while((cacheParSetCtr = iCache->Get(i++)) != NULL) |
|
390 { |
|
391 _TIER_LOG_PSC("Checking if ready: ",cacheParSetCtr); |
|
392 cacheParams = XNotificationCacheParameterSet::FindInParamSetContainer(*cacheParSetCtr); |
|
393 __ASSERT_DEBUG(cacheParams, User::Panic(KSpecAssert_ESockCrPrvTNotAC, 5)); |
|
394 if( ! cacheParams->ReadyToReport()) |
|
395 { |
|
396 return EFalse; |
|
397 } |
|
398 } |
|
399 return ETrue; |
|
400 } |
|
401 |
|
402 |
|
403 EXPORT_C void CTierNotificationActivity::ReportIfReadyL() |
|
404 { |
|
405 if( ! AllCollectorsUnlocked() ) |
|
406 { |
|
407 return; |
|
408 } |
|
409 |
|
410 if( ! AllAccessPointsReadyForAllCollectors() ) |
|
411 { |
|
412 return; |
|
413 } |
|
414 |
|
415 // If we got this far then all sessions are unlocked. In the case of a query we should |
|
416 // now initiate shutdown of all collectors before formulating our response. |
|
417 // This is because TierStatus is the last message this activity will send |
|
418 // so it must be possible for the client to leave this node as soon as it has received TierStatus. |
|
419 // |
|
420 if(iOneOffQuery) |
|
421 { |
|
422 InitiateShutdown(KErrNone); // will send the results once everything is cleaned up |
|
423 } |
|
424 else |
|
425 { |
|
426 // for notify mode this will just mark the cache instead of formulating and sending a response. |
|
427 // actual change notifications will happen via DataReceived |
|
428 ReportChangesInCacheL(); |
|
429 } |
|
430 return; |
|
431 } |
|
432 |
|
433 |
|
434 EXPORT_C void CTierNotificationActivity::InitiateShutdown(TInt aErrorToReport) |
|
435 { |
|
436 _TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "ShutdownFinished()"),this); |
|
437 if(iShuttingDown) |
|
438 { |
|
439 return; |
|
440 } |
|
441 iShuttingDown=ETrue; |
|
442 |
|
443 SetError(aErrorToReport); |
|
444 TInt i=0 ; |
|
445 for( ; i<iCollectorSessions.Count() ; ++i) |
|
446 { |
|
447 CDataCollectorSession* sess = iCollectorSessions[i]; |
|
448 if(sess) |
|
449 { |
|
450 sess->Shutdown(); |
|
451 } |
|
452 } |
|
453 if(i==0) |
|
454 { |
|
455 ShutdownFinished(); |
|
456 } |
|
457 } |
|
458 |
|
459 |
|
460 EXPORT_C void CTierNotificationActivity::ShutdownFinished() |
|
461 { |
|
462 _TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "ShutdownFinished()"),this); |
|
463 for(TInt i=0 ; i<iCollectorSessions.Count() ; ++i) |
|
464 { |
|
465 CDataCollectorSession* sess = iCollectorSessions[i]; |
|
466 if(sess->HasShutDown()) |
|
467 return; |
|
468 } |
|
469 |
|
470 // If we got this far then all sessions are shut down. In the case of a query we should |
|
471 // now fomulate our response and send it. |
|
472 // Otherwise error originators saying we cancelled. |
|
473 // |
|
474 if(iOneOffQuery) |
|
475 { |
|
476 TInt err = Error(); |
|
477 if(err == KErrNone) |
|
478 { |
|
479 TRAP(err,ReportChangesInCacheL()); |
|
480 } |
|
481 if(err != KErrNone) |
|
482 { |
|
483 TEBase::TError msg(TCFTierStatusProvider::TTierStatusQuery::Id(), Error()); |
|
484 PostToOriginators(msg); |
|
485 } |
|
486 } |
|
487 else |
|
488 { |
|
489 __ASSERT_DEBUG(Error() != KErrNone, User::Panic(KSpecAssert_ESockCrPrvTNotAC, 6)); |
|
490 TEBase::TError msg(TCFTierStatusProvider::TTierNotificationRegistration::Id(), Error()); |
|
491 PostToOriginators(msg); |
|
492 } |
|
493 |
|
494 // Any error has been handled. This prevents panic of mesh machinery |
|
495 SetError(KErrNone); |
|
496 |
|
497 // Our query/notification activity has finished. |
|
498 // We need to send a message back to ourselves so the node can finish |
|
499 // processing the Activity as defined in the map. We are (ab)using the |
|
500 // TDestroy message for this purpose :-I |
|
501 TNodeCtxId us(ActivityId(),iNode.Id()); |
|
502 RClientInterface::OpenPostMessageClose(us, us, TEChild::TDestroy().CRef()); |
|
503 return; |
|
504 } |
|
505 |
|
506 |
|
507 |
|
// Capability check hook. The base implementation accepts unconditionally.
// @param aPlatSecInfo  client message carrying the caller's platsec identity.
// @return ETrue if the client may run this query/notification.
/*virtual*/ TBool CTierNotificationActivity::HasSufficientCapabilities(const RMessage2& aPlatSecInfo) const
	{
	// default.. all necessary policing has been done back at RConnectionServ IPC level.
	// Specific technology implementations may wish to override this function in order to examine the client capabilities more deeply.
	(void)aPlatSecInfo; // unused in the default implementation
	return ETrue;
	}
|
515 |
|
516 /*virtual*/ MEqualityComparator* CTierNotificationActivity::CreateEqualityComparatorL(const CParameterSetContainer& aData) |
|
517 { |
|
518 _TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "CreateEqualityComparatorL()"),this); |
|
519 return new(ELeave) CAccessPointIdComparator(aData); |
|
520 } |
|
521 |
|
522 |
|
// Merges freshly collected data for one access point into the cache (creating
// the cache entry if it's new), optionally fans the change out to the other
// collectors, and triggers a report if everything is now ready.
// @param aSource       the session that produced the data.
// @param aNewData      the newly collected data (owned by the caller).
// @param aAllowNotify  EFalse suppresses cross-collector notification (used to
//                      avoid recursion - see NotifyOtherCollectorsOfChangeL).
EXPORT_C void CTierNotificationActivity::DataReceivedL(CDataCollectorSession& aSource, CParameterSetContainer& aNewData, TBool aAllowNotify)
	{
	_TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "DataReceivedL()"),this);
	_TIER_LOG_PSC("Data received: ",&aNewData);

	MEqualityComparator* comparator = CreateEqualityComparatorL(aNewData);
	CleanupStack::PushL(comparator);

	CParameterSetContainer* cacheEntry = iCache->Find(*comparator);

	if(cacheEntry)
		{
		aSource.MergeL(*cacheEntry, aNewData);
		}
	else
		{
		// ok.. no record found.
		// So create a record populated with unknowns and merge the new data
		// into it.. this way only 1 merge function is needed
		// for determining whether we should notify with this data.

		// This has to happen down in the collector as only the collector knows what an empty
		// record should look like (by adding technology specifics &c).
		cacheEntry = aSource.ConstructEmptyRecordLC(aNewData.Id());

		aSource.MergeL(*cacheEntry, aNewData);
		iCache->AddL(cacheEntry);
		CleanupStack::Pop(cacheEntry); // now owned by the cache
		}

	if(aAllowNotify)
		{
		NotifyOtherCollectorsOfChangeL(aSource,*cacheEntry);
		}

	// may formulate/send a report if all collectors are unlocked and ready
	ReportIfReadyL();

	CleanupStack::PopAndDestroy(comparator);
	}
|
562 |
|
563 |
|
564 |
|
565 void CTierNotificationActivity::NotifyOtherCollectorsOfChangeL(CDataCollectorSession& aSource, CParameterSetContainer& aModifiedCacheEntry) |
|
566 { |
|
567 _TIER_LOG_2(_L8("CTierNotificationActivity %08x:\t" "NotifyOtherCollectorsOfChange()"),this); |
|
568 for(TInt i=0; i<iCollectorSessions.Count() ; ++i) |
|
569 { |
|
570 // don't go round in circles |
|
571 if(iCollectorSessions[i] != &aSource) |
|
572 { |
|
573 _TIER_LOG_3(_L8("\tNotifying collector session %d ( %08x ) of change"),i,iCollectorSessions[i]); |
|
574 iCollectorSessions[i]->CacheModifiedL(aModifiedCacheEntry); |
|
575 } |
|
576 } |
|
577 _TIER_LOG_PSC("After notifications: ",&aModifiedCacheEntry); |
|
578 } |
|
579 |
|
580 |
|
// Compile-time switch: compare by access point info held in the generic
// parameter set rather than by container id (currently disabled).
//#define COMPARE_BY_ID_IN_PARAMETER_SET

// Returns ETrue when aRhs refers to the same access point as the container
// captured at construction. With the switch disabled this is a straight
// comparison of the containers' numeric ids.
/*virtual*/ TBool CAccessPointIdComparator::Matches(const CParameterSetContainer& aRhs)
	{
#ifdef COMPARE_BY_ID_IN_PARAMETER_SET
	// Match only if both sides carry a generic AP set and those sets agree
	const XAccessPointGenericParameterSet* lapgps = XAccessPointGenericParameterSet::FindInParamSetContainer(iToCompare);
	const XAccessPointGenericParameterSet* rapgps = XAccessPointGenericParameterSet::FindInParamSetContainer(aRhs);
	return(lapgps && rapgps && lapgps->AccessPointInfo() == rapgps->AccessPointInfo());
#else
	return (iToCompare.Id() == aRhs.Id());
#endif
	}
|
593 |
|
594 |
|
595 |
|
596 |
|
597 // Tier Notification Cache |
|
598 |
|
599 /*static*/ CTierNotificationCache* CTierNotificationCache::NewL() |
|
600 { |
|
601 CTierNotificationCache* inst = new(ELeave) CTierNotificationCache(); |
|
602 CleanupStack::PushL(inst); |
|
603 _TIER_LOG_2(_L8("CTierNotificationCache %08x:\t" "NewL()"), inst); |
|
604 inst->SetCacheBundle(CGenericParameterBundle::NewL()); |
|
605 CleanupStack::Pop(inst); |
|
606 return inst; |
|
607 } |
|
608 |
|
609 |
|
// Destroys the backing bundle and with it every cached parameter set container.
CTierNotificationCache::~CTierNotificationCache()
	{
	_TIER_LOG_2(_L8("CTierNotificationCache %08x:\t" "~CTierNotificationCache()"),this);
	delete iCacheBundle;
	}
|
615 |
|
616 |
|
617 CParameterSetContainer* CTierNotificationCache::Find(MEqualityComparator& aFinder) |
|
618 { |
|
619 _TIER_LOG_2(_L8("CTierNotificationCache %08x:\t" "Find()"),this); |
|
620 CParameterSetContainer* entry; |
|
621 TInt i=0; |
|
622 while((entry = iCacheBundle->GetParamSetContainer(i++)) != NULL) |
|
623 { |
|
624 if(aFinder.Matches(*entry)) |
|
625 { |
|
626 return entry; |
|
627 } |
|
628 } |
|
629 return 0; |
|
630 } |
|
631 |
|
632 |
|
// Adds a container to the cache. On success the backing bundle takes ownership
// of aItemToAdd.
void CTierNotificationCache::AddL(CParameterSetContainer* aItemToAdd)
	{
	_TIER_LOG_PSC("Adding to cache: ",aItemToAdd);
	iCacheBundle->AddParamSetContainerL(*aItemToAdd);
	}
|
638 |
|
639 |
|
640 |
|
641 // Data Collector Session |
|
642 |
|
643 |
|
644 |
|
// Constructs a session around a collector. Takes ownership of aCollector
// (deleted in the destructor / on shutdown) and registers this session as its
// data receiver. Sessions start in the locked state: a session only unlocks
// once its collector has delivered a full set of data.
EXPORT_C CDataCollectorSession::CDataCollectorSession(MDataCollector* aCollector, CTierNotificationActivity& aActivity, TInt aSessionId)
	: iActivity(aActivity)
	, iCollector(aCollector)
	, iSessionId(aSessionId)
	, iIsLocked(ETrue)
	{
	iCollector->SetReceiver(*this);
	}
|
653 |
|
654 |
|
// Destroys the owned collector (already NULL if it shut down or detached).
/*virtual*/
CDataCollectorSession::~CDataCollectorSession()
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "~CDataCollectorSession()"),this);
	delete iCollector;
	}
|
661 |
|
662 |
|
// Starts the collector's data gathering. May deliver data synchronously via
// DataReceivedL before returning.
/*virtual*/
void CDataCollectorSession::StartL()
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "StartL()"),this);
	iCollector->StartL();
	}
|
669 |
|
670 |
|
// Delegates to the collector: only it knows what an "empty" (all-unknowns)
// cache record for this technology looks like. Result is left on the cleanup
// stack; ownership passes to the caller.
/*virtual*/
CParameterSetContainer* CDataCollectorSession::ConstructEmptyRecordLC(TUint32 aId /*=0*/)
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "ConstructEmptyRecordLC()"),this);
	return iCollector->ConstructEmptyRecordLC(aId);
	}
|
677 |
|
// Merges newly collected data into the corresponding cache entry, and updates
// this session's per-collector flags (in-set / to-report / running / received)
// on the entry's XNotificationCacheParameterSet.
// @param aCacheData  the cache entry to merge into.
// @param aNewData    freshly collected data for the same access point.
/*virtual*/
void CDataCollectorSession::MergeL(CParameterSetContainer& aCacheData, CParameterSetContainer& aNewData)
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "MergeL()"),this);

	// Ensure the entry carries the bookkeeping set (added to aCacheData on creation)
	XNotificationCacheParameterSet* cacheParams = XNotificationCacheParameterSet::FindInParamSetContainer(aCacheData);
	if(!cacheParams)
		{
		cacheParams = XNotificationCacheParameterSet::NewL(aCacheData);
		}

	// PREPARE THE CACHE FLAGS

	// 1. InSet flag - whether the currently cached data matches the set of data to watch w.r.t. this data collector.
	//
	// We need to remember whether known data is currently in matching set for this data collector, and pass it into the merge fn.
	// This means doesn't have to run a "before" and "after" match of the query.
	TBool inSet(cacheParams->InSet(iSessionId));

	// 2. ToReport flag - whether we should report this change to client
	//
	// The collector's MergeL function decides on the basis of the InSet flag and the new data whether it's appropriate to report
	// the new data to the client.. This is an output variable only.. but let's set it to EFalse to be sure it's not garbage.
	TBool toReport(EFalse);

	_TIER_LOG_PSC("cache before merge: ",&aCacheData);
	_TIER_LOG_PSC("new data: ",&aNewData);

	// Collector performs the technology-specific merge; inSet is in/out, toReport is out
	iCollector->MergeL(aCacheData,aNewData,inSet,toReport);

	// Write the collector's verdict back into the per-session flags
	cacheParams->InSet(iSessionId,inSet);
	cacheParams->ToReport(iSessionId,toReport);

	cacheParams->SetRunning(iSessionId);	// we need to match this collector
	cacheParams->SetReceived(iSessionId);	// and we just received data for it

	_TIER_LOG_PSC("cache after merge: ",&aCacheData);
	}
|
716 |
|
717 |
|
// Called when cache is modified.. to possibly kickstart further discovery.
// be VERY careful if this leads to calling DataReceivedL as potentially
// this could result in an infinite recursion
// (DataReceived->AddToCache->NotifyOtherCollectors->CacheModified->DataReceived->....)
// @param aModifiedEntry  the cache entry another collector just changed.
/*virtual*/
void CDataCollectorSession::CacheModifiedL(CParameterSetContainer& aModifiedEntry)
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "CacheModifiedL()"),this);
	TBool isRunning=EFalse;	// collector sets this if it decides the notification
							//  represents an AP it is collecting information about..
	iCollector->CacheModifiedL(aModifiedEntry,isRunning);
	if(isRunning)
		{
		XNotificationCacheParameterSet* cacheParams = XNotificationCacheParameterSet::FindInParamSetContainer(aModifiedEntry);
		__ASSERT_DEBUG(cacheParams, User::Panic(KSpecAssert_ESockCrPrvTNotAC, 7));
		// this collector is active in deciding that the AP is ready to report
		// i.e. it is collecting data about this AP and it won't unlock til
		// it has received data
		cacheParams->SetRunning(iSessionId);
		}
	}
|
739 |
|
740 |
|
741 |
|
// From MCollectedDataReceiver : called by data collector.
// Takes ownership of aNewData and deletes it when finished
// (guarded on the cleanup stack while the activity merges it into the cache).
/*virtual*/
void CDataCollectorSession::DataReceivedL(CParameterSetContainer* aNewData, TBool aAllowNotify)
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "DataReceivedL()"),this);
	// take ownership straight away
	CleanupStack::PushL(aNewData);
	iActivity.DataReceivedL(*this,*aNewData,aAllowNotify);
	CleanupStack::PopAndDestroy(aNewData);
	}
|
753 |
|
// From MCollectedDataReceiver : called by data collector
// Marks this session as mid-update; reporting is held back while any session
// is locked (see CTierNotificationActivity::AllCollectorsUnlocked()).
/*virtual*/
void CDataCollectorSession::Lock()
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "Lock()"),this);
	iIsLocked = ETrue;
	}
|
761 |
|
// From MCollectedDataReceiver : called by data collector
// Clears the lock and prompts the activity to report if everything is ready.
/*virtual*/
void CDataCollectorSession::Unlock()
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "Unlock()"),this);
	iIsLocked = EFalse;

	// release notifications if necessary
	// (best-effort: a leave from reporting must not propagate into the collector)
	TRAP_IGNORE(iActivity.ReportIfReadyL());
	}
|
772 |
|
// From MCollectedDataReceiver : called by data collector
// @return ETrue while the collector is mid-update (between Lock() and Unlock()).
/*virtual*/
TBool CDataCollectorSession::IsLocked() const
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "IsLocked()"),this);
	return iIsLocked;
	}
|
780 |
|
781 |
|
782 // From MCollectedDataReceiver : called by data collector |
|
783 /*virtual*/ |
|
784 void CDataCollectorSession::ShutdownFinished() |
|
785 { |
|
786 _TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "ShutdownFinished()"),this); |
|
787 |
|
788 delete iCollector; |
|
789 iCollector=0; |
|
790 |
|
791 iActivity.ShutdownFinished(); |
|
792 } |
|
793 |
|
// From MCollectedDataReceiver : called by data collector when it wants to do
// its own cleanup and destruction
// (i.e. unlike ShutdownFinished() we relinquish ownership without deleting).
/*virtual*/
void CDataCollectorSession::Detach()
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "Detach()"),this);

	iCollector=0;

	iActivity.ShutdownFinished();
	}
|
805 |
|
806 |
|
// Collector error path: propagates the failure to the activity, which begins
// shutting down every session and will report the error to the originators.
void CDataCollectorSession::Error(TInt aErr)
	{
	_TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "Error()"),this);

	iActivity.InitiateShutdown(aErr);
	}
|
813 |
|
814 |
|
815 |
|
// NOTE(review): misleadingly named - this returns ETrue while the collector
// still exists, i.e. while the session has NOT finished shutting down.
// CTierNotificationActivity::ShutdownFinished() relies on these inverted
// semantics ("if any session is still pending, return"), so the behaviour
// must not be "fixed" without also changing that caller and the declaration.
/*virtual*/
TBool CDataCollectorSession::HasShutDown()
	{
	return iCollector?ETrue:EFalse;
	}
|
821 |
|
822 |
|
823 /*virtual*/ |
|
824 void CDataCollectorSession::Shutdown() |
|
825 { |
|
826 _TIER_LOG_2(_L8("CDataCollectorSession %08x:\t" "Shutdown()"),this); |
|
827 iCollector->Shutdown(); |
|
828 } |
|
829 |