|
// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// Core CPR Activities
// THIS API IS INTERNAL TO NETWORKING AND IS SUBJECT TO CHANGE AND NOT FOR EXTERNAL USE
//
//

/**
 @file
 @internalComponent
*/
|
23 |
|
24 #define SYMBIAN_NETWORKING_UPS |
|
25 |
|
26 #include "corecpractivities.h" |
|
27 |
|
28 #include <comms-infras/ss_log.h> |
|
29 #include <comms-infras/corescpractivities.h> |
|
30 #include <comms-infras/ss_nodemessages_dataclient.h> |
|
31 #include <comms-infras/ss_nodemessages_mcpr.h> |
|
32 #include "ss_internal_activities.h" |
|
33 #ifdef SYMBIAN_NETWORKING_UPS |
|
34 #include <comms-infras/upsmessages.h> |
|
35 #include <comms-infras/upsprstates.h> |
|
36 #endif |
|
37 |
|
38 using namespace ESock; |
|
39 using namespace CprActivities; |
|
40 using namespace NetStateMachine; |
|
41 using namespace PRActivities; |
|
42 using namespace CoreNetStates; |
|
43 using namespace MeshMachine; |
|
44 using namespace Messages; |
|
45 |
|
46 |
|
47 |
|
48 //-========================================================= |
|
49 // |
|
50 // Activities |
|
51 // |
|
52 //-========================================================= |
|
53 |
|
54 namespace CprControlClientJoinActivity |
|
55 { |
|
56 DECLARE_DEFINE_CUSTOM_NODEACTIVITY(ECFActivityClientJoin, CprControlClientJoin, TNodeSignal::TNullMessageId, CControlClientJoinActivity::NewL) |
|
57 FIRST_NODEACTIVITY_ENTRY(CoreNetStates::TAwaitingControlClientJoin, CControlClientJoinActivity::TAddClientOrUpdatePriority) |
|
58 THROUGH_NODEACTIVITY_ENTRY(CprStates::KUpdatePriority, MeshMachine::TDoNothing, CControlClientJoinActivity::TNoTagOrSendPriorityToCtrlProvider) |
|
59 THROUGH_NODEACTIVITY_ENTRY(CprStates::KAddClient, CprStates::TAddControlClient, CControlClientJoinActivity::TNoTagOrSendPriorityToCtrlProvider) |
|
60 NODEACTIVITY_ENTRY(CprStates::KSendPriorityToCtrlProvider, CControlClientJoinActivity::TUpdatePriorityForControlProvider, CoreStates::TAwaitingJoinComplete, CControlClientJoinActivity::TNoTagOrSendPriorityToServProvider) |
|
61 NODEACTIVITY_ENTRY(CprStates::KSendPriorityToServProvider, CControlClientJoinActivity::TUpdatePriorityForServiceProviders, CoreStates::TAwaitingJoinComplete, MeshMachine::TNoTag) |
|
62 THROUGH_NODEACTIVITY_ENTRY(KNoTag, CControlClientJoinActivity::TSendJoinCompleteIfRequest, CControlClientJoinActivity::TNoTagOrSendActive) |
|
63 LAST_NODEACTIVITY_ENTRY(KNoTag, MeshMachine::TDoNothing) |
|
64 LAST_NODEACTIVITY_ENTRY(CprStates::KSendActive, CoreNetStates::TSendDataClientActive) |
|
65 NODEACTIVITY_END() |
|
66 } |
|
67 |
|
68 namespace CprNoBearerActivity |
|
69 { |
|
70 // Note that there is an issue here with multiple NoBearer messages being received in rapid succession which |
|
71 // results in several of these activities running in parallel. This will not be resolved by the |
|
72 // CoreNetStates::TNoTagOrBearerPresent due to the almost simultaneous reception of the messages. This |
|
73 // could possibly be resolved with a custom mutex that checks for a ServiceProvider() as part of its |
|
74 // IsBlocked() check. |
|
75 |
|
76 DECLARE_DEFINE_CUSTOM_NODEACTIVITY(ECFActivityNoBearer, CprNoBearer, TCFControlProvider::TNoBearer, PRActivities::CNoBearer::NewL) |
|
77 FIRST_NODEACTIVITY_ENTRY(CoreNetStates::TAwaitingNoBearer, MeshMachine::TNoTag) |
|
78 THROUGH_NODEACTIVITY_ENTRY(KNoTag, PRActivities::CNoBearer::TStoreRequestParameters, CNoBearer::TNoTagOrBearerPresentBlockedByNoBearer) |
|
79 NODEACTIVITY_ENTRY(KNoTag, CoreNetStates::TSendNoBearer, MeshMachine::TAwaitingMessageState<TCFControlProvider::TBearer>, CoreNetStates::TNoTagOrBearerPresentOrErrorTag) |
|
80 |
|
81 NODEACTIVITY_ENTRY(CoreNetStates::KBearerPresent, PRActivities::CNoBearer::TRequestCommsBinderRetry, CoreNetStates::TAwaitingBinderResponse, MeshMachine::TTag<CoreNetStates::KBearerPresent>) |
|
82 NODEACTIVITY_ENTRY(CoreNetStates::KBearerPresent, CoreNetStates::TSendBindTo, CoreNetStates::TAwaitingBindToComplete, MeshMachine::TTag<CoreNetStates::KBearerPresent>) |
|
83 THROUGH_NODEACTIVITY_ENTRY(CoreNetStates::KBearerPresent, CoreActivities::ABindingActivity::TSendBindToComplete, CNoBearer::TNoTagOrBearerPresentForAutostart) |
|
84 |
|
85 //Autostart on NoBearer. |
|
86 //The philosphy here is that if the local node doesn't have a control client, then there's noone |
|
87 //that could posibly start it. It will hence decide to autostart as the top layer of what looks |
|
88 //like an implicit connection. In the future this autostart behaviour should become a specialty |
|
89 //of someone more concrete (rather than generic function). We are speculating about the implicit |
|
90 //top layer that could acquire this function if it ever comes into being. |
|
91 NODEACTIVITY_ENTRY(CoreNetStates::KBearerPresent, CoreNetStates::TStartServiceProviderRetry, CoreNetStates::TAwaitingStarted, MeshMachine::TNoTag) |
|
92 |
|
93 LAST_NODEACTIVITY_ENTRY(KNoTag, CoreNetStates::TSendBearer) |
|
94 LAST_NODEACTIVITY_ENTRY(KErrorTag, MeshMachine::TDoNothing) |
|
95 NODEACTIVITY_END() |
|
96 } |
|
97 |
|
98 namespace CprBindToActivity |
|
99 { |
|
100 DECLARE_DEFINE_CUSTOM_NODEACTIVITY(ECFActivityBindTo, CprBindTo, TCFDataClient::TBindTo, CCprBindToActivity::NewL) |
|
101 FIRST_NODEACTIVITY_ENTRY(CoreNetStates::TAwaitingBindTo, CCprBindToActivity::TNoTagOrBearerReady) |
|
102 //TBindTo can hold: |
|
103 //[KNoTag] - a valid serviceProvider cookie that this node isn't bound to; |
|
104 //[KBearerReady] - a valid serviceProvider cookie that this node is already bound to; |
|
105 //[KBearerReady] - a NULL serviceProvider (this node is at the stack's bottom); |
|
106 |
|
107 //{ JOINING NEW SERVICE PROVIDER |
|
108 //a valid serviceProvider supplied, new to this node, let's join it; |
|
109 NODEACTIVITY_ENTRY(KNoTag, CCprBindToActivity::TSendControlClientJoinRequestWithPriority, CoreStates::TAwaitingJoinComplete, TTag<KBearerReady>) |
|
110 //} |
|
111 |
|
112 //serviceProvider provisionally joined. Now the activity needs to construct the configuration access points |
|
113 THROUGH_NODEACTIVITY_ENTRY(KDataClientReady, MeshMachine::TDoNothing, CprStates::TCreateAdditionalDataClientOrDataClientReady) |
|
114 NODEACTIVITY_ENTRY(CprStates::KCreateAdditionalDataClient, CprStates::TCreateAdditionalDataClient, CoreNetStates::TAwaitingDataClientJoin, MeshMachine::TTag<CprStates::KCreatingAdditionalDataClient>) |
|
115 NODEACTIVITY_ENTRY(CprStates::KCreatingAdditionalDataClient, CprStates::TProcessAdditionalDataClientCreationAndBindToPrimary, CoreNetStates::TAwaitingBindToComplete, CprStates::TCreateAdditionalDataClientBackwardOrDataClientReady) |
|
116 |
|
117 //Now the activity needs to propagate iteslf (TBindTo) to its dataclients. |
|
118 //The dataclients are either present or not. If not this activity will assume this is the layer construction phase |
|
119 //and will attempt to construct a default dataclient. |
|
120 THROUGH_NODEACTIVITY_ENTRY(KBearerReady, MeshMachine::TDoNothing, CCprBindToActivity::TNoTagOrDataClientReady) |
|
121 |
|
122 //{ DATA CLIENT CREATION |
|
123 //No dataclients present, assume this is the layer creation phase. Attempt to create a dataclient. |
|
124 NODEACTIVITY_ENTRY(KNoTag, CCprBindToActivity::TCreateDataClient, TAcceptErrorState<CoreNetStates::TAwaitingDataClientJoin>, MeshMachine::TErrorTagOr<CCprBindToActivity::TNoTagOrBindToComplete>) |
|
125 //BindTo activity is the pre-start layer builder, hence it always requests the dataclient from the factory. |
|
126 //The factory (being aware of the phase) may decide to: |
|
127 //1. create a new dataclient -> process dataclient creation [KNoTag] |
|
128 //2. return a preexisting dataclient -> bind the client [KDataClientReady] |
|
129 //3. not to create a dataclient -> send TBindToComplete to the originator [KBindToComplete] |
|
130 THROUGH_NODEACTIVITY_ENTRY(KNoTag, PRStates::TProcessDataClientCreation, TTag<KDataClientReady>) |
|
131 //} |
|
132 |
|
133 THROUGH_NODEACTIVITY_ENTRY(KDataClientReady, MeshMachine::TDoNothing, CCprBindToActivity::TNoTagOrBearerReadyOrBindToComplete) |
|
134 //{ BINDING DATACLIENTS LOOP |
|
135 //Dataclient(s) is/are ready. Depending on whether the node has the lower layer or not, |
|
136 //we will [KNoTag] or will not [KNoBearer] need to request a binder for the dataclient. |
|
137 |
|
138 //{SERVICE PROVIDER PRESENT |
|
139 NODEACTIVITY_ENTRY(KNoTag, CCprBindToActivity::TRequestCommsBinder, TAcceptErrorState<CoreNetStates::TAwaitingBinderResponse>, TErrorTagOr<TTag<KBearerReady> >) |
|
140 //} |
|
141 NODEACTIVITY_ENTRY(KBearerReady, CCprBindToActivity::TSendBindTo, CCprBindToActivity::TAwaitingBindToCompleteOrError, |
|
142 TErrorTagOr<TTag<KDataClientReady | NetStateMachine::EBackward> >) |
|
143 //} |
|
144 |
|
145 //Binding is finished. If this is not autocommit (see TCFDataClient::TBindTo), the activity will reply TCFDataClient::TBindToComplete |
|
146 //to the sender await for the confirmation (TCFDataClient::TCommitBindTo) or cancelation (TBase::TCancel) from the sender. |
|
147 //If this is autommit, the activity will skip awaiting for TCFDataClient::TCommitBindTo and commit itself. |
|
148 THROUGH_NODEACTIVITY_ENTRY(KBindToComplete, CCprBindToActivity::TSendBindToComplete, CCprBindToActivity::TNoTagOrCommit) |
|
149 NODEACTIVITY_ENTRY(KNoTag, MeshMachine::TDoNothing, MeshMachine::TAwaitingMessageState<TCFDataClient::TCommitBindTo>, TErrorTagOr<TTag<CoreStates::KCommit> >) |
|
150 |
|
151 //commiting (either implicit or explicit). |
|
152 NODEACTIVITY_ENTRY(CoreStates::KCommit, CCprBindToActivity::TCommit, MeshMachine::TAwaitingLeaveComplete, MeshMachine::TNoTag) |
|
153 |
|
154 //This is not autocommit and the sender has just explicitly cancelled. Alternativelly this is an error path. |
|
155 //Cancelling/processing error entiles sending TCancel to all dataclients awaiting confirmation |
|
156 //as well as it entiles leaving the new service provider. |
|
157 NODEACTIVITY_ENTRY(KErrorTag, CCprBindToActivity::TCancel, MeshMachine::TAwaitingLeaveComplete, MeshMachine::TNoTag) |
|
158 |
|
159 LAST_NODEACTIVITY_ENTRY(KNoTag, MeshMachine::TDoNothing) |
|
160 NODEACTIVITY_END() |
|
161 } |
|
162 |
|
163 namespace CprBinderRequestActivity |
|
164 { |
|
165 DECLARE_DEFINE_CUSTOM_NODEACTIVITY(ECFActivityBinderRequest, CprBinderRequest, TCFServiceProvider::TCommsBinderRequest, CCommsBinderActivity::NewL) |
|
166 FIRST_NODEACTIVITY_ENTRY(CoreNetStates::TAwaitingBinderRequest, CCommsBinderActivity::TNoTagOrWaitForIncomingOrUseExistingDefaultBlockedByBinderRequest) |
|
167 NODEACTIVITY_ENTRY(KNoTag, PRStates::TCreateDataClient, CoreNetStates::TAwaitingDataClientJoin, MeshMachine::TNoTag) |
|
168 |
|
169 // Below this point we need to modify the error handling approach. If we're getting a TError on TBinderResponse, |
|
170 // this means the client requesting the binder couldn't bind to it. As far as the client is concerned, this |
|
171 // activity is finished (it has flagged an error). The standard error handling will result in erroring |
|
172 // the originator. In this case we shouoldn't error the originator, instead, wrap up quietly. |
|
173 THROUGH_NODEACTIVITY_ENTRY(KNoTag, CCommsBinderActivity::TProcessDataClientCreation, MeshMachine::TTag<CoreStates::KUseExisting>) |
|
174 |
|
175 NODEACTIVITY_ENTRY(CoreStates::KUseExisting, CCommsBinderActivity::TSendBinderResponse, CCommsBinderActivity::TAwaitingBindToComplete, MeshMachine::TNoTagOrErrorTag) |
|
176 LAST_NODEACTIVITY_ENTRY(KNoTag, MeshMachine::TDoNothing) |
|
177 |
|
178 LAST_NODEACTIVITY_ENTRY(KErrorTag, MeshMachine::TClearError) |
|
179 LAST_NODEACTIVITY_ENTRY(CoreNetStates::KWaitForIncoming, MeshMachine::TRaiseError<KErrNotSupported>) |
|
180 NODEACTIVITY_END() |
|
181 } |
|
182 |
|
183 namespace CprDataClientStartActivity |
|
184 { |
|
185 DECLARE_DEFINE_CUSTOM_NODEACTIVITY(ECFActivityStartDataClient, CprDataClientStart, TCFDataClient::TStart, CDataClientStartActivity::NewL) |
|
186 FIRST_NODEACTIVITY_ENTRY(CoreNetStates::TAwaitingDataClientStart, CoreNetStates::TNoTagOrNoDataClients) |
|
187 NODEACTIVITY_ENTRY(KNoTag, CprDataClientStartActivity::TStartDataClient, MeshMachine::TAcceptErrorState<CoreNetStates::TAwaitingDataClientStarted>, MeshMachine::TErrorTagOr<CprDataClientStartActivity::TNoTagOrNoTagBackward>) |
|
188 LAST_NODEACTIVITY_ENTRY(KNoTag, PRStates::TSendDataClientStarted) |
|
189 LAST_NODEACTIVITY_ENTRY(CoreNetStates::KNoDataClients, PRStates::TSendDataClientStarted) |
|
190 |
|
191 NODEACTIVITY_ENTRY(KErrorTag, CoreNetStates::TStopSelf, CoreNetStates::TAwaitingDataClientStopped, MeshMachine::TErrorTag) |
|
192 LAST_NODEACTIVITY_ENTRY(KErrorTag, MeshMachine::TDoNothing) |
|
193 NODEACTIVITY_END() |
|
194 } |
|
195 |
|
196 namespace CprDataClientIdleActivity |
|
197 { |
|
198 DECLARE_DEFINE_NODEACTIVITY(ECFActivityDataClientIdle, CprDataClientIdle, TCFControlProvider::TIdle) |
|
199 NODEACTIVITY_ENTRY(KNoTag, CprStates::THandleDataClientIdle, CoreNetStates::TAwaitingDataClientIdle, MeshMachine::TNoTag) |
|
200 NODEACTIVITY_END() |
|
201 } |
|
202 |
|
203 namespace CprClientLeaveActivity |
|
204 { |
|
205 DECLARE_DEFINE_CUSTOM_NODEACTIVITY(ECFActivityClientLeave, CprClientLeave, TNodeSignal::TNullMessageId, CClientLeaveActivity::NewL) |
|
206 FIRST_NODEACTIVITY_ENTRY(CoreStates::TAwaitingClientLeave, MeshMachine::TNoTag) |
|
207 THROUGH_NODEACTIVITY_ENTRY(KNoTag, CClientLeaveActivity::TRemoveClientAndDestroyOrphanedDataClients, CClientLeaveActivity::TNoTagOrSendPriorityToCtrlProvider) |
|
208 NODEACTIVITY_ENTRY(CprStates::KSendPriorityToCtrlProvider, CClientLeaveActivity::TUpdatePriorityForControlProvider, CoreStates::TAwaitingJoinComplete, CClientLeaveActivity::TNoTagOrSendPriorityToServProvider) |
|
209 NODEACTIVITY_ENTRY(CprStates::KSendPriorityToServProvider, CClientLeaveActivity::TUpdatePriorityForServiceProviders, CoreStates::TAwaitingJoinComplete, MeshMachine::TNoTag) |
|
210 THROUGH_NODEACTIVITY_ENTRY(KNoTag, CClientLeaveActivity::TSendLeaveCompleteAndSendDataClientIdleIfNeeded, MeshMachine::TNoTag) |
|
211 LAST_NODEACTIVITY_ENTRY(KNoTag, CprStates::TSendDataClientStatusStoppedIfNoControlClient) |
|
212 NODEACTIVITY_END() |
|
213 } |
|
214 |
|
215 namespace CprDataClientGoneDownActivity |
|
216 { |
|
217 DECLARE_DEFINE_NODEACTIVITY(ECFActivityDataClientGoneDown, CprDataClientGoneDown, TCFControlProvider::TDataClientGoneDown) |
|
218 // The only thing we do is to clear(unset) the "Flags" of |
|
219 // the relative DataClient from "EStart": this is done |
|
220 // in "TAwaitingDataClientGoneDown". |
|
221 FIRST_NODEACTIVITY_ENTRY(CoreNetStates::TAwaitingDataClientGoneDown, MeshMachine::TNoTag) |
|
222 LAST_NODEACTIVITY_ENTRY(KNoTag, MeshMachine::TDoNothing) |
|
223 NODEACTIVITY_END() |
|
224 } |
|
225 |
|
226 namespace CprPolicyCheckRequestActivity |
|
227 { |
|
228 #ifdef SYMBIAN_NETWORKING_UPS |
|
229 DECLARE_DEFINE_CUSTOM_NODEACTIVITY(ECFActivityPolicyCheckRequest, CPrPolicyCheckRequest, UpsMessage::TPolicyCheckRequest, CNodeParallelActivityBase::NewL) |
|
230 // TODO: decide whether we should move some of these transitions/forks to the UpsCoreProviders |
|
231 FIRST_NODEACTIVITY_ENTRY(UpsStates::TAwaitingPolicyCheckRequest, MeshMachine::TNoTag) |
|
232 LAST_NODEACTIVITY_ENTRY(KNoTag, CprStates::TPostPolicyCheckResponseToOriginators) |
|
233 NODEACTIVITY_END() |
|
234 #endif |
|
235 } |
|
236 |
|
237 namespace CprActivities |
|
238 { |
|
239 DEFINE_EXPORT_ACTIVITY_MAP(coreCprActivities) |
|
240 ACTIVITY_MAP_ENTRY(CprControlClientJoinActivity, CprControlClientJoin) |
|
241 ACTIVITY_MAP_ENTRY(CprNoBearerActivity, CprNoBearer) |
|
242 ACTIVITY_MAP_ENTRY(CprBinderRequestActivity, CprBinderRequest) |
|
243 ACTIVITY_MAP_ENTRY(CprBindToActivity, CprBindTo) |
|
244 ACTIVITY_MAP_ENTRY(CprDataClientStartActivity, CprDataClientStart) |
|
245 ACTIVITY_MAP_ENTRY(CprDataClientIdleActivity, CprDataClientIdle) |
|
246 ACTIVITY_MAP_ENTRY(CprClientLeaveActivity, CprClientLeave) |
|
247 ACTIVITY_MAP_ENTRY(CprDataClientGoneDownActivity, CprDataClientGoneDown) |
|
248 #ifdef SYMBIAN_NETWORKING_UPS |
|
249 ACTIVITY_MAP_ENTRY(CprPolicyCheckRequestActivity, CPrPolicyCheckRequest) |
|
250 #endif |
|
251 ACTIVITY_MAP_END_BASE(PRActivities, coreActivitiesCpr) |
|
252 } |
|
253 |
|
254 namespace CprDataClientStartActivity |
|
255 { |
|
256 EXPORT_C CDataClientStartActivity::CDataClientStartActivity(const MeshMachine::TNodeActivity& aActivitySig, MeshMachine::AMMNodeBase& aNode) |
|
257 : MeshMachine::CNodeActivityBase(aActivitySig, aNode), iClientIter(aNode.GetClientIter<TDefaultClientMatchPolicy>(TClientType(TCFClientType::EData))) |
|
258 {} |
|
259 } |
|
260 |
|
261 namespace CprPriorityUpdateActivity |
|
262 { |
|
263 CPriorityUpdateActivity::CPriorityUpdateActivity(const MeshMachine::TNodeActivity& aActivitySig, MeshMachine::AMMNodeBase& aNode, TUint aActivitiesCount) |
|
264 : MeshMachine::CNodeParallelActivityBase(aActivitySig, aNode, aActivitiesCount) |
|
265 { |
|
266 } |
|
267 } |
|
268 |
|
269 namespace CprControlClientJoinActivity |
|
270 { |
|
271 EXPORT_C MeshMachine::CNodeActivityBase* CControlClientJoinActivity::NewL( const MeshMachine::TNodeActivity& aActivitySig, MeshMachine::AMMNodeBase& aNode ) |
|
272 { |
|
273 TUint c = GetNextActivityCountL(aActivitySig,aNode); |
|
274 return new(ELeave)CControlClientJoinActivity(aActivitySig, aNode, c); |
|
275 } |
|
276 |
|
277 CControlClientJoinActivity::CControlClientJoinActivity(const MeshMachine::TNodeActivity& aActivitySig, MeshMachine::AMMNodeBase& aNode, TUint aActivitiesCount) |
|
278 : CprPriorityUpdateActivity::CPriorityUpdateActivity(aActivitySig, aNode, aActivitiesCount) |
|
279 { |
|
280 } |
|
281 |
|
282 EXPORT_C CControlClientJoinActivity::~CControlClientJoinActivity() |
|
283 { |
|
284 } |
|
285 } |
|
286 |
|
287 namespace CprClientLeaveActivity |
|
288 { |
|
289 EXPORT_C MeshMachine::CNodeActivityBase* CClientLeaveActivity::NewL( const MeshMachine::TNodeActivity& aActivitySig, MeshMachine::AMMNodeBase& aNode ) |
|
290 { |
|
291 TUint c = GetNextActivityCountL(aActivitySig,aNode); |
|
292 return new(ELeave)CClientLeaveActivity(aActivitySig, aNode, c); |
|
293 } |
|
294 |
|
295 CClientLeaveActivity::CClientLeaveActivity(const MeshMachine::TNodeActivity& aActivitySig, MeshMachine::AMMNodeBase& aNode, TUint aActivitiesCount) |
|
296 : CprPriorityUpdateActivity::CPriorityUpdateActivity(aActivitySig, aNode, aActivitiesCount) |
|
297 { |
|
298 } |
|
299 |
|
300 EXPORT_C CClientLeaveActivity::~CClientLeaveActivity() |
|
301 { |
|
302 } |
|
303 } |
|
304 |
|
305 namespace CprBindToActivity |
|
306 { |
|
307 CCprBindToActivity::CCprBindToActivity(const MeshMachine::TNodeActivity& aActivitySig, MeshMachine::AMMNodeBase& aNode, TInt aNextActivityCount) |
|
308 : CBindToActivity(aActivitySig, aNode, aNextActivityCount) |
|
309 { |
|
310 } |
|
311 } |
|
312 |
|
313 |