|
1 // Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of the License "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // e32\kernel\sutils.cpp |
|
15 // |
|
16 // |
|
17 |
|
18 #include <kernel/kern_priv.h> |
|
19 #include "execs.h" |
|
20 #include <e32panic.h> |
|
21 _LIT(KLitDfcThread,"DfcThread"); |
|
22 |
|
23 extern const SNThreadHandlers EpocThreadHandlers; |
|
24 |
|
25 |
|
26 |
|
27 /** |
|
28 Adds a HAL entry handling function for the specified group of HAL entries. |
|
29 |
|
30 @param aId The HAL group attribute that this function handles, as defined by |
|
31 one of the THalFunctionGroup enumerators. |
|
32 @param aFunc Pointer to the handler function |
|
33 @param aPtr Pointer which is passed to the handler function when it is |
|
34 called. This is usually a pointer to an object which handles |
|
35 the HAL attribute. |
|
36 |
|
37 @return KErrNone, if successful; KErrArgument if aId is EHalGroupKernel, EHalGroupVariant or EHalGroupPower, |
|
38 or aId is greater than or equal to KMaxHalGroups; KErrInUse, if a handler is already registered. |
|
39 |
|
40 @pre Interrupts must be enabled. |
|
41 @pre Kernel must be unlocked. |
|
42 @pre No fast mutex can be held. |
|
43 @pre Call in a thread context. |
|
44 @pre Suitable for use in a device driver. |
|
45 |
|
46 @see THalFunctionGroup |
|
47 @see KMaxHalGroups |
|
48 */ |
|
49 EXPORT_C TInt Kern::AddHalEntry(TInt aId, THalFunc aFunc, TAny* aPtr) |
|
50 { |
|
51 return Kern::AddHalEntry(aId, aFunc, aPtr, 0); |
|
52 } |
|
53 |
|
54 /** |
|
55 Adds a HAL entry handling function for the specified group of HAL entries. |
|
56 |
|
57 @param aId The HAL group attribute that this function handles, as defined by |
|
58 one of the THalFunctionGroup enumerators. |
|
59 @param aFunc Pointer to the handler function |
|
60 @param aPtr Pointer which is passed to the handler function when it is |
|
61 called. This is usually a pointer to an object which handles |
|
62 the HAL attribute. |
|
63 @param aDeviceNumber |
|
64 The device number (eg. screen number). |
|
65 |
|
66 @return KErrNone, if successful; KErrArgument if aId is EHalGroupKernel, EHalGroupVariant or EHalGroupPower, |
|
67 or aId is greater than or equal to KMaxHalGroups; KErrInUse, if a handler is already registered. |
|
68 |
|
69 @pre Calling thread must be in a critical section |
|
70 @pre Interrupts must be enabled. |
|
71 @pre Kernel must be unlocked. |
|
72 @pre No fast mutex can be held. |
|
73 @pre Call in a thread context. |
|
74 @pre Suitable for use in a device driver. |
|
75 |
|
76 @see THalFunctionGroup |
|
77 @see KMaxHalGroups |
|
78 */ |
|
79 EXPORT_C TInt Kern::AddHalEntry(TInt aId, THalFunc aFunc, TAny* aPtr, TInt aDeviceNumber) |
|
80 { |
|
81 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::AddHalEntry(TInt aId, THalFunc aFunc, TAny* aPtr, TInt aDeviceNumber)"); |
|
82 __KTRACE_OPT(KEXTENSION,Kern::Printf("Kern::AddHalEntry %d %08x %08x",aId,aFunc,aPtr)); |
|
83 if (aId==(TInt)EHalGroupKernel || aId==(TInt)EHalGroupVariant || aId==(TInt)EHalGroupPower || aId>=KMaxHalGroups || (TUint)aDeviceNumber>=(TUint)KMaxHalEntries) |
|
84 return KErrArgument; |
|
85 TInt r=KErrInUse; |
|
86 if (aDeviceNumber>0) |
|
87 { |
|
88 TBool delete_entry = EFalse; |
|
89 NKern::LockSystem(); |
|
90 SHalEntry2* p = &K::HalEntryArray[aId]; |
|
91 SHalEntry* extended_entry = p->iExtendedEntries; |
|
92 if(!extended_entry) |
|
93 { |
|
94 NKern::UnlockSystem(); |
|
95 extended_entry = (SHalEntry*)Kern::AllocZ((KMaxHalEntries-1)*sizeof(SHalEntry)); |
|
96 if(!extended_entry) |
|
97 return KErrNoMemory; |
|
98 NKern::LockSystem(); |
|
99 if(!p->iExtendedEntries) |
|
100 p->iExtendedEntries = extended_entry; |
|
101 else |
|
102 delete_entry = ETrue; |
|
103 } |
|
104 if(!extended_entry[aDeviceNumber-1].iFunction) |
|
105 { |
|
106 extended_entry[aDeviceNumber-1].iFunction = aFunc; |
|
107 extended_entry[aDeviceNumber-1].iPtr = aPtr; |
|
108 r = KErrNone; |
|
109 } |
|
110 NKern::UnlockSystem(); |
|
111 if(delete_entry) |
|
112 Kern::Free(extended_entry); |
|
113 } |
|
114 else |
|
115 { |
|
116 NKern::LockSystem(); |
|
117 SHalEntry2& e=K::HalEntryArray[aId]; |
|
118 if (!e.iFunction) |
|
119 { |
|
120 e.iFunction=aFunc; |
|
121 e.iPtr=aPtr; |
|
122 r=KErrNone; |
|
123 } |
|
124 NKern::UnlockSystem(); |
|
125 } |
|
126 __KTRACE_OPT(KEXTENSION,Kern::Printf("Kern::AddHalEntry returns %d",r)); |
|
127 return r; |
|
128 } |
|
129 |
|
130 |
|
131 |
|
132 /** |
|
133 Removes a HAL entry handling function for the specified group of HAL entries. |
|
134 |
|
135 @param aId The HAL group attribute, as defined by one of the THalFunctionGroup |
|
136 enumerators, for which the handler function is to be removed. |
|
137 |
|
138 @return KErrNone, if successful; KErrArgument if aId is EHalGroupKernel, |
|
139 EHalGroupVariant or EHalGroupMedia, or aId is greater than |
|
140 or equal KMaxHalGroups. |
|
141 |
|
142 @pre Interrupts must be enabled. |
|
143 @pre Kernel must be unlocked. |
|
144 @pre No fast mutex can be held. |
|
145 @pre Call in a thread context. |
|
146 @pre Can be used in a device driver. |
|
147 |
|
148 @see THalFunctionGroup |
|
149 @see KMaxHalGroups |
|
150 */ |
|
151 EXPORT_C TInt Kern::RemoveHalEntry(TInt aId) |
|
152 { |
|
153 return Kern::RemoveHalEntry(aId,0); |
|
154 } |
|
155 |
|
156 /** |
|
157 Removes a HAL entry handling function for the specified group of HAL entries. |
|
158 |
|
159 @param aId The HAL group attribute, as defined by one of the THalFunctionGroup |
|
160 enumerators, for which the handler function is to be removed. |
|
161 @param aDeviceNumber The device number (eg. screen number) |
|
162 |
|
163 @return KErrNone, if successful; KErrArgument if aId is EHalGroupKernel, |
|
164 EHalGroupVariant or EHalGroupMedia, or aId is greater than |
|
165 or equal KMaxHalGroups. |
|
166 |
|
167 @pre Interrupts must be enabled. |
|
168 @pre Kernel must be unlocked. |
|
169 @pre No fast mutex can be held. |
|
170 @pre Call in a thread context. |
|
171 @pre Can be used in a device driver. |
|
172 |
|
173 @see THalFunctionGroup |
|
174 @see KMaxHalGroups |
|
175 */ |
|
176 EXPORT_C TInt Kern::RemoveHalEntry(TInt aId, TInt aDeviceNumber) |
|
177 { |
|
178 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::RemoveHalEntry(TInt aId, TInt aDeviceNumber)"); |
|
179 __KTRACE_OPT(KEXTENSION,Kern::Printf("Kern::RemoveHalEntry %d %d",aId,aDeviceNumber)); |
|
180 if (aId<(TInt)EHalGroupPower || aId>=KMaxHalGroups || (TUint)aDeviceNumber>=(TUint)KMaxHalEntries) |
|
181 return KErrArgument; |
|
182 NKern::LockSystem(); |
|
183 SHalEntry2* pE=&K::HalEntryArray[aId]; |
|
184 if(aDeviceNumber>0) |
|
185 { |
|
186 SHalEntry* pBase=pE->iExtendedEntries; |
|
187 if(pBase) |
|
188 { |
|
189 pBase[aDeviceNumber-1].iFunction=NULL; |
|
190 pBase[aDeviceNumber-1].iPtr=NULL; |
|
191 } |
|
192 } |
|
193 else |
|
194 { |
|
195 pE->iFunction=NULL; |
|
196 pE->iPtr=NULL; |
|
197 } |
|
198 NKern::UnlockSystem(); |
|
199 return KErrNone; |
|
200 } |
|
201 |
|
202 /** |
|
203 Gets the HAL entry handling function for the specified group of HAL entries. |
|
204 |
|
205 @param aId The HAL group attribute, as defined by one of the THalFunctionGroup |
|
206 enumerators, for which the handler function is required. |
|
207 |
|
208 @return A pointer to handler information containing the handler function; NULL |
|
209 if aId is negative or is greater than or equal to KMaxHalGroups, or no |
|
210 handler function can be found. |
|
211 |
|
212 @pre Interrupts must be enabled. |
|
213 @pre Kernel must be unlocked. |
|
214 @pre No fast mutex can be held. |
|
215 @pre Call in a thread context. |
|
216 @pre Can be used in a device driver. |
|
217 |
|
218 @see THalFunctionGroup |
|
219 @see KMaxHalGroups |
|
220 */ |
|
221 EXPORT_C SHalEntry* Kern::FindHalEntry(TInt aId) |
|
222 { |
|
223 return Kern::FindHalEntry(aId,0); |
|
224 } |
|
225 |
|
226 |
|
227 /** |
|
228 Gets the HAL entry handling function for the specified group of HAL entries. |
|
229 |
|
230 @param aId The HAL group attribute, as defined by one of the THalFunctionGroup |
|
231 enumerators, for which the handler function is required. |
|
232 @param aDeviceNumber The device number (eg. screen number) |
|
233 |
|
234 @return A pointer to handler information containing the handler function; NULL |
|
235 if aId is negative or is greater than or equal to KMaxHalGroups, or no |
|
236 handler function can be found. |
|
237 |
|
238 @pre Interrupts must be enabled. |
|
239 @pre Kernel must be unlocked. |
|
240 @pre No fast mutex can be held. |
|
241 @pre Call in a thread context. |
|
242 @pre Can be used in a device driver. |
|
243 |
|
244 @see THalFunctionGroup |
|
245 @see KMaxHalGroups |
|
246 */ |
|
247 EXPORT_C SHalEntry* Kern::FindHalEntry(TInt aId, TInt aDeviceNumber) |
|
248 { |
|
249 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::FindHalEntry(TInt aId, TInt aDeviceNumber)"); |
|
250 __KTRACE_OPT(KEXTENSION,Kern::Printf("Kern::FindHalEntry %d %d",aId,aDeviceNumber)); |
|
251 if (aId<0 || aId>=KMaxHalGroups || TUint(aDeviceNumber)>=TUint(KMaxHalEntries)) |
|
252 return NULL; |
|
253 SHalEntry2* p=&K::HalEntryArray[0]+aId; |
|
254 SHalEntry* pBase=(SHalEntry*)p; |
|
255 if(aDeviceNumber>0) |
|
256 { |
|
257 if(p->iExtendedEntries) |
|
258 pBase=p->iExtendedEntries + (aDeviceNumber-1); |
|
259 } |
|
260 if(!pBase->iFunction) |
|
261 return NULL; |
|
262 return pBase; |
|
263 } |
|
264 |
|
265 |
|
266 |
|
267 |
|
268 /** |
|
269 Returns the active debug mask obtained by logically ANDing the global debug mask |
|
270 in the super page with the per-thread debug mask in the current DThread object. |
|
271 |
|
272 If the current thread is not a symbian OS thread the global debug mask is used. |
|
273 |
|
274 Only supports the first 32 global debug trace bits. |
|
275 |
|
276 @return The debug mask. |
|
277 */ |
|
278 EXPORT_C TInt KDebugMask() |
|
279 { |
|
280 TInt m=TheSuperPage().iDebugMask[0]; |
|
281 NThread* nt = NCurrentThread(); |
|
282 if (nt && nt->iHandlers==&EpocThreadHandlers) |
|
283 m &= TheCurrentThread->iDebugMask; |
|
284 return m; |
|
285 } |
|
286 |
|
287 |
|
288 |
|
289 /** |
|
290 Returns the state (ETrue or EFalse) of given bit in the active debug mask |
|
291 which is obtained by logically ANDing the global debug mask in the super page |
|
292 with the per-thread debug mask in the current DThread object. |
|
293 |
|
294 If the current thread is not a symbian OS thread the global debug mask is used. |
|
295 |
|
296 @return The state of the debug mask bit number. |
|
297 */ |
|
298 |
|
299 EXPORT_C TBool KDebugNum(TInt aBitNum) |
|
300 { |
|
301 TInt m = 0; |
|
302 |
|
303 // special case for KALWAYS |
|
304 if (aBitNum == KALWAYS) |
|
305 { |
|
306 m = TheSuperPage().iDebugMask[0] || |
|
307 TheSuperPage().iDebugMask[1] || |
|
308 TheSuperPage().iDebugMask[2] || |
|
309 TheSuperPage().iDebugMask[3] || |
|
310 TheSuperPage().iDebugMask[4] || |
|
311 TheSuperPage().iDebugMask[5] || |
|
312 TheSuperPage().iDebugMask[6] || |
|
313 TheSuperPage().iDebugMask[7]; |
|
314 } |
|
315 else if ( (aBitNum > KMAXTRACE) || (aBitNum < 0) ) |
|
316 m = 0; |
|
317 else |
|
318 { |
|
319 TInt index = aBitNum >> 5; |
|
320 m = TheSuperPage().iDebugMask[index]; |
|
321 m &= 1 << (aBitNum & 31); |
|
322 if (!index) |
|
323 { |
|
324 // if index is zero then AND in the per thread debug mask |
|
325 NThread* nt = K::Initialising ? 0 : NCurrentThread(); |
|
326 if (nt && nt->iHandlers==&EpocThreadHandlers) |
|
327 m &= TheCurrentThread->iDebugMask; |
|
328 } |
|
329 } |
|
330 |
|
331 return (m != 0); |
|
332 } |
|
333 |
|
334 |
|
335 /** |
|
336 Prints a formatted string on the debug port. |
|
337 |
|
338 The function uses Kern::AppendFormat() to do the formatting. |
|
339 |
|
340 Although it is safe to call this function from an ISR, it polls the output |
|
341 serial port and may take a long time to complete, invalidating any |
|
342 real-time guarantee. |
|
343 |
|
344 If called from an ISR, it is possible for output text to be intermingled |
|
345 with other output text if one set of output interrupts or preempts another. |
|
346 |
|
347 Some of the formatting options may not work inside an ISR. |
|
348 |
|
349 Be careful not to use a string that is too long to fit onto the stack. |
|
350 |
|
351 @param aFmt The format string. This must not be longer than 256 characters. |
|
352 @param ... A variable number of arguments to be converted to text as dictated |
|
353 by the format string. |
|
354 |
|
355 @pre Calling thread can either be in a critical section or not. |
|
356 @pre Interrupts must be enabled. |
|
357 @pre Kernel must be unlocked |
|
358 @pre Call in any context. |
|
359 @pre Suitable for use in a device driver |
|
360 |
|
361 @see Kern::AppendFormat() |
|
362 */ |
|
363 |
|
364 EXPORT_C void Kern::Printf(const char* aFmt, ...) |
|
365 { |
|
366 TBuf8<256> printBuf; |
|
367 VA_LIST list; |
|
368 VA_START(list,aFmt); |
|
369 Kern::AppendFormat(printBuf,aFmt,list); |
|
370 K::TextTrace(printBuf,EKernelTrace); |
|
371 } |
|
372 |
|
373 void AppendNumBuf(TDes8& aDes, const TDesC8& aNum, TInt width, char fill) |
|
374 { |
|
375 TInt l=aNum.Length(); |
|
376 for (; l<width; ++l) |
|
377 aDes.Append(TChar(fill)); |
|
378 aDes.Append(aNum); |
|
379 } |
|
380 |
|
381 |
|
382 |
|
383 |
|
384 /** |
|
385 Formats and appends text to the specified narrow descriptor without making any |
|
386 executive calls. |
|
387 |
|
388 The function takes a format string and a variable number of arguments. The |
|
389 format specifiers in the format string are used to interpret and the arguments. |
|
390 |
|
391 Format directives have the following syntax: |
|
392 @code |
|
393 <format-directive> ::= |
|
394 "%" [<padding-character>] [<field-width>] [<long-flag>] <conversion-specifier> |
|
395 @endcode |
|
396 |
|
397 If a field width is specified and the width of the formatted field is less |
|
398 than this width, then the field is padded with the padding character. |
|
399 The only supported padding characters are ' ' (default) and '0'. |
|
400 |
|
401 The long flag specifier ('l') modifies the semantic of the conversion |
|
402 specifier as explained below. |
|
403 |
|
404 The possible values for the conversion specifiers, the long flag and the way in |
|
405 which arguments are interpreted, are as follows: |
|
406 @code |
|
407 d Interpret the argument as a TInt decimal representation |
|
408 ld NOT SUPPORTED - use lx instead |
|
409 u Interpret the argument as a TUint decimal representation |
|
410 lu NOT SUPPORTED - use lx instead |
|
411 x Interpret the argument as a TUint hexadecimal representation |
|
412 X As above |
|
413 lx Interpret the argument as a Uint64 hexadecimal representation |
|
414 lX As above |
|
415 c Interpret the argument as a character |
|
416 s Interpret the argument as a pointer to narrow C string |
|
417 ls Interpret the argument as a pointer to narrow C string |
|
418 S Interpret the argument as a pointer to narrow descriptor or NULL |
|
419 lS NOT SUPPORTED - use S instead |
|
420 O Interpret the argument as a pointer to DObject or NULL |
|
421 Generates the object full name or 'NULL' |
|
422 o Interpret the argument as a pointer to DObject or NULL |
|
423 Generates the object name or 'NULL' |
|
424 M Interpret the argument as a pointer to a fast mutex or NULL |
|
425 Generates the name, if this is a well-known fast mutex, address otherwise |
|
426 m Interpret the argument as a pointer to a fast semaphore or NULL |
|
427 Generates the owning thread name, if this is a well-known fast semaphore, address otherwise |
|
428 T Interpret the argument as a pointer to a nanothread or NULL |
|
429 Generates the full name, if this is a Symbian OS thread, address otherwise |
|
430 C Interpret the argument as a pointer to a DCodeSeg or NULL |
|
431 Generates the filename and module version number |
|
432 G Interpret the argument as a pointer to a nanothread group or NULL |
|
433 Generates the full name if this corresponds to a Symbian OS process, address otherwise |
|
434 @endcode |
|
435 |
|
436 The function can be called from the interrupt context, but extreme caution is advised as it |
|
437 may require a lot of stack space and interrupt stacks are very small. |
|
438 |
|
439 @param aDes Narrow descriptor that must be big-enough to hold result |
|
440 @param aFmt The format string |
|
441 @param aList A variable number of arguments to be converted to text as dictated by the format string |
|
442 |
|
443 @pre Calling thread can be either in a critical section or not. |
|
444 @pre Interrupts must be enabled. |
|
445 @pre Kernel must be unlocked |
|
446 @pre Call in any context. |
|
447 @pre Suitable for use in a device driver |
|
448 |
|
449 @panic The set of panics that can be raised when appending data to descriptors. |
|
450 |
|
451 @see TDes8 |
|
452 */ |
|
EXPORT_C void Kern::AppendFormat(TDes8& aDes, const char* aFmt, VA_LIST aList)
	{

// Fetch the next format character into c; return from the whole function
// when the terminating NUL is reached.
#define NEXT_FMT(c,p) if (((c)=*(p)++)==0) return
	_LIT8(NullDescriptor,"(null)");
	_LIT8(KLitNULL,"NULL");
	_LIT8(KLitSysLock,"SysLock");
	_LIT8(KLitObjLock,"ObjLock");
	_LIT8(KLitMsgLock,"MsgLock");
	_LIT8(KLitLogonLock,"LogonLock");
	_LIT8(KLitMiscNtfMgrLock,"MiscNtfMgrLock");

	// Scratch buffer for numeric conversions (big enough for a 64-bit hex).
	TBuf8<24> NumBuf;
	FOREVER
		{
		char c;
		NEXT_FMT(c,aFmt);
		if (c=='%')
			{
			// Parse a directive: "%" [0] [<width>] [l] <conversion>
			char fill=' ';
			TInt width=0;
			TBool long_arg=EFalse;
			TBool ok=ETrue;			// set EFalse for unsupported directives
			NEXT_FMT(c,aFmt);
			if (c=='0')
				{
				// Leading zero selects '0' as the padding character.
				fill='0';
				NEXT_FMT(c,aFmt);
				}
			while(c>='0' && c<='9')
				{
				// Accumulate the decimal field width.
				width=width*10+c-'0';
				NEXT_FMT(c,aFmt);
				}
			if (c=='l')
				{
				// 'l' flag: 64-bit argument (only supported for x/X).
				long_arg=ETrue;
				NEXT_FMT(c,aFmt);
				}
			switch(c)
				{
				case 'd':		// signed decimal (ld not supported)
					{
					if (long_arg)
						ok=EFalse;
					else
						{
						TInt val=VA_ARG(aList,TInt);
						NumBuf.Num(val);
						AppendNumBuf(aDes,NumBuf,width,fill);
						}
					break;
					}
				case 'u':		// unsigned decimal (lu not supported)
					{
					if (long_arg)
						ok=EFalse;
					else
						{
						TUint val=VA_ARG(aList,TUint);
						NumBuf.Num(val,EDecimal);
						AppendNumBuf(aDes,NumBuf,width,fill);
						}
					break;
					}
				case 'x':		// unsigned hex; lx/lX take a 64-bit argument
				case 'X':
					{
					if (long_arg)
						{
						Uint64 val=VA_ARG(aList,Uint64);
						TUint vl=(TUint)val;			// low 32 bits
						TUint vh=(TUint)(val>>32);		// high 32 bits
						if (vh)
							{
							// Print high word, then low word zero-padded to 8 digits.
							NumBuf.Num(vh,EHex);
							NumBuf.AppendNumFixedWidth(vl,EHex,8);
							}
						else
							{
							// High word zero: print low word without padding.
							NumBuf.Num(vl,EHex);
							}
						}
					else
						{
						TUint val=VA_ARG(aList,TUint);
						NumBuf.Num(val,EHex);
						}
					AppendNumBuf(aDes,NumBuf,width,fill);
					break;
					}
				case 'S':		// narrow descriptor (or NULL)
				case 's':		// narrow C string
					{
					TPtrC8 ptrc8;
					const TDesC *pS=VA_ARG(aList,const TDesC*);
					if (c=='s')
						{
						// Wrap the C string in a temporary descriptor so both
						// forms share the same output path below.
						ptrc8.Set((const TUint8*)pS), pS=(const TDesC*)&ptrc8;
						}
					if (pS)
						{
						AppendNumBuf(aDes,*(const TDesC8*)pS,width,fill);
						}
					else
						aDes.Append(NullDescriptor);
					break;
					}
				case 'O':		// DObject* -> full name, or "NULL"
					{
					DObject* pO=VA_ARG(aList,DObject*);
					if (pO)
						pO->TraceAppendFullName(aDes,ETrue);
					else
						aDes.Append(KLitNULL);
					break;
					}
				case 'o':		// DObject* -> short name, or "NULL"
					{
					DObject* pO=VA_ARG(aList,DObject*);
					if (pO)
						pO->TraceAppendName(aDes,ETrue);
					else
						aDes.Append(KLitNULL);
					break;
					}
				case 'M':		// fast mutex: well-known name or address
					{
					NFastMutex* pM=VA_ARG(aList,NFastMutex*);
					if (!pM)
						aDes.Append(KLitNULL);
					else if (pM==&TheScheduler.iLock)
						aDes.Append(KLitSysLock);
					else if (pM==&DObject::Lock)
						aDes.Append(KLitObjLock);
					else if (pM==&TMessageQue::MsgLock)
						aDes.Append(KLitMsgLock);
					else if (pM==&TLogon::LogonLock)
						aDes.Append(KLitLogonLock);
					else if (pM==&K::TheMiscNotifierMgr.iLock)
						aDes.Append(KLitMiscNtfMgrLock);
					else
						aDes.AppendNumFixedWidth((TUint)pM,EHex,8);
					break;
					}
				case 'm':		// fast semaphore: printed as address
					{
					NFastSemaphore* pS=VA_ARG(aList,NFastSemaphore*);
					if (!pS)
						aDes.Append(KLitNULL);
					else
						{
						// following commented out because pointers may end up referencing non-existent memory...
						/*
						DThread* pT1=_LOFF(pS,DThread,iNThread.iRequestSemaphore);
						DThread* pT2=_LOFF(pS,DThread,iKernMsg.iSem);
						if (pT1->iNThread.iHandlers==&EpocThreadHandlers)
							pT1->TraceAppendFullName(aDes,ETrue);
						else if (pT2->iNThread.iHandlers==&EpocThreadHandlers)
							pT2->TraceAppendFullName(aDes,ETrue);
						else
						*/ aDes.AppendNumFixedWidth((TUint)pS,EHex,8);
						}
					break;
					}
				case 'T':		// nanothread: full name if Symbian OS thread, else address
					{
					NThread* pN=VA_ARG(aList,NThread*);
					if (!pN)
						aDes.Append(KLitNULL);
					else if (pN->iHandlers==&EpocThreadHandlers)
						{
						// Symbian OS threads embed the NThread in a DThread.
						DThread* pT=_LOFF(pN,DThread,iNThread);
						pT->TraceAppendFullName(aDes,ETrue);
						}
					else
						aDes.AppendNumFixedWidth((TUint)pN,EHex,8);
					break;
					}
				case 'C':		// DCodeSeg*: file name + version, or "NULL"
					{
					DCodeSeg* pO=VA_ARG(aList,DCodeSeg*);
					if (pO)
						pO->TraceAppendFullName(aDes);
					else
						aDes.Append(KLitNULL);
					break;
					}
#ifdef __SMP__
				case 'G':		// NKERN thread group: printed as address
					{
					NThreadGroup* pG=VA_ARG(aList,NThreadGroup*);
					if (!pG)
						aDes.Append(KLitNULL);
					// else if (pN->iHandlers==&EpocThreadHandlers)
					//	{
					//	DThread* pT=_LOFF(pN,DThread,iNThread);
					//	pT->TraceAppendFullName(aDes,ETrue);
					//	}
					else
						aDes.AppendNumFixedWidth((TUint)pG,EHex,8);
					break;
					}
#endif
				case 'c':
					// %c: consume the argument, then deliberately fall into
					// 'default' so ok=EFalse causes the character to be
					// appended by the common Append at the loop bottom.
					c=(char)VA_ARG(aList,TUint);
					// fall through
				default:
					ok=EFalse;
					break;
				}
			if (ok)
				continue;
			}
		// Literal character, or the fallback for %c / unrecognised directives.
		aDes.Append(TChar(c));
		}
	}
|
670 |
|
671 #if 0 |
|
// Debug-only helper (compiled out under #if 0): prints one 16-byte line of
// memory at address 'a' as hex bytes followed by their ASCII rendering.
void DumpMemoryLine(TLinAddr a)
	{
	const TUint8* p = (const TUint8*)a;
	TUint8 c[16];
	TInt i;
	for (i=0; i<16; ++i)
		{
		// Replace non-printable bytes with '.' (0x2e) for the ASCII column.
		TUint8 x = p[i];
		if (x<0x21 || x>0x7e)
			x = 0x2e;
		c[i] = (TUint8)x;
		}
	Kern::Printf("%08x: %02x %02x %02x %02x %02x %02x %02x %02x "
				 "%02x %02x %02x %02x %02x %02x %02x %02x "
				 "%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c",
					a, p[ 0], p[ 1], p[ 2], p[ 3], p[ 4], p[ 5], p[ 6], p[ 7],
					p[ 8], p[ 9], p[10], p[11], p[12], p[13], p[14], p[15],
					c[ 0], c[ 1], c[ 2], c[ 3], c[ 4], c[ 5], c[ 6], c[ 7],
					c[ 8], c[ 9], c[10], c[11], c[12], c[13], c[14], c[15]
				);
	}
|
693 |
|
// Debug-only helper (compiled out under #if 0): prints a title followed by a
// hex/ASCII dump of aSize bytes starting at aStart, 16 bytes per line. The
// final line is always a full 16 bytes, so up to 15 trailing bytes beyond
// aStart+aSize may be read and shown.
void DumpMemory(const char* aTitle, TLinAddr aStart, TLinAddr aSize)
	{
	Kern::Printf(aTitle);
	while (aSize)
		{
		DumpMemoryLine(aStart);
		aStart += 16;
		if (aSize>=16)
			aSize -= 16;
		else
			aSize = 0;
		}
	}
|
707 #endif |
|
708 |
|
extern "C" {
/**
Faults the system, noting file name and line number.

Used from nanokernel code and in various __ASSERT macros.

@param file The file name as a C string (__FILE__).
@param line The line number (__LINE__).

@see Kern::Fault()
*/
EXPORT_C void NKFault(const char* file, TInt line)
	{
	// C-linkage shim: forward to the kernel fault handler, using the file
	// name as the fault category and the line number as the fault code.
	Kern::Fault(file,line);
	}
}
|
725 |
|
726 |
|
727 |
|
728 |
|
729 /** |
|
730 Faults the system. |
|
731 |
|
732 This will start the Crash Debugger if it is present, |
|
733 otherwise the system is rebooted by calling Kern::Restart(0). |
|
734 |
|
735 @param aCat A pointer to a zero terminated string containing the category |
|
736 of the fault. |
|
737 @param aFault The fault number. |
|
738 |
|
739 @pre Call in any context. |
|
740 @pre Kernel can be locked or unlocked. |
|
741 @pre Interrupts can either be enabled or disabled. |
|
742 @pre Any kind of lock can be held. |
|
743 |
|
744 @see Kern::Restart() |
|
745 */ |
|
746 EXPORT_C void Kern::Fault(const char* aCat, TInt aFault) |
|
747 { |
|
748 TPtrC8 cat((const TUint8*)aCat); |
|
749 Kern::Printf("FAULT: %S 0x%08x (%d) ",&cat,aFault,aFault); |
|
750 |
|
751 // Disables interrupts |
|
752 // Doesn't return |
|
753 NKern::NotifyCrash(&cat, aFault); |
|
754 } |
|
755 |
|
756 |
|
// Kernel fault handler: records the fault, enters the crash debugger if one
// is present, then restarts the system.
void K::DoFault(const TAny* aCat, TInt aFault)
	{
	// Tell BTrace the system has crashed before anything else.
	BTrace::Control(BTrace::ECtrlSystemCrashed);
	A::StartCrashDebugger(aCat, aFault);
	TheSuperPage().iKernelFault=aFault;

	// bodge the first 8 bytes of the name into the code and data
	// NOTE(review): this packs up to 8 bytes of the category text across
	// iCodeAddress and the field(s) following it in TExcInfo so post-mortem
	// tools can recover the category - assumes the fields are adjacent;
	// confirm against the TExcInfo definition.
	if (aFault!=K::ESystemException)
		{
		const TDesC8* cat = (const TDesC8*)aCat;
		TInt csz = cat->Size();
		TExcInfo& xinf=TheSuperPage().iKernelExcInfo;
		xinf.iCodeAddress=0;
		xinf.iDataAddress=0;
		memcpy((TUint8*)&xinf.iCodeAddress,cat->Ptr(),Min(csz,8));
		}

	// Reboot; does not return.
	Kern::Restart(0);
	}
|
776 |
|
777 |
|
778 |
|
779 |
|
780 /** |
|
781 Gets the address of the low priority DFC queue. |
|
782 |
|
783 @return A pointer to the low priority DFC queue. |
|
784 |
|
785 @pre Call in any context. |
|
786 */ |
|
EXPORT_C TDfcQue* Kern::DfcQue0()
	{
	// Simple accessor for the kernel-side low priority DFC queue pointer.
	return K::DfcQ0;
	}
|
791 |
|
792 |
|
793 |
|
794 |
|
795 /** |
|
796 Gets the address of the high priority DFC queue. |
|
797 |
|
798 This is the one used for the nanokernel timer DFC. In the absence of |
|
799 a personality layer this will usually be the highest priority thread |
|
800 in the system. |
|
801 |
|
802 @return A pointer to the high priority DFC queue. |
|
803 |
|
804 @pre Call in any context. |
|
805 */ |
|
EXPORT_C TDfcQue* Kern::DfcQue1()
	{
	// Simple accessor for the kernel-side high priority DFC queue pointer.
	return K::DfcQ1;
	}
|
810 |
|
811 |
|
812 |
|
813 |
|
814 /** |
|
815 Gets the address of the supervisor thread DFC queue. |
|
816 |
|
817 @return A pointer to the supervisor thread DFC queue. |
|
818 |
|
819 @pre Call in any context. |
|
820 */ |
|
EXPORT_C TDfcQue* Kern::SvMsgQue()
	{
	// Simple accessor for the supervisor thread DFC queue pointer.
	return K::SvMsgQ;
	}
|
825 |
|
826 |
|
827 |
|
828 |
|
829 /** |
|
830 Creates a new DFC queue. |
|
831 |
|
832 The function allocates a TDfcQue object on the heap and initialises it with |
|
833 the provided parameters. |
|
834 |
|
835 The thread created for the queue will have its real time state enabled. If |
|
836 this is not the desired behaviour then TDynamicDfcQue::SetRealtimeState() can |
|
837 be used to disable the real time state of the thread. |
|
838 |
|
839 @param aDfcQ A reference to a pointer which, on successful return, is set |
|
840 to point to the new DFC queue. On failure, the pointer is set |
|
841 to NULL. |
|
842 |
|
843 @param aPriority The thread priority for the queue. |
|
844 |
|
845 @param aName A pointer to a name for the queue thread. If NULL, |
|
846 a unique name of the form 'DfcThreadNNN' is generated for the |
|
847 queue. |
|
848 |
|
849 @return KErrNone, if successful, otherwise one of the other system-wide error |
|
850 codes. |
|
851 |
|
852 @pre Calling thread must be in a critical section. |
|
853 @pre Interrupts must be enabled. |
|
854 @pre Kernel must be unlocked. |
|
855 @pre No fast mutex can be held. |
|
856 @pre Call in a thread context. |
|
857 @pre Can be used in a device driver. |
|
858 |
|
859 @see Kern::DfcQInit() |
|
860 @see TDynamicDfcQue::SetRealtimeState() |
|
861 */ |
|
862 EXPORT_C TInt Kern::DfcQCreate(TDfcQue*& aDfcQ, TInt aPriority, const TDesC* aName) |
|
863 { |
|
864 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::DfcQCreate"); |
|
865 TInt r=KErrNoMemory; |
|
866 TDfcQue* pQ=new TDfcQue; |
|
867 aDfcQ=pQ; |
|
868 if (pQ) |
|
869 { |
|
870 r=Kern::DfcQInit(pQ,aPriority,aName); |
|
871 if (r!=KErrNone) |
|
872 { |
|
873 delete pQ; |
|
874 aDfcQ=NULL; |
|
875 } |
|
876 } |
|
877 return r; |
|
878 } |
|
879 |
|
880 |
|
881 |
|
882 |
|
883 /** |
|
884 Creates a new dynamic DFC queue. |
|
885 |
|
886 The function allocates a TDynamicDfcQue object on the heap and initialises it |
|
887 with the provided parameters. |
|
888 |
|
889 The thread created for the queue will have its real time state enabled. If |
|
890 this is not the desired behaviour then TDynamicDfcQue::SetRealtimeState() can |
|
891 be used to disable the real time state of the thread. |
|
892 |
|
893 @param aDfcQ A reference to a pointer which, on successful return, is set |
|
894 to point to the new DFC queue. On failure, the pointer is set |
|
895 to NULL. |
|
896 |
|
897 @param aPriority The thread priority for the queue. |
|
898 |
|
899 @param aBaseName The base name for the queue thread. A 9 character string will |
|
900 be appended to this name to create a unique thread name, |
|
901 therefore the base name must not exceed 71 characters. |
|
902 |
|
903 @return KErrNone, if successful, otherwise one of the other system-wide error |
|
904 codes. |
|
905 |
|
906 @pre Calling thread must be in a critical section. |
|
907 @pre Interrupts must be enabled. |
|
908 @pre Kernel must be unlocked. |
|
909 @pre No fast mutex can be held. |
|
910 @pre Call in a thread context. |
|
911 @pre Can be used in a device driver. |
|
912 |
|
913 @see Kern::DfcQInit() |
|
914 @see TDynamicDfcQue::SetRealtimeState() |
|
915 */ |
|
916 EXPORT_C TInt Kern::DynamicDfcQCreate(TDynamicDfcQue*& aDfcQ, TInt aPriority, const TDesC& aBaseName) |
|
917 { |
|
918 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::DynamicDfcQCreate"); |
|
919 aDfcQ = NULL; |
|
920 TDynamicDfcQue* pQ=new TDynamicDfcQue; |
|
921 if (!pQ) |
|
922 return KErrNoMemory; |
|
923 |
|
924 TInt r; |
|
925 do |
|
926 { |
|
927 // Generate successive IDs using linear congruential random number generator |
|
928 TUint32 original_qid; |
|
929 TUint32 qid; |
|
930 do { |
|
931 original_qid = K::DynamicDfcQId; |
|
932 qid = original_qid * 69069 + 1; |
|
933 } while (!__e32_atomic_cas_rlx32(&K::DynamicDfcQId, &original_qid, qid)); |
|
934 TKName name(aBaseName); |
|
935 name.Append('-'); |
|
936 name.AppendNum(qid, EHex); |
|
937 r = Kern::DfcQInit(pQ,aPriority,&name); |
|
938 } |
|
939 while (r == KErrAlreadyExists); |
|
940 |
|
941 if (r!=KErrNone) |
|
942 delete pQ; |
|
943 else |
|
944 aDfcQ = pQ; |
|
945 |
|
946 return r; |
|
947 } |
|
948 |
|
949 |
|
950 |
|
951 |
|
952 void DynamicDfcQKillFunction(TAny* aDfcQ) |
|
953 { |
|
954 Kern::SetThreadPriority(KDefaultExitPriority); |
|
955 delete (TDfcQue*)aDfcQ; |
|
956 Kern::Exit(0); |
|
957 } |
|
958 |
|
959 |
|
960 |
|
961 |
|
// Binds the kill DFC to DynamicDfcQKillFunction with this queue as both the
// DFC argument and the target queue (priority 0), so Destroy() can tear the
// queue down from its own thread.
TDynamicDfcQue::TDynamicDfcQue()
	: iKillDfc(DynamicDfcQKillFunction, this, this, 0)
	{
	}
|
966 |
|
967 |
|
968 |
|
969 /** |
|
970 Destroys the DFC queue. |
|
971 |
|
972 The function destroys the DFC queue, killing the DFC thread and deleting the TDynamicDfcQue |
|
973 object itself. |
|
974 |
|
975 @pre Calling thread must be in a critical section. |
|
976 @pre Interrupts must be enabled. |
|
977 @pre Kernel must be unlocked. |
|
978 @pre No fast mutex can be held. |
|
979 @pre Call in a thread context. |
|
980 @pre Can be used in a device driver. |
|
981 |
|
982 @see Kern::DfcQCreate() |
|
983 @see Kern::DfcQInit() |
|
984 */ |
|
EXPORT_C void TDynamicDfcQue::Destroy()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"TDynamicDfcQue::Destroy");
	// Queue the kill DFC; it runs on this queue's own thread, which deletes
	// the queue object and exits (see DynamicDfcQKillFunction).
	iKillDfc.Enque();
	}
|
990 |
|
991 |
|
992 |
|
993 /** |
|
994 Sets the realtime state for the thread that runs the DFC queue. |
|
995 |
|
996 @param aNewState The new realtime state for the thread. |
|
997 |
|
998 @pre No fast mutex can be held. |
|
999 @pre Call in a thread context. |
|
1000 @pre Kernel must be unlocked |
|
1001 @pre Interrupts enabled |
|
1002 @pre Can be used in a device driver. |
|
1003 */ |
|
EXPORT_C void TDynamicDfcQue::SetRealtimeState(TThreadRealtimeState aNewState)
	{
	// Recover the owning DThread from the embedded NThread (iThread) and
	// forward the state change to it.
	_LOFF(iThread,DThread,iNThread)->SetRealtimeState(aNewState);
	}
|
1008 |
|
1009 |
|
1010 |
|
1011 |
|
_LIT(KLitKernCommon, "KERN-COMMON");

// Panics the current thread with category KERN-COMMON and the given reason.
void Panic(TCdtPanic aPanic)
	{
	Kern::PanicCurrentThread(KLitKernCommon, aPanic);
	}
|
1017 |
|
// Halts the system with fault category "KERN" and the given reason code.
void K::Fault(K::TFault aFault)
	{
	Kern::Fault("KERN",aFault);
	}
|
1022 |
|
1023 |
|
1024 |
|
1025 |
|
1026 /** |
|
1027 Waits for a request to complete. |
|
1028 |
|
1029 @param aStatus The status of the request to wait for. |
|
1030 */ |
|
EXPORT_C void Kern::WaitForRequest(TRequestStatus& aStatus)
	{
	// i counts signals consumed that belonged to requests other than aStatus.
	TInt i=-1;
	do
		{
		++i;
		NKern::WaitForAnyRequest();
		} while (aStatus==KRequestPending);
	// Re-issue the signals swallowed on behalf of other requests so their
	// waits can still complete.
	if (i)
		ExecHandler::RequestSignal(i);
	}
|
1042 |
|
1043 |
|
1044 /** |
|
1045 Allocates a block of the specified size on the kernel heap and zero-fills it. |
|
1046 |
|
1047 @param aSize The size of the buffer to be allocated, in bytes. This must be |
|
1048 positive and must be less than the value of |
|
1049 @code |
|
1050 KMaxTInt/2 |
|
1051 @endcode |
|
1052 otherwise the allocation request fails. |
|
1053 |
|
1054 @return A pointer to the allocated buffer, if successful; NULL if the |
|
1055 allocation request fails. |
|
1056 |
|
1057 @pre Calling thread must be in a critical section. |
|
1058 @pre Interrupts must be enabled. |
|
1059 @pre Kernel must be unlocked. |
|
1060 @pre No fast mutex can be held. |
|
1061 @pre Call in a thread context. |
|
1062 @pre Can be used in a device driver. |
|
1063 */ |
|
EXPORT_C TAny* Kern::Alloc(TInt aSize)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::Alloc");
	// Unsigned compare also rejects negative sizes (they wrap to large values),
	// so only 0 <= aSize < KMaxTInt/2 reaches the allocator.
	if ((TUint)aSize < KMaxTInt/2)
		return K::Allocator->Alloc(aSize);
	return NULL;
	}
|
1071 |
|
1072 |
|
1073 |
|
1074 |
|
1075 /** |
|
1076 Allocates a block of the specified size on the kernel heap and zero-fills it. |
|
1077 |
|
1078 @deprecated |
|
1079 |
|
1080 Calling this function has the same effect as calling Kern::Alloc(). |
|
1081 |
|
1082 @param aSize The size of the buffer to be allocated, in bytes. This must be |
|
1083 positive and must be less than the value of |
|
1084 @code |
|
1085 KMaxTInt/2 |
|
1086 @endcode |
|
1087 otherwise the allocation request fails. |
|
1088 |
|
1089 @return A pointer to the allocated buffer, if successful; NULL if the |
|
1090 allocation request fails. |
|
1091 |
|
1092 @pre Calling thread must be in a critical section. |
|
1093 @pre Interrupts must be enabled. |
|
1094 @pre Kernel must be unlocked. |
|
1095 @pre No fast mutex can be held. |
|
1096 @pre Call in a thread context. |
|
1097 @pre Can be used in a device driver. |
|
1098 |
|
1099 @see Kern::Alloc() |
|
1100 */ |
|
EXPORT_C TAny* Kern::AllocZ(TInt aSize)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::AllocZ");
	// Deprecated alias: Kern::Alloc() already zero-fills (see its doc above),
	// so this simply delegates.
	return Kern::Alloc(aSize);
	}
|
1106 |
|
1107 |
|
1108 |
|
1109 |
|
1110 /** |
|
1111 Frees a block of memory back to the kernel heap. |
|
1112 |
|
1113 The pointer passed must point to a valid allocated kernel heap cell, which |
|
1114 will be the case if it was previously allocated using Kern::Alloc() or |
|
1115 Kern::AllocZ(). |
|
1116 |
|
1117 @param aPtr A pointer to the buffer to be freed. |
|
1118 |
|
1119 @pre Calling thread must be in a critical section. |
|
1120 @pre Interrupts must be enabled. |
|
1121 @pre Kernel must be unlocked. |
|
1122 @pre No fast mutex can be held. |
|
1123 @pre Call in a thread context. |
|
1124 @pre Can be used in a device driver. |
|
1125 |
|
1126 @see Kern::Alloc() |
|
1127 @see Kern::AllocZ() |
|
1128 */ |
|
EXPORT_C void Kern::Free(TAny* aPtr)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::Free");
	// aPtr must be a valid kernel heap cell (from Kern::Alloc/AllocZ).
	K::Allocator->Free(aPtr);
	}
|
1134 |
|
1135 |
|
1136 |
|
1137 |
|
1138 /** |
|
1139 Reallocates a buffer. |
|
1140 |
|
1141 The buffer is assumed to have been previously allocated using Kern::Alloc() or |
|
1142 Kern::AllocZ(). |
|
1143 |
|
1144 If the new requested size is bigger than the current size, then the function |
|
1145 tries to grow the currently allocated buffer, and if that fails, allocates a new |
|
1146 buffer by calling Kern::Alloc(), copies the content of the old buffer into the |
|
1147 new buffer, and frees the old buffer. Any newly committed memory is |
|
1148 zero-filled. If the allocation mode is ENeverMove, the currently allocated |
|
1149 buffer cannot be grown, and the function returns NULL instead. |
|
1150 |
|
1151 If the new requested size is less than the current size, then the function |
|
1152 shrinks the allocated buffer, and, if the remainder is large enough, creates a |
|
1153 new free cell. |
|
1154 |
|
1155 If the pointer passed to this function is NULL, then it behaves like |
|
1156 Kern::Alloc(). However, if the allocation mode is ENeverMove, then it just |
|
1157 returns NULL. |
|
1158 |
|
1159 @param aPtr A pointer to the existing buffer that is to be reallocated. |
|
1160 |
|
1161 @param aSize The new requested size of the buffer, in bytes. |
|
1162 |
|
1163 @param aMode The allocation mode. It specifies how the buffer should be |
|
1164 reallocated. It can take one of the values ENeverMove and |
|
1165 EAllowMoveOnShrink. |
|
1166 |
|
1167 @return Pointer to the reallocated buffer or NULL if the re-allocation request |
|
1168 fails. |
|
1169 |
|
1170 @pre Calling thread must be in a critical section. |
|
1171 @pre Interrupts must be enabled. |
|
1172 @pre Kernel must be unlocked. |
|
1173 @pre No fast mutex can be held. |
|
1174 @pre Call in a thread context. |
|
1175 @pre Can be used in a device driver. |
|
1176 |
|
1177 @post Calling thread is in a critical section. |
|
1178 |
|
1179 @see Kern::Alloc() |
|
1180 @see ENeverMove |
|
1181 @see EAllowMoveOnShrink |
|
1182 */ |
|
EXPORT_C TAny* Kern::ReAlloc(TAny* aPtr, TInt aSize, TInt aMode)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::ReAlloc");
	// Delegate directly to the kernel allocator; aMode controls whether the
	// cell may move (see ENeverMove / EAllowMoveOnShrink in the doc above).
	return K::Allocator->ReAlloc(aPtr, aSize, aMode);
	}
|
1188 |
|
1189 |
|
1190 |
|
1191 |
|
1192 /** |
|
1193 Safely reallocates a buffer. |
|
1194 |
|
1195 The buffer is assumed to have been previously allocated using Kern::Alloc() or |
|
1196 Kern::AllocZ(). |
|
1197 |
|
1198 If the new requested size is zero, the function frees the pointer and sets it |
|
1199 to NULL. |
|
1200 |
|
1201 If the new requested size is bigger than the old size, then the function tries |
|
1202 to grow the currently allocated buffer using Kern::ReAlloc() specifiying the |
|
1203 ENeverMove allocation mode. If this fails, it does the following sequence of |
|
1204 operations: it calls Kern::Alloc() to allocate a new larger size buffer, copies |
|
1205 the content of the old buffer into the new buffer (zero filling the extra space |
|
1206 in the new buffer), acquires the system lock, sets aPtr to point to the new |
|
1207 buffer, releases the system lock and finally frees the original buffer. |
|
1208 |
|
1209 If the new requested size is less than the old size, the function shrinks the |
|
1210 buffer but does not move it. |
|
1211 |
|
1212 This function is intended to allow the implementation of a dynamically growing |
|
1213 array which can be indexed and read very efficiently by holding only the |
|
1214 system lock, while modification of the array is protected by a heavyweight mutex. |
|
1215 |
|
1216 @param aPtr A reference to a pointer to the buffer to be reallocated. |
|
1217 @param aOldSize The size of the currently allocated buffer. |
|
1218 @param aNewSize The new requested size of the buffer. |
|
1219 |
|
1220 @return KErrNone, if successful; KErrNoMemory, if there is insufficient memory. |
|
1221 |
|
1222 @pre Calling thread must be in a critical section. |
|
1223 @pre Interrupts must be enabled. |
|
1224 @pre Kernel must be unlocked. |
|
1225 @pre No fast mutex can be held. |
|
1226 @pre Call in a thread context. |
|
1227 @pre Can be used in a device driver. |
|
1228 |
|
1229 @post Calling thread is in a critical section. |
|
1230 |
|
1231 @see Kern::ReAlloc() |
|
1232 @see Kern::Alloc() |
|
1233 */ |
|
EXPORT_C TInt Kern::SafeReAlloc(TAny*& aPtr, TInt aOldSize, TInt aNewSize)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::SafeReAlloc");
	if (aNewSize > aOldSize)
		{
#ifdef _DEBUG
		// we can't rely of simulated OOM in the kernel allocator because if
		// ReAlloc fails (and swallows simulated OOM) then the following Alloc will succeed...
		if(K::CheckForSimulatedAllocFail())
			return KErrNoMemory;
#endif
		// First try to grow in place; the pointer must not move while readers
		// may be using it under the system lock.
		TAny* p = ReAlloc(aPtr, aNewSize, RAllocator::ENeverMove);
		if (p)
			return KErrNone;	// grow in place succeeded, no need to move
		TAny* pNew = Alloc(aNewSize);	// otherwise allocate bigger block
		if (!pNew)
			return KErrNoMemory;
		TAny* pOld = aPtr;
		memcpy(pNew, pOld, aOldSize);	// copy current contents
#ifdef _DEBUG
		if (pOld)
			K::Allocator->DebugFunction(RAllocator::ECopyDebugInfo, pOld, pNew);
#endif
		// Swap the published pointer under the system lock so lock-holding
		// readers never observe a half-updated pointer.
		NKern::LockSystem();
		aPtr = pNew;
		NKern::UnlockSystem();
		Free(pOld);	// free old block
		}
	else if (aNewSize < aOldSize)
		{
		if (aNewSize > 0)
			aPtr = ReAlloc(aPtr, aNewSize, 0);	// can't fail
		else
			{
			// Shrinking to zero: clear the published pointer under the system
			// lock, then free the old cell outside the lock.
			NKern::LockSystem();
			TAny* pOld = aPtr;
			aPtr = NULL;
			NKern::UnlockSystem();
			Free(pOld);
			}
		}
	return KErrNone;
	}
|
1277 |
|
1278 |
|
1279 |
|
1280 |
|
1281 /** |
|
1282 Walks the kernel heap to validate its consistency. If the heap is inconsistent, |
|
1283 the kernel will panic with an appropriate panic code. |
|
1284 |
|
1285 @pre Calling thread must be in a critical section. |
|
1286 @pre Interrupts must be enabled. |
|
1287 @pre Kernel must be unlocked. |
|
1288 @pre No fast mutex can be held. |
|
1289 @pre Call in a thread context. |
|
1290 @pre Can be used in a device driver. |
|
1291 */ |
|
EXPORT_C void Kern::ValidateHeap()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::ValidateHeap");
	// Walks the kernel heap; panics the kernel if any inconsistency is found.
	K::Allocator->Check();
	}
|
1297 |
|
1298 |
|
1299 |
|
1300 |
|
1301 /** |
|
1302 Atomically swaps the pointer to the kernel-side reference counted object with a |
|
1303 NULL value, and then closes the object. |
|
1304 |
|
1305 @param aObj A reference to a pointer to a kernel-side reference counted object |
|
1306 that is to be closed; it is safe to pass a NULL value. |
|
1307 @param aPtr A pointer that is passed as a parameter to DObject::Close(). |
|
1308 |
|
1309 @pre Calling thread must be in a critical section |
|
1310 @pre Interrupts must be enabled. |
|
1311 @pre Kernel must be unlocked. |
|
1312 @pre No fast mutex can be held. |
|
1313 @pre Call in a thread context. |
|
1314 @pre Can be used in a device driver. |
|
1315 |
|
1316 @post aObj is NULL. |
|
1317 |
|
1318 @see DObject::Close() |
|
1319 */ |
|
EXPORT_C void Kern::SafeClose(DObject*& aObj, TAny* aPtr)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::SafeClose");
	// Atomically take ownership of the pointer while clearing it, so two
	// concurrent callers can never close the same object twice.
	DObject* pO = (DObject*)__e32_atomic_swp_ord_ptr(&aObj, 0);
	if (pO)
		pO->Close(aPtr);
	}
|
1327 |
|
1328 |
|
1329 TInt K::MakeHandle(TOwnerType aType, DObject* anObject) |
|
1330 { |
|
1331 TInt h; |
|
1332 TInt r=TheCurrentThread->MakeHandle(aType,anObject,h); |
|
1333 if(r==KErrNone) |
|
1334 return h; |
|
1335 else |
|
1336 return r; |
|
1337 } |
|
1338 |
|
1339 TInt K::MakeHandle(TOwnerType aType, DObject* anObject, TUint aAttr) |
|
1340 { |
|
1341 TInt h; |
|
1342 TInt r=TheCurrentThread->MakeHandle(aType,anObject,h,aAttr); |
|
1343 if(r==KErrNone) |
|
1344 return h; |
|
1345 else |
|
1346 return r; |
|
1347 } |
|
1348 |
|
// Creates a handle and increments the object's access count, on behalf of the
// current thread. Thin wrapper over DThread::MakeHandleAndOpen.
TInt K::MakeHandleAndOpen(TOwnerType aType, DObject* anObject, TInt& aHandle)
	{
	return TheCurrentThread->MakeHandleAndOpen(aType,anObject,aHandle);
	}
|
1353 |
|
// As above, but with explicit handle attributes.
TInt K::MakeHandleAndOpen(TOwnerType aType, DObject* anObject, TInt& aHandle, TUint aAttr)
	{
	return TheCurrentThread->MakeHandleAndOpen(aType,anObject,aHandle, aAttr);
	}
|
1358 |
|
// Closes a handle owned by the current thread (or its process).
TInt K::HandleClose(TInt aHandle)
	{
	return TheCurrentThread->HandleClose(aHandle);
	}
|
1363 |
|
// Creates a handle to aObj without leaving an extra reference on it: the
// Open() performed by MakeHandleAndOpen is balanced by a Close() here.
TInt DThread::MakeHandle(TOwnerType aType, DObject* aObj, TInt& aHandle)
	{
	TInt r=MakeHandleAndOpen(aType, aObj, aHandle);
	if (r==KErrNone)
		aObj->Close(NULL);	// NULL to balance access count but leave attached to process
	return r;
	}
|
1371 |
|
// As above, but with explicit handle attributes.
TInt DThread::MakeHandle(TOwnerType aType, DObject* aObj, TInt& aHandle, TUint aAttr)
	{
	TInt r=MakeHandleAndOpen(aType, aObj, aHandle, aAttr);
	if (r==KErrNone)
		aObj->Close(NULL);	// NULL to balance access count but leave attached to process
	return r;
	}
|
1379 |
|
// Delegates to the attributed overload with no attributes set.
TInt DThread::MakeHandleAndOpen(TOwnerType aType, DObject* aObj, TInt& aHandle)
	{
	return MakeHandleAndOpen(aType, aObj, aHandle, 0);
	}
|
1384 |
|
// Creates a handle to aObj, leaving the object's access count incremented on
// success. EOwnerThread handles go into this thread's handle array (marked
// with KHandleFlagLocal); otherwise into the owning process's array. On any
// failure the Open() performed here is balanced before returning.
TInt DThread::MakeHandleAndOpen(TOwnerType aType, DObject* aObj, TInt& aHandle, TUint aAttr)
	{
	TInt r = aObj->Open();
	if (r==KErrNone)
		{
		// Give the object a chance to veto or account for the new user handle.
		r = aObj->RequestUserHandle(this, aType, aAttr);
		if (r==KErrNone)
			{
			if (aType==EOwnerThread)
				{
				__KTRACE_OPT(KEXEC,Kern::Printf("Making handle from thread %O to object %O", this, aObj));

				// Thread-local handle: index into this thread's array,
				// tagged with KHandleFlagLocal.
				r = iHandles.Add(aObj, aAttr);
				if (r >= 0)
					{
					aHandle = r | KHandleFlagLocal;
					r = KErrNone;
					}
				}
			else
				{
				__KTRACE_OPT(KEXEC,Kern::Printf("Making handle from process %O to object %O", iOwningProcess, aObj));

				// Process-wide handle: index into the owning process's array.
				r = iOwningProcess->iHandles.Add(aObj, aAttr);
				if (r >= 0)
					{
					aHandle = r;
					r = KErrNone;
					}
				}
			}
		if (r==KErrNone)
			{
			// It is assumed that:
			// 1. AddToProcess() can only fail the first time the object is added to the process
			// 2. Close(iOwningProcess) is equivalent to Close(NULL) if the object has not been
			// added to the process.
			r=aObj->AddToProcess(iOwningProcess, aAttr);
			if (r!=KErrNone)
				{
				// Add to process failed - try to remove handle
				// If thread/process is exiting this might fail, but the handle will be closed
				// by the exit handler. In either case this balances the Open() above.
				HandleClose(aHandle);
				aHandle=0;
				}
			}
		else
			aObj->Close(NULL);	// NULL since we did not add to process
		}
	return r;
	}
|
1437 |
|
1438 /** |
|
1439 Makes a handle to a kernel object and increments the access count on the object. |
|
1440 |
|
1441 @param aThread The thread to own the handle. |
|
1442 If this is NULL, the current thread is used. |
|
1443 |
|
1444 @param aObject The object to which the handle will refer. |
|
1445 |
|
1446 @return The created handle (a value >0), if successful; |
|
1447 otherwise one of the other system wide error codes, (a value <0). |
|


Note that, unlike most handle-creation APIs, the success value is the handle
itself rather than KErrNone.
|
1450 |
|
1451 @pre Calling thread must be in a critical section |
|
1452 @pre Interrupts must be enabled. |
|
1453 @pre Kernel must be unlocked. |
|
1454 @pre No fast mutex can be held. |
|
1455 @pre Call in a thread context. |
|
1456 @pre Can be used in a device driver. |
|
1457 */ |
|
EXPORT_C TInt Kern::MakeHandleAndOpen(DThread* aThread, DObject* aObject)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::MakeHandleAndOpen");
	// Default to the current thread when no owner is specified.
	if (!aThread)
		aThread = TheCurrentThread;
	TInt h;
	TInt r = aThread->MakeHandleAndOpen(EOwnerThread, aObject, h);
	// Returns the handle (>0) on success, or the negative error code.
	return (r == KErrNone) ? h : r;
	}
|
1467 |
|
1468 |
|
TInt DThread::HandleClose(TInt aHandle)
	{
	// Ignore attempts to close special or null handles
	// or handles with the 'no close' bit set.
	if (aHandle<=0 || (aHandle & KHandleNoClose))
		return KErrNone;
	TInt r=KErrNone;
	DObject* pO=NULL;
	if (aHandle&KHandleFlagLocal)
		{
		// Thread-local handle: remove from this thread's handle array.
		TUint32 attr; // Receives the attributes of the removed handle...
		aHandle&=~KHandleFlagLocal;
		r=iHandles.Remove(aHandle,pO,attr);
		}
	else
		{
		// Process-wide handle: remove from the owning process's array.
		TUint32 attr; // Receives the attributes of the removed handle...
		r=iOwningProcess->iHandles.Remove(aHandle,pO,attr);
		}
	if (r==KErrNone)
		// Close the object; expose only the EObjectUnmapped bit of Close()'s
		// return value to the caller.
		r=pO->Close(iOwningProcess)&DObject::EObjectUnmapped;
	return r;
	}
|
1492 |
|
1493 /** |
|
1494 Discard a handle to a kernel object and decrements the access count on the object. |
|
1495 |
|
1496 @param aThread The thread which owns the handle. If this is NULL, the current thread is used. |
|
@param aHandle The handle to close.
|
1498 |
|
1499 @return KErrNone, if successful; otherwise one of the other system wide error codes. |
|
1500 |
|
1501 @pre Calling thread must be in a critical section |
|
1502 @pre Interrupts must be enabled. |
|
1503 @pre Kernel must be unlocked. |
|
1504 @pre No fast mutex can be held. |
|
1505 @pre Call in a thread context. |
|
1506 |
|
1507 */ |
|
EXPORT_C TInt Kern::CloseHandle(DThread* aThread, TInt aHandle)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::CloseHandle");
	// Default to the current thread when no owner is specified.
	if (!aThread)
		aThread = TheCurrentThread;
	return aThread->HandleClose(aHandle);
	}
|
1515 |
|
1516 |
|
// Opens the object identified by a find-handle and creates a user handle to
// it for this thread. Enforces PlatSec process isolation for non-global
// objects. On success aHandle receives the new handle.
TInt DThread::OpenFindHandle(TOwnerType aType, const TFindHandle& aFindHandle, TInt& aHandle)
	{
	__KTRACE_OPT(KEXEC,Kern::Printf("DThread::OpenFindHandle"));
	TInt r=KErrNone;
	DObjectCon* pC=K::ContainerFromFindHandle(aFindHandle);
	if (!pC)
		return KErrBadHandle;
	// Hold the container mutex only across the lookup and Open().
	pC->Wait();
	DObject* pO=pC->At(aFindHandle);
	if (pO)
		r=pO->Open();
	pC->Signal();
	if (!pO)
		return KErrNotFound;
	if (r!=KErrNone)
		return KErrBadHandle;
	__KTRACE_OPT(KEXEC,Kern::Printf("Object %O found",pO));
	// Non-global objects require a process-isolation check when the
	// EKernelConfigPlatSecProcessIsolation flag is enabled.
	if ((pO->Protection()!=DObject::EGlobal) && (TheSuperPage().KernelConfigFlags() & EKernelConfigPlatSecProcessIsolation))
		{
#ifndef __REMOVE_PLATSEC_DIAGNOSTICS__
		r = PlatSec::ProcessIsolationFail(__PLATSEC_DIAGNOSTIC_STRING("Checked by RHandleBase::Open(const TFindHandleBase)"));
#else //__REMOVE_PLATSEC_DIAGNOSTICS__
		r = PlatSec::EmitDiagnostic();
#endif // !__REMOVE_PLATSEC_DIAGNOSTICS__
		}
	if (r==KErrNone)
		r=MakeHandle(aType,pO,aHandle);
	if (r!=KErrNone)
		pO->Close(NULL);	// balance the Open() above on any failure
	return r;
	}
|
1548 |
|
// Opens an object of container type aObjType by full name and creates a user
// handle to it. On success anObj points to the opened object and aHandle
// receives the handle.
TInt DThread::OpenObject(TOwnerType aType, const TDesC& aName, TInt& aHandle, DObject*& anObj, TInt aObjType)
	{
	__KTRACE_OPT(KEXEC,Kern::Printf("DThread::OpenObject %lS",&aName));
	anObj=NULL;
	// Reject malformed full names before searching.
	TInt r=Kern::ValidateFullName(aName);
	if (r!=KErrNone)
		return r;
	DObject* pO=NULL;
	// OpenByFullName opens the object on success, so a matching Close() is
	// required on any later failure.
	r=K::Containers[aObjType]->OpenByFullName(pO,aName);
	if (r!=KErrNone)
		return r;
	__KTRACE_OPT(KEXEC,Kern::Printf("Object %O found", pO));
	anObj=pO;
	r=MakeHandle(aType,pO,aHandle);
	if (r!=KErrNone)
		pO->Close(NULL);	// NULL because chunk not added to process
	return r;
	}
|
1567 |
|
1568 #ifndef __HANDLES_MACHINE_CODED__ |
|
1569 /** Translate a user handle relative to a specific thread. |
|
1570 |
|
1571 The handle may refer to type of kernel object. |
|
1572 |
|
1573 @param aHandle The handle to translate. |
|
1574 |
|
1575 @return A pointer to the kernel object to which the handle refers; |
|
1576 NULL if the handle is invalid. |
|
1577 |
|
1578 @pre System lock must be held. |
|
1579 */ |
|
EXPORT_C DObject* DThread::ObjectFromHandle(TInt aHandle)
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"DThread::ObjectFromHandle(TInt aHandle)");
	if (aHandle<0)
		{
		// Negative handles are the special pseudo-handles; strip the
		// 'no close' bit before comparing.
		aHandle &= ~KHandleNoClose;
		if (aHandle==(KCurrentThreadHandle&~KHandleNoClose))
			return TheCurrentThread;
		if (aHandle==(KCurrentProcessHandle&~KHandleNoClose))
			return TheCurrentThread->iOwningProcess;
#ifdef __OBSOLETE_V1_IPC_SUPPORT__
		// Legacy v1 IPC: decode the handle into a message address and return
		// the client thread of that message.
		TUint32 h = aHandle;
		if (h < 0x88000000u)
			{
			h = (h & 0x00007FFFu) | ((h & 0x07FF0000u) >> 1);
			h = TUint32(K::MsgInfo.iBase) + (h << 2);
			RMessageK* m = RMessageK::MessageK(h, this);
			if (!m || m->iFunction == RMessage2::EDisConnect)
				return NULL;
			return m->iClient;
			}
#endif
		return NULL;
		}
	DObject* pO=NULL;
	if (aHandle&KHandleFlagLocal)
		{
		// Thread-local handle: look up in this thread's handle array.
		pO=iHandles.At(aHandle&~KHandleFlagLocal);
		}
	else
		{
		// Otherwise look up in the owning process's handle array.
		pO=iOwningProcess->iHandles.At(aHandle);
		}
	return pO;
	}
|
1615 |
|
1616 /** |
|
1617 Translates a user handle relative to a specific thread. |
|
1618 |
|
1619 The handle must refer to a specific type of kernel object. |
|
1620 |
|
1621 @param aHandle The handle to translate. |
|
1622 @param aType The type of kernel object to which the handle must refer. |
|
1623 This should be a member of the TObjectType enumeration. |
|
1624 |
|
1625 @return A pointer to the kernel object to which the handle refers. |
|
1626 NULL if the handle is invalid or refers to the wrong type of object. |
|
1627 |
|
1628 @pre System lock must be held. |
|
1629 */ |
|
EXPORT_C DObject* DThread::ObjectFromHandle(TInt aHandle, TInt aType)
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"DThread::ObjectFromHandle(TInt aHandle, TInt aType)");
	// Delegate to the attributed overload; the attributes are discarded.
	TUint attr = 0;
	return ObjectFromHandle(aHandle, aType, attr);
	}
|
1636 |
|
1637 EXPORT_C DObject* DThread::ObjectFromHandle(TInt aHandle, TInt aType, TUint& aAttr) |
|
1638 { |
|
1639 CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"DThread::ObjectFromHandle(TInt aHandle, TInt aType)"); |
|
1640 if (aHandle<0) |
|
1641 { |
|
1642 aHandle &= ~KHandleNoClose; |
|
1643 if (aHandle==(KCurrentThreadHandle&~KHandleNoClose) && aType==EThread) |
|
1644 return TheCurrentThread; |
|
1645 if (aHandle==(KCurrentProcessHandle&~KHandleNoClose) && aType==EProcess) |
|
1646 return TheCurrentThread->iOwningProcess; |
|
1647 #ifdef __OBSOLETE_V1_IPC_SUPPORT__ |
|
1648 TUint32 h = aHandle; |
|
1649 if (aType==EThread && h < 0x88000000u) |
|
1650 { |
|
1651 h = (h & 0x00007FFFu) | ((h & 0x07FF0000u) >> 1); |
|
1652 h = TUint32(K::MsgInfo.iBase) + (h << 2); |
|
1653 RMessageK* m = RMessageK::MessageK(h, this); |
|
1654 if (!m || m->iFunction == RMessage2::EDisConnect) |
|
1655 return NULL; |
|
1656 return m->iClient; |
|
1657 } |
|
1658 #endif |
|
1659 return NULL; |
|
1660 } |
|
1661 DObject* pO=NULL; |
|
1662 |
|
1663 if (aHandle&KHandleFlagLocal) |
|
1664 { |
|
1665 pO=iHandles.At(aHandle&~KHandleFlagLocal,aType+1, (TUint32*)&aAttr); |
|
1666 } |
|
1667 else |
|
1668 { |
|
1669 pO=iOwningProcess->iHandles.At(aHandle,aType+1, (TUint32*)&aAttr); |
|
1670 } |
|
1671 return pO; |
|
1672 } |
|
1673 |
|
DObject* K::ObjectFromHandle(TInt aHandle)
//
// Look up an object in the current thread/process handles array
// Panic on bad handle
// Enter and leave with system lock held
//
	{
	DObject* pO=TheCurrentThread->ObjectFromHandle(aHandle);
	if (!pO)
		K::PanicCurrentThread(EBadHandle);
	return pO;
	}
|
1686 |
|
DObject* K::ObjectFromHandle(TInt aHandle, TInt aType)
//
// Look up an object of specific type in the current thread/process handles array
// Panic on bad handle
// Enter and leave with system lock held
//
	{
	DObject* pO=TheCurrentThread->ObjectFromHandle(aHandle,aType);
	if (!pO)
		K::PanicCurrentThread(EBadHandle);
	return pO;
	}
|
1699 |
|
DObject* K::ObjectFromHandle(TInt aHandle, TInt aType, TUint& aAttr)
//
// Look up an object of specific type in the current thread/process handles array
// Also returns the handle's attributes in aAttr.
// Panic on bad handle
// Enter and leave with system lock held
//
	{
	DObject* pO=TheCurrentThread->ObjectFromHandle(aHandle,aType,aAttr);
	if (!pO)
		K::PanicCurrentThread(EBadHandle);
	return pO;
	}
|
1712 |
|
1713 |
|
1714 |
|
1715 /** |
|
1716 Returns the kernel object that the given handle refers. |
|
1717 |
|
1718 The handle passed is looked up in the thread's handles collection if the handle is local or |
|
1719 in the thread's owner process' collection otherwise. If aHandle is negative or not found in |
|
1720 the thread's or process' collection then NULL is returned. |
|
1721 Two special handle values KCurrentThreadHandle and KCurrentProcessHandle can be used to get |
|
1722 a pointer to the current thread and the current process. |
|
1723 |
|
1724 aType is used to ensure that the object referred by the handle is of desired type. |
|
1725 If the type of the object referred by aHandle is different from aType then NULL is returned. |
|
1726 If aType is negative, the type of the object is ignored and no type checking is done. |
|
1727 If aType is positive and greater than the maximum number of object types (ENumObjectTypes) |
|
1728 the kernel will fault. |
|
1729 |
|
1730 @param aThread The thread that owns the handle passed. |
|
1731 @param aHandle Handle to the object to be returned. |
|
1732 @param aType TObjectType parameter specifying the type of the object referred by the handle. |
|
1733 |
|
1734 @return Pointer to the DObject referred by the handle or NULL if the handle is not |
|
1735 found in the thread's handles collection. |
|
1736 |
|
1737 @pre System must be locked |
|
1738 @pre Interrupts must be enabled. |
|
1739 @pre Kernel must be unlocked. |
|
1740 @pre Can be used in a device driver. |
|
1741 |
|
1742 @see TObjectType |
|
1743 @see DThread::ObjectFromHandle() |
|
1744 */ |
|
EXPORT_C DObject* Kern::ObjectFromHandle(DThread* aThread, TInt aHandle, TInt aType)
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED,
		"Kern::ObjectFromHandle(DThread* aThread, TInt aHandle, TInt aType)");
	if (aType>=0)
		{
		// Non-negative aType requests a type-checked lookup; an out-of-range
		// type is a kernel fault.
		if (aType<ENumObjectTypes)
			return aThread->ObjectFromHandle(aHandle,aType);
		K::Fault(K::EBadObjectType);
		}
	// Negative aType means no type checking.
	return aThread->ObjectFromHandle(aHandle);
	}
|
1757 |
|
1758 /** |
|
1759 Returns the kernel object that the given handle refers. |
|
1760 |
|
1761 The handle passed is looked up in the thread's handles collection if the handle is local or |
|
1762 in the thread's owner process' collection otherwise. If aHandle is negative or not found in |
|
1763 the thread's or process' collection then NULL is returned. |
|
1764 Two special handle values KCurrentThreadHandle and KCurrentProcessHandle can be used to get |
|
1765 a pointer to the current thread and the current process. |
|
1766 |
|
1767 aType is used to ensure that the object referred by the handle is of desired type. |
|
1768 If the type of the object referred by aHandle is different from aType then NULL is returned. |
|
1769 If aType is negative, the type of the object is ignored and no type checking is done. |
|
1770 If aType is positive and greater than the maximum number of object types (ENumObjectTypes) |
|
1771 the kernel will fault. |
|
1772 |
|
1773 @param aThread The thread that owns the handle passed. |
|
1774 @param aHandle Handle to the object to be returned. |
|
1775 @param aType TObjectType parameter specifying the type of the object referred by the handle. |
|
1776 @param aAttr Returns the attributes for this object. |
|
1777 |
|
1778 @return Pointer to the DObject referred by the handle or NULL if the handle is not |
|
1779 found in the thread's handles collection. |
|
1780 |
|
1781 @pre System must be locked |
|
1782 @pre Interrupts must be enabled. |
|
1783 @pre Kernel must be unlocked. |
|
1784 @pre Can be used in a device driver. |
|
1785 |
|
1786 @see TObjectType |
|
1787 @see DThread::ObjectFromHandle() |
|
1788 */ |
|
1789 EXPORT_C DObject* Kern::ObjectFromHandle(DThread* aThread, TInt aHandle, TInt aType, TUint& aAttr) |
|
1790 { |
|
1791 CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED, |
|
1792 "Kern::ObjectFromHandle(DThread* aThread, TInt aHandle, TInt aType)"); |
|
1793 if (aType>=0) |
|
1794 { |
|
1795 if (aType<ENumObjectTypes) |
|
1796 return aThread->ObjectFromHandle(aHandle,aType, aAttr); |
|
1797 K::Fault(K::EBadObjectType); |
|
1798 } |
|
1799 return aThread->ObjectFromHandle(aHandle, 0, aAttr); |
|
1800 } |
|
1801 #endif |
|
1802 |
|
TInt K::OpenObjectFromHandle(TInt aHandle, DObject*& anObject)
//
// Look up a handle and open the object.
// Enter and return with no fast mutexes held.
// If successful, calling thread is placed into critical section.
// Return KErrBadHandle if handle bad, KErrNone if OK
//
	{
	DThread& t=*TheCurrentThread;
	TInt r=KErrBadHandle;
	// Enter a critical section before opening so the caller cannot be killed
	// while holding the extra reference.
	NKern::ThreadEnterCS();
	NKern::LockSystem();
	DObject* pO=t.ObjectFromHandle(aHandle);
	if (pO)
		r=pO->Open();
	NKern::UnlockSystem();
	if (r!=KErrNone)
		{
		// Failed: leave the critical section again and return no object.
		anObject=NULL;
		NKern::ThreadLeaveCS();
		}
	else
		anObject=pO;
	return r;
	}
|
1828 |
|
1829 |
|
1830 |
|
1831 |
|
1832 /** |
|
1833 Gets a pointer to the thread corresponding to the specified thread Id value. |
|
1834 |
|
1835 The caller must ensure that the returned DThread instance is not closed |
|
1836 asynchronously by another thread. |
|
1837 |
|
1838 @param aId The thread id. |
|
1839 |
|
1840 @return A pointer to the thread, or NULL if not found. |
|
1841 |
|
1842 @pre Calling thread must be in a critical section. |
|
1843 @pre Interrupts must be enabled. |
|
1844 @pre Kernel must be unlocked. |
|
1845 @pre Thread container mutex must be held. |
|
1846 @pre Call in a thread context. |
|
1847 @pre No fast mutex must be held |
|
1848 @pre Can be used in a device driver. |
|
1849 |
|
1850 @post Thread container mutex is held. |
|
1851 @post Calling thread is in a critical section. |
|
1852 */ |
|
1853 EXPORT_C DThread* Kern::ThreadFromId(TUint aId) |
|
1854 { |
|
1855 DObjectCon& threads=*K::Containers[EThread]; |
|
1856 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::ThreadFromId"); |
|
1857 __ASSERT_WITH_MESSAGE_MUTEX(threads.Lock(),"Thread container mutex must be held","Kern::ThreadFromId"); |
|
1858 TInt c=threads.Count(); |
|
1859 TInt i; |
|
1860 for (i=0; i<c; i++) |
|
1861 { |
|
1862 DThread* pT=(DThread*)threads[i]; |
|
1863 if (pT->iId==aId) |
|
1864 return pT; |
|
1865 } |
|
1866 return NULL; |
|
1867 } |
|
1868 |
|
1869 |
|
1870 |
|
1871 |
|
1872 /** |
|
1873 Gets a pointer to the process corresponding to the specified process Id value. |
|
1874 |
|
1875 The caller must ensure that the returned DProcess instance is not deleted |
|
1876 asynchronously by another thread. |
|
1877 |
|
1878 @param aId The process id. |
|
1879 @return A pointer to the process, or NULL if not found. |
|
1880 |
|
1881 @pre Calling thread must be in a critical section. |
|
1882 @pre Interrupts must be enabled. |
|
1883 @pre Kernel must be unlocked. |
|
1884 @pre Process container mutex must be held. |
|
1885 @pre Call in a thread context. |
|
1886 @pre No fast mutex must be held |
|
1887 @pre Can be used in a device driver. |
|
1888 |
|
1889 @post Process container mutex is held. |
|
1890 @post Calling thread is in a critical section. |
|
1891 */ |
|
1892 EXPORT_C DProcess* Kern::ProcessFromId(TUint aId) |
|
1893 { |
|
1894 DObjectCon& processes=*K::Containers[EProcess]; |
|
1895 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::ProcessFromId"); |
|
1896 __ASSERT_WITH_MESSAGE_MUTEX(processes.Lock(),"Process container mutex must be held","Kern::ThreadFromId"); |
|
1897 //end of preconditions check |
|
1898 TInt c=processes.Count(); |
|
1899 TInt i; |
|
1900 for (i=0; i<c; i++) |
|
1901 { |
|
1902 DProcess* pP=(DProcess*)processes[i]; |
|
1903 if (pP->iId==aId) |
|
1904 return pP; |
|
1905 } |
|
1906 return NULL; |
|
1907 } |
|
1908 |
|
1909 TBool K::IsInKernelHeap(const TAny* aPtr, TInt aSize) |
|
1910 // |
|
1911 // Check if an address range lies within the kernel heap chunk |
|
1912 // |
|
1913 { |
|
1914 TLinAddr a=(TLinAddr)aPtr; |
|
1915 TLinAddr base=(TLinAddr)K::HeapInfo.iBase; |
|
1916 TInt max=K::HeapInfo.iMaxSize; |
|
1917 return (a>=base && TInt(a-base+aSize)<=max); |
|
1918 } |
|
1919 |
|
1920 GLDEF_C TInt CalcKernelHeapUsed() |
|
1921 { |
|
1922 return ((RHeapK*)K::Allocator)->TotalAllocSize(); |
|
1923 } |
|
1924 |
|
1925 |
|
1926 |
|
1927 |
|
1928 /** |
|
1929 Copies data from a source descriptor in kernel memory, to a target descriptor |
|
1930 in user memory, in a way that enables forward and backward compatibility. |
|
1931 |
|
If the length of the source data is longer than the maximum length of the
|
1933 target descriptor then the number of bytes copied is limited to the maximum |
|
1934 length of the target descriptor. |
|
1935 |
|
If the length of the source data is smaller than the maximum length of the
|
1937 target descriptor then the target descriptor is padded with zeros. |
|
1938 |
|
1939 If the current thread is a user thread (i.e. if the mode in spsr_svc |
|
1940 is 'User'), then data is written using user mode privileges. |
|
1941 |
|
1942 @param aDestU The target descriptor in user memory. |
|
1943 @param aSrcK The source descriptor in kernel memory. |
|
1944 |
|
1945 @panic KERN-EXEC 33, if aDestU is not a writable descriptor type. |
|
1946 |
|
1947 @pre Do not call from User thread if in a critical section. |
|
1948 @pre Interrupts must be enabled. |
|
1949 @pre Kernel must be unlocked. |
|
1950 @pre No fast mutex can be held. |
|
1951 @pre Call in a thread context. |
|
1952 @pre Can be used in a device driver. |
|
1953 |
|
1954 @post The length of aDestU is equal to the number of bytes copied, excluding |
|
1955 any padding. |
|
1956 @post If aDestU is a TPtr type then its maximum length is equal its new length. |
|
1957 */ |
|
1958 EXPORT_C void Kern::InfoCopy(TDes8& aDestU, const TDesC8& aSrcK) |
|
1959 { |
|
1960 CHECK_PRECONDITIONS(MASK_NO_CRITICAL_IF_USER|MASK_THREAD_STANDARD,"Kern::InfoCopy(TDes8& aDestU, const TDesC8& aSrcK)"); |
|
1961 Kern::InfoCopy(aDestU,aSrcK.Ptr(),aSrcK.Length()); |
|
1962 } |
|
1963 |
|
1964 |
|
1965 |
|
1966 |
|
1967 /** |
|
1968 Copies data from kernel memory to a target descriptor in user memory, |
|
1969 in a way that enables forward and backward compatibility. |
|
1970 |
|
If the length of the source data is longer than the maximum length of the
|
1972 target descriptor then the number of bytes copied is limited to the maximum |
|
1973 length of the target descriptor. |
|
1974 |
|
If the length of the source data is smaller than the maximum length of the
|
1976 target descriptor then the target descriptor is padded with zeros. |
|
1977 |
|
1978 If the current thread is a user thread (i.e. if the mode in spsr_svc |
|
1979 is 'User'), then data is written using user mode privileges. |
|
1980 |
|
1981 @param aDestU The target descriptor in user memory. |
|
1982 @param aPtrK Address of the first byte of data to be copied in kernel memory. |
|
1983 @param aLengthK Length of data to be copied. |
|
1984 |
|
1985 @panic KERN-EXEC 33, the target descriptor is not writable. |
|
1986 |
|
1987 @pre Do not call from User thread if in a critical section. |
|
1988 @pre Interrupts must be enabled. |
|
1989 @pre Kernel must be unlocked. |
|
1990 @pre No fast mutex can be held. |
|
1991 @pre Call in a thread context. |
|
1992 @pre Can be used in a device driver. |
|
1993 |
|
1994 @post The length of aDestU is equal to the number of bytes copied, excluding |
|
1995 any padding. |
|
1996 @post If aDestU is a TPtr type then its maximum length is equal its new length. |
|
1997 */ |
|
EXPORT_C void Kern::InfoCopy(TDes8& aDestU, const TUint8* aPtrK, TInt aLengthK)
	{
	CHECK_PRECONDITIONS(MASK_NO_CRITICAL_IF_USER|MASK_THREAD_STANDARD,"Kern::InfoCopy(TDes8& aDestU, const TUint8* aPtrK, TInt aLengthK)");
	TInt userLen;
	TInt userMax;
	// Interrogate the user-side descriptor: current length, max length and
	// a pointer to its data area.
	TUint8* userPtr=(TUint8*)Kern::KUDesInfo(aDestU,userLen,userMax);
	if (userMax<0)
		K::PanicKernExec(EKUDesInfoInvalidType);	// KERN-EXEC 33: target not writable
	// Never copy more than the target descriptor can hold.
	TInt copyLength=Min(aLengthK,userMax);
	if (aLengthK<userMax)
		kumemset(userPtr+aLengthK,0,userMax-aLengthK);	// zero-pad the uncopied tail
	TPtrC8 kptr(aPtrK,copyLength);
	// Copy the (possibly truncated) data and set the target's length.
	Kern::KUDesPut(aDestU,kptr);
	}
|
2012 |
|
2013 |
|
2014 |
|
2015 |
|
2016 /** |
|
2017 Gets the power model. |
|
2018 |
|
2019 @return A pointer to the power model object. |
|
2020 |
|
2021 @pre Call in any context. |
|
2022 */ |
|
2023 EXPORT_C DPowerModel* Kern::PowerModel() |
|
2024 { |
|
2025 return K::PowerModel; |
|
2026 } |
|
2027 |
|
2028 |
|
2029 |
|
2030 |
|
2031 /** |
|
2032 Gets the status of the power supply. |
|
2033 |
|
2034 @return The status of the power supply. EGood, if there is no power model. |
|
2035 |
|
2036 @pre Calling thread can be either in a critical section or not. |
|
2037 @pre Interrupts must be enabled. |
|
2038 @pre Kernel must be unlocked. |
|
2039 @pre No fast mutex can be held. |
|
2040 @pre Call in a thread context. |
|
2041 @pre Can be used in a device driver. |
|
2042 */ |
|
2043 EXPORT_C TSupplyStatus Kern::MachinePowerStatus() |
|
2044 { |
|
2045 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::MachinePowerStatus"); |
|
2046 if(K::PowerModel) |
|
2047 return K::PowerModel->MachinePowerStatus(); |
|
2048 // If no power model... |
|
2049 return EGood; |
|
2050 } |
|
2051 |
|
2052 |
|
2053 |
|
2054 |
|
2055 /** |
|
2056 Changes the priority of the specified thread or the current thread. |
|
2057 |
|
2058 @param aPriority The new priority to be set. |
|
2059 @param aThread The thread that is to have its priority set. If NULL, the |
|
2060 thread is the current thread. |
|
2061 |
|
2062 @return KErrNone, if successful; KErrArgument, if the priority value is |
|
2063 negative or greater than or equal to KNumPriorities. |
|
2064 |
|
2065 @pre Calling thread can be either in a critical section or not. |
|
2066 @pre Interrupts must be enabled. |
|
2067 @pre Kernel must be unlocked. |
|
2068 @pre No fast mutex can be held. |
|
2069 @pre Call in a thread context. |
|
2070 @pre Can be used in a device driver. |
|
2071 |
|
2072 @see DThread::SetThreadPriority() |
|
2073 @see KNumPriorities |
|
2074 */ |
|
2075 EXPORT_C TInt Kern::SetThreadPriority(TInt aPriority, DThread* aThread) |
|
2076 { |
|
2077 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::SetThreadPriority"); |
|
2078 if (!aThread) |
|
2079 aThread=TheCurrentThread; |
|
2080 __KTRACE_OPT(KEXEC,Kern::Printf("Kern::SetThreadPriority %d %O",aPriority,aThread)); |
|
2081 if (aPriority<0 || aPriority>=KNumPriorities) |
|
2082 return KErrArgument; |
|
2083 NKern::LockSystem(); |
|
2084 aThread->SetThreadPriority(aPriority); |
|
2085 NKern::UnlockSystem(); |
|
2086 return KErrNone; |
|
2087 } |
|
2088 |
|
2089 |
|
2090 |
|
2091 |
|
2092 /** |
|
2093 Gets the device's superpage. |
|
2094 |
|
2095 @return A reference to the device's superpage. |
|
2096 |
|
2097 @pre Call in any context. |
|
2098 */ |
|
2099 EXPORT_C TSuperPage& Kern::SuperPage() |
|
2100 { |
|
2101 return *(TSuperPage*)SuperPageAddress; |
|
2102 } |
|
2103 |
|
2104 |
|
2105 |
|
2106 |
|
2107 /** |
|
2108 Gets the device's configuration information. |
|
2109 |
|
2110 @return A reference to the device configuration information. |
|
2111 |
|
2112 @pre Call in any context. |
|
2113 */ |
|
2114 EXPORT_C TMachineConfig& Kern::MachineConfig() |
|
2115 { |
|
2116 return *K::MachineConfig; |
|
2117 } |
|
2118 |
|
2119 |
|
2120 |
|
2121 |
|
2122 /** |
|
2123 Suspends execution of the specified thread. |
|
2124 |
|
2125 If the thread is running a critical section, suspension will be deferred until |
|
2126 it leaves the critical section. |
|
2127 |
|
2128 @param aThread The thread to be suspended. |
|
2129 @param aCount Specifies how many times this thread should be suspended. It |
|
2130 will require the same number of calls to ThreadResume() to undo |
|
2131 the result of this call to ThreadSuspend(). |
|
2132 |
|
2133 @pre Calling thread can be either in a critical section or not. |
|
2134 @pre Interrupts must be enabled. |
|
2135 @pre Kernel must be unlocked. |
|
2136 @pre No fast mutex can be held. |
|
2137 @pre Call in a thread context. |
|
2138 @pre Can be used in a device driver. |
|
2139 |
|
2140 @see DThread::Suspend() |
|
2141 */ |
|
EXPORT_C void Kern::ThreadSuspend(DThread& aThread, TInt aCount)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::ThreadSuspend");
	// DThread::Suspend() is invoked with the system lock held; aCount is the
	// number of suspensions to add (matching ThreadResume calls undo them).
	NKern::LockSystem();
	aThread.Suspend(aCount);
	NKern::UnlockSystem();
	}
|
2149 |
|
2150 |
|
2151 |
|
2152 |
|
2153 /** |
|
2154 Resumes execution of the specified thread. |
|
2155 |
|
2156 Calling Resume() does not mean that the thread becomes runnable. Instead it |
|
2157 increments the thread's suspend count. When the count reaches 0, the thread |
|
2158 is made runnable (in case it's not blocked). |
|
2159 |
|
2160 @param aThread The thread to be resumed. |
|
2161 |
|
2162 @pre Calling thread can be either in a critical section or not. |
|
2163 @pre Interrupts must be enabled. |
|
2164 @pre Kernel must be unlocked. |
|
2165 @pre No fast mutex can be held. |
|
2166 @pre Call in a thread context. |
|
2167 @pre Can be used in a device driver. |
|
2168 |
|
2169 @see DThread::Resume() |
|
2170 */ |
|
EXPORT_C void Kern::ThreadResume(DThread& aThread)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::ThreadResume");
	// DThread::Resume() is invoked with the system lock held; it decrements
	// the suspend count, making the thread runnable only when it reaches zero.
	NKern::LockSystem();
	aThread.Resume();
	NKern::UnlockSystem();
	}
|
2178 |
|
2179 |
|
2180 |
|
2181 |
|
2182 /** |
|
2183 Waits on the specified mutex. |
|
2184 |
|
2185 If the calling thread is a user thread, it must be in a critical section while |
|
2186 it holds the mutex to prevent deadlocks (thread suspended while holding mutex), inconsistent |
|
2187 states (thread killed while data protected by mutex in inconsistent state) |
|
2188 and resource leaks (thread killed before taking ownership of some |
|
2189 resource). |
|
2190 |
|
2191 @param aMutex Mutex to wait on. |
|
2192 |
|
2193 @return KErrNone, if successful, otherwise one of the other system-wide error |
|
2194 codes. |
|
2195 |
|
2196 @pre Calling thread must be in a critical section. |
|
2197 @pre Interrupts must be enabled. |
|
2198 @pre Kernel must be unlocked. |
|
2199 @pre No fast mutex can be held. |
|
2200 @pre Call in a thread context. |
|
2201 @pre Can be used in a device driver. |
|
2202 */ |
|
EXPORT_C TInt Kern::MutexWait(DMutex& aMutex)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::MutexWait");
	// DMutex::Wait() is called under the system lock; its result (KErrNone
	// on success, or an error such as when the mutex is deleted) is passed
	// straight back to the caller.
	NKern::LockSystem();
	TInt r=aMutex.Wait();
	NKern::UnlockSystem();
	return r;
	}
|
2211 |
|
2212 |
|
2213 |
|
2214 |
|
2215 /** |
|
2216 Signals the specified mutex. |
|
2217 |
|
2218 If the calling thread is a user thread, it must be in a critical section. |
|
2219 |
|
2220 @param aMutex Mutex to signal |
|
2221 |
|
2222 @pre Calling thread must be in a critical section. |
|
2223 @pre Interrupts must be enabled. |
|
2224 @pre Kernel must be unlocked. |
|
2225 @pre No fast mutex can be held. |
|
2226 @pre Call in a thread context. |
|
2227 @pre Can be used in a device driver. |
|
2228 */ |
|
EXPORT_C void Kern::MutexSignal(DMutex& aMutex)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::MutexSignal");
	NKern::LockSystem();
	// NOTE(review): there is deliberately no matching UnlockSystem() here -
	// DMutex::Signal() appears to release the system lock itself; confirm
	// against the DMutex implementation before changing this.
	aMutex.Signal();
	}
|
2235 |
|
2236 |
|
2237 |
|
2238 |
|
2239 /** |
|
2240 Creates a kernel mutex object with the specified name. |
|
2241 |
|
2242 On return, the kernel mutex object is not visible and has no owner. |
|
2243 |
|
2244 @param aMutex A reference to a DMutex pointer. |
|
2245 On successful return from this function, the pointer is set |
|
2246 to the address of the created DMutex object. |
|
2247 @param aName The name of the mutex. |
|
2248 @param aOrder A value representing the order of the mutex with respect to deadlock prevention. |
|
2249 |
|
2250 @pre Calling thread must be in a critical section. |
|
2251 @pre Interrupts must be enabled. |
|
2252 @pre Kernel must be unlocked. |
|
2253 @pre No fast mutex can be held. |
|
2254 @pre Call in a thread context. |
|
2255 @pre Can be used in a device driver. |
|
2256 |
|
2257 @post On successful return, aMutex contains a pointer to the newly created |
|
2258 DMutex object. |
|
2259 |
|
2260 @return KErrNone, if successful, otherwise one of the other system-wide |
|
2261 error codes. |
|
2262 */ |
|
2263 EXPORT_C TInt Kern::MutexCreate(DMutex*& aMutex, const TDesC& aName, TUint aOrder) |
|
2264 { |
|
2265 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::MutexCreate"); |
|
2266 return K::MutexCreate(aMutex, aName, NULL, EFalse, aOrder); |
|
2267 } |
|
2268 |
|
2269 |
|
2270 /** |
|
2271 Waits on the specified semaphore. |
|
2272 |
|
2273 @param aSem Semaphore to wait on |
|
2274 @param aNTicks Maximum number of nanokernel ticks to wait before timing out |
|
2275 the operation. Zero means wait forever. If this parameter is |
|
2276 not specified it defaults to 0. |
|
2277 |
|
2278 @return KErrNone, if successful; |
|
2279 KErrTimedOut, if the maximum wait time was exceeded before the |
|
2280 semaphore was signalled; |
|
2281 KErrGeneral, if the semaphore was deleted. |
|
2282 |
|
2283 @pre Interrupts must be enabled. |
|
2284 @pre Kernel must be unlocked. |
|
2285 @pre No fast mutex can be held. |
|
2286 @pre Call in a thread context. |
|
2287 @pre Can be used in a device driver. |
|
2288 */ |
|
EXPORT_C TInt Kern::SemaphoreWait(DSemaphore& aSem, TInt aNTicks)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::SemaphoreWait");
	NKern::LockSystem();
	// NOTE(review): the system lock is intentionally not released here -
	// DSemaphore::Wait() appears to drop it as part of blocking; confirm
	// against the DSemaphore implementation. aNTicks==0 means wait forever.
	return aSem.Wait(aNTicks);
	}
|
2295 |
|
2296 |
|
2297 |
|
2298 |
|
2299 /** |
|
2300 Signals the specified semaphore. |
|
2301 |
|
2302 @param aSem Semaphore to signal. |
|
2303 |
|
2304 @pre Interrupts must be enabled. |
|
2305 @pre Kernel must be unlocked. |
|
2306 @pre No fast mutex can be held. |
|
2307 @pre Call in a thread context. |
|
2308 @pre Can be used in a device driver. |
|
2309 */ |
|
EXPORT_C void Kern::SemaphoreSignal(DSemaphore& aSem)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::SemaphoreSignal");
	NKern::LockSystem();
	// NOTE(review): no matching UnlockSystem() - DSemaphore::Signal() appears
	// to release the system lock itself; confirm before changing.
	aSem.Signal();
	}
|
2316 |
|
2317 |
|
2318 |
|
2319 /** |
|
2320 Creates a semaphore with the specified name. |
|
2321 |
|
2322 Note that, on return, the semaphore is not visible, and has no owner. |
|
2323 |
|
2324 @param aSem A reference to a pointer to a semaphore. |
|
2325 @param aName The name of the semaphore. |
|
2326 @param aInitialCount The count with which the semaphore should start. |
|
2327 |
|
2328 @pre Calling thread must be in a critical section. |
|
2329 @pre Interrupts must be enabled. |
|
2330 @pre Kernel must be unlocked. |
|
2331 @pre No fast mutex can be held. |
|
2332 @pre Call in a thread context. |
|
2333 @pre Can be used in a device driver. |
|
2334 |
|
2335 @post On successful return, aSem contains a pointer to the newly created |
|
2336 semaphore. |
|
2337 |
|
2338 @return KErrNone, if successful, otherwise one of the other system-wide |
|
2339 error codes. |
|
2340 */ |
|
2341 EXPORT_C TInt Kern::SemaphoreCreate(DSemaphore*& aSem, const TDesC& aName, TInt aInitialCount) |
|
2342 { |
|
2343 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::SemaphoreCreate"); |
|
2344 __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Kern::SemaphoreCreate %lS init %d", &aName, aInitialCount)); |
|
2345 TInt r = KErrNoMemory; |
|
2346 DSemaphore* pS = new DSemaphore; |
|
2347 if (pS) |
|
2348 { |
|
2349 r = pS->Create(NULL, &aName, aInitialCount, EFalse); |
|
2350 if (r==KErrNone) |
|
2351 aSem = pS; |
|
2352 else |
|
2353 pS->Close(NULL); |
|
2354 } |
|
2355 __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Kern::SemaphoreCreate returns %d", r)); |
|
2356 return r; |
|
2357 } |
|
2358 |
|
2359 |
|
2360 |
|
2361 TUint K::CheckFreeMemoryLevel(TInt aInitial, TInt aFinal, TBool aFailed) |
|
2362 { |
|
2363 NKern::LockSystem(); |
|
2364 TInt low=K::MemoryLowThreshold; |
|
2365 TInt good=K::MemoryGoodThreshold; |
|
2366 NKern::UnlockSystem(); |
|
2367 TUint changes=0; |
|
2368 if (aFinal<low && aInitial>=low) |
|
2369 changes |= (EChangesFreeMemory | EChangesLowMemory); |
|
2370 if (aFinal>=good && aInitial<good) |
|
2371 changes |= EChangesFreeMemory; |
|
2372 if (aFailed) |
|
2373 changes |= EChangesOutOfMemory; |
|
2374 if (changes) |
|
2375 { |
|
2376 // asynchronously notify changes |
|
2377 Kern::AsyncNotifyChanges(changes); |
|
2378 } |
|
2379 return changes; |
|
2380 } |
|
2381 |
|
2382 |
|
TBool K::CheckForSimulatedAllocFail()
	{
#ifdef _DEBUG
	// Debug builds only: ask the kernel heap whether a simulated allocation
	// failure should be injected (heap-failure testing support).
	if(K::Allocator)
		return ((RHeapK*)K::Allocator)->CheckForSimulatedAllocFail();
#endif
	// Release builds (or before the allocator exists): never fail.
	return EFalse;
	}
|
2391 |
|
2392 |
|
2393 /** |
|
2394 Gets the current Symbian OS thread. |
|
2395 |
|
2396 Note that if this function is called from an ISR or an IDFC, then it returns |
|
2397 a reference to the interrupted thread. |
|
2398 Note also that this function assumes that the current thread is a Symbian OS |
|
2399 thread. The result will not be sensible if it is a raw nanokernel thread. |
|
2400 |
|
2401 @return A reference to the current thread. |
|
2402 |
|
2403 @pre Call in a thread context. |
|
2404 */ |
|
2405 EXPORT_C DThread& Kern::CurrentThread() |
|
2406 { |
|
2407 CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_NOT_IDFC,"Kern::CurrentThread()"); |
|
2408 return *TheCurrentThread; |
|
2409 } |
|
2410 |
|
2411 |
|
2412 |
|
2413 |
|
2414 /** |
|
2415 Gets the current process. |
|
2416 |
|
2417 The current process is that to which the current thread belongs. |
|
2418 |
|
2419 Note that if this function is called from an ISR or an IDFC, then the |
|
2420 associated thread is the interrupted thread. |
|
2421 Note also that this function assumes that the current thread is a Symbian OS |
|
2422 thread. The result will not be sensible if it is a raw nanokernel thread. |
|
2423 |
|
2424 @return A reference to the current process. |
|
2425 |
|
2426 @pre Call in a thread context. |
|
2427 |
|
2428 @see Kern::CurrentThread() |
|
2429 */ |
|
2430 EXPORT_C DProcess& Kern::CurrentProcess() |
|
2431 { |
|
2432 CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_NOT_IDFC,"Kern::CurrentProcess()"); |
|
2433 return *TheCurrentThread->iOwningProcess; |
|
2434 } |
|
2435 |
|
2436 |
|
DThread* K::ThreadEnterCS()
	{
	// Enter with the system lock held; enter a critical section first, then
	// release the system lock. Returns the current thread for convenience.
	NKern::ThreadEnterCS();
	NKern::UnlockSystem();
	return TheCurrentThread;
	}
|
2443 |
|
DThread* K::ThreadLeaveCS()
	{
	// Inverse of K::ThreadEnterCS(): retake the system lock, then leave the
	// critical section. Returns holding the system lock.
	NKern::LockSystem();
	NKern::ThreadLeaveCS();
	return TheCurrentThread;
	}
|
2450 |
|
2451 DObject* K::ThreadEnterCS(TInt aHandle, TInt aType) |
|
2452 // |
|
2453 // Enter a thread critical section, translate a handle and open the object |
|
2454 // Return a pointer to the object |
|
2455 // Enter with system locked, leave with system unlocked |
|
2456 // |
|
2457 { |
|
2458 DObject* pO=NULL; |
|
2459 if (aType>=0) |
|
2460 pO=TheCurrentThread->ObjectFromHandle(aHandle,aType); |
|
2461 else |
|
2462 pO=TheCurrentThread->ObjectFromHandle(aHandle); |
|
2463 if (!pO || pO->Open()) |
|
2464 K::PanicCurrentThread(EBadHandle); |
|
2465 NKern::ThreadEnterCS(); |
|
2466 NKern::UnlockSystem(); |
|
2467 return pO; |
|
2468 } |
|
2469 |
|
2470 TUint32 K::KernelConfigFlags() |
|
2471 { |
|
2472 TUint32 flags = TheSuperPage().KernelConfigFlags(); |
|
2473 if(TEST_DEBUG_MASK_BIT(KTESTLATENCY)) |
|
2474 flags &= ~EKernelConfigPlatSecDiagnostics; |
|
2475 |
|
2476 TBool codePagingSupported = K::MemModelAttributes & EMemModelAttrCodePaging; |
|
2477 if (!codePagingSupported) |
|
2478 flags = (flags & ~EKernelConfigCodePagingPolicyMask) | EKernelConfigCodePagingPolicyNoPaging; |
|
2479 |
|
2480 TBool dataPagingSupported = K::MemModelAttributes & EMemModelAttrDataPaging; |
|
2481 if (!dataPagingSupported) |
|
2482 flags = (flags & ~EKernelConfigDataPagingPolicyMask) | EKernelConfigDataPagingPolicyNoPaging; |
|
2483 |
|
2484 return flags; |
|
2485 } |
|
2486 |
|
void signal_sem(TAny* aPtr)
	{
	// Generic callback (used by WaitForIdle's DFC and timer): signal the
	// fast semaphore passed as the opaque argument.
	NKern::FSSignal((NFastSemaphore*)aPtr);
	}
|
2491 |
|
TInt WaitForIdle(TInt aTimeoutMilliseconds)
	{
	// Wait until the system goes idle, or until the timeout expires,
	// whichever happens first.
	NFastSemaphore s(0);
	TDfc idler(&signal_sem, &s, Kern::SvMsgQue(), 0); // supervisor thread, priority 0, so will run after destroyed DFC
	NTimer timer(&signal_sem, &s);
	idler.QueueOnIdle();
	timer.OneShot(NKern::TimerTicks(aTimeoutMilliseconds), ETrue); // runs in DFCThread1
	NKern::FSWait(&s); // wait for either idle DFC or timer
	TBool timeout = idler.Cancel(); // cancel idler, return TRUE if it hadn't run
	TBool tmc = timer.Cancel(); // cancel timer, return TRUE if it hadn't expired
	// If neither cancel succeeded, both events fired: consume the second
	// semaphore signal so the semaphore is left balanced.
	if (!timeout && !tmc)
		NKern::FSWait(&s); // both the DFC and the timer went off - wait for the second one
	// 'timeout' set means the idle DFC never ran before the timer fired.
	if (timeout)
		return KErrTimedOut;
	return KErrNone;
	}
|
2508 |
|
TInt K::KernelHal(TInt aFunction, TAny* a1, TAny* /*a2*/)
	{
	// Dispatcher for the EHalGroupKernel HAL group. a1/a2 are the raw
	// user-side arguments; results are written back with kumemput32 or
	// Kern::InfoCopy, which perform user-memory-safe copies.
	TInt r=KErrNone;
	switch (aFunction)
		{
		case EKernelHalMemoryInfo:
			{
			// Fill a TMemoryInfoV1 snapshot; the volatile fields are read
			// under the system lock for consistency.
			TMemoryInfoV1Buf infoBuf;
			TMemoryInfoV1& info=infoBuf();
			info.iTotalRamInBytes=TheSuperPage().iTotalRamSize;
			info.iTotalRomInBytes=TheSuperPage().iTotalRomSize;
			info.iMaxFreeRamInBytes=K::MaxFreeRam;
			NKern::LockSystem();
			info.iFreeRamInBytes=Kern::FreeRamInBytes();
			info.iInternalDiskRamInBytes=TheSuperPage().iRamDriveSize;
			NKern::UnlockSystem();
			info.iRomIsReprogrammable=ETrue;
			Kern::InfoCopy(*(TDes8*)a1,infoBuf);
			break;
			}
/* Deprecated in 6.0 ??
		case EKernelHalRomInfo:
			{
			TRomInfoV1Buf infoBuf;
			TRomInfoV1& info=infoBuf();
			memcpy(&info,&TheSuperPage().iRomConfig[0],sizeof(TRomInfoV1));
			Kern::InfoCopy(*(TDes8*)a1,infoBuf);
			break;
			}
*/
		case EKernelHalStartupReason:
			// Simple superpage field reads, copied out to user memory.
			kumemput32(a1,&TheSuperPage().iStartupReason,sizeof(TMachineStartupType));
			break;
		case EKernelHalFaultReason:
			kumemput32(a1,&TheSuperPage().iKernelFault,sizeof(TInt));
			break;
		case EKernelHalExceptionId:
			kumemput32(a1,&TheSuperPage().iKernelExcId,sizeof(TInt));
			break;
		case EKernelHalExceptionInfo:
			kumemput32(a1,&TheSuperPage().iKernelExcInfo,sizeof(TExcInfo));
			break;
		case EKernelHalCpuInfo:
			r=KErrNotSupported;
			break;
		case EKernelHalPageSizeInBytes:
			{
			TInt pageSize=M::PageSizeInBytes();
			kumemput32(a1,&pageSize,sizeof(TInt));
			break;
			}
		case EKernelHalTickPeriod:
			{
			kumemput32(a1,&K::TickQ->iTickPeriod,sizeof(TInt));
			break;
			}
		case EKernelHalNTickPeriod:
			{
			TInt period=NTickPeriod();
			kumemput32(a1,&period,sizeof(TInt));
			break;
			}
		case EKernelHalFastCounterFrequency:
			{
			TInt freq=NKern::FastCounterFrequency();
			kumemput32(a1,&freq,sizeof(TInt));
			break;
			}
		case EKernelHalMemModelInfo:
			// Returned directly in the function result, not via a1.
			r=(TInt)K::MemModelAttributes;
			break;
		case EKernelHalHardwareFloatingPoint:
			TUint32 types;
			r=K::FloatingPointTypes(types);
			kumemput32(a1,&types,sizeof(TUint32));
			break;

		case EKernelHalGetNonsecureClockOffset:
			kumemput32(a1, &K::NonSecureOffsetSeconds, sizeof(K::NonSecureOffsetSeconds));
			break;
		case EKernelHalSetNonsecureClockOffset:
			// Caller must hold WriteDeviceData to change clock configuration.
			if(!Kern::CurrentThreadHasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by KernelHal function")))
				r=KErrPermissionDenied;
			else
				{
				// Only allow the nonsecure offset to be set *once* (i.e. by halsettings.exe during startup).
				// Subsequent updates to this value are of course done through setting the
				// nonsecure system time.
				if (K::SecureClockStatus & ESecureClockOffsetPresent)
					r = KErrGeneral;
				else
					{
					// Update the nonsecure offset not by writing it directly, but by using the
					// time-setting API. This will also cause the software clock to be updated
					// with the offset, while leaving the hardware clock untouched.
					TTimeK t = Kern::SystemTime();
					K::SecureClockStatus |= ESecureClockOffsetPresent;
					TInt64 offset = (TInt)a1;	// offset passed by value in a1, in seconds
					offset *= 1000000;	// seconds -> microseconds
					t += offset;
					NKern::ThreadEnterCS();
					Kern::SetSystemTime(t, 0);
					NKern::ThreadLeaveCS();
					}
				}
			break;
#ifdef __SMP__
		case EKernelHalSmpSupported:
			r = KErrNone;
			break;
#endif
		case EKernelHalNumLogicalCpus:
#ifdef __SMP__
			r = NKern::NumberOfCpus();
#else
			r = 1;
#endif
			break;
		case EKernelHalSupervisorBarrier:
			{
			// Optionally wait (a1 = timeout in ms) for the system to go idle,
			// then round-trip a message through the supervisor queue so that
			// all previously queued supervisor work has completed.
			NKern::ThreadEnterCS();
			r = KErrNone;
			TInt timeout = (TInt)a1;
			if (timeout>0)
				{
				r = WaitForIdle(timeout);
				}
			if (r==KErrNone)
				{
				TMessageBase& m=Kern::Message();
				m.SendReceive(&K::SvBarrierQ);
				}
			NKern::ThreadLeaveCS();
			break;
			}
		case EKernelHalFloatingPointSystemId:
			TUint32 sysid;
			r=K::FloatingPointSystemId(sysid);
			kumemput32(a1,&sysid,sizeof(TUint32));
			break;

		case EKernelHalLockThreadToCpu:
			{
#ifdef __SMP__
			// Pin the current thread to the CPU index passed in a1.
			TUint32 cpuId = (TUint32)a1;
			if (cpuId < (TUint32)NKern::NumberOfCpus())
				{
				NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), cpuId);
				r = KErrNone;
				}
			else
				{
				r = KErrArgument;
				}
#else
			// Single-CPU build: trivially "locked" already.
			r = KErrNone;
#endif
			break;
			}

		case EKernelHalConfigFlags:
			// return bottom 31 bits of config flags so as not to signal an error
			r=K::KernelConfigFlags() & 0x7fffffff;
			break;

		default:
			r=KErrNotSupported;
			break;
		}
	return r;
	}
|
2680 |
|
2681 void K::CheckKernelUnlocked() |
|
2682 { |
|
2683 if (NKern::KernelLocked() || NKern::HeldFastMutex()) |
|
2684 K::Fault(K::EPanicWhileKernelLocked); |
|
2685 } |
|
2686 |
|
2687 void K::CheckFileServerAccess() |
|
2688 { |
|
2689 DProcess* pP=&Kern::CurrentProcess(); |
|
2690 if (pP!=K::TheKernelProcess && pP!=K::TheFileServerProcess) |
|
2691 K::PanicKernExec(EAccessDenied); |
|
2692 } |
|
2693 |
|
void K::SetMachineConfiguration(const TDesC8& aConfig)
//
// Set the platform dependant machine configuration.
// NOTE: We assume the machine configuration is small enough
// that it can be copied with the kernel locked without adversely
// affecting real-time performance. On EIGER this means about 2K.
// LATER: This 2K has been reduced to 512 bytes, which could be getting a bit tight here.
//
	{
	// Obtain a modifiable descriptor over the platform's configuration area,
	// then overwrite it under the system lock so readers see a consistent copy.
	TPtr8 c(A::MachineConfiguration());
	NKern::LockSystem();
	c=aConfig;
	NKern::UnlockSystem();
	}
|
2708 |
|
2709 |
|
2710 |
|
2711 |
|
2712 /** |
|
2713 Initialises a new DFC queue. |
|
2714 |
|
2715 The function creates and starts a kernel thread to process the supplied DFC |
|
2716 queue. On successful completion, the queue is ready to start processing DFCs. |
|
2717 |
|
2718 The thread created for the queue will have its real time state enabled. If |
|
2719 this is not the desired behaviour then TDynamicDfcQue::SetRealtimeState() can |
|
2720 be used to disable the real time state of the thread. |
|
2721 |
|
2722 @param aDfcQ A pointer to the DFC queue to be initialised. |
|
2723 @param aPriority The thread priority for the queue. |
|
2724 @param aName A pointer to a descriptor containing the name for the queue |
|
thread. If NULL (the default), a unique name of the form
|
2726 'DfcThreadNNN' is generated for the queue, where NNN |
|
2727 represents three numeric characters. |
|
2728 |
|
2729 @return KErrNone, if successful, otherwise one of the other system-wide |
|
2730 error codes. |
|
2731 |
|
2732 @pre Calling thread must be in a critical section. |
|
2733 @pre Interrupts must be enabled. |
|
2734 @pre Kernel must be unlocked. |
|
2735 @pre No fast mutex can be held. |
|
2736 @pre Call in a thread context. |
|
2737 @pre Can be used in a device driver. |
|
2738 |
|
2739 @see Kern::DfcQCreate() |
|
2740 @see TDynamicDfcQue::SetRealtimeState() |
|
2741 */ |
|
EXPORT_C TInt Kern::DfcQInit(TDfcQue* aDfcQ, TInt aPriority, const TDesC* aName)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::DfcQInit");
	__KTRACE_OPT(KDFC,Kern::Printf("Kern::DfcQInit %d at %08x",aPriority,aDfcQ));
	// Describe the supervisor thread that will service this DFC queue.
	SThreadCreateInfo info;
	info.iType=EThreadSupervisor;
	info.iFunction=(TThreadFunction)TDfcQue::ThreadFunction;
	info.iPtr=aDfcQ;	// passed as the argument to TDfcQue::ThreadFunction
	info.iSupervisorStack=NULL;
	info.iSupervisorStackSize=0;	// zero means use default value
	info.iInitialThreadPriority=aPriority;
	if (aName)
		info.iName.Set(*aName);
	else
		{
		// No name supplied: generate 'DfcThreadNNN' from the monotonically
		// increasing K::DfcQId counter (atomic, so names stay unique).
		TBuf<16> n(KLitDfcThread());
		n.AppendNum((TInt)__e32_atomic_add_ord32(&K::DfcQId, 1));
		info.iName.Set(n);
		}
	info.iTotalSize = sizeof(info);
	TInt r=Kern::ThreadCreate(info);
	if (r==KErrNone)
		{
		DThread* pT=(DThread*)info.iHandle;
		__KTRACE_OPT(KDFC,Kern::Printf("TDfcQue thread %O at %08x",pT,pT));
		// The queue records the nanothread; DFCs are dispatched on it.
		aDfcQ->iThread=&pT->iNThread;
#ifndef __DFC_THREADS_NOT_REALTIME
		// Dfc threads are real time by default when data paging is enabled.
		TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
		if (dataPolicy != EKernelConfigDataPagingPolicyNoPaging)
			pT->SetRealtimeState(ERealtimeStateOn);
#endif
		// Thread is created suspended; start it running the DFC loop.
		Kern::ThreadResume(*pT);
		}
	return r;
	}
|
2778 |
|
2779 |
|
2780 |
|
2781 |
|
2782 /** |
|
2783 Performs a polling operation at specified regular intervals, for a specified |
|
2784 maximum number of attempts. |
|
2785 |
|
2786 The polling operation is performed by the specified function. The function is |
|
2787 called repeatedly at each interval until it either returns true, or the maximum |
|
2788 number of attempts has been reached. |
|
2789 |
|
2790 @param aFunction The function implementing the polling operation. |
|
2791 @param aPtr An argument passed to the polling function. |
|
2792 @param aPollPeriodMs The interval between successive attempts at calling the |
|
2793 polling function, in milliseconds. Note that the the time |
|
2794 period is converted into ticks, and may be rounded up to |
|
2795 give an integral number of ticks. |
|
2796 @param aMaxPoll The maximum number of attempts at calling the polling |
|
2797 function before timing out. |
|
2798 |
|
2799 @return KErrNone, if the polling function returns true; |
|
2800 KErrBadPower, if the device's power status is no longer good; |
|
2801 KErrTimedOut, if the maximum number of attempts has been reached. |
|
2802 |
|
2803 @pre Interrupts must be enabled. |
|
2804 @pre Kernel must be unlocked. |
|
2805 @pre No fast mutex can be held. |
|
2806 @pre Call in a thread context. |
|
2807 @pre Can be used in a device driver. |
|
2808 */ |
|
2809 EXPORT_C TInt Kern::PollingWait(TPollFunction aFunction, TAny* aPtr, TInt aPollPeriodMs, TInt aMaxPoll) |
|
2810 { |
|
2811 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::PollingWait"); |
|
2812 TInt ticks=NKern::TimerTicks(aPollPeriodMs); |
|
2813 FOREVER |
|
2814 { |
|
2815 if ((*aFunction)(aPtr)) |
|
2816 return KErrNone; |
|
2817 if (!Kern::PowerGood()) |
|
2818 return KErrBadPower; |
|
2819 if (--aMaxPoll==0) |
|
2820 return KErrTimedOut; |
|
2821 NKern::Sleep(ticks); |
|
2822 } |
|
2823 } |
|
2824 |
|
2825 TUint32 K::CompressKHeapPtr(const TAny* aPtr) |
|
2826 { |
|
2827 TUint32 r=(TUint32(aPtr)-TUint32(K::HeapInfo.iBase))>>2; |
|
2828 __ASSERT_DEBUG(r<(1u<<26),K::Fault(K::EInvalidKernHeapCPtr)); |
|
2829 return r; |
|
2830 } |
|
2831 |
|
2832 const TAny* K::RestoreKHeapPtr(TUint32 aCPtr) |
|
2833 { |
|
2834 __ASSERT_DEBUG(aCPtr<(1u<<26),K::Fault(K::EInvalidKernHeapCPtr)); |
|
2835 return (const TAny*)(TUint32(K::HeapInfo.iBase)+(aCPtr<<2)); |
|
2836 } |
|
2837 |
|
2838 TUint K::NewId() |
|
2839 { |
|
2840 TUint id = __e32_atomic_add_ord32(&K::NextId, 1); |
|
2841 if(id==~0u) |
|
2842 K::Fault(K::EOutOfIds); |
|
2843 return id; |
|
2844 } |
|
2845 |
|
2846 /** |
|
2847 @pre No fast mutex can be held. |
|
2848 @pre Call in a thread context. |
|
2849 @pre Kernel must be unlocked |
|
2850 @pre interrupts enabled |
|
2851 */ |
|
EXPORT_C void Kern::CodeSegGetMemoryInfo(DCodeSeg& aCodeSeg, TModuleMemoryInfo& aInfo, DProcess* aProcess)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::CodeSegGetMemoryInfo");
	// Delegate straight to the code segment's own implementation.
	aCodeSeg.GetMemoryInfo(aInfo, aProcess);
	}
|
2857 |
|
2858 /** |
|
2859 Discovers the DThread associated with an NThread. |
|
2860 |
|
2861 @param aNThread The NThread who's counterpart DThread is to be found. |
|
2862 |
|
2863 @return A DThread or NULL if there is no counterpart DThread. |
|
2864 */ |
|
2865 EXPORT_C DThread* Kern::NThreadToDThread(NThread* aNThread) |
|
2866 { |
|
2867 if (aNThread && aNThread->iHandlers==&EpocThreadHandlers) |
|
2868 return _LOFF(aNThread,DThread, iNThread); |
|
2869 else |
|
2870 return NULL; |
|
2871 } |
|
2872 |
|
2873 EXPORT_C TKernelHookFn Kern::SetHook(TKernelHookType aType, TKernelHookFn aFunction, TBool aOveride /*=EFalse*/) |
|
2874 { |
|
2875 if((TUint)aType>=ENumKernelHooks) |
|
2876 K::Fault(K::EBadKernelHookType); |
|
2877 TKernelHookFn oldFn = (TKernelHookFn)__e32_atomic_swp_ord_ptr(&K::KernelHooks[aType], aFunction); |
|
2878 if(oldFn && !aOveride) |
|
2879 K::Fault(K::EKernelHookAlreadySet); |
|
2880 return oldFn; |
|
2881 } |
|
2882 |
|
2883 /** |
|
2884 Wait for a length of time specified in nanoseconds. |
|
2885 |
|
2886 This function is typically implemented using a busy-wait, so should only be |
|
2887 called to wait for short periods. |
|
2888 |
|
2889 @param aInterval The length of time to wait in nanoseconds. |
|
2890 */ |
|
EXPORT_C void Kern::NanoWait(TUint32 aInterval)
	{
	// Dispatch to the installed nanowait handler (typically a busy-wait,
	// per the API documentation above).
	K::NanoWaitHandler()(aInterval);
	}
|
2895 |
|
// C-linkage wrapper so non-C++ callers can reach Kern::NanoWait.
extern "C" void nanowait(TUint32 aInterval)
	{
	Kern::NanoWait(aInterval);
	}
|
2900 |
|
2901 |
|
2902 /** |
|
Checks kernel preconditions.
|
2904 If some precondition is not met and the appropriate macro is defined, this function will print information about broken precondition |
|
2905 to debug output and fault the system |
|
2906 |
|
2907 @param aConditionMask 32-bit bitmask specifying which particular preconditions should be checked |
|
2908 @param aFunction Title of the calling function |
|
2909 */ |
|
2910 #ifdef _DEBUG |
|
2911 #if (defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)||defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__)) |
|
extern "C" TInt CheckPreconditions(TUint32 aConditionMask, const char* aFunction, TLinAddr aAddr)
	{
	// Nothing can usefully be checked while the kernel is still initialising
	// or after it has crashed.
	if (K::Initialising || NKern::Crashed())
		return KErrNone;

	// m accumulates the preconditions that are still violated; each check
	// below clears its bit when the corresponding condition is satisfied.
	TUint32 m = aConditionMask;
	NThread* nt = 0;
	DThread* t = 0;
	NKern::TContext ctx = (NKern::TContext)NKern::CurrentContext();
	if (ctx == NKern::EThread)
		{
		nt = NKern::CurrentThread();
		t = Kern::NThreadToDThread(nt);	// NULL for raw (non-Symbian) nanothreads
		}
	if (m & MASK_NO_FAST_MUTEX)
		{
		if (!nt || !NKern::HeldFastMutex())
			m &= ~MASK_NO_FAST_MUTEX;
		}
	if (m & MASK_NO_CRITICAL)
		{
		// User threads: check the thread's CS count; otherwise (or for raw
		// nanothreads) fall back on the nanothread CS count alone.
		if (t && t->iThreadType==EThreadUser && nt->iCsCount==0)
			m &= ~MASK_NO_CRITICAL;
		else if (!nt || nt->iCsCount==0)
			m &= ~MASK_NO_CRITICAL;
		}
	if (m & MASK_CRITICAL)
		{
		// Supervisor threads are implicitly 'critical'; user threads must
		// have entered a critical section explicitly.
		if (t && (t->iThreadType!=EThreadUser || nt->iCsCount>0))
			m &= ~MASK_CRITICAL;
		else if (!nt || nt->iCsCount>0)
			m &= ~MASK_CRITICAL;
		}
	if (m & MASK_KERNEL_LOCKED)
		{
		if (NKern::KernelLocked())
			m &= ~MASK_KERNEL_LOCKED;
		}
	if (m & MASK_KERNEL_UNLOCKED)
		{
		if (!NKern::KernelLocked())
			m &= ~MASK_KERNEL_UNLOCKED;
		}
	if (m & MASK_KERNEL_LOCKED_ONCE)
		{
		if (NKern::KernelLocked(1))
			m &= ~MASK_KERNEL_LOCKED_ONCE;
		}
	if (m & MASK_INTERRUPTS_ENABLED)
		{
		if (InterruptsStatus(ETrue))
			m &= ~MASK_INTERRUPTS_ENABLED;
		}
	if (m & MASK_INTERRUPTS_DISABLED)
		{
		if (InterruptsStatus(EFalse))
			m &= ~MASK_INTERRUPTS_DISABLED;
		}
	if (m & MASK_SYSTEM_LOCKED)
		{
		if (TheScheduler.iLock.HeldByCurrentThread())
			m &= ~MASK_SYSTEM_LOCKED;
		}
	if (m & MASK_NOT_THREAD)
		{
		if (ctx!=NKern::EThread)
			m &= ~MASK_NOT_THREAD;
		}
	if (m & MASK_NOT_ISR)
		{
		if (ctx!=NKern::EInterrupt)
			m &= ~MASK_NOT_ISR;
		}
	if (m & MASK_NOT_IDFC)
		{
		if (ctx!=NKern::EIDFC)
			m &= ~MASK_NOT_IDFC;
		}
	if (m & MASK_NO_CRITICAL_IF_USER)
		{
		if (t && (t->iThreadType!=EThreadUser || nt->iCsCount==0))
			m &= ~MASK_NO_CRITICAL_IF_USER;
		else if (!nt || nt->iCsCount==0)
			m &= ~MASK_NO_CRITICAL_IF_USER;
		}
	if (m & MASK_NO_RESCHED)
		{
		if (!nt || NKern::KernelLocked())
			m &= ~MASK_NO_RESCHED;
		}
	// All requested preconditions hold (note MASK_ALWAYS_FAIL is never
	// cleared, so it always falls through to the reporting code below).
	if (!m)
		return KErrNone;
	// At least one precondition was violated - report each remaining bit.
	if (aFunction)
		Kern::Printf("In function %s :-", aFunction);
	else
		Kern::Printf("At address %08x :-", aAddr);
	if (m & MASK_NO_FAST_MUTEX)
		Kern::Printf("Assertion failed: No fast mutex must be held");
	if (m & MASK_NO_CRITICAL)
		Kern::Printf("Assertion failed: Calling thread must not be in critical section");
	if (m & MASK_CRITICAL)
		Kern::Printf("Assertion failed: Calling thread must be in critical section");
	if (m & MASK_KERNEL_LOCKED)
		Kern::Printf("Assertion failed: Kernel must be locked");
	if (m & MASK_KERNEL_UNLOCKED)
		Kern::Printf("Assertion failed: Kernel must not be locked");
	if (m & MASK_KERNEL_LOCKED_ONCE)
		Kern::Printf("Assertion failed: Kernel must be locked exactly once");
	if (m & MASK_INTERRUPTS_ENABLED)
		Kern::Printf("Assertion failed: Interrupts must be enabled");
	if (m & MASK_INTERRUPTS_DISABLED)
		Kern::Printf("Assertion failed: Interrupts must be disabled");
	if (m & MASK_SYSTEM_LOCKED)
		Kern::Printf("Assertion failed: System lock must be held");
	if (m & MASK_NOT_THREAD)
		Kern::Printf("Assertion failed: Don't call in thread context");
	if (m & MASK_NOT_ISR)
		Kern::Printf("Assertion failed: Don't call in ISR context");
	if (m & MASK_NOT_IDFC)
		Kern::Printf("Assertion failed: Don't call in IDFC context");
	if (m & MASK_NO_CRITICAL_IF_USER)
		Kern::Printf("Assertion failed: Don't call from user thread in critical section");
	if (m & MASK_ALWAYS_FAIL)
		Kern::Printf("Assertion failed");
	if (m & MASK_NO_RESCHED)
		Kern::Printf("Assertion failed: Don't call from thread with kernel unlocked");

#ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__
	// Fault builds treat any violation as fatal (when a function name is
	// available to fault with); otherwise report via the return code.
	if (aFunction)
		Kern::Fault(aFunction, 0);
	return KErrGeneral;
#else
	// Warning-only builds just trace and carry on.
	return KErrNone;
#endif//__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
	}
|
3047 #endif//__KERNEL_APIS_CONTEXT_CHECKS_WARNING__||__KERNEL_APIS_CONTEXT_CHECKS_FAULT__ |
|
3048 #endif |
|
3049 |
|
3050 |
|
3051 /** |
|
3052 Set the behaviour of text tracing. (Kern::Printf, RDebug::Print etc.) |
|
3053 |
|
3054 For example, to disable text trace output to serial port, use: |
|
3055 @code |
|
3056 Kern::SetTextTraceMode(Kern::ESerialOutNever,Kern::ESerialOutMask); |
|
3057 @endcode |
|
3058 |
|
3059 To query the current behaviour: |
|
3060 @code |
|
3061 TUint textTraceMode = Kern::SetTextTraceMode(0,0); |
|
3062 @endcode |
|
3063 |
|
3064 @param aMode Values formed from enum TTextTraceMode. |
|
3065 @param aMask Bitmask indicating which flags are to be modified. |
|
3066 @return The text trace mode in operation before this function was called. |
|
3067 |
|
3068 @publishedPartner |
|
3069 */ |
|
EXPORT_C TUint Kern::SetTextTraceMode(TUint aMode, TUint aMask)
	{
	// Atomically clear the bits selected by aMask and OR in the new mode bits,
	// returning the previous mode word. With aMask==0 this is a pure query.
	return __e32_atomic_axo_ord32(&K::TextTraceMode, ~aMask, aMode&aMask);
	}
|
3074 |
|
3075 |
|
void K::TextTrace(const TDesC8& aText, TTraceSource aTraceSource, TBool aNewLine)
	{
	TBool crashed = NKern::Crashed();
	const TUint8* ptr = aText.Ptr();
	TInt size = aText.Size();

	// Handle BTrace first...
	TUint category;
	switch(aTraceSource)
		{
	case EUserTrace:
		category = BTrace::ERDebugPrintf;
		break;
	case EKernelTrace:
		category = BTrace::EKernPrintf;
		break;
	case EPlatSecTrace:
		category = BTrace::EPlatsecPrintf;
		break;
	default:
		category = ~0u;	// unknown source - skip BTrace output entirely
		break;
		}
	TInt result = 0;
	if(category!=~0u)
		{
		// Tag the trace with the originating thread id when one is available
		// (i.e. we are in thread context after kernel initialisation).
		TUint threadId = KNullThreadId;
		if(!K::Initialising && NKern::CurrentContext()==NKern::EThread)
			{
			NThread* n = NKern::CurrentThread();
			if(n)
				{
				DThread* t = Kern::NThreadToDThread(n);
				if(t)
					threadId = t->iId;
				}
			}
		result = BTraceContextBig(category,0,threadId,ptr,size);
		}

	// Enter a critical section around the trace handler and serial output,
	// but only when it is safe: normal thread context, kernel unlocked,
	// interrupts enabled and the system has not crashed.
	NThread* csThread = 0;
	if (!K::Initialising && NKern::CurrentContext() == NKern::EThread && !NKern::KernelLocked() && !crashed && InterruptsStatus(ETrue))
		{
		csThread = NCurrentThread();
		NKern::_ThreadEnterCS();
		}

	// Give any installed trace handler the text, unless BTrace already
	// consumed it.
	if(!result)
		if(K::TraceHandler())
			result = K::TraceHandler()(aText, aTraceSource);

	// Send the text to the debug (serial) port unless suppressed, or unless
	// something already consumed it and the mode is not 'always'.
	TUint mode = K::TextTraceMode;
	if(mode!=Kern::ESerialOutNever)
		if(mode==Kern::ESerialOutAlways || !result)
			A::DebugPrint(ptr,size,aNewLine);

	if (csThread)
		NKern::_ThreadLeaveCS();
	}
|
3135 |
|
3136 #if defined(_DEBUG) && !defined(__SMP__) |
|
// Returns non-zero when the 'crazy scheduling' option is enabled in the
// superpage kernel configuration flags (debug, non-SMP builds only).
TInt KCrazySchedulerEnabled()
	{
	return TheSuperPage().KernelConfigFlags() & EKernelConfigCrazyScheduling;
	}
|
3141 #endif |
|
3142 |
|
3143 /* |
|
3144 TClientRequest states and synchronization |
|
3145 |
|
3146 TClientRequest objects are synchronized based on atomic updates to the iStatus |
|
3147 member using __e32_atomic_xxx_yyy_ptr() operations. |
|
3148 |
|
3149 The contents of the iStatus member are made up of a TRequestStatus pointer in |
|
3150 bit 2-31 and two flag bits in bits 0 and 1. |
|
3151 |
|
3152 The object can be in the following states indicated by the value in iStatus: |
|
3153 |
|
3154 State: Pointer: Bit 1: Bit 0: |
|
3155 --------------------------------- |
|
3156 FREE zero 0 0 |
|
3157 READY non-zero 0 0 |
|
3158 INUSE non-zero 1 0 |
|
3159 CLOSING non-zero 1 1 |
|
3160 DEAD any 0 1 |
|
3161 |
|
3162 The following state transitions are possible: |
|
3163 |
|
3164 Start state: Operation: End state: |
|
3165 ------------------------------------ |
|
3166 FREE Reset FREE |
|
3167 Close DEAD |
|
3168 SetStatus READY |
|
3169 |
|
3170 READY Reset FREE |
|
3171 Close DEAD |
|
3172 Queue INUSE |
|
3173 |
|
3174 INUSE Callback FREE |
|
3175 Close CLOSING |
|
3176 |
|
3177 CLOSING Callback DEAD |
|
3178 |
|
3179 When the object enters the DEAD state, it is deleted. |
|
3180 */ |
|
3181 |
|
// No-op variadic sink: swallows trace output when CLIENT_REQUEST_DEBUG is
// mapped to it (see the #define below).
inline void IgnorePrintf(...) { }
|
3183 |
|
3184 #define CLIENT_REQUEST_DEBUG IgnorePrintf |
|
3185 //#define CLIENT_REQUEST_DEBUG Kern::Printf |
|
3186 |
|
3187 /** |
|
3188 Create a TClientRequest object. |
|
3189 |
|
3190 The object is initially in the EFree state. |
|
3191 |
|
3192 @param aRequestPtr A reference to the TClientRequest pointer which is to be set |
|
3193 to the newly created object. |
|
3194 |
|
3195 @return KErrNone, if successful, otherwise one of the other system-wide error codes. |
|
3196 |
|
3197 @see TClientRequest::State() |
|
3198 |
|
3199 @publishedPartner |
|
3200 @released |
|
3201 */ |
|
3202 EXPORT_C TInt Kern::CreateClientRequest(TClientRequest*& aRequestPtr) |
|
3203 { |
|
3204 TClientRequest* self = (TClientRequest*)Kern::Alloc(sizeof(TClientRequest)); |
|
3205 if (!self) |
|
3206 return KErrNoMemory; |
|
3207 new (self) TClientRequest; |
|
3208 T_UintPtr zero = 0; |
|
3209 if (!__e32_atomic_cas_ord_ptr(&aRequestPtr, &zero, self)) |
|
3210 { |
|
3211 self->Close(); |
|
3212 return KErrInUse; |
|
3213 } |
|
3214 return KErrNone; |
|
3215 } |
|
3216 |
|
3217 /** |
|
3218 @prototype |
|
3219 @internalTechnology |
|
3220 */ |
|
3221 EXPORT_C TInt Kern::CreateClientDataRequestBase(TClientDataRequestBase*& aRequestPtr, TInt aSize) |
|
3222 { |
|
3223 TClientDataRequestBase* self = (TClientDataRequestBase*)Kern::Alloc(sizeof(TClientDataRequestBase) + aSize); |
|
3224 if (!self) |
|
3225 return KErrNoMemory; |
|
3226 new (self) TClientDataRequestBase(aSize); |
|
3227 T_UintPtr zero = 0; |
|
3228 if (!__e32_atomic_cas_ord_ptr(&aRequestPtr, &zero, self)) |
|
3229 { |
|
3230 self->Close(); |
|
3231 return KErrInUse; |
|
3232 } |
|
3233 return KErrNone; |
|
3234 } |
|
3235 |
|
3236 /** |
|
3237 @prototype |
|
3238 @internalTechnology |
|
3239 */ |
|
3240 EXPORT_C TInt Kern::CreateClientDataRequestBase2(TClientDataRequestBase2*& aRequestPtr, TInt aSize1, TInt aSize2) |
|
3241 { |
|
3242 TInt size = _ALIGN_UP(sizeof(TClientDataRequestBase2), 8) + _ALIGN_UP(aSize1, 8) + aSize2; |
|
3243 TClientDataRequestBase2* self = (TClientDataRequestBase2*)Kern::Alloc(size); |
|
3244 if (!self) |
|
3245 return KErrNoMemory; |
|
3246 new (self) TClientDataRequestBase2(aSize1, aSize2); |
|
3247 T_UintPtr zero = 0; |
|
3248 if (!__e32_atomic_cas_ord_ptr(&aRequestPtr, &zero, self)) |
|
3249 { |
|
3250 self->Close(); |
|
3251 return KErrInUse; |
|
3252 } |
|
3253 return KErrNone; |
|
3254 } |
|
3255 |
|
3256 /** |
|
3257 Destroy a TClientRequest object. |
|
3258 |
|
3259 The pointer to the object is set to NULL. |
|
3260 |
|
3261 @param aRequestPtr A reference to the TClientRequest pointer to free. |
|
3262 |
|
3263 @pre Calling thread must be in a critical section. |
|
3264 @pre Interrupts must be enabled. |
|
3265 @pre Kernel must be unlocked. |
|
3266 @pre No fast mutex can be held. |
|
3267 @pre Call in a thread context. |
|
3268 @pre Can be used in a device driver. |
|
3269 |
|
3270 @publishedPartner |
|
3271 @released |
|
3272 */ |
|
3273 EXPORT_C void Kern::DestroyClientRequest(TClientRequest*& aRequestPtr) |
|
3274 { |
|
3275 TClientRequest* request = (TClientRequest*)__e32_atomic_swp_rel_ptr(&aRequestPtr, 0); |
|
3276 if (request) |
|
3277 request->Close(); |
|
3278 } |
|
3279 |
|
TClientRequest::TClientRequest(TUserModeCallbackFunc aCallback)
	: TUserModeCallback(aCallback),
	  iStatus(0),			// zero status word == FREE state
	  iResult(KRequestPending)	// completion code written back by the callback
	{
	}
|
3286 |
|
void TClientRequest::Close()
	{
	CLIENT_REQUEST_DEBUG("%08x TClientRequest::Close", this);
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"TClientRequest::Close");
	// Atomically mark the object as closing. If a completion is in flight
	// (INUSE flag set) the completion callback frees the object when it runs;
	// otherwise we free it here.
	T_UintPtr status = (T_UintPtr)__e32_atomic_ior_ord_ptr(&iStatus, KClientRequestFlagClosing);
	CLIENT_REQUEST_DEBUG(" state == %d", GetState(status));
	__ASSERT_DEBUG(GetState(status) <= EInUse, K::Fault(K::EClientRequestCloseInWrongState));
	if (!(status & KClientRequestFlagInUse))
		Kern::AsyncFree(this); // must call async version since current thread may be exiting here
	}
|
3297 |
|
3298 /** |
|
Indicates whether the request is ready to be queued, in other words whether SetStatus() has been called on it.
|
3300 |
|
3301 Note that this method is not synchronised. If multiple threads are accessing this object (except by |
|
3302 calling Kern::QueueRequestComplete), then some form of external synchronisation is required. |
|
3303 |
|
3304 @publishedPartner |
|
3305 @released |
|
3306 */ |
|
3307 EXPORT_C TBool TClientRequest::IsReady() |
|
3308 { |
|
3309 T_UintPtr status = iStatus; // sample volatile value |
|
3310 return status && !(status & KClientRequestFlagMask); |
|
3311 } |
|
3312 |
|
TClientRequest::~TClientRequest()
	{
	// This should never be called because we use Kern::Free to free the object after calling
	// Close(). If this is called it means someone deleted a derived object without calling
	// Close().
	CLIENT_REQUEST_DEBUG("%08x TClientRequest::~TClientRequest", this);
	// Unconditional fault: reaching this destructor indicates a kernel bug.
	K::Fault(K::EClientRequestDeletedNotClosed);
	}
|
3321 |
|
3322 /** |
|
3323 Get the current state of this object. |
|
3324 |
|
3325 A TClientRequest object can be in one of three states, described by the TClientRequest::TState |
|
3326 enumeration. These are: |
|
3327 - EFree: The initial state |
|
3328 - EReady: The object has been set up with the TRequestStatus pointer of a client request, and is |
|
3329 ready to be queued for completion. |
|
3330 - EInUse: The object has been queued for completion, but this has not yet occurred. |
|
3331 - EClosing: The object has been queued for completion and then had Close() called on it, but |
|
completion has not yet occurred.
|
3333 |
|
3334 @return The state of the object. |
|
3335 */ |
|
TClientRequest::TState TClientRequest::State()
	{
	// Decode the state from a snapshot of the atomic status word.
	return GetState(iStatus);
	}
|
3340 |
|
3341 TClientRequest::TState TClientRequest::GetState(T_UintPtr aStatus) |
|
3342 { |
|
3343 if (aStatus == 0) |
|
3344 return EFree; |
|
3345 switch (aStatus & KClientRequestFlagMask) |
|
3346 { |
|
3347 case 0: |
|
3348 return EReady; |
|
3349 case KClientRequestFlagInUse: |
|
3350 return EInUse; |
|
3351 case KClientRequestFlagInUse | KClientRequestFlagClosing: |
|
3352 return EClosing; |
|
3353 } |
|
3354 return EBad; |
|
3355 } |
|
3356 |
|
3357 /** |
|
3358 Set the client's TRequestStatus pointer. |
|
3359 |
|
3360 This method should be called when the client initiates an asynchronous request. |
|
3361 If the object was initially in the EFree state this method puts it into the |
|
3362 EReady state, otherwise it does nothing. |
|
3363 |
|
3364 @return KErrNone if the object state has been transitioned from EFree to EReady |
|
3365 KErrInUse if the object was not initially in the EFree state |
|
3366 |
|
3367 @publishedPartner |
|
3368 @released |
|
3369 */ |
|
EXPORT_C TInt TClientRequest::SetStatus(TRequestStatus* aStatus)
	{
	CLIENT_REQUEST_DEBUG("%08x TClientRequest::SetStatus", this);
	// Return an error if the status pointer is bad. Don't fault the kernel as this would allow a
	// user thread to crash the system.
	if (((T_UintPtr)aStatus & KClientRequestFlagMask) != 0 || (T_UintPtr)aStatus == KClientRequestNullStatus)
		return KErrArgument;
	// A NULL client pointer is stored as the reserved 'null status' value so
	// that the status word stays non-zero (zero means FREE).
	T_UintPtr newStatus = aStatus ? (T_UintPtr)aStatus : KClientRequestNullStatus;
	T_UintPtr zero = 0;
	// Only the FREE -> READY transition is permitted; any other current state
	// makes the CAS fail and we report KErrInUse.
	return __e32_atomic_cas_ord_ptr(&iStatus, &zero, newStatus) ? KErrNone : KErrInUse; // acq?
	}
|
3381 |
|
3382 /** |
|
3383 Get the client's TRequestStatus pointer. |
|
3384 |
|
3385 @return The client's TRequestStatus pointer. |
|
3386 |
|
3387 @publishedPartner |
|
3388 @released |
|
3389 */ |
|
EXPORT_C TRequestStatus* TClientRequest::StatusPtr()
	{
	// Strip the two low flag bits to recover the client's pointer.
	return (TRequestStatus*)(iStatus & ~KClientRequestFlagMask);
	}
|
3394 |
|
3395 /** |
|
3396 Queue the request for completion. |
|
3397 |
|
3398 If the object is not in the EReady state, this method does nothing. Otherwise the client thread is |
|
3399 signalled immediately, and the object left in the EInUse state. When the client thread next runs, |
|
3400 the reason code is written back to it and the object is left in the EFree state. |
|
3401 |
|
3402 This method is only synchronised with respect to itself. Multiple threads can call this method |
|
3403 concurrently and only one will complete the request. |
|
3404 |
|
3405 @param aThread The client thread to which to write the reason code. |
|
3406 @param aRequest The client request object. |
|
3407 @param aReason The reason code with which to complete the request. |
|
3408 |
|
3409 @pre Call in a thread context. |
|
3410 @pre Kernel must be unlocked |
|
3411 @pre Interrupts enabled |
|
3412 |
|
3413 @publishedPartner |
|
3414 @released |
|
3415 */ |
|
EXPORT_C void Kern::QueueRequestComplete(DThread* aThread, TClientRequest* aRequest, TInt aReason)
	{
	CLIENT_REQUEST_DEBUG("%08x Kern::QueueRequestComplete %T %d", aRequest, aThread, aReason);
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED | MASK_INTERRUPTS_ENABLED | MASK_NOT_ISR | MASK_NOT_IDFC, "Kern::QueueRequestComplete");
	// StartComplete claims the request (READY -> INUSE); only the caller that
	// wins the claim goes on to signal the client thread in EndComplete.
	if (aRequest->StartComplete(aThread, aReason))
		aRequest->EndComplete(aThread);
	}
|
3423 |
|
TBool TClientRequest::StartComplete(DThread* aThread, TInt aReason)
	{
	// Enter a critical section so the completing thread cannot die while the
	// request is mid-completion; the matching ThreadLeaveCS is in EndComplete
	// (or below, on the failure path).
	NKern::ThreadEnterCS();
	// Claim the request with a CAS loop (READY -> INUSE) so that concurrent
	// completers race safely and exactly one wins.
	T_UintPtr status = iStatus;
	do {
		if (!status || (status & KClientRequestFlagMask))
			{
			// Not READY: either FREE, already being completed, or closing.
			CLIENT_REQUEST_DEBUG("status %08x request not ready", status);
			NKern::ThreadLeaveCS();
			return EFalse;
			}
		} while (!__e32_atomic_cas_ord_ptr(&iStatus, &status, status | KClientRequestFlagInUse));
	// We own the INUSE state now, so it is safe to store the reason code.
	iResult = aReason;
	(void)aThread;	// referenced only when BTRACE_REQUESTS is enabled
#ifdef BTRACE_REQUESTS
	BTraceContext12(BTrace::ERequests,BTrace::ERequestComplete,&aThread->iNThread,iStatus,aReason);
#endif
	return ETrue;
	}
|
3443 |
|
void TClientRequest::EndComplete(DThread* aThread)
	{
	// Hand this object to the client thread as a user-mode callback; the
	// callback writes the completion code back to user memory when it runs.
	TInt r = NKern::QueueUserModeCallback(&aThread->iNThread, this);
	if (r == KErrNone)
		{
		// Don't signal the request semaphore when the client supplied a NULL
		// TRequestStatus (stored as the reserved null-status value).
		if (iStatus != (KClientRequestNullStatus | KClientRequestFlagInUse))
			NKern::ThreadRequestSignal(&aThread->iNThread);
		}
	else
		{
		__NK_ASSERT_DEBUG(r == KErrDied);
		// Thread was exiting, queue it for cleanup by attaching it to
		// the supervisor thread and queueing a DFC to deal with it
		CLIENT_REQUEST_DEBUG(" queue callback failed, queueing for cleanup");
		NKern::QueueUserModeCallback(K::SvMsgQ->iThread, this);
		DeadClientCleanupDfc.Enque();
		}
	// Matches the ThreadEnterCS at the start of StartComplete.
	NKern::ThreadLeaveCS();
	}
|
3463 |
|
// DFC function (see DeadClientCleanupDfc use in EndComplete): discards
// user-mode callbacks that were re-queued to the supervisor thread because
// the client thread was exiting.
void TClientRequest::DoDeadClientCleanup(TAny*)
	{
	NKern::CancelUserModeCallbacks();
	}
|
3468 |
|
3469 /** |
|
3470 Reset this object to its initial state so that it can be re-used. |
|
3471 |
|
3472 The request pointer is cleared and the state of the object is set to EFree. |
|
3473 |
|
3474 This method may only be called when the object is in the EFree or EReady states. |
|
3475 |
|
3476 Note that this method is not synchronized. If multiple threads are accessing |
|
3477 this object (except by calling Kern::QueueRequestComplete), then some form of |
|
3478 external synchronisation is required. |
|
3479 |
|
3480 @publishedPartner |
|
3481 @released |
|
3482 */ |
|
3483 EXPORT_C void TClientRequest::Reset() |
|
3484 { |
|
3485 CLIENT_REQUEST_DEBUG("%08x TClientRequest::Reset", this); |
|
3486 T_UintPtr oldStatus = (T_UintPtr)__e32_atomic_swp_ord_ptr(&iStatus, 0); |
|
3487 CLIENT_REQUEST_DEBUG("oldStatus == %08x", oldStatus); |
|
3488 __ASSERT_DEBUG(GetState(oldStatus) <= EReady, K::Fault(K::EClientRequestResetInWrongState)); |
|
3489 } |
|
3490 |
|
3491 #ifndef __CLIENT_REQUEST_MACHINE_CODED__ |
|
3492 |
|
void TClientRequest::CallbackFunc(TAny* aData, TUserModeCallbackReason aReason)
	{
	TClientRequest* req = (TClientRequest*)aData;
	CLIENT_REQUEST_DEBUG("%08x TClientRequest::CallbackFunc", req);
	TInt result = req->iResult;	// capture before MakeFree resets it

	// Ensure request object can be reused before write to user-space takes place
	T_UintPtr statusPtr = req->MakeFree() & ~KClientRequestFlagMask;

	// Write the completion code back only when the callback actually runs in
	// the client thread (not on cancellation) and the client supplied a real
	// TRequestStatus pointer rather than the reserved null-status value.
	if (aReason == EUserModeCallbackRun && statusPtr != KClientRequestNullStatus)
		K::USafeWrite((TAny*)statusPtr, &result, sizeof(result));
	}
|
3505 |
|
3506 #endif |
|
3507 |
|
T_UintPtr TClientRequest::MakeFree()
	{
	// Move callback to the free state, deleting it if necessary
	CHECK_PRECONDITIONS(MASK_CRITICAL,"TClientRequest::MakeFree"); // needed for Kern::AsyncFree
	iResult = KRequestPending;	// ready for the next SetStatus/complete cycle
	// Atomically clear everything except the 'closing' flag. If Close() ran
	// while the completion was in flight, that flag is set and we are now
	// responsible for freeing the object.
	T_UintPtr oldStatus = (T_UintPtr)__e32_atomic_and_ord_ptr(&iStatus, KClientRequestFlagClosing);
	CLIENT_REQUEST_DEBUG("MakeFree %08x oldStatus %08x", this, oldStatus);
	__ASSERT_DEBUG(GetState(oldStatus)==EInUse || GetState(oldStatus)==EClosing, K::Fault(K::EClientRequestCallbackInWrongState));
	if (oldStatus & KClientRequestFlagClosing)
		Kern::AsyncFree(this); // must call async version since current thread may be exiting here
	return oldStatus;
	}
|
3520 |
|
TClientDataRequestBase::TClientDataRequestBase(TInt aBufferSize) :
	TClientRequest(CallbackFunc),
	iSize(aBufferSize)	// size of the kernel-side buffer copied to the client
	{
	}
|
3526 |
|
void TClientDataRequestBase::CallbackFunc(TAny* aData, TUserModeCallbackReason aReason)
	{
	TClientDataRequestBase* req = (TClientDataRequestBase*)aData;

#ifdef _DEBUG
	TState state = GetState(req->iStatus);
	__ASSERT_DEBUG(state == EInUse || state == EClosing, K::Fault(K::EClientRequestCallbackInWrongState));
#endif

	// Copy the kernel-side data buffer out to the client's destination (only
	// when actually running, not on cancellation), then let the base class
	// write the completion code and free the request.
	if (aReason == EUserModeCallbackRun)
		K::USafeWrite(req->iDestPtr, req->Buffer(), req->iSize);

	TClientRequest::CallbackFunc(aData, aReason);
	}
|
3541 |
|
TClientDataRequestBase2::TClientDataRequestBase2(TInt aBufferSize1, TInt aBufferSize2) :
	TClientRequest(CallbackFunc),
	iSize1(aBufferSize1),	// size of the first kernel-side buffer
	iSize2(aBufferSize2)	// size of the second kernel-side buffer
	{
	}
|
3548 |
|
void TClientDataRequestBase2::CallbackFunc(TAny* aData, TUserModeCallbackReason aReason)
	{
	TClientDataRequestBase2* req = (TClientDataRequestBase2*)aData;

#ifdef _DEBUG
	TState state = GetState(req->iStatus);
	__ASSERT_DEBUG(state == EInUse || state == EClosing, K::Fault(K::EClientRequestCallbackInWrongState));
#endif

	// Copy both kernel-side buffers out to their client destinations (only
	// when actually running, not on cancellation), then let the base class
	// write the completion code and free the request.
	if (aReason == EUserModeCallbackRun)
		{
		K::USafeWrite(req->iDestPtr1, req->Buffer1(), req->iSize1);
		K::USafeWrite(req->iDestPtr2, req->Buffer2(), req->iSize2);
		}

	TClientRequest::CallbackFunc(aData, aReason);
	}
|
3566 |
|
3567 // TClientBuffer implementation |
|
3568 |
|
3569 #ifndef __MARM__ |
|
3570 |
|
3571 /** |
|
3572 Read the header of a user-side descriptor in the current process, parse it, and populate a |
|
3573 TDesHeader with the result. |
|
3574 |
|
3575 @param aDesPtr The descriptor for which information is to be fetched. |
|
3576 @param aOut On return, set to the parsed contents of the descriptor header. |
|
3577 |
|
3578 @return KErrNone if successful, or one of the system-wide error codes. |
|
3579 |
|
3580 @pre Interrupts must be enabled. |
|
3581 @pre Kernel must be unlocked. |
|
3582 @pre No fast mutex can be held. |
|
3583 @pre Call in a thread context. |
|
3584 @pre Can be used in a device driver. |
|
3585 */ |
|
TInt K::USafeReadAndParseDesHeader(TAny* aDesPtr, TDesHeader& aOut)
	{
	CHECK_PAGING_SAFE;
	// Header length in bytes for each descriptor type (the type is held in
	// the top bits of the first word); zero marks an invalid type.
	static const TUint8 LengthLookup[16]={4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};
	TRawDesHeader header;
	const TUint32* pS=(const TUint32*)aDesPtr;
	// Descriptors must be non-NULL and word aligned.
	if (!pS || (TInt(pS)&3)!=0)
		return KErrBadDescriptor;
	// Read the first word (type + length) safely from user memory.
	if (K::USafeRead(pS,&header[0],sizeof(TUint32)))
		return KErrBadDescriptor;
	TInt type=header[0]>>KShiftDesType8;
	TInt l=LengthLookup[type];
	if (l==0)
		return KErrBadDescriptor;
	// Read the rest of the header, if this type has more than one word.
	if (l>(TInt)sizeof(TUint32) && K::USafeRead(pS+1,&header[1],l-sizeof(TUint32)))
		return KErrBadDescriptor;
	return K::ParseDesHeader(aDesPtr, header, aOut);
	}
|
3604 |
|
3605 #endif |
|
3606 |
|
3607 // Parse a descriptor header, return KErrBadDescriptor if there's an error |
|
3608 // Note that this can parse a header in-place (i.e. when &aIn == &aOut) |
|
// Parse a descriptor header, return KErrBadDescriptor if there's an error
// Note that this can parse a header in-place (i.e. when &aIn == &aOut)
TInt K::ParseDesHeader(const TAny* aDes, const TRawDesHeader& aIn, TDesHeader& aOut)
	{
	TUint type = aIn[0] >> KShiftDesType;
	TUint len = aIn[0] & KMaskDesLength;
	TUint max = (TUint)TDesHeader::KConstMaxLength;	// default for constant descriptors
	TLinAddr p;
	// Work out where the descriptor's data lives, and the maximum length,
	// for each descriptor type.
	switch (type)
		{
		case EBufC: p=(TLinAddr)aDes+sizeof(TDesC); break;		// data follows the header
		case EPtrC: p=(TLinAddr)aIn[1]; break;
		case EPtr: p=(TLinAddr)aIn[2]; max=(TInt)aIn[1]; break;
		case EBuf: p=(TLinAddr)aDes+sizeof(TDes); max=(TInt)aIn[1]; break;
		case EBufCPtr: p=(TLinAddr)aIn[2]+sizeof(TDesC); max=(TInt)aIn[1]; break;
		default:
			return KErrBadDescriptor;
		}
	// Length must not exceed the maximum, and an EBufCPtr data address must
	// itself be word aligned.
	if (len>max || (type == EBufCPtr && ((TUint)p & 3) != 0))
		return KErrBadDescriptor;
	aOut.Set(aIn[0], p, max);
	return KErrNone;
	}
|
3630 |
|
3631 /** |
|
3632 Create a TClientBuffer object. |
|
3633 |
|
3634 The object is not initially populated with information about a buffer, and the IsSet() method will |
|
3635 return false. |
|
3636 */ |
|
3637 EXPORT_C TClientBuffer::TClientBuffer() : |
|
3638 iPtr(0) |
|
3639 { |
|
3640 } |
|
3641 |
|
3642 /** |
|
3643 Indicates whether this object has been set by calling either SetFromDescriptor() or SetFromBuffer(). |
|
3644 |
|
3645 @return Whether the object has been set. |
|
3646 */ |
|
3647 EXPORT_C TBool TClientBuffer::IsSet() const |
|
3648 { |
|
3649 return iPtr != 0; |
|
3650 } |
|
3651 |
|
3652 /** |
|
3653 Reset this object to its initial state. |
|
3654 |
|
3655 Calling IsSet() will subsequently return false. |
|
3656 |
|
3657 @publishedPartner |
|
3658 @released |
|
3659 */ |
|
3660 EXPORT_C void TClientBuffer::Reset() |
|
3661 { |
|
3662 iPtr = 0; |
|
3663 } |
|
3664 |
|
3665 /** |
|
3666 Set this object to refer to a client descriptor. |
|
3667 |
|
3668 @param aDesPtr A pointer to the client's descriptor (in user memory). |
|
3669 @param aClientThread This should normally be NULL to indicate the current thread, although a |
|
3670 different thread can be specified. |
|
3671 |
|
3672 The descriptor (including the header) is expected to reside in user memory. The header is read in the process of populating this object. |
|
3673 |
|
3674 Calling IsSet() will subsequently return true. |
|
3675 |
|
3676 @publishedPartner |
|
3677 @released |
|
3678 */ |
|
EXPORT_C TInt TClientBuffer::SetFromDescriptor(TAny* aDesPtr, DThread* aClientThread)
	{
	iPtr = (TUint32)aDesPtr;
	// Descriptors must be word aligned: the two low bits of iPtr are used as
	// flags (see EIsBuffer), so an unaligned pointer would corrupt the state.
	__NK_ASSERT_ALWAYS((iPtr & 3) == 0);
	TInt r;
	if (aClientThread)
		{
#ifndef __MEMMODEL_FLEXIBLE__
		// On non-flexible memory models the system lock is held while
		// reading another thread's descriptor header.
		NKern::LockSystem();
#endif
		r = aClientThread->ReadAndParseDesHeader(aDesPtr, iHeader);
#ifndef __MEMMODEL_FLEXIBLE__
		NKern::UnlockSystem();
#endif
		}
	else
		r = K::USafeReadAndParseDesHeader(aDesPtr, iHeader);
	// NOTE(review): iPtr stays set even when parsing fails, so IsSet()
	// returns true after an error; presumably callers must treat a
	// non-KErrNone return as fatal for this object -- confirm.
	return r;
	}
|
3698 |
|
3699 /** |
|
3700 Set this object to refer to a client buffer specified by start address and length. |
|
3701 |
|
3702 @param aStartAddr The start address of the buffer (in user memory) |
|
3703 @param aLength The length of the buffer in bytes. |
|
3704 @param aWriteable Whether the buffer should be written to by kernel-side code. |
|
3705 |
|
3706 The buffer is expected to reside in user memory. |
|
3707 |
|
3708 Calling IsSet() will subsequently return true. |
|
3709 |
|
3710 @publishedPartner |
|
3711 @released |
|
3712 */ |
|
3713 EXPORT_C void TClientBuffer::SetFromBuffer(TLinAddr aStartAddr, TInt aLength, TBool aWriteable) |
|
3714 { |
|
3715 iPtr = EIsBuffer; |
|
3716 if (aWriteable) |
|
3717 iHeader.Set(EPtr << KShiftDesType8, aStartAddr, aLength); |
|
3718 else |
|
3719 iHeader.Set((EPtrC << KShiftDesType8) | aLength, aStartAddr); |
|
3720 } |
|
3721 |
|
3722 /** |
|
3723 Indicates whether the client descriptor is writeable, as opposed to constant. |
|
3724 |
|
3725 @return Whether the client descriptor is writeable. |
|
3726 |
|
3727 @publishedPartner |
|
3728 @released |
|
3729 */ |
|
3730 EXPORT_C TBool TClientBuffer::IsWriteable() const |
|
3731 { |
|
3732 return iHeader.IsWriteable(); |
|
3733 } |
|
3734 |
|
3735 /** |
|
3736 Get the length of the client's descriptor. |
|
3737 |
|
3738 @return The length of the descriptor |
|
3739 |
|
3740 @publishedPartner |
|
3741 @released |
|
3742 */ |
|
3743 EXPORT_C TInt TClientBuffer::Length() const |
|
3744 { |
|
3745 return iHeader.Length(); |
|
3746 } |
|
3747 |
|
3748 /** |
|
3749 Get the maximum length of the client's writeable descriptor. |
|
3750 |
|
@return The maximum length of the descriptor on success, otherwise one of the system-wide error codes.
|
3752 |
|
3753 @publishedPartner |
|
3754 @released |
|
3755 */ |
|
3756 EXPORT_C TInt TClientBuffer::MaxLength() const |
|
3757 { |
|
3758 return iHeader.MaxLength(); |
|
3759 } |
|
3760 |
|
3761 TAny* TClientBuffer::DesPtr() const |
|
3762 { |
|
3763 return (TAny*)(iPtr & ~3); |
|
3764 } |
|
3765 |
|
3766 TAny* TClientBuffer::DataPtr() const |
|
3767 { |
|
3768 return (TAny*)iHeader.DataPtr(); |
|
3769 } |
|
3770 |
|
3771 /** |
|
3772 Update the client's descriptor header to reflect the length of data written to the buffer. |
|
3773 |
|
3774 @param aClientThread This should normally be NULL to indicate the current thread, although a |
|
3775 different thread can be specified. |
|
3776 |
|
3777 This method should be called (usually in the context of the client thread) after the buffer has been |
|
3778 written to using Kern::ThreadBufWrite(). |
|
3779 |
|
3780 If this object was not set by calling SetFromDescriptor(), this method does nothing. |
|
3781 |
|
3782 @return KErrNone if successful, or KErrBadDescriptor if there was an exception while updating the length. |
|
3783 |
|
3784 @publishedPartner |
|
3785 @released |
|
3786 */ |
|
EXPORT_C TInt TClientBuffer::UpdateDescriptorLength(DThread* aClientThread)
	{
	TInt r = KErrNone;

	// Nothing to do for raw buffers (set via SetFromBuffer) or for
	// non-writeable descriptors, whose length cannot have changed.
	if ((iPtr & EIsBuffer) == 0 && IsWriteable())
		{
		// Write the updated type+length word back over the first word of
		// the client's descriptor.
		if (aClientThread)
			r = Kern::ThreadRawWrite(aClientThread, (TAny*)iPtr, &iHeader.TypeAndLength(), sizeof(TUint32));
		else
			{
			// USafeWrite returns the faulting address on exception, NULL on success.
			TAny* excAddr = K::USafeWrite((TAny*)iPtr, &iHeader.TypeAndLength(), sizeof(TUint32));
			if (excAddr != NULL)
				r = KErrBadDescriptor;
			}
		// An EBufCPtr points at a TBufC whose own length word (immediately
		// before the data) must be kept in sync as well.
		if (r == KErrNone && iHeader.Type() == EBufCPtr)
			{
			TInt len = iHeader.Length();
			TUint8* pL = (TUint8*)(iHeader.DataPtr() - sizeof(TDesC));
			if (aClientThread)
				r = Kern::ThreadRawWrite(aClientThread, (TAny*)pL, &len, sizeof(TUint32));
			else
				{
				TAny* excAddr = K::USafeWrite((TAny*)pL, &len, sizeof(TUint32));
				if (excAddr != NULL)
					r = KErrBadDescriptor;
				}
			}
		}
	return r;
	}
|
3817 |
|
// Implementation of TClientBufferRequest

// Single global fast mutex guarding the setup/completion state of all
// TClientBufferRequest objects (iSetupThread and the request status).
NFastMutex TClientBufferRequest::Lock;
|
3821 |
|
// Construct a request whose user-mode completion callback is CallbackFunc,
// recording whether added buffers should have their virtual memory pinned
// (EPinVirtual flag in aFlags).
TClientBufferRequest::TClientBufferRequest(TUint aFlags) :
	TClientRequest(TClientBufferRequest::CallbackFunc),
	iFlags(aFlags)
	{
	}
|
3827 |
|
3828 TInt TClientBufferRequest::AllocateBufferData() |
|
3829 { |
|
3830 // allocate data for one buffer and add it to the end of the list |
|
3831 SBufferData* item = new SBufferData; |
|
3832 if (item == NULL) |
|
3833 return KErrNoMemory; |
|
3834 if (iFlags & EPinVirtual) |
|
3835 { |
|
3836 TInt r = Kern::CreateVirtualPinObject(item->iPinObject); |
|
3837 if (r != KErrNone) |
|
3838 { |
|
3839 delete item; |
|
3840 return r; |
|
3841 } |
|
3842 } |
|
3843 iBufferList.Add(item); |
|
3844 return KErrNone; |
|
3845 } |
|
3846 |
|
3847 TInt TClientBufferRequest::Construct(TInt aInitialBuffers) |
|
3848 { |
|
3849 TInt r = KErrNone; |
|
3850 for (TInt i = 0 ; r == KErrNone && i < aInitialBuffers ; ++i) |
|
3851 r = AllocateBufferData(); |
|
3852 return r; |
|
3853 } |
|
3854 |
|
3855 /** |
|
3856 Create a TClientBufferRequest object. |
|
3857 |
|
3858 @param aInitialBuffers The number of buffer slots to allocate initially. |
|
3859 @param aFlags Indicates whether buffers should have their virtual memory pinned. |
|
3860 |
|
3861 @publishedPartner |
|
3862 @released |
|
3863 */ |
|
EXPORT_C TInt Kern::CreateClientBufferRequest(TClientBufferRequest*& aRequestPtr, TUint aInitialBuffers, TUint aFlags)
	{
	// Two-phase construction: raw allocation plus placement-new, then
	// Construct() pre-allocates the initial buffer slots.
	TClientBufferRequest* self = (TClientBufferRequest*)Kern::Alloc(sizeof(TClientBufferRequest));
	if (!self)
		return KErrNoMemory;
	new (self) TClientBufferRequest(aFlags);
	TInt r = self->Construct(aInitialBuffers);
	// Publish the new object atomically: succeed only if aRequestPtr was
	// still NULL, so two concurrent creators cannot both install a request.
	T_UintPtr zero = 0;
	if (r == KErrNone && !__e32_atomic_cas_ord_ptr(&aRequestPtr, &zero, self))
		r = KErrInUse;
	if (r != KErrNone)
		self->Close();
	return r;
	}
|
3878 |
|
void TClientBufferRequest::Close()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"TClientBufferRequest::Close");
	// Mark the request as closing. If it is not currently in use the object
	// can be freed immediately; otherwise the completion callback performs
	// the deferred cleanup when it runs (see CallbackFunc).
	T_UintPtr status = (T_UintPtr)__e32_atomic_ior_ord_ptr(&iStatus, KClientRequestFlagClosing);
	__ASSERT_DEBUG(GetState(status) <= EInUse, K::Fault(K::EClientRequestCloseInWrongState));
	if (!(status & KClientRequestFlagInUse))
		{
		SBufferData* item;
		while(item = (SBufferData*)iBufferList.GetFirst(), item != NULL)
			{
			// NOTE(review): invoked even when EPinVirtual was not set, in
			// which case no pin object was ever created -- presumably the
			// destroy call tolerates a null object; confirm.
			Kern::DestroyVirtualPinObject(item->iPinObject);
			Kern::AsyncFree(item);
			}
		Kern::AsyncFree(this); // must call async version since current thread may be exiting here
		}
	}
|
3895 |
|
3896 /** |
|
3897 Destroy a TClientBufferRequest object. |
|
3898 |
|
3899 @publishedPartner |
|
3900 @released |
|
3901 */ |
|
3902 EXPORT_C void Kern::DestroyClientBufferRequest(TClientBufferRequest*& aRequestPtr) |
|
3903 { |
|
3904 TClientBufferRequest* request = (TClientBufferRequest*)__e32_atomic_swp_rel_ptr(&aRequestPtr, 0); |
|
3905 if (request) |
|
3906 request->Close(); |
|
3907 } |
|
3908 |
|
// DThread keeps its M-state in iWaitLink.iSpare1; alias it for readability
// (used by the dead-thread assertion in StartSetup below).
#define iMState iWaitLink.iSpare1
|
3910 |
|
3911 /** |
|
3912 Start the setup process and set the client's TRequestStatus pointer. |
|
3913 |
|
3914 This method should be called first when the client initiates an asynchronous request, in the context |
|
3915 of the client thread. |
|
3916 |
|
3917 After calling this, the driver can call AddBuffer the appropriate number of times. |
|
3918 |
|
3919 @return KErrNone if successful, or KErrInUse if the object has already been setup. |
|
3920 |
|
3921 @publishedPartner |
|
3922 @released |
|
3923 */ |
|
EXPORT_C TInt TClientBufferRequest::StartSetup(TRequestStatus* aStatus)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TClientBufferRequest::StartSetup");
	NKern::FMWait(&Lock);
	TInt r = TClientRequest::SetStatus(aStatus);
	if (r == KErrNone)
		{
		// Any previous setup thread must already be dead (a reference was
		// kept so the DThread object could not disappear); drop that
		// reference before taking one on the current (client) thread.
		__NK_ASSERT_DEBUG(iSetupThread == NULL || iSetupThread->iMState == DThread::EDead);
		if (iSetupThread)
			iSetupThread->Close(NULL);
		iSetupThread = TheCurrentThread;
		iSetupThread->Open();
		}
	NKern::FMSignal(&Lock);
	return r;
	}
|
3940 |
|
TClientBufferRequest::SBufferData* TClientBufferRequest::StartAddBuffer()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TClientBufferRequest::AddBuffer");
	// AddBuffer may only be called by the thread that called StartSetup.
	if (iSetupThread != TheCurrentThread)
		K::Fault(K::EBufferRequestAddInWrongState);
	TInt r = KErrNone;
	// EndAddBuffer rotates the list so the next free slot is at the tail;
	// if the tail slot is already occupied, grow the list by one slot.
	if (((SBufferData*)iBufferList.Last())->iBuffer.IsSet())
		{
		r = AllocateBufferData();
		if (r != KErrNone)
			{
			// Allocation failure aborts the whole setup (Reset releases
			// everything added so far) before reporting NULL to the caller.
			Reset();
			return NULL;
			}
		}
	SBufferData* data = (SBufferData*)iBufferList.Last();
	__NK_ASSERT_DEBUG(!data->iBuffer.IsSet());
	return data;
	}
|
3960 |
|
TInt TClientBufferRequest::EndAddBuffer(TClientBuffer*& aBufOut, SBufferData* aData)
	{
	if (iFlags & EPinVirtual)
		{
		TInt r = Kern::PinVirtualMemory(aData->iPinObject, aData->iBuffer);
		if (r != KErrNone)
			{
			// Pinning failed: abort the whole setup, clear this slot and
			// report the error with no buffer to the caller.
			Reset();
			aData->iBuffer.Reset();
			aBufOut = 0;
			return r;
			}
		}
	// Rotate so the slot just filled moves off the tail and the next free
	// slot (if any) becomes Last() for the following AddBuffer call.
	iBufferList.Rotate();
	aBufOut = &aData->iBuffer;
	return KErrNone;
	}
|
3978 |
|
3979 /** |
|
3980 Associate a user-side descriptor with this request, and optionally pin it. |
|
3981 |
|
3982 This method should be called after StartSetup when the client initiates an asynchronous request, in |
|
3983 the context of the client thread. If StartSetup has not been called, this method panics. |
|
3984 |
|
3985 This method can be called multiple times. |
|
3986 |
|
3987 The descriptor header is read into the kernel from the current process' address space, and if |
|
3988 requested the memory is pinned. |
|
3989 |
|
3990 @return On success, a pointer to a TClientBuffer, which should be used to write to the descriptor. |
|
3991 NULL if there was not enough memory to complete the operation. |
|
3992 |
|
3993 @publishedPartner |
|
3994 @released |
|
3995 */ |
|
3996 EXPORT_C TInt TClientBufferRequest::AddBuffer(TClientBuffer*& aBufOut, TAny* aDesPtr) |
|
3997 { |
|
3998 SBufferData* data = StartAddBuffer(); |
|
3999 if (data == NULL) |
|
4000 return KErrNoMemory; |
|
4001 data->iBuffer.SetFromDescriptor(aDesPtr); |
|
4002 return EndAddBuffer(aBufOut, data); |
|
4003 } |
|
4004 |
|
/**
Associate a user-side memory buffer with this request, and optionally pin it.

This method should be called after StartSetup when the client initiates an asynchronous request, in
the context of the client thread. If StartSetup has not been called, this method faults the kernel.

This method can be called multiple times.

If requested, the memory is pinned.

@param aBufOut On success, set to a pointer to a TClientBuffer, which can be used to write to the
               buffer.
@param aStartAddr The start address of the buffer (in user memory).
@param aLength The length of the buffer in bytes.
@param aWriteable Whether the buffer should be written to by kernel-side code.

@return KErrNone if successful, or KErrNoMemory if there was not enough memory to complete the
        operation.

@publishedPartner
@released
*/
EXPORT_C TInt TClientBufferRequest::AddBuffer(TClientBuffer*& aBufOut, TLinAddr aStartAddr, TInt aLength, TBool aWriteable)
	{
	SBufferData* data = StartAddBuffer();
	if (data == NULL)
		return KErrNoMemory;
	data->iBuffer.SetFromBuffer(aStartAddr, aLength, aWriteable);
	return EndAddBuffer(aBufOut, data);
	}
|
4029 |
|
4030 /** |
|
4031 Complete the setup process. |
|
4032 |
|
4033 This method should always be called if the setup process has completed successfully, after any calls |
|
4034 to AddBuffer. It is not necessary to call this if StartSetup or AddBuffer return an error. |
|
4035 |
|
4036 This should always be called in the context of the client thread. |
|
4037 |
|
4038 @publishedPartner |
|
4039 @released |
|
4040 */ |
|
EXPORT_C void TClientBufferRequest::EndSetup()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TClientBufferRequest::EndSetup");
	NKern::FMWait(&Lock);
	// Only the thread that called StartSetup may end the setup phase.
	if (iSetupThread != TheCurrentThread)
		K::Fault(K::EBufferRequestEndSetupInWrongState);
	DThread* thread = iSetupThread;
	iSetupThread = NULL;
	// Enter a critical section before releasing the lock so the setup-thread
	// reference can be dropped without this thread being killed midway.
	NKern::ThreadEnterCS();
	NKern::FMSignal(&Lock);
	thread->Close(NULL);
	NKern::ThreadLeaveCS();
	}
|
4054 |
|
4055 /** |
|
4056 Reset this object to allow it be reused, without completing the client request. |
|
4057 |
|
4058 This may be called at any time. It must be called in the context of the client thread. |
|
4059 |
|
4060 @publishedPartner |
|
4061 @released |
|
4062 */ |
|
EXPORT_C void TClientBufferRequest::Reset()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TClientBufferRequest::Reset");
	NKern::FMWait(&Lock);
	TBool inSetup = iSetupThread != NULL;
	// Reset during setup is only legal from the setup (client) thread itself.
	if (inSetup && iSetupThread != TheCurrentThread)
		K::Fault(K::EBufferRequestResetInWrongState);
	if (!inSetup)
		{
		// Not in setup: no buffers to release, so just reset the underlying
		// client request state.
		TClientRequest::Reset();
		NKern::FMSignal(&Lock);
		return;
		}
	NKern::FMSignal(&Lock);
	// Release every buffer slot added so far; unpinning is done with the
	// fast mutex released.
	SDblQueLink* link = iBufferList.First();
	while (link != &iBufferList.iA)
		{
		SBufferData* data = (SBufferData*)link;
		data->iBuffer.Reset();
		if (iFlags & TClientBufferRequest::EPinVirtual)
			Kern::UnpinVirtualMemory(data->iPinObject);
		link = data->iNext;
		}
	NKern::FMWait(&Lock);
	TClientRequest::Reset();
	DThread* thread = iSetupThread;
	iSetupThread = NULL;
	// As in EndSetup: drop the setup-thread reference outside the lock but
	// inside a critical section.
	NKern::ThreadEnterCS();
	NKern::FMSignal(&Lock);
	thread->Close(NULL);
	NKern::ThreadLeaveCS();
	}
|
4095 |
|
4096 /** |
|
4097 Queue the request for completion. |
|
4098 |
|
4099 If the object has not been setup by calling StartSetup/AddBuffer/EndSetup, this method does nothing. |
|
Otherwise, it unpins any memory that was pinned by calling AddBuffer, and causes the client's
|
4101 TRequestStatus and any writeable descriptor lengths to be written back to the client thread when it |
|
4102 next runs. |
|
4103 |
|
4104 This method is not synchronised, and therefore should only ever be called from the context of a |
|
single thread (for example a DFC queue thread). Alternatively, an external synchronisation mechanism
|
4106 such as a mutex can be used. |
|
4107 |
|
4108 @prototype |
|
4109 @internalTechnology |
|
4110 */ |
|
EXPORT_C void Kern::QueueBufferRequestComplete(DThread* aThread, TClientBufferRequest* aRequest, TInt aReason)
	{
	// Thin forwarding wrapper: the request object implements the logic.
	aRequest->QueueComplete(aThread, aReason);
	}
|
4115 |
|
void TClientBufferRequest::QueueComplete(DThread* aThread, TInt aReason)
	{
	NKern::FMWait(&Lock);
	// Completion proceeds only once setup has finished (no setup thread
	// outstanding) and the underlying request accepts completion.
	TBool ready = iSetupThread == NULL && TClientRequest::StartComplete(aThread, aReason);
	NKern::FMSignal(&Lock);
	if (!ready)
		return;
	// Unpin all buffers now, in the completing thread's context; descriptor
	// lengths are written back later by CallbackFunc in the client thread.
	if (iFlags & TClientBufferRequest::EPinVirtual)
		{
		SDblQueLink* link = iBufferList.First();
		while (link != &iBufferList.iA)
			{
			TClientBufferRequest::SBufferData* data = (TClientBufferRequest::SBufferData*)link;
			Kern::UnpinVirtualMemory(data->iPinObject);
			link = data->iNext;
			}
		}
	EndComplete(aThread);
	}
|
4135 |
|
// Completion callback. Runs as a user-mode callback, either to deliver the
// completion (EUserModeCallbackRun) or during teardown, and finally chains
// to TClientRequest::CallbackFunc.
void TClientBufferRequest::CallbackFunc(TAny* aData, TUserModeCallbackReason aReason)
	{
	TClientBufferRequest* self = (TClientBufferRequest*)aData;

	TState state = GetState(self->iStatus);
	__ASSERT_DEBUG(state == EInUse || state == EClosing, K::Fault(K::EClientRequestCallbackInWrongState));

	if (aReason == EUserModeCallbackRun)
		{
		// Write back descriptor lengths and clear every populated slot.
		SDblQueLink* link = self->iBufferList.First();
		while (link != &self->iBufferList.iA)
			{
			SBufferData* data = (SBufferData*)link;
			if (data->iBuffer.IsSet())
				{
				// NOTE(review): lengths are only written back for pinned
				// requests -- confirm that non-pinned requests are expected
				// to update their client descriptors by other means.
				if (self->iFlags & TClientBufferRequest::EPinVirtual)
					data->iBuffer.UpdateDescriptorLength(); // ignore error here
				data->iBuffer.Reset();
				}
			link = data->iNext;
			}
		}

	if (state == EClosing)
		{
		// Close() ran while the request was still in use: finish the
		// deferred cleanup of the buffer list here.
		SBufferData* item;
		while(item = (SBufferData*)(self->iBufferList.GetFirst()), item != NULL)
			{
			Kern::DestroyVirtualPinObject(item->iPinObject);
			Kern::AsyncFree(item);
			}
		}

	TClientRequest::CallbackFunc(aData, aReason);
	}
|
4171 |
|
4172 // Implementation of kernel pin APIs |
|
4173 |
|
4174 /* |
|
4175 Create an object which can be used to pin virtual memory. |
|
4176 |
|
4177 @param aPinObject A reference to a pointer which is set to the newly-created object on success. |
|
4178 |
|
4179 @return KErrNone, if successful, otherwise one of the other system-wide error codes. |
|
4180 |
|
4181 @pre Calling thread must be in a critical section |
|
4182 @pre Interrupts must be enabled. |
|
4183 @pre Kernel must be unlocked. |
|
4184 @pre No fast mutex can be held. |
|
4185 @pre Call in a thread context. |
|
4186 @pre Suitable for use in a device driver. |
|
4187 |
|
4188 @see Kern::DestroyVirtualPinObject() |
|
4189 |
|
4190 @prototype |
|
4191 @internalTechnology |
|
4192 */ |
|
4193 EXPORT_C TInt Kern::CreateVirtualPinObject(TVirtualPinObject*& aPinObject) |
|
4194 { |
|
4195 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::CreateVirtualPinObject"); |
|
4196 return M::CreateVirtualPinObject(aPinObject); |
|
4197 } |
|
4198 |
|
4199 /* |
|
4200 Pin an area of virtual memory. |
|
4201 |
|
4202 The act of pinning virtual memory means that the memory in the specified virtual address range is |
|
guaranteed to remain in system RAM while it is pinned, unless it is decommitted. The actual physical
|
4204 RAM used is not guaranteed to stay the same however, as it could be replaced in the process of RAM |
|
4205 defragmentation. |
|
4206 |
|
4207 This operation is provided to enable device drivers to pin client memory in the context of the |
|
4208 client thread, so that when it is accessed from a different thread later on (for example from a DFC |
|
4209 thread) there is no possibility of taking page faults. |
|
4210 |
|
4211 Note that this operation may fail with KErrNoMemory. |
|
4212 |
|
4213 @param aPinObject A virtual pin object previously created by calling Kern::CreateVirtualPinObject(). |
|
4214 @param aStart The start address of the memory to pin. |
|
4215 @param aSize The size of the memory to pin in bytes. |
|
4216 @param aThread The thread that owns the memory to pin, or NULL to use the current thread. |
|
4217 |
|
4218 @return KErrNone, if successful, otherwise one of the other system-wide error codes. |
|
4219 |
|
4220 @pre Interrupts must be enabled. |
|
4221 @pre Kernel must be unlocked. |
|
4222 @pre No fast mutex can be held. |
|
4223 @pre Call in a thread context. |
|
4224 @pre Can be used in a device driver. |
|
4225 |
|
4226 @see Kern::UnpinVirtualMemory() |
|
4227 |
|
4228 @prototype |
|
4229 @internalTechnology |
|
4230 */ |
|
EXPORT_C TInt Kern::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::PinVirtualMemory");
	if (aThread == NULL)
		aThread = TheCurrentThread;
	// Pinning zero bytes is a successful no-op.
	if (aSize == 0)
		return KErrNone;
	// Hold a critical section across the memory-model call so this thread
	// cannot be killed while the pin operation is in progress.
	NKern::ThreadEnterCS();
	TInt r = M::PinVirtualMemory(aPinObject, aStart, aSize, aThread);
	NKern::ThreadLeaveCS();
	return r;
	}
|
4243 |
|
4244 /* |
|
4245 Pin an area of virtual memory. |
|
4246 |
|
4247 The act of pinning virtual memory means that the memory in the specified virtual address range is |
|
guaranteed to remain in system RAM while it is pinned, unless it is decommitted. The actual physical
|
4249 RAM used is not guaranteed to stay the same however, as it could be replaced in the process of RAM |
|
4250 defragmentation. |
|
4251 |
|
4252 This operation is provided to enable device drivers to pin client memory in the context of the |
|
4253 client thread, so that when it is accessed from a different thread later on (for example from a DFC |
|
4254 thread) there is no possibility of taking page faults. |
|
4255 |
|
4256 Note that this operation may fail with KErrNoMemory. |
|
4257 |
|
4258 @param aPinObject A virtual pin object previously created by calling Kern::CreateVirtualPinObject(). |
|
4259 @param aDes A TClientBuffer object representing a client descriptor to pin. |
|
4260 @param aThread The thread that owns the memory to pin, or NULL to use the current thread. |
|
4261 |
|
@return KErrNone, if successful, otherwise one of the other system-wide error codes.
|
4263 |
|
4264 @pre Interrupts must be enabled. |
|
4265 @pre Kernel must be unlocked. |
|
4266 @pre No fast mutex can be held. |
|
4267 @pre Call in a thread context. |
|
4268 @pre Can be used in a device driver. |
|
4269 |
|
4270 @see Kern::UnpinVirtualMemory() |
|
4271 |
|
4272 @prototype |
|
4273 @internalTechnology |
|
4274 */ |
|
EXPORT_C TInt Kern::PinVirtualMemory(TVirtualPinObject* aPinObject, const TClientBuffer& aDes, DThread* aThread)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::PinVirtualMemory");
	if (aThread == NULL)
		aThread = TheCurrentThread;
	// For a writeable descriptor the whole MaxLength() extent may be written
	// to, so that is what must be pinned; a constant one needs only Length().
	TInt length = aDes.IsWriteable() ? aDes.MaxLength() : aDes.Length();
	if (length < 0)
		return length;	// negative value is an error code
	if (length == 0)
		return KErrNone;	// nothing to pin
	NKern::ThreadEnterCS();
	TInt r = M::PinVirtualMemory(aPinObject, (TLinAddr)aDes.DataPtr(), length, aThread);
	NKern::ThreadLeaveCS();
	return r;
	}
|
4290 /* |
|
4291 Create a pin object and then pin an area of virtual memory in the current address space. If |
|
4292 an error occurs then no pin object will exist |
|
4293 |
|
4294 The act of pinning virtual memory means that the memory in the specified virtual address range is |
|
4295 guaranteed to remain in system RAM while it is pinned, unless it is decommited. The actual physical |
|
4296 RAM used is not guaranteed to stay the same however, as it could be replaced in the process of RAM |
|
4297 defragmentation. |
|
4298 |
|
4299 This operation is provided to enable device drivers to pin client memory in the context of the |
|
4300 client thread, so that when it is accessed from a different thread later on (for example from a DFC |
|
4301 thread) there is no possibility of taking page faults. |
|
4302 |
|
4303 Note that this operation may fail with KErrNoMemory. |
|
4304 |
|
4305 @param aPinObject A reference to a pointer which is set to the newly-created object on success. |
|
4306 @param aStart The start address of the memory to pin. |
|
4307 @param aSize The size of the memory to pin in bytes. |
|
4308 |
|
4309 @return KErrNone, if successful, otherwise one of the other system-wide error codes. |
|
4310 |
|
4311 @pre Calling thread must be in a critical section |
|
4312 @pre Interrupts must be enabled. |
|
4313 @pre Kernel must be unlocked. |
|
4314 @pre No fast mutex can be held. |
|
4315 @pre Call in a thread context. |
|
4316 @pre Can be used in a device driver. |
|
4317 |
|
4318 @see Kern::UnpinVirtualMemory() |
|
4319 @see Kern::DestroyVirtualPinObject() |
|
4320 |
|
4321 @prototype |
|
4322 @internalTechnology |
|
4323 */ |
|
4324 EXPORT_C TInt Kern::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize) |
|
4325 { |
|
4326 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::CreateAndPinVirtualMemory"); |
|
4327 return M::CreateAndPinVirtualMemory(aPinObject, aStart, aSize); |
|
4328 } |
|
4329 |
|
4330 |
|
4331 /* |
|
4332 Unpin an area of memory previously pinned by calling Kern::PinVirtualMemory(). |
|
4333 |
|
4334 @param aPinObject The virtual pin object used to pin the memory. |
|
4335 |
|
4336 @pre Interrupts must be enabled. |
|
4337 @pre Kernel must be unlocked. |
|
4338 @pre No fast mutex can be held. |
|
4339 @pre Call in a thread context. |
|
4340 @pre Can be used in a device driver. |
|
4341 |
|
4342 @see Kern::PinVirtualMemory() |
|
4343 |
|
4344 @prototype |
|
4345 @internalTechnology |
|
4346 */ |
|
EXPORT_C void Kern::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::UnpinVirtualMemory");
	// Critical section: do not allow this thread to be killed while the
	// memory model is updating pin state.
	NKern::ThreadEnterCS();
	M::UnpinVirtualMemory(aPinObject);
	NKern::ThreadLeaveCS();
	}
|
4354 |
|
4355 /* |
|
4356 Dispose of a virtual pin object which is no longer required. |
|
4357 |
|
4358 Any memory pinned by the object is unpinned first. |
|
4359 |
|
4360 @param aPinObject A reference to a pointer to the pin object to destroy. |
|
4361 This pointer will be set to NULL on return. |
|
4362 |
|
4363 @pre Calling thread must be in a critical section |
|
4364 @pre Interrupts must be enabled. |
|
4365 @pre Kernel must be unlocked. |
|
4366 @pre No fast mutex can be held. |
|
4367 @pre Call in a thread context. |
|
4368 @pre Suitable for use in a device driver. |
|
4369 |
|
4370 @see Kern::CreateVirtualPinObject() |
|
4371 |
|
4372 @prototype |
|
4373 @internalTechnology |
|
4374 */ |
|
EXPORT_C void Kern::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::DestroyVirtualPinObject");
	// The memory model unpins any remaining pinned memory and sets the
	// pointer to NULL (see API documentation above).
	M::DestroyVirtualPinObject(aPinObject);
	}
|
4380 |
|
4381 /** |
|
Creates an object which is used to pin physical memory. Supported by the kernel running the flexible memory model.
|
4383 |
|
4384 @param aPinObject A reference to a pointer which is set to the newly-created object on success. |
|
4385 |
|
@return KErrNotSupported on memory models other than flexible.
|
4387 KErrNone, if successful, otherwise one of the other system-wide error codes. |
|
4388 |
|
4389 @pre Calling thread must be in a critical section |
|
4390 @pre Interrupts must be enabled. |
|
4391 @pre Kernel must be unlocked. |
|
4392 @pre No fast mutex can be held. |
|
4393 @pre Call in a thread context. |
|
4394 @pre Suitable for use in a device driver. |
|
4395 |
|
4396 @see Kern::DestroyPhysicalPinObject() |
|
4397 |
|
4398 @prototype |
|
4399 */ |
|
4400 EXPORT_C TInt Kern::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject) |
|
4401 { |
|
4402 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::CreateVirtualPinObject"); |
|
4403 return M::CreatePhysicalPinObject(aPinObject); |
|
4404 } |
|
4405 |
|
4406 /** |
|
Pins an area of physical memory. Supported by the kernel running the flexible memory model.
|
4408 |
|
4409 The physical memory to pin is defined by its existing virtual mapping (by aLinAddr, aSize & aThread parameters). |
|
On return, aPhysicalAddress will hold the physical address (if memory is mapped contiguously) and aPhysicalPageList
|
4411 area will be populated with the list of physical pages of the mapping. aColour will hold the mapping colour |
|
4412 of the first physical page in the mapping. |
|
4413 |
|
4414 This operation is provided to enable device drivers to operate DMA transfers on memory which is not mapped to |
|
4415 Kernel address space (but to user client's, instead). |
|
4416 |
|
4417 The act of pinning physical memory means that it is guaranteed to be excluded from RAM defragmentation. |
|
4418 However, it can still be the subject of data paging. Physically pinned memory is also guaranteed not to be |
|
4419 reused for some other purpose - even if the process owning the memory decommits it or terminates. |
|
4420 |
|
4421 Note that this operation may fail with KErrNoMemory. |
|
4422 |
|
4423 @param aPinObject A physical pin object previously created by calling Kern::CreatePhysicalPinObject(). |
|
4424 @param aLinAddr Virtual address of memory to pin. |
|
4425 @param aSize The length (in bytes) of memory to pin. |
|
4426 @param aThread The thread that owns the memory to pin, or NULL to use the current thread. |
|
4427 @param aReadOnlyMemory Set to ETrue if the content of physical memory is not going to change while being |
|
4428 pinned, e.g. if it is DMA copied into H/W. Set to EFalse otherwise. |
|
4429 Setting this argument to ETrue will improve the performance when/if memory is paged out. |
|
4430 @param aAddress On success, this value is set to one of two values: |
|
4431 - If the specified region is physically contiguous, the value is the |
|
4432 physical address of the first byte in the region. |
|
4433 - If the region is discontiguous, the value is set to KPhysAddrInvalid. |
|
4434 @param aPages Points to area of TPhysAddr which will on exit hold the addresses of the physical pages contained |
|
4435 in the specified region. The array must be large enough to hold the whole list of pages in the region. |
|
4436 If aPageList is zero , then the function will fail with KErrNotFound if the specified region |
|
4437 is not physically contiguous. |
|
4438 @param aMapAttr On success, this is set to the mmu mapping attributes used for the memory. This |
|
4439 is a value constructed from the bit masks in the enumeration TMappingAttributes. The typical |
|
4440 use for this value is to use it as an argument to to Kernel's Sync Physical Memory interface. |
|
4441 |
|
4442 @param aColour On exit, holds the mapping colour of the first physical page in the mapping. Device drivers |
|
4443 have no use of this value but to pass to Kernel's Sync Physical Memory interface. |
|
4444 |
|
@return KErrNotSupported on memory models other than flexible.
|
4446 KErrNone, if successful, otherwise one of the other system-wide error codes. |
|
4447 |
|
4448 @pre Interrupts must be enabled. |
|
4449 @pre Kernel must be unlocked. |
|
4450 @pre No fast mutex can be held. |
|
4451 @pre Call in a thread context. |
|
4452 @pre Can be used in a device driver. |
|
4453 |
|
4454 @see Kern::UnpinPhysicalMemory() |
|
4455 @see Cache::SyncPhysicalMemoryBeforeDmaWrite |
|
4456 @see Cache::SyncPhysicalMemoryBeforeDmaRead |
|
4457 @see Cache::SyncPhysicalMemoryAfterDmaRead |
|
4458 @prototype |
|
4459 */ |
|
4460 EXPORT_C TInt Kern::PinPhysicalMemory(TPhysicalPinObject* aPinObject, TLinAddr aStart, TUint aSize, TBool aReadOnlyMemory, |
|
4461 TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour, DThread* aThread) |
|
4462 { |
|
4463 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::PinPhysicalMemory"); |
|
4464 if (aThread == NULL) |
|
4465 aThread = TheCurrentThread; |
|
4466 if (aSize == 0) |
|
4467 return KErrNone; |
|
4468 NKern::ThreadEnterCS(); |
|
4469 TInt r = M::PinPhysicalMemory(aPinObject, aStart, aSize, aReadOnlyMemory, aAddress, aPages, aMapAttr, aColour, aThread); |
|
4470 NKern::ThreadLeaveCS(); |
|
4471 return r; |
|
4472 } |
|
4473 |
|
4474 /** |
|
4475 Unpins an area of physical memory previously pinned by calling Kern::PinPhysicalMemory(). |
|
4476 |
|
4477 @param aPinObject The physical pin object used to pin the memory. |
|
4478 |
|
4479 @pre Interrupts must be enabled. |
|
4480 @pre Kernel must be unlocked. |
|
4481 @pre No fast mutex can be held. |
|
4482 @pre Call in a thread context. |
|
4483 @pre Can be used in a device driver. |
|
4484 |
|
4485 @return KErrNotSupported on memory models other than flexible.
|
4486 KErrNone, on flexible memory model. |
|
4487 |
|
4488 @see Kern::PinPhysicalMemory() |
|
4489 |
|
4490 @prototype |
|
4491 */ |
|
4492 EXPORT_C TInt Kern::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject) |
|
4493 { |
|
4494 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::UnpinPhysicalMemory"); |
|
4495 NKern::ThreadEnterCS(); |
|
4496 M::UnpinPhysicalMemory(aPinObject); |
|
4497 NKern::ThreadLeaveCS(); |
|
4498 return KErrNone; |
|
4499 } |
|
4500 |
|
4501 /**
|
4502 Dispose of a physical pin object which is no longer required. |
|
4503 |
|
4504 Any memory pinned by the object is unpinned first. |
|
4505 |
|
4506 @param aPinObject A reference to a pointer to the pin object to destroy. |
|
4507 This pointer will be set to NULL on return. |
|
4508 |
|
4509 @pre Calling thread must be in a critical section |
|
4510 @pre Interrupts must be enabled. |
|
4511 @pre Kernel must be unlocked. |
|
4512 @pre No fast mutex can be held. |
|
4513 @pre Call in a thread context. |
|
4514 @pre Suitable for use in a device driver. |
|
4515 |
|
4516 @return KErrNotSupported on memory models other than flexible.
|
4517 KErrNone, on flexible memory model. |
|
4518 |
|
4519 @see Kern::CreatePhysicalPinObject() |
|
4520 |
|
4521 @prototype |
|
4522 */ |
|
4523 EXPORT_C TInt Kern::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject) |
|
4524 { |
|
4525 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::DestroyPhysicalPinObject"); |
|
4526 M::DestroyPhysicalPinObject(aPinObject); |
|
4527 return KErrNone; |
|
4528 } |