const TInt KTempBitmapSize = 256; // KMaxSlabPayload / mincellsize, technically. Close enough.

#ifdef __KERNEL_MODE__
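
// Works out the linear address of the kernel heap's allocator. On the emulator it is simply
// the base of the kernel heap chunk; on target hardware it is located the way P::KernelInfo
// does it, walking up from the ROM header: supervisor data, then the kernel stack, then the
// heap, each rounded up to a page boundary.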
TLinAddr LtkUtils::RAllocatorHelper::GetKernelAllocator(DChunk* aKernelChunk)
    {
    TLinAddr allocatorAddress;
#ifdef __WINS__
    allocatorAddress = (TLinAddr)aKernelChunk->Base();
#else
    // Copied from P::KernelInfo
    const TRomHeader& romHdr = Epoc::RomHeader();
    const TRomEntry* primaryEntry = (const TRomEntry*)Kern::SuperPage().iPrimaryEntry;
    const TRomImageHeader* primaryImageHeader = (const TRomImageHeader*)primaryEntry->iAddressLin;
    TLinAddr stack = romHdr.iKernDataAddress + Kern::RoundToPageSize(romHdr.iTotalSvDataSize);
    TLinAddr heap = stack + Kern::RoundToPageSize(primaryImageHeader->iStackSize);
    allocatorAddress = heap;
#endif
    return allocatorAddress;
    }

TInt RAllocatorHelper::OpenKernelHeap()
    {
    _LIT(KName, "SvHeap");
    NKern::ThreadEnterCS();
    DObjectCon* chunkContainer = Kern::Containers()[EChunk];
    // ... (scan chunkContainer for the chunk named KName) ...
            foundChunk = chunk;
            break;
            }
        }
    iChunk = foundChunk;
    chunkContainer->Signal();

    iAllocatorAddress = GetKernelAllocator(foundChunk);

    // It looks like DChunk::iBase/DChunk::iFixedBase should both be ok for the kernel chunk.
    // aChunkMaxSize is only used for trying the middle of the chunk for hybrid allocatorness,
    // and the kernel heap doesn't use that (thankfully). So we can safely pass in zero.
    TInt err = OpenChunkHeap((TLinAddr)foundChunk->Base(), 0);

    if (!err) err = FinishConstruction();
    NKern::ThreadLeaveCS();
    return err;
    }

// ...

    iThread = NULL;
    RAllocatorHelper::Close();
    NKern::ThreadLeaveCS();
    }
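
// RUserAllocatorHelper runs kernel-side but inspects a user thread's heap, so it can't just
// dereference heap addresses: every access goes through Kern::ThreadRawRead/ThreadRawWrite
// against the owning thread's address space.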
TInt LtkUtils::RUserAllocatorHelper::ReadData(TLinAddr aLocation, TAny* aResult, TInt aSize) const
    {
    return Kern::ThreadRawRead(iThread, (const TAny*)aLocation, aResult, aSize);
    }

TInt LtkUtils::RUserAllocatorHelper::WriteData(TLinAddr aLocation, const TAny* aData, TInt aSize)
    {
    return Kern::ThreadRawWrite(iThread, (TAny*)aLocation, aData, aSize);
    }

TInt LtkUtils::RUserAllocatorHelper::TryLock()
    {
    return KErrNotSupported;
    }

void LtkUtils::RUserAllocatorHelper::TryUnlock()
    {
    // Not supported
    }
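
// Looks up the target thread by id under the thread container's lock; the allocator address
// and whether EUser is UDEB are supplied by the caller, which has already determined them.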
TInt LtkUtils::RUserAllocatorHelper::OpenUserHeap(TUint aThreadId, TLinAddr aAllocatorAddress, TBool aEuserIsUdeb)
    {
    NKern::ThreadEnterCS();
    DObjectCon* threads = Kern::Containers()[EThread];
    threads->Wait();
    iThread = Kern::ThreadFromId(aThreadId);
    // ...
    TInt err = IdentifyAllocatorType(aEuserIsUdeb);
    if (err) Close();
    return err;
    }
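
// RKernelCopyAllocatorHelper works on a snapshot: the kernel heap chunk has been copied into
// a second chunk aOffset bytes away, so the helper parses the copy while all of the heap's
// internal pointers still refer to the original chunk's address range. A minimal usage
// sketch (hypothetical caller):
//
//     RKernelCopyAllocatorHelper helper;
//     TInt err = helper.OpenCopiedHeap(originalChunk, copiedChunk, offset);
//     // ... walk the heap via the shared RAllocatorHelper API ...
//     helper.Close();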
LtkUtils::RKernelCopyAllocatorHelper::RKernelCopyAllocatorHelper()
    : iCopiedChunk(NULL), iOffset(0)
    {}

TInt LtkUtils::RKernelCopyAllocatorHelper::OpenCopiedHeap(DChunk* aOriginalChunk, DChunk* aCopiedChunk, TInt aOffset)
    {
    TInt err = aCopiedChunk->Open();
    if (!err)
        {
        iCopiedChunk = aCopiedChunk;
        iOffset = aOffset;

        // We need to set iAllocatorAddress to point to the allocator in the original chunk and
        // not the copy, because all the internal pointers will be relative to that. Instead we
        // use iOffset in the ReadData / WriteData calls.
        iAllocatorAddress = GetKernelAllocator(aOriginalChunk);

        // It looks like DChunk::iBase/DChunk::iFixedBase should both be ok for the kernel chunk.
        // aChunkMaxSize is only used for trying the middle of the chunk for hybrid allocatorness,
        // and the kernel heap doesn't use that (thankfully). So we can safely pass in zero.
        err = OpenChunkHeap((TLinAddr)aCopiedChunk->Base(), 0);
        }

    return err;
    }

DChunk* LtkUtils::RKernelCopyAllocatorHelper::OpenUnderlyingChunk()
    {
    // We should never get here
    __NK_ASSERT_ALWAYS(EFalse);
    return NULL;
    }

void LtkUtils::RKernelCopyAllocatorHelper::Close()
    {
    if (iCopiedChunk)
        {
        NKern::ThreadEnterCS();
        iCopiedChunk->Close(NULL);
        iCopiedChunk = NULL;
        NKern::ThreadLeaveCS();
        }
    iOffset = 0;
    RAllocatorHelper::Close();
    }
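
// Heap addresses handed to ReadData/WriteData are addresses in the original chunk; adding
// iOffset translates them to the corresponding location in the local copy, which can then
// be accessed directly with memcpy.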
TInt LtkUtils::RKernelCopyAllocatorHelper::ReadData(TLinAddr aLocation, TAny* aResult, TInt aSize) const
    {
    memcpy(aResult, (const TAny*)(aLocation + iOffset), aSize);
    return KErrNone;
    }

TInt LtkUtils::RKernelCopyAllocatorHelper::WriteData(TLinAddr aLocation, const TAny* aData, TInt aSize)
    {
    memcpy((TAny*)(aLocation + iOffset), aData, aSize);
    return KErrNone;
    }
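
// With a static snapshot there is no live heap mutex to take, so locking is reported as
// unsupported rather than attempted.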
TInt LtkUtils::RKernelCopyAllocatorHelper::TryLock()
    {
    return KErrNotSupported;
    }

void LtkUtils::RKernelCopyAllocatorHelper::TryUnlock()
    {
    // Not supported
    }

#endif // __KERNEL_MODE__

TInt RAllocatorHelper::OpenChunkHeap(TLinAddr aChunkBase, TInt aChunkMaxSize)
    {
#ifdef __KERNEL_MODE__
    // Must be in CS
    // Assumes this only ever gets called for the kernel heap, and that the caller has already
    // set iAllocatorAddress. Anything else goes through RUserAllocatorHelper::OpenUserHeap.
    TInt udeb = EFalse; // We can't figure this out until after we've got the heap
    TBool isTheKernelHeap = ETrue;
#else
    // Assumes the chunk isn't the kernel heap. It's not a good idea to try messing with the
    // kernel heap from user side...
    TInt udeb = EuserIsUdeb();
    if (udeb < 0) return udeb; // error
    TBool isTheKernelHeap = EFalse;
#endif
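
    // Identification is attempted at the chunk base first (the classic RHeap layout) and, if
    // that fails and the caller supplied a non-zero aChunkMaxSize, retried at the middle of
    // the chunk in case it's an RHybridHeap. Kernel-heap callers pass zero, so they never hit
    // the retry.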
    TInt err = IdentifyAllocatorType(udeb, isTheKernelHeap);
    if (err == KErrNone && iAllocatorType == EAllocator)
        {
        // We've no reason to assume it's an allocator, because we don't know that
        // iAllocatorAddress actually is an RAllocator*
        err = KErrNotFound;
        }
    if (err && aChunkMaxSize > 0)
        {
        TInt oldErr = err;
        TAllocatorType oldType = iAllocatorType;
        // Try middle of chunk, in case it's an RHybridHeap
        iAllocatorAddress += aChunkMaxSize / 2;
        err = IdentifyAllocatorType(udeb, isTheKernelHeap);
        if (err || iAllocatorType == EAllocator)
            {
            // No better than before
            iAllocatorAddress = aChunkBase;
            iAllocatorType = oldType;
            // ...
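
    // How the udeb detection below works: RAllocator::ESetFail stores a simulated-failure
    // reason which EGetFail (DebugFunction opcode 7) reads back. Setting an invalid reason
    // (9999) doesn't disturb the heap, and only a UDEB build of the new allocator echoes it
    // back; the old RHeap doesn't support EGetFail at all, so for it we set EFailNext and
    // check whether a test allocation really fails (it only does on a UDEB heap).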
    RAllocator* kernelAllocator = reinterpret_cast<RAllocator*>(iAllocatorAddress);
    kernelAllocator->DebugFunction(RAllocator::ESetFail, (TAny*)9999, (TAny*)0); // Use an invalid fail reason - this should have no effect on the operation of the heap
    TInt err = kernelAllocator->DebugFunction(7, NULL, NULL); // 7 is RAllocator::TAllocDebugOp::EGetFail
    if (err == 9999)
        {
        // udeb new hybrid heap
        udeb = ETrue;
        }
    else if (err == KErrNotSupported)
        {
        // Old heap - fall back to slightly nasty non-thread-safe method
        kernelAllocator->DebugFunction(RAllocator::ESetFail, (TAny*)RAllocator::EFailNext, (TAny*)1);
        TAny* res = Kern::Alloc(4);
        if (!res) udeb = ETrue;
        Kern::Free(res);
        }
    else
        {
        // it's new urel
        }

    // Put everything back
    kernelAllocator->DebugFunction(RAllocator::ESetFail, (TAny*)RAllocator::ENone, (TAny*)0);
    // And update the type now we know the udeb-ness for certain
    err = IdentifyAllocatorType(udeb, isTheKernelHeap);
    }
#endif
    return err;
    }

// ...

    iPageCache = NULL;
    iPageCacheAddr = 0;
    KERN_LEAVE_CS();
    }
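
// aIsTheKernelHeap short-circuits the heuristic below: the check that iHandles points back
// at the heap's own iChunkHandle or iLock members evidently doesn't hold for the kernel
// heap, so that is taken to be an RHeap of some kind up front.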
TInt RAllocatorHelper::IdentifyAllocatorType(TBool aAllocatorIsUdeb, TBool aIsTheKernelHeap)
    {
    iAllocatorType = EUnknown;

    TUint32 handlesPtr = 0;
    TInt err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iHandles), handlesPtr);

    if (err) return err;
    if (aIsTheKernelHeap ||
        handlesPtr == iAllocatorAddress + _FOFF(RHackHeap, iChunkHandle) ||
        handlesPtr == iAllocatorAddress + _FOFF(RHackHeap, iLock))
        {
        // It's an RHeap of some kind - I doubt any other RAllocator subclass will use iHandles in this way
        TUint32 base = 0;
        err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), base);
        if (err) return err;