// LoggingAllocator.cpp
//
// Copyright (c) 2010 Accenture. All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Accenture - Initial contribution
//
#include <fshell/LoggingAllocator.h>
#include <fshell/extrabtrace.h>
//#include <e32debug.h>

//#define LOG(args...) RDebug::Printf(args)
#define LOG(args...)

// Subcategories for use with BTrace::ETest1 when EOldFormatLogging is defined
enum TSubCategories
	{
	EAlloc,
	EAllocCont,
	EAllocHeaven, // AllocHeaven means "This cell has leaked"
	EAllocHeavenData,
	EFree,
	EFreeCont,
	EFreeCellCorrupted,
	};

const TInt KHeapCategory = 14; // BTrace::EHeap
enum THeap
	{
	EHeapCreate,
	EHeapChunkCreate,
	EHeapAlloc,
	EHeapReAlloc,
	EHeapFree,
	// My additions go after this
	ELoggingAllocatorInstalled = 128,
	EHeapPrimingFinished,
	EHeapExtendedAlloc,
	EHeapExtendedFree,
	EHeapExtendedRealloc,
	};

const TInt KUidUninstallLoggingAllocator = 0x10285BAB;
const TInt KMaxDeferredFrees = 100;

RLoggingAllocator::RLoggingAllocator(TUint aFlags)
	: RAllocator(), iA(NULL), iBreakOnAllocCount(0), iFlags(aFlags)
	{
	if (iFlags & EDeferFree) iFlags |= EScribbleFrees; // The only reason (currently) to defer frees is to do the scribble check, which requires the cells to be scribbled
	iPid = RProcess().Id(); // Cache this to save a few exec calls
	}

void RLoggingAllocator::Free(TAny* aPtr)
	{
	if (!aPtr) return; // Don't care about tracing attempts to free the null pointer (this is what RHeap does too)
	TraceFree(iA, aPtr);
	TInt allocLen = AllocLen(aPtr);

	if (iFlags & EScribbleFrees)
		{
		// Don't scribble after calling free - the heap might have been shrunk, meaning we'd crash
		memset(aPtr, 0xDF, allocLen);
		}
	if (iFlags & EDeferFree)
		{
		CheckDeferredFrees();
		iLock.Wait();
		if (iDeferredFrees.Count() == KMaxDeferredFrees)
			{
			// The deferred list is full - actually free the oldest cell to make room
			iA->Free(iDeferredFrees[0].iPtr);
			iDeferredFrees.Remove(0);
			}
		SDeferredFreeCell cell = {aPtr, allocLen};
		iDeferredFrees.Append(cell); // Can't fail because we preallocate the memory
		iLock.Signal();
		}
	else
		{
		iA->Free(aPtr);
		}

	if (iFlags & EOldFormatLogging)
		{
		BTrace8(KHeapCategory, EHeapFree, iA, aPtr);
		}
	}
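
// A minimal sketch of the use-after-free detection that EDeferFree (which implies
// EScribbleFrees) enables - hypothetical client code, not part of this file:
//
//	TInt* p = new TInt(42);
//	delete p;	// The cell is scribbled with 0xDF and parked on the deferred list
//	*p = 7;		// Use-after-free: the next CheckDeferredFrees() finds a word that
//			// isn't 0xDFDFDFDF and panics with category "LoggingAllocator"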

TAny* RLoggingAllocator::Alloc(TInt aSize)
	{
	CheckDeferredFrees();
	TAny* cellPtr = iA->Alloc(aSize);
	TraceAlloc(iA, cellPtr, aSize);
	if (cellPtr && (iFlags & EOldFormatLogging))
		{
		TUint32 remainder[] = { AllocLen(cellPtr), aSize };
		BTraceN(KHeapCategory, EHeapAlloc, iA, cellPtr, remainder, sizeof(remainder));
		}
	return cellPtr;
	}

void RLoggingAllocator::DoTraceAllocEvent(RAllocator* aAllocator, TAny* aCellPtr, TInt aAllocEvent, TInt aRequestedSize, TAny* aOldPtr)
	{
	// Important: we don't reference iA in this function, only aAllocator
	TUint32 t = 0;
	TUint32* sp = &t;
	TThreadStackInfo stackInfo;
	TInt err = RThread().StackInfo(stackInfo);
	if (err == KErrNone && (iFlags & EOldFormatLogging))
		{
		if (aAllocEvent != EHeapAlloc && aAllocEvent != EHeapFree) return; // We don't do stacktraces for reallocs in the old format
		TBuf8<KMaxBTraceDataArray> buf;
		const TInt KNumBtraceFrames = 2;
		for (TInt i = 0; i < KNumBtraceFrames; ++i)
			{
			while (((TUint32)sp < (TUint32)stackInfo.iBase) && (buf.Size() <= (buf.MaxSize() - 4)))
				{
				if ((*sp >= 0x70000000) && (*sp <= 0x8fffffff)) // Is this a valid ROM or RAM address? Note, the address ranges are for the 2GB EKA2 memory map.
					{
					buf.Append(TPtrC8((TUint8*)sp, 4));
					}
				++sp;
				}
			if (buf.Size() > 0)
				{
				TUint8 subcat = (aAllocEvent == EHeapAlloc) ? EAlloc : EFree;
				BTraceN(BTrace::ETest1, i == 0 ? subcat : subcat+1, (TUint)RThread().Id().Id(), aCellPtr, buf.Ptr(), buf.Size());
				buf.Zero();
				}
			}
		buf.FillZ(); // To avoid leaving a bunch of addresses kicking around on the stack to be misinterpreted later on.
		}
	else if (err == KErrNone)
		{
		// New format
		const TInt KBufSize = 160;
		TUint8 buf[KBufSize];
		TUint32* ptr = (TUint32*)buf;
		TUint32 const*const end = (TUint32*)(buf + KBufSize);
		// uint 0 is aAllocator, argument a1 to BTraceBig below
		*(ptr++) = (TLinAddr)aCellPtr; // uint 1
		TBool alreadyFreed = (aAllocEvent == EHeapFree && aRequestedSize == -1); // Can't call AllocLen on a freed cell (we only catch the frees from ReAlloc after the cell's already been deallocated)
		// uint 2 is cell size, unless it's not a valid cell, in which case it's zero (for a failed NULL alloc) or -1 for a free of a cell that's already been freed
		if (alreadyFreed)
			{
			*(ptr++) = 0xFFFFFFFFu;
			}
		else if (aCellPtr == NULL)
			{
			*(ptr++) = 0;
			}
		else
			{
			*(ptr++) = aAllocator->AllocLen(aCellPtr);
			}
		*(ptr++) = (aAllocEvent == EHeapFree) ? 0xFFFFFFFFu : aRequestedSize; // uint 3
		*(ptr++) = (TUint)RThread().Id(); // uint 4
		*(ptr++) = iPid; // uint 5
		if (aAllocEvent == EHeapReAlloc) *(ptr++) = (TUint)aOldPtr; // uint 6, for reallocs only
		while (((TUint32)sp < (TUint32)stackInfo.iBase) && ptr != end)
			{
			TUint addr = *sp;
			if ((addr >= 0x70000000) && (addr <= 0x8fffffff)) // Is this a valid ROM or RAM address? Note, the address ranges are for the 2GB EKA2 memory map.
				{
				*(ptr++) = addr;
				}
			sp++;
			}
		TUint8 subcat;
		switch (aAllocEvent)
			{
			case EHeapAlloc: subcat = EHeapExtendedAlloc; break;
			case EHeapFree: subcat = EHeapExtendedFree; break;
			case EHeapReAlloc: subcat = EHeapExtendedRealloc; break;
			default:
				return; // Shouldn't get this
			}
		BTraceBig(KHeapCategory, subcat, aAllocator, buf, (TLinAddr)ptr - (TLinAddr)buf);
		Mem::FillZ(buf, KBufSize); // To avoid leaving a bunch of addresses kicking around on the stack to be misinterpreted later on.
		}

#ifdef _DEBUG
	if (iBreakOnAllocCount && iHelper.AllocCountForCell(aCellPtr) == iBreakOnAllocCount)
		{
		iBreakOnAllocCount = *&iBreakOnAllocCount; // Something to set a breakpoint on
		}
#endif
	}
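
// For reference, the extended ("new format") record assembled above is a sequence of
// 32-bit words (this just restates the inline comments in one place):
//
//	uint 0: the allocator address (passed as argument a1 to BTraceBig, not stored in buf)
//	uint 1: the cell address
//	uint 2: the cell size, or 0 for a failed alloc, or 0xFFFFFFFF for an already-freed cell
//	uint 3: the requested size, or 0xFFFFFFFF for frees
//	uint 4: the thread id
//	uint 5: the process id
//	uint 6: the old cell address (reallocs only)
//	remaining uints: candidate return addresses harvested from the current stack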

EXPORT_C void RLoggingAllocator::StaticTraceAlloc(RAllocator* aAllocator, TAny* aCellPtr, TInt aRequestedSize)
	{
	RLoggingAllocator(0).TraceAlloc(aAllocator, aCellPtr, aRequestedSize);
	//if (iFlags & EOldFormatLogging)
	//	{
	//	TUint32 remainder[] = { aAllocator->AllocLen(aCellPtr), aRequestedSize };
	//	BTraceN(KHeapCategory, EHeapAlloc, aAllocator, aCellPtr, remainder, sizeof(remainder));
	//	}
	}

EXPORT_C void RLoggingAllocator::StaticTraceFree(RAllocator* aAllocator, TAny* aCellPtr)
	{
	RLoggingAllocator(0).TraceFree(aAllocator, aCellPtr);
	//if (iFlags & EOldFormatLogging)
	//	{
	//	BTrace8(KHeapCategory, EHeapFree, aAllocator, aCellPtr);
	//	}
	}

TAny* RLoggingAllocator::ReAlloc(TAny* aPtr, TInt aSize, TInt aMode)
	{
	TAny* res = iA->ReAlloc(aPtr, aSize, aMode);
	if (res != aPtr)
		{
		// If the realloc has actually moved the cell, we need to trace the free, to be compatible with how RHeap does it
		if (res && aPtr)
			{
			// It is technically legal to call ReAlloc passing in a null pointer - it behaves the same as Alloc
			TraceFree(iA, aPtr, ETrue);
			if (iFlags & EOldFormatLogging)
				{
				BTrace8(KHeapCategory, EHeapFree, iA, aPtr);
				}
			}
		}
	TraceRealloc(res, aPtr, aSize);
	if (res && (iFlags & EOldFormatLogging))
		{
		TUint32 remainder[] = { AllocLen(res), aSize, (TUint32)aPtr };
		BTraceN(KHeapCategory, EHeapReAlloc, iA, res, remainder, sizeof(remainder));
		}
	return res;
	}
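
// A sketch of the trace sequence this produces when a cell has to move
// (hypothetical client code; whether it actually moves depends on the heap state):
//
//	TAny* p = User::Alloc(8);		// traces: alloc(p)
//	TAny* q = User::ReAlloc(p, 4096);	// if q != p, traces: free(p) then realloc(q, old=p)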

TInt RLoggingAllocator::AllocLen(const TAny* aCell) const
	{
	return iA->AllocLen(aCell);
	}

TInt RLoggingAllocator::Compress()
	{
	return iA->Compress();
	}

void RLoggingAllocator::Reset()
	{
	iA->Reset();
	}

TInt RLoggingAllocator::AllocSize(TInt& aTotalAllocSize) const
	{
	return iA->AllocSize(aTotalAllocSize);
	}

TInt RLoggingAllocator::Available(TInt& aBiggestBlock) const
	{
	return iA->Available(aBiggestBlock);
	}

TInt RLoggingAllocator::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
	{
	LOG("LA: DebugFunction %d", aFunc);
	if (aFunc == RAllocator::EMarkEnd)
		{
		// First we need to work out the nesting level of the underlying heap, which isn't something we can
		// ask the underlying heap for directly. So allocate a cell, and pull the nesting level out of that.
		// If that returns an error, it's not a UDEB heap and we don't continue
		TAny* testAlloc = iA->Alloc(4);
		if (testAlloc)
			{
			TInt nestingLevel;
			TInt err = iHelper.GetCellNestingLevel(testAlloc, nestingLevel);
			iA->Free(testAlloc);
			if (!err)
				{
				LOG("LA: Doing walk, nestinglevel = %d", nestingLevel);
				iHelper.Walk(&HeavenWalk, (TAny*)nestingLevel);
				// It doesn't actually matter what Walk returns - either the data got written or it didn't
				}
			}

		// Now do the real markend, which does more than just output the allocHeavens
		return iA->DebugFunction(aFunc, a1, a2);
		}
#ifndef __KERNEL_MODE__
	else if (aFunc == KUidUninstallLoggingAllocator)
		{
		RAllocator* current = &User::Allocator();
		if (current == this)
			{
			User::SwitchAllocator(iA);
			Destroy();
			return KErrNone;
			}
		else
			{
			return KErrNotFound;
			}
		}
#endif
	else if (aFunc == KTempDisableLogging)
		{
		RAllocator** result = (RAllocator**)a1;
		*result = &User::Allocator();
		User::SwitchAllocator(iA);
		return KErrNone;
		}
	else
		{
		return iA->DebugFunction(aFunc, a1, a2);
		}
	}
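
// A minimal sketch of using KTempDisableLogging to suppress tracing around a noisy
// operation. This is hypothetical client code (KTempDisableLogging comes from the
// header), and assumes the logging allocator is currently installed:
//
//	RAllocator* logger = NULL;
//	User::Allocator().DebugFunction(KTempDisableLogging, &logger, NULL);
//	// ... allocations here go straight to the underlying allocator, untraced ...
//	User::SwitchAllocator(logger); // Reinstate the logging allocator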

TBool RLoggingAllocator::TraceExistingAllocs(TAny* aContext, RAllocatorHelper::TCellType aType, TLinAddr aCell, TInt aLen)
	{
	RLoggingAllocator* self = (RLoggingAllocator*)aContext;
	if (aType == RAllocatorHelper::EAllocation)
		{
		TAny* cellPtr = (TAny*)aCell;
		if (self->iFlags & EOldFormatLogging)
			{
			TUint32 remainder[] = { aLen, aLen };
			BTraceN(KHeapCategory, EHeapAlloc, self->iA, cellPtr, remainder, sizeof(remainder));
			}
		else
			{
			TUint32 remainder[] = { aLen, aLen, (TUint)RThread().Id(), self->iPid };
			TUint8 subcat = EHeapExtendedAlloc;
			BTraceN(KHeapCategory, subcat, self->iA, cellPtr, remainder, sizeof(remainder));
			}
		}
	return ETrue; // Keep going
	}

EXPORT_C TInt RLoggingAllocator::Install()
	{
	return Install(0);
	}

EXPORT_C TInt RLoggingAllocator::Install(TUint aFlags)
	{
	RLoggingAllocator* dontCareResult = NULL;
	return New(aFlags, NULL, dontCareResult);
	}

EXPORT_C TInt RLoggingAllocator::New(TUint aFlags, RAllocator* aOrigAllocator, RLoggingAllocator*& aResult)
	{
	LOG("RLoggingAllocator::New %x", aFlags);
	RLoggingAllocator* a = new RLoggingAllocator(aFlags);
	if (!a) return KErrNoMemory;

	TInt err = a->iLib.Load(_L("LoggingAllocator"));
	if (err)
		{
		LOG("LA: Failed to RLibrary load ourselves");
		a->Destroy();
		return err;
		}

	err = a->iLock.CreateLocal();
	if (err)
		{
		LOG("LA: Failed to create lock");
		a->Destroy();
		return err;
		}
	if (aFlags & EDeferFree)
		{
		err = a->iDeferredFrees.Reserve(KMaxDeferredFrees);
		if (err)
			{
			LOG("LA: No mem to reserve deferred free list");
			a->Destroy();
			return err;
			}
		}

	// Do this *before* switching the allocator, in case we're using atrace, which means it will have to alloc for the first trace
	BTrace12(KHeapCategory, (TUint)ELoggingAllocatorInstalled, aOrigAllocator ? aOrigAllocator : &User::Allocator(), TUint(RThread().Id()), a->iPid);

	// Ditto
	err = a->iHelper.Open(aOrigAllocator ? aOrigAllocator : &User::Allocator());
	LOG("LA: RAllocatorHelper Open returned %d", err);

	RAllocator* old = aOrigAllocator;
	if (old == NULL)
		{
		// If the caller passed in an allocator, we shouldn't switch the default allocator - so only switch when wrapping the current one
		old = User::SwitchAllocator(a);
		}
	a->iA = old;

	if (!err)
		{
		// Prime the trace with a synthetic alloc event for every cell already on the heap
		//err = a->DebugFunction(RHeap::EWalk, (TAny*)&TraceExistingAllocs, a);
		err = a->iHelper.Walk(TraceExistingAllocs, a);
		}
	LOG("LA: HeapWalk returned %d", err);
	if (err == KErrNone)
		{
		BTrace12(KHeapCategory, (TUint)EHeapPrimingFinished, a->iA, TUint(RThread().Id()), a->iPid);
		}
	a->iHelper.SetCellNestingLevel(a, -1); // This is so we are immune from being included in any leak detection (because we have to leak the allocator so that it is still in use when the markend happens)
	aResult = a;
	return KErrNone;
	}
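
// A sketch of wrapping a heap other than the thread default (hypothetical client code).
// When aOrigAllocator is non-NULL, New() deliberately does not call User::SwitchAllocator(),
// so the caller decides where the returned wrapper gets used:
//
//	RLoggingAllocator* wrapper = NULL;
//	TInt err = RLoggingAllocator::New(0, otherHeap, wrapper); // otherHeap: an RAllocator* the caller owns
//	// if (!err), route allocations through wrapper instead of otherHeap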

EXPORT_C TInt RLoggingAllocator::Uninstall()
	{
	return User::Allocator().DebugFunction(KUidUninstallLoggingAllocator, NULL, NULL);
	}
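
// The typical lifecycle, as hypothetical client code:
//
//	TInt err = RLoggingAllocator::Install(0); // Wraps and switches the default allocator
//	// ... every Alloc/ReAlloc/Free on this heap is now traced ...
//	RLoggingAllocator::Uninstall(); // Switches back and destroys the wrapper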

void TraceAllocHeaven(TAny* aCell, TInt aLen)
	{
	TUint threadId = (TUint)RThread().Id().Id();
	BTrace12(BTrace::ETest1, EAllocHeaven, threadId, aCell, aLen);
	BTraceN(BTrace::ETest1, EAllocHeavenData, threadId, aCell, aCell, aLen);
	}

TBool RLoggingAllocator::HeavenWalk(RAllocatorHelper& aHelper, TAny* aPtr, RAllocatorHelper::TCellType aType, TLinAddr aCell, TInt aLen)
	{
	// If iA wasn't an RHeap (and more specifically, a UDEB RHeap) we ensure this function never gets called,
	// so here we can safely assume that it is one.

	// This function is based on RHeap::WalkCheckCell

	TInt nestingLevel = reinterpret_cast<TInt>(aPtr);

	switch (aType)
		{
		case RHeap::EGoodAllocatedCell:
			{
			TInt cellLevel;
			if (aHelper.GetCellNestingLevel((TAny*)aCell, cellLevel) == KErrNone && cellLevel == nestingLevel)
				{
				// A cell still allocated at the mark's nesting level when the markend runs has leaked
				TraceAllocHeaven((TAny*)aCell, aLen);
				}
			break;
			}
		default:
			break;
		}
	return ETrue;
	}

_LIT(KPan, "LoggingAllocator");

void RLoggingAllocator::CheckDeferredFrees()
	{
	if ((iFlags & EDeferFreeAndCheckScribbles) != EDeferFreeAndCheckScribbles) return; // Both bits of EDeferFreeAndCheckScribbles must be set for us to be able to check

	iLock.Wait();
	const TInt n = iDeferredFrees.Count();
	for (TInt i = 0; i < n; i++)
		{
		const TUint* ptr = (const TUint*)iDeferredFrees[i].iPtr;
		const TUint* end = (const TUint*)((TLinAddr)ptr + Min(16, iDeferredFrees[i].iLen)); // Check the first few words
		if (iA->AllocLen(ptr) != iDeferredFrees[i].iLen)
			{
			// Then the length of the cell has been corrupted
			iLock.Signal();
			User::Panic(KPan, (TUint)ptr);
			}
		while (ptr != end)
			{
			//RDebug::Printf("Checking %x", ptr);
			if (*ptr != 0xDFDFDFDF)
				{
				// Someone is using this cell after it's been 'deleted'
				//TODO btrace this
				iLock.Signal();
				User::Panic(KPan, (TUint)iDeferredFrees[i].iPtr);
				}
			ptr++;
			}
		}
	iLock.Signal();
	}
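
// Note the division of labour above: the AllocLen() comparison catches corruption of
// the cell's heap metadata (for example, a neighbouring cell overrunning into this
// cell's header), while the 0xDFDFDFDF scan catches writes through a stale pointer to
// the cell contents. In both cases the panic 'reason' is the address of the offending
// cell, so a crash log identifies which cell was stomped.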

RLoggingAllocator::~RLoggingAllocator()
	{
	// We must not be the current allocator when this is called!
	__ASSERT_ALWAYS(&User::Allocator() != this, User::Panic(KPan, 1));
	iLock.Close();
	iDeferredFrees.Close();
	iLib.Close();
	iHelper.Close();
	}

void RLoggingAllocator::Destroy()
	{
	// Keep the lib around until after the destructor has finished, otherwise you risk the DLL being unloaded midway through the destructor chain
	RLibrary lib = iLib;
	iLib.SetHandle(0);
	delete this;
	lib.Close();
	}

inline void RLoggingAllocator::TraceFree(RAllocator* aAllocator, TAny* aCellPtr, TBool aCellIsAlreadyFreed)
	{
	DoTraceAllocEvent(aAllocator, aCellPtr, EHeapFree, aCellIsAlreadyFreed ? -1 : 0);
	}

inline void RLoggingAllocator::TraceAlloc(RAllocator* aAllocator, TAny* aCellPtr, TInt aRequestedSize)
	{
	DoTraceAllocEvent(aAllocator, aCellPtr, EHeapAlloc, aRequestedSize);
	}

inline void RLoggingAllocator::TraceRealloc(TAny* aNewPtr, TAny* aOldPtr, TInt aNewRequestedSize)
	{
	DoTraceAllocEvent(iA, aNewPtr, EHeapReAlloc, aNewRequestedSize, aOldPtr);
	}