/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Collector.h"

#include "ArgList.h"
#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSArray.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "JSString.h"
#include "JSValue.h"
#include "MarkStack.h"
#include "Nodes.h"
#include "Tracing.h"
#include <algorithm>
#include <limits.h>
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>

#if PLATFORM(DARWIN)

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(WIN_OS)

#include <windows.h>
#include <malloc.h>

#elif PLATFORM(HAIKU)

#include <OS.h>

#elif PLATFORM(UNIX)

#include <stdlib.h>
#if !PLATFORM(HAIKU)
#include <sys/mman.h>
#endif
#include <unistd.h>

#if defined(QT_LINUXBASE)
#include <dlfcn.h>
#endif

#if defined(__UCLIBC__)
// versions of uClibc 0.9.28 and below do not have
// pthread_getattr_np or pthread_attr_getstack.
#if __UCLIBC_MAJOR__ == 0 && \
    (__UCLIBC_MINOR__ < 9 || \
    (__UCLIBC_MINOR__ == 9 && __UCLIBC_SUBLEVEL__ <= 30))
#define UCLIBC_USE_PROC_SELF_MAPS 1
#include <stdio_ext.h>
extern int *__libc_stack_end;
#endif
#endif

#if PLATFORM(SOLARIS)
#include <thread.h>
#else
#include <pthread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#if PLATFORM(QNX)
#include <fcntl.h>
#include <sys/procfs.h>
#include <stdio.h>
#include <errno.h>
#endif

#endif

#define COLLECT_ON_EVERY_ALLOCATION 0

using std::max;

namespace JSC {

// tunable parameters

const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))

#if ENABLE(JSC_MULTIPLE_THREADS)

#if PLATFORM(DARWIN)
typedef mach_port_t PlatformThread;
#elif PLATFORM(WIN_OS)
struct PlatformThread {
    PlatformThread(DWORD _id, HANDLE _handle) : id(_id), handle(_handle) {}
    DWORD id;
    HANDLE handle;
};
#endif

class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif

Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
    , m_globalData(globalData)
#if PLATFORM(SYMBIAN)
    , m_blockallocator(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE)
#endif
{
    ASSERT(globalData);
    memset(&primaryHeap, 0, sizeof(CollectorHeap));
    memset(&numberHeap, 0, sizeof(CollectorHeap));
}

Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
    m_markListSet = 0;

    sweep<PrimaryHeap>();
    // No need to sweep number heap, because the JSNumber destructor doesn't do anything.

    ASSERT(!primaryHeap.numLiveObjects);

    freeBlocks(&primaryHeap);
    freeBlocks(&numberHeap);

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif
#if PLATFORM(SYMBIAN)
    m_blockallocator.destroy();
#endif
    m_globalData = 0;
}

template <HeapType heapType>
NEVER_INLINE CollectorBlock* Heap::allocateBlock()
{
    // Disable the use of vm_map for the Qt build on Darwin, because when compiled on 10.4
    // it crashes on 10.5
#if PLATFORM(DARWIN) && !PLATFORM(QT)
    vm_address_t address = 0;
    // FIXME: tag the region as a JavaScriptCore heap when we get a registered VM tag: <rdar://problem/6054788>.
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(SYMBIAN)
    void* address = m_blockallocator.alloc();
    if (!address)
        CRASH();
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#elif PLATFORM(WINCE)
    void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif PLATFORM(WIN_OS)
#if COMPILER(MINGW)
    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#else
    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#endif
    memset(address, 0, BLOCK_SIZE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(address, 0, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;

    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

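    // mmap may return an address that is not BLOCK_SIZE-aligned; the extra bytes
    // requested above leave room to trim the unaligned head and the surplus tail below.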
    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
    memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);
#endif

    CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);
    block->freeList = block->cells;
    block->heap = this;
    block->type = heapType;

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    size_t numBlocks = heap.numBlocks;
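    // Grow the array of block pointers geometrically when it fills up; freeBlock()
    // shrinks it again once usage drops below the low-water mark.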
    if (heap.usedBlocks == numBlocks) {
        static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
        if (numBlocks > maxNumBlocks)
            CRASH();
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
        heap.numBlocks = numBlocks;
        heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
    }
    heap.blocks[heap.usedBlocks++] = block;

    return block;
}

template <HeapType heapType>
NEVER_INLINE void Heap::freeBlock(size_t block)
{
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    freeBlock(heap.blocks[block]);

    // swap with the last block so we compact as we go
    heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
    heap.usedBlocks--;

    if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
        heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
        heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*)));
    }
}

NEVER_INLINE void Heap::freeBlock(CollectorBlock* block)
{
    // Disable the use of vm_deallocate for the Qt build on Darwin, because when compiled on 10.4
    // it crashes on 10.5
#if PLATFORM(DARWIN) && !PLATFORM(QT)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(SYMBIAN)
    m_blockallocator.free(reinterpret_cast<void*>(block));
#elif PLATFORM(WINCE)
    VirtualFree(block, 0, MEM_RELEASE);
#elif PLATFORM(WIN_OS)
#if COMPILER(MINGW)
    __mingw_aligned_free(block);
#else
    _aligned_free(block);
#endif
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}

void Heap::freeBlocks(CollectorHeap* heap)
{
    for (size_t i = 0; i < heap->usedBlocks; ++i)
        if (heap->blocks[i])
            freeBlock(heap->blocks[i]);
    fastFree(heap->blocks);
    memset(heap, 0, sizeof(CollectorHeap));
}

void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.
    // NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost

    primaryHeap.extraCost += cost;
}

template <HeapType heapType> ALWAYS_INLINE void* Heap::heapAllocate(size_t s)
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants<heapType>::cellSize);

    ASSERT(heap.operationInProgress == NoOperation);
    ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
    // FIXME: If another global variable access here doesn't hurt performance
    // too much, we could CRASH() in NDEBUG builds, which could help ensure we
    // don't spend any time debugging cases where we allocate inside an object's
    // deallocation code.

#if COLLECT_ON_EVERY_ALLOCATION
    collect();
#endif

    size_t numLiveObjects = heap.numLiveObjects;
    size_t usedBlocks = heap.usedBlocks;
    size_t i = heap.firstBlockWithPossibleSpace;

    // if we have a huge amount of extra cost, we'll try to collect even if we still have
    // free cells left.
    if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;
        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
            goto collect;
    }

    ASSERT(heap.operationInProgress == NoOperation);
#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = Allocation;
#endif

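    // Scan forward from the first block that might have a free cell. If every block
    // turns out to be full, control moves to "collect", which triggers a collection
    // when enough new objects have piled up and otherwise allocates a brand-new block.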
scan:
    Block* targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
        targetBlockUsedCells = targetBlock->usedCells;
        ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
            if (++i == usedBlocks)
                goto collect;
            targetBlock = reinterpret_cast<Block*>(heap.blocks[i]);
            targetBlockUsedCells = targetBlock->usedCells;
            ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {

collect:
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;

        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
#ifndef NDEBUG
            heap.operationInProgress = NoOperation;
#endif
            bool foundGarbage = collect();
            numLiveObjects = heap.numLiveObjects;
            usedBlocks = heap.usedBlocks;
            i = heap.firstBlockWithPossibleSpace;
#ifndef NDEBUG
            heap.operationInProgress = Allocation;
#endif
            if (foundGarbage)
                goto scan;
        }

        // didn't find a block, and GC didn't reclaim anything, need to allocate a new block
        targetBlock = reinterpret_cast<Block*>(allocateBlock<heapType>());
        heap.firstBlockWithPossibleSpace = heap.usedBlocks - 1;
        targetBlockUsedCells = 0;
    }

    // find a free spot in the block and detach it from the free list
    Cell* newCell = targetBlock->freeList;

    // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
    targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;

    targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
    heap.numLiveObjects = numLiveObjects + 1;

#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = NoOperation;
#endif

    return newCell;
}

void* Heap::allocate(size_t s)
{
    return heapAllocate<PrimaryHeap>(s);
}

void* Heap::allocateNumber(size_t s)
{
    return heapAllocate<NumberHeap>(s);
}

#if PLATFORM(WINCE)
void* g_stackBase = 0;

inline bool isPageWritable(void* page)
{
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;
}

static void* getStackBase(void* previousFrame)
{
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    static DWORD pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;
    }

    // scan all of memory starting from this frame, and return the last writeable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            else
                currentPage = 0;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;
        }
        return 0;
    } else {
        while (true) {
            // guaranteed to complete because isPageWritable returns false at end of memory
            currentPage += pageSize;
            if (!isPageWritable(currentPage))
                return currentPage;
        }
    }
}
#endif

#if PLATFORM(QNX)
static inline void *currentThreadStackBaseQNX()
{
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        struct _debug_thread_info threadInfo;
        memset(&threadInfo, 0, sizeof(threadInfo));
        threadInfo.tid = pthread_self();
        int fd = open("/proc/self", O_RDONLY);
        if (fd == -1) {
            LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
            return 0;
        }
        devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0);
        close(fd);
        stackBase = reinterpret_cast<void*>(threadInfo.stkbase);
        stackSize = threadInfo.stksize;
        ASSERT(stackBase);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
}
#endif

static inline void* currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86_64) && COMPILER(MSVC)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif PLATFORM(WIN_OS) && PLATFORM(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif PLATFORM(QNX)
    return currentThreadStackBaseQNX();
#elif PLATFORM(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif PLATFORM(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif PLATFORM(SYMBIAN)
    static void* stackBase = 0;
    if (stackBase == 0) {
        TThreadStackInfo info;
        RThread thread;
        thread.StackInfo(info);
        stackBase = (void*)info.iBase;
    }
    return (void*)stackBase;
#elif PLATFORM(HAIKU)
    thread_info threadInfo;
    get_thread_info(find_thread(NULL), &threadInfo);
    return threadInfo.stack_end;
#elif PLATFORM(UNIX)
#ifdef UCLIBC_USE_PROC_SELF_MAPS
    // Read /proc/self/maps and locate the line whose address
    // range contains __libc_stack_end.
    FILE *file = fopen("/proc/self/maps", "r");
    if (!file)
        return 0;
    __fsetlocking(file, FSETLOCKING_BYCALLER);
    char *line = NULL;
    size_t lineLen = 0;
    while (!feof_unlocked(file)) {
        if (getdelim(&line, &lineLen, '\n', file) <= 0)
            break;

        long from;
        long to;
        if (sscanf (line, "%lx-%lx", &from, &to) != 2)
            continue;
        if (from <= (long)__libc_stack_end && (long)__libc_stack_end < to) {
            fclose(file);
            free(line);
#ifdef _STACK_GROWS_UP
            return (void *)from;
#else
            return (void *)to;
#endif
        }
    }
    fclose(file);
    free(line);
    return 0;
#else
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
#if defined(QT_LINUXBASE)
        // LinuxBase is missing pthread_getattr_np - resolve it once at runtime instead
        // see http://bugs.linuxbase.org/show_bug.cgi?id=2364
        typedef int (*GetAttrPtr)(pthread_t, pthread_attr_t *);
        static int (*pthread_getattr_np_ptr)(pthread_t, pthread_attr_t *) = 0;
        if (!pthread_getattr_np_ptr)
            *(void **)&pthread_getattr_np_ptr = dlsym(RTLD_DEFAULT, "pthread_getattr_np");
#endif
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || PLATFORM(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#elif defined(QT_LINUXBASE)
        if (pthread_getattr_np_ptr)
            pthread_getattr_np_ptr(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#endif
#elif PLATFORM(WINCE)
    if (g_stackBase)
        return g_stackBase;
    else {
        int dummy;
        return getStackBase(&dummy);
    }
#else
#error Need a way to get the stack base on this platform
#endif
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if PLATFORM(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif PLATFORM(WIN_OS)
    HANDLE threadHandle = pthread_getw32threadhandle_np(pthread_self());
    return PlatformThread(GetCurrentThreadId(), threadHandle);
#endif
}

void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}

void Heap::registerThread()
{
    ASSERT(!m_globalData->mainThreadOnly || isMainThread());

    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}

void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}

void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}

#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char*) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)

void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(IS_POINTER_ALIGNED(start));
    ASSERT(IS_POINTER_ALIGNED(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

    size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
    size_t usedNumberBlocks = numberHeap.usedBlocks;
    CollectorBlock** primaryBlocks = primaryHeap.blocks;
    CollectorBlock** numberBlocks = numberHeap.blocks;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

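    // Conservative scan: treat every pointer-aligned word in [start, end) as a potential
    // cell pointer and mark it only if it actually lands inside a cell of a collector block.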
    while (p != e) {
        char* x = *p++;
        if (IS_HALF_CELL_ALIGNED(x) && x) {
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;
            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            // Mark the number heap; we can mark these Cells directly to avoid the virtual call cost
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    Heap::markCell(reinterpret_cast<JSCell*>(xAsBits));
                    goto endMarkLoop;
                }
            }

            // Mark the primary heap
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (reinterpret_cast<CollectorCell*>(xAsBits)->u.freeCell.zeroIfFree) {
                        markStack.append(reinterpret_cast<JSCell*>(xAsBits));
                        markStack.drain();
                    }
                    break;
                }
            }
        endMarkLoop:
            ;
        }
    }
}

void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(markStack, stackPointer, stackBase);
}

#if COMPILER(GCC)
#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
#else
#define REGISTER_BUFFER_ALIGNMENT
#endif

void Heap::markCurrentThreadConservatively(MarkStack& markStack)
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal(markStack);
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread.handle);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if PLATFORM(DARWIN)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread.handle);
#else
#error Need a way to resume threads on this platform
#endif
}

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if PLATFORM(DARWIN)

#if PLATFORM(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif PLATFORM(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif PLATFORM(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif

static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if PLATFORM(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif PLATFORM(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread.handle, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}

static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if PLATFORM(DARWIN)

#if __DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif PLATFORM(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if PLATFORM(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif PLATFORM(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif (PLATFORM(PPC) || PLATFORM(PPC64))
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}

void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
{
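    // The target thread stays suspended while we capture its registers and scan its
    // stack; otherwise both could change underneath the collector.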
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(markStack, stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif

void Heap::markStackObjectsConservatively(MarkStack& markStack)
{
    markCurrentThreadConservatively(markStack);

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so
        // a malloc inside markChildren() would risk a deadlock with a thread that had been
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(markStack, thread);
        }
#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}

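// m_protectedValues is a counted set, so nested protect()/unprotect() calls on the same
// cell balance out; the cell stays protected until its count drops back to zero.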
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

void Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);

    if (!k.isCell())
        return;

    m_protectedValues.remove(k.asCell());
}

void Heap::markProtectedObjects(MarkStack& markStack)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        markStack.append(it->first);
        markStack.drain();
    }
}

template <HeapType heapType> size_t Heap::sweep()
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else
    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.usedBlocks; block++) {
        Block* curBlock = reinterpret_cast<Block*>(heap.blocks[block]);

        size_t usedCells = curBlock->usedCells;
        Cell* freeList = curBlock->freeList;

        if (usedCells == HeapConstants<heapType>::cellsPerBlock) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < HeapConstants<heapType>::cellsPerBlock; i++) {
                if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                    Cell* cell = curBlock->cells + i;

                    if (heapType != NumberHeap) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                        // special case for allocated but uninitialized object
                        // (We don't need this check earlier because nothing prior to this point
                        // assumes the object has a valid vptr.)
                        if (cell->u.freeCell.zeroIfFree == 0)
                            continue;

                        imp->~JSCell();
                    }

                    --usedCells;
                    --numLiveObjects;

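                    // The free-list "next" link below is stored as an offset from the
                    // cell that follows, matching the decoding done in heapAllocate().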
                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = 0;
                    cell->u.freeCell.next = freeList - (cell + 1);
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
                Cell* cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == 0) {
                    ++minimumCellsToProcess;
                } else {
                    if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                        if (heapType != NumberHeap) {
                            JSCell* imp = reinterpret_cast<JSCell*>(cell);
                            imp->~JSCell();
                        }
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = 0;
                        cell->u.freeCell.next = freeList - (cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->usedCells = static_cast<uint32_t>(usedCells);
        curBlock->freeList = freeList;
        curBlock->marked.clearAll();

        if (!usedCells)
            ++emptyBlocks;
    }

    if (heap.numLiveObjects != numLiveObjects)
        heap.firstBlockWithPossibleSpace = 0;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;

    if (!emptyBlocks)
        return numLiveObjects;

    size_t neededCells = 1.25f * (numLiveObjects + max(ALLOCATIONS_PER_COLLECTION, numLiveObjects));
    size_t neededBlocks = (neededCells + HeapConstants<heapType>::cellsPerBlock - 1) / HeapConstants<heapType>::cellsPerBlock;
    for (size_t block = 0; block < heap.usedBlocks; block++) {
        if (heap.usedBlocks <= neededBlocks)
            break;

        Block* curBlock = reinterpret_cast<Block*>(heap.blocks[block]);
        if (curBlock->usedCells)
            continue;

        freeBlock<heapType>(block);
        block--; // Don't move forward a step in this case
    }

    return numLiveObjects;
}

bool Heap::collect()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
    if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
        CRASH();

    JAVASCRIPTCORE_GC_BEGIN();
    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;

    // MARK: first mark all referenced objects recursively starting out from the set of root objects
    MarkStack& markStack = m_globalData->markStack;
    markStackObjectsConservatively(markStack);
    markProtectedObjects(markStack);
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
    if (m_globalData->exception)
        markStack.append(m_globalData->exception);
    m_globalData->interpreter->registerFile().markCallFrames(markStack, this);
    m_globalData->smallStrings.markChildren(markStack);
    if (m_globalData->functionCodeBlockBeingReparsed)
        m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
    if (m_globalData->firstStringifierToMark)
        JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);

    markStack.drain();
    markStack.compact();
    JAVASCRIPTCORE_GC_MARKED();

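    // SWEEP: anything not reached during the mark phase above is garbage; reclaim it
    // and report whether this collection actually freed any objects.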
    size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
    size_t numLiveObjects = sweep<PrimaryHeap>();
    numLiveObjects += sweep<NumberHeap>();

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END(originalLiveObjects, numLiveObjects);

    return numLiveObjects < originalLiveObjects;
}

size_t Heap::objectCount()
{
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects - m_globalData->smallStrings.count();
}

template <HeapType heapType>
static void addToStatistics(Heap::Statistics& statistics, const CollectorHeap& heap)
{
    typedef HeapConstants<heapType> HC;
    for (size_t i = 0; i < heap.usedBlocks; ++i) {
        if (heap.blocks[i]) {
            statistics.size += BLOCK_SIZE;
            statistics.free += (HC::cellsPerBlock - heap.blocks[i]->usedCells) * HC::cellSize;
        }
    }
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    JSC::addToStatistics<PrimaryHeap>(statistics, primaryHeap);
    JSC::addToStatistics<NumberHeap>(statistics, numberHeap);
    return statistics;
}

size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    return count;
}

size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}

static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
#if USE(JSVALUE32)
    if (cell->isNumber())
        return "number";
#endif
    if (cell->isGetterSetter())
        return "gettersetter";
    if (cell->isAPIValueWrapper())
        return "value wrapper";
    if (cell->isPropertyNameIterator())
        return "for-in iterator";
    ASSERT(cell->isObject());
    const ClassInfo* info = cell->classInfo();
    return info ? info->className : "Object";
}

HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}

bool Heap::isBusy()
{
    return (primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation);
}

Heap::iterator Heap::primaryHeapBegin()
{
    return iterator(primaryHeap.blocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

Heap::iterator Heap::primaryHeapEnd()
{
    return iterator(primaryHeap.blocks + primaryHeap.usedBlocks, primaryHeap.blocks + primaryHeap.usedBlocks);
}

} // namespace JSC