author      Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
date        Fri, 14 May 2010 16:40:13 +0300
changeset   22:79de32ba3296
parent      3:41300fa6a67c
child       30:5dc02b23752f
permissions -rw-r--r--
/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && PLATFORM(X86) && !PLATFORM(MAC)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
#include "JITStubs.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}

JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
    : m_interpreter(globalData->interpreter)
    , m_globalData(globalData)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
    , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
    , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
    , m_bytecodeIndex((unsigned)-1)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeIndex((unsigned)-1)
    , m_mappedVirtualRegisterIndex((unsigned)-1)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
{
}

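// Both variants of emitTimeoutCheck below decrement timeoutCheckRegister and, once it
// reaches zero, call the cti_timeout_check stub, whose result is placed back in
// timeoutCheckRegister. The JSVALUE32_64 variant additionally saves and reloads the
// last-result registers (regT1/regT0) around the stub call.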
#if USE(JSVALUE32_64)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(timeoutCheckRegister);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#else
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
    JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
    skipTimeout.link(this);

    killLastResultRegister();
}
#endif

#define NEXT_OPCODE(name) \
    m_bytecodeIndex += OPCODE_LENGTH(name); \
    break;

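// The DEFINE_* macros below expand to switch cases used by privateCompileMainPass()
// and privateCompileSlowCases(). DEFINE_BINARY_OP and DEFINE_UNARY_OP fall back to the
// matching cti_* stub for the opcode, DEFINE_OP dispatches to the opcode's emit_*
// method, and DEFINE_SLOWCASE_OP dispatches to its emitSlow_* method; every case then
// advances m_bytecodeIndex via NEXT_OPCODE.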
#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.addArgument(currentInstruction[3].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }
#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

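// First code-generation pass: walk the bytecode stream linearly, record a label for
// each bytecode offset in m_labels, and emit the fast-path code for every opcode via
// the DEFINE_* cases. Jumps recorded in m_jmpTable are resolved against these labels
// later, in privateCompileLinkPass().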
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_propertyAccessInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (m_bytecodeIndex = 0; m_bytecodeIndex < instructionCount; ) {
        Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeIndex);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeIndex > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if !USE(JSVALUE32_64)
        if (m_labels[m_bytecodeIndex].isUsed())
            killLastResultRegister();
#endif

        m_labels[m_bytecodeIndex] = label();

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_BINARY_OP(op_del_by_val)
#if USE(JSVALUE32)
        DEFINE_BINARY_OP(op_div)
#endif
        DEFINE_BINARY_OP(op_in)
        DEFINE_BINARY_OP(op_less)
        DEFINE_BINARY_OP(op_lesseq)
        DEFINE_BINARY_OP(op_urshift)
        DEFINE_UNARY_OP(op_is_boolean)
        DEFINE_UNARY_OP(op_is_function)
        DEFINE_UNARY_OP(op_is_number)
        DEFINE_UNARY_OP(op_is_object)
        DEFINE_UNARY_OP(op_is_string)
        DEFINE_UNARY_OP(op_is_undefined)
#if !USE(JSVALUE32_64)
        DEFINE_UNARY_OP(op_negate)
#endif
        DEFINE_UNARY_OP(op_typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitnot)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_construct_verify)
        DEFINE_OP(op_convert_this)
        DEFINE_OP(op_init_arguments)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
#if !USE(JSVALUE32)
        DEFINE_OP(op_div)
#endif
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_enter_with_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_global_var)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_get_scoped_var)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jmp_scopes)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jsr)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_load_varargs)
        DEFINE_OP(op_loop)
        DEFINE_OP(op_loop_if_less)
        DEFINE_OP(op_loop_if_lesseq)
        DEFINE_OP(op_loop_if_true)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_method_check)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
#if USE(JSVALUE32_64)
        DEFINE_OP(op_negate)
#endif
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_error)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_post_dec)
        DEFINE_OP(op_post_inc)
        DEFINE_OP(op_pre_dec)
        DEFINE_OP(op_pre_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_new_scope)
        DEFINE_OP(op_push_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter)
        DEFINE_OP(op_put_global_var)
        DEFINE_OP(op_put_scoped_var)
        DEFINE_OP(op_put_setter)
        DEFINE_OP(op_resolve)
        DEFINE_OP(op_resolve_base)
        DEFINE_OP(op_resolve_global)
        DEFINE_OP(op_resolve_skip)
        DEFINE_OP(op_resolve_with_base)
        DEFINE_OP(op_ret)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_sret)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_to_jsnumber)
        DEFINE_OP(op_to_primitive)

        case op_get_array_length:
        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_proto_list:
        case op_get_by_id_self:
        case op_get_by_id_self_list:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            ASSERT_NOT_REACHED();
        }
    }

    ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
    ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeIndex = (unsigned)-1;
#endif
}


void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeIndex], this);
    m_jmpTable.clear();
}

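// Second code-generation pass: emit the out-of-line slow paths for the slow-case jumps
// recorded during the main pass (m_slowCases). All slow cases belonging to a single
// bytecode are generated together, and each slow path ends by jumping back to the
// corresponding fast path via emitJumpSlowToHot(jump(), 0).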
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
#if USE(JSVALUE32_64)
    m_globalResolveInfoIndex = 0;
#endif
    m_callLinkInfoIndex = 0;

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if !USE(JSVALUE32_64)
        killLastResultRegister();
#endif

        m_bytecodeIndex = iter->to;
#ifndef NDEBUG
        unsigned firstTo = m_bytecodeIndex;
#endif
        Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitnot)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_construct_verify)
        DEFINE_SLOWCASE_OP(op_convert_this)
#if !USE(JSVALUE32)
        DEFINE_SLOWCASE_OP(op_div)
#endif
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_if_less)
        DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
        DEFINE_SLOWCASE_OP(op_loop_if_true)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_method_check)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
#if USE(JSVALUE32_64)
        DEFINE_SLOWCASE_OP(op_negate)
#endif
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_post_dec)
        DEFINE_SLOWCASE_OP(op_post_inc)
        DEFINE_SLOWCASE_OP(op_pre_dec)
        DEFINE_SLOWCASE_OP(op_pre_inc)
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
#if USE(JSVALUE32_64)
        DEFINE_SLOWCASE_OP(op_resolve_global)
#endif
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_jsnumber)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        default:
            ASSERT_NOT_REACHED();
        }

        ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        emitJumpSlowToHot(jump(), 0);
    }

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
#endif
    ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeIndex = (unsigned)-1;
#endif
}

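// Top-level compilation driver: emits the prologue (return-address spill and, for
// function code, the register file overflow check), runs the main, link and slow-case
// passes, then uses a LinkBuffer to resolve switch tables, exception handler targets,
// call records, and the property-access, call-link and method-call info structures
// before finalizing the generated code.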
JITCode JIT::privateCompile()
{
    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    // Could use a pop_m, but would need to offset the following instruction if so.
    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);

    Jump slowRegisterFileCheck;
    Label afterRegisterFileCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        // In the case of a fast linked call, we do not set this up in the caller.
        emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

        peek(regT0, OBJECT_OFFSETOF(JITStackFrame, registerFile) / sizeof (void*));
        addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);

        slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, OBJECT_OFFSETOF(RegisterFile, m_end)));
        afterRegisterFileCheck = label();
    }

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_codeBlock->codeType() == FunctionCode) {
        slowRegisterFileCheck.link(this);
        m_bytecodeIndex = 0;
        JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
        m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
        jump(afterRegisterFileCheck);
    }

    ASSERT(m_jmpTable.isEmpty());

    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeIndex = record.bytecodeIndex;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->second.branchOffset;
                it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    if (m_codeBlock->hasExceptionInfo()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
            m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeIndex(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeIndex));
    }

    // Link absolute addresses for jsr
    for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
        patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
    }
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.ownerCodeBlock = m_codeBlock;
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
    }
#endif
    unsigned methodCallCount = m_methodCallCompilationInfo.size();
    m_codeBlock->addMethodCallLinkInfos(methodCallCount);
    for (unsigned i = 0; i < methodCallCount; ++i) {
        MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
        info.structureLabel = patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare);
        info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
    }

    return patchBuffer.finalizeCode();
}

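// Variable-object register access (only compiled when JSVALUE32_64 is not in use): the
// registers sit behind two indirections, JSVariableObject::d and then
// JSVariableObjectData::registers, so both helpers chase those pointers before loading
// or storing the indexed Register slot.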
#if !USE(JSVALUE32_64)
void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
{
    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst);
    loadPtr(Address(dst, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), dst);
    loadPtr(Address(dst, index * sizeof(Register)), dst);
}

void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index)
{
    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), variableObject);
    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), variableObject);
    storePtr(src, Address(variableObject, index * sizeof(Register)));
}
#endif

#if ENABLE(JIT_OPTIMIZE_CALL)
void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
{
    // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
    // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
    // match). Reset the check so it no longer matches.
    RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock.get());
#if USE(JSVALUE32_64)
    repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0);
#else
    repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue()));
#endif
}

void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    // Currently we only link calls with the exact number of arguments.
    // If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
    if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
        ASSERT(!callLinkInfo->isLinked());

        if (calleeCodeBlock)
            calleeCodeBlock->addCaller(callLinkInfo);

        repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
        repatchBuffer.relink(callLinkInfo->hotPathOther, code.addressForCall());
    }

    // patch the call so we do not continue to try to link.
    repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
}
#endif // ENABLE(JIT_OPTIMIZE_CALL)

} // namespace JSC

#endif // ENABLE(JIT)